# Get all links (URLs) of a website
# Crawl the site with --spider (nothing is downloaded) and save the unique
# URLs found into urls.txt.
# Why the filter: --output-file alone would store wget's raw log (timestamps,
# status lines), not a URL list, and --no-directories is a no-op under
# --spider since no files are written — so extract the URLs from the log.
wget --spider --recursive --no-parent --no-verbose https://example.com 2>&1 \
  | grep -Eo 'https?://[^ ]+' \
  | sort -u > urls.txt
# Produce a one-column CSV (header "url") of every unique URL that wget's
# spider crawl reports for WEBSITE (replace the placeholder before running).
# 2>&1 is needed because wget writes its log to stderr.
{
  echo "url"
  wget --spider --recursive --no-parent --no-verbose WEBSITE 2>&1 \
    | grep -F " URL:" \
    | grep -Eo 'https?://[^ ]+' \
    | sort -u
} > urls.csv