Get all links from a website

Crawl the site with wget in spider mode (nothing is saved to disk) and pull the discovered URLs out of wget's log.
# Crawl the site without saving anything; wget's full crawl log, which records
# every URL it visits, goes to urls.txt for the next step to parse.
wget --spider --recursive --no-directories --output-file=urls.txt https://example.com
# Extract matching URLs from the wget log, dedupe, and print one per line.
# The regex is specific to Maryland MDE permit-detail pages; adapt it to the
# URL pattern you are after.
jq -n -R -r '[inputs | scan("https://services\\.mde\\.maryland\\.gov/Application/PermitInfoDetails\\?permitTypeId=[0-9]+")] | unique | .[]' urls.txt
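A generic variant of the jq filter, assuming you want every http(s) URL appearing anywhere in the log rather than one site's pattern (where a URL ends is a judgment call; this sketch stops at whitespace, quotes, and angle brackets):

# Grab every http(s) URL from the log, dedupe, print one per line.
jq -n -R -r '[inputs | scan("https?://[^\\s\"<>]+")] | unique | .[]' urls.txt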
(echo "url"; wget --spider --recursive --no-parent --no-verbose WEBSITE 2>&1 | grep -F " URL:" | grep -Eo 'https?://[^ ]+' | sort -u) > urls.csv