# Originally posted: Sep 29, 2020, 08:10 PM (paste metadata)
import os
import time

from zapv2 import ZAPv2

# Drive the OWASP ZAP spider over a list of subdomains and save the
# discovered URLs into one text file per subdomain under OUTPUT_DIR.

SUBDOMAIN_FILE = "https-subs.txt"  # one subdomain URL per line to spider
OUTPUT_DIR = "spiderurl"
APIKEY = '<your api key here>'  # ZAP: Tools > Options > API
# Local ZAP proxy endpoint (both schemes go through the same HTTP proxy).
PROXIES = {'http': 'http://127.0.0.1:8888', 'https': 'http://127.0.0.1:8888'}

# makedirs + exist_ok is portable and safe to re-run (mkdir raised
# FileExistsError on a second run).
os.makedirs(OUTPUT_DIR, exist_ok=True)

# One ZAP client for all scans — no need to rebuild it per subdomain.
zap = ZAPv2(apikey=APIKEY, proxies=PROXIES)

with open(SUBDOMAIN_FILE, "r") as spidersub:
    for line in spidersub:
        target = line.strip()  # drop the trailing newline before use
        if not target:
            continue  # skip blank lines in the input file
        # Output filename comes from the host part of the URL
        # (everything after the "scheme://" prefix, if present).
        host = target.split("://", 1)[-1]
        outpath = os.path.join(OUTPUT_DIR, host + ".txt")

        print('Spidering target {}'.format(target))
        scan_id = zap.spider.scan(target)
        # Poll until the spider reports 100% complete.
        while int(zap.spider.status(scan_id)) < 100:
            print('Spider progress %: {}'.format(zap.spider.status(scan_id)))
            time.sleep(10)  # time between checks of progress
        print('Spider has completed!')

        # "a+" preserves the original append behavior across re-runs;
        # the with-block guarantees the file is flushed and closed.
        with open(outpath, "a+") as spiderurl:
            header = "spider-urls for " + target
            spiderurl.write(header + '\n')
            spiderurl.write("=" * len(header) + '\n')
            spiderurl.write('\n'.join(map(str, zap.spider.results(scan_id))))
            spiderurl.write('\n' + ("=" * 100) + '\n')
27