import pandas as pd
import requests
from bs4 import BeautifulSoup
from openpyxl import Workbook, load_workbook


class CveDetailSpider2:
    """Scrapes CVE records from cvedetails.com, year by year, into an Excel file."""

    result_num = 0
    name = 'cve_detail'
    allowed_domains = ['https://www.cvedetails.com']
    start_urls = [
        "https://www.cvedetails.com/vulnerability-list/year-" + str(i) + "/vulnerabilities.html"
        for i in range(1999, 2000)
    ]
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
                             ' AppleWebKit/537.36 (KHTML, like Gecko)'
                             ' Chrome/58.0.3029.110 Safari/537.3'}
    def get_url(self, page, year, trc):
        # Listing URL with every vulnerability-type filter disabled; page, year
        # and trc are interpolated into the query string.
        return ("https://www.cvedetails.com/vulnerability-list.php?vendor_id=0&product_id=0&version_id=0"
                "&page={}&hasexp=0&opdos=0&opec=0&opov=0&opcsrf=0&opgpriv=0&opsqli=0&opxss=0&opdirt=0"
                "&opmemc=0&ophttprs=0&opbyp=0&opfileinc=0&opginf=0&cvssscoremin=0&cvssscoremax=0"
                "&year={}&month=0&cweid=0&order=1&trc={}&sha=b87d72f681722fd5f26c1153b2202a4f05acfff1").format(page, year, trc)
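    # Note: the trc (total result count) and sha query parameters above were
    # presumably captured from a live browser session; the sha token appears to
    # be tied to this exact filter combination and may expire, so treat
    # get_url() as a best-effort helper rather than a stable API.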
    def parse(self, start_y=1999, end_y=2000):
        for year in range(start_y, end_y + 1):
            response = requests.get(
                "https://www.cvedetails.com/vulnerability-list/year-" + str(year) + "/vulnerabilities.html",
                headers=self.headers)
            soup = BeautifulSoup(response.content, 'html.parser')
            rows = soup.find_all('div', {'id': 'pagingb'})
            # Walk every pagination link for the year.
            for row in rows[0].find_all('a', href=True):
                newurl = "https://www.cvedetails.com" + row['href']
                tag = self.parse1(newurl, year)
                if tag == 0:
                    print('continue!---', year)
                    continue
                if tag == 1:
                    print('break!---', year)
                    break

    def parse1(self, url, year):
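        """Parse one listing page; return 0 to skip it or 1 to stop this year.

        The 6000-10000 window on result_num appears intended to resume a
        partially completed crawl: pages before roughly the 6000th record are
        skipped, and the year is abandoned past the 10000th.
        """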
        response = requests.get(url, headers=self.headers)
        soup = BeautifulSoup(response.content, 'html.parser')
        rows = soup.find_all('tr', {'class': 'srrowns'})
        self.result_num += len(rows)
        if self.result_num < 6000:
            print("Skipping, result_num: %d" % self.result_num)
            return 0
        if self.result_num > 10000:
            print("Stopping, result_num: %d" % self.result_num)
            self.result_num = 0
            return 1
        print('self.result_num:', self.result_num)
        for row in rows:
            durl = "https://www.cvedetails.com" + row.find_all('a', href=True)[0]['href']
            print(durl)
            if durl.split('/')[-2].split('-')[1] != str(year):
                print('Data for year {0} is complete; moving on to the next year.'.format(year))
                self.result_num = 0
                return 1
            # These detail pages are skipped outright (presumably problematic
            # in earlier runs).
            if durl not in ['https://www.cvedetails.com/cve/CVE-2010-4609/',
                            'https://www.cvedetails.com/cve/CVE-2010-2812/',
                            'https://www.cvedetails.com/cve/CVE-2014-0629/',
                            'https://www.cvedetails.com/cve/CVE-2017-14064/',
                            'https://www.cvedetails.com/cve/CVE-2017-11027/',
                            'https://www.cvedetails.com/cve/CVE-2017-8760/']:
                self.parse2(durl)
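    # parse2 extracts one CVE detail page and appends the record to the
    # workbook immediately, so a crash mid-crawl loses at most one row.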
    def parse2(self, path2):
        response = requests.get(path2, headers=self.headers)
        soup = BeautifulSoup(response.content, 'html.parser')

        # The CVE id lives in the page's <h1> anchor.
        rows = soup.find_all('h1')
        cveid = ""
        if len(rows) > 0:
            print(rows[0].find_all('a', href=True)[0].text)
            cveid = rows[0].find_all('a', href=True)[0].text
        if cveid in exist_cv1:  # exist_cv1 is the module-level dedup list
            print('{0} already exists, skipping!'.format(cveid))
            return

        rows = soup.find_all('div', {'class': 'cvedetailssummary'})
        describe = ''
        if len(rows) > 0:
            describe_str = rows[0].text.strip()
            describe = describe_str.split('\t')[0]

        score = ''
        rows = soup.find_all('div', {'class': 'cvssbox'})
        if len(rows) > 0:
            print(rows[0].text)
            score = rows[0].text

        vulntype = ""
        rows = soup.find_all('span', {'class': 'vt_dos'})
        if len(rows) > 0:
            print(rows[0].text)
            vulntype = rows[0].text

        # Affected-products table: one row per (type, vendor, product, version).
        producttype = []
        vendor = []
        product = []
        version = []
        rows_table = soup.find_all('table', {'class': 'listtable', 'id': 'vulnprodstable'})
        if len(rows_table) > 0:
            rows_tr = rows_table[0].find_all('tr')
            tr_num = len(rows_tr)
            if tr_num > 1:
                for i in range(1, tr_num):  # skip the header row
                    rows_td = rows_tr[i].find_all('td')
                    if len(rows_td) > 1:
                        producttype.append(rows_td[1].text.strip())
                        vendor.append(rows_td[2].text.strip())
                        product.append(rows_td[3].text.strip())
                        version.append(rows_td[4].text.strip())

        item = {
            'cveid': cveid,
            'describe': describe,
            'score': score,
            'vulntype': vulntype,
            'producttype': "|".join(set(producttype)),
            'vendor': "|".join(set(vendor)),
            'product': "|".join(set(product)),
            'version': "|".join(set(version)),
        }
        print(item)

        # Append the record; create the workbook with a header row on first use.
        filename = 'output(2015).xlsx'
        try:
            workbook = load_workbook(filename)
            worksheet = workbook.active
        except FileNotFoundError:
            workbook = Workbook()
            worksheet = workbook.active
            header = ['cveid', 'describe', 'score', 'vulntype', 'producttype', 'vendor', 'product', 'version']
            worksheet.append(header)
        finally:
            values = [item['cveid'], item['describe'], item['score'], item['vulntype'],
                      item['producttype'], item['vendor'], item['product'], item['version']]
            worksheet.append(values)
            workbook.save(filename)
            workbook.close()
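# Standalone one-shot variant of the crawl above: fetches a single listing
# page and, for each row, pulls the detail page but keeps only the first
# affected-product entry. It is never called from __main__, so it reads like
# a debugging helper.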
def get_cve_data(start_page: int, num_records: int) -> None:
    url = f'https://www.cvedetails.com/vulnerability-list.php?' \
          f'vendor_id=0&product_id=0&version_id=0&page={start_page}' \
          f'&numrows={num_records}&hasexp=0&opdos=0&opec=0&opov=0' \
          f'&opcsrf=0&opgpriv=0&opsqli=0&opxss=0&opdirt=0&opmemc=0' \
          f'&ophttprs=0&opbyp=0&opfileinc=0&opginf=0&cvssscoremin=0' \
          f'&cvssscoremax=0&year=0&month=0&cweid=0&order=1&trc=0&sha=' \
          f'8a181058fa3202146b2bbf6c9a982505c6d25cc3'
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
                             ' AppleWebKit/537.36 (KHTML, like Gecko)'
                             ' Chrome/58.0.3029.110 Safari/537.3'}
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.content, 'html.parser')
    rows = soup.find_all('tr', {'class': 'srrowns'})
    for row in rows:
        path2 = row.find_all('a', href=True)[0]['href']
        path2 = "https://www.cvedetails.com" + path2
        # path2 = 'https://www.cvedetails.com/cve/CVE-1999-1567/'  # debug: pin to a single CVE
        print(path2)
        response = requests.get(path2, headers=headers)
        detail_soup = BeautifulSoup(response.content, 'html.parser')
        h1 = detail_soup.find_all('h1')
        print(h1[0].find_all('a', href=True)[0].text)
        cveid = h1[0].find_all('a', href=True)[0].text
        boxes = detail_soup.find_all('div', {'class': 'cvssbox'})
        print(boxes[0].text)
        score = boxes[0].text
        spans = detail_soup.find_all('span', {'class': 'vt_dos'})
        print(spans[0].text)
        vulntype = spans[0].text
        # Only the first affected-product row is kept here.
        table = detail_soup.find_all('table', {'class': 'listtable', 'id': 'vulnprodstable'})
        tds = table[0].find_all('td')
        print(tds[1].text.strip())
        producttype = tds[1].text.strip()
        vendor = tds[2].text.strip()
        product = tds[3].text.strip()
        version = tds[4].text.strip()
        item = {
            'cveid': cveid, 'score': score, 'vulntype': vulntype,
            'producttype': producttype, 'vendor': vendor,
            'product': product, 'version': version,
        }
        print(item)
if __name__ == '__main__':
    # Build the dedup list from records collected in previous runs.
    d1 = pd.read_excel('all_spyder(中文).xlsx')
    exist_cv1 = d1['cve'].tolist()
    d1 = pd.read_excel('output(2015).xlsx')
    exist_cv2 = d1['cveid'].tolist()
    exist_cv1.extend(exist_cv2)
    exist_cv1 = list(set(exist_cv1))
    print(exist_cv1[:10])

    crawler_tool = CveDetailSpider2()
    crawler_tool.parse(start_y=2017, end_y=2017)
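    # get_cve_data() is defined above but never invoked; a smoke test would be,
    # for example (page and row count here are arbitrary):
    # get_cve_data(start_page=1, num_records=50)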