The system is implemented as follows:
The Python libraries used in the data-collection part of this project are: datetime, time, socket, pymysql, re, urllib, BeautifulSoup (bs4), lxml, os, and numpy, imported as shown below.
import datetime
import time
import socket
import pymysql
import re
import urllib.parse
import urllib.request  # used below to build requests and fetch pages
from urllib import error
from bs4 import BeautifulSoup  # HTML parser
import lxml  # parser backend used by BeautifulSoup ('lxml')
import os  # operating-system utilities
import numpy as np  # numerical utilities
Data crawling:
def get_html_text(index_url, page_url):
    for i in range(1, 80):
        url = index_url + str(i) + '&showtj=&showhot=&author=&key=&code='
        request = urllib.request.Request(url, headers=headers)
        try:
            res = urllib.request.urlopen(request)  # fetch the listing page; returns a response object
            html = res.read().decode('utf-8')  # read the raw HTML from the response
            soup = BeautifulSoup(html, 'lxml')
            result = soup.find_all('ul', class_="cpquery")  # on this site the news list sits in <ul class="cpquery">
            download_soup = BeautifulSoup(str(result), 'lxml')
            url_all = download_soup.find_all('a')
            hours = re.findall(r'\d{4}-\d{1,2}-\d{1,2}', str(result))  # publication dates in YYYY-MM-DD form
            for a_url in url_all:
                a_title = a_url.get_text()
                titles.append(a_title)
                a_url = a_url.get('href')
                a_url = urllib.parse.urljoin(page_url, a_url)  # turn relative links into absolute ones
                urls.append(a_url)
            for hour in hours:
                hour = datetime.datetime.strptime(hour, '%Y-%m-%d')
                times.append(hour)
            # time.sleep(2)  # optionally pause 2 seconds between listing pages
            # socket.setdefaulttimeout(15)  # optionally cap the download time per request
        except urllib.error.URLError as e:
            if hasattr(e, 'reason'):
                print("连接失败!", e.reason)
    for i in range(len(urls)):
        try:
            request = urllib.request.Request(urls[i], headers=headers)
            res = urllib.request.urlopen(request)
            page_info = res.read().decode('utf-8')
            soup = BeautifulSoup(page_info, 'lxml')
            text = ""
            now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            for p in soup.select('p'):  # collect the article body from its <p> tags
                text += p.get_text()
                text += "\n"
            txt(urls[i], titles[i], text, times[i], now)  # write this article to the database
            print(i)
            print("success!")
        except OSError:
            print("解析错误!")
            continue  # on a parse error, skip this URL and move on to the next one
(A commented-out variant of the same routine for the Liaocheng site, http://wjw.liaocheng.gov.cn/xwzx_12609/gzdt/index.html, which parses the listing from <ul class="news-list news-list9">, is omitted here.)
Deleting old records from the database:
def delete_data():
    # connect to the database
    db = pymysql.connect(
        host='XXXX.XXXX.XXXX.XXXX',  # database host
        port=3306,  # database port
        user='XXXX',  # database user
        password='XXXXXXXXXX',  # database password
        db='python',  # database name
        use_unicode=True,
        charset="utf8")
    # create a cursor object
    conn = db.cursor()
    try:
        # remove the previously crawled Binzhou news records before re-crawling
        conn.execute("DELETE from filedata_bak where city='滨州市(新闻)' and datakinds=0")
        db.commit()
    except Exception as e:
        print(e)
        db.rollback()
    conn.close()  # close the cursor
    db.close()  # close the connection
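delete_data() above and txt() below open the connection with the same parameters; a small shared helper would remove that duplication. This is only a sketch, and the name get_connection is an assumption (the placeholders stand for the same credentials as above):

def get_connection():
    # one place for the pymysql connection settings used by delete_data() and txt()
    return pymysql.connect(
        host='XXXX.XXXX.XXXX.XXXX',  # database host
        port=3306,
        user='XXXX',
        password='XXXXXXXXXX',
        db='python',
        use_unicode=True,
        charset="utf8")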
Saving data to the database:
def txt(urls, title, content, hour, now):
    # connect to the database
    db = pymysql.connect(
        host='XXXX.XXXX.XXXX.XXXX',  # database host
        port=3306,  # database port
        user='XXXX',  # database user
        password='XXXXXXXXXX',  # database password
        db='python',  # database name
        use_unicode=True,
        charset="utf8")
    # create a cursor object
    conn = db.cursor()
    try:
        # execute the SQL statement and insert one article record
        conn.execute(
            "insert ignore into filedata_bak(websitename,datalink,title,content,datatype,city,province,datakinds,pubdate,createtime) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')" % (
                "滨州市卫生健康委员会", urls, title, content, "文本", "滨州市(新闻)", "山东省", 0, hour, now))
        # commit the transaction
        db.commit()
        print('恭喜您,导入数据成功!')
    except:
        # roll back on any error
        db.rollback()
        print('sorry,导入数据失败!')
    conn.close()  # close the cursor
    db.close()  # close the connection
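Because the article text is interpolated directly into the SQL string above, a quote character inside content can break the statement. A safer variant lets pymysql bind the values itself; the sketch below uses the same table and columns as the insert above:

        sql = ("insert ignore into filedata_bak"
               "(websitename,datalink,title,content,datatype,city,province,datakinds,pubdate,createtime) "
               "values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
        # pymysql escapes each bound value, so quotes in the article body are handled safely
        conn.execute(sql, ("滨州市卫生健康委员会", urls, title, content,
                           "文本", "滨州市(新闻)", "山东省", 0, hour, now))
        db.commit()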
def main():
    page_url = 'http://wjw.binzhou.gov.cn/'
    index_url = 'http://wjw.binzhou.gov.cn/xinwen/class/?2.html&page='
    get_html_text(index_url, page_url)

if __name__ == '__main__':
    delete_data()  # clear the old Binzhou records first
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'
    }
    urls = []  # article links
    times = []  # publication dates
    titles = []  # article titles
    main()