Cacti的流量图爬取-阿里云开发者社区

开发者社区> 开发与运维> 正文
登录阅读全文

Cacti的流量图爬取

简介: Cacti交换机流量图检测工具

Cacti的流量图爬取
Windows环境


import datetime
import glob
import os
import re
import shutil
import urllib
import urllib.request
from http import cookiejar
from io import BytesIO

import requests
from bs4 import BeautifulSoup
from docx import Document
from docx.shared import Inches
from PIL import Image

# Path variables: fill these in before running (left blank in the article).
SOURCEPATH = ""   # directory holding the working report files
Ops = ""          # SVN working-copy directory used by SvnCommit()
INSPECTION = ""   # destination directory for the dated .xls inspection report
MONITOR = ""      # destination directory for the dated .docx monitor report
SHELVES = ""      # not used in the visible code -- kept for compatibility

# Site/location tag embedded in the report file names.
add = "ADDRESS"

# Cookie scratch file: create/truncate it once and close it immediately.
# (The original bound an open 'w+' handle to FILENAME and never used or
# closed it -- a leaked file descriptor.)
filename = 'cookie.txt'
open(filename, 'w+').close()




def GetCookie(url='url'):
    """Fetch the Cacti session cookie from *url*.

    Opens the page, reads the HTTP response headers, and extracts the
    first ``Cacti=<token>`` pair found in them.

    Parameters:
        url: page to request.  Defaults to the original placeholder
             string ``'url'`` -- fill in the real Cacti URL.

    Returns:
        The cookie string, e.g. ``'Cacti=abc123'``.

    Raises:
        IndexError: if no ``Cacti=`` cookie appears in the headers.
    """
    # Close the HTTP response deterministically (the original never
    # closed the handle returned by urlopen).
    with urllib.request.urlopen(url) as resp:
        header_text = str(resp.info())
    cookie_value = re.findall(r'Cacti=[a-zA-Z0-9]+', header_text)[0]
    print(cookie_value)
    return cookie_value

# -- Scrape the traffic graphs and collect them into a Word document -------
document = Document()
url = 'url'  # base Cacti graph_view URL (placeholder in the published article)
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Cookie': 'Cacti=fdsafdsaw32342',  # replace with the value GetCookie() returns
}


for i in (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 34, 35, 36):
    # Target tree leaf (page) ids.
    for n in (1, 3, 6):
        # Page number inside the leaf.
        payload1 = {'action': 'tree', 'tree_id': '2', 'leaf_id': '%s' % i, 'page': '%d' % n}

        # One request per (leaf, page).  The original re-fetched the very
        # same page once per graph id inside the m-loop below.
        First_Page = requests.get(url, headers=headers, params=payload1)
        print(First_Page.url)

        for m in (350, 410, 528, 472, 588, 110, 170, 230, 290, 1085, 1116, 1142):
            # Graph (local_graph_id) to extract from this page.

            # Raw string: '\S' and '\d' are regex escapes, not string escapes.
            Regular1 = r'(graph\Simage\Sphp.*local_graph_id=%d.*end=\d+)' % m
            print(Regular1)

            # Pull the graph-image URL fragment out of the page source.
            plt = re.findall(Regular1, First_Page.text)
            print(plt)
            if not plt:
                # The original fell through with a bare `True`, silently
                # reusing the previous match (or raising NameError on the
                # very first miss).  Skip graphs that are not on this page.
                continue
            a = plt[0]
            JPG_url = '<URL>' + a
            print('<URL>' + a)
            JPG_url_r = JPG_url.replace(';', '&')
            print(JPG_url_r)

            # Download the image bytes and save them as a bitmap.
            # NOTE(review): the file name varies only with i, so each m in
            # the same leaf overwrites the previous image -- confirm intent.
            r = requests.get(JPG_url_r, headers=headers)
            image = Image.open(BytesIO(r.content))
            image.save('image%d.bmp' % i)
    # Only the last bitmap saved for this leaf ends up in the document.
    document.add_picture('image%d.bmp' % i, width=Inches(6))
document.save('FRALYCHEN.docx')

# -- Copy the inspection reports, stamping today's date into the name ------
T = datetime.datetime.now()
# Use the actual year -- the original hard-coded "2019", which would have
# mislabelled every report produced after that year.
date_tag = "{}.{}.{}".format(T.year, T.month, T.day)

src_file = '00' + add + 'FRALYCHEN.xls'
dst_file = date_tag + add + 'FRALYCHEN.xls'
shutil.copyfile(SOURCEPATH + src_file, INSPECTION + dst_file)

MonScfile = '00' + add + 'FRALYCHEN.docx'
MonDtfile = date_tag + add + 'FRALYCHEN.docx'
shutil.copyfile(SOURCEPATH + MonScfile, MONITOR + MonDtfile)

# -- Clean up: remove the source .docx and every generated bitmap ----------
os.remove(SOURCEPATH + MonScfile)
for infile in glob.glob(os.path.join(SOURCEPATH, '*.bmp')):
    os.remove(infile)

#SVN提交
def SvnCommit():
    os.chdir(Ops)
    os.system('svn cleanup')
    r=os.popen('svn st')
    info=r.readlines()
    print(info)

    for line in info:
        line=line.strip('\n').split('       ')
        one=line[0]
        two=line[1]
        print(two)
        if one == '?':
            os.system('svn add %s' % two)
            os.system('svn commit -m %s' % two)
        elif one == '!':
            os.system('svn del %s' %two)
            os.system('svn commit -m "clear"')
SvnCommit()

版权声明:本文内容由阿里云实名注册用户自发贡献,版权归原作者所有,阿里云开发者社区不拥有其著作权,亦不承担相应法律责任。具体规则请查看《阿里云开发者社区用户服务协议》和《阿里云开发者社区知识产权保护指引》。如果您发现本社区中有涉嫌抄袭的内容,填写侵权投诉表单进行举报,一经查实,本社区将立刻删除涉嫌侵权内容。

分享:
开发与运维
使用钉钉扫一扫加入圈子
+ 订阅

集结各类场景实战经验,助你开发运维畅行无忧

其他文章