Simple monitoring metric data collection on Windows with Python and paramiko

#!/usr/bin/python
#-*- coding: utf-8 -*-

#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
#Name : collMonitorDataToDB.py    #
#Created : 2017/07/06    #
#Author : @ruiy    #
#Version : 2.0    #
#Copyright : 2016 ~ 2017 ahwater.net Corporation.    #
#Description : collection monitor indicator data to DB.    #
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#

import pyodbc
import sys
import os
import commands
import datetime
import paramiko
import re
import ConfigParser
import codecs
import chardet
#import psutil
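
#Note: this script targets Python 2 -- print statements plus the Python 2-only
#"commands" and "ConfigParser" modules; on Python 3 the equivalents would be
#subprocess and configparser, and the prints would need converting.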

#import sys
#reload(sys)
#sys.setdefaultencoding('utf8')

#Main fields of the monitoring table:
# id,timestramp,location,ip,hostname,port,port_est_counts,cpu_use_ratio
# mem_total,mem_free,mem_use_ratio
# disk_drive_c_total,disk_drive_c_free,disk_drive_c_use_ratio
# send_flow,recv_flow,send_packets,recv_packets
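
#For reference, a hedged sketch of the target table DDL inferred from the field
#list above (column types are assumptions, not confirmed by this script;
#'timestramp' is kept as spelled because the INSERT below uses that name):
"""
CREATE TABLE ahwater_perf_monitor (
    id INT IDENTITY(1,1) PRIMARY KEY,
    timestramp DATETIME,
    location NVARCHAR(100),
    ip VARCHAR(15),
    hostname NVARCHAR(100),
    host_use_description NVARCHAR(200),
    port INT,
    port_est_counts INT,
    cpu_use_ratio FLOAT,
    mem_total FLOAT, mem_free FLOAT, mem_use_ratio FLOAT,
    disk_drive_c_total FLOAT, disk_drive_c_free FLOAT, disk_drive_c_use_ratio FLOAT,
    send_flow FLOAT, recv_flow FLOAT,
    send_packets BIGINT, recv_packets BIGINT
)
"""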

#
#Memory
#Total physical memory (bytes): wmic memorychip get capacity
#Free physical memory (KB): wmic OS get FreePhysicalMemory

#Disk
#List physical disks: wmic DISKDRIVE get deviceid,Caption,size,InterfaceType
#List logical partitions: wmic LOGICALDISK get name,Description,filesystem,size,freespace
#Get one partition's stats: fsutil volume diskfree c:
#Partition total/free can also be read via the wmic LOGICALDISK query above

#CPU
#CPU name/core info: wmic cpu get name,addresswidth,processorid
#Current CPU load percentage: wmic cpu get LoadPercentage

#process
#process list: wmic process get Caption,KernelModeTime,UserModeTime

#mystring.strip().replace(' ', '').replace('\n', '').replace('\t', '').replace('\r', '').strip()
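
#A minimal helper equivalent to the cleanup chain above (hypothetical name,
#kept for reference; the body below still uses the inline chains):
def clean_ws(s):
    #drop spaces, tabs, CR/LF anywhere in the string, then trim the ends
    return s.strip().replace(' ', '').replace('\n', '').replace('\t', '').replace('\r', '').strip()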

#Network traffic information

#database source read conf
dbconf=ConfigParser.SafeConfigParser()
#with codecs.open('../conf/config.properties','r',encoding='utf-8') as f:
# dbconf.readfp(f)
dbconf.read('../conf/config.properties')
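
#A sketch of the expected ../conf/config.properties layout; the section/key
#names come from the dbconf.get() calls in this script, the values here are
#placeholders:
#
# [db]
# driver={sql server native client 10.0}
# server=10.34.1.30
# database=LogFeedback
# uid=sa
# pwd=...
#
# [const_18]
# location_18=...
# describe_18=...
# countport_18=80
#
# [ssh_18]
# ssh_ip_18=10.34.1.23
# ssh_port_18=22
# ssh_username_18=...
# ssh_password_18=...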

db_driver=dbconf.get('db','driver')
db_server=dbconf.get('db','server')
db_database=dbconf.get('db','database')
db_uid=dbconf.get('db','uid')
db_pwd=dbconf.get('db','pwd')

#print type(db_server)
#print type(db_database)
#print type(db_uid)
#print type(db_pwd)

#python connects to SQL Server 2008 R2
#connection settings are read from the config file
"""
conn = pyodbc.connect(
driver='{sql server native client 10.0}';
server=%s;
database=%s;
uid=%s;
pwd=%s;
)
"""

#debug
#conn = pyodbc.connect('driver={sql server native client 10.0};server=%s;database=%s;uid=%s;pwd=%s;'%(db_server,db_database,db_uid,db_pwd))
#conn_info = ('Driver{MySQL51};Server=%s;Port=%s;Database=%s;User=%s; Password=%s;Option=3;'%(host, port, database, user,password))

#conn1_info=('Driver={sql server native client 10.0};server=%s;database=%s;uid=%s;pwd=%s;'%('10.34.1.30','LogFeedback','sa','ahswyc'))
#print conn1_info 
conn_info=('driver=%s;server=%s;database=%s;uid=%s;pwd=%s;'%(db_driver,db_server,db_database,db_uid,db_pwd))
#print conn_info
conn=pyodbc.connect(conn_info)

#hard-coded connection, kept for reference
#conn = pyodbc.connect('driver={sql server native client 10.0};server=10.34.1.30;database=LogFeedback;uid=sa;pwd=ahswyc;')
"""
conn = pyodbc.connect(
driver='{sql server native client 10.0}',
server='10.34.1.30',
database='LogFeedback',
uid='sa',
pwd='ahswyc'
)
"""

 

cursor = conn.cursor()

#reference insert statement used for debugging
#cursor.execute("insert into iisEstablishConnCounts(timestramp,iisHostIp,connCounts) values('2017/07/06:22:10','10.34.1.23',90)")

#the SQL insert fields below are driven by variables
#established-connection count on port 80:
#netstat -na -p tcp| findstr 80 | find /C "ESTABLISH"

#cmd="netstat -na -p tcp| findstr 80 | find /C \"ESTABLISH\""
#(status,output) = commands.getstatusoutput("%s" % cmd)

#python执行调用系统命令并将结果保存到变量
#注意在linux下用python执行系统命令并将结果保存到变量与windows不同如下语句
#cmd="netstat -na -p tcp| findstr 80 | find /C \"ESTABLISH\""
#(status,output) = commands.getstatusoutput("%s" % cmd)

#timestamp for this monitoring sample
dt= datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
#print(dt)
dateTime =dt
#print("debug1: ",dateTime)
print("current dataTime: ",dateTime)
#local collection is deprecated; this script mainly gathers metrics from remote hosts

#enable temporarily when debugging
#portCounts=os.popen("netstat -na -p tcp| findstr 80 | find /C \"ESTABLISH\"").read() 
#print("debug2: ",portCounts)

#manually configured values
location=dbconf.get('const_18','location_18')
describe=dbconf.get('const_18','describe_18')
countport=dbconf.get('const_18','countport_18')

#print(chardet.detect(location.encode('utf-8')))
#print type(location)
#op=('%s' % location).encode('gbk')
#print op
#localT=ur'省水文局'
#localT='水文局3tets1123'.decode('utf-8')
#localT=location.decode('utf-8').encode('gbk')
print location
localT=location
#localT="anhui shuiwen ju"
print("monitr location: ",localT)
#the SSH server runs on the monitored machine, so this IP is the monitored host's IP
ssh_ip=dbconf.get('ssh_18','ssh_ip_18')
mip=ssh_ip
print("monitor ip:",mip)

mport=countport
print("port: ",mport)


#paramiko logging
#logfiles=datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
#os.environ['logfiles'] = str(logfiles)
#paramiko.util.log_to_file(os.system(echo '../logs/$logfiles.txt'))
#paramiko.util.log_to_file("../logs/{logfiles}".txt)


#fetch monitoring data from the remote host
#establish the paramiko SSH connection
#transport = paramiko.Transport(('192.168.11.181',22))
tail_ip=ssh_ip.split('.')[3]
logfiles=datetime.datetime.now().strftime('%Y-%m-%d_%H_%M_%S')
paramiko.util.log_to_file('../logs/%s-%s.txt'% (logfiles,tail_ip))

"""
transport = paramiko.Transport(('10.34.1.23', 22))
transport.connect(username='ahwater', password='Aa7788..')
ssh = paramiko.SSHClient()
ssh._transport = transport
"""
#SSH connection settings read from the config file
#ssh_ip=dbconf.get('ssh_18','ssh_ip_18')
ssh_port=int(dbconf.get('ssh_18','ssh_port_18'))
ssh_username=dbconf.get('ssh_18','ssh_username_18')
ssh_password=dbconf.get('ssh_18','ssh_password_18')
#print ssh_password
transport = paramiko.Transport((ssh_ip, ssh_port))
transport.connect(username=ssh_username, password=ssh_password)
ssh = paramiko.SSHClient()
ssh._transport = transport
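
#Note: assigning to the private attribute ssh._transport works but bypasses
#paramiko's public API; the documented route is SSHClient.connect(), as in
#this equivalent sketch:
"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ssh_ip, port=ssh_port, username=ssh_username, password=ssh_password)
"""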


#hostname
cmd01='hostname'
stdin, stdout, stderr = ssh.exec_command(cmd01)
#print(stdout.read())
data01=stdout.read().strip().replace(' ', '').replace('\t', '').replace('\r', '').strip()
print("monitor hostname: ",data01)

#established-connection count for the port configured above
cmd02='netstat -na'
stdin, stdout, stderr = ssh.exec_command(cmd02)
#count only ESTABLISHED lines that reference the configured port; the original
#substring count('80') also matched '80' inside unrelated addresses
net_lines=stdout.read().splitlines()
data02=len([l for l in net_lines if 'ESTABLISH' in l and (':%s' % mport) in l])
print("port est counts: ",data02)

#CPU usage ratio
cmd03='wmic cpu get LoadPercentage'
stdin, stdout, stderr = ssh.exec_command(cmd03)
#drop the 'LoadPercentage' header and keep one load value per CPU row
#(the original iterated over single characters, which broke for loads >= 10)
da03=stdout.read().replace('LoadPercentage','').split()
statis=0
counts=0
for i in da03:
    #one value per physical CPU
    counts = counts + 1
    statis = statis + int(i)
try:
    data03=round(float(statis)/counts/100,6)
    print("cpu use ratio: ",data03)
except ZeroDivisionError:
    pass

#total physical memory / GB
#an earlier single-module version, kept for reference:
"""
cmd04='wmic memorychip get capacity'
stdin,stdout,stderr = ssh.exec_command(cmd04)
da04=stdout.read().strip('Capacity').replace('\n','').replace('\t','').replace('\r','').replace(' ','').strip()
data04=float(da04)/1024/1024/1024
print("mem total Gb: ",data04)
"""
cmd04='wmic memorychip get capacity'
stdin,stdout,stderr = ssh.exec_command(cmd04)
#one capacity value (in bytes) per installed memory module; sum them
#(the original space-join/split was a no-op once spaces were removed)
d4_values=stdout.read().replace('Capacity','').split()

counts_4=0
for i in d4_values:
    counts_4 = counts_4 + int(i)

data04=float(counts_4)/1024/1024/1024
print("mem total Gb: ",data04)


#free physical memory / GB (FreePhysicalMemory is reported in KB)
cmd05='wmic OS get FreePhysicalMemory'
stdin,stdout,stderr = ssh.exec_command(cmd05)
da05=stdout.read().strip('FreePhysicalMemory').replace('\n','').replace('\t','').replace('\r','').replace(' ','').strip()
data05=round(float(da05)/1024/1024,4)
print("mem free Gb: ",data05)

#memory usage ratio
data06=round(float((data04 - data05)) / data04,4)
print("mem use ratio: ",data06)

#disk stats for the system drive C:
#cmd07='fsutil volume diskfree c:'
#restrict the query to C:; the original unfiltered query returned every
#logical disk and implicitly used whichever was listed first
cmd07="wmic LOGICALDISK where name='C:' get FreeSpace,Size"
stdin,stdout,stderr = ssh.exec_command(cmd07)
#drop the FreeSpace/Size header tokens
d7_1=stdout.read().strip().replace('FreeSpace','').replace('Size','')
#drop \r and \n
d7_2=d7_1.strip().replace('\r','').replace('\n','')
#collapse runs of spaces into one
d7_3=(' '.join(filter(lambda x: x, d7_2.split(' '))))
#str -> list: [free_bytes, total_bytes]
disk_data=d7_3.split(' ')

#C: partition total in GB; wmic reports bytes
data07=round(float(disk_data[1])/1024/1024/1024,4)
print("C disk total Gb:",data07)
#C: partition free space in GB
data08=round(float(disk_data[0])/1024/1024/1024,4)
print("C disk free Gb:",data08)
#C: partition usage ratio
data09=round((data07 - data08) / data07,4)
print("C disk space use ratio: ",data09)

#network traffic counters
cmd08='netstat -e'
stdin, stdout, stderr = ssh.exec_command(cmd08)
d8_1=stdout.read().strip().rstrip().lstrip().replace('\r','').replace('\n','')
d8_2=(' '.join(filter(lambda x: x, d8_1.split(' '))))
d8_3=d8_2.split(' ')
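
#For reference, "netstat -e" output is roughly the following (English Windows;
#the token indices into d8_3 below were tuned against a localized system, which
#is also why non-digit characters get stripped; values are placeholders):
#
# Interface Statistics
#                       Received            Sent
# Bytes                123456789       987654321
# Unicast packets          12345           54321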

#counters are reported in bytes; strip non-digit (localized header) characters
#from each token (the original '[^\u4e00-\u9fa5]' pattern does not behave as
#intended on Python 2 byte strings)
net_data=re.sub(r'\D','',d8_3[4])
#cumulative bytes sent, in GB
data10=round(float(net_data)/1024/1024/1024,4)
print("send traffic flow Gb: ",data10)
#cumulative bytes received, in GB
net_data2=re.sub(r'\D','',d8_3[3])
data11=round(float(net_data2)/1024/1024/1024,4)
print("recv traffic flow Gb: ",data11)

#cumulative packets sent (TCP/IP layer)
data12=int(re.sub(r'\D','',d8_3[6]))
print("send packets: ",data12)

#cumulative packets received
data13=int(re.sub(r'\D','',d8_3[5]))
print("recv packets: ",data13)

#write the sample to the database
#SQL template whose field values are filled from variables
sql_debug = """insert into iisEstablishConnCounts(timestramp,iisHostIp,connCounts) values(
%(timestramp)s,
'10.34.1.23',
%(connCounts)s
)
"""
# id,timestramp,location,ip,hostname,port,port_est_counts,cpu_use_ratio
# mem_total,mem_free,mem_use_ratio
# disk_drive_c_total,disk_drive_c_free,disk_drive_c_use_ratio
# send_flow,recv_flow,send_packets,recv_packets

sql = """insert into ahwater_perf_monitor(timestramp,location,ip,hostname,host_use_description,port,port_est_counts,cpu_use_ratio,
mem_total,mem_free,mem_use_ratio,
disk_drive_c_total,disk_drive_c_free,disk_drive_c_use_ratio,
send_flow,recv_flow,send_packets,recv_packets) values(
%(timestramp)s,
%(location)s,
%(ip)s,
%(hostname)s,
%(host_use_description)s,
%(port)s,
%(port_est_counts)s,
%(cpu_use_ratio)s,
%(mem_total)s,
%(mem_free)s,
%(mem_use_ratio)s,
%(disk_drive_c_total)s,
%(disk_drive_c_free)s,
%(disk_drive_c_use_ratio)s,
%(send_flow)s,
%(recv_flow)s,
%(send_packets)s,
%(recv_packets)s
)
"""

#print(sql)
#cursor.execute(sql_debug % dict(timestramp = dateTime,connCounts = portCounts))

#print("\n")

"""
print dateTime
print localT
print mip
print data01
print mport
print data02
print data03
print data04
print data05
print data06
print data07
print data08
print data09
print data10
print data11
print data12
print data13
"""
#print describe

cursor.execute(sql % dict(
timestramp="'" + dateTime + "'",
location="'" + localT + "'",
ip="'" + mip + "'",
hostname="'" + data01 + "'",
host_use_description="'" + describe + "'",
port=mport,
port_est_counts=data02,
cpu_use_ratio=data03,
mem_total=data04,
mem_free=data05,
mem_use_ratio=data06,
disk_drive_c_total=data07,
disk_drive_c_free=data08,
disk_drive_c_use_ratio=data09,
send_flow=data10,
recv_flow=data11,
send_packets=data12,
recv_packets=data13
))
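
#Note: formatting values directly into the SQL string is fragile (quoting) and
#injection-prone; pyodbc also supports '?' parameter markers. A hedged sketch
#of the same insert, shown for the first few columns only:
"""
cursor.execute(
    "insert into ahwater_perf_monitor(timestramp,location,ip,hostname) values (?,?,?,?)",
    (dateTime, localT, mip, data01))
"""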


"""
cursor.execute(sql % dict(
timestramp=dateTime,
location=localT,
ip=mip,
hostname=data01,
port=mport,
port_est_counts="'" +str(data02) + "'",
cpu_use_ratio="," +str(data03) + "'",
mem_total="'"+str(data04)+"'",
mem_free="'"+str(data05)+"'",
mem_use_ratio="'"+str(data06)+"'",
disk_drive_c_total="'"+str(data07)+"'",
disk_drive_c_free="'" +str(data08)+"'",
disk_drive_c_use_ratio="'"+str(data09)+"'",
send_flow="'"+str(data10)+"'",
recv_flow="'"+str(data11)+"'",
send_packets=data12,
recv_packets=data13
))
"""

#commit and close the pyodbc connection
conn.commit()
conn.close()
#close the paramiko SSH session
transport.close()
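
#Note: if any collection step raises, conn and transport are left open; a more
#defensive layout (a sketch, not the original structure) would be:
"""
try:
    # ... run the collection steps and the INSERT ...
    conn.commit()
finally:
    conn.close()
    transport.close()
"""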

 
