四、常用终端命令
#查看文件描述符使用情况的命令是:
lsof -i -n -P
lsof -i -n -P | grep :1883
#查看1883端口的连接情况,观察TCP状态图
netstat -nalp|grep 1883
#查看1883端口的客户端连接数
netstat -nalp|grep 1883|wc -l
#查看已连接的数目
netstat -na|grep ESTABLISHED|wc -l
#修改当前进程的最大文件数
ulimit -n 102400
#查看进程是否在,指定进程名
netstat -lnpt | grep epltest
#查看进程是否在,指定端口号
netstat -tunlp|egrep "(1883|1982)"
netstat -tunlp|egrep "1883"
netstat -tunlp|egrep 1883
#终止进程
kill -9 <nginx进程号>
#查看日志
tail -f log*.log
五、TCP连接状态详解
LISTEN: 侦听来自远方的TCP端口的连接请求
SYN-SENT: 在发送连接请求后等待匹配的连接请求
SYN-RECEIVED: 在收到和发送一个连接请求后等待对方对连接请求的确认
ESTABLISHED: 代表一个打开的连接
FIN-WAIT-1: 等待远程TCP连接中断请求,或先前的连接中断请求的确认
FIN-WAIT-2: 从远程TCP等待连接中断请求
CLOSE-WAIT: 等待从本地用户发来的连接中断请求
CLOSING: 等待远程TCP对连接中断的确认
LAST-ACK: 等待原来的发向远程TCP的连接中断请求的确认
TIME-WAIT: 等待足够的时间以确保远程TCP接收到连接中断请求的确认
CLOSED: 没有任何连接状态
六、Linux网络参数
#Linux参数调优,修改文件 /etc/sysctl.conf ,在末尾追加这些文字
fs.file-max = 2097152
fs.nr_open = 2097152
net.core.somaxconn = 65535
net.core.rmem_default = 65535
net.core.wmem_default = 65535
net.core.rmem_max = 8388608
net.core.wmem_max = 83886080
net.core.optmem_max = 40960
net.ipv4.tcp_rmem = 4096 87380 83886080
net.ipv4.tcp_wmem = 4096 65535 83886080
net.ipv4.tcp_mem = 8388608 8388608 83886080
net.ipv4.ip_local_port_range = 1025 65000
net.ipv4.tcp_max_syn_backlog = 16384
net.core.netdev_max_backlog = 16384
net.ipv4.tcp_fin_timeout = 15
#修改完成后,在终端执行命令 sysctl -p 使配置立即生效
#允许当前会话/进程打开文件句柄数
ulimit -n 1048576
第二篇 TCP client,压力测试代码,注意是.cpp文件。下载https://download.csdn.net/download/libaineu2004/10468728
// Epoll-based TCP stress-test client.
// Adapted from "Linux High-Performance Server Programming", chapter 16,
// 16-4 stress_client.cpp.
//
// Usage: ./mystressclient <server_ip> <server_port> <num_clients> <clients_per_batch>
//   e.g. ./mystressclient 172.16.6.161 8011 1000 1
//        ./mystressclient 172.16.6.161 1884 10 1
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <sys/resource.h> /* setrlimit */
#include <fcntl.h>        /* daemonize */

#define CONFIG_MIN_RESERVED_FDS 32
#define CONFIG_FDSET_INCR (CONFIG_MIN_RESERVED_FDS + 96)

// Note: an HTTP request must end with a blank line, i.e. \r\n\r\n.
//static const char* request = "GET http://localhost/index.html HTTP/1.1\r\nConnection: keep-alive\r\n\r\n";
//static const char* request = "hello world\r\n";
static const char* request = "GET /msg_server HTTP/1.1\r\nConnection: keep-alive\r\n\r\n"; //GET
//static const char* request = "POST /msg_server HTTP/1.1\r\nConnection: keep-alive\r\n\r\n"; //POST

// Loop-termination flag set from the signal handler.
// FIX: was a plain `int`; a variable written by a signal handler and read by
// the main loop must be `volatile sig_atomic_t`, otherwise the compiler may
// cache it in a register and the loop never observes the change.
static volatile sig_atomic_t stop = 0;

// Switch fd to non-blocking mode; returns the previous fcntl flags.
int setnonblocking( int fd )
{
    int old_option = fcntl( fd, F_GETFL );
    int new_option = old_option | O_NONBLOCK;
    fcntl( fd, F_SETFL, new_option );
    return old_option;
}

// Register fd with the epoll instance, initially watching writability
// (level-triggered) so the request is sent as soon as the socket is ready.
void addfd( int epoll_fd, int fd )
{
    epoll_event event;
    event.data.fd = fd;
    //event.events = EPOLLOUT | EPOLLET | EPOLLERR;
    event.events = EPOLLOUT | EPOLLERR;
    epoll_ctl( epoll_fd, EPOLL_CTL_ADD, fd, &event );
    setnonblocking( fd );
}

// Write exactly len bytes from buffer to sockfd.
// Returns false on send error (including EAGAIN on this non-blocking socket)
// or peer close; the caller then drops the connection.
bool write_nbytes( int sockfd, const char* buffer, int len )
{
    int bytes_write = 0;
    printf( "write out %d bytes to socket %d\n", len, sockfd );
    while( 1 )
    {
        bytes_write = send( sockfd, buffer, len, 0 );
        if ( bytes_write <= 0 )
        {
            return false; // -1: error; 0: connection closed by peer
        }
        len -= bytes_write;
        buffer = buffer + bytes_write;
        if ( len <= 0 )
        {
            return true;
        }
    }
}

// Perform one recv() into buffer and log the result.
// Returns false on error or peer close.
// FIX: read at most len-1 bytes so the zero-filled buffer always stays
// NUL-terminated; the original recv'd the full len bytes, and printing a
// completely filled buffer with %s would read past the end (UB).
bool read_once( int sockfd, char* buffer, int len )
{
    int bytes_read = 0;
    memset( buffer, '\0', len );
    bytes_read = recv( sockfd, buffer, len - 1, 0 );
    if ( bytes_read <= 0 )
    {
        return false; // -1: error; 0: connection closed by peer
    }
    printf( "read in %d bytes from socket %d with content: %s\n", bytes_read, sockfd, buffer );
    return true;
}

// Open `num` connections to ip:port, sleeping 1 second after every `space`
// connections so the server is not flooded all at once.
void start_conn( int epoll_fd, int num, const char* ip, int port, int space )
{
    if (num <= 0 || port <= 0 || space <= 0)
    {
        exit(0);
    }
    struct sockaddr_in address;
    bzero( &address, sizeof( address ) );
    address.sin_family = AF_INET;
    inet_pton( AF_INET, ip, &address.sin_addr );
    address.sin_port = htons( port );
    for ( int i = 0; i < num; ++i )
    {
        if ((i % space) == 0)
        {
            sleep( 1 ); // throttle: pause 1 s per batch of `space` clients
        }
        int sockfd = socket( PF_INET, SOCK_STREAM, 0 );
        if ( sockfd < 0 )
        {
            continue;
        }
        if ( connect( sockfd, ( struct sockaddr* )&address, sizeof( address ) ) == 0 )
        {
            printf( "build connection %d\n", i );
            addfd( epoll_fd, sockfd );
        }
        else
        {
            printf( "create fail\n" );
            close( sockfd ); // FIX: original leaked the fd on connect failure
        }
    }
}

// Remove sockfd from the epoll set and close it.
void close_conn( int epoll_fd, int sockfd )
{
    epoll_ctl( epoll_fd, EPOLL_CTL_DEL, sockfd, 0 );
    close( sockfd );
}

// Detach from the controlling terminal; adapted from redis/server.c daemonize().
void daemonize(void)
{
    int fd;
    if (fork() != 0) exit(0); /* parent exits */
    setsid();                 /* create a new session */
    /* Every output goes to /dev/null. If the process is daemonized but
     * logging goes to stdout it will not log at all. */
    if ((fd = open("/dev/null", O_RDWR, 0)) != -1)
    {
        dup2(fd, STDIN_FILENO);
        dup2(fd, STDOUT_FILENO);
        dup2(fd, STDERR_FILENO);
        if (fd > STDERR_FILENO) close(fd);
    }
}

/* Try to raise the max number of open files to match the configured max
 * number of clients, reserving CONFIG_MIN_RESERVED_FDS descriptors for
 * extra operations (listening sockets, log files and so forth).
 * If the limit cannot be raised far enough, reduce maxclients to what we
 * can actually handle. Adapted from redis/server.c adjustOpenFilesLimit(). */
void adjustOpenFilesLimit(int maxclients)
{
    rlim_t maxfiles = maxclients + CONFIG_MIN_RESERVED_FDS;
    struct rlimit limit;

    if (getrlimit(RLIMIT_NOFILE, &limit) == -1)
    {
        printf("Unable to obtain the current NOFILE limit (%s), assuming 1024 and setting the max clients configuration accordingly.",
               strerror(errno));
        maxclients = 1024 - CONFIG_MIN_RESERVED_FDS;
    }
    else
    {
        rlim_t oldlimit = limit.rlim_cur;

        /* Set the max number of files only if the current limit is not
         * already enough for our needs. */
        if (oldlimit < maxfiles)
        {
            rlim_t bestlimit;
            int setrlimit_error = 0;

            /* Try to set the file limit to 'maxfiles', or at least to the
             * highest supported value below it. */
            bestlimit = maxfiles;
            while (bestlimit > oldlimit)
            {
                rlim_t decr_step = 16;
                limit.rlim_cur = bestlimit;
                limit.rlim_max = bestlimit;
                if (setrlimit(RLIMIT_NOFILE, &limit) != -1) break;
                setrlimit_error = errno;
                /* Failed to set 'bestlimit'; retry with a smaller limit,
                 * decrementing by a few FDs per iteration. */
                if (bestlimit < decr_step) break;
                bestlimit -= decr_step;
            }

            /* Assume the limit we got initially is still valid if our last
             * try was even lower. */
            if (bestlimit < oldlimit) bestlimit = oldlimit;

            if (bestlimit < maxfiles)
            {
                int old_maxclients = maxclients;
                maxclients = bestlimit - CONFIG_MIN_RESERVED_FDS;
                /* maxclients could become logically less than 1, which we
                 * detect indirectly via bestlimit. */
                if (bestlimit <= CONFIG_MIN_RESERVED_FDS)
                {
                    printf("Your current 'ulimit -n' "
                           "of %llu is not enough for the server to start. "
                           "Please increase your open file limit to at least "
                           "%llu. Exiting.",
                           (unsigned long long) oldlimit,
                           (unsigned long long) maxfiles);
                    exit(1);
                }
                printf("You requested maxclients of %d "
                       "requiring at least %llu max file descriptors.",
                       old_maxclients,
                       (unsigned long long) maxfiles);
                printf("Server can't set maximum open files "
                       "to %llu because of OS error: %s.",
                       (unsigned long long) maxfiles, strerror(setrlimit_error));
                printf("Current maximum open files is %llu. "
                       "maxclients has been reduced to %d to compensate for "
                       "low ulimit. "
                       "If you need higher maxclients increase 'ulimit -n'.",
                       (unsigned long long) bestlimit, maxclients);
            }
            else
            {
                printf("Increased maximum number of open files "
                       "to %llu (it was originally set to %llu).",
                       (unsigned long long) maxfiles,
                       (unsigned long long) oldlimit);
            }
        }
    }
}

// Signal handler: record the signal and ask the main loop to exit.
void signal_exit_func(int signo)
{
    printf("exit sig is %d\n", signo);
    stop = 1;
}

// Install the termination handlers.
// Ctrl+C sends SIGINT; `kill <pid>` sends SIGTERM; Ctrl+\ sends SIGQUIT.
// SIGKILL (kill -9) and SIGSTOP (Ctrl+Z) cannot be caught, so installing
// handlers for them below has no effect — kept for documentation purposes.
void signal_exit_handler()
{
    struct sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = signal_exit_func;
    sigaction(SIGINT, &sa, NULL);
    sigaction(SIGTERM, &sa, NULL);
    sigaction(SIGQUIT, &sa, NULL);
    sigaction(SIGKILL, &sa, NULL); // no-op: SIGKILL is uncatchable
    sigaction(SIGSTOP, &sa, NULL); // no-op: SIGSTOP is uncatchable
}

// argv[1] server IP address
// argv[2] server port
// argv[3] number of clients to create
// argv[4] batch size: pause 1 s after every argv[4] clients
int main( int argc, char* argv[] )
{
    //signal(SIGHUP, SIG_IGN); // if enabled, closing the terminal no longer kills the process
    signal(SIGPIPE, SIG_IGN);  // a write to a reset connection must not kill us
    signal_exit_handler();

    int background = 0;
    if (background)
    {
        daemonize();
    }

    assert( argc == 5 );
    int maxclients = atoi( argv[ 3 ] ) + CONFIG_FDSET_INCR;
    adjustOpenFilesLimit(maxclients);

    int epoll_fd = epoll_create( 1024 ); /* 1024 is just a hint for the kernel */
    start_conn( epoll_fd, atoi( argv[ 3 ] ), argv[1], atoi( argv[2] ), atoi( argv[4] ) );

    //epoll_event events[ 10000 ];
    epoll_event *events = (epoll_event*)malloc(sizeof(struct epoll_event) * maxclients);
    if (events == NULL) // FIX: original never checked the allocation
    {
        printf("out of memory\n");
        close(epoll_fd);
        return 1;
    }
    char buffer[ 2048 ];
    while ( !stop )
    {
        int fds = epoll_wait( epoll_fd, events, maxclients, 2000 );
        for ( int i = 0; i < fds; i++ )
        {
            int sockfd = events[i].data.fd;
            if ( events[i].events & EPOLLIN )
            {
                if ( ! read_once( sockfd, buffer, 2048 ) )
                {
                    close_conn( epoll_fd, sockfd );
                    continue; // FIX: original went on to EPOLL_CTL_MOD a closed fd
                }
                struct epoll_event event;
                //event.events = EPOLLOUT | EPOLLET | EPOLLERR;
                event.events = EPOLLOUT | EPOLLERR;
                event.data.fd = sockfd;
                epoll_ctl( epoll_fd, EPOLL_CTL_MOD, sockfd, &event );
            }
            else if( events[i].events & EPOLLOUT )
            {
                if ( ! write_nbytes( sockfd, request, strlen( request ) ) )
                {
                    close_conn( epoll_fd, sockfd );
                    continue; // FIX: original went on to EPOLL_CTL_MOD a closed fd
                }
                struct epoll_event event;
                //event.events = EPOLLIN | EPOLLET | EPOLLERR;
                event.events = EPOLLIN | EPOLLERR;
                event.data.fd = sockfd;
                epoll_ctl( epoll_fd, EPOLL_CTL_MOD, sockfd, &event );
            }
            else if( events[i].events & EPOLLERR )
            {
                close_conn( epoll_fd, sockfd );
            }
        }
    }
    close(epoll_fd);
    free(events); // free(NULL) would be a no-op; no guard needed
    printf("exit!\n");
}
注意:addfd函数,初始化是event.events = EPOLLOUT | EPOLLERR;输出
输入终端命令,开始压力测试
./mystressclient 172.16.6.161 8011 20000 100
./mystressclient 192.168.83.128 1883 50000 2000
//172.16.6.161 表示服务器IP地址
//8011 表示服务器端口
//20000 表示建立20000个client
//100 表示每建立100个client就延时等待1s
---
友情链接:
我个人的Nginx-1.12.2实践:安装,编译与测试(★firecat推荐,针对TCP四层负载均衡★)
我个人的Haproxy-1.7.9实践:安装,编译与测试(★firecat推荐,针对TCP四层负载均衡★)
我个人的Linux TCP server和client测试源码,C语言(2)(★firecat推荐★)