Problems Encountered Pushing a Linux Server to One Million Concurrent Connections, and How to Solve Them


Preface

Getting the million-connection server working took about five days of debugging, during which connections kept dropping for initially puzzling reasons. This article summarizes those problems and how to solve them.
The "one million concurrent connections" achieved in this experiment refers purely to the number of established TCP connections: the code adds only the necessary logging and carries no real business logic.

Server and client configuration

server (1 machine): Ubuntu 20.04, 8 cores, 8 GB RAM

client (3 machines): Ubuntu 16.04, 2 cores, 4 GB RAM

Three client machines are used because each client process caps itself at 380,000 connections (3 x 380,000 > 1,000,000), and the server listens on 100 consecutive ports so that a single client IP is not limited to roughly 64K ephemeral source ports against one server ip:port.

Server code

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/epoll.h>
#include <sys/poll.h>
#include <errno.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <fcntl.h>
#include <pthread.h>
#define SERVER_PORT   8080
#define SERVER_IP   "127.0.0.1"
#define MAX_BUFFER    64
#define MAX_EPOLLSIZE 100000
#define MAX_THREAD    80
#define MAX_PORT    100
#define CPU_CORES_SIZE  8
#define TIME_SUB_MS(tv1, tv2)  ((tv1.tv_sec - tv2.tv_sec) * 1000 + (tv1.tv_usec - tv2.tv_usec) / 1000)
static int ntySetNonblock(int fd) {
  int flags;
  flags = fcntl(fd, F_GETFL, 0);
  if (flags < 0) return flags;
  flags |= O_NONBLOCK;
  if (fcntl(fd, F_SETFL, flags) < 0) return -1;
  return 0;
}
static int ntySetReUseAddr(int fd) {
  int reuse = 1;
  return setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *)&reuse, sizeof(reuse));
}
static int ntySetAlive(int fd) {
  int alive = 1;
  int idle = 60;
  int interval = 5;
  int count = 2;
  setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void*)&alive, sizeof(alive));
  setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, (void*)&idle, sizeof(idle));
  setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, (void*)&interval, sizeof(interval));
  setsockopt(fd, SOL_TCP, TCP_KEEPCNT, (void*)&count, sizeof(count));
  return 0;
}
/** **** ******** **************** thread pool **************** ******** **** **/
#define LL_ADD(item, list) { \
  item->prev = NULL; \
  item->next = list; \
  if (list != NULL) list->prev = item; \
  list = item; \
}
#define LL_REMOVE(item, list) { \
  if (item->prev != NULL) item->prev->next = item->next; \
  if (item->next != NULL) item->next->prev = item->prev; \
  if (list == item) list = item->next; \
  item->prev = item->next = NULL; \
}
typedef struct worker { 
  pthread_t thread; 
  int terminate;  
  struct workqueue *workqueue;  
  struct worker *prev;  
  struct worker *next;
} worker_t;
typedef struct job {  
  void (*job_function)(struct job *job);  
  void *user_data;  
  struct job *prev; 
  struct job *next;
} job_t;
typedef struct workqueue {  
  struct worker *workers; 
  struct job *waiting_jobs; 
  pthread_mutex_t jobs_mutex; 
  pthread_cond_t jobs_cond;
} workqueue_t;
static void *worker_function(void *ptr) { 
  worker_t *worker = (worker_t *)ptr; 
  job_t *job; 
  while (1) {     
    pthread_mutex_lock(&worker->workqueue->jobs_mutex);   
    while (worker->workqueue->waiting_jobs == NULL) {     
      if (worker->terminate) break;     
      pthread_cond_wait(&worker->workqueue->jobs_cond, &worker->workqueue->jobs_mutex);   
    }     
    if (worker->terminate) {
      pthread_mutex_unlock(&worker->workqueue->jobs_mutex);
      break;
    }
    job = worker->workqueue->waiting_jobs;    
    if (job != NULL) {      
      LL_REMOVE(job, worker->workqueue->waiting_jobs);    
    }   
    pthread_mutex_unlock(&worker->workqueue->jobs_mutex);   
    if (job == NULL) continue;  
    /* Execute the job. */    
    job->job_function(job); 
  } 
  free(worker); 
  pthread_exit(NULL);
}
int workqueue_init(workqueue_t *workqueue, int numWorkers) {  
  int i;  
  worker_t *worker; 
  pthread_cond_t blank_cond = PTHREAD_COND_INITIALIZER; 
  pthread_mutex_t blank_mutex = PTHREAD_MUTEX_INITIALIZER;  
  if (numWorkers < 1) numWorkers = 1; 
  memset(workqueue, 0, sizeof(*workqueue)); 
  memcpy(&workqueue->jobs_mutex, &blank_mutex, sizeof(workqueue->jobs_mutex));  
  memcpy(&workqueue->jobs_cond, &blank_cond, sizeof(workqueue->jobs_cond)); 
  for (i = 0; i < numWorkers; i++) {    
    if ((worker = malloc(sizeof(worker_t))) == NULL) {      
      perror("Failed to allocate all workers");     
      return 1;   
    }   
    memset(worker, 0, sizeof(*worker));   
    worker->workqueue = workqueue;    
    if (pthread_create(&worker->thread, NULL, worker_function, (void *)worker)) {     
      perror("Failed to start all worker threads");     
      free(worker);     
      return 1;   
    }   
    LL_ADD(worker, worker->workqueue->workers); 
  } 
  return 0;
}
void workqueue_shutdown(workqueue_t *workqueue) { 
  worker_t *worker = NULL;    
  for (worker = workqueue->workers; worker != NULL; worker = worker->next) {    
    worker->terminate = 1;  
  } 
  pthread_mutex_lock(&workqueue->jobs_mutex); 
  workqueue->workers = NULL;  
  workqueue->waiting_jobs = NULL; 
  pthread_cond_broadcast(&workqueue->jobs_cond);  
  pthread_mutex_unlock(&workqueue->jobs_mutex);
}
void workqueue_add_job(workqueue_t *workqueue, job_t *job) {  
  pthread_mutex_lock(&workqueue->jobs_mutex); 
  LL_ADD(job, workqueue->waiting_jobs); 
  pthread_cond_signal(&workqueue->jobs_cond); 
  pthread_mutex_unlock(&workqueue->jobs_mutex);
}
static workqueue_t workqueue;
void threadpool_init(void) {
  workqueue_init(&workqueue, MAX_THREAD);
}
/** **** ******** **************** thread pool **************** ******** **** **/
typedef struct client {
  int fd;
  char rBuffer[MAX_BUFFER];
  int length;
} client_t;
/* Blocking echo handler (not called from the epoll server below; kept from a
 * thread-per-connection variant of this experiment). */
void *client_cb(void *arg) {
  int clientfd = *(int *)arg;
  char buffer[MAX_BUFFER] = {0};
  pid_t childpid = getpid();
  while (1) {
    bzero(buffer, MAX_BUFFER);
    ssize_t length = recv(clientfd, buffer, MAX_BUFFER, 0); //bio
    if (length > 0) {
      //printf(" PID:%d --> buffer: %s\n", childpid, buffer);
      int sLen = send(clientfd, buffer, length, 0);
      //printf(" PID:%d --> sLen: %d\n", childpid, sLen);
    } else if (length == 0) {
      printf(" PID:%d client disconnect\n", childpid);
      break;
    } else {
      printf(" PID:%d errno:%d\n", childpid, errno);
      break;
    }
  }
  return NULL;
}
static int nRecv(int sockfd, void *data, size_t length, int *count) {
  int left_bytes;
  int read_bytes;
  int res;
  int ret_code;
  unsigned char *p;
  struct pollfd pollfds;
  pollfds.fd = sockfd;
  pollfds.events = ( POLLIN | POLLERR | POLLHUP );
  read_bytes = 0;
  ret_code = 0;
  p = (unsigned char *)data;
  left_bytes = length;
  while (left_bytes > 0) {
    read_bytes = recv(sockfd, p, left_bytes, 0);
    if (read_bytes > 0) {
      left_bytes -= read_bytes;
      p += read_bytes;
      continue;
    } else if (read_bytes < 0) {
      if (!(errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)) {
        ret_code = (errno != 0 ? errno : EINTR);
        break; /* hard error: stop reading */
      }
    } else {
      ret_code = ENOTCONN;
      break;
    }
    res = poll(&pollfds, 1, 5);
    if (pollfds.revents & POLLHUP) {
      ret_code = ENOTCONN;
      break;
    }
    if (res < 0) {
      if (errno == EINTR) {
        continue;
      }
      ret_code = (errno != 0 ? errno : EINTR);
      break; /* poll failed: stop reading */
    } else if (res == 0) {
      ret_code = ETIMEDOUT;
      break;
    }
  }
  if (count != NULL) {
    *count = length - left_bytes;
  }
  //printf("nRecv:%s, ret_code:%d, count:%d\n", (char*)data, ret_code, *count);
  return ret_code;
}
/* Edge-triggered drain loop: with EPOLLET an event fires only once per state
 * change, so the reader must keep calling recv() until EAGAIN.
 * (Not called from main; shown for reference.) */
void epoll_et_loop(int sockfd) {
  char buffer[MAX_BUFFER];
  int ret;
  while (1) {
    memset(buffer, 0, MAX_BUFFER);
    ret = recv(sockfd, buffer, MAX_BUFFER, 0);
    if (ret == -1) {
      if (errno == EAGAIN || errno == EWOULDBLOCK) {
        printf(" read all data\n");
        break;
      }
      close(sockfd);
      break;
    } else if (ret == 0) {
      printf(" disconnect\n");
      close(sockfd);
      break;
    } else {
      printf("Recv:%s, %d Bytes\n", buffer, ret);
    }
  }
}
static int nSend(int sockfd, const void *buffer, int length, int flags) {
  int wrotelen = 0;
  int writeret = 0;
  unsigned char *p = (unsigned char *)buffer;
  struct pollfd pollfds = {0};
  pollfds.fd = sockfd;
  pollfds.events = ( POLLOUT | POLLERR | POLLHUP );
  do {
    int result = poll( &pollfds, 1, 5);
    if (pollfds.revents & POLLHUP) {
      printf(" ntySend errno:%d, revent:%x\n", errno, pollfds.revents);
      return -1;
    }
    if (result < 0) {
      if (errno == EINTR) continue;
      printf(" ntySend errno:%d, result:%d\n", errno, result);
      return -1;
    } else if (result == 0) {
      printf(" ntySend errno:%d, socket timeout \n", errno);
      return -1;
    }
    writeret = send( sockfd, p + wrotelen, length - wrotelen, flags );
    if( writeret <= 0 )
    {
      break;
    }
    wrotelen += writeret ;
  } while (wrotelen < length);
  return wrotelen;
}
static int curfds = 1;
static int nRun = 0;
void client_job(job_t *job) {
  client_t *rClient = (client_t*)job->user_data;
  int clientfd = rClient->fd;
  char buffer[MAX_BUFFER];
  bzero(buffer, MAX_BUFFER);
  int length = 0;
  int ret = nRecv(clientfd, buffer, MAX_BUFFER, &length);
  if (length > 0) { 
    if (nRun || buffer[0] == 'a') {   
      printf(" TcpRecv --> curfds : %d, buffer: %s\n", curfds, buffer);
      nSend(clientfd, buffer, strlen(buffer), 0);
    }
  } else if (ret == ENOTCONN) {
    curfds --;
    close(clientfd);
  }
  free(rClient);
  free(job);
}
void client_data_process(int clientfd) {
  char buffer[MAX_BUFFER];
  bzero(buffer, MAX_BUFFER);
  int length = 0;
  int ret = nRecv(clientfd, buffer, MAX_BUFFER, &length);
  if (length > 0) { 
    if (nRun || buffer[0] == 'a') {   
      printf(" TcpRecv --> curfds : %d, buffer: %s\n", curfds, buffer);
      nSend(clientfd, buffer, strlen(buffer), 0);
    }
  } else if (ret == ENOTCONN) {
    curfds --;
    close(clientfd);
  }
}
int listenfd(int fd, int *fds) {
  int i = 0;
  for (i = 0;i < MAX_PORT;i ++) {
    if (fd == *(fds+i)) return *(fds+i);
  }
  return 0;
}
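/* main: listen on MAX_PORT (100) consecutive ports starting at SERVER_PORT.
 * A single client IP can open at most ~64K connections to one server
 * ip:port pair (each connection needs a distinct ephemeral source port),
 * so spreading the listeners across 100 ports lets each client machine
 * contribute several hundred thousand connections. */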
int main(void) {
  int i = 0;
  int sockfds[MAX_PORT] = {0};
  printf("C1000K Server Start\n");
  threadpool_init();
  int epoll_fd = epoll_create(MAX_EPOLLSIZE); 
  for (i = 0;i < MAX_PORT;i ++) {
    int sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (sockfd < 0) {
      perror("socket");
      return 1;
    }
    struct sockaddr_in addr;
    memset(&addr, 0, sizeof(struct sockaddr_in));
    addr.sin_family = AF_INET;
    addr.sin_port = htons(SERVER_PORT+i);
    addr.sin_addr.s_addr = INADDR_ANY;
    if (bind(sockfd, (struct sockaddr*)&addr, sizeof(struct sockaddr_in)) < 0) {
      perror("bind");
      return 2;
    }
    if (listen(sockfd, 5) < 0) {
      perror("listen");
      return 3;
    }
    sockfds[i] = sockfd;
    printf("C1000K Server Listen on Port:%d\n", SERVER_PORT+i);
    struct epoll_event ev;
    ev.events = EPOLLIN | EPOLLET; /* edge-triggered (level-triggered is epoll's default) */
    ev.data.fd = sockfd;
    epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sockfd, &ev);  
  }
  struct timeval tv_begin;
  gettimeofday(&tv_begin, NULL);
  struct epoll_event events[MAX_EPOLLSIZE];
  while (1) {
    int nfds = epoll_wait(epoll_fd, events, MAX_EPOLLSIZE, 5); /* cap at the array size; passing curfds could overflow events once curfds > MAX_EPOLLSIZE */
    if (nfds == -1) {
      perror("epoll_wait");
      break;
    }
    for (i = 0;i < nfds;i ++) {
      int sockfd = listenfd(events[i].data.fd, sockfds);
      if (sockfd) {
        struct sockaddr_in client_addr;
        memset(&client_addr, 0, sizeof(struct sockaddr_in));
        socklen_t client_len = sizeof(client_addr);
        int clientfd = accept(sockfd, (struct sockaddr*)&client_addr, &client_len);
        if (clientfd < 0) {
          perror("accept");
          continue; /* e.g. EMFILE under fd pressure; don't exit the whole server */
        }
        if (curfds ++ > 1000 * 1000) {
          nRun = 1;
        }
#if 0
        printf(" Client %d: %d.%d.%d.%d:%d \n", curfds, *(unsigned char*)(&client_addr.sin_addr.s_addr), *((unsigned char*)(&client_addr.sin_addr.s_addr)+1),                         
              *((unsigned char*)(&client_addr.sin_addr.s_addr)+2), *((unsigned char*)(&client_addr.sin_addr.s_addr)+3),                         
              client_addr.sin_port);
#elif 0
        if(curfds % 1000 == 999) {  
          printf("connections: %d, fd: %d\n", curfds, clientfd);      
        }
#else
        if (curfds % 1000 == 999) {
          struct timeval tv_cur;
          memcpy(&tv_cur, &tv_begin, sizeof(struct timeval));
          gettimeofday(&tv_begin, NULL);
          int time_used = TIME_SUB_MS(tv_begin, tv_cur);
          printf("connections: %d, sockfd:%d, time_used:%d\n", curfds, clientfd, time_used);
        }
#endif
        ntySetNonblock(clientfd);
        ntySetReUseAddr(clientfd);
        struct epoll_event ev;
        ev.events = EPOLLIN | EPOLLET | EPOLLOUT;
        ev.data.fd = clientfd;
        epoll_ctl(epoll_fd, EPOLL_CTL_ADD, clientfd, &ev);
      } else {
        int clientfd = events[i].data.fd;
#if 1
        if (nRun) {
          printf(" New Data is Comming\n");
          client_data_process(clientfd);
        } else {
          client_t *rClient = (client_t*)malloc(sizeof(client_t));
          memset(rClient, 0, sizeof(client_t));       
          rClient->fd = clientfd;
          job_t *job = malloc(sizeof(job_t));
          job->job_function = client_job;
          job->user_data = rClient;
          workqueue_add_job(&workqueue, job);
        }
#else
        client_data_process(clientfd);
#endif
      }
    }
  }
  return 0;
}
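For reference: assuming the listing above is saved as server.c (the file name is mine, not from the original), it builds with gcc -o server server.c -lpthread. Remember to raise ulimit -n in the same shell before starting it, as described below.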

Client code

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/epoll.h>
#include <errno.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <fcntl.h>
#define MAX_BUFFER    128
#define MAX_EPOLLSIZE (384*1024)
#define MAX_PORT    100
#define TIME_SUB_MS(tv1, tv2)  ((tv1.tv_sec - tv2.tv_sec) * 1000 + (tv1.tv_usec - tv2.tv_usec) / 1000)
int isContinue = 0;
static int ntySetNonblock(int fd) {
  int flags;
  flags = fcntl(fd, F_GETFL, 0);
  if (flags < 0) return flags;
  flags |= O_NONBLOCK;
  if (fcntl(fd, F_SETFL, flags) < 0) return -1;
  return 0;
}
static int ntySetReUseAddr(int fd) {
  int reuse = 1;
  return setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *)&reuse, sizeof(reuse));
}
int main(int argc, char **argv) {
  if (argc <= 2) {
    printf("Usage: %s ip port\n", argv[0]);
    exit(0);
  }
  const char *ip = argv[1];
  int port = atoi(argv[2]);
  int connections = 0;
  char buffer[128] = {0};
  int i = 0, index = 0;
  struct epoll_event events[MAX_EPOLLSIZE];
  int epoll_fd = epoll_create(MAX_EPOLLSIZE);
  strcpy(buffer, " Data From MulClient\n");
  struct sockaddr_in addr;
  memset(&addr, 0, sizeof(struct sockaddr_in));
  addr.sin_family = AF_INET;
  addr.sin_addr.s_addr = inet_addr(ip);
  struct timeval tv_begin;
  gettimeofday(&tv_begin, NULL);
  while (1) {
    if (++index >= MAX_PORT) index = 0;
    struct epoll_event ev;
    int sockfd = 0;
    if (connections < 380000 && !isContinue) {
      sockfd = socket(AF_INET, SOCK_STREAM, 0);
      if (sockfd == -1) {
        perror("socket");
        goto err;
      }
      //ntySetReUseAddr(sockfd);
      addr.sin_port = htons(port+index);
      if (connect(sockfd, (struct sockaddr*)&addr, sizeof(struct sockaddr_in)) < 0) {
        perror("connect");
        goto err;
      }
      ntySetNonblock(sockfd);
      ntySetReUseAddr(sockfd);
      sprintf(buffer, "Hello Server: client --> %d\n", connections);
      send(sockfd, buffer, strlen(buffer), 0);
      ev.data.fd = sockfd;
      ev.events = EPOLLIN | EPOLLOUT;
      epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sockfd, &ev);
      connections ++;
    }
    //connections ++;
    if (connections % 1000 == 999 || connections >= 380000) {
      struct timeval tv_cur;
      memcpy(&tv_cur, &tv_begin, sizeof(struct timeval));
      gettimeofday(&tv_begin, NULL);
      int time_used = TIME_SUB_MS(tv_begin, tv_cur);
      printf("connections: %d, sockfd:%d, time_used:%d\n", connections, sockfd, time_used);
      int nfds = epoll_wait(epoll_fd, events, connections, 100);
      for (i = 0;i < nfds;i ++) {
        int clientfd = events[i].data.fd;
        if (events[i].events & EPOLLOUT) {
          sprintf(buffer, "data from %d\n", clientfd);
          send(clientfd, buffer, strlen(buffer), 0); /* use the ready fd, not the last-created sockfd */
        } else if (events[i].events & EPOLLIN) {
          char rBuffer[MAX_BUFFER] = {0};
          ssize_t length = recv(clientfd, rBuffer, MAX_BUFFER, 0);
          if (length > 0) {
            printf(" RecvBuffer:%s\n", rBuffer);
            if (!strcmp(rBuffer, "quit")) {
              isContinue = 1; /* stop opening new connections */
            }
          } else if (length == 0) {
            printf(" Disconnect clientfd:%d\n", clientfd);
            connections --;
            close(clientfd);
          } else {
            if (errno == EINTR) continue;
            printf(" Error clientfd:%d, errno:%d\n", clientfd, errno);
            close(clientfd);
          }
        } else {
          printf(" clientfd:%d, errno:%d\n", clientfd, errno);
          close(clientfd);
        }
      }
    }
    usleep(1 * 1000);
  }
  return 0;
err:
  printf("error : %s\n", strerror(errno));
  return 0;
}
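Likewise, assuming the client listing is saved as client.c, it builds with gcc -o client client.c. Start one instance per client machine, passing the server's address and base port, e.g. ./client 192.168.1.100 8080 (the IP is a placeholder for your server's address); each instance then opens up to 380,000 connections spread across the 100 ports.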

Problems encountered

Error: too many open files


Why this happens

By default the kernel limits each process to 1024 open file descriptors, and every TCP connection consumes one descriptor, so the server fails long before reaching one million connections. Check the current limit with ulimit:

 ulimit -a
 # "open files" shows 1024, so we raise it to 1048576


How to fix it

Raise the number of file descriptors the process may open.

1. Temporary (current shell only): ulimit -n 1048576

2. Permanent: edit /etc/security/limits.conf as follows.

vim /etc/security/limits.conf
# append the following two lines at the end of the file
*               soft    nofile          1048576
*               hard    nofile          1048576
# save with Esc then :wq
# reboot for the change to take effect
reboot
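Besides the shell-level change, a process can also raise its own descriptor limit at startup, up to the hard limit from limits.conf. Below is a minimal sketch (the helper name raise_nofile_limit is my own, not part of the original code); calling it early in main() makes the server less dependent on the invoking shell's ulimit:

#include <stdio.h>
#include <sys/resource.h>

/* Raise this process's RLIMIT_NOFILE soft limit toward `want`.
 * Without CAP_SYS_RESOURCE the soft limit can only be raised up to
 * the hard limit, so the limits.conf change above is still needed. */
static int raise_nofile_limit(rlim_t want) {
  struct rlimit rl;
  if (getrlimit(RLIMIT_NOFILE, &rl) < 0) {
    perror("getrlimit");
    return -1;
  }
  if (want > rl.rlim_max) want = rl.rlim_max; /* clamp to the hard limit */
  rl.rlim_cur = want;
  if (setrlimit(RLIMIT_NOFILE, &rl) < 0) {
    perror("setrlimit");
    return -1;
  }
  return 0;
}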


Killed

At this point we were very close, yet another problem appeared: the process was abruptly killed.

Why this happens

The machine ran out of memory: with hundreds of thousands of connections, each socket's kernel send and receive buffers add up until the OOM killer terminates the process (dmesg typically shows an "Out of memory" entry confirming this).

How to fix it

Shrink the per-socket TCP buffers and set an explicit global TCP memory budget by tuning net.ipv4.tcp_mem, net.ipv4.tcp_wmem, and net.ipv4.tcp_rmem.

server:

vim /etc/sysctl.conf
# append at the end of the file
net.ipv4.tcp_mem = 252144 524288 786432
net.ipv4.tcp_wmem = 512 512 1024
net.ipv4.tcp_rmem = 512 512 1024
fs.file-max = 1048576
net.nf_conntrack_max = 1048576
# save with Esc then :wq
# then load the conntrack module and apply the settings:
sudo modprobe ip_conntrack
sysctl -p
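A note on units: the three net.ipv4.tcp_mem values (low / pressure / high) are measured in pages of 4 KB, so a high-water mark of 786432 pages is roughly 3 GB for the whole TCP stack, while tcp_rmem and tcp_wmem (min / default / max) are bytes per socket; shrinking the per-socket minimums to a few hundred bytes is what lets a million mostly idle connections fit in 8 GB of RAM. To watch how close the kernel gets to those limits, you can read /proc/net/sockstat, whose TCP "mem" counter uses the same page units. A minimal reader sketch:

#include <stdio.h>

/* Print /proc/net/sockstat; the "TCP: ... mem N" field counts pages of
 * kernel memory used by TCP, the same unit as net.ipv4.tcp_mem. */
int main(void) {
  char line[256];
  FILE *fp = fopen("/proc/net/sockstat", "r");
  if (fp == NULL) {
    perror("fopen /proc/net/sockstat");
    return 1;
  }
  while (fgets(line, sizeof(line), fp) != NULL) {
    fputs(line, stdout);
  }
  fclose(fp);
  return 0;
}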



client:

vim /etc/sysctl.conf
# append at the end of the file
net.ipv4.tcp_mem = 252144 524288 786432
net.ipv4.tcp_wmem = 1024 1024 2048
net.ipv4.tcp_rmem = 1024 1024 2048
fs.file-max = 1048576
net.nf_conntrack_max = 1048576
# save with Esc then :wq
# then load the conntrack module and apply the settings:
sudo modprobe ip_conntrack
sysctl -p


Final result

With all of the above configured, running the programs again reaches the goal of one million concurrent connections.


Lessons learned

There is a large gap between theory and practice. Don't just read about servers like this: debug constantly, and actually type out and run the program yourself.

This article draws on the C/C++ Linux server senior architecture course from 零声教育 (Zero Voice Education): link
