接续接上篇 缓存时代来临 为蓝本,继续改造我们的百万级站点架构,这次我们
拿之前存储静态内容的 nfs 开刀,众所周知 nfs 的多台集群节点下可能由于多重
原因(磁盘io , 网络带宽, 并发场景),不适合做文件共享系统的基础结构.
互联网站点中,存在大量图片或其他静态内容,并且这些内容一般在1M之内,对于
海量小文件,我们将采用mogilefs分布式文件系统来完成.其中概念自行google.
# mogilefs分布式文件系统工作流程
架构已经愈发复杂,我们需要重新梳理一下.从下表中应该很容易看出第三弹的
任务,重新规划如下:
# 罗马数字表示在何阶段规划,(*)代表已废弃
slave1.king.com
172.16.43.1
DNS轮询 -> slave1.king.com , slave2.king.com(I)
haproxy七层代理流量分离 -> imgs , text , dynamic(I)
keepalived 为haproxy HA(I)
slave2.king.com
172.16.43.2
haproxy七层代理流量分离 -> imgs , text , dynamic(I)
keepalived 为haproxy HA(I)
slave3.king.com
172.16.43.3
nginx虚拟主机组 -> imgs1.king.com , imgs2.king.com
text1.king.com , text2.king.com
dynamic1.king.com(I)
php-fpm模块(I)
mysql数据库(I)
nfs /nfsshared(*) New!!
varnish imgs 缓存(II)
memcache session, key/value cache(II)
mogilefs tracker node(III) New!!
mogilefs store node(III) New!!
slave4.king.com
172.16.43.4
nginx虚拟主机 -> dynamic2.king.com(I)
php-fpm模块(I)
mysql-proxy(I)
mysql数据库(I)
varnish text 缓存(II)
memcache session, key/value cache(II)
mogilefs tracker node(III) New!!
mogilefs store node(III) New!!
# 新规划内容如下
操作过程
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
|
1. 安装配置阶段(slave3.king.com , slave4.king.com)
# 准备如下包,因为我们要安装tracker节点和store节点
MogileFS-Server-2.46-2.el6.noarch.rpm
MogileFS-Server-mogilefsd-2.46-2.el6.noarch.rpm
MogileFS-Server-mogstored-2.46-2.el6.noarch.rpm
MogileFS-Utils-2.19-1.el6.noarch.rpm
perl-MogileFS-Client-1.14-1.el6.noarch.rpm
perl-Net-Netmask-1.9015-8.el6.noarch.rpm
perl-Perlbal-1.78-1.el6.noarch.rpm
#
# 安装mogilefs以及创建store目录,改变权限
yum install *.rpm perl-IO-AIO -y
mkdir -pv /dfs/mogdata/dev1    # (slave4 上建 dev2)
chown -R mogilefs.mogilefs /var/run/mogilefsd /dfs/mogdata
#
# mysql -e 授权 建库(仅slave3执行)
mysql -e "create database mogilefs;"
mysql -e "grant all on *.* to 'root'@'172.16.%.%' identified by '';"
mysql -e "grant all on mogilefs.* to 'moguser'@'172.16.%.%' identified by 'mogpass';"
mysql -e "flush privileges;"
#
# 初始化mogilefs所需数据库(仅slave3执行)
mogdbsetup --dbhost=172.16.43.3 --dbport=3306 --dbname=mogilefs --dbuser=moguser --dbpass=mogpass --yes
|
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
|
# 接 1. 安装配置阶段(slave3.king.com , slave4.king.com) 操作
# mogilefs配置文件
vim /etc/mogilefs/mogilefsd.conf
# Enable daemon mode to work in background and use syslog
daemonize = 1
# Where to store the pid of the daemon (must be the same in the init script)
pidfile = /var/run/mogilefsd/mogilefsd.pid
# Database connection information
db_dsn = DBI:mysql:mogilefs:host=172.16.43.3
db_user = moguser
db_pass = mogpass
# IP:PORT to listen on for mogilefs client requests (slave4 上应改为 172.16.43.4:7001)
listen = 172.16.43.3:7001
# Optional, if you don't define the port above.
conf_port = 7001
# Number of query workers to start by default.
query_jobs = 10
# Number of delete workers to start by default.
delete_jobs = 1
# Number of replicate workers to start by default.
replicate_jobs = 5
# Number of reaper workers to start by default.
# (you don't usually need to increase this)#
reaper_jobs = 1
#
# store配置文件
vim /etc/mogilefs/mogstored.conf
maxconns = 10000
httplisten = 0.0.0.0:7500
mgmtlisten = 0.0.0.0:7501
docroot = /dfs/mogdata
#
# 分别启动tracker以及store服务 (观察7001与7500,7501端口)
service mogilefsd start
service mogstored start
|
# 观察服务启动情况
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
|
2. 添加分布式文件系统(slave3.king.com)
# 添加分布式节点信息
mogadm --tracker=172.16.43.3:7001 host add 172.16.43.3 --ip=172.16.43.3 --status=alive
mogadm --tracker=172.16.43.4:7001 host add 172.16.43.4 --ip=172.16.43.4 --status=alive
mogadm --tracker=172.16.43.3:7001 host list
#
# 添加分布式设备节点信息
mogadm --tracker=172.16.43.3:7001 device add 172.16.43.3 1
mogadm --tracker=172.16.43.4:7001 device add 172.16.43.4 2
mogadm --tracker=172.16.43.3:7001 device list
#
# 添加分布式域节点信息
mogadm --tracker=172.16.43.3:7001 domain add images
mogadm --tracker=172.16.43.3:7001 domain add text
mogadm --tracker=172.16.43.3:7001 domain list
|
# mogilefs 所需 host , device , domain 添加准备就绪情况
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
|
3. nginx反向代理解析文件
# 将第三方模块编译进nginx,先将之前的nginx服务停止,防止意外
service nginx stop
# 第三方模块 nginx-mogilefs-module-master.zip 需先解压,编译时通过 --add-module 加入
cd nginx-1.4.7
./configure --prefix=/usr/local/nginx --sbin-path=/usr/local/nginx/nginx --conf-path=/etc/nginx/nginx.conf --pid-path=/usr/local/nginx/nginx.pid --with-pcre --add-module=/root/mogilefs/nginx-mogilefs-module-master
make && make install
#
# 注意:以下 location 最终需置于各 server 块内,此处为节选示意,完整配置见文末
http {
    upstream mogcluster {
        server 172.16.43.3:7001;
        server 172.16.43.4:7001;
    }

    # 之前的系统图片分别存放在 images 与 imgs 目录,考虑程序一致性,这里两个路径都代理到 mogilefs
    location /images/ {
        mogilefs_tracker mogcluster;
        mogilefs_domain  images;
        mogilefs_methods GET PUT DELETE;
        mogilefs_pass {
            proxy_pass        $mogilefs_path;
            proxy_hide_header Content-Type;
            proxy_buffering   off;
        }
    }

    location /imgs/ {
        mogilefs_tracker mogcluster;
        mogilefs_domain  images;
        mogilefs_methods GET PUT DELETE;
        mogilefs_pass {
            proxy_pass        $mogilefs_path;
            proxy_hide_header Content-Type;
            proxy_buffering   off;
        }
    }

    # 之前系统静态文件分为 static,css,js 等目录,所以这里需要正则捕获目录,$1 作为 mogilefs key
    location ~* ^(/(static|css|js)/.*)$ {
        mogilefs_tracker mogcluster;
        mogilefs_domain  text;
        mogilefs_pass $1 {
            proxy_pass        $mogilefs_path;
            proxy_hide_header Content-Type;
            proxy_buffering   off;
        }
    }
}
|
1
2
3
4
5
6
|
4. 以上一篇架构为例,添加测试文件
mogupload --trackers=172.16.43.3:7001 --domain=images --key='/2.jpg' --file='./imgs/2.jpg'
mogupload --trackers=172.16.43.3:7001 --domain=images --key='/1.jpg' --file='./images/1.jpg'
mogupload --trackers=172.16.43.3:7001 --domain=text --key='static/index.html' --file='./static/index.html'
mogupload --trackers=172.16.43.3:7001 --domain=text --key='css/test.css' --file='./css/test.css'
mogupload --trackers=172.16.43.3:7001 --domain=text --key='js/test.js' --file='./js/test.js'
|
# 测试
service nginx restart
service varnish restart
使用浏览器隐身模式访问
最后附完整slave3.king.com中nginx的配置文件
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
|
# Full nginx.conf for slave3.king.com: one dynamic (php-fpm) vhost, two image
# vhosts backed by the MogileFS "images" domain, two text vhosts backed by the
# "text" domain. Reassembled from the extraction-garbled original (directives
# and paths had been split across lines); directive values are unchanged.
#user  nobody;
worker_processes  1;

#error_log  logs/error.log;
#error_log  logs/error.log  notice;
#error_log  logs/error.log  info;

#pid  logs/nginx.pid;

events {
    worker_connections  1024;
}

http {
    include       mime.types;
    default_type  application/octet-stream;

    #log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
    #                  '$status $body_bytes_sent "$http_referer" '
    #                  '"$http_user_agent" "$http_x_forwarded_for"';

    #access_log  logs/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    #keepalive_timeout  0;
    keepalive_timeout  5;

    gzip  on;

    # MogileFS tracker pool used by the mogilefs_tracker directives below.
    upstream mogcluster {
        server 172.16.43.3:7001;
        server 172.16.43.4:7001;
    }

    # Dynamic content vhost: .php requests go to local php-fpm on port 9000.
    server {
        listen       80;
        server_name  dynamic1.king.com;
        access_log   /var/log/nginx/dynamic1.access.log;

        location ~ \.php$ {
            root           /nfsshared/html;
            fastcgi_pass   172.16.43.3:9000;
            fastcgi_index  index.php;
            fastcgi_param  SCRIPT_FILENAME  $document_root$fastcgi_script_name;
            include        fastcgi_params;
        }

        location / {
            root   /nfsshared/html;
            index  index.php index.html index.htm;
        }
    }

    # Image vhost 1: both /images/ and /imgs/ are served from the MogileFS
    # "images" domain (two paths kept for compatibility with existing pages).
    server {
        listen       80;
        server_name  imgs1.king.com;
        access_log   /var/log/nginx/imgs1.access.log;

        location /images/ {
            mogilefs_tracker  mogcluster;
            mogilefs_domain   images;
            mogilefs_methods  GET PUT DELETE;
            mogilefs_pass {
                proxy_pass         $mogilefs_path;
                proxy_hide_header  Content-Type;
                proxy_buffering    off;
            }
        }

        location /imgs/ {
            mogilefs_tracker  mogcluster;
            mogilefs_domain   images;
            mogilefs_methods  GET PUT DELETE;
            mogilefs_pass {
                proxy_pass         $mogilefs_path;
                proxy_hide_header  Content-Type;
                proxy_buffering    off;
            }
        }

        error_page  404              /404.html;
        error_page  500 502 503 504  /50x.html;
        location = /50x.html {
            root  html;
        }
    }

    # Image vhost 2: identical MogileFS configuration as imgs1.
    server {
        listen       80;
        server_name  imgs2.king.com;
        access_log   /var/log/nginx/imgs2.access.log;

        location /images/ {
            mogilefs_tracker  mogcluster;
            mogilefs_domain   images;
            mogilefs_methods  GET PUT DELETE;
            mogilefs_pass {
                proxy_pass         $mogilefs_path;
                proxy_hide_header  Content-Type;
                proxy_buffering    off;
            }
        }

        location /imgs/ {
            mogilefs_tracker  mogcluster;
            mogilefs_domain   images;
            mogilefs_methods  GET PUT DELETE;
            mogilefs_pass {
                proxy_pass         $mogilefs_path;
                proxy_hide_header  Content-Type;
                proxy_buffering    off;
            }
        }

        error_page  404              /404.html;
        error_page  500 502 503 504  /50x.html;
        location = /50x.html {
            root  html;
        }
    }

    # Text vhost 1: static/css/js paths are captured by the regex and the
    # captured path ($1) is used as the MogileFS key in the "text" domain.
    server {
        listen       80;
        server_name  text1.king.com;
        access_log   /var/log/nginx/text1.access.log;

        location ~* ^(/(static|css|js)/.*)$ {
            mogilefs_tracker  mogcluster;
            mogilefs_domain   text;
            mogilefs_pass $1 {
                proxy_pass         $mogilefs_path;
                proxy_hide_header  Content-Type;
                proxy_buffering    off;
            }
        }

        location / {
            index  index.html index.htm;
        }

        error_page  404              /404.html;
        error_page  500 502 503 504  /50x.html;
        location = /50x.html {
            root  html;
        }
    }

    # Text vhost 2: identical MogileFS configuration as text1.
    server {
        listen       80;
        server_name  text2.king.com;
        access_log   /var/log/nginx/text2.access.log;

        location ~* ^(/(static|css|js)/.*)$ {
            mogilefs_tracker  mogcluster;
            mogilefs_domain   text;
            mogilefs_pass $1 {
                proxy_pass         $mogilefs_path;
                proxy_hide_header  Content-Type;
                proxy_buffering    off;
            }
        }

        location / {
            index  index.html index.htm;
        }

        error_page  404              /404.html;
        error_page  500 502 503 504  /50x.html;
        location = /50x.html {
            root  html;
        }
    }
}
|
本文转自My_King1 51CTO博客,原文链接:http://blog.51cto.com/apprentice/1409340,如需转载请自行联系原作者