#$1: total number of machines; Hadoop is deployed in fully distributed mode
#$2: machine list as space-separated "ip hostname" pairs, e.g.: 192.168.2.1 machine1 192.168.2.2 machine2 192.168.2.3 machine3
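#Example invocation (the script filename "setup.sh" is only illustrative; the argument format comes from the header above):
#  bash setup.sh 3 "192.168.2.1 machine1 192.168.2.2 machine2 192.168.2.3 machine3"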
#Download the configuration bundle
rm -rf /root/trans.zip
rm -rf /root/trans
wget -P /root http://fastmr.oss-cn-shenzhen.aliyuncs.com/bigdata/conf/trans.zip
unzip /root/trans.zip -d /root
chmod -R 755 /root/trans
#Configure /etc/hosts
#Input string: the "$2" machine list
input_str=$2
#Split the string into an array
input_arr=($input_str)
#Array length
len=${#input_arr[@]}
#Process each IP address and hostname pair
for (( i=0; i<$len; i+=2 ))
do
ip=${input_arr[$i]}
name=${input_arr[$i+1]}
#The first IP/hostname pair is additionally registered as master1
if [[ $i == 0 ]]; then
echo "$ip master1" >>/etc/hosts
fi
#Append the pair to /etc/hosts
echo "$ip $name" >>/etc/hosts
done
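#Illustration only: with the example arguments above, this loop appends entries like
#  192.168.2.1 master1
#  192.168.2.1 machine1
#  192.168.2.2 machine2
#  192.168.2.3 machine3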
#Configure passwordless SSH trust between nodes
cp /root/trans/config/trust/authorized_keys /root/.ssh/
cp /root/trans/config/trust/id_rsa.pub /root/.ssh/
cp /root/trans/config/trust/id_rsa /root/.ssh/
#Distribute the hosts file to the other nodes
while IFS= read -r ip || [[ -n "$ip" ]]; do
scp -r /etc/hosts $ip:/etc/
done < "/root/ip.list"
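#Note: /root/ip.list is assumed to exist already (prepared outside this script),
#with one peer IP per line, for example:
#  192.168.2.2
#  192.168.2.3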
#Install Java and download the installation packages
os=`uname -m`
if [ "$os" == "x86_64" ]; then
yum install -y java-1.8.0-openjdk-devel.x86_64
wget -P /opt http://fastmr.oss-cn-shenzhen.aliyuncs.com/bigdata/x86/TPC.tar.gz
wget -P /opt http://fastmr.oss-cn-shenzhen.aliyuncs.com/bigdata/x86/apache-hive-2.3.7-bin.tar.gz
wget -P /opt http://fastmr.oss-cn-shenzhen.aliyuncs.com/bigdata/x86/collectd.tar.gz
wget -P /opt http://fastmr.oss-cn-shenzhen.aliyuncs.com/bigdata/x86/hadoop-3.3.1.tar.gz
wget -P /opt http://fastmr.oss-cn-shenzhen.aliyuncs.com/bigdata/x86/mysql-connector-java-8.0.26.jar
wget -P /opt http://fastmr.oss-cn-shenzhen.aliyuncs.com/bigdata/x86/spark-3.2.1-bin-hadoop3.2.tgz
wget -P /root http://fastmr.oss-cn-shenzhen.aliyuncs.com/bigdata/x86/tpcds-kit.tar.gz
cd /opt && tar -xf TPC.tar.gz
cd /opt && tar -xf apache-hive-2.3.7-bin.tar.gz
cd /opt && tar -xf collectd.tar.gz
cd /opt && tar -xf hadoop-3.3.1.tar.gz
cd /opt && tar -xf spark-3.2.1-bin-hadoop3.2.tgz
cd /root && tar -xf tpcds-kit.tar.gz
cd /root && wget https://gosspublic.alicdn.com/ossutil/1.7.9/ossutil64 --no-check-certificate && chmod 755 ossutil64
cp /root/trans/conf/oss/.ossutilconfig /root
cd /opt && rm -rf hadoop && ln -s hadoop-3.3.1 hadoop
rm -rf /opt/apache-hive-2.3.7-bin/conf/hive-site.xml
cp /root/trans/conf/hive/hive-site.xml /opt/apache-hive-2.3.7-bin/conf/
cp /root/trans/conf/hive/hive-site.xml /opt/spark-3.2.1-bin-hadoop3.2/conf
echo -e "export SPARK_HOME=/opt/spark-3.2.1-bin-hadoop3.2 \n" >>/etc/profile.d/env.sh
echo -e "export HIVE_HOME=/opt/apache-hive-2.3.7-bin \n" >>/etc/profile.d/env.sh
else
yum install -y java-1.8.0-openjdk-devel.aarch64
cd /root && rm -rf /root/ossutil-v1.7.15-linux-arm64.zip && wget https://gosspublic.alicdn.com/ossutil/1.7.15/ossutil-v1.7.15-linux-arm64.zip --no-check-certificate -P /root && yum install -y unzip && unzip /root/ossutil-v1.7.15-linux-arm64.zip && cp /root/ossutil-v1.7.15-linux-arm64/ossutil64 /root/ && chmod 755 /root/ossutil64
cp /root/trans/conf/oss/.ossutilconfig /root
wget -P /opt http://fastmr.oss-cn-shenzhen.aliyuncs.com/bigdata/arm/TPC.tar.gz
wget -P /opt http://fastmr.oss-cn-shenzhen.aliyuncs.com/bigdata/arm/apache-hive-3.1.2-bin.tar.gz
wget -P /opt http://fastmr.oss-cn-shenzhen.aliyuncs.com/bigdata/arm/collectd.tar.gz
wget -P /opt http://fastmr.oss-cn-shenzhen.aliyuncs.com/bigdata/arm/hadoop-3.3.1.tar.gz
wget -P /opt http://fastmr.oss-cn-shenzhen.aliyuncs.com/bigdata/arm/spark-3.1.2-bin-hadoop3.2.tgz
wget -P /root http://fastmr.oss-cn-shenzhen.aliyuncs.com/bigdata/arm/tpcds-kit.tar.gz
cd /opt && tar -xf TPC.tar.gz
cd /opt && tar -xf apache-hive-3.1.2-bin.tar.gz
cd /opt && tar -xf collectd.tar.gz
cd /opt && tar -xf hadoop-3.3.1.tar.gz
cd /opt && tar -xf spark-3.1.2-bin-hadoop3.2.tgz
cd /root && tar -xf tpcds-kit.tar.gz
cd /opt && rm -rf hadoop && ln -s hadoop-3.3.1 hadoop
rm -rf /opt/apache-hive-3.1.2-bin/conf/hive-site.xml
cp /root/trans/conf/hive/hive-site.xml /opt/apache-hive-3.1.2-bin/conf/
cp /root/trans/conf/hive/hive-site.xml /opt/spark-3.1.2-bin-hadoop3.2/conf
echo -e "export SPARK_HOME=/opt/spark-3.1.2-bin-hadoop3.2 \n" >>/etc/profile.d/env.sh
echo -e "export HIVE_HOME=/opt/apache-hive-3.1.2-bin \n" >>/etc/profile.d/env.sh
fi
echo -e "export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk \n" >>/etc/profile.d/env.sh
echo -e "export HADOOP_HOME=/opt/hadoop \n" >>/etc/profile.d/env.sh
echo -e "export HADOOP_CONF_DIR=/opt/hadoop/etc/hadoop \n " >>/etc/profile.d/env.sh
echo -e "export PATH=\$JAVA_HOME/bin:\$HADOOP_HOME/bin:\$SPARK_HOME/bin:\$HIVE_HOME/bin:\$PATH \n" >>/etc/profile.d/env.sh
source /etc/profile.d/env.sh
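#Optional sanity check (commented out): confirm the environment variables took effect
#  java -version
#  hadoop version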
#Format and mount the data disks
disk=`lsblk -d -o rota |tail -1|tr -d ' '`
num=`lsblk|wc -l`
data_disk_num=`expr $num - 2`
if [ "$disk" == "1" ]; then
/root/trans/config/system/mkfs-ad.sh $data_disk_num
else
/root/trans/config/system/mkfs-nvme.sh $data_disk_num
fi
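#The mkfs helper scripts are assumed to mount the data disks as /mnt/disk1 ... /mnt/diskN;
#the hdfs-site.xml and yarn-site.xml directory lists generated below rely on that layout.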
#Configure Hadoop
#Configure the workers file
rm -rf /opt/hadoop/etc/hadoop/
wget -P /opt/hadoop/etc/ http://fastmr.oss-cn-shenzhen.aliyuncs.com/bigdata/x86/had_conf.zip
cd /opt/hadoop/etc/ && unzip had_conf.zip
master=`hostname`
worker_num=`expr $1 - 1`
for i in $(seq 1 $worker_num)
do
echo $master |sed "s/.\$/$i/" >>/opt/hadoop/etc/hadoop/workers
done
#Configure hdfs-site.xml
for (( i=1; i<=$data_disk_num; i++ ))
do
echo "/mnt/disk$i/data/hadoop," >> hdfs-site
done
cat hdfs-site| tr -d '\n'|sed 's/.$//' > hdfs-site_bak
sed -i 's/^/<value>/' hdfs-site_bak
echo "</value>" >> hdfs-site_bak
bak=`cat hdfs-site_bak`
sed "20s|.*|$bak|g" /opt/hadoop/etc/hadoop/hdfs-site.xml -i
#Configure yarn-site.xml
for (( i=1; i<=$data_disk_num; i++ ))
do
echo "/mnt/disk$i/data/nmlocaldir," >> yarn-site
done
cat yarn-site| tr -d '\n'|sed 's/.$//' > yarn-site_bak
sed -i 's/^/<value>/' yarn-site_bak
echo "</value>" >> yarn-site_bak
bak=`cat yarn-site_bak`
sed "60s|.*|$bak|g" /opt/hadoop/etc/hadoop/yarn-site.xml -i
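#Illustration only: with two data disks the generated replacement lines look like
#  <value>/mnt/disk1/data/hadoop,/mnt/disk2/data/hadoop</value>
#  <value>/mnt/disk1/data/nmlocaldir,/mnt/disk2/data/nmlocaldir</value>
#and they are assumed to land on the data-directory <value> lines (20 and 60) of the
#hdfs-site.xml / yarn-site.xml shipped in had_conf.zip.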
# Copy the Hadoop directory to the worker nodes
while IFS= read -r ip || [[ -n "$ip" ]]; do
scp -r /opt/hadoop-3.3.1 $ip:/opt/
done < "/root/ip.list"
hdfs namenode -format
/opt/hadoop/sbin/start-all.sh
hadoop fs -mkdir /sparklogs
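#Optional check (commented out): confirm HDFS and YARN came up before continuing
#  jps
#  hdfs dfsadmin -report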
#Configure Spark
cd $SPARK_HOME/conf && cp spark-defaults.conf.template spark-defaults.conf
echo -e "spark.eventLog.enabled true \n " >> $SPARK_HOME/conf/spark-defaults.conf
echo -e "spark.eventLog.dir hdfs://master1:9000/sparklogs \n " >> $SPARK_HOME/conf/spark-defaults.conf
echo -e "spark.history.fs.logDirectory hdfs://master1:9000/sparklogs \n " >> $SPARK_HOME/conf/spark-defaults.conf
$SPARK_HOME/sbin/start-history-server.sh
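#The Spark history server web UI should now be reachable on port 18080 of this host.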
#Configure MySQL
if [ "$os" == "x86_64" ]; then
yum install -y mysql-server.x86_64
else
yum install -y mysql-server.aarch64
fi
mysql_v=`mysql --version|awk '{print $3}'`
if [ -e $HIVE_HOME/lib/mysql-connector-java-$mysql_v.jar ];then
echo "mysql_connect_jar exists"
else
wget -P /root https://downloads.mysql.com/archives/get/p/3/file/mysql-connector-java-$mysql_v.tar.gz
cd /root && tar -zxvf /root/mysql-connector-java-$mysql_v.tar.gz
\cp -r /root/mysql-connector-java-$mysql_v/mysql-connector-java-$mysql_v.jar $HIVE_HOME/lib/
\cp -r /root/mysql-connector-java-$mysql_v/mysql-connector-java-$mysql_v.jar $SPARK_HOME/jars/
fi
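#Note: this assumes the Connector/J archive version matches the MySQL server version
#reported by "mysql --version" (third field, e.g. 8.0.26); older 5.x clients print the
#version in a different position and would need the version extracted differently.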
service mysqld restart
mysql -uroot -D mysql -e "create user 'hive'@'localhost' identified by '123456';"
mysql -uroot -D mysql -e "grant all privileges on *.* to 'hive'@'localhost';"
mysql -uroot -D mysql -e "create user 'hive'@'%' identified by '123456';"
mysql -uroot -D mysql -e "grant all privileges on *.* to 'hive'@'%';"
mysql -uroot -D mysql -e "flush privileges;"
mysql -uroot -D mysql -e "alter user user() identified by '123456';"
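#Optional check (commented out): verify the hive account can log in with the password set above
#  mysql -uhive -p123456 -e "select 1;"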
service mysqld restart
#Configure Hive
#Back up Hive's bundled guava jar and use Hadoop's instead, to avoid the guava version conflict
for f in $HIVE_HOME/lib/guava-*.jar; do mv "$f" "${f}_bak"; done
cp /opt/hadoop/share/hadoop/common/lib/guava-27.0-jre.jar $HIVE_HOME/lib/
rm -rf $HIVE_HOME/conf/hive-site.xml
wget -P $HIVE_HOME/conf/ http://fastmr.oss-cn-shenzhen.aliyuncs.com/bigdata/x86/hive-site.xml
schematool -dbType mysql -initSchema
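#Optional check (commented out): confirm the metastore schema was initialized
#  schematool -dbType mysql -info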
#Configure collectd
if [ "$os" == "x86_64" ]; then
cd /opt/collectd/lib64/jevents && make install
\cp /opt/collectd/lib64/libjson-c.so.2 /lib64/
\cp /opt/collectd/lib64/libpqos.so.4 /lib64/
else
yum install -y git cmake libarchive valgrind
wget http://fastmr.oss-cn-shenzhen.aliyuncs.com/bigdata/conf/json-c.tar.gz -P /root
tar -xf /root/json-c.tar.gz -C /root && mkdir -p /root/json-c-build
rm -rf /root/json-c-build && mkdir -p /root/json-c-build
cd /root/json-c-build && cmake /root/json-c
cd /root/json-c-build && make
cd /root/json-c-build && make install
cd /opt/collectd/ && sh sbin/start_collectd.sh
fi