Executable script (save as install.sh):
#!/bin/bash
# Define the software versions and installation directory
jdk_version="jdk-11.0.12"
mysql_version="mysql-8.0.26"
hadoop_version="hadoop-3.3.5"
hive_version="hive-3.1.2"
sqoop_version="sqoop-1.4.7"
zookeeper_version="zookeeper-3.7.0"
install_dir="/opt"
# Function to install JDK
# (assumes each archive unpacks into a directory named after the archive itself)
install_jdk() {
    wget "https://example.com/$jdk_version.tar.gz"
    tar -xzvf "$jdk_version.tar.gz"
    mv "$jdk_version" "$install_dir"
    ln -s "$install_dir/$jdk_version" "/usr/local/$jdk_version"
}
# Function to install MySQL
install_mysql() {
    wget "https://example.com/$mysql_version.tar.gz"
    tar -xzvf "$mysql_version.tar.gz"
    mv "$mysql_version" "$install_dir"
    ln -s "$install_dir/$mysql_version" "/usr/local/$mysql_version"
}
# Function to install Hadoop
install_hadoop() {
    wget "https://example.com/$hadoop_version.tar.gz"
    tar -xzvf "$hadoop_version.tar.gz"
    mv "$hadoop_version" "$install_dir"
    ln -s "$install_dir/$hadoop_version" "/usr/local/$hadoop_version"
}
# Function to install Hive
install_hive() {
    wget "https://example.com/$hive_version.tar.gz"
    tar -xzvf "$hive_version.tar.gz"
    mv "$hive_version" "$install_dir"
    ln -s "$install_dir/$hive_version" "/usr/local/$hive_version"
}
# Function to install Sqoop
install_sqoop() {
    wget "https://example.com/$sqoop_version.tar.gz"
    tar -xzvf "$sqoop_version.tar.gz"
    mv "$sqoop_version" "$install_dir"
    ln -s "$install_dir/$sqoop_version" "/usr/local/$sqoop_version"
}
# Function to install Zookeeper
install_zookeeper() {
    wget "https://example.com/$zookeeper_version.tar.gz"
    tar -xzvf "$zookeeper_version.tar.gz"
    mv "$zookeeper_version" "$install_dir"
    ln -s "$install_dir/$zookeeper_version" "/usr/local/$zookeeper_version"
}
# Call the installation functions
install_jdk
install_mysql
install_hadoop
install_hive
install_sqoop
install_zookeeper
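# Optional sanity check (illustrative): confirm each component ended up under /usr/local
# before the configuration steps below start editing its files.
for component in "$jdk_version" "$mysql_version" "$hadoop_version" "$hive_version" "$sqoop_version" "$zookeeper_version"; do
    if [ ! -d "/usr/local/$component" ]; then
        echo "WARNING: /usr/local/$component is missing" >&2
    fi
done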
# Configuration steps
cd /usr/local/$hadoop_version/etc/hadoop
# Hadoop 3.x ships core-site.xml, hdfs-site.xml, mapred-site.xml and yarn-site.xml directly
# in etc/hadoop (there are no *.template copies); they are rewritten in full below.
cd /usr/local/$hive_version/conf
cp hive-env.sh.template hive-env.sh
cp hive-default.xml.template hive-site.xml   # Hive ships hive-default.xml.template; hive-site.xml is rewritten in full below
cd /usr/local/$sqoop_version/conf
cp sqoop-env-template.sh sqoop-env.sh
cp sqoop-site-template.xml sqoop-site.xml   # Sqoop 1.4.7 ships sqoop-site-template.xml; sqoop.properties belongs to Sqoop 2
cd /usr/local/$zookeeper_version/conf
cp zoo_sample.cfg zoo.cfg
echo "
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://localhost:9000</value>
</property>
</configuration>
" > /usr/local/$hadoop_version/etc/hadoop/core-site.xml
echo "
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
</configuration>
" > /usr/local/$hadoop_version/etc/hadoop/hdfs-site.xml
echo "
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
" > /usr/local/$hadoop_version/etc/hadoop/mapred-site.xml
# Configure Hadoop yarn-site.xml
echo "
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>localhost</value>
</property>
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>4096</value>
</property>
<property>
<name>yarn.nodemanager.resource.cpu-vcores</name>
<value>2</value>
</property>
</configuration>
" > /usr/local/$hadoop_version/etc/hadoop/yarn-site.xml
# Configure Hive hive-site.xml
echo "
<configuration>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://localhost:3306/hive?createDatabaseIfNotExist=true</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>password</value>
</property>
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/user/hive/warehouse</value>
</property>
</configuration>
" > /usr/local/$hive_version/conf/hive-site.xml
# Configure Sqoop sqoop-env.sh
echo "
export HADOOP_COMMON_HOME=/usr/local/$hadoop_version
export HADOOP_MAPRED_HOME=/usr/local/$hadoop_version
export HIVE_HOME=/usr/local/$hive_version
export ZOOKEEPER_HOME=/usr/local/$zookeeper_version
" > /usr/local/$sqoop_version/conf/sqoop-env.sh
# Configure ZooKeeper zoo.cfg
echo "
tickTime=2000
dataDir=/var/lib/zookeeper
clientPort=2181
" > /usr/local/$zookeeper_version/conf/zoo.cfg
Distribute the script above to all n servers and run it on each of them:
#!/bin/bash
# Define the script file and IP address file
script_file="install.sh"
ip_file="ip_addresses.txt"
# Read the IP addresses from the file and execute the script on each server
while IFS= read -r ip_address
do
    echo "Running script on $ip_address"
    scp "$script_file" "$ip_address:~/"                       # Transfer the script file to the server
    ssh -n "$ip_address" "bash ~/$(basename "$script_file")"  # -n keeps ssh from consuming the loop's stdin
done < "$ip_file"