Hadoop 集群的开启与关闭操作记录
root@master:~# cd .. //一直cd ..进入根目录然后一层一层地找你的hadoop文件的位置 root@master:/# ls bin dev initrd.img lib64 mnt root snap tmp vmlinuz boot etc initrd.img.old lost+found opt run srv usr vmlinuz.old cdrom home lib media proc sbin sys var root@master:/# cd opt root@master:/opt# ls Eclipse hadoop-2.9.2 spark-2.3.3-bin-hadoop2.7 eclipse-workspace hbase-1.3.3 root@master:/opt# cd hadoop-2.9.2 root@master:/opt/hadoop-2.9.2# sbin/start-all.sh //开启集群 This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh Starting namenodes on [master] master: starting namenode, logging to /opt/hadoop-2.9.2/logs/hadoop-root-namenode-master.out localhost: starting datanode, logging to /opt/hadoop-2.9.2/logs/hadoop-root-datanode-master.out Starting secondary namenodes [master] master: starting secondarynamenode, logging to /opt/hadoop-2.9.2/logs/hadoop-root-secondarynamenode-master.out starting yarn daemons starting resourcemanager, logging to /opt/hadoop-2.9.2/logs/yarn-root-resourcemanager-master.out localhost: starting nodemanager, logging to /opt/hadoop-2.9.2/logs/yarn-root-nodemanager-master.out root@master:~# jps //jps查看是否开启成功 3153 NodeManager 3010 ResourceManager 3522 Jps 2502 DataNode 2358 NameNode 2813 SecondaryNameNode //以上进程齐全，说明集群已成功启动 root@master:/opt/hadoop-2.9.2# sbin/stop-all.sh //关闭集群（注意是 stop-all.sh，不是 start-all.sh），还是要进入原来的那个路径