#!/bin/bash
workdir=$(cd "$(dirname "$0")"; pwd)
date=$(date +%Y-%m-%d-%H:%M:%S)

init(){
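    # init: start a fresh, world-writable report file and stamp it with the run time.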
    rm -rf $workdir/hdfs_detail.txt
    touch $workdir/hdfs_detail.txt
    chmod 777 $workdir/hdfs_detail.txt
    echo "[Init Time]:$date" >> $workdir/hdfs_detail.txt
    echo "--" >> $workdir/hdfs_detail.txt
    echo "--" >> $workdir/hdfs_detail.txt
}
hdfs_collect(){
echo" ----[ 汇总数据 ]---- " >> $workdir/hdfs_detail.txt
echo"" >> $workdir/hdfs_detail.txt
echo"| 总量 | 当前目录 |" >> $workdir/hdfs_detail.txt
    hadoop fs -du / | sort -r -n | awk '{size=$1/1024;if(size<1024){printf("%10.3f KB\t%s\n",size,$2);}else{size=size/1024;if(size<1024){printf("\033[36m%10.3f MB\t%s\n\033[0m",size,$2);}else{size=size/1024;if(size<1024){printf("\033[35m%10.3f GB\t%s\n\033[0m",size,$2);}else{size=size/1024;printf("\033[31m%10.3f TB\t%s\n\033[0m",size,$2);}}}}' >> $workdir/hdfs_detail.txt
    echo "" >> $workdir/hdfs_detail.txt
    echo "" >> $workdir/hdfs_detail.txt
}
hdfs_detail(){
echo" ----[ 明细数据 ]---- " >> $workdir/hdfs_detail.txt
echo"" >> $workdir/hdfs_detail.txt
    for first in `cat $workdir/hdfsfirst.txt`; do
        hadoop fs -du $first | sort -r -n | awk '{size=$1/1024;if(size<1024){printf("%10.3f KB\t%s\n",size,$2);}else{size=size/1024;if(size<1024){printf("\033[36m%10.3f MB\t%s\n\033[0m",size,$2);}else{size=size/1024;if(size<1024){printf("\033[35m%10.3f GB\t%s\n\033[0m",size,$2);}else{size=size/1024;printf("\033[31m%10.3f TB\t%s\n\033[0m",size,$2);}}}}' >> $workdir/hdfs_detail.txt
    done
    for second in `cat $workdir/hdfssecond.txt`; do
        hadoop fs -du $second | sort -r -n | awk '{size=$1/1024;if(size<1024){printf("%10.3f KB\t%s\n",size,$2);}else{size=size/1024;if(size<1024){printf("\033[36m%10.3f MB\t%s\n\033[0m",size,$2);}else{size=size/1024;if(size<1024){printf("\033[35m%10.3f GB\t%s\n\033[0m",size,$2);}else{size=size/1024;printf("\033[31m%10.3f TB\t%s\n\033[0m",size,$2);}}}}' >> $workdir/hdfs_detail.txt
    done
    for third in `cat $workdir/hdfsthird.txt`; do
        hadoop fs -du $third | sort -r -n | awk '{size=$1/1024;if(size<1024){printf("%10.3f KB\t%s\n",size,$2);}else{size=size/1024;if(size<1024){printf("\033[36m%10.3f MB\t%s\n\033[0m",size,$2);}else{size=size/1024;if(size<1024){printf("\033[35m%10.3f GB\t%s\n\033[0m",size,$2);}else{size=size/1024;printf("\033[31m%10.3f TB\t%s\n\033[0m",size,$2);}}}}' >> $workdir/hdfs_detail.txt
    done

    # The nested per-level variant below, together with the cleanup of the listing
    # files, is kept but disabled by a here-document comment.
    : <<'!'
    for line in $hdfs1; do
        hadoop fs -du $line | sort -r -n | awk '{size=$1/1024;if(size<1024){printf("%10.3f KB\t%s\n",size,$2,"'$line'");}else{size=size/1024;if(size<1024){printf("\033[36m%10.3f MB\t%s\n\033[0m",size,$2,"'$line'");}else{size=size/1024;if(size<1024){printf("\033[35m%10.3f GB\t%s\n\033[0m",size,$2,"'$line'");}else{size=size/1024;printf("\033[31m%10.3f TB\t%s\n\033[0m",size,$2,"'$line'");}}}}' | head -10 >> $workdir/hdfs_detail.txt
        for line1 in $hdfs2; do
            hadoop fs -du $line1 | sort -r -n | awk '{size=$1/1024;if(size<1024){printf("%10.3f KB\t%s\n",size,$2,"'$line1'");}else{size=size/1024;if(size<1024){printf("\033[36m%10.3f MB\t%s\n\033[0m",size,$2,"'$line1'");}else{size=size/1024;if(size<1024){printf("\033[35m%10.3f GB\t%s\n\033[0m",size,$2,"'$line1'");}else{size=size/1024;printf("\033[31m%10.3f TB\t%s\n\033[0m",size,$2,"'$line1'");}}}}' | head -10 >> $workdir/hdfs_detail.txt
            for line2 in $hdfs3; do
                hadoop fs -du $line2 | sort -r -n | awk '{size=$1/1024;if(size<1024){printf("%10.3f KB\t%s\n",size,$2,"'$line2'");}else{size=size/1024;if(size<1024){printf("\033[36m%10.3f MB\t%s\n\033[0m",size,$2,"'$line2'");}else{size=size/1024;if(size<1024){printf("\033[35m%10.3f GB\t%s\n\033[0m",size,$2,"'$line2'");}else{size=size/1024;printf("\033[31m%10.3f TB\t%s\n\033[0m",size,$2,"'$line2'");}}}}' | head -10 >> $workdir/hdfs_detail.txt
            done
        done
        echo "" >> $workdir/hdfs_detail.txt
    done
    rm -rf $workdir/hdfsfirst.txt
    rm -rf $workdir/hdfssecond.txt
    rm -rf $workdir/hdfsthird.txt
!
}
init
hdfs_collect
hdfs_detail
echo"SUCCESS"