Cluster-wide remote copy (distributed from the namenode)
---------------------------------------
#!/bin/bash
# Exit if no argument was given.
if [ $# -lt 1 ] ; then
    echo no args
    exit
fi

# get first argument
arg1=$1
cuser=`whoami`
fname=`basename $arg1`
dir=`dirname $arg1`
if [ "$dir" = "." ] ; then
    dir=`pwd`
fi

# Copy the file or directory to the same path on every node (s200, s300, s400, s500).
for (( i=200; i<=500; i=i+100 )) ; do
    echo -------- copying $arg1 to s$i --------
    if [ -d $arg1 ] ; then
        # directory: copy recursively
        scp -r $arg1 $cuser@s$i:$dir
    else
        # single file
        scp $arg1 $cuser@s$i:$dir
    fi
    echo
done
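For reference, a usage sketch: assuming the script is saved on the namenode as xcp.sh (the post does not name it, so the name and the example paths are assumptions) and that passwordless SSH to s200–s500 is already set up, it is called with a single path argument:

# Hypothetical usage (script name xcp.sh and the paths are assumptions).
chmod +x ~/bin/xcp.sh

# Distribute the Hadoop configuration directory to every node (directory -> scp -r).
xcp.sh /soft/hadoop/etc/hadoop

# Distribute a single file given relative to the current directory ($dir falls back to `pwd`).
xcp.sh .bashrc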
Cluster-wide remote listing (run from the namenode)
------------------------------------------
#!/bin/bash
# Exit if no argument was given.
if [ $# -lt 1 ] ; then
    echo no args
    exit
fi

# get first argument
arg1=$1
cuser=`whoami`
fname=`basename $arg1`
dir=`dirname $arg1`
if [ "$dir" = "." ] ; then
    dir=`pwd`
fi

# List the given path on every node (s200, s300, s400, s500).
for (( i=200; i<=500; i=i+100 )) ; do
    echo -------- ls $arg1 on s$i --------
    if [ -d $arg1 ] ; then
        # directory: list its contents on the remote host (xargs joins the output onto one line)
        ssh s$i ls $dir/$fname | xargs
    else
        # file: list its parent directory on the remote host
        ssh s$i ls $dir | xargs
    fi
    echo
done
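A usage sketch, typically run right after a distribution to confirm every node received the files (the script name xls.sh and the paths are assumptions):

# Hypothetical usage (script name xls.sh is an assumption).
# Confirm that the Hadoop install directory exists on every node.
xls.sh /soft/hadoop

# Confirm a distributed file is present by listing its parent directory on each node.
xls.sh /soft/hadoop/etc/hadoop/core-site.xml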
Cluster-wide remote deletion of files or directories (run from the namenode)
--------------------------------------------
#!/bin/bash
# Exit if no argument was given.
if [ $# -lt 1 ] ; then
    echo no args
    exit
fi

# get first argument
arg1=$1
cuser=`whoami`
fname=`basename $arg1`
dir=`dirname $arg1`
if [ "$dir" = "." ] ; then
    dir=`pwd`
fi

# Delete the file or directory on every node (s200, s300, s400, s500).
for (( i=200; i<=500; i=i+100 )) ; do
    echo -------- rm $arg1 on s$i --------
    if [ -d $arg1 ] ; then
        # directory: remove recursively
        ssh s$i rm -rf $dir/$fname
        echo ok
    else
        # single file
        ssh s$i rm $dir/$fname
        echo ok
    fi
    echo
done
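The shutdown script further below calls this helper as xrm.sh, so it should be saved under that name somewhere on the namenode's PATH. A usage sketch (the ~/bin location is an assumption):

# Save the script as xrm.sh (this name is what the shutdown script below invokes).
chmod +x ~/bin/xrm.sh

# Remove the local Hadoop data directory on every node (directory -> rm -rf).
xrm.sh ~/hadoop-yehom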
One-click cluster startup with namenode formatting and user directory creation (run from the namenode)
-----------------------------------------------------------------------------------
#!/bin/bash
echo "--------------- NOW FORMAT HDFS ------------"
hdfs namenode -format
echo "--------------- HDFS FORMAT DONE -------------"

echo "--------------- NOW START HDFS --------------"
start-dfs.sh
echo "--------------- HDFS STARTED --------------"

echo "--------------- NOW START YARN SYSTEM -------------"
start-yarn.sh
echo "--------------- YARN SYSTEM STARTED -------------"

echo "--------------- NOW CREATE USER DIRECTORY -------------"
hadoop fs -mkdir -p /user/yehom/data
echo "--------------- USER DIRECTORY CREATED -----------"

echo "--------------- SHOW USER DIRECTORY LIST --------------"
hadoop fs -ls -R /

echo "*************** ALL STARTED AND INITIALIZED *******************"
echo "*************** DESIGN BY YEHOM @YehomLab.com *****************"
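Once it completes, a quick way to confirm the daemons actually came up is to run jps on each node. A minimal sketch using the same s200–s500 host names as the scripts above (if the namenode is outside that range, run jps on it locally as well):

# Minimal verification sketch: list the running Java daemons on every node.
# Which daemons appear where (NameNode, DataNode, ResourceManager, NodeManager, ...)
# depends on your cluster layout.
for (( i=200; i<=500; i=i+100 )) ; do
    echo -------- jps on s$i --------
    ssh s$i jps
done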
One-click cluster shutdown with deletion of all logs and related directories (run from the namenode)
-----------------------------------------------------------------------------------
#!/bin/bash
echo "------------------- NOW STOP HADOOP CLUSTER --------------------"
stop-yarn.sh
stop-dfs.sh
echo "------------------- HADOOP CLUSTER STOPPED --------------------"

echo "------------------- NOW DELETE DATA FILES ------------------------"
xrm.sh ~/hadoop-yehom
echo "------------------- DATA FILES DELETED --------------------"

echo "------------------- NOW DELETE LOGS ---------------------"
xrm.sh /soft/hadoop/logs
echo "------------------- LOGS DELETED ----------------"

echo "******************* ALL HADOOP TASKS STOPPED AND CLEANED *****************"
echo "******************* DESIGN BY YEHOM @YehomLab.com *****************"
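Taken together, the two one-click scripts give a simple reset cycle for the cluster. A sketch, assuming they are saved as stop_clean.sh and format_start.sh (both names are assumptions):

# Hypothetical reset cycle (script names are assumptions).
./stop_clean.sh       # stop YARN and HDFS, wipe data directories and logs everywhere
./format_start.sh     # reformat the namenode, restart HDFS/YARN, recreate /user/yehom/data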
This post is from the "yehomlab" blog; please keep this attribution: http://yehom.blog.51cto.com/5159116/1793049