Create the local directories that HDFS and MapReduce will use (mkdir -p creates any missing parent directories as well):

mkdir -p /root/install/hadoop/tmp
mkdir -p /root/install/hadoop/hdfs/name
mkdir -p /root/install/hadoop/hdfs/data
mkdir -p /root/install/hadoop/mapred/local
mkdir -p /root/install/hadoop/mapred/system
Set the Hadoop environment variables (for example by appending them to /etc/profile and then running source /etc/profile):

export HADOOP_HOME=/root/install/hadoop-2.2.0
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop
export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
In $HADOOP_CONF_DIR/hadoop-env.sh, set the JDK location:

export JAVA_HOME=/root/install/jdk1.7.0_21
In $HADOOP_CONF_DIR/yarn-env.sh, set the same JAVA_HOME:

export JAVA_HOME=/root/install/jdk1.7.0_21
In $HADOOP_CONF_DIR/core-site.xml, point clients at the NameNode and set the temporary directory:

<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://spark1:9000</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/root/install/hadoop/tmp</value>
  </property>
</configuration>
In $HADOOP_CONF_DIR/hdfs-site.xml, set the NameNode and DataNode storage directories and the replication factor. (The original post used dfs.name.dir and dfs.data.dir, which are the deprecated Hadoop 1.x names; their Hadoop 2.x equivalents are shown below. Keep dfs.replication no larger than the number of DataNodes.)

<configuration>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/root/install/hadoop/hdfs/name</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/root/install/hadoop/hdfs/data</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
</configuration>
In $HADOOP_CONF_DIR/mapred-site.xml, run MapReduce on YARN and configure the job history server:

<configuration>
  <property>
    <name>mapreduce.cluster.local.dir</name>
    <value>/root/install/hadoop/mapred/local</value>
  </property>
  <property>
    <name>mapreduce.cluster.system.dir</name>
    <value>/root/install/hadoop/mapred/system</value>
  </property>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>spark1:10020</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>spark1:19888</value>
  </property>
  <property>
    <name>mapred.child.java.opts</name>
    <value>-Djava.awt.headless=true</value>
  </property>
  <!-- add headless to default -Xmx1024m -->
  <property>
    <name>yarn.app.mapreduce.am.command-opts</name>
    <value>-Djava.awt.headless=true -Xmx1024m</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.admin-command-opts</name>
    <value>-Djava.awt.headless=true</value>
  </property>
</configuration>
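The original post does not show yarn-site.xml, but with mapreduce.framework.name set to yarn the NodeManagers also need the MapReduce shuffle service, and the nodes need to know where the ResourceManager runs. A minimal sketch for Hadoop 2.2.0, assuming spark1 hosts the ResourceManager as it does the NameNode:

<configuration>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>spark1</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
</configuration>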
Distribute the configuration to the other nodes with a small script:

[root@spark1 install]# cat dispatchcfg.sh
#!/bin/bash
# Copy the whole Hadoop config directory to each slave node.
for target in spark2
do
  scp -r $HADOOP_CONF_DIR $target:/root/install/hadoop-2.2.0/etc
done
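Before the HDFS test below will work, the NameNode has to be formatted and the daemons started on spark1. These are the standard Hadoop 2.2.0 commands, shown here as a reminder rather than taken from the original post:

hdfs namenode -format
start-dfs.sh
start-yarn.sh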
Verify HDFS by creating a directory and uploading a file into it:

hadoop fs -mkdir /input
hadoop fs -put /etc/group /input
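A quick end-to-end check is the wordcount example that ships with Hadoop (the jar path assumes the stock 2.2.0 layout, and /output must not already exist):

hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.2.0.jar wordcount /input /output
hadoop fs -cat /output/part-r-00000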
In $SPARK_HOME/conf/spark-env.sh, point the workers at the master and size them:

export JAVA_HOME=/root/install/jdk1.7.0_21
export SPARK_MASTER_IP=spark1
export SPARK_MASTER_PORT=7077
export SPARK_WORKER_CORES=1
export SPARK_WORKER_INSTANCES=1
export SPARK_WORKER_MEMORY=1g
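With spark-env.sh copied to every node, the standalone cluster can be started from the master using Spark's bundled scripts (not shown in the original post). The master web UI should then be reachable at http://spark1:8080, and a shell can attach to the cluster; depending on the Spark version, either form works:

$SPARK_HOME/sbin/start-all.sh
MASTER=spark://spark1:7077 $SPARK_HOME/bin/spark-shell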
Original article: http://www.cnblogs.com/jzssuanfa/p/7058891.html