Tags: hadoop
Install the JDK
Install Hadoop
Configure environment variables
Configure core-site.xml
Configure hdfs-site.xml
Configure mapred-site.xml
Configure yarn-site.xml
Configure slaves
cd /usr/local/src
wget -O jdk-8u73-linux-x64.tar.gz "http://download.oracle.com/otn-pub/java/jdk/8u73-b02/jdk-8u73-linux-x64.tar.gz?AuthParam=1458008151_64a44ef61864b914ee2cb5adb5a1ffb4"   # the AuthParam token is time-limited; grab a fresh link from Oracle if the download fails
tar -zxf jdk-8u73-linux-x64.tar.gz
mv jdk1.8.0_73/ /usr/local/
vim /etc/profile.d/java.sh
Add the following:
JAVA_HOME=/usr/local/jdk1.8.0_73/
JAVA_BIN=/usr/local/jdk1.8.0_73/bin
JRE_HOME=/usr/local/jdk1.8.0_73/jre
PATH=$PATH:/usr/local/jdk1.8.0_73/bin:/usr/local/jdk1.8.0_73/jre/bin
CLASSPATH=/usr/local/jdk1.8.0_73/jre/lib:/usr/local/jdk1.8.0_73/lib:/usr/local/jdk1.8.0_73/jre/lib/charsets.jar
export JAVA_HOME JAVA_BIN JRE_HOME PATH CLASSPATH
source /etc/profile.d/java.sh
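A quick check that the new profile took effect:
java -version          # should report build 1.8.0_73
which java             # should print /usr/local/jdk1.8.0_73/bin/java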
Change the hostname on all three servers, updating the following files accordingly (sample /etc/hosts entries follow the table below):
/etc/hosts
/etc/sysconfig/network
IP | HOSTNAME | Roles
---|---|---
172.16.1.212 | h1 | NameNode, SecondaryNameNode, HMaster, ResourceManager, ZooKeeper
172.16.1.213 | h2 | DataNode, HRegionServer, NodeManager, ZooKeeper
172.16.1.214 | h3 | DataNode, HRegionServer, NodeManager, ZooKeeper
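Based on the table above, /etc/hosts on every node should map the cluster hostnames, e.g.:
172.16.1.212 h1
172.16.1.213 h2
172.16.1.214 h3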
Generate an SSH key pair on each of h1, h2, and h3 with `ssh-keygen -t rsa`, then collect the public keys on h1 and distribute the merged authorized_keys:
[root@h2 ~]# scp /root/.ssh/id_rsa.pub root@h1:~/h2pub
[root@h3 ~]# scp /root/.ssh/id_rsa.pub root@h1:~/h3pub
[root@h1 ~]# cat ~/.ssh/id_rsa.pub ~/h2pub ~/h3pub > ~/.ssh/authorized_keys
[root@h1 ~]# scp ~/.ssh/authorized_keys root@h2:~/.ssh/authorized_keys
[root@h1 ~]# scp ~/.ssh/authorized_keys root@h3:~/.ssh/authorized_keys
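If key-based login still prompts for a password, the usual culprit is file permissions; a quick check from h1:
chmod 700 ~/.ssh && chmod 600 ~/.ssh/authorized_keys   # on every node
ssh h2 hostname   # should print "h2" with no password prompt
ssh h3 hostname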
mkdir /home/hadoop
cd !$
wget http://apache.opencas.org/hadoop/common/hadoop-2.6.3/hadoop-2.6.3.tar.gz
tar -zxvf hadoop-2.6.3.tar.gz
mv hadoop-2.6.3 hadoop
vim ~/.bashrc
Add:
#Hadoop Environment Variables
export HADOOP_HOME=/home/hadoop/hadoop
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin
source ~/.bashrc
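A quick sanity check; note also that daemons launched by the start scripts over SSH do not source ~/.bashrc, so on Hadoop 2.x it is usually necessary to set JAVA_HOME in etc/hadoop/hadoop-env.sh as well:
hadoop version   # should report Hadoop 2.6.3
echo 'export JAVA_HOME=/usr/local/jdk1.8.0_73' >> /home/hadoop/hadoop/etc/hadoop/hadoop-env.sh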
vim /home/hadoop/hadoop/etc/hadoop/core-site.xml
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://h1:9000</value>
  </property>
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/hadoop/hadoop/tmp</value>
    <description>A base for other temporary directories.</description>
  </property>
  <property>
    <name>hadoop.proxyuser.hduser.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.hduser.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>io.native.lib.available</name>
    <value>true</value>
  </property>
</configuration>
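Once saved, Hadoop's own config reader can confirm the file parses; a quick check on h1:
/home/hadoop/hadoop/bin/hdfs getconf -confKey fs.defaultFS   # expect: hdfs://h1:9000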
vim /home/hadoop/hadoop/etc/hadoop/hdfs-site.xml
<configuration>
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>h1:9011</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/home/hadoop/hadoop/dfs/name</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/home/hadoop/hadoop/dfs/data</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>2</value>
  </property>
</configuration>
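The NameNode format step will create the name directory, but creating the data, name, and tmp directories up front on the relevant nodes surfaces permission problems early (same paths as configured above):
mkdir -p /home/hadoop/hadoop/tmp /home/hadoop/hadoop/dfs/name   # on h1
mkdir -p /home/hadoop/hadoop/tmp /home/hadoop/hadoop/dfs/data   # on h2 and h3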
[root@h1 ~]# cd /home/hadoop/hadoop/etc/hadoop
[root@h1 hadoop]# mv mapred-site.xml.template mapred-site.xml
[root@h1 hadoop]# vim mapred-site.xml
<configuration>
  <property>
    <!-- mapred.job.tracker is an MRv1 property; on Hadoop 2.x, MapReduce runs on YARN -->
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
vim yarn-site.xml
<configuration>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>h1</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>h1:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>h1:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>h1:8031</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>h1:8033</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>h1:8088</value>
  </property>
</configuration>
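Two notes on the block above. In Hadoop 2.x the five explicit addresses are exactly the defaults derived from yarn.resourcemanager.hostname, so only that first property is strictly required. Also, to run MapReduce jobs on YARN the NodeManagers normally need the shuffle auxiliary service, which this file does not set; a commonly needed addition inside <configuration>:
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>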
List the DataNode hostnames in the slaves file:
[root@h1 hadoop]# vim slaves
h2
h3
Distribute the modified configuration files (not just core-site.xml) to h2 and h3:
[root@h1 hadoop]# scp core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml slaves root@h2:/home/hadoop/hadoop/etc/hadoop
[root@h1 hadoop]# scp core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml slaves root@h3:/home/hadoop/hadoop/etc/hadoop
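Note that the walkthrough only unpacked Hadoop on h1; h2 and h3 need the same installation and environment variables before daemons can start there. A sketch, assuming identical paths on all three nodes:
[root@h1 ~]# scp -r /home/hadoop/hadoop root@h2:/home/hadoop/
[root@h1 ~]# scp -r /home/hadoop/hadoop root@h3:/home/hadoop/
(Install the JDK and append the same Hadoop variables to ~/.bashrc on h2 and h3 as well.)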
Format the NameNode and start Hadoop:
[root@h1 hadoop]# /home/hadoop/hadoop/bin/hdfs namenode -format
[root@h1 hadoop]# /home/hadoop/hadoop/sbin/start-all.sh
Check which daemons started with jps on each node:
jps
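Roughly what jps should list if everything came up, given the role assignments above (PIDs omitted):
# on h1
NameNode
SecondaryNameNode
ResourceManager
# on h2 and h3
DataNode
NodeManager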
Browse to http://172.16.1.212:50070 to check HDFS status; the YARN web UI (per yarn.resourcemanager.webapp.address above) is at http://172.16.1.212:8088.
This post originally appeared on the "CGL的博客" blog; please retain the source link: http://chengongliang.blog.51cto.com/10693153/1761633