#############################
#ENV
#spark01 192.168.51.6
#spark02 192.168.51.18
#spark03 192.168.51.19
#spark04 192.168.51.21
#spark05 192.168.51.24
############################
##We must raise the file and process limits on every node
echo "ulimit -SHn 204800" >> /etc/rc.local
echo "ulimit -SHu 204800" >> /etc/rc.local
cat >> /etc/security/limits.conf << EOF
* soft nofile 204800
* hard nofile 204800
* soft nproc 204800
* hard nproc 204800
EOF
##We must disable IPv6 on every node
echo 'net.ipv6.conf.all.disable_ipv6 = 1'>>/etc/sysctl.conf
echo 'net.ipv6.conf.default.disable_ipv6 = 1' >>/etc/sysctl.conf
echo 'vm.swappiness = 0' >> /etc/sysctl.conf
sysctl -p
echo 'echo never > /sys/kernel/mm/transparent_hugepage/defrag' >> /etc/rc.local
chmod +x /etc/rc.d/rc.local
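##Optional sanity check: confirm the limits and kernel settings took effect (log in again, or reboot for the rc.local entries)
ulimit -Sn; ulimit -Hn
sysctl vm.swappiness
cat /sys/kernel/mm/transparent_hugepage/defrag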
#1)Edit the /etc/hosts file on every node
cat >/etc/hosts <<EOF
127.0.0.1 localhost localhost.localdomain
192.168.51.6 spark01
192.168.51.18 spark02
192.168.51.19 spark03
192.168.51.21 spark04
192.168.51.24 spark05
EOF
#2)Create the hadoop user on every node
useradd hadoop
passwd hadoop
#3)Let the hadoop user run sudo on every node
echo 'hadoop ALL=(ALL) NOPASSWD: ALL' >>/etc/sudoers
#4)Set ownership of the /opt directory on every node
chown -R hadoop.hadoop /opt/
#5)Set up key-based (passwordless) login:
#only do this on spark01
su - hadoop
ssh-keygen
ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@spark01
ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@spark02
ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@spark03
ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@spark04
ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@spark05
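##Optional check: each command below should print the remote hostname without asking for a password
for h in spark01 spark02 spark03 spark04 spark05; do ssh hadoop@$h hostname; done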
#6)Install Hadoop on spark01 and propagate /opt/hadoop-2.7.3 to the other nodes:
cd /home/tools
sudo wget http://god.nongdingbang.net/downloads/hadoop-2.7.3.tar.gz
sudo tar zxvf hadoop-2.7.3.tar.gz -C /opt/
sudo chown -R hadoop.hadoop /opt/hadoop-2.7.3
scp -r /opt/hadoop-2.7.3 hadoop@spark02:/opt
scp -r /opt/hadoop-2.7.3 hadoop@spark03:/opt
scp -r /opt/hadoop-2.7.3 hadoop@spark04:/opt
scp -r /opt/hadoop-2.7.3 hadoop@spark05:/opt
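##The four copies above can also be done in one loop (same effect, using the SSH keys from step 5):
for h in spark02 spark03 spark04 spark05; do scp -r /opt/hadoop-2.7.3 hadoop@$h:/opt; done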
#7)Set the Hadoop environment variables (/etc/profile.d/hadoop.sh) on every node
sudo su -
cat >/etc/profile.d/hadoop.sh <<EOF
export HADOOP_HOME=/opt/hadoop-2.7.3
export PATH=\$PATH:\$HADOOP_HOME/bin:\$HADOOP_HOME/sbin
EOF
source /etc/profile.d/hadoop.sh
#8)Edit /opt/hadoop-2.7.3/etc/hadoop/core-site.xml on every node
cat >/opt/hadoop-2.7.3/etc/hadoop/core-site.xml <<EOF
<?xml version="1.0"?>
<configuration>
  <property><name>fs.defaultFS</name><value>hdfs://spark01:9000</value></property>
  <property><name>io.file.buffer.size</name><value>131072</value></property>
  <property><name>hadoop.tmp.dir</name><value>/opt/hadoop-2.7.3/tmp/</value></property>
  <property><name>hadoop.proxyuser.hadoop.hosts</name><value>*</value></property>
  <property><name>hadoop.proxyuser.hadoop.groups</name><value>*</value></property>
</configuration>
EOF
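##Optional check on any node: confirm the binaries and core-site.xml are picked up
/opt/hadoop-2.7.3/bin/hadoop version
/opt/hadoop-2.7.3/bin/hdfs getconf -confKey fs.defaultFS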
#9)Create the HDFS NameNode/DataNode data dirs on every node and change their ownership
mkdir -p /opt/storage/{datanode,namenode}
chown -R hadoop.hadoop /opt/storage
#10)Edit /opt/hadoop-2.7.3/etc/hadoop/hdfs-site.xml on every node to set up the DataNodes:
###############################################
cat >/opt/hadoop-2.7.3/etc/hadoop/hdfs-site.xml <<EOF
<?xml version="1.0"?>
<configuration>
  <property><name>dfs.replication</name><value>3</value></property>
  <property><name>dfs.permissions</name><value>false</value></property>
  <property><name>dfs.datanode.data.dir</name><value>/opt/storage/datanode</value></property>
  <property><name>dfs.namenode.name.dir</name><value>/opt/storage/namenode</value></property>
  <property><name>dfs.secondary.http.address</name><value>spark01:50090</value></property>
  <property><name>dfs.namenode.http-address</name><value>spark01:50070</value></property>
  <property><name>dfs.webhdfs.enabled</name><value>true</value></property>
</configuration>
EOF
#11)Edit /opt/hadoop-2.7.3/etc/hadoop/mapred-site.xml on spark01.
#################################################################
cat > /opt/hadoop-2.7.3/etc/hadoop/mapred-site.xml <<EOF
<?xml version="1.0"?>
<configuration>
  <property><name>mapreduce.framework.name</name><value>yarn</value></property>
  <property><name>mapreduce.jobhistory.address</name><value>spark01:10020</value></property>
  <property><name>mapreduce.jobhistory.webapp.address</name><value>spark01:19888</value></property>
</configuration>
EOF
#12)Set up the ResourceManager on spark01 and the NodeManagers on spark02-05
#########################################################################
cat >/opt/hadoop-2.7.3/etc/hadoop/yarn-site.xml <<EOF
<?xml version="1.0"?>
<configuration>
  <property><name>yarn.resourcemanager.hostname</name><value>spark01</value></property>
  <property><name>yarn.nodemanager.hostname.nm1</name><value>spark02</value></property>
  <property><name>yarn.nodemanager.hostname.nm2</name><value>spark03</value></property>
  <property><name>yarn.nodemanager.hostname.nm3</name><value>spark04</value></property>
  <property><name>yarn.nodemanager.hostname.nm4</name><value>spark05</value></property>
  <property><name>yarn.nodemanager.aux-services</name><value>mapreduce_shuffle</value></property>
</configuration>
EOF
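##Instead of editing the XML by hand on each node, the finished files can be pushed from spark01 (same result, assuming identical install paths everywhere):
for h in spark02 spark03 spark04 spark05; do scp /opt/hadoop-2.7.3/etc/hadoop/{core,hdfs,mapred,yarn}-site.xml hadoop@$h:/opt/hadoop-2.7.3/etc/hadoop/; done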
#13)Edit /opt/hadoop-2.7.3/etc/hadoop/slaves on spark01
##(so that the master can start all necessary services on the slaves automatically):
###############################################################
cat >/opt/hadoop-2.7.3/etc/hadoop/slaves <<EOF
spark02
spark03
spark04
spark05
EOF
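##With all configuration in place, a typical sequence to format and start the cluster from spark01 as the hadoop user (paths as used in this guide):
su - hadoop
/opt/hadoop-2.7.3/bin/hdfs namenode -format
/opt/hadoop-2.7.3/sbin/start-dfs.sh
/opt/hadoop-2.7.3/sbin/start-yarn.sh
/opt/hadoop-2.7.3/sbin/mr-jobhistory-daemon.sh start historyserver
##jps should now list the HDFS and YARN daemons on each node
jps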