hadoop2的federation+HA配置启动后,活着的datanode节点不对,如何解决?

mvp31 发布于 2014/08/19 13:47
阅读 893
收藏 0

hadoop2的federation+HA,hadoop1和hadoop2为c1联邦+HA,hadoop3和hadoop4为c2联邦+HA,hadoop1、hadoop2、hadoop3、hadoop4都为datanode。格式化成功,但启动集群后,活着的datanode节点不对,hadoop1只有两个datanode活着,hadoop2只有一个datanode活着并占用率为100%,hadoop3只有一个datanode活着占用率为100%,hadoop4只有一个datanode活着占用率为100%,四个datanode都成功启动,磁盘空间绝对够,怎样解决?

下面是hadoop1、hadoop2节点的配置文件:

《hdfs-site.xml》配置文件

<configuration>
<!-- 1. cluster1 nameservice (this host pair: hadoop1/hadoop2) -->

  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>

  <!-- Both federated nameservices are listed so datanodes register with both clusters -->
  <property>
    <name>dfs.nameservices</name>
    <value>cluster1,cluster2</value>
  </property>

  <property>
    <name>dfs.ha.namenodes.cluster1</name>
    <value>hadoop1,hadoop2</value>
  </property>

  <property>
    <name>dfs.namenode.rpc-address.cluster1.hadoop1</name>
    <value>hadoop1:9000</value>
  </property>

  <property>
    <name>dfs.namenode.http-address.cluster1.hadoop1</name>
    <value>hadoop1:50070</value>
  </property>

  <property>
    <name>dfs.namenode.rpc-address.cluster1.hadoop2</name>
    <value>hadoop2:9000</value>
  </property>

  <property>
    <name>dfs.namenode.http-address.cluster1.hadoop2</name>
    <value>hadoop2:50070</value>
  </property>

  <!-- shared.edits.dir is set only on the hosts of the owning cluster;
       on the cluster2 hosts the journal URI ends in /cluster2 instead -->
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://hadoop1:8485;hadoop2:8485;hadoop3:8485/cluster1</value>
    <description>指定cluster1的两个NameNode共享edits文件目录时,使用的是JournalNode集群来维护</description>
  </property>

  <property>
    <name>dfs.ha.automatic-failover.enabled.cluster1</name>
    <value>true</value>
  </property>

  <property>
    <name>dfs.client.failover.proxy.provider.cluster1</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>

<!-- 2. cluster2 nameservice -->
  <property>
    <name>dfs.ha.namenodes.cluster2</name>
    <value>hadoop3,hadoop4</value>
  </property>

  <property>
    <name>dfs.namenode.rpc-address.cluster2.hadoop3</name>
    <value>hadoop3:9000</value>
  </property>

  <property>
    <name>dfs.namenode.http-address.cluster2.hadoop3</name>
    <value>hadoop3:50070</value>
  </property>

  <property>
    <name>dfs.namenode.rpc-address.cluster2.hadoop4</name>
    <value>hadoop4:9000</value>
  </property>

  <property>
    <name>dfs.namenode.http-address.cluster2.hadoop4</name>
    <value>hadoop4:50070</value>
  </property>

  <property>
    <name>dfs.ha.automatic-failover.enabled.cluster2</name>
    <value>true</value>
  </property>

  <property>
    <name>dfs.client.failover.proxy.provider.cluster2</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>

<!-- 3. Settings common to cluster1 and cluster2 -->
  <property>
    <name>dfs.permissions</name>
    <!-- FIX: was "flase" (typo). An unparsable boolean makes Hadoop fall back to
         the default, i.e. permission checking stayed ENABLED.
         NOTE(review): in hadoop2 the preferred key is dfs.permissions.enabled. -->
    <value>false</value>
  </property>

  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/home/hadoop/tmp/journal</value>
  </property>

  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
  </property>

  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/home/hadoop/.ssh/id_rsa</value>
  </property>

</configuration>

配置文件:core-site.xml

<configuration>


<!-- Default filesystem for clients on the cluster1 hosts: the cluster1 nameservice -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://cluster1</value>
</property>
<!-- NOTE(review): the same local path is configured on all four nodes; if the two
     federated clusters were formatted separately, stale VERSION/clusterID data left
     under this directory commonly prevents datanodes from registering with every
     namenode — clear it between reformats and verify the clusterIDs match -->
<property>
<name>hadoop.tmp.dir</name>
<value>/home/hadoop/tmp</value>
</property>
<!-- ZooKeeper ensemble used by the ZKFC processes for automatic NN failover -->
<property>
<name>ha.zookeeper.quorum</name>
<value>hadoop1:2181,hadoop2:2181,hadoop3:2181</value>
</property>
</configuration>

配置文件mapred-site.xml

<configuration>
<!-- Submit MapReduce jobs to the YARN runtime -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>

配置文件yarn-site.xml

<configuration>
<!-- Site specific YARN configuration properties -->

  <!-- Single ResourceManager on hadoop1 (no RM HA configured) -->
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>hadoop1</value>
  </property>

  <!-- Auxiliary shuffle service NodeManagers must run for MapReduce on YARN -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>

<!-- FIX: the closing root tag was missing, leaving the file malformed XML -->
</configuration>


下面是hadoop3、hadoop4节点的配置文件:

《hdfs-site.xml》配置文件

<configuration>
<!-- 1. cluster1 nameservice (remote pair, known to these hosts for federation) -->

  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>

  <!-- Both federated nameservices are listed so datanodes register with both clusters -->
  <property>
    <name>dfs.nameservices</name>
    <value>cluster1,cluster2</value>
  </property>

  <property>
    <name>dfs.ha.namenodes.cluster1</name>
    <value>hadoop1,hadoop2</value>
  </property>

  <property>
    <name>dfs.namenode.rpc-address.cluster1.hadoop1</name>
    <value>hadoop1:9000</value>
  </property>

  <property>
    <name>dfs.namenode.http-address.cluster1.hadoop1</name>
    <value>hadoop1:50070</value>
  </property>

  <property>
    <name>dfs.namenode.rpc-address.cluster1.hadoop2</name>
    <value>hadoop2:9000</value>
  </property>

  <property>
    <name>dfs.namenode.http-address.cluster1.hadoop2</name>
    <value>hadoop2:50070</value>
  </property>

  <property>
    <name>dfs.ha.automatic-failover.enabled.cluster1</name>
    <value>true</value>
  </property>

  <property>
    <name>dfs.client.failover.proxy.provider.cluster1</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>

<!-- 2. cluster2 nameservice (this host pair: hadoop3/hadoop4) -->
  <property>
    <name>dfs.ha.namenodes.cluster2</name>
    <value>hadoop3,hadoop4</value>
  </property>

  <property>
    <name>dfs.namenode.rpc-address.cluster2.hadoop3</name>
    <value>hadoop3:9000</value>
  </property>

  <property>
    <name>dfs.namenode.http-address.cluster2.hadoop3</name>
    <value>hadoop3:50070</value>
  </property>

  <property>
    <name>dfs.namenode.rpc-address.cluster2.hadoop4</name>
    <value>hadoop4:9000</value>
  </property>

  <property>
    <name>dfs.namenode.http-address.cluster2.hadoop4</name>
    <value>hadoop4:50070</value>
  </property>

  <!-- shared.edits.dir is set only on the hosts of the owning cluster;
       on the cluster1 hosts the journal URI ends in /cluster1 instead -->
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://hadoop1:8485;hadoop2:8485;hadoop3:8485/cluster2</value>
    <description>指定cluster2的两个NameNode共享edits文件目录时,使用的是JournalNode集群来维护</description>
  </property>

  <property>
    <name>dfs.ha.automatic-failover.enabled.cluster2</name>
    <value>true</value>
  </property>

  <property>
    <name>dfs.client.failover.proxy.provider.cluster2</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>

<!-- 3. Settings common to cluster1 and cluster2 -->
  <property>
    <name>dfs.permissions</name>
    <!-- FIX: was "flase" (typo). An unparsable boolean makes Hadoop fall back to
         the default, i.e. permission checking stayed ENABLED.
         NOTE(review): in hadoop2 the preferred key is dfs.permissions.enabled. -->
    <value>false</value>
  </property>

  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/home/hadoop/tmp/journal</value>
  </property>

  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
  </property>

  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/home/hadoop/.ssh/id_rsa</value>
  </property>

</configuration>

配置文件:core-site.xml

<configuration>


<!-- NOTE(review): these are the hadoop3/hadoop4 (cluster2) hosts, yet the default
     filesystem still points at cluster1 — confirm this is intentional; clients on
     these nodes will resolve unqualified paths against cluster1, and a federated
     setup would normally point each pair at its own nameservice (or use viewfs) -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://cluster1</value>
</property>
<!-- NOTE(review): same local path as on the cluster1 hosts; stale VERSION/clusterID
     data under this directory after separate formats is a common reason datanodes
     fail to register with every namenode — verify it was clean before formatting -->
<property>
<name>hadoop.tmp.dir</name>
<value>/home/hadoop/tmp</value>
</property>
<!-- ZooKeeper ensemble used by the ZKFC processes for automatic NN failover -->
<property>
<name>ha.zookeeper.quorum</name>
<value>hadoop1:2181,hadoop2:2181,hadoop3:2181</value>
</property>
</configuration>

配置文件mapred-site.xml

<configuration>
<!-- Submit MapReduce jobs to the YARN runtime -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>

配置文件yarn-site.xml

<configuration>
<!-- Site specific YARN configuration properties -->

  <!-- Single ResourceManager on hadoop1 (no RM HA configured) -->
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>hadoop1</value>
  </property>

  <!-- Auxiliary shuffle service NodeManagers must run for MapReduce on YARN -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>

<!-- FIX: the closing root tag was missing, leaving the file malformed XML -->
</configuration>


加载中
返回顶部
顶部