Hadoop setup: after starting the cluster, jps shows no NameNode and no SecondaryNameNode; I have tried every fix I could find online and none of them work



hdfs-site.xml:

<property>
    <name>dfs.replication</name>
    <value>1</value>
</property>

<property>
    <name>dfs.namenode.name.dir</name>
    <value>/home/hadoop/namenode/data</value>
</property>

<property>
    <name>dfs.datanode.data.dir</name>
    <value>/home/hadoop/datanode/data</value>
</property>

<property>
    <name>dfs.nameservices</name>
    <value>mycluster</value>
</property>

<property>
    <name>dfs.blocksize</name>
    <value>134217728</value>
</property>

<property>
    <name>dfs.namenode.http-address</name>
    <value>master:50070</value>
</property>

<property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>master:50090</value>
</property>

<property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://master:8485;slave1:8485;slave2:8485/mycluster</value>
</property>

<property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/home/hadoop/journalnode/data</value>
</property>

<property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
</property>

<property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/home/hadoop/.ssh/id_rsa</value>
</property>

<property>
    <name>dfs.ha.fencing.ssh.connect-timeout</name>
    <value>30000</value>
</property>

<property>
    <name>dfs.client.failover.proxy.provider.mycluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>

<property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
</property>
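
One thing that stands out in the block above: it enables QJM-based HDFS HA (dfs.nameservices, qjournal shared edits, automatic failover) but never defines the NameNode IDs for the mycluster nameservice or their per-NameNode addresses, which HA also requires. Below is a minimal sketch of those extra properties; nn1/nn2 and standby-host are placeholder values, not taken from the configuration above:

<!-- logical IDs of the NameNodes in the mycluster nameservice (placeholder IDs) -->
<property>
    <name>dfs.ha.namenodes.mycluster</name>
    <value>nn1,nn2</value>
</property>

<!-- RPC address of each NameNode (standby-host is a placeholder) -->
<property>
    <name>dfs.namenode.rpc-address.mycluster.nn1</name>
    <value>master:8020</value>
</property>
<property>
    <name>dfs.namenode.rpc-address.mycluster.nn2</name>
    <value>standby-host:8020</value>
</property>

<!-- HTTP address of each NameNode -->
<property>
    <name>dfs.namenode.http-address.mycluster.nn1</name>
    <value>master:50070</value>
</property>
<property>
    <name>dfs.namenode.http-address.mycluster.nn2</name>
    <value>standby-host:50070</value>
</property>

Also note that with HA enabled, HDFS does not run a SecondaryNameNode at all; the standby NameNode handles checkpointing, so jps would not be expected to show one in any case.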

core-site.xml:

<property>
    <name>fs.defaultFS</name>
    <value>hdfs://master:8020</value>
</property>

<property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
</property>

<property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/module/hadoop-2.7.7/data/abc/tmp</value>
</property>

<property>
    <name>ha.zookeeper.quorum</name>
    <value>master:2181,slave1:2181,slave2:2181</value>
</property>

<property>
    <name>ha.zookeeper.session-timeout.ms</name>
    <value>10000</value>
</property>
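
For comparison, in an HA setup fs.defaultFS normally points at the nameservice ID rather than at a single NameNode host, so that clients go through the configured failover proxy provider. A sketch using the mycluster nameservice declared in hdfs-site.xml:

<property>
    <!-- clients resolve the active NameNode via the nameservice, not a fixed host:port -->
    <name>fs.defaultFS</name>
    <value>hdfs://mycluster</value>
</property>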

yarn-site.xml:

<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>

<property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
</property>

<property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>86400</value>
</property>

<property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
</property>

<property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>my-yarn-cluster</value>
</property>

<property>
    <name>yarn.resourcemanager.hostname</name>
    <value>master</value>
</property>

<property>
    <name>yarn.resourcemanager.address</name>
    <value>master:8032</value>
</property>

<property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>master:8030</value>
</property>

<property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>master:8031</value>
</property>

<property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>master:8033</value>
</property>

<property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>master:8088</value>
</property>

<property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>master:2181,slave1:2181,slave2:2181</value>
</property>

<property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
</property>

<property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
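
Similarly, yarn.resourcemanager.ha.enabled is true, but ResourceManager HA normally also expects the RM IDs and a hostname per ID. A sketch with placeholder values rm1/rm2 and second-rm-host, not taken from the configuration above:

<!-- logical IDs of the ResourceManagers (placeholder IDs) -->
<property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
</property>

<property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>master</value>
</property>

<!-- second-rm-host is a placeholder for the standby ResourceManager -->
<property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>second-rm-host</value>
</property>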

Have a look at the write-up I put together when I set this up before:
Hadoop cluster setup (Hadoop集群搭建)
https://blog.csdn.net/lydms/article/details/128957505