While setting up HA, I ran into one command that has kept the deployment from ever succeeding:
[root@master src]# chown -R hadoop:hadoop /usr/local/src/java
chown: invalid user: "hadoop:hadoop"
If I don't add that user, which configuration files do I have to change to get the setup working? The configurations are given below.
The profile configuration:
export JAVA_HOME=/usr/local/src/java
export PATH=$PATH:$JAVA_HOME/bin
export ZK_HOME=/usr/local/src/zookeeper
export PATH=$PATH:$ZK_HOME/bin
export HADOOP_HOME=/usr/local/src/hadoop
export HADOOP_PREFIX=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_OPTS="-Djava.library.path=$HADOOP_INSTALL/lib:$HADOOP_COMMON_LIB_NATIVE_DIR"
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
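After editing profile, the variables only take effect once the file is re-sourced; a quick sanity check, assuming the paths above actually exist on disk:
source /etc/profile
echo $JAVA_HOME $HADOOP_HOME $ZK_HOME   # confirm the variables are set
java -version                           # the JDK should resolve from PATH
hadoop version                          # the Hadoop binaries should resolve too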
The core-site.xml configuration:
<property>
  <name>fs.defaultFS</name>
  <value>hdfs://mycluster</value>
</property>
<property>
  <name>hadoop.tmp.dir</name>
  <value>file:/usr/local/src/hadoop/tmp</value>
</property>
<property>
  <name>ha.zookeeper.quorum</name>
  <value>master:2181,slave1:2181,slave2:2181</value>
</property>
<property>
  <name>ha.zookeeper.session-timeout.ms</name>
  <value>30000</value>
  <description>ms</description>
</property>
<property>
  <name>fs.trash.interval</name>
  <value>1440</value>
</property>
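Everything above leans on master, slave1 and slave2 resolving on every node; a quick way to check, assuming they are mapped in /etc/hosts:
for h in master slave1 slave2; do getent hosts $h; done   # each line should print an IP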
The mapred-site.xml configuration:
<property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
</property>
<property>
  <name>mapreduce.jobhistory.address</name>
  <value>master:10020</value>
</property>
<property>
  <name>mapreduce.jobhistory.webapp.address</name>
  <value>master:19888</value>
</property>
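Note that nothing listens on 10020/19888 until the JobHistory server is started by hand; assuming Hadoop 2.x, where the script ships in $HADOOP_HOME/sbin:
mr-jobhistory-daemon.sh start historyserver   # start the history server on master
jps | grep JobHistoryServer                   # verify the daemon is running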
The hdfs-site.xml configuration:
<property>
  <name>dfs.qjournal.start-segment.timeout.ms</name>
  <value>60000</value>
</property>
<property>
  <name>dfs.nameservices</name>
  <value>mycluster</value>
</property>
<property>
  <name>dfs.ha.namenodes.mycluster</name>
  <value>master,slave1</value>
</property>
<property>
  <name>dfs.namenode.rpc-address.mycluster.master</name>
  <value>master:8020</value>
</property>
<property>
  <name>dfs.namenode.rpc-address.mycluster.slave1</name>
  <value>slave1:8020</value>
</property>
<property>
  <name>dfs.namenode.http-address.mycluster.master</name>
  <value>master:50070</value>
</property>
<property>
  <name>dfs.namenode.http-address.mycluster.slave1</name>
  <value>slave1:50070</value>
</property>
<property>
  <name>dfs.namenode.shared.edits.dir</name>
  <value>qjournal://master:8485;slave1:8485;slave2:8485/mycluster</value>
</property>
<property>
  <name>dfs.client.failover.proxy.provider.mycluster</name>
  <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
  <name>dfs.ha.fencing.methods</name>
  <value>
    sshfence
    shell(/bin/true)
  </value>
</property>
<property>
  <name>dfs.permissions.enabled</name>
  <value>false</value>
</property>
<property>
  <name>dfs.support.append</name>
  <value>true</value>
</property>
<property>
  <name>dfs.ha.fencing.ssh.private-key-files</name>
  <value>/root/.ssh/id_rsa</value>
</property>
<property>
  <name>dfs.replication</name>
  <value>2</value>
</property>
<property>
  <name>dfs.namenode.name.dir</name>
  <value>/usr/local/src/hadoop/tmp/hdfs/nn</value>
</property>
<property>
  <name>dfs.datanode.data.dir</name>
  <value>/usr/local/src/hadoop/tmp/hdfs/dn</value>
</property>
<property>
  <name>dfs.journalnode.edits.dir</name>
  <value>/usr/local/src/hadoop/tmp/hdfs/jn</value>
</property>
<property>
  <name>dfs.ha.automatic-failover.enabled</name>
  <value>true</value>
</property>
<property>
  <name>dfs.webhdfs.enabled</name>
  <value>true</value>
</property>
<property>
  <name>dfs.ha.fencing.ssh.connect-timeout</name>
  <value>30000</value>
</property>
<property>
  <name>ha.failover-controller.cli-check.rpc-timeout.ms</name>
  <value>60000</value>
</property>
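The nn/dn/jn directories referenced above are easy to forget; creating them up front on each node (paths taken from the config) avoids startup failures:
mkdir -p /usr/local/src/hadoop/tmp/hdfs/{nn,dn,jn}   # NameNode, DataNode, JournalNode dirs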
The yarn-site.xml configuration:
<property>
  <name>yarn.resourcemanager.ha.enabled</name>
  <value>true</value>
</property>
<!-- RM HA also needs a cluster id; this property was missing, value assumed -->
<property>
  <name>yarn.resourcemanager.cluster-id</name>
  <value>yarn-cluster</value>
</property>
<property>
  <name>yarn.resourcemanager.ha.rm-ids</name>
  <value>rm1,rm2</value>
</property>
<property>
  <name>yarn.resourcemanager.hostname.rm1</name>
  <value>master</value>
</property>
<property>
  <name>yarn.resourcemanager.hostname.rm2</name>
  <value>slave1</value>
</property>
<property>
  <name>yarn.resourcemanager.zk-address</name>
  <value>master:2181,slave1:2181,slave2:2181</value>
</property>
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>
<property>
  <name>yarn.log-aggregation-enable</name>
  <value>true</value>  <!-- value was empty in the original; true assumed -->
</property>
<property>
  <name>yarn.log-aggregation.retain-seconds</name>
  <value>86400</value>
</property>
<property>
  <name>yarn.resourcemanager.recovery.enabled</name>
  <value>true</value>
</property>
<property>
  <name>yarn.resourcemanager.store.class</name>
  <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
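Once both ResourceManagers are up, their HA state can be probed per rm-id with the standard rmadmin command:
yarn rmadmin -getServiceState rm1   # expect "active" or "standby"
yarn rmadmin -getServiceState rm2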
The hadoop-env.sh file:
export JAVA_HOME=/usr/local/src/java
Those should be all the main files; if I've missed anything, please point it out.
Start from the error you actually have. Right now it is "chown: invalid user: hadoop:hadoop", which just means the hadoop user doesn't exist yet,
so create a regular user first and then grant it ownership.
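A minimal sketch of that fix (user and group names follow the question; adjust as needed):
groupadd hadoop                              # create the group
useradd -g hadoop hadoop                     # create the user in that group
chown -R hadoop:hadoop /usr/local/src/java   # now the original command succeeds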
Also, the JAVA directory does not need a regular user as its owner at all; leaving it owned by root is fine, as long as the regular user can read it.
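If you'd rather not chown it at all, just make it world-readable; a quick fix-and-verify, assuming the JAVA_HOME path from your profile:
chmod -R a+rX /usr/local/src/java   # read for everyone, traverse (X) on directories
ls -ld /usr/local/src/java          # verify owner and permissions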
Take it one step at a time: wherever the error is, fix that first. All deployment follows the same approach.