这篇文章主要介绍“Oozie-4.1.0 和 hadoop-2.7.1 怎么进行编译”,在日常操作中,相信很多人在 Oozie-4.1.0 和 hadoop-2.7.1 怎么进行编译问题上存在疑惑,丸趣 TV 小编查阅了各式资料,整理出简单好用的操作方法,希望对大家解答”Oozie-4.1.0 和 hadoop-2.7.1 怎么进行编译”的疑惑有所帮助!接下来,请跟着丸趣 TV 小编一起来学习吧!
一、环境
maven-3.3.0
hadoop-2.7.1
二、编译
[root@hftclclw0001 opt]# pwd
[root@hftclclw0001 opt]# wget http://apache.mirrors.pair.com/oozie/4.1.0/oozie-4.1.0.tar.gz
[root@hftclclw0001 opt]# tar -zxvf oozie-4.1.0.tar.gz
[root@hftclclw0001 opt]# cd oozie-4.1.0
#sqoop.version=1.4.3
#hive.version=0.13.1 —— 若修改为其他版本,编译会出错
#hbase.version=0.94.2 —— 若修改为其他版本,编译会出错
#pig.version=0.12.1
#hadoop.version=2.3.0 —— 默认配置为 2.3.0,但用它编译出的 Oozie 可以支持 hadoop-2.7.1
#tomcat.version=6.0.43
[root@hftclclw0001 opt]# ./bin/mkdistro.sh -DskipTests -Phadoop-2 -Dsqoop.version=1.4.6
[INFO] BUILD SUCCESS
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 07:25 min
[INFO] Finished at: 2016-06-19T12:46:07+00:00
[INFO] Final Memory: 128M/1178M
[INFO] ------------------------------------------------------------------------
Oozie distro created, DATE[2016.06.19-12:38:39GMT] VC-REV[unavailable], available at [/opt/oozie-4.1.0/distro/target]
三、配置
[root@hftclclw0001 opt]# pwd
[root@hftclclw0001 opt]# mkdir Oozie
[root@hftclclw0001 opt]# cd Oozie
[root@hftclclw0001 Oozie]# pwd
/opt/Oozie
[root@hftclclw0001 Oozie]# cp ../oozie-4.1.0/distro/target/oozie-4.1.0-distro.tar.gz ./
[root@hftclclw0001 Oozie]# tar -zxvf oozie-4.1.0-distro.tar.gz
[root@hftclclw0001 Oozie]# cd oozie-4.1.0
[root@hftclclw0001 oozie-4.1.0]# pwd
/opt/Oozie/oozie-4.1.0
[root@hftclclw0001 oozie-4.1.0]# mkdir libext
[root@hftclclw0001 oozie-4.1.0]# cp /opt/oozie-4.1.0/hadooplibs/hadoop-2/target/hadooplibs/hadooplib-2.3.0.oozie-4.1.0/* ./libext
[root@hftclclw0001 oozie-4.1.0]# cd libext
[root@hftclclw0001 libext]# curl -O http://archive.cloudera.com/gplextras/misc/ext-2.2.zip
下载 mysql 驱动放入 libext,因为用 mysql 作为元数据库,默认为 Derby
[root@hftclclw0001 libext]# ll
total 26452
-rw------- 1 root root 848401 Jun 19 13:41 mysql-connector-java-5.1.25-bin.jar
[root@hftclclw0001 libext]# cd ..
[root@hftclclw0001 oozie-4.1.0]# pwd
/opt/Oozie/oozie-4.1.0
[root@hftclclw0001 oozie-4.1.0]# ./bin/oozie-setup.sh prepare-war
[root@hftclclw0001 oozie-4.1.0]# ./bin/oozie-setup.sh sharelib create -fs hdfs://localhost:9000
mysql> CREATE DATABASE OOZIEDB;
mysql> GRANT ALL PRIVILEGES ON OOZIEDB.* TO 'oozie' IDENTIFIED BY 'oozie';
mysql> FLUSH PRIVILEGES;
[root@hftclclw0001 oozie-4.1.0]# ./bin/ooziedb.sh create db -run
[root@hftclclw0001 oozie-4.1.0]# ./bin/oozied.sh start
四、examples
job.properties
nameNode=hdfs://nameservice1
#nameNode=hdfs://nameservice1 == HA
#nameNode=hdfs://${namenode}:8020 == single namenode
jobTracker=dapdevhmn001.qa.webex.com:8032
#jobTracker=rm1,rm2 == HA
#jobTracker(yarn.resourcemanager.address)=10.224.243.124:8032
queueName=default
examplesRoot=examples
#oozie.use.system.libpath=true
oozie.wf.application.path=${nameNode}/user/${user.name}/${examplesRoot}/apps/map-reduce
outputDir=map-reduce
workflow.xml
<workflow-app xmlns="uri:oozie:workflow:0.2" name="map-reduce-wf">
    <start to="mr-node"/>
    <action name="mr-node">
        <map-reduce>
            <job-tracker>${jobTracker}</job-tracker>
            <name-node>${nameNode}</name-node>
            <prepare>
                <delete path="${nameNode}/user/${wf:user()}/${examplesRoot}/output-data/${outputDir}"/>
            </prepare>
            <configuration>
                <property>
                    <name>mapred.job.queue.name</name>
                    <value>${queueName}</value>
                </property>
                <property>
                    <name>mapred.mapper.class</name>
                    <value>org.apache.oozie.example.SampleMapper</value>
                </property>
                <property>
                    <name>mapred.reducer.class</name>
                    <value>org.apache.oozie.example.SampleReducer</value>
                </property>
                <property>
                    <name>mapred.map.tasks</name>
                    <value>1</value>
                </property>
                <property>
                    <name>mapred.input.dir</name>
                    <value>/user/${wf:user()}/${examplesRoot}/input-data/text</value>
                </property>
                <property>
                    <name>mapred.output.dir</name>
                    <value>/user/${wf:user()}/${examplesRoot}/output-data/${outputDir}</value>
                </property>
            </configuration>
        </map-reduce>
        <ok to="end"/>
        <error to="fail"/>
    </action>
    <kill name="fail">
        <message>Map/Reduce failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>
    <end name="end"/>
</workflow-app>
lib/oozie-examples-4.1.0.jar
hadoop fs -mkdir -p /user/root/examples/apps/map-reduce
hadoop fs -put ./job.properties /user/root/examples/apps/map-reduce
hadoop fs -put ./workflow.xml /user/root/examples/apps/map-reduce
hadoop fs -put ./lib/oozie-examples-4.1.0.jar /user/root/examples/apps/map-reduce
job.properties == 不仅仅需要在 HDFS,本地也需要一份。执行命令 -config 是指向本地的文件
oozie job -oozie ${OOZIE_URL} -config job.properties -run
oozie job -oozie ${OOZIE_URL} -info ${oozie_id}
#oozie job -oozie ${OOZIE_URL} -info 0000001-170206083712434-oozie-oozi-W
oozie job -oozie ${OOZIE_URL} -log ${oozie_id}
#oozie job -oozie ${OOZIE_URL} -log 0000001-170206083712434-oozie-oozi-W
五、distcp
job.properties
nameNode=hdfs://${sourceNameNode}:8020
destNameNode=hdfs://${destNameNode}:8020
jobTracker=${RM}:8032
#yarn.resourcemanager.address=${RM}:8032
queueName=default
examplesRoot=examples
oozie.use.system.libpath=true
oozie.wf.application.path=${nameNode}/user/${user.name}/${examplesRoot}/apps/distcp_2
outputDir=distcp
workflow.xml
<workflow-app xmlns="uri:oozie:workflow:0.3" name="distcp-wf">
    <start to="distcp-node"/>
    <action name="distcp-node">
        <distcp xmlns="uri:oozie:distcp-action:0.1">
            <job-tracker>${jobTracker}</job-tracker>
            <name-node>${nameNode}</name-node>
            <prepare>
                <delete path="${nameNode}/user/${wf:user()}/${examplesRoot}/output-data/${outputDir}"/>
            </prepare>
            <configuration>
                <property>
                    <name>mapred.job.queue.name</name>
                    <value>${queueName}</value>
                </property>
            </configuration>
            <arg>${nameNode}/user/${wf:user()}/${examplesRoot}/input-data/text/data.txt</arg>
            <arg>${destNameNode}/tmp/data.txt</arg>
        </distcp>
        <ok to="end"/>
        <error to="fail"/>
    </action>
    <kill name="fail">
        <message>DistCP failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>
    <end name="end"/>
</workflow-app>
到此,关于“Oozie-4.1.0 和 hadoop-2.7.1 怎么进行编译”的学习就结束了,希望能够解决大家的疑惑。理论与实践的搭配能更好的帮助大家学习,快去试试吧!若想继续学习更多相关知识,请继续关注丸趣 TV 网站,丸趣 TV 小编会继续努力为大家带来更多实用的文章!