How to merge small files into a SequenceFile in Hadoop and read it in a mapper
This article covers how to merge small files into a SequenceFile in Hadoop and read it back in a map task. Many people run into trouble with this in practice, so work through the steps below carefully and you should come away with a working solution!
First, merge all the small files under /words into a single SequenceFile, using each file's name as the key and its content as the value:

package hgs.sequencefile;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

// Merge small files into one SequenceFile
public class SequenceMain {
    public static void main(String[] args) throws IOException, URISyntaxException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://192.168.6.129:9000"), conf);
        // List all files under the /words directory
        FileStatus[] fstats = fs.listStatus(new Path("/words"));
        //System.out.println(fstats.length);
        Text key = new Text();
        Text value = new Text();
        // Create a SequenceFile.Writer; merge.seq is the output file name
        @SuppressWarnings("deprecation")
        SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf,
                new Path("/sequence/merge.seq"), key.getClass(), value.getClass());
        // Append each file to the SequenceFile as a key/value pair
        for (FileStatus fis : fstats) {
            FSDataInputStream finput = fs.open(fis.getPath());
            byte[] buffer = new byte[(int) fis.getLen()];
            IOUtils.readFully(finput, buffer, 0, buffer.length);
            // File name is the key, file content is the value
            key.set(fis.getPath().getName());
            value.set(buffer);
            writer.append(key, value);
            finput.close();
        }
        writer.close();
        fs.close();
    }
}
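Before wiring up the MapReduce job, it can help to confirm what was actually written. Below is a minimal verification sketch, assuming the same HDFS address and output path as above (the SequenceCheck class name is ours, not from the article):

package hgs.sequencefile;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

// Verification sketch: print each key (original file name) stored in merge.seq
public class SequenceCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("hdfs://192.168.6.129:9000/sequence/merge.seq");
        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(path));
        Text key = new Text();
        Text value = new Text();
        while (reader.next(key, value)) {
            System.out.println(key + " -> " + value.getLength() + " bytes");
        }
        reader.close();
    }
}

Alternatively, hdfs dfs -text /sequence/merge.seq prints each key/value pair from the command line.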
package hgs.sequencefile;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Identity mapper: SequenceFileAsTextInputFormat delivers both key and value as Text
public class SequnceMapper extends Mapper<Text, Text, Text, Text> {
    @Override
    protected void map(Text key, Text value, Context context)
            throws IOException, InterruptedException {
        context.write(key, value);
    }
}
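SequenceFileAsTextInputFormat converts each record by calling toString() on the stored key and value and wrapping the result in Text, which is why the mapper's input types above are Text/Text. Since the merged file already stores Text pairs, a functionally equivalent setup (our sketch, not part of the article) can read the stored writables directly with SequenceFileInputFormat:

package hgs.sequencefile;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Hypothetical alternative: with SequenceFileInputFormat the stored Text
// key/value objects are passed through unchanged, with no toString() conversion.
public class DirectSeqMapper extends Mapper<Text, Text, Text, Text> {
    @Override
    protected void map(Text key, Text value, Context context)
            throws IOException, InterruptedException {
        context.write(key, value); // key = original file name, value = file content
    }
}

To use it, the driver would set job.setInputFormatClass(SequenceFileInputFormat.class) with the class from org.apache.hadoop.mapreduce.lib.input.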
package hgs.sequencefile;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsTextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SequenceDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "read_sequence_file");
        job.setJarByClass(hgs.sequencefile.SequenceDriver.class);

        job.setMapperClass(SequnceMapper.class);
        // No reducer is needed; the mapper output is written out as-is
        //job.setReducerClass(Reducer.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        // SequenceFileAsTextInputFormat reads the SequenceFile and hands the
        // mapper String-converted (Text) keys and values;
        // SequenceFileAsBinaryInputFormat would instead deliver raw
        // BytesWritable keys and values.
        job.setInputFormatClass(SequenceFileAsTextInputFormat.class);

        // Input and output paths are directories, not files
        FileInputFormat.setInputPaths(job, new Path("hdfs://192.168.6.129:9000/sequence"));
        FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.6.129:9000/seqresult"));

        if (!job.waitForCompletion(true))
            return;
    }
}
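The original imports included the old-API SequenceFileOutputFormat without using it. If the goal were to keep the job's output as a SequenceFile rather than the default tab-separated text, a sketch using the new-API class could look like the following (the job name and /seqresult2 output path are our assumptions, not from the article):

package hgs.sequencefile;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsTextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

// Hypothetical variant of SequenceDriver: write the result back out as a
// SequenceFile instead of plain text.
public class SequenceToSequenceDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "seq_to_seq");
        job.setJarByClass(SequenceToSequenceDriver.class);
        job.setMapperClass(SequnceMapper.class);
        job.setNumReduceTasks(0); // map-only job
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        job.setInputFormatClass(SequenceFileAsTextInputFormat.class);
        job.setOutputFormatClass(SequenceFileOutputFormat.class);
        FileInputFormat.setInputPaths(job, new Path("hdfs://192.168.6.129:9000/sequence"));
        FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.6.129:9000/seqresult2"));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}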
That concludes "How to merge small files into a SequenceFile in Hadoop and read it in a mapper". Thanks for reading!