0

Trying a Java MapReduce problem. When I compile the code with the following command, Im getting some errors, they are listed below .. please help me out. Thanks in advance

Source code

package cvkumar.hadoopmr;
import java.io.IOException;
import java.util.StrinTokenizer;
import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.conf.*;
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.mapred.*;
 import org.apache.hadoop.util.*;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputForm;
import org.apache.hadoop.mapreduce.lib.output.FileOutFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class Dictionary 
{
    public static class WordMapper extends Mapper <Text, Text, Text, Text>
    {
        private Text word = new Text();
        public void map(Text key, Text value, Context context)
            throws IOException, InterruptedException
        {
            StringTokenizer itr = new StringTokenizer(value.toString(),",");
            while (itr.hasMoreTokens())
            {
                word.set(itr.nextToken());
                context.write(key,word);
            }
        }
    }
    public static class AllTranslationsReducer
        extends Reducer<Text,Text,Text,Text>
    {
            private Text result = new Text();
            public void reduce(Text key, Iterable<Text> values,Context context)
         throws IOException, InterruptedException
            {
                String translations = "";
                for (Text val : values)
                {
                    translations += "|"+val.toString();
                }
                result.set(translations);
                context.write(key, result);
            }
        }

    public static void main(String[] args) throws Exception
    {
            Configuration conf = new Configuration();
            Job job = new Job(conf, "dictionary");
            job.setJarByClass(Dictionary.class);
            job.setMapperClass(WordMapper.class);
            job.setReducerClass(AllTranslationsReducer.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);
            job.setInputFormatClass(KeyValueTextInputFormat.class);
            //FileInputFormat.addInputPath(job, new Path("/tmp/hadoop-cscarioni/dfs/name/file"));
            //FileOutputFormat.setOutputPath(job, new Path("output"));
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        }
}

Errors

$javac -classpath hadoop-core-1.2.1.jar -d ./Dictionary ./cvkumar/hadoopmr/Dictionary.java

hadoop@hadoop-Vostro1310:~/hadoop-1.2.1$ javac -classpath hadoop-core-1.2.1.jar -d ./Dictionary ./cvkumar/hadoopmr/Dictionary.java 
./cvkumar/hadoopmr/Dictionary.java:3: cannot find symbol
symbol  : class StrinTokenizer
location: package java.util
import java.util.StrinTokenizer;
                ^
./cvkumar/hadoopmr/Dictionary.java:15: cannot find symbol
symbol  : class KeyValueTextInputForm
location: package org.apache.hadoop.mapreduce.lib.input
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputForm;
                                            ^
./cvkumar/hadoopmr/Dictionary.java:16: cannot find symbol
symbol  : class FileOutFormat
location: package org.apache.hadoop.mapreduce.lib.output
import org.apache.hadoop.mapreduce.lib.output.FileOutFormat;
                                             ^
./cvkumar/hadoopmr/Dictionary.java:27: cannot find symbol
symbol  : class StringTokenizer
location: class cvkumar.hadoopmr.Dictionary.WordMapper
            StringTokenizer itr = new StringTokenizer(value.toString(),",");
            ^
./cvkumar/hadoopmr/Dictionary.java:27: cannot find symbol
symbol  : class StringTokenizer
location: class cvkumar.hadoopmr.Dictionary.WordMapper
            StringTokenizer itr = new StringTokenizer(value.toString(),",");
                                      ^
./cvkumar/hadoopmr/Dictionary.java:61: setInputFormatClass(java.lang.Class<? extends org.apache.hadoop.mapreduce.InputFormat>) in org.apache.hadoop.mapreduce.Job cannot be applied to (java.lang.Class<org.apache.hadoop.mapred.KeyValueTextInputFormat>)
            job.setInputFormatClass(KeyValueTextInputFormat.class);
               ^
./cvkumar/hadoopmr/Dictionary.java:65: setOutputPath(org.apache.hadoop.mapred.JobConf,org.apache.hadoop.fs.Path) in org.apache.hadoop.mapred.FileOutputFormat cannot be applied to (org.apache.hadoop.mapreduce.Job,org.apache.hadoop.fs.Path)
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
                        ^
7 errors
2
  • import java.util.StringTokenizer; Commented Jan 21, 2014 at 7:07
  • swift, make sure your imports are correct. import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat; import java.util.StringTokenizer; Commented Jan 21, 2014 at 9:32

1 Answer 1

1

The answer is already there, by the java compiler. Change the following lines:

line 3:

import java.util.StringTokenizer;

line 15:

import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;

line 16:

import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

Tip: If you are using an IDE like Eclipse or NetBeans, java compile errors should be already highlighted and showing you a tip on how to resolve them. If you are not using an IDE, I strongly suggest you to do so! Since you are writing MapReduce programs, I suggest Eclipse, for which you can find a hadoop plugin.

Sign up to request clarification or add additional context in comments.

Comments

Your Answer

By clicking “Post Your Answer”, you agree to our terms of service and acknowledge you have read our privacy policy.

Start asking to get answers

Find the answer to your question by asking.

Ask question

Explore related questions

See similar questions with these tags.