Run Custom Hadoop Mapper/Combiner/Reducer

TODO: add example code / job-submission snippet here

	/**
	 * Configuration bean describing the custom Hadoop mapper for a processing job.
	 * All fields are class names resolved at job-submission time.
	 */
	public static class CustomProcessingMapper {
		/** Fully qualified classpath of the job's mapper; it should be in the form of {@code package.file$class}. */
		public String mapperClass;
		
		/**
		 * Fully specified key class name of the mapper output. Mandatory if no reducer is specified;
		 * otherwise allows the mapper to use a different output key class than the reducer.
		 */
		public String mapperKeyClass;
		/**
		 * Fully specified value class name of the mapper output. Mandatory if no reducer is specified;
		 * otherwise allows the mapper to use a different output value class than the reducer.
		 */
		public String mapperValueClass;
	}

	/**
	 * Configuration bean describing the (optional) custom Hadoop combiner for a processing job.
	 */
	public static class CustomProcessingCombiner {
		/**
		 * Fully qualified classpath of the job's combiner; it should be in the form of
		 * {@code package.file$class}. Use the reducer class here if you have not written a
		 * combiner, or submit null. If not present, then only the mapper (or combiner) is run,
		 * and records with duplicate keys will overwrite each other in an arbitrary order.
		 */
		public String combinerClass;
	}

	/**
	 * Configuration bean describing the custom Hadoop reducer and the job's output format classes.
	 */
	public static class CustomProcessingReducer {
		/** Fully qualified classpath of the job's reducer; it should be in the form of {@code package.file$class}. */
		public String reducerClass;
		/** Number of reducers to use (OPTIONAL, default: 1). */
		public Integer numReducers;
		
		/**
		 * Classpath for the map-reduce output format key — usually
		 * {@code org.apache.hadoop.io.Text} or {@code com.mongodb.hadoop.io.BSONWritable}.
		 */
		public String outputKeyClass;
		/**
		 * Classpath for the map-reduce output format value — usually
		 * {@code com.mongodb.hadoop.io.BSONWritable}.
		 */
		public String outputValueClass;
	}