Run Built-in/Custom Hadoop Module

TODO CODE

	/**
	 * Bean describing a built-in or custom Hadoop map/reduce engine configuration.
	 * Fields are public so the object can be (de)serialized directly (e.g. to/from JSON);
	 * field names are part of the wire format — do not rename.
	 */
	public static class HadoopEngine {
		/** A descriptive name for the engine. */
		public String engineName;
		/** The Java classpath to the job's mapper; should be in the form {@code package.file$class}. */
		public String mapperClass;
		/**
		 * The Java classpath to the job's combiner; should be in the form {@code package.file$class}.
		 * Use the reducer class if you have not written a combiner, or submit null. If not present,
		 * then only the mapper (or combiner) is run, and records with duplicate keys will overwrite
		 * each other in an arbitrary order.
		 */
		public String combinerClass;
		/** The Java classpath to the job's reducer; should be in the form {@code package.file$class}. */
		public String reducerClass;
		/** The number of reducers to use (OPTIONAL, default: 1). */
		public Integer numReducers;
		/**
		 * Fully-specified key class name for the mapper output; allows the mapper to emit
		 * different output classes than the reducer.
		 */
		public String mapperKeyClass;
		/**
		 * Fully-specified value class name for the mapper output; allows the mapper to emit
		 * different output classes than the reducer.
		 */
		public String mapperValueClass;
		/** The classpath for the map/reduce output-format key, usually {@code org.apache.hadoop.io.BSONWritable}. */
		public String outputKeyClass;
		/** The classpath for the map/reduce output-format value, usually {@code org.apache.hadoop.io.BSONWritable}. */
		public String outputValueClass;
		/** The configuration (can be JSON or any arbitrary string format, depending on what the engine needs). */
		public String configuration;
		/**
		 * For JSON formats, enables separate key/value pairs to be inserted
		 * (currently values need to be strings). Insertion order is preserved.
		 */
		public LinkedHashMap<String, String> configParams;
		/**
		 * Either a share _id, or a URL (admin only), or a string of the format
		 * {@code $infinite/share/get/{_id}} (legacy) that points to the JAR to use
		 * for the mapper/combiner/reducer classes.
		 */
		public String mainJar;
	}