Type Parameters: T - the value type of the records produced by this input format

public abstract class FileInputFormatRdfBase<T>
extends org.apache.hadoop.mapreduce.lib.input.FileInputFormat<org.apache.hadoop.io.LongWritable,T>

Concrete subclasses supply the record reader by implementing createRecordReaderActual(InputSplit, TaskAttemptContext).
Modifier and Type | Field and Description
---|---
static String | BASE_IRI_KEY
protected org.apache.jena.riot.Lang | lang (the input language)
static long | PARSED_PREFIXES_LENGTH_DEFAULT
static String | PREFIXES_KEY
protected String | prefixesLengthMaxKey
Constructor and Description
---
FileInputFormatRdfBase(org.apache.jena.riot.Lang lang, String prefixesLengthMaxKey)
Modifier and Type | Method and Description
---|---
org.apache.hadoop.mapreduce.RecordReader<org.apache.hadoop.io.LongWritable,T> | createRecordReader(org.apache.hadoop.mapreduce.InputSplit inputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext context)
abstract org.apache.hadoop.mapreduce.RecordReader<org.apache.hadoop.io.LongWritable,T> | createRecordReaderActual(org.apache.hadoop.mapreduce.InputSplit inputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext context)
List<org.apache.hadoop.mapreduce.InputSplit> | getSplits(org.apache.hadoop.mapreduce.JobContext job)
boolean | isSplitable(org.apache.hadoop.mapreduce.JobContext context, org.apache.hadoop.fs.Path file)
Methods inherited from class org.apache.hadoop.mapreduce.lib.input.FileInputFormat: addInputPath, addInputPathRecursively, addInputPaths, computeSplitSize, getBlockIndex, getFormatMinSplitSize, getInputDirRecursive, getInputPathFilter, getInputPaths, getMaxSplitSize, getMinSplitSize, listStatus, makeSplit, makeSplit, setInputDirRecursive, setInputPathFilter, setInputPaths, setInputPaths, setMaxInputSplitSize, setMinInputSplitSize
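The method summary shows that createRecordReader is final while createRecordReaderActual is abstract, so concrete input formats provide the actual reader. Below is a minimal sketch of such a subclass, assuming a Turtle input whose records are Jena Triple values; the class name FileInputFormatTurtleTriple, the configuration key, and the no-op reader body are illustrative assumptions, not part of the library.

```java
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.jena.graph.Triple;
import org.apache.jena.riot.Lang;

// Hypothetical concrete subclass; FileInputFormatRdfBase is assumed to be on
// the classpath (its package import is omitted here).
public class FileInputFormatTurtleTriple extends FileInputFormatRdfBase<Triple> {

    // Hypothetical configuration key naming the maximum number of leading
    // bytes scanned for prefix declarations; the real key name may differ.
    public static final String PREFIXES_MAX_LENGTH_KEY =
            "example.input.turtle.prefixes.maxlength";

    public FileInputFormatTurtleTriple() {
        // The base class records the RDF language and the name of the
        // "max prefixes length" configuration entry.
        super(Lang.TURTLE, PREFIXES_MAX_LENGTH_KEY);
    }

    @Override
    public RecordReader<LongWritable, Triple> createRecordReaderActual(
            InputSplit inputSplit, TaskAttemptContext context) {
        // Placeholder reader that emits no records; a real implementation
        // would parse Turtle from the split and emit one Triple per record.
        return new RecordReader<LongWritable, Triple>() {
            @Override public void initialize(InputSplit split, TaskAttemptContext ctx) { }
            @Override public boolean nextKeyValue() { return false; }
            @Override public LongWritable getCurrentKey() { return null; }
            @Override public Triple getCurrentValue() { return null; }
            @Override public float getProgress() { return 0f; }
            @Override public void close() { }
        };
    }
}
```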
public static final String PREFIXES_KEY
public static final String BASE_IRI_KEY
public static final long PARSED_PREFIXES_LENGTH_DEFAULT
protected org.apache.jena.riot.Lang lang
protected String prefixesLengthMaxKey
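The static fields above name entries in the Hadoop Configuration rather than holding RDF data themselves. A minimal driver-side sketch, assuming the entry named by BASE_IRI_KEY may be set by the job, while the entry named by PREFIXES_KEY may be populated by the input format itself during split planning, so it is only read back here; the example IRI is illustrative.

```java
import org.apache.hadoop.conf.Configuration;

public class RdfInputConfigExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // BASE_IRI_KEY is the *name* of a configuration entry; the value set
        // here is the base IRI used when resolving relative IRIs (illustrative).
        conf.set(FileInputFormatRdfBase.BASE_IRI_KEY, "http://example.org/data/");

        // PREFIXES_KEY likewise names a configuration entry; depending on the
        // library it may be filled in by the input format during split
        // planning, so this sketch only reads it back.
        String prefixes = conf.get(FileInputFormatRdfBase.PREFIXES_KEY);
        System.out.println("Prefixes entry (may be null before getSplits): " + prefixes);
    }
}
```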
public FileInputFormatRdfBase(org.apache.jena.riot.Lang lang, String prefixesLengthMaxKey)
public boolean isSplitable(org.apache.hadoop.mapreduce.JobContext context, org.apache.hadoop.fs.Path file)

Overrides: isSplitable in class org.apache.hadoop.mapreduce.lib.input.FileInputFormat<org.apache.hadoop.io.LongWritable,T>
public final org.apache.hadoop.mapreduce.RecordReader<org.apache.hadoop.io.LongWritable,T> createRecordReader(org.apache.hadoop.mapreduce.InputSplit inputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext context)

Final entry point; the concrete reader is obtained from createRecordReaderActual(InputSplit, TaskAttemptContext).

Specified by: createRecordReader in class org.apache.hadoop.mapreduce.InputFormat<org.apache.hadoop.io.LongWritable,T>
public abstract org.apache.hadoop.mapreduce.RecordReader<org.apache.hadoop.io.LongWritable,T> createRecordReaderActual(org.apache.hadoop.mapreduce.InputSplit inputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext context)
public List<org.apache.hadoop.mapreduce.InputSplit> getSplits(org.apache.hadoop.mapreduce.JobContext job) throws IOException

Overrides: getSplits in class org.apache.hadoop.mapreduce.lib.input.FileInputFormat<org.apache.hadoop.io.LongWritable,T>

Throws: IOException
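Split planning and the splitability check are invoked by the MapReduce framework once the input format is registered on a job. A minimal driver sketch, reusing the hypothetical FileInputFormatTurtleTriple subclass from above; mapper, reducer, and output configuration are omitted.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class RdfJobSetupExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "rdf-input-example");

        // FileInputFormatTurtleTriple is the hypothetical subclass sketched above.
        job.setInputFormatClass(FileInputFormatTurtleTriple.class);

        // Inherited helper from FileInputFormat (see the inherited-methods list).
        FileInputFormat.addInputPath(job, new Path(args[0]));

        // At submission time the framework calls getSplits(JobContext) to plan
        // splits and isSplitable(JobContext, Path) to decide whether a file
        // may be divided across mappers.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
```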