Optimize scans: use parallel scans based on the number of regions
ideal committed Sep 30, 2018
1 parent c491cc1 · commit d104f0c
Showing 1 changed file with 11 additions and 1 deletion.
@@ -92,7 +92,6 @@ public HbaseClient(
      * @param table Table Name
      * @param rowIdDomain Domain for the row ID
      * @param constraints Column constraints for the query
-     * // * @param serializer Instance of a row serializer
      * @return List of TabletSplitMetadata objects for Presto
      */
     public List<TabletSplitMetadata> getTabletSplits(
public List<TabletSplitMetadata> getTabletSplits(
@@ -154,6 +153,17 @@ public List<TabletSplitMetadata> getTabletSplits(
         }
     }

+    /**
+     * Executes the given HbaseSplit for a query against an HBase table.
+     * <p>
+     * Splits on row ID ranges, applies secondary indexes, prunes columns, and
+     * performs other scan optimizations before the scan runs.
+     *
+     * @param session Current session
+     * @param split HbaseSplit
+     * @param columnHandles List of HbaseColumnHandle
+     * @return {@link org.apache.hadoop.mapreduce.RecordReader} over ImmutableBytesWritable keys and Result values
+     */
public RecordReader<ImmutableBytesWritable, Result> execSplit(ConnectorSession session, HbaseSplit split, List<HbaseColumnHandle> columnHandles)
throws IllegalAccessException, NoSuchFieldException, IOException, InterruptedException
{
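For orientation, here is a hypothetical caller-side sketch of execSplit, showing the standard drain loop for a Hadoop mapreduce RecordReader. drainSplit is an invented helper, not the connector's actual read path, and imports for the connector's own classes (HbaseClient, HbaseSplit, HbaseColumnHandle, ConnectorSession) are omitted because their packages are not shown in the diff.

import java.util.List;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.mapreduce.RecordReader;

// HbaseClient, HbaseSplit, HbaseColumnHandle and ConnectorSession are the
// types named in the diff above; their packages are not shown there.
static void drainSplit(HbaseClient client, ConnectorSession session,
        HbaseSplit split, List<HbaseColumnHandle> columnHandles)
        throws Exception
{
    RecordReader<ImmutableBytesWritable, Result> reader =
            client.execSplit(session, split, columnHandles);
    try {
        // Standard Hadoop RecordReader iteration: advance, then read key/value.
        while (reader.nextKeyValue()) {
            ImmutableBytesWritable rowKey = reader.getCurrentKey();
            Result row = reader.getCurrentValue();
            // Hand each HBase Result to the engine's record cursor here.
        }
    }
    finally {
        reader.close();
    }
}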

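The commit title says scans are now parallelized according to the number of regions. As a rough illustration only (not code from this commit), the sketch below derives one Scan per region using the HBase client's RegionLocator; RegionScanPlanner and scanPerRegion are invented names, and the getRegionInfo() accessors assume an HBase 1.x-era client.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Scan;

public final class RegionScanPlanner
{
    private RegionScanPlanner() {}

    // Builds one Scan per region so the caller can run them concurrently,
    // e.g. wrapping each key range in its own split.
    public static List<Scan> scanPerRegion(Connection connection, TableName table)
            throws IOException
    {
        List<Scan> scans = new ArrayList<>();
        try (RegionLocator locator = connection.getRegionLocator(table)) {
            for (HRegionLocation location : locator.getAllRegionLocations()) {
                Scan scan = new Scan();
                // Empty start/stop keys mark the first and last regions of the table.
                scan.setStartRow(location.getRegionInfo().getStartKey());
                scan.setStopRow(location.getRegionInfo().getEndKey());
                scans.add(scan);
            }
        }
        return scans;
    }
}

Each returned Scan is bounded by one region's key range, so a caller can submit the scans to a thread pool, running one scanner per region in parallel.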