HBASE-22802 Avoid temp ByteBuffer allocation in FileIOEngine#read #467
ByteBuff.java

@@ -19,6 +19,7 @@
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 import java.nio.channels.ReadableByteChannel;
 import java.util.List;
@@ -450,6 +451,16 @@ public byte[] toBytes() {
    */
   public abstract int read(ReadableByteChannel channel) throws IOException;
 
+  /**
+   * Reads bytes from FileChannel into this ByteBuff
+   */
+  public abstract int read(FileChannel channel, long offset) throws IOException;
+
+  /**
+   * Write this ByteBuff's data into target file
+   */
+  public abstract int write(FileChannel channel, long offset) throws IOException;
+
   // static helper methods
   public static int channelRead(ReadableByteChannel channel, ByteBuffer buf) throws IOException {
     if (buf.remaining() <= NIO_BUFFER_LIMIT) {
@@ -475,6 +486,32 @@ public static int channelRead(ReadableByteChannel channel, ByteBuffer buf) throw
     return (nBytes > 0) ? nBytes : ret;
   }
 
+  public static int fileRead(FileChannel channel, ByteBuffer buf, long offset)
+      throws IOException {
+    if (buf.remaining() <= NIO_BUFFER_LIMIT) {
+      return channel.read(buf, offset);
+    }
+    int originalLimit = buf.limit();
+    int initialRemaining = buf.remaining();
+    int ret = 0;
+
+    while (buf.remaining() > 0) {
+      try {
+        int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT);
+        buf.limit(buf.position() + ioSize);
+        offset += ret;
+        ret = channel.read(buf, offset);
+        if (ret < ioSize) {
+          break;
+        }
+      } finally {
+        buf.limit(originalLimit);
+      }
+    }
+    int nBytes = initialRemaining - buf.remaining();
+    return (nBytes > 0) ? nBytes : ret;
+  }
+

Review comment (on buf.limit(originalLimit)): Only reset the limit? Should we also reset the position?

   /**
    * Read integer from ByteBuff coded in 7 bits and increment position.
    * @return Read integer.
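For orientation, here is a hedged usage sketch of the new positional-read API (the PositionalReadDemo class is illustrative and assumes hbase-common containing this patch on the classpath): a ByteBuff is filled directly from a file at a given offset, so no temporary on-heap ByteBuffer is needed in between, which is the allocation the PR title is about avoiding in FileIOEngine#read.

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.file.Paths;
    import java.nio.file.StandardOpenOption;

    import org.apache.hadoop.hbase.nio.ByteBuff;

    // Illustrative only: fill a ByteBuff straight from a file at a given offset.
    public final class PositionalReadDemo {
      public static void main(String[] args) throws IOException {
        try (FileChannel channel =
            FileChannel.open(Paths.get(args[0]), StandardOpenOption.READ)) {
          ByteBuff buff = ByteBuff.wrap(ByteBuffer.allocate(64 * 1024));
          long offset = 0L;
          while (buff.hasRemaining()) {
            int read = buff.read(channel, offset);   // new API introduced by this patch
            if (read <= 0) {
              break;                                 // EOF before the buffer was filled
            }
            offset += read;
          }
          System.out.println("read " + buff.position() + " bytes");
        }
      }
    }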
MultiByteBuff.java

@@ -24,6 +24,7 @@
 import java.nio.BufferUnderflowException;
 import java.nio.ByteBuffer;
 import java.nio.InvalidMarkException;
+import java.nio.channels.FileChannel;
 import java.nio.channels.ReadableByteChannel;
 
 import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler;
@@ -1086,6 +1087,48 @@ public int read(ReadableByteChannel channel) throws IOException {
     return total;
   }
 
+  @Override
+  public int read(FileChannel channel, long offset) throws IOException {
+    checkRefCount();
+    int total = 0;
+    while (true) {
+      int len = fileRead(channel, this.curItem, offset);
+      if (len > 0) {
+        total += len;
+        offset += len;
+      }
+      if (this.curItem.hasRemaining()) {
+        break;
+      } else {
+        if (this.curItemIndex >= this.limitedItemIndex) {
+          break;
+        }
+        this.curItemIndex++;
+        this.curItem = this.items[this.curItemIndex];
+      }
+    }
+    return total;
+  }
+

Review comment (on the new read(FileChannel, long) override): Should we also make the abstraction between MultiByteBuff#read and MultiByteBuff#write, as said above?

+  @Override
+  public int write(FileChannel channel, long offset) throws IOException {
+    checkRefCount();
+    int total = 0;
+    while (true) {
+      int len = channel.write(curItem, offset);
+      if (len > 0) {
+        total += len;
+        offset += len;
+      }
+      if (this.curItemIndex >= this.limitedItemIndex) {
+        break;
+      }
+      this.curItemIndex++;
+      this.curItem = this.items[this.curItemIndex];
+    }
+    return total;
+  }
+
   @Override
   public ByteBuffer[] nioByteBuffers() {
     checkRefCount();
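A hedged sketch of what the suggested read/write abstraction could look like, using a simplified stand-in class (SegmentedBuffer and ChannelTransfer are illustrative names, and the toy omits MultiByteBuff's reference counting and limitedItemIndex handling): a single traversal loop is parameterized by a transfer function, so read and write differ only in the lambda they pass.

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;

    // Toy model of a multi-segment buffer; not MultiByteBuff itself.
    final class SegmentedBuffer {
      @FunctionalInterface
      interface ChannelTransfer {
        int transfer(FileChannel channel, ByteBuffer buf, long offset) throws IOException;
      }

      private final ByteBuffer[] items;
      private int curIndex = 0;

      SegmentedBuffer(ByteBuffer... items) {
        this.items = items;
      }

      // One traversal loop shared by read and write; only the transfer op differs.
      private int transfer(FileChannel channel, long offset, ChannelTransfer op) throws IOException {
        int total = 0;
        while (true) {
          ByteBuffer cur = items[curIndex];
          int len = op.transfer(channel, cur, offset);
          if (len > 0) {
            total += len;
            offset += len;
          }
          if (cur.hasRemaining() || curIndex == items.length - 1) {
            break;                 // partial transfer or last segment: stop
          }
          curIndex++;              // current segment exhausted: move to the next one
        }
        return total;
      }

      int read(FileChannel channel, long offset) throws IOException {
        return transfer(channel, offset, (ch, buf, off) -> ch.read(buf, off));
      }

      int write(FileChannel channel, long offset) throws IOException {
        return transfer(channel, offset, (ch, buf, off) -> ch.write(buf, off));
      }
    }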
BucketCache.java

@@ -429,7 +429,9 @@ private void cacheBlockWithWaitInternal(BlockCacheKey cacheKey, Cacheable cached
     if (!cacheEnabled) {
       return;
     }
-    LOG.trace("Caching key={}, item={}", cacheKey, cachedItem);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Caching key={}, item={}", cacheKey, cachedItem);
+    }

Review comment (on the added guard): You don't need this if (LOG.isTraceEnabled()) check when using this logging form with the '{}' placeholders; internally it does this test.

Reply: Correct.

     // Stuff the entry into the RAM cache so it can get drained to the persistent store
     RAMQueueEntry re =
         new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(), inMemory,
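A minimal sketch of the logging point above (TraceLoggingDemo and expensiveSummary are illustrative names): with SLF4J's '{}' placeholder form, the level check happens inside the logger before the message is formatted, so the explicit guard is only worthwhile when computing an argument is itself expensive.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public final class TraceLoggingDemo {
      private static final Logger LOG = LoggerFactory.getLogger(TraceLoggingDemo.class);

      public static void main(String[] args) {
        String cacheKey = "key-1";
        String cachedItem = "block-1";

        // Parameterized form: the message is only formatted (and the arguments'
        // toString() only called) if TRACE is enabled, so no guard is needed here.
        LOG.trace("Caching key={}, item={}", cacheKey, cachedItem);

        // An explicit guard still pays off when producing the argument itself is costly.
        if (LOG.isTraceEnabled()) {
          LOG.trace("Caching key={}, item={}", cacheKey, expensiveSummary(cachedItem));
        }
      }

      private static String expensiveSummary(Object item) {
        return "summary-of-" + item;    // stand-in for an expensive computation
      }
    }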
@@ -502,8 +504,10 @@ public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
         // block will use the refCnt of bucketEntry, which means if two HFileBlock mapping to
         // the same BucketEntry, then all of the three will share the same refCnt.
         Cacheable cachedBlock = ioEngine.read(bucketEntry);
-        // RPC start to reference, so retain here.
-        cachedBlock.retain();
+        if (ioEngine.usesSharedMemory()) {
+          // RPC start to reference, so retain here.
+          cachedBlock.retain();
+        }

Review comment (on the usesSharedMemory() check): One big concern here: for an exclusive-memory IOEngine, the refCnt of every bucketEntry will now be 1, i.e. only the reference from the BucketCache and no RPC reference. Then the BucketCache's eviction policy could evict those blocks even though an RPC is still using them; leaving aside the memory-leak issue, the eviction policy would be evicting blocks that RPCs still reference (violating the LRU?).

Reply: The eviction policy compares BucketEntry instances by their accessCounter, so this will not violate the LRU?

         // Update the cache statistics.
         if (updateCacheMetrics) {
           cacheStats.hit(caching, key.isPrimary(), key.getBlockType());
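To make the reference-counting concern concrete, here is a small self-contained toy (SimpleRefCnt is illustrative, not HBase's RefCnt): without an RPC-side retain on a shared reference count, an eviction alone can drop the count to zero and free memory an in-flight RPC is still reading. The patch presumably skips the retain for exclusive-memory engines on the assumption that ioEngine.read hands back a separately allocated copy with its own lifetime.

    import java.util.concurrent.atomic.AtomicInteger;

    // Toy reference counter, illustrative only.
    final class SimpleRefCnt {
      private final AtomicInteger refs = new AtomicInteger(1);  // 1 = the cache's own reference

      void retain() { refs.incrementAndGet(); }

      void release() {
        if (refs.decrementAndGet() == 0) {
          System.out.println("backing memory freed");
        }
      }

      public static void main(String[] args) {
        // Shared-memory style: the served block shares the entry's refCnt, so the RPC retains it.
        SimpleRefCnt shared = new SimpleRefCnt();
        shared.retain();       // RPC starts referencing -> count 2
        shared.release();      // eviction drops the cache reference -> count 1, memory still safe
        shared.release();      // RPC finishes -> count 0, freed now

        // Without the RPC retain, eviction alone already frees memory still in use.
        SimpleRefCnt unprotected = new SimpleRefCnt();
        unprotected.release(); // eviction -> count 0, freed while an RPC may still be reading it
      }
    }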
BucketEntry.java

@@ -80,7 +80,7 @@ class BucketEntry implements HBaseReferenceCounted {
    */
   private final RefCnt refCnt;
   final AtomicBoolean markedAsEvicted;
-  private final ByteBuffAllocator allocator;
+  final ByteBuffAllocator allocator;
 
   /**
    * Time this block was cached. Presumes we are created just before we are added to the cache.
@@ -194,7 +194,10 @@ boolean isRpcRef() {
   }
 
   Cacheable wrapAsCacheable(ByteBuffer[] buffers) throws IOException {
-    ByteBuff buf = ByteBuff.wrap(buffers, this.refCnt);
+    return wrapAsCacheable(ByteBuff.wrap(buffers, this.refCnt));
+  }
+
+  Cacheable wrapAsCacheable(ByteBuff buf) throws IOException {
     return this.deserializerReference().deserialize(buf, allocator);
   }

Review comment (on the new wrapAsCacheable(ByteBuff) overload): That's a good thing, splitting wrapAsCacheable into two methods: the shared-memory IOEngine uses the former one and the exclusive-memory IOEngine uses the latter. Good.

General review comment: Looks like we could make some abstraction between the existing channelRead(...) and the newly introduced fileRead(...)? Similar to ByteBufferArray#read and ByteBufferArray#write. Please take a look.
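One way the abstraction suggested above might look, as a hedged sketch (ChunkedReads, ChannelReader, and readChunked are illustrative names, and the NIO_BUFFER_LIMIT value here is only a stand-in): a single chunking loop parameterized by a reader function, so channelRead and fileRead differ only in the lambda they supply.

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.channels.ReadableByteChannel;

    // Hypothetical sketch of the suggested abstraction; not taken from the patch.
    final class ChunkedReads {
      static final int NIO_BUFFER_LIMIT = 64 * 1024;   // stand-in value for the example

      @FunctionalInterface
      interface ChannelReader<C> {
        // Reads into buf; offset is ignored by stream-style channels.
        int read(C channel, ByteBuffer buf, long offset) throws IOException;
      }

      // One chunking loop shared by channelRead and fileRead.
      static <C> int readChunked(C channel, ByteBuffer buf, long offset, ChannelReader<C> reader)
          throws IOException {
        if (buf.remaining() <= NIO_BUFFER_LIMIT) {
          return reader.read(channel, buf, offset);
        }
        int originalLimit = buf.limit();
        int initialRemaining = buf.remaining();
        int ret = 0;
        while (buf.remaining() > 0) {
          try {
            int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT);
            buf.limit(buf.position() + ioSize);       // clamp this round's transfer size
            ret = reader.read(channel, buf, offset);
            if (ret < ioSize) {
              break;                                  // short read: stop early
            }
            offset += ret;
          } finally {
            buf.limit(originalLimit);                 // restore; position already advanced
          }
        }
        int nBytes = initialRemaining - buf.remaining();
        return nBytes > 0 ? nBytes : ret;
      }

      static int channelRead(ReadableByteChannel channel, ByteBuffer buf) throws IOException {
        return readChunked(channel, buf, 0L, (ch, b, off) -> ch.read(b));
      }

      static int fileRead(FileChannel channel, ByteBuffer buf, long offset) throws IOException {
        return readChunked(channel, buf, offset, (ch, b, off) -> ch.read(b, off));
      }
    }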