netty: add an internal option to disable native buffer (#6619)

This is needed for an internal rollout, where the native memory usage from netty makes tasks more prone to exceeding memory limits.
Kun Zhang 2020-01-21 11:37:02 -08:00 committed by GitHub
parent e7d7c5bde4
commit 74cde7e8b4
7 changed files with 66 additions and 35 deletions
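For context, here is a minimal usage sketch of the option this change adds. It relies only on the internal accessor introduced below (InternalNettyServerBuilder.setForceHeapBuffer); the class name and port are placeholders, not part of this commit.

import io.grpc.Server;
import io.grpc.netty.InternalNettyServerBuilder;
import io.grpc.netty.NettyServerBuilder;

public final class ForceHeapBufferServer {
  public static void main(String[] args) throws Exception {
    NettyServerBuilder builder = NettyServerBuilder.forPort(50051); // placeholder port
    // Internal-only switch added in this commit: route Netty buffer allocations through
    // heap arenas so the transport does not grow the process's native (direct) memory.
    InternalNettyServerBuilder.setForceHeapBuffer(builder, true);
    Server server = builder.build().start();
    server.awaitTermination();
  }
}

Because the accessor lives in InternalNettyServerBuilder rather than on the public builder, the flag is reachable only from code willing to depend on gRPC internals, which matches the internal-rollout intent described above.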

netty/src/main/java/io/grpc/netty/InternalNettyServerBuilder.java

@@ -43,6 +43,10 @@ public final class InternalNettyServerBuilder {
builder.setTracingEnabled(value);
}
public static void setForceHeapBuffer(NettyServerBuilder builder, boolean value) {
builder.setForceHeapBuffer(value);
}
/**
* Sets {@link io.grpc.Channel} and {@link io.netty.channel.EventLoopGroup}s to Nio. A major
* benefit over using existing setters is gRPC will manage the life cycle of {@link

netty/src/main/java/io/grpc/netty/NettyClientTransport.java

@@ -226,7 +226,7 @@ class NettyClientTransport implements ConnectionClientTransport {
ChannelHandler negotiationHandler = negotiator.newHandler(handler);
Bootstrap b = new Bootstrap();
b.option(ALLOCATOR, Utils.getByteBufAllocator());
b.option(ALLOCATOR, Utils.getByteBufAllocator(false));
b.attr(LOGGER_KEY, channelLogger);
b.group(eventLoop);
b.channelFactory(channelFactory);

netty/src/main/java/io/grpc/netty/NettyServer.java

@@ -73,6 +73,7 @@ class NettyServer implements InternalServer, InternalWithLogId {
private final int maxStreamsPerConnection;
private final ObjectPool<? extends EventLoopGroup> bossGroupPool;
private final ObjectPool<? extends EventLoopGroup> workerGroupPool;
private final boolean forceHeapBuffer;
private EventLoopGroup bossGroup;
private EventLoopGroup workerGroup;
private ServerListener listener;
@@ -100,6 +101,7 @@ class NettyServer implements InternalServer, InternalWithLogId {
Map<ChannelOption<?>, ?> channelOptions,
ObjectPool<? extends EventLoopGroup> bossGroupPool,
ObjectPool<? extends EventLoopGroup> workerGroupPool,
boolean forceHeapBuffer,
ProtocolNegotiator protocolNegotiator,
List<? extends ServerStreamTracer.Factory> streamTracerFactories,
TransportTracer.Factory transportTracerFactory,
@@ -115,6 +117,7 @@ class NettyServer implements InternalServer, InternalWithLogId {
this.channelOptions = new HashMap<ChannelOption<?>, Object>(channelOptions);
this.bossGroupPool = checkNotNull(bossGroupPool, "bossGroupPool");
this.workerGroupPool = checkNotNull(workerGroupPool, "workerGroupPool");
this.forceHeapBuffer = forceHeapBuffer;
this.bossGroup = bossGroupPool.getObject();
this.workerGroup = workerGroupPool.getObject();
this.protocolNegotiator = checkNotNull(protocolNegotiator, "protocolNegotiator");
@@ -155,8 +158,8 @@ class NettyServer implements InternalServer, InternalWithLogId {
listener = checkNotNull(serverListener, "serverListener");
ServerBootstrap b = new ServerBootstrap();
b.option(ALLOCATOR, Utils.getByteBufAllocator());
b.childOption(ALLOCATOR, Utils.getByteBufAllocator());
b.option(ALLOCATOR, Utils.getByteBufAllocator(forceHeapBuffer));
b.childOption(ALLOCATOR, Utils.getByteBufAllocator(forceHeapBuffer));
b.group(bossGroup, workerGroup);
b.channelFactory(channelFactory);
// For non-socket based channel, the option will be ignored.

netty/src/main/java/io/grpc/netty/NettyServerBuilder.java

@@ -89,6 +89,7 @@ public final class NettyServerBuilder extends AbstractServerImplBuilder<NettyServerBuilder> {
DEFAULT_BOSS_EVENT_LOOP_GROUP_POOL;
private ObjectPool<? extends EventLoopGroup> workerEventLoopGroupPool =
DEFAULT_WORKER_EVENT_LOOP_GROUP_POOL;
private boolean forceHeapBuffer;
private SslContext sslContext;
private ProtocolNegotiator protocolNegotiator;
private int maxConcurrentCallsPerConnection = Integer.MAX_VALUE;
@@ -268,6 +269,13 @@ public final class NettyServerBuilder extends AbstractServerImplBuilder<NettyServerBuilder> {
return this;
}
/**
* Force using heap buffer when custom allocator is enabled.
*/
void setForceHeapBuffer(boolean value) {
forceHeapBuffer = value;
}
/**
* Sets the TLS context to use for encryption. Providing a context enables encryption. It must
* have been configured with {@link GrpcSslContexts}, but options could have been overridden.
@@ -542,7 +550,7 @@ public final class NettyServerBuilder extends AbstractServerImplBuilder<NettyServerBuilder> {
for (SocketAddress listenAddress : listenAddresses) {
NettyServer transportServer = new NettyServer(
listenAddress, channelFactory, channelOptions, bossEventLoopGroupPool,
workerEventLoopGroupPool, negotiator, streamTracerFactories,
workerEventLoopGroupPool, forceHeapBuffer, negotiator, streamTracerFactories,
getTransportTracerFactory(), maxConcurrentCallsPerConnection, flowControlWindow,
maxMessageSize, maxHeaderListSize, keepAliveTimeInNanos, keepAliveTimeoutInNanos,
maxConnectionIdleInNanos, maxConnectionAgeInNanos, maxConnectionAgeGraceInNanos,

netty/src/main/java/io/grpc/netty/Utils.java

@@ -87,35 +87,13 @@ class Utils {
public static final Resource<EventLoopGroup> DEFAULT_WORKER_EVENT_LOOP_GROUP;
// This class is initialized on first use, thus provides delayed allocator creation.
private static final class ByteBufAllocatorHolder {
private static final ByteBufAllocator allocator;
private static final class ByteBufAllocatorPreferDirectHolder {
private static final ByteBufAllocator allocator = createByteBufAllocator(true);
}
static {
if (Boolean.parseBoolean(
System.getProperty("io.grpc.netty.useCustomAllocator", "true"))) {
int maxOrder;
if (System.getProperty("io.netty.allocator.maxOrder") == null) {
// See the implementation of PooledByteBufAllocator. DEFAULT_MAX_ORDER in there is
// 11, which makes chunk size to be 8192 << 11 = 16 MiB. We want the chunk size to be
// 2MiB, thus reducing the maxOrder to 8.
maxOrder = 8;
} else {
maxOrder = PooledByteBufAllocator.defaultMaxOrder();
}
allocator = new PooledByteBufAllocator(
PooledByteBufAllocator.defaultPreferDirect(),
PooledByteBufAllocator.defaultNumHeapArena(),
PooledByteBufAllocator.defaultNumDirectArena(),
PooledByteBufAllocator.defaultPageSize(),
maxOrder,
PooledByteBufAllocator.defaultTinyCacheSize(),
PooledByteBufAllocator.defaultSmallCacheSize(),
PooledByteBufAllocator.defaultNormalCacheSize(),
PooledByteBufAllocator.defaultUseCacheForAllThreads());
} else {
allocator = ByteBufAllocator.DEFAULT;
}
}
// This class is initialized on first use, thus provides delayed allocator creation.
private static final class ByteBufAllocatorPreferHeapHolder {
private static final ByteBufAllocator allocator = createByteBufAllocator(false);
}
public static final ChannelFactory<? extends ServerChannel> DEFAULT_SERVER_CHANNEL_FACTORY;
@@ -144,8 +122,42 @@ class Utils {
}
}
public static ByteBufAllocator getByteBufAllocator() {
return ByteBufAllocatorHolder.allocator;
public static ByteBufAllocator getByteBufAllocator(boolean forceHeapBuffer) {
if (Boolean.parseBoolean(
System.getProperty("io.grpc.netty.useCustomAllocator", "true"))) {
if (forceHeapBuffer || !PooledByteBufAllocator.defaultPreferDirect()) {
return ByteBufAllocatorPreferHeapHolder.allocator;
} else {
return ByteBufAllocatorPreferDirectHolder.allocator;
}
} else {
return ByteBufAllocator.DEFAULT;
}
}
private static ByteBufAllocator createByteBufAllocator(boolean preferDirect) {
int maxOrder;
if (System.getProperty("io.netty.allocator.maxOrder") == null) {
// See the implementation of PooledByteBufAllocator. DEFAULT_MAX_ORDER in there is
// 11, which makes chunk size to be 8192 << 11 = 16 MiB. We want the chunk size to be
// 2MiB, thus reducing the maxOrder to 8.
maxOrder = 8;
} else {
maxOrder = PooledByteBufAllocator.defaultMaxOrder();
}
return new PooledByteBufAllocator(
preferDirect,
PooledByteBufAllocator.defaultNumHeapArena(),
// Assuming neither gRPC nor netty are using allocator.directBuffer() to request
// specifically for direct buffers, which is true as I just checked, setting arenas to 0
// will make sure no direct buffer is ever created.
preferDirect ? PooledByteBufAllocator.defaultNumDirectArena() : 0,
PooledByteBufAllocator.defaultPageSize(),
maxOrder,
PooledByteBufAllocator.defaultTinyCacheSize(),
PooledByteBufAllocator.defaultSmallCacheSize(),
PooledByteBufAllocator.defaultNormalCacheSize(),
PooledByteBufAllocator.defaultUseCacheForAllThreads());
}
public static Metadata convertHeaders(Http2Headers http2Headers) {
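The maxOrder comments above are about chunk size: a PooledByteBufAllocator chunk is pageSize << maxOrder, so the library default of 8192 << 11 is 16 MiB per chunk, while maxOrder 8 gives 8192 << 8 = 2 MiB. The sketch below is a standalone approximation of what createByteBufAllocator(false) produces; it hard-codes maxOrder 8 and skips the io.netty.allocator.maxOrder override, so it is illustrative rather than gRPC's exact code.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;

public final class HeapOnlyAllocatorSketch {
  public static void main(String[] args) {
    // pageSize << maxOrder is the chunk size: 8192 << 8 = 2 MiB here,
    // versus Netty's default 8192 << 11 = 16 MiB.
    PooledByteBufAllocator heapOnly = new PooledByteBufAllocator(
        /* preferDirect= */ false,
        PooledByteBufAllocator.defaultNumHeapArena(),
        /* nDirectArena= */ 0, // zero direct arenas: the pool never creates native chunks
        PooledByteBufAllocator.defaultPageSize(),
        /* maxOrder= */ 8,
        PooledByteBufAllocator.defaultTinyCacheSize(),
        PooledByteBufAllocator.defaultSmallCacheSize(),
        PooledByteBufAllocator.defaultNormalCacheSize(),
        PooledByteBufAllocator.defaultUseCacheForAllThreads());

    ByteBuf buf = heapOnly.buffer(1024);
    System.out.println("direct? " + buf.isDirect()); // false: the allocation stays on the heap
    buf.release();
  }
}

As the in-code comment notes, zeroing the direct arenas only helps because neither gRPC nor Netty asks this allocator for direct buffers explicitly; a caller that invoked directBuffer() would still get (unpooled) direct memory.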

netty/src/test/java/io/grpc/netty/NettyClientTransportTest.java

@@ -769,7 +769,7 @@ public class NettyClientTransportTest {
TestUtils.testServerAddress(new InetSocketAddress(0)),
new ReflectiveChannelFactory<>(NioServerSocketChannel.class),
new HashMap<ChannelOption<?>, Object>(),
new FixedObjectPool<>(group), new FixedObjectPool<>(group), negotiator,
new FixedObjectPool<>(group), new FixedObjectPool<>(group), false, negotiator,
Collections.<ServerStreamTracer.Factory>emptyList(),
TransportTracer.getDefaultFactory(),
maxStreamsPerConnection,

netty/src/test/java/io/grpc/netty/NettyServerTest.java

@@ -94,6 +94,7 @@ public class NettyServerTest {
new HashMap<ChannelOption<?>, Object>(),
new FixedObjectPool<>(eventLoop),
new FixedObjectPool<>(eventLoop),
false,
protocolNegotiator,
Collections.<ServerStreamTracer.Factory>emptyList(),
TransportTracer.getDefaultFactory(),
@@ -138,6 +139,7 @@ public class NettyServerTest {
new HashMap<ChannelOption<?>, Object>(),
new FixedObjectPool<>(eventLoop),
new FixedObjectPool<>(eventLoop),
false,
ProtocolNegotiators.plaintext(),
Collections.<ServerStreamTracer.Factory>emptyList(),
TransportTracer.getDefaultFactory(),
@@ -176,6 +178,7 @@ public class NettyServerTest {
channelOptions,
new FixedObjectPool<>(eventLoop),
new FixedObjectPool<>(eventLoop),
false,
ProtocolNegotiators.plaintext(),
Collections.<ServerStreamTracer.Factory>emptyList(),
TransportTracer.getDefaultFactory(),
@@ -226,6 +229,7 @@ public class NettyServerTest {
new HashMap<ChannelOption<?>, Object>(),
new FixedObjectPool<>(eventLoop),
new FixedObjectPool<>(eventLoop),
false,
ProtocolNegotiators.plaintext(),
Collections.<ServerStreamTracer.Factory>emptyList(),
TransportTracer.getDefaultFactory(),