AbstractBenchmark.java
/*
* Copyright 2015 The gRPC Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.grpc.benchmarks.netty;
import io.grpc.CallOptions;
import io.grpc.ClientCall;
import io.grpc.InsecureServerCredentials;
import io.grpc.ManagedChannel;
import io.grpc.Metadata;
import io.grpc.MethodDescriptor;
import io.grpc.MethodDescriptor.MethodType;
import io.grpc.Server;
import io.grpc.ServerCall;
import io.grpc.ServerCallHandler;
import io.grpc.ServerCredentials;
import io.grpc.ServerServiceDefinition;
import io.grpc.ServiceDescriptor;
import io.grpc.Status;
import io.grpc.benchmarks.ByteBufOutputMarshaller;
import io.grpc.netty.NegotiationType;
import io.grpc.netty.NettyChannelBuilder;
import io.grpc.netty.NettyServerBuilder;
import io.grpc.stub.ClientCalls;
import io.grpc.stub.StreamObserver;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.local.LocalAddress;
import io.netty.channel.local.LocalChannel;
import io.netty.channel.local.LocalServerChannel;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.concurrent.DefaultThreadFactory;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
import java.net.ServerSocket;
import java.net.SocketAddress;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.Enumeration;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Abstract base class for Netty end-to-end benchmarks.
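*
* <p>A minimal usage sketch; {@code MyBenchmark} and the parameter choices below are
* illustrative only, not prescribed by this class:
*
* <pre>{@code
* class MyBenchmark extends AbstractBenchmark {
*   void run() throws Exception {
*     setup(ExecutorType.DEFAULT, ExecutorType.DEFAULT,
*         MessageSize.SMALL, MessageSize.SMALL, FlowWindowSize.MEDIUM,
*         ChannelType.NIO, 100, 1);
*     AtomicLong counter = new AtomicLong();
*     AtomicBoolean done = new AtomicBoolean();
*     startUnaryCalls(1, counter, done, 1);
*     // ... sample counter over the measurement window ...
*     done.set(true);
*     teardown();
*   }
* }
* }</pre>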
*/
public abstract class AbstractBenchmark {
private static final Logger logger = Logger.getLogger(AbstractBenchmark.class.getName());
/**
* Standard message sizes.
*/
public enum MessageSize {
// Max out at 1MB to avoid creating messages larger than Netty's buffer pool can handle
// by default
SMALL(10), MEDIUM(1024), LARGE(65536), JUMBO(1048576);
private final int bytes;
MessageSize(int bytes) {
this.bytes = bytes;
}
public int bytes() {
return bytes;
}
}
/**
* Standard flow-control window sizes.
*/
public enum FlowWindowSize {
SMALL(16383), MEDIUM(65535), LARGE(1048575), JUMBO(8388607);
private final int bytes;
FlowWindowSize(int bytes) {
this.bytes = bytes;
}
public int bytes() {
return bytes;
}
}
/**
* Executor types used by the client channel and the server.
*/
public enum ExecutorType {
DEFAULT, DIRECT;
}
/**
* Supported channel types.
*/
public enum ChannelType {
NIO, LOCAL;
}
private static final CallOptions CALL_OPTIONS = CallOptions.DEFAULT;
private static final InetAddress BENCHMARK_ADDR = buildBenchmarkAddr();
/**
* Resolve the address bound to the benchmark interface. Currently we assume it's a
* child interface of the loopback interface with the term 'benchmark' in its name.
*
* <p>This allows traffic shaping to be applied to an IP address and lets the benchmarks
* detect its presence and use it. E.g. for Linux we can apply netem to a specific IP to
* do traffic shaping, bind that IP to the loopback adapter and then apply a label to that
* binding so that it appears as a child interface.
*
* <pre>
* sudo tc qdisc del dev lo root
* sudo tc qdisc add dev lo root handle 1: prio
* sudo tc qdisc add dev lo parent 1:1 handle 2: netem delay 0.1ms rate 10gbit
* sudo tc filter add dev lo parent 1:0 protocol ip prio 1 \
* u32 match ip dst 127.127.127.127 flowid 2:1
* sudo ip addr add dev lo 127.127.127.127/32 label lo:benchmark
* </pre>
*/
@SuppressWarnings("JdkObsolete") // No choice but to use Enumeration
private static InetAddress buildBenchmarkAddr() {
InetAddress tmp = null;
try {
Enumeration<NetworkInterface> networkInterfaces = NetworkInterface.getNetworkInterfaces();
outer: while (networkInterfaces.hasMoreElements()) {
NetworkInterface networkInterface = networkInterfaces.nextElement();
if (!networkInterface.isLoopback()) {
continue;
}
Enumeration<NetworkInterface> subInterfaces = networkInterface.getSubInterfaces();
while (subInterfaces.hasMoreElements()) {
NetworkInterface subLoopback = subInterfaces.nextElement();
if (subLoopback.getDisplayName().contains("benchmark")) {
tmp = subLoopback.getInetAddresses().nextElement();
System.out.println("\nResolved benchmark address to " + tmp + " on "
+ subLoopback.getDisplayName() + "\n\n");
break outer;
}
}
}
} catch (SocketException se) {
System.out.println("\nWARNING: Error trying to resolve benchmark interface \n" + se);
}
if (tmp == null) {
try {
System.out.println(
"\nWARNING: Unable to resolve benchmark interface, defaulting to localhost");
tmp = InetAddress.getLocalHost();
} catch (UnknownHostException uhe) {
throw new RuntimeException(uhe);
}
}
return tmp;
}
protected Server server;
protected ByteBuf request;
protected ByteBuf response;
protected MethodDescriptor<ByteBuf, ByteBuf> unaryMethod;
private MethodDescriptor<ByteBuf, ByteBuf> pingPongMethod;
private MethodDescriptor<ByteBuf, ByteBuf> flowControlledStreaming;
protected ManagedChannel[] channels;
protected AbstractBenchmark() {
}
/**
* Initialize the benchmark environment: build and start the server, then create the client channels.
*/
public void setup(ExecutorType clientExecutor,
ExecutorType serverExecutor,
MessageSize requestSize,
MessageSize responseSize,
FlowWindowSize windowSize,
ChannelType channelType,
int maxConcurrentStreams,
int channelCount) throws Exception {
ServerCredentials serverCreds = InsecureServerCredentials.create();
NettyServerBuilder serverBuilder;
NettyChannelBuilder channelBuilder;
if (channelType == ChannelType.LOCAL) {
LocalAddress address = new LocalAddress("netty-e2e-benchmark");
serverBuilder = NettyServerBuilder.forAddress(address, serverCreds);
serverBuilder.channelType(LocalServerChannel.class);
channelBuilder = NettyChannelBuilder.forAddress(address);
channelBuilder.channelType(LocalChannel.class);
} else {
ServerSocket sock = new ServerSocket();
// Pick a port using an ephemeral socket.
sock.bind(new InetSocketAddress(BENCHMARK_ADDR, 0));
SocketAddress address = sock.getLocalSocketAddress();
sock.close();
serverBuilder = NettyServerBuilder.forAddress(address, serverCreds)
.channelType(NioServerSocketChannel.class);
channelBuilder = NettyChannelBuilder.forAddress(address).channelType(NioSocketChannel.class);
}
if (serverExecutor == ExecutorType.DIRECT) {
serverBuilder.directExecutor();
}
if (clientExecutor == ExecutorType.DIRECT) {
channelBuilder.directExecutor();
}
// Always use a different worker group from the client.
ThreadFactory serverThreadFactory = new DefaultThreadFactory("STF pool", true /* daemon */);
serverBuilder.workerEventLoopGroup(new NioEventLoopGroup(0, serverThreadFactory));
serverBuilder.bossEventLoopGroup(new NioEventLoopGroup(1, serverThreadFactory));
// Always set connection and stream window size to the same value
serverBuilder.flowControlWindow(windowSize.bytes());
channelBuilder.flowControlWindow(windowSize.bytes());
channelBuilder.negotiationType(NegotiationType.PLAINTEXT);
serverBuilder.maxConcurrentCallsPerConnection(maxConcurrentStreams);
// Create buffers of the desired size for requests and responses.
PooledByteBufAllocator alloc = PooledByteBufAllocator.DEFAULT;
// Use a heap buffer for now, since MessageFramer doesn't know how to directly convert this
// into a WritableBuffer
// TODO(carl-mastrangelo): convert this into a regular buffer() call. See
// https://github.com/grpc/grpc-java/issues/2062#issuecomment-234646216
request = alloc.heapBuffer(requestSize.bytes());
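// Advance the writer index so that slice() exposes the payload bytes without actually writing them.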
request.writerIndex(request.capacity() - 1);
response = alloc.heapBuffer(responseSize.bytes());
response.writerIndex(response.capacity() - 1);
// Simple unary method that sends and receives a Netty ByteBuf
unaryMethod = MethodDescriptor.<ByteBuf, ByteBuf>newBuilder()
.setType(MethodType.UNARY)
.setFullMethodName("benchmark/unary")
.setRequestMarshaller(new ByteBufOutputMarshaller())
.setResponseMarshaller(new ByteBufOutputMarshaller())
.build();
pingPongMethod = unaryMethod.toBuilder()
.setType(MethodType.BIDI_STREAMING)
.setFullMethodName("benchmark/pingPong")
.build();
flowControlledStreaming = pingPongMethod.toBuilder()
.setFullMethodName("benchmark/flowControlledStreaming")
.build();
// Server implementation of unary & streaming methods
serverBuilder.addService(
ServerServiceDefinition.builder(
new ServiceDescriptor("benchmark",
unaryMethod,
pingPongMethod,
flowControlledStreaming))
.addMethod(unaryMethod, new ServerCallHandler<ByteBuf, ByteBuf>() {
@Override
public ServerCall.Listener<ByteBuf> startCall(
final ServerCall<ByteBuf, ByteBuf> call,
Metadata headers) {
call.sendHeaders(new Metadata());
call.request(1);
return new ServerCall.Listener<ByteBuf>() {
@Override
public void onMessage(ByteBuf message) {
// Release the request and reply with the canned response.
message.release();
call.sendMessage(response.slice());
}
@Override
public void onHalfClose() {
call.close(Status.OK, new Metadata());
}
@Override
public void onCancel() {
}
@Override
public void onComplete() {
}
};
}
})
.addMethod(pingPongMethod, new ServerCallHandler<ByteBuf, ByteBuf>() {
@Override
public ServerCall.Listener<ByteBuf> startCall(
final ServerCall<ByteBuf, ByteBuf> call,
Metadata headers) {
call.sendHeaders(new Metadata());
call.request(1);
return new ServerCall.Listener<ByteBuf>() {
@Override
public void onMessage(ByteBuf message) {
message.release();
call.sendMessage(response.slice());
// Request next message
call.request(1);
}
@Override
public void onHalfClose() {
call.close(Status.OK, new Metadata());
}
@Override
public void onCancel() {
}
@Override
public void onComplete() {
}
};
}
})
.addMethod(flowControlledStreaming, new ServerCallHandler<ByteBuf, ByteBuf>() {
@Override
public ServerCall.Listener<ByteBuf> startCall(
final ServerCall<ByteBuf, ByteBuf> call,
Metadata headers) {
call.sendHeaders(new Metadata());
call.request(1);
return new ServerCall.Listener<ByteBuf>() {
@Override
public void onMessage(ByteBuf message) {
message.release();
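// Flood responses until the transport's flow-control window fills up.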
while (call.isReady()) {
call.sendMessage(response.slice());
}
// Request next message
call.request(1);
}
@Override
public void onHalfClose() {
call.close(Status.OK, new Metadata());
}
@Override
public void onCancel() {
}
@Override
public void onComplete() {
}
@Override
public void onReady() {
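// The transport has drained; fill the stream again until it is no longer ready.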
while (call.isReady()) {
call.sendMessage(response.slice());
}
}
};
}
})
.build());
// Build and start the clients and servers
server = serverBuilder.build();
server.start();
channels = new ManagedChannel[channelCount];
ThreadFactory clientThreadFactory = new DefaultThreadFactory("CTF pool", true /* daemon */);
for (int i = 0; i < channelCount; i++) {
// Use a dedicated event-loop for each channel
channels[i] = channelBuilder
.eventLoopGroup(new NioEventLoopGroup(1, clientThreadFactory))
.build();
}
}
/**
* Start a continuously executing set of unary calls that will terminate when
* {@code done.get()} is true. Each completed call will increment the counter by the specified
* delta, which benchmarks can use to measure QPS or bandwidth.
*/
protected void startUnaryCalls(int callsPerChannel,
final AtomicLong counter,
final AtomicBoolean done,
final long counterDelta) {
for (final ManagedChannel channel : channels) {
for (int i = 0; i < callsPerChannel; i++) {
StreamObserver<ByteBuf> observer = new StreamObserver<ByteBuf>() {
@Override
public void onNext(ByteBuf value) {
counter.addAndGet(counterDelta);
}
@Override
public void onError(Throwable t) {
done.set(true);
}
@Override
public void onCompleted() {
if (!done.get()) {
ByteBuf slice = request.slice();
ClientCalls.asyncUnaryCall(
channel.newCall(unaryMethod, CALL_OPTIONS), slice, this);
}
}
};
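// Bootstrap the first call: onCompleted starts a new unary call whenever the
// previous one completes and done is still false.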
observer.onCompleted();
}
}
}
/**
* Start a continuously executing set of duplex streaming ping-pong calls that will terminate when
* {@code done.get()} is true. Each ping-pong round trip increments the counter by the specified
* delta, which benchmarks can use to measure messages per second or bandwidth.
*/
protected CountDownLatch startStreamingCalls(int callsPerChannel, final AtomicLong counter,
final AtomicBoolean record, final AtomicBoolean done, final long counterDelta) {
final CountDownLatch latch = new CountDownLatch(callsPerChannel * channels.length);
for (final ManagedChannel channel : channels) {
for (int i = 0; i < callsPerChannel; i++) {
final ClientCall<ByteBuf, ByteBuf> streamingCall =
channel.newCall(pingPongMethod, CALL_OPTIONS);
final AtomicReference<StreamObserver<ByteBuf>> requestObserverRef =
new AtomicReference<>();
final AtomicBoolean ignoreMessages = new AtomicBoolean();
StreamObserver<ByteBuf> requestObserver = ClientCalls.asyncBidiStreamingCall(
streamingCall,
new StreamObserver<ByteBuf>() {
@Override
public void onNext(ByteBuf value) {
if (done.get()) {
if (!ignoreMessages.getAndSet(true)) {
requestObserverRef.get().onCompleted();
}
return;
}
requestObserverRef.get().onNext(request.slice());
if (record.get()) {
counter.addAndGet(counterDelta);
}
// No explicit request() is needed: ClientCalls sets up automatic inbound flow
// control for this response observer.
}
@Override
public void onError(Throwable t) {
logger.log(Level.WARNING, "call error", t);
latch.countDown();
}
@Override
public void onCompleted() {
latch.countDown();
}
});
requestObserverRef.set(requestObserver);
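// Prime the stream with two outstanding messages so one is always in flight
// while the other is being echoed back.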
requestObserver.onNext(request.slice());
requestObserver.onNext(request.slice());
}
}
return latch;
}
/**
* Start a continuously executing set of duplex streaming calls in which the server sends
* responses as fast as flow control permits; the calls terminate when {@code done.get()} is
* true. Each received message increments the counter by the specified delta, which benchmarks
* can use to measure messages per second or bandwidth.
*/
protected CountDownLatch startFlowControlledStreamingCalls(int callsPerChannel,
final AtomicLong counter, final AtomicBoolean record, final AtomicBoolean done,
final long counterDelta) {
final CountDownLatch latch = new CountDownLatch(callsPerChannel * channels.length);
for (final ManagedChannel channel : channels) {
for (int i = 0; i < callsPerChannel; i++) {
final ClientCall<ByteBuf, ByteBuf> streamingCall =
channel.newCall(flowControlledStreaming, CALL_OPTIONS);
final AtomicReference<StreamObserver<ByteBuf>> requestObserverRef =
new AtomicReference<>();
final AtomicBoolean ignoreMessages = new AtomicBoolean();
StreamObserver<ByteBuf> requestObserver = ClientCalls.asyncBidiStreamingCall(
streamingCall,
new StreamObserver<ByteBuf>() {
@Override
public void onNext(ByteBuf value) {
StreamObserver<ByteBuf> obs = requestObserverRef.get();
if (done.get()) {
if (!ignoreMessages.getAndSet(true)) {
obs.onCompleted();
}
return;
}
if (record.get()) {
counter.addAndGet(counterDelta);
}
// No explicit request() is needed: ClientCalls sets up automatic inbound flow
// control for this response observer.
}
@Override
public void onError(Throwable t) {
logger.log(Level.WARNING, "call error", t);
latch.countDown();
}
@Override
public void onCompleted() {
latch.countDown();
}
});
requestObserverRef.set(requestObserver);
// Add some outstanding requests to ensure the server is filling the connection
streamingCall.request(5);
requestObserver.onNext(request.slice());
}
}
return latch;
}
/**
* Shut down all the client channels and then shut down the server.
*/
protected void teardown() throws Exception {
logger.fine("shutting down channels");
for (ManagedChannel channel : channels) {
channel.shutdown();
}
logger.fine("shutting down server");
server.shutdown();
if (!server.awaitTermination(5, TimeUnit.SECONDS)) {
logger.warning("Failed to shutdown server");
}
logger.fine("server shut down");
for (ManagedChannel channel : channels) {
if (!channel.awaitTermination(1, TimeUnit.SECONDS)) {
logger.warning("Failed to shutdown client");
}
}
logger.fine("channels shut down");
}
}