benchmark service: sync protos with c-core 070a8ee (#3108)

Sync our protos with the definitions in c-core as of commit 070a8ee.
After this, I will add the workloads that exist in c-core but are still missing in Java.
zpencer 2017-06-19 13:25:33 -07:00 committed by GitHub
parent 2b1eee90e5
commit c3269f296f
10 changed files with 12895 additions and 2640 deletions

View File

@@ -51,6 +51,42 @@ public final class BenchmarkServiceGrpc {
.setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.grpc.benchmarks.proto.Messages.SimpleResponse.getDefaultInstance()))
.build();
@io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901")
public static final io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest,
io.grpc.benchmarks.proto.Messages.SimpleResponse> METHOD_STREAMING_FROM_CLIENT =
io.grpc.MethodDescriptor.<io.grpc.benchmarks.proto.Messages.SimpleRequest, io.grpc.benchmarks.proto.Messages.SimpleResponse>newBuilder()
.setType(io.grpc.MethodDescriptor.MethodType.CLIENT_STREAMING)
.setFullMethodName(generateFullMethodName(
"grpc.testing.BenchmarkService", "StreamingFromClient"))
.setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.grpc.benchmarks.proto.Messages.SimpleRequest.getDefaultInstance()))
.setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.grpc.benchmarks.proto.Messages.SimpleResponse.getDefaultInstance()))
.build();
@io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901")
public static final io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest,
io.grpc.benchmarks.proto.Messages.SimpleResponse> METHOD_STREAMING_FROM_SERVER =
io.grpc.MethodDescriptor.<io.grpc.benchmarks.proto.Messages.SimpleRequest, io.grpc.benchmarks.proto.Messages.SimpleResponse>newBuilder()
.setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING)
.setFullMethodName(generateFullMethodName(
"grpc.testing.BenchmarkService", "StreamingFromServer"))
.setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.grpc.benchmarks.proto.Messages.SimpleRequest.getDefaultInstance()))
.setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.grpc.benchmarks.proto.Messages.SimpleResponse.getDefaultInstance()))
.build();
@io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901")
public static final io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest,
io.grpc.benchmarks.proto.Messages.SimpleResponse> METHOD_STREAMING_BOTH_WAYS =
io.grpc.MethodDescriptor.<io.grpc.benchmarks.proto.Messages.SimpleRequest, io.grpc.benchmarks.proto.Messages.SimpleResponse>newBuilder()
.setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING)
.setFullMethodName(generateFullMethodName(
"grpc.testing.BenchmarkService", "StreamingBothWays"))
.setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.grpc.benchmarks.proto.Messages.SimpleRequest.getDefaultInstance()))
.setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.grpc.benchmarks.proto.Messages.SimpleResponse.getDefaultInstance()))
.build();
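The three descriptors above carry everything the runtime routes on: the call type, the wire-level method name produced by generateFullMethodName (service and method joined by a slash), and the protobuf marshallers. A minimal sketch of inspecting one of them, assuming only the constants defined in this file:

import io.grpc.MethodDescriptor;
import io.grpc.benchmarks.proto.BenchmarkServiceGrpc;

final class DescriptorCheck {
  public static void main(String[] args) {
    // The full method name is "<package.Service>/<Method>", which is what appears on the wire.
    MethodDescriptor<?, ?> md = BenchmarkServiceGrpc.METHOD_STREAMING_FROM_CLIENT;
    System.out.println(md.getFullMethodName()); // grpc.testing.BenchmarkService/StreamingFromClient
    System.out.println(md.getType());           // CLIENT_STREAMING
  }
}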
/**
* Creates a new async stub that supports all call types for the service
@@ -92,8 +128,9 @@ public final class BenchmarkServiceGrpc {
/**
* <pre>
* One request followed by one response.
* The server returns the client payload as-is.
* Repeated sequence of one request followed by one response.
* Should be called streaming ping-pong
* The server returns the client payload as-is on each response
* </pre>
*/
public io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleRequest> streamingCall(
@@ -101,6 +138,39 @@ public final class BenchmarkServiceGrpc {
return asyncUnimplementedStreamingCall(METHOD_STREAMING_CALL, responseObserver);
}
/**
* <pre>
* Single-sided unbounded streaming from client to server
* The server returns the client payload as-is once the client does WritesDone
* </pre>
*/
public io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleRequest> streamingFromClient(
io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleResponse> responseObserver) {
return asyncUnimplementedStreamingCall(METHOD_STREAMING_FROM_CLIENT, responseObserver);
}
/**
* <pre>
* Single-sided unbounded streaming from server to client
* The server repeatedly returns the client payload as-is
* </pre>
*/
public void streamingFromServer(io.grpc.benchmarks.proto.Messages.SimpleRequest request,
io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleResponse> responseObserver) {
asyncUnimplementedUnaryCall(METHOD_STREAMING_FROM_SERVER, responseObserver);
}
/**
* <pre>
* Two-sided unbounded streaming between server to client
* Both sides send the content of their own choice to the other
* </pre>
*/
public io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleRequest> streamingBothWays(
io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleResponse> responseObserver) {
return asyncUnimplementedStreamingCall(METHOD_STREAMING_BOTH_WAYS, responseObserver);
}
@java.lang.Override public final io.grpc.ServerServiceDefinition bindService() {
return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor())
.addMethod(
@@ -117,6 +187,27 @@ public final class BenchmarkServiceGrpc {
io.grpc.benchmarks.proto.Messages.SimpleRequest,
io.grpc.benchmarks.proto.Messages.SimpleResponse>(
this, METHODID_STREAMING_CALL)))
.addMethod(
METHOD_STREAMING_FROM_CLIENT,
asyncClientStreamingCall(
new MethodHandlers<
io.grpc.benchmarks.proto.Messages.SimpleRequest,
io.grpc.benchmarks.proto.Messages.SimpleResponse>(
this, METHODID_STREAMING_FROM_CLIENT)))
.addMethod(
METHOD_STREAMING_FROM_SERVER,
asyncServerStreamingCall(
new MethodHandlers<
io.grpc.benchmarks.proto.Messages.SimpleRequest,
io.grpc.benchmarks.proto.Messages.SimpleResponse>(
this, METHODID_STREAMING_FROM_SERVER)))
.addMethod(
METHOD_STREAMING_BOTH_WAYS,
asyncBidiStreamingCall(
new MethodHandlers<
io.grpc.benchmarks.proto.Messages.SimpleRequest,
io.grpc.benchmarks.proto.Messages.SimpleResponse>(
this, METHODID_STREAMING_BOTH_WAYS)))
.build();
}
}
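The three new default methods above answer UNIMPLEMENTED until a benchmark server overrides them. A minimal sketch of a server honoring the documented semantics, assuming the usual generated base class BenchmarkServiceGrpc.BenchmarkServiceImplBase (only its methods, not its name, appear in this excerpt):

import com.google.protobuf.ByteString;
import io.grpc.benchmarks.proto.BenchmarkServiceGrpc;
import io.grpc.benchmarks.proto.Messages;
import io.grpc.stub.StreamObserver;

public class SketchBenchmarkService extends BenchmarkServiceGrpc.BenchmarkServiceImplBase {

  private static Messages.SimpleResponse echo(Messages.SimpleRequest request) {
    // Build a zeroed payload of the size the client asked for via response_size.
    return Messages.SimpleResponse.newBuilder()
        .setPayload(Messages.Payload.newBuilder()
            .setBody(ByteString.copyFrom(new byte[request.getResponseSize()])))
        .build();
  }

  @Override
  public StreamObserver<Messages.SimpleRequest> streamingFromClient(
      final StreamObserver<Messages.SimpleResponse> responseObserver) {
    // Client-streaming: reply exactly once, after the client half-closes.
    return new StreamObserver<Messages.SimpleRequest>() {
      private Messages.SimpleRequest last = Messages.SimpleRequest.getDefaultInstance();
      @Override public void onNext(Messages.SimpleRequest request) { last = request; }
      @Override public void onError(Throwable t) { responseObserver.onError(t); }
      @Override public void onCompleted() {
        responseObserver.onNext(echo(last));
        responseObserver.onCompleted();
      }
    };
  }

  @Override
  public void streamingFromServer(Messages.SimpleRequest request,
      StreamObserver<Messages.SimpleResponse> responseObserver) {
    // Server-streaming: unbounded in the real workload, bounded here so the sketch terminates.
    for (int i = 0; i < 10; i++) {
      responseObserver.onNext(echo(request));
    }
    responseObserver.onCompleted();
  }

  @Override
  public StreamObserver<Messages.SimpleRequest> streamingBothWays(
      final StreamObserver<Messages.SimpleResponse> responseObserver) {
    // Bidirectional: both sides write independently; this sketch just echoes each request.
    return new StreamObserver<Messages.SimpleRequest>() {
      @Override public void onNext(Messages.SimpleRequest request) {
        responseObserver.onNext(echo(request));
      }
      @Override public void onError(Throwable t) { responseObserver.onError(t); }
      @Override public void onCompleted() { responseObserver.onCompleted(); }
    };
  }
}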
@@ -153,8 +244,9 @@ public final class BenchmarkServiceGrpc {
/**
* <pre>
* One request followed by one response.
* The server returns the client payload as-is.
* Repeated sequence of one request followed by one response.
* Should be called streaming ping-pong
* The server returns the client payload as-is on each response
* </pre>
*/
public io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleRequest> streamingCall(
@@ -162,6 +254,42 @@ public final class BenchmarkServiceGrpc {
return asyncBidiStreamingCall(
getChannel().newCall(METHOD_STREAMING_CALL, getCallOptions()), responseObserver);
}
/**
* <pre>
* Single-sided unbounded streaming from client to server
* The server returns the client payload as-is once the client does WritesDone
* </pre>
*/
public io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleRequest> streamingFromClient(
io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleResponse> responseObserver) {
return asyncClientStreamingCall(
getChannel().newCall(METHOD_STREAMING_FROM_CLIENT, getCallOptions()), responseObserver);
}
/**
* <pre>
* Single-sided unbounded streaming from server to client
* The server repeatedly returns the client payload as-is
* </pre>
*/
public void streamingFromServer(io.grpc.benchmarks.proto.Messages.SimpleRequest request,
io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleResponse> responseObserver) {
asyncServerStreamingCall(
getChannel().newCall(METHOD_STREAMING_FROM_SERVER, getCallOptions()), request, responseObserver);
}
/**
* <pre>
* Two-sided unbounded streaming between server to client
* Both sides send the content of their own choice to the other
* </pre>
*/
public io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleRequest> streamingBothWays(
io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleResponse> responseObserver) {
return asyncBidiStreamingCall(
getChannel().newCall(METHOD_STREAMING_BOTH_WAYS, getCallOptions()), responseObserver);
}
}
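On the client side, the async stub exposes the same calls. A minimal sketch of driving StreamingFromClient, assuming a benchmark server is reachable at the placeholder address localhost:50051:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.benchmarks.proto.BenchmarkServiceGrpc;
import io.grpc.benchmarks.proto.Messages;
import io.grpc.stub.StreamObserver;

final class StreamingFromClientExample {
  public static void main(String[] args) throws InterruptedException {
    ManagedChannel channel =
        ManagedChannelBuilder.forTarget("localhost:50051").usePlaintext(true).build();
    BenchmarkServiceGrpc.BenchmarkServiceStub stub = BenchmarkServiceGrpc.newStub(channel);

    final CountDownLatch done = new CountDownLatch(1);
    // The server answers exactly once, after the client half-closes the request stream.
    StreamObserver<Messages.SimpleRequest> requests = stub.streamingFromClient(
        new StreamObserver<Messages.SimpleResponse>() {
          @Override public void onNext(Messages.SimpleResponse response) {
            System.out.println("response payload bytes: " + response.getPayload().getBody().size());
          }
          @Override public void onError(Throwable t) { done.countDown(); }
          @Override public void onCompleted() { done.countDown(); }
        });

    Messages.SimpleRequest request =
        Messages.SimpleRequest.newBuilder().setResponseSize(64).build();
    for (int i = 0; i < 100; i++) {
      requests.onNext(request);  // unbounded client-to-server stream in the real workload
    }
    requests.onCompleted();      // the Java counterpart of C-core's WritesDone
    done.await(10, TimeUnit.SECONDS);
    channel.shutdownNow();
  }
}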
/**
@@ -192,6 +320,18 @@ public final class BenchmarkServiceGrpc {
return blockingUnaryCall(
getChannel(), METHOD_UNARY_CALL, getCallOptions(), request);
}
/**
* <pre>
* Single-sided unbounded streaming from server to client
* The server repeatedly returns the client payload as-is
* </pre>
*/
public java.util.Iterator<io.grpc.benchmarks.proto.Messages.SimpleResponse> streamingFromServer(
io.grpc.benchmarks.proto.Messages.SimpleRequest request) {
return blockingServerStreamingCall(
getChannel(), METHOD_STREAMING_FROM_SERVER, getCallOptions(), request);
}
}
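The blocking stub only gains StreamingFromServer, since client-streaming and bidirectional calls have no blocking form. A minimal sketch of consuming it, again assuming a server at the placeholder localhost:50051; because the stream is unbounded, the client decides when to stop reading:

import java.util.Iterator;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.benchmarks.proto.BenchmarkServiceGrpc;
import io.grpc.benchmarks.proto.Messages;

final class StreamingFromServerExample {
  public static void main(String[] args) {
    ManagedChannel channel =
        ManagedChannelBuilder.forTarget("localhost:50051").usePlaintext(true).build();
    BenchmarkServiceGrpc.BenchmarkServiceBlockingStub stub =
        BenchmarkServiceGrpc.newBlockingStub(channel);

    Iterator<Messages.SimpleResponse> responses = stub.streamingFromServer(
        Messages.SimpleRequest.newBuilder().setResponseSize(64).build());
    for (int i = 0; i < 10 && responses.hasNext(); i++) {
      responses.next();  // each element is one SimpleResponse echoed by the server
    }
    channel.shutdownNow();  // tears down the still-open server stream
  }
}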
/**
@@ -226,7 +366,10 @@ public final class BenchmarkServiceGrpc {
}
private static final int METHODID_UNARY_CALL = 0;
private static final int METHODID_STREAMING_CALL = 1;
private static final int METHODID_STREAMING_FROM_SERVER = 1;
private static final int METHODID_STREAMING_CALL = 2;
private static final int METHODID_STREAMING_FROM_CLIENT = 3;
private static final int METHODID_STREAMING_BOTH_WAYS = 4;
private static final class MethodHandlers<Req, Resp> implements
io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>,
@@ -249,6 +392,10 @@ public final class BenchmarkServiceGrpc {
serviceImpl.unaryCall((io.grpc.benchmarks.proto.Messages.SimpleRequest) request,
(io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleResponse>) responseObserver);
break;
case METHODID_STREAMING_FROM_SERVER:
serviceImpl.streamingFromServer((io.grpc.benchmarks.proto.Messages.SimpleRequest) request,
(io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleResponse>) responseObserver);
break;
default:
throw new AssertionError();
}
@@ -262,6 +409,12 @@ public final class BenchmarkServiceGrpc {
case METHODID_STREAMING_CALL:
return (io.grpc.stub.StreamObserver<Req>) serviceImpl.streamingCall(
(io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleResponse>) responseObserver);
case METHODID_STREAMING_FROM_CLIENT:
return (io.grpc.stub.StreamObserver<Req>) serviceImpl.streamingFromClient(
(io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleResponse>) responseObserver);
case METHODID_STREAMING_BOTH_WAYS:
return (io.grpc.stub.StreamObserver<Req>) serviceImpl.streamingBothWays(
(io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleResponse>) responseObserver);
default:
throw new AssertionError();
}
@@ -287,6 +440,9 @@ public final class BenchmarkServiceGrpc {
.setSchemaDescriptor(new BenchmarkServiceDescriptorSupplier())
.addMethod(METHOD_UNARY_CALL)
.addMethod(METHOD_STREAMING_CALL)
.addMethod(METHOD_STREAMING_FROM_CLIENT)
.addMethod(METHOD_STREAMING_FROM_SERVER)
.addMethod(METHOD_STREAMING_BOTH_WAYS)
.build();
}
}

View File

@@ -24,20 +24,29 @@ public final class Services {
static {
java.lang.String[] descriptorData = {
"\n\016services.proto\022\014grpc.testing\032\016messages" +
".proto\032\rcontrol.proto2\252\001\n\020BenchmarkServi" +
"ce\022F\n\tUnaryCall\022\033.grpc.testing.SimpleReq" +
"uest\032\034.grpc.testing.SimpleResponse\022N\n\rSt" +
"reamingCall\022\033.grpc.testing.SimpleRequest" +
"\032\034.grpc.testing.SimpleResponse(\0010\0012\227\002\n\rW" +
"orkerService\022E\n\tRunServer\022\030.grpc.testing" +
".ServerArgs\032\032.grpc.testing.ServerStatus(" +
"\0010\001\022E\n\tRunClient\022\030.grpc.testing.ClientAr" +
"gs\032\032.grpc.testing.ClientStatus(\0010\001\022B\n\tCo",
"reCount\022\031.grpc.testing.CoreRequest\032\032.grp" +
"c.testing.CoreResponse\0224\n\nQuitWorker\022\022.g" +
"rpc.testing.Void\032\022.grpc.testing.VoidB$\n\030" +
"io.grpc.benchmarks.protoB\010Servicesb\006prot" +
"o3"
".proto\032\rcontrol.proto\032\013stats.proto2\246\003\n\020B" +
"enchmarkService\022F\n\tUnaryCall\022\033.grpc.test" +
"ing.SimpleRequest\032\034.grpc.testing.SimpleR" +
"esponse\022N\n\rStreamingCall\022\033.grpc.testing." +
"SimpleRequest\032\034.grpc.testing.SimpleRespo" +
"nse(\0010\001\022R\n\023StreamingFromClient\022\033.grpc.te" +
"sting.SimpleRequest\032\034.grpc.testing.Simpl" +
"eResponse(\001\022R\n\023StreamingFromServer\022\033.grp" +
"c.testing.SimpleRequest\032\034.grpc.testing.S",
"impleResponse0\001\022R\n\021StreamingBothWays\022\033.g" +
"rpc.testing.SimpleRequest\032\034.grpc.testing" +
".SimpleResponse(\0010\0012\227\002\n\rWorkerService\022E\n" +
"\tRunServer\022\030.grpc.testing.ServerArgs\032\032.g" +
"rpc.testing.ServerStatus(\0010\001\022E\n\tRunClien" +
"t\022\030.grpc.testing.ClientArgs\032\032.grpc.testi" +
"ng.ClientStatus(\0010\001\022B\n\tCoreCount\022\031.grpc." +
"testing.CoreRequest\032\032.grpc.testing.CoreR" +
"esponse\0224\n\nQuitWorker\022\022.grpc.testing.Voi" +
"d\032\022.grpc.testing.Void2^\n\030ReportQpsScenar",
"ioService\022B\n\016ReportScenario\022\034.grpc.testi" +
"ng.ScenarioResult\032\022.grpc.testing.VoidB$\n" +
"\030io.grpc.benchmarks.protoB\010Servicesb\006pro" +
"to3"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() {
@@ -52,9 +61,11 @@ public final class Services {
new com.google.protobuf.Descriptors.FileDescriptor[] {
io.grpc.benchmarks.proto.Messages.getDescriptor(),
io.grpc.benchmarks.proto.Control.getDescriptor(),
io.grpc.benchmarks.proto.Stats.getDescriptor(),
}, assigner);
io.grpc.benchmarks.proto.Messages.getDescriptor();
io.grpc.benchmarks.proto.Control.getDescriptor();
io.grpc.benchmarks.proto.Stats.getDescriptor();
}
// @@protoc_insertion_point(outer_class_scope)

View File

@@ -23,19 +23,26 @@ option java_package = "io.grpc.benchmarks.proto";
option java_outer_classname = "Control";
enum ClientType {
// Many languages support a basic distinction between using
// sync or async client, and this allows the specification
SYNC_CLIENT = 0;
ASYNC_CLIENT = 1;
OTHER_CLIENT = 2; // used for some language-specific variants
}
enum ServerType {
SYNC_SERVER = 0;
ASYNC_SERVER = 1;
ASYNC_GENERIC_SERVER = 2;
OTHER_SERVER = 3; // used for some language-specific variants
}
enum RpcType {
UNARY = 0;
STREAMING = 1;
STREAMING_FROM_CLIENT = 2;
STREAMING_FROM_SERVER = 3;
STREAMING_BOTH_WAYS = 4;
}
// Parameters of poisson process distribution, which is a good representation
@@ -45,18 +52,6 @@ message PoissonParams {
double offered_load = 1;
}
message UniformParams {
double interarrival_lo = 1;
double interarrival_hi = 2;
}
message DeterministicParams { double offered_load = 1; }
message ParetoParams {
double interarrival_base = 1;
double alpha = 2;
}
// Once an RPC finishes, immediately start a new one.
// No configuration parameters needed.
message ClosedLoopParams {}
@@ -65,9 +60,6 @@ message LoadParams {
oneof load {
ClosedLoopParams closed_loop = 1;
PoissonParams poisson = 2;
UniformParams uniform = 3;
DeterministicParams determ = 4;
ParetoParams pareto = 5;
};
}
@@ -77,6 +69,14 @@ message SecurityParams {
string server_host_override = 2;
}
message ChannelArg {
string name = 1;
oneof value {
string str_value = 2;
int32 int_value = 3;
}
}
message ClientConfig {
// List of targets to connect to. At least one target needs to be specified.
repeated string server_targets = 1;
@@ -99,6 +99,14 @@ message ClientConfig {
// Specify the cores we should run the client on, if desired
repeated int32 core_list = 13;
int32 core_limit = 14;
// If we use an OTHER_CLIENT client_type, this string gives more detail
string other_client_api = 15;
repeated ChannelArg channel_args = 16;
// Number of messages on a stream before it gets finished/restarted
int32 messages_per_stream = 18;
}
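A minimal sketch of filling the new client-side knobs from Java, using only fields visible in this hunk; the channel-arg name and values are purely illustrative:

import io.grpc.benchmarks.proto.Control;

final class ClientConfigSketch {
  static Control.ClientConfig newConfig() {
    return Control.ClientConfig.newBuilder()
        .addServerTargets("localhost:50051")      // placeholder target
        .setCoreLimit(4)
        .addChannelArgs(Control.ChannelArg.newBuilder()
            .setName("example.channel.arg")       // illustrative name, not a known c-core arg
            .setStrValue("some-value"))
        .setMessagesPerStream(100)                // restart each stream after 100 messages
        .build();
  }
}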
message ClientStatus { ClientStats stats = 1; }
@@ -125,11 +133,22 @@ message ServerConfig {
int32 async_server_threads = 7;
// Specify the number of cores to limit server to, if desired
int32 core_limit = 8;
// payload config, used in generic server
// payload config, used in generic server.
// Note this must NOT be used in proto (non-generic) servers. For proto servers,
// 'response sizes' must be configured from the 'response_size' field of the
// 'SimpleRequest' objects in RPC requests.
PayloadConfig payload_config = 9;
// Specify the cores we should run the server on, if desired
repeated int32 core_list = 10;
// If we use an OTHER_SERVER client_type, this string gives more detail
string other_server_api = 11;
// c++-only options (for now) --------------------------------
// Buffer pool size (no buffer pool specified if unset)
int32 resource_quota_size = 1001;
}
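And the server-side counterpart, a minimal sketch using only fields shown above; per the comment, payload_config stays unset for a proto (non-generic) server, and the resource-quota value is an assumed byte count:

import io.grpc.benchmarks.proto.Control;

final class ServerConfigSketch {
  static Control.ServerConfig newConfig() {
    return Control.ServerConfig.newBuilder()
        .setAsyncServerThreads(8)
        .setCoreLimit(4)
        .setResourceQuotaSize(64 * 1024 * 1024)  // c++-only buffer pool size; unit assumed to be bytes
        .build();
  }
}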
message ServerArgs {
@@ -158,3 +177,84 @@ message CoreResponse {
message Void {
}
// A single performance scenario: input to qps_json_driver
message Scenario {
// Human readable name for this scenario
string name = 1;
// Client configuration
ClientConfig client_config = 2;
// Number of clients to start for the test
int32 num_clients = 3;
// Server configuration
ServerConfig server_config = 4;
// Number of servers to start for the test
int32 num_servers = 5;
// Warmup period, in seconds
int32 warmup_seconds = 6;
// Benchmark time, in seconds
int32 benchmark_seconds = 7;
// Number of workers to spawn locally (usually zero)
int32 spawn_local_worker_count = 8;
}
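A Scenario bundles one client config and one server config with run timing, which is the unit qps_json_driver consumes. A minimal sketch with placeholder values:

import io.grpc.benchmarks.proto.Control;

final class ScenarioSketch {
  static Control.Scenario newScenario() {
    return Control.Scenario.newBuilder()
        .setName("java_streaming_from_server_example")  // illustrative name
        .setClientConfig(Control.ClientConfig.newBuilder().addServerTargets("localhost:50051"))
        .setNumClients(1)
        .setServerConfig(Control.ServerConfig.getDefaultInstance())
        .setNumServers(1)
        .setWarmupSeconds(5)
        .setBenchmarkSeconds(30)
        .build();
  }
}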
// A set of scenarios to be run with qps_json_driver
message Scenarios {
repeated Scenario scenarios = 1;
}
// Basic summary that can be computed from ClientStats and ServerStats
// once the scenario has finished.
message ScenarioResultSummary
{
// Total number of operations per second over all clients.
double qps = 1;
// QPS per one server core.
double qps_per_server_core = 2;
// server load based on system_time (0.85 => 85%)
double server_system_time = 3;
// server load based on user_time (0.85 => 85%)
double server_user_time = 4;
// client load based on system_time (0.85 => 85%)
double client_system_time = 5;
// client load based on user_time (0.85 => 85%)
double client_user_time = 6;
// X% latency percentiles (in nanoseconds)
double latency_50 = 7;
double latency_90 = 8;
double latency_95 = 9;
double latency_99 = 10;
double latency_999 = 11;
// server cpu usage percentage
double server_cpu_usage = 12;
// Number of requests that succeeded/failed
double successful_requests_per_second = 13;
double failed_requests_per_second = 14;
// Number of polls called inside completion queue per request
double client_polls_per_request = 15;
double server_polls_per_request = 16;
}
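The load fields are ratios rather than percentages (0.85 => 85%). A minimal sketch of how server_system_time could be derived for a single server from the ServerStats defined in stats.proto below; the actual aggregation in the driver may differ:

import io.grpc.benchmarks.proto.Stats;

final class ServerLoadSketch {
  // CPU-seconds spent in system mode divided by wall-clock seconds for the window.
  static double serverSystemLoad(Stats.ServerStats stats) {
    return stats.getTimeElapsed() == 0 ? 0 : stats.getTimeSystem() / stats.getTimeElapsed();
  }
}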
// Results of a single benchmark scenario.
message ScenarioResult {
// Inputs used to run the scenario.
Scenario scenario = 1;
// Histograms from all clients merged into one histogram.
HistogramData latencies = 2;
// Client stats for each client
repeated ClientStats client_stats = 3;
// Server stats for each server
repeated ServerStats server_stats = 4;
// Number of cores available to each server
repeated int32 server_cores = 5;
// An after-the-fact computed summary
ScenarioResultSummary summary = 6;
// Information on success or failure of each worker
repeated bool client_success = 7;
repeated bool server_success = 8;
// Number of failed requests (one row per status code seen)
repeated RequestResultCount request_results = 9;
}

View File

@@ -21,29 +21,24 @@ package grpc.testing;
option java_package = "io.grpc.benchmarks.proto";
option java_outer_classname = "Messages";
// TODO(dgq): Go back to using well-known types once
// https://github.com/grpc/grpc/issues/6980 has been fixed.
// import "google/protobuf/wrappers.proto";
message BoolValue {
// The bool value.
bool value = 1;
}
// DEPRECATED, don't use. To be removed shortly.
// The type of payload that should be returned.
enum PayloadType {
// Compressable text format.
COMPRESSABLE = 0;
// Uncompressable binary format.
UNCOMPRESSABLE = 1;
// Randomly chosen from all other formats defined in this enum.
RANDOM = 2;
}
// Compression algorithms
enum CompressionType {
// No compression
NONE = 0;
GZIP = 1;
DEFLATE = 2;
}
// A block of data, to simply increase gRPC message size.
message Payload {
// DEPRECATED, don't use. To be removed shortly.
// The type of data in body.
PayloadType type = 1;
// Primary contents of payload.
@@ -59,12 +54,12 @@ message EchoStatus {
// Unary request.
message SimpleRequest {
// DEPRECATED, don't use. To be removed shortly.
// Desired payload type in the response from the server.
// If response_type is RANDOM, server randomly chooses one from other formats.
PayloadType response_type = 1;
// Desired payload size in the response from the server.
// If response_type is COMPRESSABLE, this denotes the size before compression.
int32 response_size = 2;
// Optional input payload sent along with the request.
@@ -76,11 +71,17 @@ message SimpleRequest {
// Whether SimpleResponse should include OAuth scope.
bool fill_oauth_scope = 5;
// Compression algorithm to be used by the server for the response (stream)
CompressionType response_compression = 6;
// Whether to request the server to compress the response. This field is
// "nullable" in order to interoperate seamlessly with clients not able to
// implement the full compression tests by introspecting the call to verify
// the response's compression status.
BoolValue response_compressed = 6;
// Whether server should return a given status
EchoStatus response_status = 7;
// Whether the server should expect this request to be compressed.
BoolValue expect_compressed = 8;
}
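The BoolValue wrapper makes these fields nullable, so an unset value is distinguishable from an explicit false (the interoperability point made in the comments above). A minimal sketch of a request asking for a compressed 1 KiB response:

import io.grpc.benchmarks.proto.Messages;

final class CompressedRequestSketch {
  static Messages.SimpleRequest newRequest() {
    return Messages.SimpleRequest.newBuilder()
        .setResponseSize(1024)  // 1 KiB response body
        .setResponseCompressed(Messages.BoolValue.newBuilder().setValue(true))
        .build();
  }
}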
// Unary response, as configured by the request.
@@ -99,6 +100,12 @@ message StreamingInputCallRequest {
// Optional input payload sent along with the request.
Payload payload = 1;
// Whether the server should expect this request to be compressed. This field
// is "nullable" in order to interoperate seamlessly with servers not able to
// implement the full compression tests by introspecting the call to verify
// the request's compression status.
BoolValue expect_compressed = 2;
// Not expecting any payload from the response.
}
@@ -111,16 +118,22 @@ message StreamingInputCallResponse {
// Configuration for a particular response.
message ResponseParameters {
// Desired payload sizes in responses from the server.
// If response_type is COMPRESSABLE, this denotes the size before compression.
int32 size = 1;
// Desired interval between consecutive responses in the response stream in
// microseconds.
int32 interval_us = 2;
// Whether to request the server to compress the response. This field is
// "nullable" in order to interoperate seamlessly with clients not able to
// implement the full compression tests by introspecting the call to verify
// the response's compression status.
BoolValue compressed = 3;
}
// Server-streaming request.
message StreamingOutputCallRequest {
// DEPRECATED, don't use. To be removed shortly.
// Desired payload type in the response from the server.
// If response_type is RANDOM, the payload from each response in the stream
// might be of different types. This is to simulate a mixed type of payload
@@ -133,9 +146,6 @@ message StreamingOutputCallRequest {
// Optional input payload sent along with the request.
Payload payload = 3;
// Compression algorithm to be used by the server for the response (stream)
CompressionType response_compression = 6;
// Whether server should return a given status
EchoStatus response_status = 7;
}
@@ -146,10 +156,16 @@ message StreamingOutputCallResponse {
Payload payload = 1;
}
// For reconnect interop test only.
// Client tells server what reconnection parameters it used.
message ReconnectParams {
int32 max_reconnect_backoff_ms = 1;
}
// For reconnect interop test only.
// Server tells client whether its reconnects are following the spec and the
// reconnect backoffs it saw.
message ReconnectInfo {
bool passed = 1;
repeated int32 backoff_ms = 2;
}
}

View File

@@ -40,4 +40,4 @@ message PayloadConfig {
SimpleProtoParams simple_params = 2;
ComplexProtoParams complex_params = 3;
}
}
}

View File

@@ -18,21 +18,34 @@ syntax = "proto3";
import "messages.proto";
import "control.proto";
import "stats.proto";
package grpc.testing;
option java_package = "io.grpc.benchmarks.proto";
option java_outer_classname = "Services";
service BenchmarkService {
// One request followed by one response.
// The server returns the client payload as-is.
rpc UnaryCall(SimpleRequest) returns (SimpleResponse);
// One request followed by one response.
// The server returns the client payload as-is.
// Repeated sequence of one request followed by one response.
// Should be called streaming ping-pong
// The server returns the client payload as-is on each response
rpc StreamingCall(stream SimpleRequest) returns (stream SimpleResponse);
// Single-sided unbounded streaming from client to server
// The server returns the client payload as-is once the client does WritesDone
rpc StreamingFromClient(stream SimpleRequest) returns (SimpleResponse);
// Single-sided unbounded streaming from server to client
// The server repeatedly returns the client payload as-is
rpc StreamingFromServer(SimpleRequest) returns (stream SimpleResponse);
// Two-sided unbounded streaming between server to client
// Both sides send the content of their own choice to the other
rpc StreamingBothWays(stream SimpleRequest) returns (stream SimpleResponse);
}
service WorkerService {
@@ -57,4 +70,9 @@ service WorkerService {
// Quit this worker
rpc QuitWorker(Void) returns (Void);
}
}
service ReportQpsScenarioService {
// Report results of a QPS test benchmark scenario.
rpc ReportScenario(ScenarioResult) returns (Void);
}
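A driver would report results through the matching generated stub. That stub class is not part of this excerpt, so the name below (ReportQpsScenarioServiceGrpc) is assumed to follow the same codegen convention as BenchmarkServiceGrpc:

import io.grpc.Channel;
import io.grpc.benchmarks.proto.Control;
import io.grpc.benchmarks.proto.ReportQpsScenarioServiceGrpc;  // assumed generated class

final class ReportSketch {
  static void report(Channel channel, Control.ScenarioResult result) {
    ReportQpsScenarioServiceGrpc.newBlockingStub(channel).reportScenario(result);
  }
}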

View File

@@ -29,6 +29,15 @@ message ServerStats {
// change in server time (in seconds) used by the server process and all
// threads since last reset
double time_system = 3;
// change in total cpu time of the server (data from proc/stat)
uint64 total_cpu_time = 4;
// change in idle time of the server (data from proc/stat)
uint64 idle_cpu_time = 5;
// Number of polls called inside completion queue
uint64 cq_poll_count = 6;
}
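total_cpu_time and idle_cpu_time are /proc/stat deltas, so the busy share of the sampling window is (total - idle) / total. A minimal sketch of that arithmetic; whether server_cpu_usage in ScenarioResultSummary is computed exactly this way is an assumption here:

import io.grpc.benchmarks.proto.Stats;

final class CpuUsageSketch {
  static double cpuUsagePercent(Stats.ServerStats stats) {
    long total = stats.getTotalCpuTime();
    long idle = stats.getIdleCpuTime();
    return total == 0 ? 0 : 100.0 * (total - idle) / total;
  }
}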
// Histogram params based on grpc/support/histogram.c
@@ -47,6 +56,11 @@ message HistogramData {
double count = 6;
}
message RequestResultCount {
int32 status_code = 1;
int64 count = 2;
}
message ClientStats {
// Latency histogram. Data points are in nanoseconds.
HistogramData latencies = 1;
@@ -55,4 +69,10 @@ message ClientStats {
double time_elapsed = 2;
double time_user = 3;
double time_system = 4;
}
// Number of failed requests (one row per status code seen)
repeated RequestResultCount request_results = 5;
// Number of polls called inside completion queue
uint64 cq_poll_count = 6;
}
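request_results arrives as one row per status code per client, so a driver folds the rows together before filling ScenarioResult.request_results. A minimal, purely illustrative aggregation:

import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import io.grpc.benchmarks.proto.Stats;

final class RequestResultSketch {
  static Map<Integer, Long> countsByStatusCode(List<Stats.ClientStats> allClients) {
    Map<Integer, Long> totals = new TreeMap<Integer, Long>();
    for (Stats.ClientStats client : allClients) {
      for (Stats.RequestResultCount row : client.getRequestResultsList()) {
        Long current = totals.get(row.getStatusCode());
        totals.put(row.getStatusCode(), (current == null ? 0L : current) + row.getCount());
      }
    }
    return totals;
  }
}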