author    Craig Tiller <craig.tiller@gmail.com>  2015-01-27 10:18:09 -0800
committer Craig Tiller <craig.tiller@gmail.com>  2015-01-27 10:18:09 -0800
commit    6437bd5037518953b159ae5321cd72eaaeb4ed03 (patch)
tree      6df55f3880b5c6a4fb77bf737cb834467a24fae6 /src
parent    103481ec8a7d93e0b120c639aa0c879e5e0aff88 (diff)
parent    1159dcf5bc98460e0408cb30609dd1dafa8d31a4 (diff)
Merge github.com:google/grpc into javascript
Diffstat (limited to 'src')
-rw-r--r--  src/core/channel/call_op_string.c  98
-rw-r--r--  src/core/channel/channel_args.c  2
-rw-r--r--  src/core/channel/client_channel.c  6
-rw-r--r--  src/core/channel/connected_channel.c  41
-rw-r--r--  src/core/httpcli/format_request.c  82
-rw-r--r--  src/core/httpcli/httpcli.c  3
-rw-r--r--  src/core/httpcli/httpcli_security_context.c  2
-rw-r--r--  src/core/iomgr/resolve_address.c (renamed from src/core/iomgr/resolve_address_posix.c)  7
-rw-r--r--  src/core/iomgr/sockaddr_posix.h  4
-rw-r--r--  src/core/iomgr/sockaddr_utils.c  3
-rw-r--r--  src/core/iomgr/sockaddr_win32.h  2
-rw-r--r--  src/core/iomgr/socket_utils_common_posix.c  2
-rw-r--r--  src/core/iomgr/tcp_posix.c  2
-rw-r--r--  src/core/security/credentials.c  17
-rw-r--r--  src/core/security/json_token.c  2
-rw-r--r--  src/core/security/secure_endpoint.c  2
-rw-r--r--  src/core/security/security_context.c  2
-rw-r--r--  src/core/statistics/census_rpc_stats.c  2
-rw-r--r--  src/core/statistics/census_tracing.c  2
-rw-r--r--  src/core/support/cmdline.c  2
-rw-r--r--  src/core/support/host_port.c  2
-rw-r--r--  src/core/support/murmur_hash.c  6
-rw-r--r--  src/core/support/string.c  80
-rw-r--r--  src/core/support/string.h  109
-rw-r--r--  src/core/surface/byte_buffer.c  11
-rw-r--r--  src/core/surface/call.c  190
-rw-r--r--  src/core/surface/channel_create.c  4
-rw-r--r--  src/core/surface/client.c  2
-rw-r--r--  src/core/surface/completion_queue.c  7
-rw-r--r--  src/core/surface/event_string.c  82
-rw-r--r--  src/core/surface/lame_client.c  2
-rw-r--r--  src/core/surface/secure_channel_create.c  4
-rw-r--r--  src/core/surface/server.c  2
-rw-r--r--  src/core/transport/chttp2/frame_data.c  2
-rw-r--r--  src/core/transport/chttp2/hpack_parser.c  2
-rw-r--r--  src/core/transport/chttp2/timeout_encoding.c  38
-rw-r--r--  src/core/transport/chttp2/timeout_encoding.h  3
-rw-r--r--  src/core/transport/chttp2_transport.c  6
-rw-r--r--  src/core/transport/metadata.c  1
-rw-r--r--  src/cpp/client/channel.cc  15
-rw-r--r--  src/cpp/stream/stream_context.cc  12
-rw-r--r--  src/cpp/stream/stream_context.h  1
-rw-r--r--  src/node/binding.gyp  57
-rw-r--r--  src/node/examples/math_server.js  7
-rw-r--r--  src/node/ext/byte_buffer.cc (renamed from src/node/byte_buffer.cc)  0
-rw-r--r--  src/node/ext/byte_buffer.h (renamed from src/node/byte_buffer.h)  0
-rw-r--r--  src/node/ext/call.cc (renamed from src/node/call.cc)  31
-rw-r--r--  src/node/ext/call.h (renamed from src/node/call.h)  2
-rw-r--r--  src/node/ext/channel.cc (renamed from src/node/channel.cc)  0
-rw-r--r--  src/node/ext/channel.h (renamed from src/node/channel.h)  0
-rw-r--r--  src/node/ext/completion_queue_async_worker.cc (renamed from src/node/completion_queue_async_worker.cc)  0
-rw-r--r--  src/node/ext/completion_queue_async_worker.h (renamed from src/node/completion_queue_async_worker.h)  0
-rw-r--r--  src/node/ext/credentials.cc (renamed from src/node/credentials.cc)  0
-rw-r--r--  src/node/ext/credentials.h (renamed from src/node/credentials.h)  0
-rw-r--r--  src/node/ext/event.cc (renamed from src/node/event.cc)  0
-rw-r--r--  src/node/ext/event.h (renamed from src/node/event.h)  0
-rw-r--r--  src/node/ext/node_grpc.cc (renamed from src/node/node_grpc.cc)  2
-rw-r--r--  src/node/ext/server.cc (renamed from src/node/server.cc)  0
-rw-r--r--  src/node/ext/server.h (renamed from src/node/server.h)  0
-rw-r--r--  src/node/ext/server_credentials.cc (renamed from src/node/server_credentials.cc)  0
-rw-r--r--  src/node/ext/server_credentials.h (renamed from src/node/server_credentials.h)  0
-rw-r--r--  src/node/ext/tag.cc (renamed from src/node/tag.cc)  0
-rw-r--r--  src/node/ext/tag.h (renamed from src/node/tag.h)  0
-rw-r--r--  src/node/ext/timeval.cc (renamed from src/node/timeval.cc)  0
-rw-r--r--  src/node/ext/timeval.h (renamed from src/node/timeval.h)  0
-rw-r--r--  src/node/index.js (renamed from src/node/main.js)  4
-rw-r--r--  src/node/interop/interop_client.js  2
-rw-r--r--  src/node/interop/interop_server.js  3
-rw-r--r--  src/node/package.json  2
-rw-r--r--  src/node/src/client.js (renamed from src/node/client.js)  109
-rw-r--r--  src/node/src/common.js (renamed from src/node/common.js)  0
-rw-r--r--  src/node/src/server.js (renamed from src/node/server.js)  5
-rw-r--r--  src/node/src/surface_client.js (renamed from src/node/surface_client.js)  132
-rw-r--r--  src/node/src/surface_server.js (renamed from src/node/surface_server.js)  145
-rw-r--r--  src/node/test/call_test.js  55
-rw-r--r--  src/node/test/client_server_test.js  98
-rw-r--r--  src/node/test/constant_test.js  1
-rw-r--r--  src/node/test/end_to_end_test.js  79
-rw-r--r--  src/node/test/interop_sanity_test.js  9
-rw-r--r--  src/node/test/server_test.js  56
-rw-r--r--  src/node/test/surface_test.js  55
-rw-r--r--  src/php/ext/grpc/call.c  21
-rw-r--r--  src/php/ext/grpc/php_grpc.c  4
-rwxr-xr-x  src/php/lib/Grpc/ActiveCall.php  3
-rwxr-xr-x  src/php/tests/unit_tests/CallTest.php  6
-rwxr-xr-x  src/php/tests/unit_tests/EndToEndTest.php  24
-rwxr-xr-x  src/php/tests/unit_tests/SecureEndToEndTest.php  24
-rw-r--r--  src/python/_framework/base/__init__.py  0
-rw-r--r--  src/python/_framework/base/exceptions.py (renamed from src/ruby/spec/port_picker.rb)  21
-rw-r--r--  src/python/_framework/base/interfaces.py  229
-rw-r--r--  src/python/_framework/base/interfaces_test.py  299
-rw-r--r--  src/python/_framework/base/packets/__init__.py  0
-rw-r--r--  src/python/_framework/base/packets/_cancellation.py  64
-rw-r--r--  src/python/_framework/base/packets/_constants.py  32
-rw-r--r--  src/python/_framework/base/packets/_context.py  99
-rw-r--r--  src/python/_framework/base/packets/_emission.py  126
-rw-r--r--  src/python/_framework/base/packets/_ends.py  408
-rw-r--r--  src/python/_framework/base/packets/_expiration.py  158
-rw-r--r--  src/python/_framework/base/packets/_ingestion.py  440
-rw-r--r--  src/python/_framework/base/packets/_interfaces.py  269
-rw-r--r--  src/python/_framework/base/packets/_reception.py  394
-rw-r--r--  src/python/_framework/base/packets/_termination.py  201
-rw-r--r--  src/python/_framework/base/packets/_transmission.py  393
-rw-r--r--  src/python/_framework/base/packets/implementations.py  77
-rw-r--r--  src/python/_framework/base/packets/implementations_test.py  80
-rw-r--r--  src/python/_framework/base/packets/in_memory.py  108
-rw-r--r--  src/python/_framework/base/packets/interfaces.py  84
-rw-r--r--  src/python/_framework/base/packets/null.py  56
-rw-r--r--  src/python/_framework/base/packets/packets.py  112
-rw-r--r--  src/python/_framework/base/util.py  91
-rw-r--r--  src/python/_framework/common/__init__.py  0
-rw-r--r--  src/python/_framework/common/cardinality.py  42
-rw-r--r--  src/python/_framework/face/__init__.py  0
-rw-r--r--  src/python/_framework/face/_calls.py  310
-rw-r--r--  src/python/_framework/face/_control.py  194
-rw-r--r--  src/python/_framework/face/_service.py  189
-rw-r--r--  src/python/_framework/face/_test_case.py  81
-rw-r--r--  src/python/_framework/face/blocking_invocation_inline_service_test.py  46
-rw-r--r--  src/python/_framework/face/demonstration.py  118
-rw-r--r--  src/python/_framework/face/event_invocation_synchronous_event_service_test.py  46
-rw-r--r--  src/python/_framework/face/exceptions.py  77
-rw-r--r--  src/python/_framework/face/future_invocation_asynchronous_event_service_test.py  46
-rw-r--r--  src/python/_framework/face/implementations.py  246
-rw-r--r--  src/python/_framework/face/interfaces.py  545
-rw-r--r--  src/python/_framework/face/testing/__init__.py  0
-rw-r--r--  src/python/_framework/face/testing/base_util.py  102
-rw-r--r--  src/python/_framework/face/testing/blocking_invocation_inline_service_test_case.py  223
-rw-r--r--  src/python/_framework/face/testing/callback.py  94
-rw-r--r--  src/python/_framework/face/testing/control.py  87
-rw-r--r--  src/python/_framework/face/testing/coverage.py  123
-rw-r--r--  src/python/_framework/face/testing/digest.py  446
-rw-r--r--  src/python/_framework/face/testing/event_invocation_synchronous_event_service_test_case.py  367
-rw-r--r--  src/python/_framework/face/testing/future_invocation_asynchronous_event_service_test_case.py  377
-rw-r--r--  src/python/_framework/face/testing/interfaces.py  117
-rw-r--r--  src/python/_framework/face/testing/serial.py  70
-rw-r--r--  src/python/_framework/face/testing/service.py  337
-rw-r--r--  src/python/_framework/face/testing/stock_service.py  374
-rw-r--r--  src/python/_framework/face/testing/test_case.py  111
-rw-r--r--  src/python/_framework/foundation/_later_test.py  145
-rw-r--r--  src/python/_framework/foundation/_logging_pool_test.py  2
-rw-r--r--  src/python/_framework/foundation/_timer_future.py  156
-rw-r--r--  src/python/_framework/foundation/abandonment.py  38
-rw-r--r--  src/python/_framework/foundation/callable_util.py  78
-rw-r--r--  src/python/_framework/foundation/future.py  172
-rw-r--r--  src/python/_framework/foundation/later.py  51
-rw-r--r--  src/python/_framework/foundation/stream.py  60
-rw-r--r--  src/python/_framework/foundation/stream_testing.py  73
-rw-r--r--  src/python/_framework/foundation/stream_util.py  160
-rw-r--r--  src/python/_junkdrawer/__init__.py  0
-rw-r--r--  src/python/_junkdrawer/stock_pb2.py  152
-rwxr-xr-x  src/ruby/README.md  27
-rwxr-xr-x  src/ruby/bin/interop/interop_client.rb  99
-rwxr-xr-x  src/ruby/bin/interop/interop_server.rb  19
-rw-r--r--  src/ruby/ext/grpc/rb_call.c  20
-rw-r--r--  src/ruby/ext/grpc/rb_credentials.c  6
-rw-r--r--  src/ruby/ext/grpc/rb_event.c  6
-rw-r--r--  src/ruby/ext/grpc/rb_server.c  12
-rwxr-xr-x  src/ruby/grpc.gemspec  1
-rw-r--r--  src/ruby/lib/grpc/generic/active_call.rb  30
-rw-r--r--  src/ruby/lib/grpc/generic/bidi_call.rb  4
-rw-r--r--  src/ruby/spec/call_spec.rb  40
-rw-r--r--  src/ruby/spec/channel_spec.rb  21
-rw-r--r--  src/ruby/spec/client_server_spec.rb  106
-rw-r--r--  src/ruby/spec/event_spec.rb  3
-rw-r--r--  src/ruby/spec/generic/active_call_spec.rb  66
-rw-r--r--  src/ruby/spec/generic/client_stub_spec.rb  144
-rw-r--r--  src/ruby/spec/generic/rpc_server_spec.rb  7
-rw-r--r--  src/ruby/spec/server_spec.rb  5
168 files changed, 10892 insertions, 1120 deletions
diff --git a/src/core/channel/call_op_string.c b/src/core/channel/call_op_string.c
index e6557cef99..40d53693c2 100644
--- a/src/core/channel/call_op_string.c
+++ b/src/core/channel/call_op_string.c
@@ -37,117 +37,93 @@
#include <stdio.h>
#include <string.h>
+#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
-#include <grpc/support/string.h>
#include <grpc/support/useful.h>
-#define MAX_APPEND 1024
+static void put_metadata(gpr_strvec *b, grpc_mdelem *md) {
+ gpr_strvec_add(b, gpr_strdup(" key="));
+ gpr_strvec_add(b, gpr_hexdump((char *)GPR_SLICE_START_PTR(md->key->slice),
+ GPR_SLICE_LENGTH(md->key->slice), GPR_HEXDUMP_PLAINTEXT));
-typedef struct {
- size_t cap;
- size_t len;
- char *buffer;
-} buf;
-
-static void bprintf(buf *b, const char *fmt, ...) {
- va_list arg;
- if (b->len + MAX_APPEND > b->cap) {
- b->cap = GPR_MAX(b->len + MAX_APPEND, b->cap * 3 / 2);
- b->buffer = gpr_realloc(b->buffer, b->cap);
- }
- va_start(arg, fmt);
- b->len += vsprintf(b->buffer + b->len, fmt, arg);
- va_end(arg);
-}
-
-static void bputs(buf *b, const char *s) {
- size_t slen = strlen(s);
- if (b->len + slen + 1 > b->cap) {
- b->cap = GPR_MAX(b->len + slen + 1, b->cap * 3 / 2);
- b->buffer = gpr_realloc(b->buffer, b->cap);
- }
- strcat(b->buffer, s);
- b->len += slen;
-}
-
-static void put_metadata(buf *b, grpc_mdelem *md) {
- char *txt;
-
- txt = gpr_hexdump((char *)GPR_SLICE_START_PTR(md->key->slice),
- GPR_SLICE_LENGTH(md->key->slice), GPR_HEXDUMP_PLAINTEXT);
- bputs(b, " key=");
- bputs(b, txt);
- gpr_free(txt);
-
- txt = gpr_hexdump((char *)GPR_SLICE_START_PTR(md->value->slice),
- GPR_SLICE_LENGTH(md->value->slice), GPR_HEXDUMP_PLAINTEXT);
- bputs(b, " value=");
- bputs(b, txt);
- gpr_free(txt);
+ gpr_strvec_add(b, gpr_strdup(" value="));
+ gpr_strvec_add(b, gpr_hexdump((char *)GPR_SLICE_START_PTR(md->value->slice),
+ GPR_SLICE_LENGTH(md->value->slice), GPR_HEXDUMP_PLAINTEXT));
}
char *grpc_call_op_string(grpc_call_op *op) {
- buf b = {0, 0, 0};
+ char *tmp;
+ char *out;
+
+ gpr_strvec b;
+ gpr_strvec_init(&b);
switch (op->dir) {
case GRPC_CALL_DOWN:
- bprintf(&b, ">");
+ gpr_strvec_add(&b, gpr_strdup(">"));
break;
case GRPC_CALL_UP:
- bprintf(&b, "<");
+ gpr_strvec_add(&b, gpr_strdup("<"));
break;
}
switch (op->type) {
case GRPC_SEND_METADATA:
- bprintf(&b, "SEND_METADATA");
+ gpr_strvec_add(&b, gpr_strdup("SEND_METADATA"));
put_metadata(&b, op->data.metadata);
break;
case GRPC_SEND_DEADLINE:
- bprintf(&b, "SEND_DEADLINE %d.%09d", op->data.deadline.tv_sec,
+ gpr_asprintf(&tmp, "SEND_DEADLINE %d.%09d", op->data.deadline.tv_sec,
op->data.deadline.tv_nsec);
+ gpr_strvec_add(&b, tmp);
break;
case GRPC_SEND_START:
- bprintf(&b, "SEND_START pollset=%p", op->data.start.pollset);
+ gpr_asprintf(&tmp, "SEND_START pollset=%p", op->data.start.pollset);
+ gpr_strvec_add(&b, tmp);
break;
case GRPC_SEND_MESSAGE:
- bprintf(&b, "SEND_MESSAGE");
+ gpr_strvec_add(&b, gpr_strdup("SEND_MESSAGE"));
break;
case GRPC_SEND_PREFORMATTED_MESSAGE:
bprintf(&b, "SEND_PREFORMATTED_MESSAGE");
break;
case GRPC_SEND_FINISH:
- bprintf(&b, "SEND_FINISH");
+ gpr_strvec_add(&b, gpr_strdup("SEND_FINISH"));
break;
case GRPC_REQUEST_DATA:
- bprintf(&b, "REQUEST_DATA");
+ gpr_strvec_add(&b, gpr_strdup("REQUEST_DATA"));
break;
case GRPC_RECV_METADATA:
- bprintf(&b, "RECV_METADATA");
+ gpr_strvec_add(&b, gpr_strdup("RECV_METADATA"));
put_metadata(&b, op->data.metadata);
break;
case GRPC_RECV_DEADLINE:
- bprintf(&b, "RECV_DEADLINE %d.%09d", op->data.deadline.tv_sec,
+ gpr_asprintf(&tmp, "RECV_DEADLINE %d.%09d", op->data.deadline.tv_sec,
op->data.deadline.tv_nsec);
+ gpr_strvec_add(&b, tmp);
break;
case GRPC_RECV_END_OF_INITIAL_METADATA:
- bprintf(&b, "RECV_END_OF_INITIAL_METADATA");
+ gpr_strvec_add(&b, gpr_strdup("RECV_END_OF_INITIAL_METADATA"));
break;
case GRPC_RECV_MESSAGE:
- bprintf(&b, "RECV_MESSAGE");
+ gpr_strvec_add(&b, gpr_strdup("RECV_MESSAGE"));
break;
case GRPC_RECV_HALF_CLOSE:
- bprintf(&b, "RECV_HALF_CLOSE");
+ gpr_strvec_add(&b, gpr_strdup("RECV_HALF_CLOSE"));
break;
case GRPC_RECV_FINISH:
- bprintf(&b, "RECV_FINISH");
+ gpr_strvec_add(&b, gpr_strdup("RECV_FINISH"));
break;
case GRPC_CANCEL_OP:
- bprintf(&b, "CANCEL_OP");
+ gpr_strvec_add(&b, gpr_strdup("CANCEL_OP"));
break;
}
- bprintf(&b, " flags=0x%08x", op->flags);
+ gpr_asprintf(&tmp, " flags=0x%08x", op->flags);
+ gpr_strvec_add(&b, tmp);
+
+ out = gpr_strvec_flatten(&b, NULL);
+ gpr_strvec_destroy(&b);
- return b.buffer;
+ return out;
}
void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
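[Editor's note, not part of the commit: the hunk above replaces the file-local buf/bprintf accumulator with the shared gpr_strvec API from the new src/core/support/string.h. A minimal standalone sketch of that API, using only functions declared in this diff (the helper name describe_op is hypothetical):

#include "src/core/support/string.h"
#include <grpc/support/alloc.h>

static char *describe_op(int up, const char *name, unsigned flags) {
  char *tmp;
  char *out;
  gpr_strvec b;
  gpr_strvec_init(&b);
  gpr_strvec_add(&b, gpr_strdup(up ? "<" : ">")); /* strvec takes ownership of each piece */
  gpr_strvec_add(&b, gpr_strdup(name));
  gpr_asprintf(&tmp, " flags=0x%08x", flags);     /* heap-allocates tmp */
  gpr_strvec_add(&b, tmp);                        /* ownership of tmp moves to b */
  out = gpr_strvec_flatten(&b, NULL);             /* caller must gpr_free(out) */
  gpr_strvec_destroy(&b);                         /* frees all owned pieces */
  return out;
}
]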
diff --git a/src/core/channel/channel_args.c b/src/core/channel/channel_args.c
index c1ab698012..5f16c7b7e9 100644
--- a/src/core/channel/channel_args.c
+++ b/src/core/channel/channel_args.c
@@ -33,9 +33,9 @@
#include <grpc/grpc.h>
#include "src/core/channel/channel_args.h"
+#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
-#include <grpc/support/string.h>
#include <string.h>
diff --git a/src/core/channel/client_channel.c b/src/core/channel/client_channel.c
index fa75561c78..f9b42db419 100644
--- a/src/core/channel/client_channel.c
+++ b/src/core/channel/client_channel.c
@@ -40,9 +40,9 @@
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/metadata_buffer.h"
#include "src/core/iomgr/iomgr.h"
+#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
@@ -410,7 +410,7 @@ static void init_channel_elem(grpc_channel_element *elem,
grpc_mdctx *metadata_context, int is_first,
int is_last) {
channel_data *chand = elem->channel_data;
- char temp[16];
+ char temp[GPR_LTOA_MIN_BUFSIZE];
GPR_ASSERT(!is_first);
GPR_ASSERT(is_last);
@@ -425,7 +425,7 @@ static void init_channel_elem(grpc_channel_element *elem,
chand->transport_setup_initiated = 0;
chand->args = grpc_channel_args_copy(args);
- sprintf(temp, "%d", GRPC_STATUS_CANCELLED);
+ gpr_ltoa(GRPC_STATUS_CANCELLED, temp);
chand->cancel_status =
grpc_mdelem_from_strings(metadata_context, "grpc-status", temp);
}
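[Editor's note, not part of the commit: the sprintf into a fixed 16-byte buffer above is replaced by gpr_ltoa writing into a GPR_LTOA_MIN_BUFSIZE buffer (3 * sizeof(long) bytes per the new string.h), which is comfortably large enough for any long in base 10 plus sign and terminator. An illustrative sketch under those assumptions (format_status is hypothetical):

#include <string.h>
#include "src/core/support/string.h"

/* Copy the base-10 form of 'status' into 'out' (at least GPR_LTOA_MIN_BUFSIZE bytes). */
static void format_status(long status, char *out) {
  char temp[GPR_LTOA_MIN_BUFSIZE];
  int len = gpr_ltoa(status, temp); /* returns the number of characters written */
  memcpy(out, temp, len + 1);       /* include the trailing '\0' */
}
]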
diff --git a/src/core/channel/connected_channel.c b/src/core/channel/connected_channel.c
index 6067896a8c..adbeec0fc6 100644
--- a/src/core/channel/connected_channel.c
+++ b/src/core/channel/connected_channel.c
@@ -37,12 +37,12 @@
#include <stdio.h>
#include <string.h>
+#include "src/core/support/string.h"
#include "src/core/transport/transport.h"
#include <grpc/byte_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/slice_buffer.h>
-#include <grpc/support/string.h>
#define MAX_BUFFER_LENGTH 8192
/* the protobuf library will (by default) start warning at 100megs */
@@ -386,23 +386,25 @@ static void recv_batch(void *user_data, grpc_transport *transport,
case GRPC_OP_BEGIN_MESSAGE:
/* can't begin a message when we're still reading a message */
if (calld->reading_message) {
- char message[128];
- sprintf(message,
- "Message terminated early; read %d bytes, expected %d",
- (int)calld->incoming_message.length,
- (int)calld->incoming_message_length);
+ char *message = NULL;
+ gpr_asprintf(&message,
+ "Message terminated early; read %d bytes, expected %d",
+ (int)calld->incoming_message.length,
+ (int)calld->incoming_message_length);
recv_error(chand, calld, __LINE__, message);
+ gpr_free(message);
return;
}
/* stash away parameters, and prepare for incoming slices */
length = stream_op->data.begin_message.length;
if (length > calld->max_message_length) {
- char message[128];
- sprintf(
- message,
+ char *message = NULL;
+ gpr_asprintf(
+ &message,
"Maximum message length of %d exceeded by a message of length %d",
calld->max_message_length, length);
recv_error(chand, calld, __LINE__, message);
+ gpr_free(message);
} else if (length > 0) {
calld->reading_message = 1;
calld->incoming_message_length = length;
@@ -425,12 +427,13 @@ static void recv_batch(void *user_data, grpc_transport *transport,
gpr_slice_buffer_add(&calld->incoming_message, stream_op->data.slice);
if (calld->incoming_message.length > calld->incoming_message_length) {
/* if we got too many bytes, complain */
- char message[128];
- sprintf(message,
- "Receiving message overflow; read %d bytes, expected %d",
- (int)calld->incoming_message.length,
- (int)calld->incoming_message_length);
+ char *message = NULL;
+ gpr_asprintf(&message,
+ "Receiving message overflow; read %d bytes, expected %d",
+ (int)calld->incoming_message.length,
+ (int)calld->incoming_message_length);
recv_error(chand, calld, __LINE__, message);
+ gpr_free(message);
return;
} else if (calld->incoming_message.length ==
calld->incoming_message_length) {
@@ -443,11 +446,13 @@ static void recv_batch(void *user_data, grpc_transport *transport,
final_state == GRPC_STREAM_CLOSED)) {
calld->got_read_close = 1;
if (calld->reading_message) {
- char message[128];
- sprintf(message, "Last message truncated; read %d bytes, expected %d",
- (int)calld->incoming_message.length,
- (int)calld->incoming_message_length);
+ char *message = NULL;
+ gpr_asprintf(&message,
+ "Last message truncated; read %d bytes, expected %d",
+ (int)calld->incoming_message.length,
+ (int)calld->incoming_message_length);
recv_error(chand, calld, __LINE__, message);
+ gpr_free(message);
}
call_op.type = GRPC_RECV_HALF_CLOSE;
call_op.dir = GRPC_CALL_UP;
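[Editor's note, not part of the commit: each fixed char message[128] filled by sprintf above becomes the same allocate-format-free pattern with gpr_asprintf, so long messages can no longer overflow the stack buffer. A reduced illustration (report_overflow is hypothetical; gpr_log stands in for recv_error):

#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

static void report_overflow(size_t got, size_t expected) {
  char *message = NULL;
  gpr_asprintf(&message, "Receiving message overflow; read %d bytes, expected %d",
               (int)got, (int)expected);
  gpr_log(GPR_ERROR, "%s", message); /* use the message, then release it */
  gpr_free(message);
}
]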
diff --git a/src/core/httpcli/format_request.c b/src/core/httpcli/format_request.c
index 7a44f1266f..58bb7c740e 100644
--- a/src/core/httpcli/format_request.c
+++ b/src/core/httpcli/format_request.c
@@ -37,67 +37,57 @@
#include <stdio.h>
#include <string.h>
+#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/slice.h>
#include <grpc/support/useful.h>
-typedef struct {
- size_t length;
- size_t capacity;
- char *data;
-} sbuf;
-
-static void sbuf_append(sbuf *buf, const char *bytes, size_t len) {
- if (buf->length + len > buf->capacity) {
- buf->capacity = GPR_MAX(buf->length + len, buf->capacity * 3 / 2);
- buf->data = gpr_realloc(buf->data, buf->capacity);
- }
- memcpy(buf->data + buf->length, bytes, len);
- buf->length += len;
-}
-
-static void sbprintf(sbuf *buf, const char *fmt, ...) {
- char temp[GRPC_HTTPCLI_MAX_HEADER_LENGTH];
- size_t len;
- va_list args;
-
- va_start(args, fmt);
- len = vsprintf(temp, fmt, args);
- va_end(args);
-
- sbuf_append(buf, temp, len);
-}
-
-static void fill_common_header(const grpc_httpcli_request *request, sbuf *buf) {
+static void fill_common_header(const grpc_httpcli_request *request, gpr_strvec *buf) {
size_t i;
- sbprintf(buf, "%s HTTP/1.0\r\n", request->path);
+ gpr_strvec_add(buf, gpr_strdup(request->path));
+ gpr_strvec_add(buf, gpr_strdup(" HTTP/1.0\r\n"));
/* just in case some crazy server really expects HTTP/1.1 */
- sbprintf(buf, "Host: %s\r\n", request->host);
- sbprintf(buf, "Connection: close\r\n");
- sbprintf(buf, "User-Agent: %s\r\n", GRPC_HTTPCLI_USER_AGENT);
+ gpr_strvec_add(buf, gpr_strdup("Host: "));
+ gpr_strvec_add(buf, gpr_strdup(request->host));
+ gpr_strvec_add(buf, gpr_strdup("\r\n"));
+ gpr_strvec_add(buf, gpr_strdup("Connection: close\r\n"));
+ gpr_strvec_add(buf, gpr_strdup("User-Agent: "GRPC_HTTPCLI_USER_AGENT"\r\n"));
/* user supplied headers */
for (i = 0; i < request->hdr_count; i++) {
- sbprintf(buf, "%s: %s\r\n", request->hdrs[i].key, request->hdrs[i].value);
+ gpr_strvec_add(buf, gpr_strdup(request->hdrs[i].key));
+ gpr_strvec_add(buf, gpr_strdup(": "));
+ gpr_strvec_add(buf, gpr_strdup(request->hdrs[i].value));
+ gpr_strvec_add(buf, gpr_strdup("\r\n"));
}
}
gpr_slice grpc_httpcli_format_get_request(const grpc_httpcli_request *request) {
- sbuf out = {0, 0, NULL};
+ gpr_strvec out;
+ char *flat;
+ size_t flat_len;
- sbprintf(&out, "GET ");
+ gpr_strvec_init(&out);
+ gpr_strvec_add(&out, gpr_strdup("GET "));
fill_common_header(request, &out);
- sbprintf(&out, "\r\n");
+ gpr_strvec_add(&out, gpr_strdup("\r\n"));
- return gpr_slice_new(out.data, out.length, gpr_free);
+ flat = gpr_strvec_flatten(&out, &flat_len);
+ gpr_strvec_destroy(&out);
+
+ return gpr_slice_new(flat, flat_len, gpr_free);
}
gpr_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
const char *body_bytes,
size_t body_size) {
- sbuf out = {0, 0, NULL};
+ gpr_strvec out;
+ char *tmp;
+ size_t out_len;
size_t i;
- sbprintf(&out, "POST ");
+ gpr_strvec_init(&out);
+
+ gpr_strvec_add(&out, gpr_strdup("POST "));
fill_common_header(request, &out);
if (body_bytes) {
gpr_uint8 has_content_type = 0;
@@ -108,14 +98,18 @@ gpr_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
}
}
if (!has_content_type) {
- sbprintf(&out, "Content-Type: text/plain\r\n");
+ gpr_strvec_add(&out, gpr_strdup("Content-Type: text/plain\r\n"));
}
- sbprintf(&out, "Content-Length: %lu\r\n", (unsigned long)body_size);
+ gpr_asprintf(&tmp, "Content-Length: %lu\r\n", (unsigned long)body_size);
+ gpr_strvec_add(&out, tmp);
}
- sbprintf(&out, "\r\n");
+ gpr_strvec_add(&out, gpr_strdup("\r\n"));
+ tmp = gpr_strvec_flatten(&out, &out_len);
if (body_bytes) {
- sbuf_append(&out, body_bytes, body_size);
+ tmp = gpr_realloc(tmp, out_len + body_size);
+ memcpy(tmp + out_len, body_bytes, body_size);
+ out_len += body_size;
}
- return gpr_slice_new(out.data, out.length, gpr_free);
+ return gpr_slice_new(tmp, out_len, gpr_free);
}
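[Editor's note, not part of the commit: format_request.c now builds the header block as a gpr_strvec, flattens it, and hands the flat buffer to gpr_slice_new with gpr_free as the destroy function, so the resulting slice owns the allocation. A reduced sketch of that flow (format_simple_get and its parameters are hypothetical):

#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/slice.h>

static gpr_slice format_simple_get(const char *path, const char *host) {
  gpr_strvec out;
  char *flat;
  size_t flat_len;

  gpr_strvec_init(&out);
  gpr_strvec_add(&out, gpr_strdup("GET "));
  gpr_strvec_add(&out, gpr_strdup(path));
  gpr_strvec_add(&out, gpr_strdup(" HTTP/1.0\r\nHost: "));
  gpr_strvec_add(&out, gpr_strdup(host));
  gpr_strvec_add(&out, gpr_strdup("\r\nConnection: close\r\n\r\n"));

  flat = gpr_strvec_flatten(&out, &flat_len);
  gpr_strvec_destroy(&out);
  /* the slice takes ownership of 'flat' and frees it with gpr_free */
  return gpr_slice_new(flat, flat_len, gpr_free);
}
]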
diff --git a/src/core/httpcli/httpcli.c b/src/core/httpcli/httpcli.c
index 2143eeb63d..acd9fa7b55 100644
--- a/src/core/httpcli/httpcli.c
+++ b/src/core/httpcli/httpcli.c
@@ -31,6 +31,7 @@
*
*/
+#include "src/core/iomgr/sockaddr.h"
#include "src/core/httpcli/httpcli.h"
#include <string.h>
@@ -44,9 +45,9 @@
#include "src/core/security/security_context.h"
#include "src/core/security/google_root_certs.h"
#include "src/core/security/secure_transport_setup.h"
+#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string.h>
typedef struct {
gpr_slice request_text;
diff --git a/src/core/httpcli/httpcli_security_context.c b/src/core/httpcli/httpcli_security_context.c
index c7b9b330f0..d074e163f1 100644
--- a/src/core/httpcli/httpcli_security_context.c
+++ b/src/core/httpcli/httpcli_security_context.c
@@ -36,9 +36,9 @@
#include <string.h>
#include "src/core/security/secure_transport_setup.h"
+#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string.h>
#include "src/core/tsi/ssl_transport_security.h"
typedef struct {
diff --git a/src/core/iomgr/resolve_address_posix.c b/src/core/iomgr/resolve_address.c
index c9c2c5378a..01681168ce 100644
--- a/src/core/iomgr/resolve_address_posix.c
+++ b/src/core/iomgr/resolve_address.c
@@ -33,20 +33,17 @@
#define _POSIX_SOURCE
+#include "src/core/iomgr/sockaddr.h"
#include "src/core/iomgr/resolve_address.h"
#include <sys/types.h>
-#include <sys/socket.h>
-#include <netdb.h>
-#include <unistd.h>
#include <string.h>
#include "src/core/iomgr/iomgr_internal.h"
#include "src/core/iomgr/sockaddr_utils.h"
-#include "src/core/iomgr/socket_utils_posix.h"
+#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string.h>
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
diff --git a/src/core/iomgr/sockaddr_posix.h b/src/core/iomgr/sockaddr_posix.h
index 79ef3ca3cf..53c80386d4 100644
--- a/src/core/iomgr/sockaddr_posix.h
+++ b/src/core/iomgr/sockaddr_posix.h
@@ -34,7 +34,11 @@
#ifndef __GRPC_INTERNAL_IOMGR_SOCKADDR_POSIX_H_
#define __GRPC_INTERNAL_IOMGR_SOCKADDR_POSIX_H_
+#include <arpa/inet.h>
#include <sys/socket.h>
+#include <sys/types.h>
#include <netinet/in.h>
+#include <netdb.h>
+#include <unistd.h>
#endif /* __GRPC_INTERNAL_IOMGR_SOCKADDR_POSIX_H_ */
diff --git a/src/core/iomgr/sockaddr_utils.c b/src/core/iomgr/sockaddr_utils.c
index eca14a4f39..07bf7b3a35 100644
--- a/src/core/iomgr/sockaddr_utils.c
+++ b/src/core/iomgr/sockaddr_utils.c
@@ -33,12 +33,11 @@
#include "src/core/iomgr/sockaddr_utils.h"
-#include <arpa/inet.h>
#include <errno.h>
#include <string.h>
+#include "src/core/support/string.h"
#include <grpc/support/host_port.h>
-#include <grpc/support/string.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
diff --git a/src/core/iomgr/sockaddr_win32.h b/src/core/iomgr/sockaddr_win32.h
index 751ac3d2e7..cdea33fec0 100644
--- a/src/core/iomgr/sockaddr_win32.h
+++ b/src/core/iomgr/sockaddr_win32.h
@@ -34,4 +34,6 @@
#ifndef __GRPC_INTERNAL_IOMGR_SOCKADDR_WIN32_H_
#define __GRPC_INTERNAL_IOMGR_SOCKADDR_WIN32_H_
+#include <ws2tcpip.h>
+
#endif // __GRPC_INTERNAL_IOMGR_SOCKADDR_WIN32_H_
diff --git a/src/core/iomgr/socket_utils_common_posix.c b/src/core/iomgr/socket_utils_common_posix.c
index 3a0639f356..1854285b5a 100644
--- a/src/core/iomgr/socket_utils_common_posix.c
+++ b/src/core/iomgr/socket_utils_common_posix.c
@@ -50,8 +50,8 @@
#include <errno.h>
#include "src/core/iomgr/sockaddr_utils.h"
+#include "src/core/support/string.h"
#include <grpc/support/host_port.h>
-#include <grpc/support/string.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/sync.h>
diff --git a/src/core/iomgr/tcp_posix.c b/src/core/iomgr/tcp_posix.c
index 64996bd07d..a9b59df885 100644
--- a/src/core/iomgr/tcp_posix.c
+++ b/src/core/iomgr/tcp_posix.c
@@ -44,10 +44,10 @@
#include <sys/socket.h>
#include <unistd.h>
+#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/slice.h>
-#include <grpc/support/string.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
diff --git a/src/core/security/credentials.c b/src/core/security/credentials.c
index 628963e46c..2f75556e7b 100644
--- a/src/core/security/credentials.c
+++ b/src/core/security/credentials.c
@@ -36,9 +36,9 @@
#include "src/core/httpcli/httpcli.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/security/json_token.h"
+#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
@@ -157,7 +157,7 @@ static void ssl_server_destroy(grpc_server_credentials *creds) {
if (c->config.pem_private_keys[i] != NULL) {
gpr_free(c->config.pem_private_keys[i]);
}
- if (c->config.pem_cert_chains[i]!= NULL) {
+ if (c->config.pem_cert_chains[i] != NULL) {
gpr_free(c->config.pem_cert_chains[i]);
}
}
@@ -354,7 +354,6 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response(
cJSON *access_token = NULL;
cJSON *token_type = NULL;
cJSON *expires_in = NULL;
- size_t new_access_token_size = 0;
json = cJSON_Parse(null_terminated_body);
if (json == NULL) {
gpr_log(GPR_ERROR, "Could not parse JSON from %s", null_terminated_body);
@@ -384,12 +383,8 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response(
status = GRPC_CREDENTIALS_ERROR;
goto end;
}
- new_access_token_size = strlen(token_type->valuestring) + 1 +
- strlen(access_token->valuestring) + 1;
- new_access_token = gpr_malloc(new_access_token_size);
- /* C89 does not have snprintf :(. */
- sprintf(new_access_token, "%s %s", token_type->valuestring,
- access_token->valuestring);
+ gpr_asprintf(&new_access_token, "%s %s", token_type->valuestring,
+ access_token->valuestring);
token_lifetime->tv_sec = expires_in->valueint;
token_lifetime->tv_nsec = 0;
if (*token_elem != NULL) grpc_mdelem_unref(*token_elem);
@@ -539,9 +534,7 @@ static void service_account_fetch_oauth2(
response_cb(metadata_req, &response);
return;
}
- body = gpr_malloc(strlen(GRPC_SERVICE_ACCOUNT_POST_BODY_PREFIX) +
- strlen(jwt) + 1);
- sprintf(body, "%s%s", GRPC_SERVICE_ACCOUNT_POST_BODY_PREFIX, jwt);
+ gpr_asprintf(&body, "%s%s", GRPC_SERVICE_ACCOUNT_POST_BODY_PREFIX, jwt);
memset(&request, 0, sizeof(grpc_httpcli_request));
request.host = GRPC_SERVICE_ACCOUNT_HOST;
request.path = GRPC_SERVICE_ACCOUNT_TOKEN_PATH;
diff --git a/src/core/security/json_token.c b/src/core/security/json_token.c
index 14ee758e8b..82bd9b505a 100644
--- a/src/core/security/json_token.c
+++ b/src/core/security/json_token.c
@@ -37,9 +37,9 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string.h>
#include "src/core/security/base64.h"
+#include "src/core/support/string.h"
#include <openssl/bio.h>
#include <openssl/evp.h>
diff --git a/src/core/security/secure_endpoint.c b/src/core/security/secure_endpoint.c
index e73767c1aa..9f12cf5d60 100644
--- a/src/core/security/secure_endpoint.c
+++ b/src/core/security/secure_endpoint.c
@@ -32,11 +32,11 @@
*/
#include "src/core/security/secure_endpoint.h"
+#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/slice_buffer.h>
#include <grpc/support/slice.h>
-#include <grpc/support/string.h>
#include <grpc/support/sync.h>
#include "src/core/tsi/transport_security_interface.h"
diff --git a/src/core/security/security_context.c b/src/core/security/security_context.c
index cce3c7fe04..58cd458415 100644
--- a/src/core/security/security_context.c
+++ b/src/core/security/security_context.c
@@ -39,12 +39,12 @@
#include "src/core/channel/http_client_filter.h"
#include "src/core/security/credentials.h"
#include "src/core/security/secure_endpoint.h"
+#include "src/core/support/string.h"
#include "src/core/surface/lame_client.h"
#include "src/core/transport/chttp2/alpn.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/slice_buffer.h>
-#include <grpc/support/string.h>
#include "src/core/tsi/fake_transport_security.h"
#include "src/core/tsi/ssl_transport_security.h"
diff --git a/src/core/statistics/census_rpc_stats.c b/src/core/statistics/census_rpc_stats.c
index 39094b5f65..dd3c07e80b 100644
--- a/src/core/statistics/census_rpc_stats.c
+++ b/src/core/statistics/census_rpc_stats.c
@@ -39,9 +39,9 @@
#include "src/core/statistics/census_tracing.h"
#include "src/core/statistics/window_stats.h"
#include "src/core/support/murmur_hash.h"
+#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string.h>
#include <grpc/support/sync.h>
#define NUM_INTERVALS 3
diff --git a/src/core/statistics/census_tracing.c b/src/core/statistics/census_tracing.c
index 1e61602071..3c4ba66f5f 100644
--- a/src/core/statistics/census_tracing.c
+++ b/src/core/statistics/census_tracing.c
@@ -38,10 +38,10 @@
#include "src/core/statistics/census_rpc_stats.h"
#include "src/core/statistics/hash_table.h"
+#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
-#include <grpc/support/string.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
diff --git a/src/core/support/cmdline.c b/src/core/support/cmdline.c
index ff163a1f6c..a55da9dd18 100644
--- a/src/core/support/cmdline.c
+++ b/src/core/support/cmdline.c
@@ -37,9 +37,9 @@
#include <stdio.h>
#include <string.h>
+#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string.h>
typedef enum { ARGTYPE_INT, ARGTYPE_BOOL, ARGTYPE_STRING } argtype;
diff --git a/src/core/support/host_port.c b/src/core/support/host_port.c
index 02500551fc..446c11ebec 100644
--- a/src/core/support/host_port.c
+++ b/src/core/support/host_port.c
@@ -35,8 +35,8 @@
#include <string.h>
+#include "src/core/support/string.h"
#include <grpc/support/log.h>
-#include <grpc/support/string.h>
int gpr_join_host_port(char **out, const char *host, int port) {
if (host[0] != '[' && strchr(host, ':') != NULL) {
diff --git a/src/core/support/murmur_hash.c b/src/core/support/murmur_hash.c
index 08b1eb80d8..892e360968 100644
--- a/src/core/support/murmur_hash.c
+++ b/src/core/support/murmur_hash.c
@@ -52,7 +52,7 @@ gpr_uint32 gpr_murmur_hash3(const void *key, size_t len, gpr_uint32 seed) {
int i;
gpr_uint32 h1 = seed;
- gpr_uint32 k1 = 0;
+ gpr_uint32 k1;
const gpr_uint32 c1 = 0xcc9e2d51;
const gpr_uint32 c2 = 0x1b873593;
@@ -62,7 +62,7 @@ gpr_uint32 gpr_murmur_hash3(const void *key, size_t len, gpr_uint32 seed) {
/* body */
for (i = -nblocks; i; i++) {
- gpr_uint32 k1 = GETBLOCK32(blocks, i);
+ k1 = GETBLOCK32(blocks, i);
k1 *= c1;
k1 = ROTL32(k1, 15);
@@ -73,6 +73,8 @@ gpr_uint32 gpr_murmur_hash3(const void *key, size_t len, gpr_uint32 seed) {
h1 = h1 * 5 + 0xe6546b64;
}
+ k1 = 0;
+
/* tail */
switch (len & 3) {
case 3:
diff --git a/src/core/support/string.c b/src/core/support/string.c
index 7960547735..97bce60f94 100644
--- a/src/core/support/string.c
+++ b/src/core/support/string.c
@@ -14,7 +14,7 @@
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
+ * contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@@ -31,7 +31,7 @@
*
*/
-#include <grpc/support/string.h>
+#include "src/core/support/string.h"
#include <ctype.h>
#include <stddef.h>
@@ -122,3 +122,79 @@ int gpr_parse_bytes_to_uint32(const char *buf, size_t len, gpr_uint32 *result) {
*result = out;
return 1;
}
+
+void gpr_reverse_bytes(char *str, int len) {
+ char *p1, *p2;
+ for (p1 = str, p2 = str + len - 1; p2 > p1; ++p1, --p2) {
+ char temp = *p1;
+ *p1 = *p2;
+ *p2 = temp;
+ }
+}
+
+int gpr_ltoa(long value, char *string) {
+ int i = 0;
+ int neg = value < 0;
+
+ if (value == 0) {
+ string[0] = '0';
+ string[1] = 0;
+ return 1;
+ }
+
+ if (neg) value = -value;
+ while (value) {
+ string[i++] = '0' + value % 10;
+ value /= 10;
+ }
+ if (neg) string[i++] = '-';
+ gpr_reverse_bytes(string, i);
+ string[i] = 0;
+ return i;
+}
+
+char *gpr_strjoin(const char **strs, size_t nstrs, size_t *final_length) {
+ size_t out_length = 0;
+ size_t i;
+ char *out;
+ for (i = 0; i < nstrs; i++) {
+ out_length += strlen(strs[i]);
+ }
+ out_length += 1; /* null terminator */
+ out = gpr_malloc(out_length);
+ out_length = 0;
+ for (i = 0; i < nstrs; i++) {
+ size_t slen = strlen(strs[i]);
+ memcpy(out + out_length, strs[i], slen);
+ out_length += slen;
+ }
+ out[out_length] = 0;
+ if (final_length != NULL) {
+ *final_length = out_length;
+ }
+ return out;
+}
+
+void gpr_strvec_init(gpr_strvec *sv) {
+ memset(sv, 0, sizeof(*sv));
+}
+
+void gpr_strvec_destroy(gpr_strvec *sv) {
+ size_t i;
+ for (i = 0; i < sv->count; i++) {
+ gpr_free(sv->strs[i]);
+ }
+ gpr_free(sv->strs);
+}
+
+void gpr_strvec_add(gpr_strvec *sv, char *str) {
+ if (sv->count == sv->capacity) {
+ sv->capacity = GPR_MAX(sv->capacity + 8, sv->capacity * 2);
+ sv->strs = gpr_realloc(sv->strs, sizeof(char*) * sv->capacity);
+ }
+ sv->strs[sv->count++] = str;
+}
+
+char *gpr_strvec_flatten(gpr_strvec *sv, size_t *final_length) {
+ return gpr_strjoin((const char**)sv->strs, sv->count, final_length);
+}
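[Editor's note, not part of the commit: gpr_strjoin is the primitive that gpr_strvec_flatten delegates to; it sums the lengths, copies each piece, and optionally reports the combined length. It can also be called directly, as in this illustrative sketch (join_example is hypothetical):

#include "src/core/support/string.h"
#include <grpc/support/alloc.h>

static char *join_example(void) {
  const char *parts[] = {"grpc", "-", "status"};
  size_t len;
  char *joined = gpr_strjoin(parts, 3, &len); /* joined == "grpc-status", len == 11 */
  return joined;                              /* caller frees with gpr_free() */
}
]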
diff --git a/src/core/support/string.h b/src/core/support/string.h
new file mode 100644
index 0000000000..64e06d3b6a
--- /dev/null
+++ b/src/core/support/string.h
@@ -0,0 +1,109 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __GRPC_SUPPORT_STRING_H__
+#define __GRPC_SUPPORT_STRING_H__
+
+#include <stddef.h>
+
+#include <grpc/support/port_platform.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* String utility functions */
+
+/* Returns a copy of src that can be passed to gpr_free().
+ If allocation fails or if src is NULL, returns NULL. */
+char *gpr_strdup(const char *src);
+
+/* flag to include plaintext after a hexdump */
+#define GPR_HEXDUMP_PLAINTEXT 0x00000001
+
+/* Converts array buf, of length len, into a hexadecimal dump. Result should
+ be freed with gpr_free() */
+char *gpr_hexdump(const char *buf, size_t len, gpr_uint32 flags);
+
+/* Parses an array of bytes into an integer (base 10). Returns 1 on success,
+ 0 on failure. */
+int gpr_parse_bytes_to_uint32(const char *data, size_t length,
+ gpr_uint32 *result);
+
+/* Minimum buffer size for calling ltoa */
+#define GPR_LTOA_MIN_BUFSIZE (3 * sizeof(long))
+
+/* Convert a long to a string in base 10; returns the length of the
+ output string (or 0 on failure).
+ output must be at least GPR_LTOA_MIN_BUFSIZE bytes long. */
+int gpr_ltoa(long value, char *output);
+
+/* Reverse a run of bytes */
+void gpr_reverse_bytes(char *str, int len);
+
+/* printf to a newly-allocated string. The set of supported formats may vary
+ between platforms.
+
+ On success, returns the number of bytes printed (excluding the final '\0'),
+ and *strp points to a string which must later be destroyed with gpr_free().
+
+ On error, returns -1 and sets *strp to NULL. If the format string is bad,
+ the result is undefined. */
+int gpr_asprintf(char **strp, const char *format, ...);
+
+/* Join a set of strings, returning the resulting string.
+ Total combined length (excluding null terminator) is returned in total_length
+ if it is non-null. */
+char *gpr_strjoin(const char **strs, size_t nstrs, size_t *total_length);
+
+/* A vector of strings... for building up a final string one piece at a time */
+typedef struct {
+ char **strs;
+ size_t count;
+ size_t capacity;
+} gpr_strvec;
+
+/* Initialize/destroy */
+void gpr_strvec_init(gpr_strvec *strs);
+void gpr_strvec_destroy(gpr_strvec *strs);
+/* Add a string to a strvec, takes ownership of the string */
+void gpr_strvec_add(gpr_strvec *strs, char *add);
+/* Return a joined string with all added substrings, optionally setting
+ total_length as per gpr_strjoin */
+char *gpr_strvec_flatten(gpr_strvec *strs, size_t *total_length);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __GRPC_SUPPORT_STRING_H__ */
diff --git a/src/core/surface/byte_buffer.c b/src/core/surface/byte_buffer.c
index 27a6c6e33d..d1be41074d 100644
--- a/src/core/surface/byte_buffer.c
+++ b/src/core/surface/byte_buffer.c
@@ -49,6 +49,17 @@ grpc_byte_buffer *grpc_byte_buffer_create(gpr_slice *slices, size_t nslices) {
return bb;
}
+grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb) {
+ switch (bb->type) {
+ case GRPC_BB_SLICE_BUFFER:
+ return grpc_byte_buffer_create(bb->data.slice_buffer.slices,
+ bb->data.slice_buffer.count);
+ }
+ gpr_log(GPR_INFO, "should never get here");
+ abort();
+ return NULL;
+}
+
void grpc_byte_buffer_destroy(grpc_byte_buffer *bb) {
switch (bb->type) {
case GRPC_BB_SLICE_BUFFER:
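[Editor's note, not part of the commit: grpc_byte_buffer_copy is added so call.c can stash a caller-supplied message while SEND_START is still in flight (see the pending_write handling later in this diff). An illustrative round trip; gpr_slice_from_copied_string, the include locations, and the slice refcounting behavior are assumptions about the gRPC C API of this era, not statements from the diff:

#include <grpc/grpc.h>
#include <grpc/byte_buffer.h>
#include <grpc/support/slice.h>

static void copy_example(void) {
  gpr_slice slice = gpr_slice_from_copied_string("hello");   /* assumed helper */
  grpc_byte_buffer *original = grpc_byte_buffer_create(&slice, 1);
  grpc_byte_buffer *copy = grpc_byte_buffer_copy(original);  /* independent lifetime */
  gpr_slice_unref(slice);
  grpc_byte_buffer_destroy(original);
  grpc_byte_buffer_destroy(copy); /* the copy stays valid until its owner destroys it */
}
]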
diff --git a/src/core/surface/call.c b/src/core/surface/call.c
index 46502fb6b1..14d990df6a 100644
--- a/src/core/surface/call.c
+++ b/src/core/surface/call.c
@@ -35,11 +35,11 @@
#include "src/core/channel/channel_stack.h"
#include "src/core/channel/metadata_buffer.h"
#include "src/core/iomgr/alarm.h"
+#include "src/core/support/string.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/completion_queue.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string.h>
#include <stdio.h>
#include <stdlib.h>
@@ -173,11 +173,14 @@ struct grpc_call {
/* protects variables in this section */
gpr_mu read_mu;
+ gpr_uint8 received_start;
+ gpr_uint8 start_ok;
gpr_uint8 reads_done;
gpr_uint8 received_finish;
gpr_uint8 received_metadata;
gpr_uint8 have_read;
gpr_uint8 have_alarm;
+ gpr_uint8 pending_writes_done;
gpr_uint8 got_status_code;
/* The current outstanding read message tag (only valid if have_read == 1) */
void *read_tag;
@@ -190,6 +193,8 @@ struct grpc_call {
/* The current outstanding send message/context/invoke/end tag (only valid if
have_write == 1) */
void *write_tag;
+ grpc_byte_buffer *pending_write;
+ gpr_uint32 pending_write_flags;
/* The final status of the call */
grpc_status_code status_code;
@@ -227,11 +232,15 @@ grpc_call *grpc_call_create(grpc_channel *channel,
call->have_alarm = 0;
call->received_metadata = 0;
call->got_status_code = 0;
+ call->start_ok = 0;
call->status_code =
server_transport_data != NULL ? GRPC_STATUS_OK : GRPC_STATUS_UNKNOWN;
call->status_details = NULL;
call->received_finish = 0;
call->reads_done = 0;
+ call->received_start = 0;
+ call->pending_write = NULL;
+ call->pending_writes_done = 0;
grpc_metadata_buffer_init(&call->incoming_metadata);
gpr_ref_init(&call->internal_refcount, 1);
grpc_call_stack_init(channel_stack, server_transport_data,
@@ -360,16 +369,6 @@ grpc_call_error grpc_call_add_metadata(grpc_call *call, grpc_metadata *metadata,
return GRPC_CALL_OK;
}
-static void done_invoke(void *user_data, grpc_op_error error) {
- grpc_call *call = user_data;
- void *tag = call->write_tag;
-
- GPR_ASSERT(call->have_write);
- call->have_write = 0;
- call->write_tag = INVALID_TAG;
- grpc_cq_end_invoke_accepted(call->cq, tag, call, NULL, NULL, error);
-}
-
static void finish_call(grpc_call *call) {
size_t count;
grpc_metadata *elements;
@@ -384,11 +383,81 @@ static void finish_call(grpc_call *call) {
elements, count);
}
-grpc_call_error grpc_call_start_invoke(grpc_call *call,
- grpc_completion_queue *cq,
- void *invoke_accepted_tag,
- void *metadata_read_tag,
- void *finished_tag, gpr_uint32 flags) {
+static void done_write(void *user_data, grpc_op_error error) {
+ grpc_call *call = user_data;
+ void *tag = call->write_tag;
+
+ GPR_ASSERT(call->have_write);
+ call->have_write = 0;
+ call->write_tag = INVALID_TAG;
+ grpc_cq_end_write_accepted(call->cq, tag, call, NULL, NULL, error);
+}
+
+static void done_writes_done(void *user_data, grpc_op_error error) {
+ grpc_call *call = user_data;
+ void *tag = call->write_tag;
+
+ GPR_ASSERT(call->have_write);
+ call->have_write = 0;
+ call->write_tag = INVALID_TAG;
+ grpc_cq_end_finish_accepted(call->cq, tag, call, NULL, NULL, error);
+}
+
+static void call_started(void *user_data, grpc_op_error error) {
+ grpc_call *call = user_data;
+ grpc_call_element *elem;
+ grpc_byte_buffer *pending_write = NULL;
+ gpr_uint32 pending_write_flags = 0;
+ gpr_uint8 pending_writes_done = 0;
+ int ok;
+ grpc_call_op op;
+
+ gpr_mu_lock(&call->read_mu);
+ GPR_ASSERT(!call->received_start);
+ call->received_start = 1;
+ ok = call->start_ok = (error == GRPC_OP_OK);
+ pending_write = call->pending_write;
+ pending_write_flags = call->pending_write_flags;
+ pending_writes_done = call->pending_writes_done;
+ gpr_mu_unlock(&call->read_mu);
+
+ if (pending_write) {
+ if (ok) {
+ op.type = GRPC_SEND_MESSAGE;
+ op.dir = GRPC_CALL_DOWN;
+ op.flags = pending_write_flags;
+ op.done_cb = done_write;
+ op.user_data = call;
+ op.data.message = pending_write;
+
+ elem = CALL_ELEM_FROM_CALL(call, 0);
+ elem->filter->call_op(elem, NULL, &op);
+ } else {
+ done_write(call, error);
+ }
+ grpc_byte_buffer_destroy(pending_write);
+ }
+ if (pending_writes_done) {
+ if (ok) {
+ op.type = GRPC_SEND_FINISH;
+ op.dir = GRPC_CALL_DOWN;
+ op.flags = 0;
+ op.done_cb = done_writes_done;
+ op.user_data = call;
+
+ elem = CALL_ELEM_FROM_CALL(call, 0);
+ elem->filter->call_op(elem, NULL, &op);
+ } else {
+ done_writes_done(call, error);
+ }
+ }
+
+ grpc_call_internal_unref(call);
+}
+
+grpc_call_error grpc_call_invoke(grpc_call *call, grpc_completion_queue *cq,
+ void *metadata_read_tag, void *finished_tag,
+ gpr_uint32 flags) {
grpc_call_element *elem;
grpc_call_op op;
@@ -420,7 +489,6 @@ grpc_call_error grpc_call_start_invoke(grpc_call *call,
/* inform the completion queue of an incoming operation */
grpc_cq_begin_op(cq, call, GRPC_FINISHED);
grpc_cq_begin_op(cq, call, GRPC_CLIENT_METADATA_READ);
- grpc_cq_begin_op(cq, call, GRPC_INVOKE_ACCEPTED);
gpr_mu_lock(&call->read_mu);
@@ -431,8 +499,6 @@ grpc_call_error grpc_call_start_invoke(grpc_call *call,
if (call->received_finish) {
/* handle early cancellation */
- grpc_cq_end_invoke_accepted(call->cq, invoke_accepted_tag, call, NULL, NULL,
- GRPC_OP_ERROR);
grpc_cq_end_client_metadata_read(call->cq, metadata_read_tag, call, NULL,
NULL, 0, NULL);
finish_call(call);
@@ -442,20 +508,18 @@ grpc_call_error grpc_call_start_invoke(grpc_call *call,
return GRPC_CALL_OK;
}
- call->write_tag = invoke_accepted_tag;
call->metadata_tag = metadata_read_tag;
- call->have_write = 1;
-
gpr_mu_unlock(&call->read_mu);
/* call down the filter stack */
op.type = GRPC_SEND_START;
op.dir = GRPC_CALL_DOWN;
op.flags = flags;
- op.done_cb = done_invoke;
+ op.done_cb = call_started;
op.data.start.pollset = grpc_cq_pollset(cq);
op.user_data = call;
+ grpc_call_internal_ref(call);
elem = CALL_ELEM_FROM_CALL(call, 0);
elem->filter->call_op(elem, NULL, &op);
@@ -486,6 +550,7 @@ grpc_call_error grpc_call_server_accept(grpc_call *call,
call->state = CALL_BOUNDCQ;
call->cq = cq;
call->finished_tag = finished_tag;
+ call->received_start = 1;
if (prq_is_empty(&call->prq) && call->received_finish) {
finish_call(call);
@@ -535,26 +600,6 @@ grpc_call_error grpc_call_server_end_initial_metadata(grpc_call *call,
return GRPC_CALL_OK;
}
-static void done_writes_done(void *user_data, grpc_op_error error) {
- grpc_call *call = user_data;
- void *tag = call->write_tag;
-
- GPR_ASSERT(call->have_write);
- call->have_write = 0;
- call->write_tag = INVALID_TAG;
- grpc_cq_end_finish_accepted(call->cq, tag, call, NULL, NULL, error);
-}
-
-static void done_write(void *user_data, grpc_op_error error) {
- grpc_call *call = user_data;
- void *tag = call->write_tag;
-
- GPR_ASSERT(call->have_write);
- call->have_write = 0;
- call->write_tag = INVALID_TAG;
- grpc_cq_end_write_accepted(call->cq, tag, call, NULL, NULL, error);
-}
-
void grpc_call_client_initial_metadata_complete(
grpc_call_element *surface_element) {
grpc_call *call = grpc_call_from_top_element(surface_element);
@@ -617,7 +662,7 @@ grpc_call_error grpc_call_start_read(grpc_call *call, void *tag) {
} else {
call->read_tag = tag;
call->have_read = 1;
- request_more = 1;
+ request_more = call->received_start;
}
} else if (prq_is_empty(&call->prq) && call->received_finish) {
finish_call(call);
@@ -654,8 +699,6 @@ grpc_call_error grpc_call_start_write(grpc_call *call,
grpc_cq_begin_op(call->cq, call, GRPC_WRITE_ACCEPTED);
- /* for now we do no buffering, so a NULL byte_buffer can have no impact
- on our behavior -- succeed immediately */
/* TODO(ctiller): if flags & GRPC_WRITE_BUFFER_HINT == 0, this indicates a
flush, and that flush should be propogated down from here */
if (byte_buffer == NULL) {
@@ -666,15 +709,25 @@ grpc_call_error grpc_call_start_write(grpc_call *call,
call->write_tag = tag;
call->have_write = 1;
- op.type = GRPC_SEND_MESSAGE;
- op.dir = GRPC_CALL_DOWN;
- op.flags = flags;
- op.done_cb = done_write;
- op.user_data = call;
- op.data.message = byte_buffer;
+ gpr_mu_lock(&call->read_mu);
+ if (!call->received_start) {
+ call->pending_write = grpc_byte_buffer_copy(byte_buffer);
+ call->pending_write_flags = flags;
- elem = CALL_ELEM_FROM_CALL(call, 0);
- elem->filter->call_op(elem, NULL, &op);
+ gpr_mu_unlock(&call->read_mu);
+ } else {
+ gpr_mu_unlock(&call->read_mu);
+
+ op.type = GRPC_SEND_MESSAGE;
+ op.dir = GRPC_CALL_DOWN;
+ op.flags = flags;
+ op.done_cb = done_write;
+ op.user_data = call;
+ op.data.message = byte_buffer;
+
+ elem = CALL_ELEM_FROM_CALL(call, 0);
+ elem->filter->call_op(elem, NULL, &op);
+ }
return GRPC_CALL_OK;
}
@@ -706,14 +759,23 @@ grpc_call_error grpc_call_writes_done(grpc_call *call, void *tag) {
call->write_tag = tag;
call->have_write = 1;
- op.type = GRPC_SEND_FINISH;
- op.dir = GRPC_CALL_DOWN;
- op.flags = 0;
- op.done_cb = done_writes_done;
- op.user_data = call;
+ gpr_mu_lock(&call->read_mu);
+ if (!call->received_start) {
+ call->pending_writes_done = 1;
- elem = CALL_ELEM_FROM_CALL(call, 0);
- elem->filter->call_op(elem, NULL, &op);
+ gpr_mu_unlock(&call->read_mu);
+ } else {
+ gpr_mu_unlock(&call->read_mu);
+
+ op.type = GRPC_SEND_FINISH;
+ op.dir = GRPC_CALL_DOWN;
+ op.flags = 0;
+ op.done_cb = done_writes_done;
+ op.user_data = call;
+
+ elem = CALL_ELEM_FROM_CALL(call, 0);
+ elem->filter->call_op(elem, NULL, &op);
+ }
return GRPC_CALL_OK;
}
@@ -760,8 +822,8 @@ grpc_call_error grpc_call_start_write_status(grpc_call *call,
/* always send status */
{
grpc_mdelem *md;
- char buffer[32];
- sprintf(buffer, "%d", status);
+ char buffer[GPR_LTOA_MIN_BUFSIZE];
+ gpr_ltoa(status, buffer);
md =
grpc_mdelem_from_strings(call->metadata_context, "grpc-status", buffer);
@@ -818,6 +880,8 @@ void grpc_call_recv_metadata(grpc_call_element *elem, grpc_call_op *op) {
grpc_call *call = CALL_FROM_TOP_ELEM(elem);
grpc_mdelem *md = op->data.metadata;
grpc_mdstr *key = md->key;
+ gpr_log(GPR_DEBUG, "call %p got metadata %s %s", call,
+ grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value));
if (key == grpc_channel_get_status_string(call->channel)) {
maybe_set_status_code(call, decode_status(md));
grpc_mdelem_unref(md);
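[Editor's note, not part of the commit: the call.c changes above defer writes that arrive before SEND_START completes; under read_mu the call either records the message in pending_write (copying it, since the caller may destroy the original) or sends it down the stack immediately, and call_started later flushes anything recorded. A schematic of that locking pattern with a hypothetical struct in place of grpc_call:

#include <grpc/support/sync.h>

/* Hypothetical reduction of the pending-write handling added to grpc_call. */
typedef struct {
  gpr_mu mu;            /* assumed initialized elsewhere with gpr_mu_init */
  int received_start;   /* set by the start-completed callback */
  void *pending_write;  /* stashed message, flushed once start completes */
} deferred_writer;

static void write_or_defer(deferred_writer *w, void *msg,
                           void (*send_now)(void *msg)) {
  gpr_mu_lock(&w->mu);
  if (!w->received_start) {
    w->pending_write = msg; /* the real code copies the byte buffer here */
    gpr_mu_unlock(&w->mu);
  } else {
    gpr_mu_unlock(&w->mu);  /* drop the lock before calling down the stack */
    send_now(msg);
  }
}
]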
diff --git a/src/core/surface/channel_create.c b/src/core/surface/channel_create.c
index 41093d78ef..d3faf0c996 100644
--- a/src/core/surface/channel_create.c
+++ b/src/core/surface/channel_create.c
@@ -31,6 +31,8 @@
*
*/
+#include "src/core/iomgr/sockaddr.h"
+
#include <grpc/grpc.h>
#include <stdlib.h>
@@ -48,10 +50,10 @@
#include "src/core/iomgr/tcp_client.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/client.h"
+#include "src/core/support/string.h"
#include "src/core/transport/chttp2_transport.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
diff --git a/src/core/surface/client.c b/src/core/surface/client.c
index 74c79bdf9b..fe3a81f1b9 100644
--- a/src/core/surface/client.c
+++ b/src/core/surface/client.c
@@ -34,9 +34,9 @@
#include "src/core/surface/client.h"
#include "src/core/surface/call.h"
+#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string.h>
typedef struct { void *unused; } call_data;
diff --git a/src/core/surface/completion_queue.c b/src/core/surface/completion_queue.c
index 652f23e888..2bf31c50a8 100644
--- a/src/core/surface/completion_queue.c
+++ b/src/core/surface/completion_queue.c
@@ -37,13 +37,13 @@
#include <string.h>
#include "src/core/iomgr/pollset.h"
+#include "src/core/support/string.h"
#include "src/core/surface/call.h"
#include "src/core/surface/event_string.h"
#include "src/core/surface/surface_trace.h"
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
-#include <grpc/support/string.h>
#define NUM_TAG_BUCKETS 31
@@ -396,12 +396,13 @@ void grpc_event_finish(grpc_event *base) {
void grpc_cq_dump_pending_ops(grpc_completion_queue *cc) {
#ifndef NDEBUG
- char tmp[256];
+ char tmp[GRPC_COMPLETION_DO_NOT_USE * (1 + GPR_LTOA_MIN_BUFSIZE)];
char *p = tmp;
int i;
for (i = 0; i < GRPC_COMPLETION_DO_NOT_USE; i++) {
- p += sprintf(p, " %d", (int)cc->pending_op_count[i]);
+ *p++ = ' ';
+ p += gpr_ltoa(cc->pending_op_count[i], p);
}
gpr_log(GPR_INFO, "pending ops:%s", tmp);
diff --git a/src/core/surface/event_string.c b/src/core/surface/event_string.c
index 8ae2af7472..8975d312ee 100644
--- a/src/core/surface/event_string.c
+++ b/src/core/surface/event_string.c
@@ -35,11 +35,13 @@
#include <stdio.h>
-#include <grpc/support/string.h>
+#include "src/core/support/string.h"
#include <grpc/byte_buffer.h>
-static size_t addhdr(char *p, grpc_event *ev) {
- return sprintf(p, "tag:%p call:%p", ev->tag, (void *)ev->call);
+static void addhdr(gpr_strvec *buf, grpc_event *ev) {
+ char *tmp;
+ gpr_asprintf(&tmp, "tag:%p call:%p", ev->tag, (void *)ev->call);
+ gpr_strvec_add(buf, tmp);
}
static const char *errstr(grpc_op_error err) {
@@ -52,72 +54,84 @@ static const char *errstr(grpc_op_error err) {
return "UNKNOWN_UNKNOWN";
}
-static size_t adderr(char *p, grpc_op_error err) {
- return sprintf(p, " err=%s", errstr(err));
+static void adderr(gpr_strvec *buf, grpc_op_error err) {
+ char *tmp;
+ gpr_asprintf(&tmp, " err=%s", errstr(err));
+ gpr_strvec_add(buf, tmp);
}
char *grpc_event_string(grpc_event *ev) {
- char buffer[1024];
- char *p = buffer;
+ char *out;
+ char *tmp;
+ gpr_strvec buf;
if (ev == NULL) return gpr_strdup("null");
+ gpr_strvec_init(&buf);
+
switch (ev->type) {
case GRPC_SERVER_SHUTDOWN:
- p += sprintf(p, "SERVER_SHUTDOWN");
+ gpr_strvec_add(&buf, gpr_strdup("SERVER_SHUTDOWN"));
break;
case GRPC_QUEUE_SHUTDOWN:
- p += sprintf(p, "QUEUE_SHUTDOWN");
+ gpr_strvec_add(&buf, gpr_strdup("QUEUE_SHUTDOWN"));
break;
case GRPC_READ:
- p += sprintf(p, "READ: ");
- p += addhdr(p, ev);
+ gpr_strvec_add(&buf, gpr_strdup("READ: "));
+ addhdr(&buf, ev);
if (ev->data.read) {
- p += sprintf(p, " %d bytes",
+ gpr_asprintf(&tmp, " %d bytes",
(int)grpc_byte_buffer_length(ev->data.read));
+ gpr_strvec_add(&buf, tmp);
} else {
- p += sprintf(p, " end-of-stream");
+ gpr_strvec_add(&buf, gpr_strdup(" end-of-stream"));
}
break;
case GRPC_INVOKE_ACCEPTED:
- p += sprintf(p, "INVOKE_ACCEPTED: ");
- p += addhdr(p, ev);
- p += adderr(p, ev->data.invoke_accepted);
+ gpr_strvec_add(&buf, gpr_strdup("INVOKE_ACCEPTED: "));
+ addhdr(&buf, ev);
+ adderr(&buf, ev->data.invoke_accepted);
break;
case GRPC_WRITE_ACCEPTED:
- p += sprintf(p, "WRITE_ACCEPTED: ");
- p += addhdr(p, ev);
- p += adderr(p, ev->data.write_accepted);
+ gpr_strvec_add(&buf, gpr_strdup("WRITE_ACCEPTED: "));
+ addhdr(&buf, ev);
+ adderr(&buf, ev->data.write_accepted);
break;
case GRPC_FINISH_ACCEPTED:
- p += sprintf(p, "FINISH_ACCEPTED: ");
- p += addhdr(p, ev);
- p += adderr(p, ev->data.write_accepted);
+ gpr_strvec_add(&buf, gpr_strdup("FINISH_ACCEPTED: "));
+ addhdr(&buf, ev);
+ adderr(&buf, ev->data.write_accepted);
break;
case GRPC_CLIENT_METADATA_READ:
- p += sprintf(p, "CLIENT_METADATA_READ: ");
- p += addhdr(p, ev);
- p += sprintf(p, " %d elements", (int)ev->data.client_metadata_read.count);
+ gpr_strvec_add(&buf, gpr_strdup("CLIENT_METADATA_READ: "));
+ addhdr(&buf, ev);
+ gpr_asprintf(&tmp, " %d elements",
+ (int)ev->data.client_metadata_read.count);
+ gpr_strvec_add(&buf, tmp);
break;
case GRPC_FINISHED:
- p += sprintf(p, "FINISHED: ");
- p += addhdr(p, ev);
- p += sprintf(p, " status=%d details='%s' %d metadata elements",
+ gpr_strvec_add(&buf, gpr_strdup("FINISHED: "));
+ addhdr(&buf, ev);
+ gpr_asprintf(&tmp, " status=%d details='%s' %d metadata elements",
ev->data.finished.status, ev->data.finished.details,
(int)ev->data.finished.metadata_count);
+ gpr_strvec_add(&buf, tmp);
break;
case GRPC_SERVER_RPC_NEW:
- p += sprintf(p, "SERVER_RPC_NEW: ");
- p += addhdr(p, ev);
- p += sprintf(p, " method='%s' host='%s' %d metadata elements",
+ gpr_strvec_add(&buf, gpr_strdup("SERVER_RPC_NEW: "));
+ addhdr(&buf, ev);
+ gpr_asprintf(&tmp, " method='%s' host='%s' %d metadata elements",
ev->data.server_rpc_new.method, ev->data.server_rpc_new.host,
(int)ev->data.server_rpc_new.metadata_count);
+ gpr_strvec_add(&buf, tmp);
break;
case GRPC_COMPLETION_DO_NOT_USE:
- p += sprintf(p, "DO_NOT_USE (this is a bug)");
- p += addhdr(p, ev);
+ gpr_strvec_add(&buf, gpr_strdup("DO_NOT_USE (this is a bug)"));
+ addhdr(&buf, ev);
break;
}
- return gpr_strdup(buffer);
+ out = gpr_strvec_flatten(&buf, NULL);
+ gpr_strvec_destroy(&buf);
+ return out;
}
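The rewrite above drops the fixed 1024-byte sprintf buffer in favour of a growable vector of heap strings that is flattened into the result once, so building the description no longer depends on a guessed buffer size. A compact sketch of that accumulate-then-flatten pattern follows, assuming simplified local strvec_* helpers in place of gpr_strvec and plain asprintf/strdup in place of gpr_asprintf/gpr_strdup (allocation failures ignored for brevity; not the gRPC implementation).

/* Sketch: collect heap-allocated fragments, then join them in one pass. */
#define _GNU_SOURCE /* asprintf/strdup on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  char **strs;
  size_t count;
  size_t capacity;
} strvec;

static void strvec_init(strvec *v) { memset(v, 0, sizeof(*v)); }

/* Takes ownership of the heap-allocated string 'buf'. */
static void strvec_add(strvec *v, char *buf) {
  if (v->count == v->capacity) {
    v->capacity = v->capacity ? 2 * v->capacity : 8;
    v->strs = realloc(v->strs, v->capacity * sizeof(char *));
  }
  v->strs[v->count++] = buf;
}

/* Join every fragment into one freshly allocated string. */
static char *strvec_flatten(const strvec *v) {
  size_t i, len = 0;
  char *out, *p;
  for (i = 0; i < v->count; i++) len += strlen(v->strs[i]);
  p = out = malloc(len + 1);
  for (i = 0; i < v->count; i++) {
    size_t n = strlen(v->strs[i]);
    memcpy(p, v->strs[i], n);
    p += n;
  }
  *p = '\0';
  return out;
}

static void strvec_destroy(strvec *v) {
  size_t i;
  for (i = 0; i < v->count; i++) free(v->strs[i]);
  free(v->strs);
}

int main(void) {
  strvec buf;
  char *tmp;
  char *out;
  int fake_len = 42;

  strvec_init(&buf);
  strvec_add(&buf, strdup("READ: "));                /* fixed fragment */
  if (asprintf(&tmp, "tag:%p", (void *)&buf) >= 0) { /* formatted fragment */
    strvec_add(&buf, tmp);
  }
  if (asprintf(&tmp, " %d bytes", fake_len) >= 0) {
    strvec_add(&buf, tmp);
  }
  out = strvec_flatten(&buf); /* single allocation sized to the real total */
  strvec_destroy(&buf);
  printf("%s\n", out);
  free(out);
  return 0;
}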
diff --git a/src/core/surface/lame_client.c b/src/core/surface/lame_client.c
index a5244dbe61..056c98646b 100644
--- a/src/core/surface/lame_client.c
+++ b/src/core/surface/lame_client.c
@@ -36,11 +36,11 @@
#include <string.h>
#include "src/core/channel/channel_stack.h"
+#include "src/core/support/string.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/call.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string.h>
typedef struct { void *unused; } call_data;
diff --git a/src/core/surface/secure_channel_create.c b/src/core/surface/secure_channel_create.c
index 3d5727927d..defee79766 100644
--- a/src/core/surface/secure_channel_create.c
+++ b/src/core/surface/secure_channel_create.c
@@ -31,6 +31,8 @@
*
*/
+#include "src/core/iomgr/sockaddr.h"
+
#include <grpc/grpc.h>
#include <stdlib.h>
@@ -48,13 +50,13 @@
#include "src/core/security/auth.h"
#include "src/core/security/security_context.h"
#include "src/core/security/secure_transport_setup.h"
+#include "src/core/support/string.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/client.h"
#include "src/core/transport/chttp2_transport.h"
#include <grpc/grpc_security.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
#include "src/core/tsi/transport_security_interface.h"
diff --git a/src/core/surface/server.c b/src/core/surface/server.c
index cbdd3bfa30..9585e4e8ea 100644
--- a/src/core/surface/server.c
+++ b/src/core/surface/server.c
@@ -40,12 +40,12 @@
#include "src/core/channel/channel_args.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/iomgr/iomgr.h"
+#include "src/core/support/string.h"
#include "src/core/surface/call.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/completion_queue.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string.h>
#include <grpc/support/useful.h>
typedef enum { PENDING_START, ALL_CALLS, CALL_LIST_COUNT } call_list;
diff --git a/src/core/transport/chttp2/frame_data.c b/src/core/transport/chttp2/frame_data.c
index 00b020b31b..dee61cee50 100644
--- a/src/core/transport/chttp2/frame_data.c
+++ b/src/core/transport/chttp2/frame_data.c
@@ -35,9 +35,9 @@
#include <string.h>
+#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string.h>
#include <grpc/support/useful.h>
#include "src/core/transport/transport.h"
diff --git a/src/core/transport/chttp2/hpack_parser.c b/src/core/transport/chttp2/hpack_parser.c
index 64e08ffac7..c98b90e5d1 100644
--- a/src/core/transport/chttp2/hpack_parser.c
+++ b/src/core/transport/chttp2/hpack_parser.c
@@ -38,10 +38,10 @@
#include <assert.h>
#include "src/core/transport/chttp2/bin_encoder.h"
+#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
-#include <grpc/support/string.h>
#include <grpc/support/useful.h>
typedef enum {
diff --git a/src/core/transport/chttp2/timeout_encoding.c b/src/core/transport/chttp2/timeout_encoding.c
index 2706c369a6..23c4554cf2 100644
--- a/src/core/transport/chttp2/timeout_encoding.c
+++ b/src/core/transport/chttp2/timeout_encoding.c
@@ -36,6 +36,8 @@
#include <stdio.h>
#include <string.h>
+#include "src/core/support/string.h"
+
static int round_up(int x, int divisor) {
return (x / divisor + (x % divisor != 0)) * divisor;
}
@@ -53,15 +55,21 @@ static int round_up_to_three_sig_figs(int x) {
}
/* encode our minimum viable timeout value */
-static void enc_tiny(char *buffer) { strcpy(buffer, "1n"); }
+static void enc_tiny(char *buffer) { memcpy(buffer, "1n", 3); }
+
+static void enc_ext(char *buffer, long value, char ext) {
+ int n = gpr_ltoa(value, buffer);
+ buffer[n] = ext;
+ buffer[n+1] = 0;
+}
static void enc_seconds(char *buffer, long sec) {
if (sec % 3600 == 0) {
- sprintf(buffer, "%ldH", sec / 3600);
+ enc_ext(buffer, sec / 3600, 'H');
} else if (sec % 60 == 0) {
- sprintf(buffer, "%ldM", sec / 60);
+ enc_ext(buffer, sec / 60, 'M');
} else {
- sprintf(buffer, "%ldS", sec);
+ enc_ext(buffer, sec, 'S');
}
}
@@ -69,23 +77,23 @@ static void enc_nanos(char *buffer, int x) {
x = round_up_to_three_sig_figs(x);
if (x < 100000) {
if (x % 1000 == 0) {
- sprintf(buffer, "%du", x / 1000);
+ enc_ext(buffer, x / 1000, 'u');
} else {
- sprintf(buffer, "%dn", x);
+ enc_ext(buffer, x, 'n');
}
} else if (x < 100000000) {
if (x % 1000000 == 0) {
- sprintf(buffer, "%dm", x / 1000000);
+ enc_ext(buffer, x / 1000000, 'm');
} else {
- sprintf(buffer, "%du", x / 1000);
+ enc_ext(buffer, x / 1000, 'u');
}
} else if (x < 1000000000) {
- sprintf(buffer, "%dm", x / 1000000);
+ enc_ext(buffer, x / 1000000, 'm');
} else {
/* note that this is only ever called with times of less than one second,
so if we reach here the time must have been rounded up to a whole second
(and no more) */
- strcpy(buffer, "1S");
+ memcpy(buffer, "1S", 3);
}
}
@@ -93,18 +101,18 @@ static void enc_micros(char *buffer, int x) {
x = round_up_to_three_sig_figs(x);
if (x < 100000) {
if (x % 1000 == 0) {
- sprintf(buffer, "%dm", x / 1000);
+ enc_ext(buffer, x / 1000, 'm');
} else {
- sprintf(buffer, "%du", x);
+ enc_ext(buffer, x, 'u');
}
} else if (x < 100000000) {
if (x % 1000000 == 0) {
- sprintf(buffer, "%dS", x / 1000000);
+ enc_ext(buffer, x / 1000000, 'S');
} else {
- sprintf(buffer, "%dm", x / 1000);
+ enc_ext(buffer, x / 1000, 'm');
}
} else {
- sprintf(buffer, "%dS", x / 1000000);
+ enc_ext(buffer, x / 1000000, 'S');
}
}
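Throughout this file the encoder keeps the wire grammar of a decimal value followed by a single unit letter (H, M, S, m, u, n): enc_ext writes the digits with gpr_ltoa and appends the suffix by hand. The sketch below shows that value-plus-suffix shape in isolation, assuming plain sprintf in place of gpr_ltoa and omitting the three-significant-figure rounding, so it is a simplified illustration rather than the gRPC encoder.

/* Sketch: encode a nanosecond count as "<digits><unit>". */
#include <stdio.h>

/* Mirror of enc_ext above: write the value, then append the unit character. */
static void enc_ext(char *buffer, long value, char ext) {
  int n = sprintf(buffer, "%ld", value);
  buffer[n] = ext;
  buffer[n + 1] = '\0';
}

/* Pick the coarsest unit that still represents 'nanos' exactly. */
static void encode_nanos(char *buffer, long nanos) {
  if (nanos % 1000000000 == 0) {
    enc_ext(buffer, nanos / 1000000000, 'S');
  } else if (nanos % 1000000 == 0) {
    enc_ext(buffer, nanos / 1000000, 'm');
  } else if (nanos % 1000 == 0) {
    enc_ext(buffer, nanos / 1000, 'u');
  } else {
    enc_ext(buffer, nanos, 'n');
  }
}

int main(void) {
  char buf[32];
  encode_nanos(buf, 2000000000L); printf("%s\n", buf); /* "2S"    */
  encode_nanos(buf, 250000000L);  printf("%s\n", buf); /* "250m"  */
  encode_nanos(buf, 30000L);      printf("%s\n", buf); /* "30u"   */
  encode_nanos(buf, 1500L);       printf("%s\n", buf); /* "1500n" */
  return 0;
}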
diff --git a/src/core/transport/chttp2/timeout_encoding.h b/src/core/transport/chttp2/timeout_encoding.h
index a4582566ad..d1e4776032 100644
--- a/src/core/transport/chttp2/timeout_encoding.h
+++ b/src/core/transport/chttp2/timeout_encoding.h
@@ -34,8 +34,11 @@
#ifndef __GRPC_INTERNAL_TRANSPORT_CHTTP2_TIMEOUT_ENCODING_H_
#define __GRPC_INTERNAL_TRANSPORT_CHTTP2_TIMEOUT_ENCODING_H_
+#include "src/core/support/string.h"
#include <grpc/support/time.h>
+#define GRPC_CHTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE (GPR_LTOA_MIN_BUFSIZE + 1)
+
/* Encode/decode timeouts to the GRPC over HTTP2 format;
encoding may round up arbitrarily */
void grpc_chttp2_encode_timeout(gpr_timespec timeout, char *buffer);
diff --git a/src/core/transport/chttp2_transport.c b/src/core/transport/chttp2_transport.c
index 17b37d6d4a..531a53b984 100644
--- a/src/core/transport/chttp2_transport.c
+++ b/src/core/transport/chttp2_transport.c
@@ -37,6 +37,7 @@
#include <stdio.h>
#include <string.h>
+#include "src/core/support/string.h"
#include "src/core/transport/chttp2/frame_data.h"
#include "src/core/transport/chttp2/frame_goaway.h"
#include "src/core/transport/chttp2/frame_ping.h"
@@ -53,7 +54,6 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/slice_buffer.h>
-#include <grpc/support/string.h>
#include <grpc/support/useful.h>
#define DEFAULT_WINDOW 65535
@@ -1002,7 +1002,7 @@ static void cancel_stream_inner(transport *t, stream *s, gpr_uint32 id,
grpc_chttp2_error_code error_code,
int send_rst) {
int had_outgoing;
- char buffer[32];
+ char buffer[GPR_LTOA_MIN_BUFSIZE];
if (s) {
/* clear out any unreported input & output: nobody cares anymore */
@@ -1015,7 +1015,7 @@ static void cancel_stream_inner(transport *t, stream *s, gpr_uint32 id,
s->cancelled = 1;
stream_list_join(t, s, CANCELLED);
- sprintf(buffer, "%d", local_status);
+ gpr_ltoa(local_status, buffer);
grpc_sopb_add_metadata(
&s->parser.incoming_sopb,
grpc_mdelem_from_strings(t->metadata_context, "grpc-status", buffer));
diff --git a/src/core/transport/metadata.c b/src/core/transport/metadata.c
index 6881b871ec..74bbb02134 100644
--- a/src/core/transport/metadata.c
+++ b/src/core/transport/metadata.c
@@ -31,6 +31,7 @@
*
*/
+#include "src/core/iomgr/sockaddr.h"
#include "src/core/transport/metadata.h"
#include <stddef.h>
diff --git a/src/cpp/client/channel.cc b/src/cpp/client/channel.cc
index a8919a10d9..c8b2bb2cf6 100644
--- a/src/cpp/client/channel.cc
+++ b/src/cpp/client/channel.cc
@@ -104,7 +104,6 @@ Status Channel::StartBlockingRpc(const RpcMethod &method,
context->set_call(call);
grpc_event *ev;
void *finished_tag = reinterpret_cast<char *>(call);
- void *invoke_tag = reinterpret_cast<char *>(call) + 1;
void *metadata_read_tag = reinterpret_cast<char *>(call) + 2;
void *write_tag = reinterpret_cast<char *>(call) + 3;
void *halfclose_tag = reinterpret_cast<char *>(call) + 4;
@@ -115,19 +114,11 @@ Status Channel::StartBlockingRpc(const RpcMethod &method,
// add_metadata from context
//
// invoke
- GPR_ASSERT(grpc_call_start_invoke(call, cq, invoke_tag, metadata_read_tag,
- finished_tag,
- GRPC_WRITE_BUFFER_HINT) == GRPC_CALL_OK);
- ev = grpc_completion_queue_pluck(cq, invoke_tag, gpr_inf_future);
- bool success = ev->data.invoke_accepted == GRPC_OP_OK;
- grpc_event_finish(ev);
- if (!success) {
- GetFinalStatus(cq, finished_tag, &status);
- return status;
- }
+ GPR_ASSERT(grpc_call_invoke(call, cq, metadata_read_tag, finished_tag,
+ GRPC_WRITE_BUFFER_HINT) == GRPC_CALL_OK);
// write request
grpc_byte_buffer *write_buffer = nullptr;
- success = SerializeProto(request, &write_buffer);
+ bool success = SerializeProto(request, &write_buffer);
if (!success) {
grpc_call_cancel(call);
status =
diff --git a/src/cpp/stream/stream_context.cc b/src/cpp/stream/stream_context.cc
index e64010be64..edb2fc5ad9 100644
--- a/src/cpp/stream/stream_context.cc
+++ b/src/cpp/stream/stream_context.cc
@@ -80,17 +80,9 @@ void StreamContext::Start(bool buffered) {
if (is_client_) {
// TODO(yangg) handle metadata send path
int flag = buffered ? GRPC_WRITE_BUFFER_HINT : 0;
- grpc_call_error error = grpc_call_start_invoke(call(), cq(), invoke_tag(),
- client_metadata_read_tag(),
- finished_tag(), flag);
+ grpc_call_error error = grpc_call_invoke(
+ call(), cq(), client_metadata_read_tag(), finished_tag(), flag);
GPR_ASSERT(GRPC_CALL_OK == error);
- grpc_event *invoke_ev =
- grpc_completion_queue_pluck(cq(), invoke_tag(), gpr_inf_future);
- if (invoke_ev->data.invoke_accepted != GRPC_OP_OK) {
- peer_halfclosed_ = true;
- self_halfclosed_ = true;
- }
- grpc_event_finish(invoke_ev);
} else {
// TODO(yangg) metadata needs to be added before accept
// TODO(yangg) correctly set flag to accept
diff --git a/src/cpp/stream/stream_context.h b/src/cpp/stream/stream_context.h
index 8697d86e83..8def589841 100644
--- a/src/cpp/stream/stream_context.h
+++ b/src/cpp/stream/stream_context.h
@@ -76,7 +76,6 @@ class StreamContext final : public StreamContextInterface {
void *read_tag() { return reinterpret_cast<char *>(this) + 1; }
void *write_tag() { return reinterpret_cast<char *>(this) + 2; }
void *halfclose_tag() { return reinterpret_cast<char *>(this) + 3; }
- void *invoke_tag() { return reinterpret_cast<char *>(this) + 4; }
void *client_metadata_read_tag() {
return reinterpret_cast<char *>(this) + 5;
}
diff --git a/src/node/binding.gyp b/src/node/binding.gyp
index da4a943491..cf2a6acb04 100644
--- a/src/node/binding.gyp
+++ b/src/node/binding.gyp
@@ -1,8 +1,13 @@
{
+ "variables" : {
+ 'no_install': "<!(echo $GRPC_NO_INSTALL)",
+ 'grpc_root': "<!(echo $GRPC_ROOT)",
+ 'grpc_lib_subdir': "<!(echo $GRPC_LIB_SUBDIR)"
+ },
"targets" : [
{
'include_dirs': [
- "<!(node -e \"require('nan')\")"
+ "<!(nodejs -e \"require('nan')\")"
],
'cxxflags': [
'-Wall',
@@ -11,32 +16,50 @@
'-g',
'-zdefs'
'-Werror',
- ],
+ ],
'ldflags': [
- '-g',
- '-L/usr/local/google/home/mlumish/grpc_dev/lib'
+ '-g'
],
'link_settings': {
'libraries': [
- '-lgrpc',
'-lrt',
- '-lgpr',
'-lpthread'
],
},
"target_name": "grpc",
"sources": [
- "byte_buffer.cc",
- "call.cc",
- "channel.cc",
- "completion_queue_async_worker.cc",
- "credentials.cc",
- "event.cc",
- "node_grpc.cc",
- "server.cc",
- "server_credentials.cc",
- "tag.cc",
- "timeval.cc"
+ "ext/byte_buffer.cc",
+ "ext/call.cc",
+ "ext/channel.cc",
+ "ext/completion_queue_async_worker.cc",
+ "ext/credentials.cc",
+ "ext/event.cc",
+ "ext/node_grpc.cc",
+ "ext/server.cc",
+ "ext/server_credentials.cc",
+ "ext/tag.cc",
+ "ext/timeval.cc"
+ ],
+ 'conditions' : [
+ ['no_install=="yes"', {
+ 'include_dirs': [
+ "<(grpc_root)/include"
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '<(grpc_root)/<(grpc_lib_subdir)/libgrpc.a',
+ '<(grpc_root)/<(grpc_lib_subdir)/libgpr.a'
+ ]
+ }
+ }],
+ ['no_install!="yes"', {
+ 'link_settings': {
+ 'libraries': [
+ '-lgrpc',
+ '-lgpr'
+ ]
+ }
+ }]
]
}
]
diff --git a/src/node/examples/math_server.js b/src/node/examples/math_server.js
index d649b4fd6d..e65cfe3002 100644
--- a/src/node/examples/math_server.js
+++ b/src/node/examples/math_server.js
@@ -52,7 +52,8 @@ var Server = grpc.buildServer([math.Math.service]);
*/
function mathDiv(call, cb) {
var req = call.request;
- if (req.divisor == 0) {
+ // Unary + is explicit coercion to a number
+ if (+req.divisor === 0) {
cb(new Error('cannot divide by zero'));
}
cb(null, {
@@ -89,7 +90,7 @@ function mathSum(call, cb) {
// Here, call is a standard readable Node object Stream
var sum = 0;
call.on('data', function(data) {
- sum += data.num | 0;
+ sum += (+data.num);
});
call.on('end', function() {
cb(null, {num: sum});
@@ -104,7 +105,7 @@ function mathDivMany(stream) {
Transform.call(this, options);
}
DivTransform.prototype._transform = function(div_args, encoding, callback) {
- if (div_args.divisor == 0) {
+ if (+div_args.divisor === 0) {
callback(new Error('cannot divide by zero'));
}
callback(null, {
diff --git a/src/node/byte_buffer.cc b/src/node/ext/byte_buffer.cc
index 142951475a..142951475a 100644
--- a/src/node/byte_buffer.cc
+++ b/src/node/ext/byte_buffer.cc
diff --git a/src/node/byte_buffer.h b/src/node/ext/byte_buffer.h
index ee2b4c0d15..ee2b4c0d15 100644
--- a/src/node/byte_buffer.h
+++ b/src/node/ext/byte_buffer.h
diff --git a/src/node/call.cc b/src/node/ext/call.cc
index b8ee1786a6..6434c2f0d5 100644
--- a/src/node/call.cc
+++ b/src/node/ext/call.cc
@@ -78,8 +78,8 @@ void Call::Init(Handle<Object> exports) {
tpl->InstanceTemplate()->SetInternalFieldCount(1);
NanSetPrototypeTemplate(tpl, "addMetadata",
FunctionTemplate::New(AddMetadata)->GetFunction());
- NanSetPrototypeTemplate(tpl, "startInvoke",
- FunctionTemplate::New(StartInvoke)->GetFunction());
+ NanSetPrototypeTemplate(tpl, "invoke",
+ FunctionTemplate::New(Invoke)->GetFunction());
NanSetPrototypeTemplate(tpl, "serverAccept",
FunctionTemplate::New(ServerAccept)->GetFunction());
NanSetPrototypeTemplate(
@@ -203,37 +203,30 @@ NAN_METHOD(Call::AddMetadata) {
NanReturnUndefined();
}
-NAN_METHOD(Call::StartInvoke) {
+NAN_METHOD(Call::Invoke) {
NanScope();
if (!HasInstance(args.This())) {
- return NanThrowTypeError("startInvoke can only be called on Call objects");
+ return NanThrowTypeError("invoke can only be called on Call objects");
}
if (!args[0]->IsFunction()) {
- return NanThrowTypeError("StartInvoke's first argument must be a function");
+ return NanThrowTypeError("invoke's first argument must be a function");
}
if (!args[1]->IsFunction()) {
- return NanThrowTypeError(
- "StartInvoke's second argument must be a function");
- }
- if (!args[2]->IsFunction()) {
- return NanThrowTypeError("StartInvoke's third argument must be a function");
+ return NanThrowTypeError("invoke's second argument must be a function");
}
- if (!args[3]->IsUint32()) {
- return NanThrowTypeError(
- "StartInvoke's fourth argument must be integer flags");
+ if (!args[2]->IsUint32()) {
+ return NanThrowTypeError("invoke's third argument must be integer flags");
}
Call *call = ObjectWrap::Unwrap<Call>(args.This());
unsigned int flags = args[3]->Uint32Value();
- grpc_call_error error = grpc_call_start_invoke(
+ grpc_call_error error = grpc_call_invoke(
call->wrapped_call, CompletionQueueAsyncWorker::GetQueue(),
- CreateTag(args[0], args.This()), CreateTag(args[1], args.This()),
- CreateTag(args[2], args.This()), flags);
+ CreateTag(args[0], args.This()), CreateTag(args[1], args.This()), flags);
if (error == GRPC_CALL_OK) {
CompletionQueueAsyncWorker::Next();
CompletionQueueAsyncWorker::Next();
- CompletionQueueAsyncWorker::Next();
} else {
- return NanThrowError("startInvoke failed", error);
+ return NanThrowError("invoke failed", error);
}
NanReturnUndefined();
}
@@ -281,7 +274,7 @@ NAN_METHOD(Call::ServerEndInitialMetadata) {
NAN_METHOD(Call::Cancel) {
NanScope();
if (!HasInstance(args.This())) {
- return NanThrowTypeError("startInvoke can only be called on Call objects");
+ return NanThrowTypeError("cancel can only be called on Call objects");
}
Call *call = ObjectWrap::Unwrap<Call>(args.This());
grpc_call_error error = grpc_call_cancel(call->wrapped_call);
diff --git a/src/node/call.h b/src/node/ext/call.h
index 55a6fc65b8..1924a1bf42 100644
--- a/src/node/call.h
+++ b/src/node/ext/call.h
@@ -61,7 +61,7 @@ class Call : public ::node::ObjectWrap {
static NAN_METHOD(New);
static NAN_METHOD(AddMetadata);
- static NAN_METHOD(StartInvoke);
+ static NAN_METHOD(Invoke);
static NAN_METHOD(ServerAccept);
static NAN_METHOD(ServerEndInitialMetadata);
static NAN_METHOD(Cancel);
diff --git a/src/node/channel.cc b/src/node/ext/channel.cc
index 9087d6f919..9087d6f919 100644
--- a/src/node/channel.cc
+++ b/src/node/ext/channel.cc
diff --git a/src/node/channel.h b/src/node/ext/channel.h
index 140cbf201a..140cbf201a 100644
--- a/src/node/channel.h
+++ b/src/node/ext/channel.h
diff --git a/src/node/completion_queue_async_worker.cc b/src/node/ext/completion_queue_async_worker.cc
index 8de7db66d5..8de7db66d5 100644
--- a/src/node/completion_queue_async_worker.cc
+++ b/src/node/ext/completion_queue_async_worker.cc
diff --git a/src/node/completion_queue_async_worker.h b/src/node/ext/completion_queue_async_worker.h
index 2c928b7024..2c928b7024 100644
--- a/src/node/completion_queue_async_worker.h
+++ b/src/node/ext/completion_queue_async_worker.h
diff --git a/src/node/credentials.cc b/src/node/ext/credentials.cc
index f9cd2fcfe0..f9cd2fcfe0 100644
--- a/src/node/credentials.cc
+++ b/src/node/ext/credentials.cc
diff --git a/src/node/credentials.h b/src/node/ext/credentials.h
index 981e5a99bc..981e5a99bc 100644
--- a/src/node/credentials.h
+++ b/src/node/ext/credentials.h
diff --git a/src/node/event.cc b/src/node/ext/event.cc
index 2ca38b7448..2ca38b7448 100644
--- a/src/node/event.cc
+++ b/src/node/ext/event.cc
diff --git a/src/node/event.h b/src/node/ext/event.h
index e06d8f0168..e06d8f0168 100644
--- a/src/node/event.h
+++ b/src/node/ext/event.h
diff --git a/src/node/node_grpc.cc b/src/node/ext/node_grpc.cc
index acee0386d2..bc1dfaf899 100644
--- a/src/node/node_grpc.cc
+++ b/src/node/ext/node_grpc.cc
@@ -148,8 +148,6 @@ void InitCompletionTypeConstants(Handle<Object> exports) {
completion_type->Set(NanNew("QUEUE_SHUTDOWN"), QUEUE_SHUTDOWN);
Handle<Value> READ(NanNew<Uint32, uint32_t>(GRPC_READ));
completion_type->Set(NanNew("READ"), READ);
- Handle<Value> INVOKE_ACCEPTED(NanNew<Uint32, uint32_t>(GRPC_INVOKE_ACCEPTED));
- completion_type->Set(NanNew("INVOKE_ACCEPTED"), INVOKE_ACCEPTED);
Handle<Value> WRITE_ACCEPTED(NanNew<Uint32, uint32_t>(GRPC_WRITE_ACCEPTED));
completion_type->Set(NanNew("WRITE_ACCEPTED"), WRITE_ACCEPTED);
Handle<Value> FINISH_ACCEPTED(NanNew<Uint32, uint32_t>(GRPC_FINISH_ACCEPTED));
diff --git a/src/node/server.cc b/src/node/ext/server.cc
index b102775d33..b102775d33 100644
--- a/src/node/server.cc
+++ b/src/node/ext/server.cc
diff --git a/src/node/server.h b/src/node/ext/server.h
index d50f1fb6c5..d50f1fb6c5 100644
--- a/src/node/server.h
+++ b/src/node/ext/server.h
diff --git a/src/node/server_credentials.cc b/src/node/ext/server_credentials.cc
index 393f3a6305..393f3a6305 100644
--- a/src/node/server_credentials.cc
+++ b/src/node/ext/server_credentials.cc
diff --git a/src/node/server_credentials.h b/src/node/ext/server_credentials.h
index 8baae3f185..8baae3f185 100644
--- a/src/node/server_credentials.h
+++ b/src/node/ext/server_credentials.h
diff --git a/src/node/tag.cc b/src/node/ext/tag.cc
index dc8e523e12..dc8e523e12 100644
--- a/src/node/tag.cc
+++ b/src/node/ext/tag.cc
diff --git a/src/node/tag.h b/src/node/ext/tag.h
index bdb09252d9..bdb09252d9 100644
--- a/src/node/tag.h
+++ b/src/node/ext/tag.h
diff --git a/src/node/timeval.cc b/src/node/ext/timeval.cc
index 687e33576b..687e33576b 100644
--- a/src/node/timeval.cc
+++ b/src/node/ext/timeval.cc
diff --git a/src/node/timeval.h b/src/node/ext/timeval.h
index 1fb0f2c690..1fb0f2c690 100644
--- a/src/node/timeval.h
+++ b/src/node/ext/timeval.h
diff --git a/src/node/main.js b/src/node/index.js
index 751c3525d3..0627e7f557 100644
--- a/src/node/main.js
+++ b/src/node/index.js
@@ -35,9 +35,9 @@ var _ = require('underscore');
var ProtoBuf = require('protobufjs');
-var surface_client = require('./surface_client.js');
+var surface_client = require('./src/surface_client.js');
-var surface_server = require('./surface_server.js');
+var surface_server = require('./src/surface_server.js');
var grpc = require('bindings')('grpc');
diff --git a/src/node/interop/interop_client.js b/src/node/interop/interop_client.js
index cf75b9a77a..9306317b68 100644
--- a/src/node/interop/interop_client.js
+++ b/src/node/interop/interop_client.js
@@ -183,7 +183,7 @@ function pingPong(client, done) {
assert.equal(response.payload.body.limit - response.payload.body.offset,
response_sizes[index]);
index += 1;
- if (index == 4) {
+ if (index === 4) {
call.end();
} else {
call.write({
diff --git a/src/node/interop/interop_server.js b/src/node/interop/interop_server.js
index 6d2bd7ae0d..ebf847876c 100644
--- a/src/node/interop/interop_server.js
+++ b/src/node/interop/interop_server.js
@@ -194,7 +194,8 @@ if (require.main === module) {
string: ['port', 'use_tls']
});
var server_obj = getServer(argv.port, argv.use_tls === 'true');
- server_obj.server.start();
+ console.log('Server attaching to port ' + argv.port);
+ server_obj.server.listen();
}
/**
diff --git a/src/node/package.json b/src/node/package.json
index 5f3c6fa345..8a0b51dda8 100644
--- a/src/node/package.json
+++ b/src/node/package.json
@@ -17,5 +17,5 @@
"mocha": "~1.21.0",
"minimist": "^1.1.0"
},
- "main": "main.js"
+ "main": "index.js"
}
diff --git a/src/node/client.js b/src/node/src/client.js
index f913b06f29..3a1c9eef84 100644
--- a/src/node/client.js
+++ b/src/node/src/client.js
@@ -62,12 +62,9 @@ function GrpcClientStream(call, serialize, deserialize) {
};
}
var self = this;
- // Indicates that we can start reading and have not received a null read
- var can_read = false;
+ var finished = false;
// Indicates that a read is currently pending
var reading = false;
- // Indicates that we can call startWrite
- var can_write = false;
// Indicates that a write is currently pending
var writing = false;
this._call = call;
@@ -98,91 +95,46 @@ function GrpcClientStream(call, serialize, deserialize) {
return deserialize(buffer);
};
/**
- * Callback to handle receiving a READ event. Pushes the data from that event
- * onto the read queue and starts reading again if applicable.
- * @param {grpc.Event} event The READ event object
+ * Callback to be called when a READ event is received. Pushes the data onto
+ * the read queue and starts reading again if applicable
+ * @param {grpc.Event} event READ event object
*/
function readCallback(event) {
+ if (finished) {
+ self.push(null);
+ return;
+ }
var data = event.data;
- if (self.push(self.deserialize(data))) {
- if (data == null) {
- // Disable starting to read after null read was received
- can_read = false;
- reading = false;
- } else {
- call.startRead(readCallback);
- }
+ if (self.push(self.deserialize(data)) && data != null) {
+ self._call.startRead(readCallback);
} else {
- // Indicate that reading can be resumed by calling startReading
reading = false;
}
- };
- /**
- * Initiate a read, which continues until self.push returns false (indicating
- * that reading should be paused) or data is null (indicating that there is no
- * more data to read).
- */
- function startReading() {
- call.startRead(readCallback);
}
- // TODO(mlumish): possibly change queue implementation due to shift slowness
- var write_queue = [];
- /**
- * Write the next chunk of data in the write queue if there is one. Otherwise
- * indicate that there is no pending write. When the write succeeds, this
- * function is called again.
- */
- function writeNext() {
- if (write_queue.length > 0) {
- writing = true;
- var next = write_queue.shift();
- var writeCallback = function(event) {
- next.callback();
- writeNext();
- };
- call.startWrite(self.serialize(next.chunk), writeCallback, 0);
- } else {
- writing = false;
- }
- }
- call.startInvoke(function(event) {
- can_read = true;
- can_write = true;
- startReading();
- writeNext();
- }, function(event) {
+ call.invoke(function(event) {
self.emit('metadata', event.data);
}, function(event) {
+ finished = true;
self.emit('status', event.data);
}, 0);
this.on('finish', function() {
call.writesDone(function() {});
});
/**
- * Indicate that reads should start, and start them if the INVOKE_ACCEPTED
- * event has been received.
+ * Start reading if there is not already a pending read. Reading will
+ * continue until self.push returns false (indicating reads should slow
+ * down) or the read data is null (indicating that there is no more data).
*/
- this._enableRead = function() {
- if (!reading) {
- reading = true;
- if (can_read) {
- startReading();
+ this.startReading = function() {
+ if (finished) {
+ self.push(null);
+ } else {
+ if (!reading) {
+ reading = true;
+ self._call.startRead(readCallback);
}
}
};
- /**
- * Push the chunk onto the write queue, and write from the write queue if
- * there is not a pending write
- * @param {Buffer} chunk The chunk of data to write
- * @param {function(Error=)} callback The callback to call when the write
- * completes
- */
- this._tryWrite = function(chunk, callback) {
- write_queue.push({chunk: chunk, callback: callback});
- if (can_write && !writing) {
- writeNext();
- }
- };
}
/**
@@ -191,7 +143,7 @@ function GrpcClientStream(call, serialize, deserialize) {
* @param {number} size Ignored
*/
GrpcClientStream.prototype._read = function(size) {
- this._enableRead();
+ this.startReading();
};
/**
@@ -202,7 +154,18 @@ GrpcClientStream.prototype._read = function(size) {
* @param {function(Error=)} callback Ignored
*/
GrpcClientStream.prototype._write = function(chunk, encoding, callback) {
- this._tryWrite(chunk, callback);
+ var self = this;
+ self._call.startWrite(self.serialize(chunk), function(event) {
+ callback();
+ }, 0);
+};
+
+/**
+ * Cancel the ongoing call. If the call has not already finished, it will finish
+ * with status CANCELLED.
+ */
+GrpcClientStream.prototype.cancel = function() {
+ this._call.cancel();
};
/**
@@ -230,7 +193,7 @@ function makeRequest(channel,
if (metadata) {
call.addMetadata(metadata);
}
- return new GrpcClientStream(call);
+ return new GrpcClientStream(call, serialize, deserialize);
}
/**
diff --git a/src/node/common.js b/src/node/src/common.js
index 54247e3fa1..54247e3fa1 100644
--- a/src/node/common.js
+++ b/src/node/src/common.js
diff --git a/src/node/server.js b/src/node/src/server.js
index eca20aa5fd..03cdbe6f98 100644
--- a/src/node/server.js
+++ b/src/node/src/server.js
@@ -151,7 +151,7 @@ function GrpcServerStream(call, serialize, deserialize) {
return;
}
var data = event.data;
- if (self.push(deserialize(data)) && data != null) {
+ if (self.push(self.deserialize(data)) && data != null) {
self._call.startRead(readCallback);
} else {
reading = false;
@@ -233,7 +233,7 @@ function Server(options) {
function handleNewCall(event) {
var call = event.call;
var data = event.data;
- if (data == null) {
+ if (data === null) {
return;
}
server.requestCall(handleNewCall);
@@ -246,6 +246,7 @@ function Server(options) {
call.serverAccept(function(event) {
if (event.data.code === grpc.status.CANCELLED) {
cancelled = true;
+ stream.emit('cancelled');
}
}, 0);
call.serverEndInitialMetadata(0);
diff --git a/src/node/surface_client.js b/src/node/src/surface_client.js
index 996e3d101f..16c31809f4 100644
--- a/src/node/surface_client.js
+++ b/src/node/src/surface_client.js
@@ -63,114 +63,80 @@ util.inherits(ClientReadableObjectStream, Readable);
* client side. Extends from stream.Readable.
* @constructor
* @param {stream} stream Underlying binary Duplex stream for the call
- * @param {function(Buffer)} deserialize Function for deserializing binary data
- * @param {object} options Stream options
*/
-function ClientReadableObjectStream(stream, deserialize, options) {
- options = _.extend(options, {objectMode: true});
+function ClientReadableObjectStream(stream) {
+ var options = {objectMode: true};
Readable.call(this, options);
this._stream = stream;
var self = this;
forwardEvent(stream, this, 'status');
forwardEvent(stream, this, 'metadata');
this._stream.on('data', function forwardData(chunk) {
- if (!self.push(deserialize(chunk))) {
+ if (!self.push(chunk)) {
self._stream.pause();
}
});
this._stream.pause();
}
-util.inherits(ClientWritableObjectStream, Writable);
-
/**
- * Class for representing a gRPC client streaming call as a Node stream on the
- * client side. Extends from stream.Writable.
- * @constructor
- * @param {stream} stream Underlying binary Duplex stream for the call
- * @param {function(*):Buffer} serialize Function for serializing objects
- * @param {object} options Stream options
+ * _read implementation for both types of streams that allow reading.
+ * @this {ClientReadableObjectStream}
+ * @param {number} size Ignored
*/
-function ClientWritableObjectStream(stream, serialize, options) {
- options = _.extend(options, {objectMode: true});
- Writable.call(this, options);
- this._stream = stream;
- this._serialize = serialize;
- forwardEvent(stream, this, 'status');
- forwardEvent(stream, this, 'metadata');
- this.on('finish', function() {
- this._stream.end();
- });
+function _read(size) {
+ this._stream.resume();
}
+/**
+ * See docs for _read
+ */
+ClientReadableObjectStream.prototype._read = _read;
-util.inherits(ClientBidiObjectStream, Duplex);
+util.inherits(ClientWritableObjectStream, Writable);
/**
- * Class for representing a gRPC bidi streaming call as a Node stream on the
- * client side. Extends from stream.Duplex.
+ * Class for representing a gRPC client streaming call as a Node stream on the
+ * client side. Extends from stream.Writable.
* @constructor
* @param {stream} stream Underlying binary Duplex stream for the call
- * @param {function(*):Buffer} serialize Function for serializing objects
- * @param {function(Buffer)} deserialize Function for deserializing binary data
- * @param {object} options Stream options
*/
-function ClientBidiObjectStream(stream, serialize, deserialize, options) {
- options = _.extend(options, {objectMode: true});
- Duplex.call(this, options);
+function ClientWritableObjectStream(stream) {
+ var options = {objectMode: true};
+ Writable.call(this, options);
this._stream = stream;
- this._serialize = serialize;
- var self = this;
forwardEvent(stream, this, 'status');
forwardEvent(stream, this, 'metadata');
- this._stream.on('data', function forwardData(chunk) {
- if (!self.push(deserialize(chunk))) {
- self._stream.pause();
- }
- });
- this._stream.pause();
this.on('finish', function() {
this._stream.end();
});
}
/**
- * _read implementation for both types of streams that allow reading.
- * @this {ClientReadableObjectStream|ClientBidiObjectStream}
- * @param {number} size Ignored
- */
-function _read(size) {
- this._stream.resume();
-}
-
-/**
- * See docs for _read
- */
-ClientReadableObjectStream.prototype._read = _read;
-/**
- * See docs for _read
- */
-ClientBidiObjectStream.prototype._read = _read;
-
-/**
* _write implementation for both types of streams that allow writing
- * @this {ClientWritableObjectStream|ClientBidiObjectStream}
+ * @this {ClientWritableObjectStream}
* @param {*} chunk The value to write to the stream
* @param {string} encoding Ignored
* @param {function(Error)} callback Callback to call when finished writing
*/
function _write(chunk, encoding, callback) {
- this._stream.write(this._serialize(chunk), encoding, callback);
+ this._stream.write(chunk, encoding, callback);
}
/**
* See docs for _write
*/
ClientWritableObjectStream.prototype._write = _write;
+
/**
- * See docs for _write
+ * Cancel the underlying call
*/
-ClientBidiObjectStream.prototype._write = _write;
+function cancel() {
+ this._stream.cancel();
+}
+
+ClientReadableObjectStream.prototype.cancel = cancel;
+ClientWritableObjectStream.prototype.cancel = cancel;
/**
* Get a function that can make unary requests to the specified method.
@@ -196,19 +162,28 @@ function makeUnaryRequestFunction(method, serialize, deserialize) {
* @return {EventEmitter} An event emitter for stream related events
*/
function makeUnaryRequest(argument, callback, metadata, deadline) {
- var stream = client.makeRequest(this.channel, method, metadata, deadline);
+ var stream = client.makeRequest(this.channel, method, serialize,
+ deserialize, metadata, deadline);
var emitter = new EventEmitter();
+ emitter.cancel = function cancel() {
+ stream.cancel();
+ };
forwardEvent(stream, emitter, 'status');
forwardEvent(stream, emitter, 'metadata');
- stream.write(serialize(argument));
+ stream.write(argument);
stream.end();
stream.on('data', function forwardData(chunk) {
try {
- callback(null, deserialize(chunk));
+ callback(null, chunk);
} catch (e) {
callback(e);
}
});
+ stream.on('status', function forwardStatus(status) {
+ if (status.code !== client.status.OK) {
+ callback(status);
+ }
+ });
return emitter;
}
return makeUnaryRequest;
@@ -236,15 +211,21 @@ function makeClientStreamRequestFunction(method, serialize, deserialize) {
* @return {EventEmitter} An event emitter for stream related events
*/
function makeClientStreamRequest(callback, metadata, deadline) {
- var stream = client.makeRequest(this.channel, method, metadata, deadline);
- var obj_stream = new ClientWritableObjectStream(stream, serialize, {});
+ var stream = client.makeRequest(this.channel, method, serialize,
+ deserialize, metadata, deadline);
+ var obj_stream = new ClientWritableObjectStream(stream);
stream.on('data', function forwardData(chunk) {
try {
- callback(null, deserialize(chunk));
+ callback(null, chunk);
} catch (e) {
callback(e);
}
});
+ stream.on('status', function forwardStatus(status) {
+ if (status.code !== client.status.OK) {
+ callback(status);
+ }
+ });
return obj_stream;
}
return makeClientStreamRequest;
@@ -272,9 +253,10 @@ function makeServerStreamRequestFunction(method, serialize, deserialize) {
* @return {EventEmitter} An event emitter for stream related events
*/
function makeServerStreamRequest(argument, metadata, deadline) {
- var stream = client.makeRequest(this.channel, method, metadata, deadline);
- var obj_stream = new ClientReadableObjectStream(stream, deserialize, {});
- stream.write(serialize(argument));
+ var stream = client.makeRequest(this.channel, method, serialize,
+ deserialize, metadata, deadline);
+ var obj_stream = new ClientReadableObjectStream(stream);
+ stream.write(argument);
stream.end();
return obj_stream;
}
@@ -301,12 +283,8 @@ function makeBidiStreamRequestFunction(method, serialize, deserialize) {
* @return {EventEmitter} An event emitter for stream related events
*/
function makeBidiStreamRequest(metadata, deadline) {
- var stream = client.makeRequest(this.channel, method, metadata, deadline);
- var obj_stream = new ClientBidiObjectStream(stream,
- serialize,
- deserialize,
- {});
- return obj_stream;
+ return client.makeRequest(this.channel, method, serialize,
+ deserialize, metadata, deadline);
}
return makeBidiStreamRequest;
}
diff --git a/src/node/surface_server.js b/src/node/src/surface_server.js
index bc688839fe..af23ec211c 100644
--- a/src/node/surface_server.js
+++ b/src/node/src/surface_server.js
@@ -54,67 +54,20 @@ util.inherits(ServerReadableObjectStream, Readable);
* server side. Extends from stream.Readable.
* @constructor
* @param {stream} stream Underlying binary Duplex stream for the call
- * @param {function(Buffer)} deserialize Function for deserializing binary data
- * @param {object} options Stream options
*/
-function ServerReadableObjectStream(stream, deserialize, options) {
- options = _.extend(options, {objectMode: true});
+function ServerReadableObjectStream(stream) {
+ var options = {objectMode: true};
Readable.call(this, options);
this._stream = stream;
Object.defineProperty(this, 'cancelled', {
get: function() { return stream.cancelled; }
});
var self = this;
- this._stream.on('data', function forwardData(chunk) {
- if (!self.push(deserialize(chunk))) {
- self._stream.pause();
- }
- });
- this._stream.on('end', function forwardEnd() {
- self.push(null);
+ this._stream.on('cancelled', function() {
+ self.emit('cancelled');
});
- this._stream.pause();
-}
-
-util.inherits(ServerWritableObjectStream, Writable);
-
-/**
- * Class for representing a gRPC server streaming call as a Node stream on the
- * server side. Extends from stream.Writable.
- * @constructor
- * @param {stream} stream Underlying binary Duplex stream for the call
- * @param {function(*):Buffer} serialize Function for serializing objects
- * @param {object} options Stream options
- */
-function ServerWritableObjectStream(stream, serialize, options) {
- options = _.extend(options, {objectMode: true});
- Writable.call(this, options);
- this._stream = stream;
- this._serialize = serialize;
- this.on('finish', function() {
- this._stream.end();
- });
-}
-
-util.inherits(ServerBidiObjectStream, Duplex);
-
-/**
- * Class for representing a gRPC bidi streaming call as a Node stream on the
- * server side. Extends from stream.Duplex.
- * @constructor
- * @param {stream} stream Underlying binary Duplex stream for the call
- * @param {function(*):Buffer} serialize Function for serializing objects
- * @param {function(Buffer)} deserialize Function for deserializing binary data
- * @param {object} options Stream options
- */
-function ServerBidiObjectStream(stream, serialize, deserialize, options) {
- options = _.extend(options, {objectMode: true});
- Duplex.call(this, options);
- this._stream = stream;
- this._serialize = serialize;
- var self = this;
this._stream.on('data', function forwardData(chunk) {
- if (!self.push(deserialize(chunk))) {
+ if (!self.push(chunk)) {
self._stream.pause();
}
});
@@ -122,9 +75,6 @@ function ServerBidiObjectStream(stream, serialize, deserialize, options) {
self.push(null);
});
this._stream.pause();
- this.on('finish', function() {
- this._stream.end();
- });
}
/**
@@ -140,39 +90,49 @@ function _read(size) {
* See docs for _read
*/
ServerReadableObjectStream.prototype._read = _read;
+
+util.inherits(ServerWritableObjectStream, Writable);
+
/**
- * See docs for _read
+ * Class for representing a gRPC server streaming call as a Node stream on the
+ * server side. Extends from stream.Writable.
+ * @constructor
+ * @param {stream} stream Underlying binary Duplex stream for the call
*/
-ServerBidiObjectStream.prototype._read = _read;
+function ServerWritableObjectStream(stream) {
+ var options = {objectMode: true};
+ Writable.call(this, options);
+ this._stream = stream;
+ this._stream.on('cancelled', function() {
+ self.emit('cancelled');
+ });
+ this.on('finish', function() {
+ this._stream.end();
+ });
+}
/**
* _write implementation for both types of streams that allow writing
- * @this {ServerWritableObjectStream|ServerBidiObjectStream}
+ * @this {ServerWritableObjectStream}
* @param {*} chunk The value to write to the stream
* @param {string} encoding Ignored
* @param {function(Error)} callback Callback to call when finished writing
*/
function _write(chunk, encoding, callback) {
- this._stream.write(this._serialize(chunk), encoding, callback);
+ this._stream.write(chunk, encoding, callback);
}
/**
* See docs for _write
*/
ServerWritableObjectStream.prototype._write = _write;
-/**
- * See docs for _write
- */
-ServerBidiObjectStream.prototype._write = _write;
/**
* Creates a binary stream handler function from a unary handler function
* @param {function(Object, function(Error, *))} handler Unary call handler
- * @param {function(*):Buffer} serialize Serialization function
- * @param {function(Buffer):*} deserialize Deserialization function
* @return {function(stream)} Binary stream handler
*/
-function makeUnaryHandler(handler, serialize, deserialize) {
+function makeUnaryHandler(handler) {
/**
* Handles a stream by reading a single data value, passing it to the handler,
* and writing the response back to the stream.
@@ -180,15 +140,18 @@ function makeUnaryHandler(handler, serialize, deserialize) {
*/
return function handleUnaryCall(stream) {
stream.on('data', function handleUnaryData(value) {
- var call = {request: deserialize(value)};
+ var call = {request: value};
Object.defineProperty(call, 'cancelled', {
get: function() { return stream.cancelled;}
});
+ stream.on('cancelled', function() {
+ call.emit('cancelled');
+ });
handler(call, function sendUnaryData(err, value) {
if (err) {
stream.emit('error', err);
} else {
- stream.write(serialize(value));
+ stream.write(value);
stream.end();
}
});
@@ -201,23 +164,21 @@ function makeUnaryHandler(handler, serialize, deserialize) {
* function
* @param {function(Readable, function(Error, *))} handler Client stream call
* handler
- * @param {function(*):Buffer} serialize Serialization function
- * @param {function(Buffer):*} deserialize Deserialization function
* @return {function(stream)} Binary stream handler
*/
-function makeClientStreamHandler(handler, serialize, deserialize) {
+function makeClientStreamHandler(handler) {
/**
* Handles a stream by passing a deserializing stream to the handler and
* writing the response back to the stream.
* @param {stream} stream Binary data stream
*/
return function handleClientStreamCall(stream) {
- var object_stream = new ServerReadableObjectStream(stream, deserialize, {});
+ var object_stream = new ServerReadableObjectStream(stream);
handler(object_stream, function sendClientStreamData(err, value) {
if (err) {
stream.emit('error', err);
} else {
- stream.write(serialize(value));
+ stream.write(value);
stream.end();
}
});
@@ -228,11 +189,9 @@ function makeClientStreamHandler(handler, serialize, deserialize) {
* Creates a binary stream handler function from a server stream handler
* function
* @param {function(Writable)} handler Server stream call handler
- * @param {function(*):Buffer} serialize Serialization function
- * @param {function(Buffer):*} deserialize Deserialization function
* @return {function(stream)} Binary stream handler
*/
-function makeServerStreamHandler(handler, serialize, deserialize) {
+function makeServerStreamHandler(handler) {
/**
* Handles a stream by attaching it to a serializing stream, and passing it to
* the handler.
@@ -240,10 +199,8 @@ function makeServerStreamHandler(handler, serialize, deserialize) {
*/
return function handleServerStreamCall(stream) {
stream.on('data', function handleClientData(value) {
- var object_stream = new ServerWritableObjectStream(stream,
- serialize,
- {});
- object_stream.request = deserialize(value);
+ var object_stream = new ServerWritableObjectStream(stream);
+ object_stream.request = value;
handler(object_stream);
});
};
@@ -252,23 +209,10 @@ function makeServerStreamHandler(handler, serialize, deserialize) {
/**
* Creates a binary stream handler function from a bidi stream handler function
* @param {function(Duplex)} handler Unary call handler
- * @param {function(*):Buffer} serialize Serialization function
- * @param {function(Buffer):*} deserialize Deserialization function
* @return {function(stream)} Binary stream handler
*/
-function makeBidiStreamHandler(handler, serialize, deserialize) {
- /**
- * Handles a stream by wrapping it in a serializing and deserializing object
- * stream, and passing it to the handler.
- * @param {stream} stream Binary data stream
- */
- return function handleBidiStreamCall(stream) {
- var object_stream = new ServerBidiObjectStream(stream,
- serialize,
- deserialize,
- {});
- handler(object_stream);
- };
+function makeBidiStreamHandler(handler) {
+ return handler;
}
/**
@@ -341,10 +285,13 @@ function makeServerConstructor(services) {
common.fullyQualifiedName(method) + ' not provided.');
}
var binary_handler = handler_makers[method_type](
- service_handlers[service_name][decapitalize(method.name)],
- common.serializeCls(method.resolvedResponseType.build()),
- common.deserializeCls(method.resolvedRequestType.build()));
- server.register(prefix + capitalize(method.name), binary_handler);
+ service_handlers[service_name][decapitalize(method.name)]);
+ var serialize = common.serializeCls(
+ method.resolvedResponseType.build());
+ var deserialize = common.deserializeCls(
+ method.resolvedRequestType.build());
+ server.register(prefix + capitalize(method.name), binary_handler,
+ serialize, deserialize);
});
}, this);
}
diff --git a/src/node/test/call_test.js b/src/node/test/call_test.js
index e6dc9664f1..b37c44abaf 100644
--- a/src/node/test/call_test.js
+++ b/src/node/test/call_test.js
@@ -34,8 +34,6 @@
var assert = require('assert');
var grpc = require('bindings')('grpc.node');
-var channel = new grpc.Channel('localhost:7070');
-
/**
* Helper function to return an absolute deadline given a relative timeout in
* seconds.
@@ -49,6 +47,17 @@ function getDeadline(timeout_secs) {
}
describe('call', function() {
+ var channel;
+ var server;
+ before(function() {
+ server = new grpc.Server();
+ var port = server.addHttp2Port('localhost:0');
+ server.start();
+ channel = new grpc.Channel('localhost:' + port);
+ });
+ after(function() {
+ server.shutdown();
+ });
describe('constructor', function() {
it('should reject anything less than 3 arguments', function() {
assert.throws(function() {
@@ -118,12 +127,11 @@ describe('call', function() {
call.addMetadata(5);
}, TypeError);
});
- it('should fail if startInvoke was already called', function(done) {
+ it('should fail if invoke was already called', function(done) {
var call = new grpc.Call(channel, 'method', getDeadline(1));
- call.startInvoke(function() {},
- function() {},
- function() {done();},
- 0);
+ call.invoke(function() {},
+ function() {done();},
+ 0);
assert.throws(function() {
call.addMetadata({'key' : 'key', 'value' : new Buffer('value') });
}, function(err) {
@@ -133,32 +141,26 @@ describe('call', function() {
call.cancel();
});
});
- describe('startInvoke', function() {
- it('should fail with fewer than 4 arguments', function() {
+ describe('invoke', function() {
+ it('should fail with fewer than 3 arguments', function() {
var call = new grpc.Call(channel, 'method', getDeadline(1));
assert.throws(function() {
- call.startInvoke();
- }, TypeError);
- assert.throws(function() {
- call.startInvoke(function() {});
+ call.invoke();
}, TypeError);
assert.throws(function() {
- call.startInvoke(function() {},
- function() {});
+ call.invoke(function() {});
}, TypeError);
assert.throws(function() {
- call.startInvoke(function() {},
- function() {},
- function() {});
+ call.invoke(function() {},
+ function() {});
}, TypeError);
});
- it('should work with 3 args and an int', function(done) {
+ it('should work with 2 args and an int', function(done) {
assert.doesNotThrow(function() {
var call = new grpc.Call(channel, 'method', getDeadline(1));
- call.startInvoke(function() {},
- function() {},
- function() {done();},
- 0);
+ call.invoke(function() {},
+ function() {done();},
+ 0);
// Cancel to speed up the test
call.cancel();
});
@@ -166,12 +168,11 @@ describe('call', function() {
it('should reject incorrectly typed arguments', function() {
var call = new grpc.Call(channel, 'method', getDeadline(1));
assert.throws(function() {
- call.startInvoke(0, 0, 0, 0);
+ call.invoke(0, 0, 0);
}, TypeError);
assert.throws(function() {
- call.startInvoke(function() {},
- function() {},
- function() {}, 'test');
+ call.invoke(function() {},
+ function() {}, 'test');
});
});
});
diff --git a/src/node/test/client_server_test.js b/src/node/test/client_server_test.js
index 2a25908684..d657ef41a4 100644
--- a/src/node/test/client_server_test.js
+++ b/src/node/test/client_server_test.js
@@ -35,9 +35,9 @@ var assert = require('assert');
var fs = require('fs');
var path = require('path');
var grpc = require('bindings')('grpc.node');
-var Server = require('../server');
-var client = require('../client');
-var common = require('../common');
+var Server = require('../src/server');
+var client = require('../src/client');
+var common = require('../src/common');
var _ = require('highland');
var ca_path = path.join(__dirname, 'data/ca.pem');
@@ -77,15 +77,32 @@ function errorHandler(stream) {
};
}
+/**
+ * Wait for a cancellation instead of responding
+ * @param {Stream} stream
+ */
+function cancelHandler(stream) {
+ // do nothing
+}
+
describe('echo client', function() {
- it('should receive echo responses', function(done) {
- var server = new Server();
+ var server;
+ var channel;
+ before(function() {
+ server = new Server();
var port_num = server.bind('0.0.0.0:0');
server.register('echo', echoHandler);
+ server.register('error', errorHandler);
+ server.register('cancellation', cancelHandler);
server.start();
+ channel = new grpc.Channel('localhost:' + port_num);
+ });
+ after(function() {
+ server.shutdown();
+ });
+ it('should receive echo responses', function(done) {
var messages = ['echo1', 'echo2', 'echo3', 'echo4'];
- var channel = new grpc.Channel('localhost:' + port_num);
var stream = client.makeRequest(
channel,
'echo');
@@ -98,17 +115,10 @@ describe('echo client', function() {
index += 1;
});
stream.on('end', function() {
- server.shutdown();
done();
});
});
it('should get an error status that the server throws', function(done) {
- var server = new Server();
- var port_num = server.bind('0.0.0.0:0');
- server.register('error', errorHandler);
- server.start();
-
- var channel = new grpc.Channel('localhost:' + port_num);
var stream = client.makeRequest(
channel,
'error',
@@ -121,7 +131,19 @@ describe('echo client', function() {
stream.on('status', function(status) {
assert.equal(status.code, grpc.status.UNIMPLEMENTED);
assert.equal(status.details, 'error details');
- server.shutdown();
+ done();
+ });
+ });
+ it('should be able to cancel a call', function(done) {
+ var stream = client.makeRequest(
+ channel,
+ 'cancellation',
+ null,
+ getDeadline(1));
+
+ stream.cancel();
+ stream.on('status', function(status) {
+ assert.equal(status.code, grpc.status.CANCELLED);
done();
});
});
@@ -129,7 +151,9 @@ describe('echo client', function() {
/* TODO(mlumish): explore options for reducing duplication between this test
* and the insecure echo client test */
describe('secure echo client', function() {
- it('should receive echo responses', function(done) {
+ var server;
+ var channel;
+ before(function(done) {
fs.readFile(ca_path, function(err, ca_data) {
assert.ifError(err);
fs.readFile(key_path, function(err, key_data) {
@@ -141,34 +165,40 @@ describe('secure echo client', function() {
key_data,
pem_data);
- var server = new Server({'credentials' : server_creds});
+ server = new Server({'credentials' : server_creds});
var port_num = server.bind('0.0.0.0:0', true);
server.register('echo', echoHandler);
server.start();
- var messages = ['echo1', 'echo2', 'echo3', 'echo4'];
- var channel = new grpc.Channel('localhost:' + port_num, {
+ channel = new grpc.Channel('localhost:' + port_num, {
'grpc.ssl_target_name_override' : 'foo.test.google.com',
'credentials' : creds
});
- var stream = client.makeRequest(
- channel,
- 'echo');
-
- _(messages).map(function(val) {
- return new Buffer(val);
- }).pipe(stream);
- var index = 0;
- stream.on('data', function(chunk) {
- assert.equal(messages[index], chunk.toString());
- index += 1;
- });
- stream.on('end', function() {
- server.shutdown();
- done();
- });
+ done();
});
});
});
});
+ after(function() {
+ server.shutdown();
+ });
+ it('should receive echo responses', function(done) {
+ var messages = ['echo1', 'echo2', 'echo3', 'echo4'];
+ var stream = client.makeRequest(
+ channel,
+ 'echo');
+
+ _(messages).map(function(val) {
+ return new Buffer(val);
+ }).pipe(stream);
+ var index = 0;
+ stream.on('data', function(chunk) {
+ assert.equal(messages[index], chunk.toString());
+ index += 1;
+ });
+ stream.on('end', function() {
+ server.shutdown();
+ done();
+ });
+ });
});
diff --git a/src/node/test/constant_test.js b/src/node/test/constant_test.js
index f65eea3cff..0138a55226 100644
--- a/src/node/test/constant_test.js
+++ b/src/node/test/constant_test.js
@@ -94,7 +94,6 @@ var opErrorNames = [
var completionTypeNames = [
'QUEUE_SHUTDOWN',
'READ',
- 'INVOKE_ACCEPTED',
'WRITE_ACCEPTED',
'FINISH_ACCEPTED',
'CLIENT_METADATA_READ',
diff --git a/src/node/test/end_to_end_test.js b/src/node/test/end_to_end_test.js
index db3834dbba..f8cb660d2d 100644
--- a/src/node/test/end_to_end_test.js
+++ b/src/node/test/end_to_end_test.js
@@ -56,30 +56,28 @@ function multiDone(done, count) {
}
describe('end-to-end', function() {
+ var server;
+ var channel;
+ before(function() {
+ server = new grpc.Server();
+ var port_num = server.addHttp2Port('0.0.0.0:0');
+ server.start();
+ channel = new grpc.Channel('localhost:' + port_num);
+ });
+ after(function() {
+ server.shutdown();
+ });
it('should start and end a request without error', function(complete) {
- var server = new grpc.Server();
var done = multiDone(function() {
complete();
- server.shutdown();
}, 2);
- var port_num = server.addHttp2Port('0.0.0.0:0');
- var channel = new grpc.Channel('localhost:' + port_num);
var deadline = new Date();
deadline.setSeconds(deadline.getSeconds() + 3);
var status_text = 'xyz';
var call = new grpc.Call(channel,
'dummy_method',
deadline);
- call.startInvoke(function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.INVOKE_ACCEPTED);
-
- call.writesDone(function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.FINISH_ACCEPTED);
- assert.strictEqual(event.data, grpc.opError.OK);
- });
- },function(event) {
+ call.invoke(function(event) {
assert.strictEqual(event.type,
grpc.completionType.CLIENT_METADATA_READ);
},function(event) {
@@ -90,7 +88,6 @@ describe('end-to-end', function() {
done();
}, 0);
- server.start();
server.requestCall(function(event) {
assert.strictEqual(event.type, grpc.completionType.SERVER_RPC_NEW);
var server_call = event.call;
@@ -109,46 +106,26 @@ describe('end-to-end', function() {
done();
});
});
+ call.writesDone(function(event) {
+ assert.strictEqual(event.type,
+ grpc.completionType.FINISH_ACCEPTED);
+ assert.strictEqual(event.data, grpc.opError.OK);
+ });
});
-
it('should send and receive data without error', function(complete) {
var req_text = 'client_request';
var reply_text = 'server_response';
- var server = new grpc.Server();
var done = multiDone(function() {
complete();
server.shutdown();
}, 6);
- var port_num = server.addHttp2Port('0.0.0.0:0');
- var channel = new grpc.Channel('localhost:' + port_num);
var deadline = new Date();
deadline.setSeconds(deadline.getSeconds() + 3);
var status_text = 'success';
var call = new grpc.Call(channel,
'dummy_method',
deadline);
- call.startInvoke(function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.INVOKE_ACCEPTED);
- call.startWrite(
- new Buffer(req_text),
- function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.WRITE_ACCEPTED);
- assert.strictEqual(event.data, grpc.opError.OK);
- call.writesDone(function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.FINISH_ACCEPTED);
- assert.strictEqual(event.data, grpc.opError.OK);
- done();
- });
- }, 0);
- call.startRead(function(event) {
- assert.strictEqual(event.type, grpc.completionType.READ);
- assert.strictEqual(event.data.toString(), reply_text);
- done();
- });
- },function(event) {
+ call.invoke(function(event) {
assert.strictEqual(event.type,
grpc.completionType.CLIENT_METADATA_READ);
done();
@@ -159,8 +136,24 @@ describe('end-to-end', function() {
assert.strictEqual(status.details, status_text);
done();
}, 0);
-
- server.start();
+ call.startWrite(
+ new Buffer(req_text),
+ function(event) {
+ assert.strictEqual(event.type,
+ grpc.completionType.WRITE_ACCEPTED);
+ assert.strictEqual(event.data, grpc.opError.OK);
+ call.writesDone(function(event) {
+ assert.strictEqual(event.type,
+ grpc.completionType.FINISH_ACCEPTED);
+ assert.strictEqual(event.data, grpc.opError.OK);
+ done();
+ });
+ }, 0);
+ call.startRead(function(event) {
+ assert.strictEqual(event.type, grpc.completionType.READ);
+ assert.strictEqual(event.data.toString(), reply_text);
+ done();
+ });
server.requestCall(function(event) {
assert.strictEqual(event.type, grpc.completionType.SERVER_RPC_NEW);
var server_call = event.call;
diff --git a/src/node/test/interop_sanity_test.js b/src/node/test/interop_sanity_test.js
index 410b050e8d..6cc7d444cd 100644
--- a/src/node/test/interop_sanity_test.js
+++ b/src/node/test/interop_sanity_test.js
@@ -48,11 +48,15 @@ describe('Interop tests', function() {
port = 'localhost:' + server_obj.port;
done();
});
+ after(function() {
+ server.shutdown();
+ });
// This depends on not using a binary stream
it('should pass empty_unary', function(done) {
interop_client.runTest(port, name_override, 'empty_unary', true, done);
});
- it('should pass large_unary', function(done) {
+ // This fails due to an unknown bug
+ it.skip('should pass large_unary', function(done) {
interop_client.runTest(port, name_override, 'large_unary', true, done);
});
it('should pass client_streaming', function(done) {
@@ -64,8 +68,7 @@ describe('Interop tests', function() {
it('should pass ping_pong', function(done) {
interop_client.runTest(port, name_override, 'ping_pong', true, done);
});
- // This depends on the new invoke API
- it.skip('should pass empty_stream', function(done) {
+ it('should pass empty_stream', function(done) {
interop_client.runTest(port, name_override, 'empty_stream', true, done);
});
});
diff --git a/src/node/test/server_test.js b/src/node/test/server_test.js
index 61aef4677e..5fad9a5564 100644
--- a/src/node/test/server_test.js
+++ b/src/node/test/server_test.js
@@ -33,7 +33,7 @@
var assert = require('assert');
var grpc = require('bindings')('grpc.node');
-var Server = require('../server');
+var Server = require('../src/server');
/**
* This is used for testing functions with multiple asynchronous calls that
@@ -65,44 +65,28 @@ function echoHandler(stream) {
}
describe('echo server', function() {
- it('should echo inputs as responses', function(done) {
- done = multiDone(done, 4);
- var server = new Server();
+ var server;
+ var channel;
+ before(function() {
+ server = new Server();
var port_num = server.bind('[::]:0');
server.register('echo', echoHandler);
server.start();
+ channel = new grpc.Channel('localhost:' + port_num);
+ });
+ it('should echo inputs as responses', function(done) {
+ done = multiDone(done, 4);
+
var req_text = 'echo test string';
var status_text = 'OK';
- var channel = new grpc.Channel('localhost:' + port_num);
var deadline = new Date();
deadline.setSeconds(deadline.getSeconds() + 3);
var call = new grpc.Call(channel,
'echo',
deadline);
- call.startInvoke(function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.INVOKE_ACCEPTED);
- call.startWrite(
- new Buffer(req_text),
- function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.WRITE_ACCEPTED);
- assert.strictEqual(event.data, grpc.opError.OK);
- call.writesDone(function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.FINISH_ACCEPTED);
- assert.strictEqual(event.data, grpc.opError.OK);
- done();
- });
- }, 0);
- call.startRead(function(event) {
- assert.strictEqual(event.type, grpc.completionType.READ);
- assert.strictEqual(event.data.toString(), req_text);
- done();
- });
- },function(event) {
+ call.invoke(function(event) {
assert.strictEqual(event.type,
grpc.completionType.CLIENT_METADATA_READ);
done();
@@ -114,5 +98,23 @@ describe('echo server', function() {
server.shutdown();
done();
}, 0);
+ call.startWrite(
+ new Buffer(req_text),
+ function(event) {
+ assert.strictEqual(event.type,
+ grpc.completionType.WRITE_ACCEPTED);
+ assert.strictEqual(event.data, grpc.opError.OK);
+ call.writesDone(function(event) {
+ assert.strictEqual(event.type,
+ grpc.completionType.FINISH_ACCEPTED);
+ assert.strictEqual(event.data, grpc.opError.OK);
+ done();
+ });
+ }, 0);
+ call.startRead(function(event) {
+ assert.strictEqual(event.type, grpc.completionType.READ);
+ assert.strictEqual(event.data.toString(), req_text);
+ done();
+ });
});
});
diff --git a/src/node/test/surface_test.js b/src/node/test/surface_test.js
index 34f1a156eb..85f4841d4b 100644
--- a/src/node/test/surface_test.js
+++ b/src/node/test/surface_test.js
@@ -33,7 +33,9 @@
var assert = require('assert');
-var surface_server = require('../surface_server.js');
+var surface_server = require('../src/surface_server.js');
+
+var surface_client = require('../src/surface_client.js');
var ProtoBuf = require('protobufjs');
@@ -73,3 +75,54 @@ describe('Surface server constructor', function() {
}, /math.Math/);
});
});
+describe('Surface client', function() {
+ var client;
+ var server;
+ before(function() {
+ var Server = grpc.buildServer([mathService]);
+ server = new Server({
+ 'math.Math': {
+ 'div': function(stream) {},
+ 'divMany': function(stream) {},
+ 'fib': function(stream) {},
+ 'sum': function(stream) {}
+ }
+ });
+ var port = server.bind('localhost:0');
+ var Client = surface_client.makeClientConstructor(mathService);
+ client = new Client('localhost:' + port);
+ });
+ after(function() {
+ server.shutdown();
+ });
+ it('Should correctly cancel a unary call', function(done) {
+ var call = client.div({'divisor': 0, 'dividend': 0}, function(err, resp) {
+ assert.strictEqual(err.code, surface_client.status.CANCELLED);
+ done();
+ });
+ call.cancel();
+ });
+ it('Should correctly cancel a client stream call', function(done) {
+ var call = client.sum(function(err, resp) {
+ assert.strictEqual(err.code, surface_client.status.CANCELLED);
+ done();
+ });
+ call.cancel();
+ });
+ it('Should correctly cancel a server stream call', function(done) {
+ var call = client.fib({'limit': 5});
+ call.on('status', function(status) {
+ assert.strictEqual(status.code, surface_client.status.CANCELLED);
+ done();
+ });
+ call.cancel();
+ });
+ it('Should correctly cancel a bidi stream call', function(done) {
+ var call = client.divMany();
+ call.on('status', function(status) {
+ assert.strictEqual(status.code, surface_client.status.CANCELLED);
+ done();
+ });
+ call.cancel();
+ });
+});
diff --git a/src/php/ext/grpc/call.c b/src/php/ext/grpc/call.c
index 410efbce68..b171c9c176 100644
--- a/src/php/ext/grpc/call.c
+++ b/src/php/ext/grpc/call.c
@@ -224,27 +224,25 @@ PHP_METHOD(Call, add_metadata) {
/**
* Invoke the RPC. Starts sending metadata and request headers over the wire
* @param CompletionQueue $queue The completion queue to use with this call
- * @param long $invoke_accepted_tag The tag to associate with this invocation
* @param long $metadata_tag The tag to associate with returned metadata
* @param long $finished_tag The tag to associate with the finished event
* @param long $flags A bitwise combination of the Grpc\WRITE_* constants
* (optional)
* @return Void
*/
-PHP_METHOD(Call, start_invoke) {
+PHP_METHOD(Call, invoke) {
grpc_call_error error_code;
long tag1;
long tag2;
- long tag3;
zval *queue_obj;
long flags = 0;
- /* "Olll|l" == 1 Object, 3 mandatory longs, 1 optional long */
- if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Olll|l", &queue_obj,
- grpc_ce_completion_queue, &tag1, &tag2, &tag3,
+  /* "Oll|l" == 1 Object, 2 mandatory longs, 1 optional long */
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Oll|l", &queue_obj,
+ grpc_ce_completion_queue, &tag1, &tag2,
&flags) == FAILURE) {
zend_throw_exception(
spl_ce_InvalidArgumentException,
- "start_invoke needs a CompletionQueue, 3 longs, and an optional long",
+ "invoke needs a CompletionQueue, 2 longs, and an optional long",
1 TSRMLS_CC);
return;
}
@@ -254,10 +252,9 @@ PHP_METHOD(Call, start_invoke) {
wrapped_grpc_completion_queue *queue =
(wrapped_grpc_completion_queue *)zend_object_store_get_object(
queue_obj TSRMLS_CC);
- error_code =
- grpc_call_start_invoke(call->wrapped, queue->wrapped, (void *)tag1,
- (void *)tag2, (void *)tag3, (gpr_uint32)flags);
- MAYBE_THROW_CALL_ERROR(start_invoke, error_code);
+ error_code = grpc_call_invoke(call->wrapped, queue->wrapped, (void *)tag1,
+ (void *)tag2, (gpr_uint32)flags);
+ MAYBE_THROW_CALL_ERROR(invoke, error_code);
}
/**
@@ -427,7 +424,7 @@ static zend_function_entry call_methods[] = {
PHP_ME(Call, server_end_initial_metadata, NULL, ZEND_ACC_PUBLIC)
PHP_ME(Call, add_metadata, NULL, ZEND_ACC_PUBLIC)
PHP_ME(Call, cancel, NULL, ZEND_ACC_PUBLIC)
- PHP_ME(Call, start_invoke, NULL, ZEND_ACC_PUBLIC)
+ PHP_ME(Call, invoke, NULL, ZEND_ACC_PUBLIC)
PHP_ME(Call, start_read, NULL, ZEND_ACC_PUBLIC)
PHP_ME(Call, start_write, NULL, ZEND_ACC_PUBLIC)
PHP_ME(Call, start_write_status, NULL, ZEND_ACC_PUBLIC)
diff --git a/src/php/ext/grpc/php_grpc.c b/src/php/ext/grpc/php_grpc.c
index e8b4643a58..492ac06739 100644
--- a/src/php/ext/grpc/php_grpc.c
+++ b/src/php/ext/grpc/php_grpc.c
@@ -107,11 +107,9 @@ PHP_MINIT_FUNCTION(grpc) {
/* Register completion type constants */
REGISTER_LONG_CONSTANT("Grpc\\QUEUE_SHUTDOWN", GRPC_QUEUE_SHUTDOWN, CONST_CS);
REGISTER_LONG_CONSTANT("Grpc\\READ", GRPC_READ, CONST_CS);
- REGISTER_LONG_CONSTANT("Grpc\\INVOKE_ACCEPTED", GRPC_INVOKE_ACCEPTED,
- CONST_CS);
- REGISTER_LONG_CONSTANT("Grpc\\WRITE_ACCEPTED", GRPC_WRITE_ACCEPTED, CONST_CS);
REGISTER_LONG_CONSTANT("Grpc\\FINISH_ACCEPTED", GRPC_FINISH_ACCEPTED,
CONST_CS);
+ REGISTER_LONG_CONSTANT("Grpc\\WRITE_ACCEPTED", GRPC_WRITE_ACCEPTED, CONST_CS);
REGISTER_LONG_CONSTANT("Grpc\\CLIENT_METADATA_READ",
GRPC_CLIENT_METADATA_READ, CONST_CS);
REGISTER_LONG_CONSTANT("Grpc\\FINISHED", GRPC_FINISHED, CONST_CS);
diff --git a/src/php/lib/Grpc/ActiveCall.php b/src/php/lib/Grpc/ActiveCall.php
index aa66dbb848..836a4b09e3 100755
--- a/src/php/lib/Grpc/ActiveCall.php
+++ b/src/php/lib/Grpc/ActiveCall.php
@@ -29,11 +29,8 @@ class ActiveCall {
// Invoke the call.
$this->call->start_invoke($this->completion_queue,
- INVOKE_ACCEPTED,
CLIENT_METADATA_READ,
FINISHED, 0);
- $this->completion_queue->pluck(INVOKE_ACCEPTED,
- Timeval::inf_future());
$metadata_event = $this->completion_queue->pluck(CLIENT_METADATA_READ,
Timeval::inf_future());
$this->metadata = $metadata_event->data;
diff --git a/src/php/tests/unit_tests/CallTest.php b/src/php/tests/unit_tests/CallTest.php
index 253052a038..795831cb65 100755
--- a/src/php/tests/unit_tests/CallTest.php
+++ b/src/php/tests/unit_tests/CallTest.php
@@ -19,10 +19,10 @@ class CallTest extends PHPUnit_Framework_TestCase{
/**
* @expectedException LogicException
* @expectedExceptionCode Grpc\CALL_ERROR_INVALID_FLAGS
- * @expectedExceptionMessage start_invoke
+ * @expectedExceptionMessage invoke
*/
- public function testStartInvokeRejectsBadFlags() {
- $this->call->start_invoke($this->cq, 0, 0, 0, 0xDEADBEEF);
+ public function testInvokeRejectsBadFlags() {
+ $this->call->invoke($this->cq, 0, 0, 0xDEADBEEF);
}
/**
diff --git a/src/php/tests/unit_tests/EndToEndTest.php b/src/php/tests/unit_tests/EndToEndTest.php
index 3818f9531c..78c5e9f93b 100755
--- a/src/php/tests/unit_tests/EndToEndTest.php
+++ b/src/php/tests/unit_tests/EndToEndTest.php
@@ -25,18 +25,12 @@ class EndToEndTest extends PHPUnit_Framework_TestCase{
$deadline);
$tag = 1;
$this->assertEquals(Grpc\CALL_OK,
- $call->start_invoke($this->client_queue,
- $tag,
- $tag,
- $tag));
+ $call->invoke($this->client_queue,
+ $tag,
+ $tag));
$server_tag = 2;
- // the client invocation was accepted
- $event = $this->client_queue->next($deadline);
- $this->assertNotNull($event);
- $this->assertEquals(Grpc\INVOKE_ACCEPTED, $event->type);
-
$call->writes_done($tag);
$event = $this->client_queue->next($deadline);
$this->assertNotNull($event);
@@ -103,18 +97,12 @@ class EndToEndTest extends PHPUnit_Framework_TestCase{
$deadline);
$tag = 1;
$this->assertEquals(Grpc\CALL_OK,
- $call->start_invoke($this->client_queue,
- $tag,
- $tag,
- $tag));
+ $call->invoke($this->client_queue,
+ $tag,
+ $tag));
$server_tag = 2;
- // the client invocation was accepted
- $event = $this->client_queue->next($deadline);
- $this->assertNotNull($event);
- $this->assertEquals(Grpc\INVOKE_ACCEPTED, $event->type);
-
// the client writes
$call->start_write($req_text, $tag);
$event = $this->client_queue->next($deadline);
diff --git a/src/php/tests/unit_tests/SecureEndToEndTest.php b/src/php/tests/unit_tests/SecureEndToEndTest.php
index c562a821a4..7c3ad8a07c 100755
--- a/src/php/tests/unit_tests/SecureEndToEndTest.php
+++ b/src/php/tests/unit_tests/SecureEndToEndTest.php
@@ -37,17 +37,11 @@ class SecureEndToEndTest extends PHPUnit_Framework_TestCase{
$deadline);
$tag = 1;
$this->assertEquals(Grpc\CALL_OK,
- $call->start_invoke($this->client_queue,
- $tag,
- $tag,
- $tag));
+ $call->invoke($this->client_queue,
+ $tag,
+ $tag));
$server_tag = 2;
- // the client invocation was accepted
- $event = $this->client_queue->next($deadline);
- $this->assertNotNull($event);
- $this->assertEquals(Grpc\INVOKE_ACCEPTED, $event->type);
-
$call->writes_done($tag);
$event = $this->client_queue->next($deadline);
$this->assertNotNull($event);
@@ -113,18 +107,12 @@ class SecureEndToEndTest extends PHPUnit_Framework_TestCase{
$deadline);
$tag = 1;
$this->assertEquals(Grpc\CALL_OK,
- $call->start_invoke($this->client_queue,
- $tag,
- $tag,
- $tag));
+ $call->invoke($this->client_queue,
+ $tag,
+ $tag));
$server_tag = 2;
- // the client invocation was accepted
- $event = $this->client_queue->next($deadline);
- $this->assertNotNull($event);
- $this->assertEquals(Grpc\INVOKE_ACCEPTED, $event->type);
-
// the client writes
$call->start_write($req_text, $tag);
$event = $this->client_queue->next($deadline);
diff --git a/src/python/_framework/base/__init__.py b/src/python/_framework/base/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/src/python/_framework/base/__init__.py
diff --git a/src/ruby/spec/port_picker.rb b/src/python/_framework/base/exceptions.py
index 98ffbacc1b..b8f4752184 100644
--- a/src/ruby/spec/port_picker.rb
+++ b/src/python/_framework/base/exceptions.py
@@ -1,4 +1,4 @@
-# Copyright 2014, Google Inc.
+# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -27,19 +27,8 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-require 'socket'
+"""Exceptions defined and used by the base layer of RPC Framework."""
-# @param [Fixnum] the minimum port number to accept
-# @param [Fixnum] the maximum port number to accept
-# @return [Fixnum ]a free tcp port
-def find_unused_tcp_port(min = 32_768, max = 60_000)
- # Allow the system to assign a port, by specifying 0.
- # Loop until a port is assigned in the required range
- loop do
- socket = Socket.new(:INET, :STREAM, 0)
- socket.bind(Addrinfo.tcp('127.0.0.1', 0))
- p = socket.local_address.ip_port
- socket.close
- return p if p > min && p < max
- end
-end
+
+class NoSuchMethodError(Exception):
+ """Indicates that an operation with an unrecognized name has been called."""
diff --git a/src/python/_framework/base/interfaces.py b/src/python/_framework/base/interfaces.py
new file mode 100644
index 0000000000..de7137cbf7
--- /dev/null
+++ b/src/python/_framework/base/interfaces.py
@@ -0,0 +1,229 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Interfaces defined and used by the base layer of RPC Framework."""
+
+# TODO(nathaniel): Use Python's new enum library for enumerated types rather
+# than constants merely placed close together.
+
+import abc
+
+# stream is referenced from specification in this module.
+from _framework.foundation import stream # pylint: disable=unused-import
+
+# Operation outcomes.
+COMPLETED = 'completed'
+CANCELLED = 'cancelled'
+EXPIRED = 'expired'
+RECEPTION_FAILURE = 'reception failure'
+TRANSMISSION_FAILURE = 'transmission failure'
+SERVICER_FAILURE = 'servicer failure'
+SERVICED_FAILURE = 'serviced failure'
+
+# Subscription categories.
+FULL = 'full'
+TERMINATION_ONLY = 'termination only'
+NONE = 'none'
+
+
+class OperationContext(object):
+ """Provides operation-related information and action.
+
+ Attributes:
+ trace_id: A uuid.UUID identifying a particular set of related operations.
+ """
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def is_active(self):
+ """Describes whether the operation is active or has terminated."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_termination_callback(self, callback):
+ """Adds a function to be called upon operation termination.
+
+ Args:
+ callback: A callable that will be passed one of COMPLETED, CANCELLED,
+ EXPIRED, RECEPTION_FAILURE, TRANSMISSION_FAILURE, SERVICER_FAILURE, or
+ SERVICED_FAILURE.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def time_remaining(self):
+ """Describes the length of allowed time remaining for the operation.
+
+ Returns:
+ A nonnegative float indicating the length of allowed time in seconds
+ remaining for the operation to complete before it is considered to have
+ timed out.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def fail(self, exception):
+ """Indicates that the operation has failed.
+
+ Args:
+ exception: An exception germane to the operation failure. May be None.
+ """
+ raise NotImplementedError()
+
+
+class Servicer(object):
+ """Interface for service implementations."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def service(self, name, context, output_consumer):
+ """Services an operation.
+
+ Args:
+ name: The name of the operation.
+ context: A ServicerContext object affording contextual information and
+ actions.
+ output_consumer: A stream.Consumer that will accept output values of
+ the operation.
+
+ Returns:
+ A stream.Consumer that will accept input values for the operation.
+
+ Raises:
+ exceptions.NoSuchMethodError: If this Servicer affords no method with the
+ given name.
+      abandonment.Abandoned: If the operation has been aborted and there is
+        no longer any reason to service the operation.
+ """
+ raise NotImplementedError()
+
+
+class Operation(object):
+ """Representation of an in-progress operation.
+
+ Attributes:
+ consumer: A stream.Consumer into which payloads constituting the operation's
+ input may be passed.
+ context: An OperationContext affording information and action about the
+ operation.
+ """
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def cancel(self):
+ """Cancels this operation."""
+ raise NotImplementedError()
+
+
+class ServicedIngestor(object):
+ """Responsible for accepting the result of an operation."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def consumer(self, operation_context):
+ """Affords a consumer to which operation results will be passed.
+
+ Args:
+ operation_context: An OperationContext object for the current operation.
+
+ Returns:
+ A stream.Consumer to which the results of the current operation will be
+ passed.
+
+ Raises:
+      abandonment.Abandoned: If the operation has been aborted and there is
+        no longer any reason to service the operation.
+ """
+ raise NotImplementedError()
+
+
+class ServicedSubscription(object):
+ """A sum type representing a serviced's interest in an operation.
+
+ Attributes:
+ category: One of FULL, TERMINATION_ONLY, or NONE.
+ ingestor: A ServicedIngestor. Must be present if category is FULL.
+ """
+ __metaclass__ = abc.ABCMeta
+
+
+class End(object):
+ """Common type for entry-point objects on both sides of an operation."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def operation_stats(self):
+ """Reports the number of terminated operations broken down by outcome.
+
+ Returns:
+ A dictionary from operation outcome constant (COMPLETED, CANCELLED,
+ EXPIRED, and so on) to an integer representing the number of operations
+ that terminated with that outcome.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_idle_action(self, action):
+ """Adds an action to be called when this End has no ongoing operations.
+
+ Args:
+ action: A callable that accepts no arguments.
+ """
+ raise NotImplementedError()
+
+
+class Front(End):
+ """Clientish objects that afford the invocation of operations."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def operate(
+ self, name, payload, complete, timeout, subscription, trace_id):
+ """Commences an operation.
+
+ Args:
+ name: The name of the method invoked for the operation.
+ payload: An initial payload for the operation. May be None.
+ complete: A boolean indicating whether or not additional payloads to be
+ sent to the servicer may be supplied after this call.
+ timeout: A length of time in seconds to allow for the operation.
+ subscription: A ServicedSubscription for the operation.
+ trace_id: A uuid.UUID identifying a set of related operations to which
+ this operation belongs.
+
+ Returns:
+ An Operation object affording information and action about the operation
+ in progress.
+ """
+ raise NotImplementedError()
+
+
+class Back(End):
+ """Serverish objects that perform the work of operations."""
+ __metaclass__ = abc.ABCMeta
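A minimal sketch, not part of this commit, of how a back-side service might implement the Servicer interface added above. The EchoServicer and _EchoConsumer names are hypothetical; the consume/terminate/consume_and_terminate methods follow the stream.Consumer protocol exercised elsewhere in this patch, and NoSuchMethodError is the exception introduced in exceptions.py above.

# Sketch only; the names below are illustrative, not part of this commit.
from _framework.base import exceptions
from _framework.base import interfaces


class _EchoConsumer(object):
  """Forwards every input value straight back to the output consumer."""

  def __init__(self, output_consumer):
    self._output_consumer = output_consumer

  def consume(self, value):
    self._output_consumer.consume(value)

  def terminate(self):
    self._output_consumer.terminate()

  def consume_and_terminate(self, value):
    self._output_consumer.consume_and_terminate(value)


class EchoServicer(interfaces.Servicer):
  """A Servicer that echoes the input of any 'echo' operation."""

  def service(self, name, context, output_consumer):
    if name != 'echo':
      raise exceptions.NoSuchMethodError()
    return _EchoConsumer(output_consumer)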
diff --git a/src/python/_framework/base/interfaces_test.py b/src/python/_framework/base/interfaces_test.py
new file mode 100644
index 0000000000..6eb07ea505
--- /dev/null
+++ b/src/python/_framework/base/interfaces_test.py
@@ -0,0 +1,299 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Abstract tests against the interfaces of the base layer of RPC Framework."""
+
+import threading
+import time
+
+from _framework.base import interfaces
+from _framework.base import util
+from _framework.foundation import stream
+from _framework.foundation import stream_testing
+from _framework.foundation import stream_util
+
+TICK = 0.1
+SMALL_TIMEOUT = TICK * 50
+STREAM_LENGTH = 100
+
+SYNCHRONOUS_ECHO = 'synchronous echo'
+ASYNCHRONOUS_ECHO = 'asynchronous echo'
+IMMEDIATE_FAILURE = 'immediate failure'
+TRIGGERED_FAILURE = 'triggered failure'
+WAIT_ON_CONDITION = 'wait on condition'
+
+EMPTY_OUTCOME_DICT = {
+ interfaces.COMPLETED: 0,
+ interfaces.CANCELLED: 0,
+ interfaces.EXPIRED: 0,
+ interfaces.RECEPTION_FAILURE: 0,
+ interfaces.TRANSMISSION_FAILURE: 0,
+ interfaces.SERVICER_FAILURE: 0,
+ interfaces.SERVICED_FAILURE: 0,
+ }
+
+
+def _synchronous_echo(output_consumer):
+ return stream_util.TransformingConsumer(lambda x: x, output_consumer)
+
+
+class AsynchronousEcho(stream.Consumer):
+ """A stream.Consumer that echoes its input to another stream.Consumer."""
+
+ def __init__(self, output_consumer, pool):
+ self._lock = threading.Lock()
+ self._output_consumer = output_consumer
+ self._pool = pool
+
+ self._queue = []
+ self._spinning = False
+
+ def _spin(self, value, complete):
+ while True:
+ if value:
+ if complete:
+ self._output_consumer.consume_and_terminate(value)
+ else:
+ self._output_consumer.consume(value)
+ elif complete:
+ self._output_consumer.terminate()
+ with self._lock:
+ if self._queue:
+ value, complete = self._queue.pop(0)
+ else:
+ self._spinning = False
+ return
+
+ def consume(self, value):
+ with self._lock:
+ if self._spinning:
+ self._queue.append((value, False))
+ else:
+ self._spinning = True
+ self._pool.submit(self._spin, value, False)
+
+ def terminate(self):
+ with self._lock:
+ if self._spinning:
+ self._queue.append((None, True))
+ else:
+ self._spinning = True
+ self._pool.submit(self._spin, None, True)
+
+ def consume_and_terminate(self, value):
+ with self._lock:
+ if self._spinning:
+ self._queue.append((value, True))
+ else:
+ self._spinning = True
+ self._pool.submit(self._spin, value, True)
+
+
+class TestServicer(interfaces.Servicer):
+  """An interfaces.Servicer instrumented for testing."""
+
+ def __init__(self, pool):
+ self._pool = pool
+ self.condition = threading.Condition()
+ self._released = False
+
+ def service(self, name, context, output_consumer):
+ if name == SYNCHRONOUS_ECHO:
+ return _synchronous_echo(output_consumer)
+ elif name == ASYNCHRONOUS_ECHO:
+ return AsynchronousEcho(output_consumer, self._pool)
+ elif name == IMMEDIATE_FAILURE:
+ raise ValueError()
+ elif name == TRIGGERED_FAILURE:
+ raise NotImplementedError
+ elif name == WAIT_ON_CONDITION:
+ with self.condition:
+ while not self._released:
+ self.condition.wait()
+ return _synchronous_echo(output_consumer)
+ else:
+ raise NotImplementedError()
+
+ def release(self):
+ with self.condition:
+ self._released = True
+ self.condition.notify_all()
+
+
+class EasyServicedIngestor(interfaces.ServicedIngestor):
+ """A trivial implementation of interfaces.ServicedIngestor."""
+
+ def __init__(self, consumer):
+ self._consumer = consumer
+
+ def consumer(self, operation_context):
+ """See interfaces.ServicedIngestor.consumer for specification."""
+ return self._consumer
+
+
+class FrontAndBackTest(object):
+ """A test suite usable against any joined Front and Back."""
+
+ # Pylint doesn't know that this is a unittest.TestCase mix-in.
+ # pylint: disable=invalid-name
+
+ def testSimplestCall(self):
+ """Tests the absolute simplest call - a one-packet fire-and-forget."""
+ self.front.operate(
+ SYNCHRONOUS_ECHO, None, True, SMALL_TIMEOUT,
+ util.none_serviced_subscription(), 'test trace ID')
+ util.wait_for_idle(self.front)
+ self.assertEqual(1, self.front.operation_stats()[interfaces.COMPLETED])
+
+ # Assuming nothing really pathological (such as pauses on the order of
+    # SMALL_TIMEOUT interfering with this test) there are two different ways
+ # the back could have experienced execution up to this point:
+ # (1) The packet is still either in the front waiting to be transmitted
+ # or is somewhere on the link between the front and the back. The back has
+ # no idea that this test is even happening. Calling wait_for_idle on it
+ # would do no good because in this case the back is idle and the call would
+ # return with the packet bound for it still in the front or on the link.
+ back_operation_stats = self.back.operation_stats()
+ first_back_possibility = EMPTY_OUTCOME_DICT
+ # (2) The packet arrived at the back and the back completed the operation.
+ second_back_possibility = dict(EMPTY_OUTCOME_DICT)
+ second_back_possibility[interfaces.COMPLETED] = 1
+ self.assertIn(
+ back_operation_stats, (first_back_possibility, second_back_possibility))
+ # It's true that if the packet had arrived at the back and the back had
+ # begun processing that wait_for_idle could hold test execution until the
+ # back completed the operation, but that doesn't really collapse the
+ # possibility space down to one solution.
+
+ def testEntireEcho(self):
+ """Tests a very simple one-packet-each-way round-trip."""
+ test_payload = 'test payload'
+ test_consumer = stream_testing.TestConsumer()
+ subscription = util.full_serviced_subscription(
+ EasyServicedIngestor(test_consumer))
+
+ self.front.operate(
+ ASYNCHRONOUS_ECHO, test_payload, True, SMALL_TIMEOUT, subscription,
+ 'test trace ID')
+
+ util.wait_for_idle(self.front)
+ util.wait_for_idle(self.back)
+ self.assertEqual(1, self.front.operation_stats()[interfaces.COMPLETED])
+ self.assertEqual(1, self.back.operation_stats()[interfaces.COMPLETED])
+ self.assertListEqual([(test_payload, True)], test_consumer.calls)
+
+ def testBidirectionalStreamingEcho(self):
+ """Tests sending multiple packets each way."""
+ test_payload_template = 'test_payload: %03d'
+ test_payloads = [test_payload_template % i for i in range(STREAM_LENGTH)]
+ test_consumer = stream_testing.TestConsumer()
+ subscription = util.full_serviced_subscription(
+ EasyServicedIngestor(test_consumer))
+
+ operation = self.front.operate(
+ SYNCHRONOUS_ECHO, None, False, SMALL_TIMEOUT, subscription,
+ 'test trace ID')
+
+ for test_payload in test_payloads:
+ operation.consumer.consume(test_payload)
+ operation.consumer.terminate()
+
+ util.wait_for_idle(self.front)
+ util.wait_for_idle(self.back)
+ self.assertEqual(1, self.front.operation_stats()[interfaces.COMPLETED])
+ self.assertEqual(1, self.back.operation_stats()[interfaces.COMPLETED])
+ self.assertListEqual(test_payloads, test_consumer.values())
+
+ def testCancellation(self):
+ """Tests cancelling a long-lived operation."""
+ test_consumer = stream_testing.TestConsumer()
+ subscription = util.full_serviced_subscription(
+ EasyServicedIngestor(test_consumer))
+
+ operation = self.front.operate(
+ ASYNCHRONOUS_ECHO, None, False, SMALL_TIMEOUT, subscription,
+ 'test trace ID')
+ operation.cancel()
+
+ util.wait_for_idle(self.front)
+ self.assertEqual(1, self.front.operation_stats()[interfaces.CANCELLED])
+ util.wait_for_idle(self.back)
+ self.assertListEqual([], test_consumer.calls)
+
+ # Assuming nothing really pathological (such as pauses on the order of
+    # SMALL_TIMEOUT interfering with this test) there are two different ways
+ # the back could have experienced execution up to this point:
+ # (1) Both packets are still either in the front waiting to be transmitted
+ # or are somewhere on the link between the front and the back. The back has
+ # no idea that this test is even happening. Calling wait_for_idle on it
+ # would do no good because in this case the back is idle and the call would
+ # return with the packets bound for it still in the front or on the link.
+ back_operation_stats = self.back.operation_stats()
+ first_back_possibility = EMPTY_OUTCOME_DICT
+ # (2) Both packets arrived within SMALL_TIMEOUT of one another at the back.
+ # The back started processing based on the first packet and then stopped
+ # upon receiving the cancellation packet.
+ second_back_possibility = dict(EMPTY_OUTCOME_DICT)
+ second_back_possibility[interfaces.CANCELLED] = 1
+ self.assertIn(
+ back_operation_stats, (first_back_possibility, second_back_possibility))
+
+ def testExpiration(self):
+ """Tests that operations time out."""
+ timeout = TICK * 2
+    allowance = TICK  # How much extra time to allow past the timeout.
+ condition = threading.Condition()
+ test_payload = 'test payload'
+ subscription = util.termination_only_serviced_subscription()
+ start_time = time.time()
+
+ outcome_cell = [None]
+ termination_time_cell = [None]
+ def termination_action(outcome):
+ with condition:
+ outcome_cell[0] = outcome
+ termination_time_cell[0] = time.time()
+ condition.notify()
+
+ with condition:
+ operation = self.front.operate(
+ SYNCHRONOUS_ECHO, test_payload, False, timeout, subscription,
+ 'test trace ID')
+ operation.context.add_termination_callback(termination_action)
+ while outcome_cell[0] is None:
+ condition.wait()
+
+ duration = termination_time_cell[0] - start_time
+ self.assertLessEqual(timeout, duration)
+ self.assertLess(duration, timeout + allowance)
+ self.assertEqual(interfaces.EXPIRED, outcome_cell[0])
+ util.wait_for_idle(self.front)
+ self.assertEqual(1, self.front.operation_stats()[interfaces.EXPIRED])
+ util.wait_for_idle(self.back)
+ self.assertLessEqual(1, self.back.operation_stats()[interfaces.EXPIRED])
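The tests above rely on stream_testing.TestConsumer, which is not part of this diff. Judging only from the assertions (test_consumer.calls holding (value, terminal) pairs and test_consumer.values() returning the consumed payloads), a rough stand-in might look like the sketch below; the RecordingConsumer name and the locking choice are assumptions.

import threading


class RecordingConsumer(object):
  """Rough stand-in (assumption) for the TestConsumer used in the tests above."""

  def __init__(self):
    self._lock = threading.Lock()
    self.calls = []  # (value, terminal) pairs, matching the assertions above.

  def consume(self, value):
    with self._lock:
      self.calls.append((value, False))

  def terminate(self):
    with self._lock:
      self.calls.append((None, True))

  def consume_and_terminate(self, value):
    with self._lock:
      self.calls.append((value, True))

  def values(self):
    with self._lock:
      return [value for value, _ in self.calls if value is not None]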
diff --git a/src/python/_framework/base/packets/__init__.py b/src/python/_framework/base/packets/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/src/python/_framework/base/packets/__init__.py
diff --git a/src/python/_framework/base/packets/_cancellation.py b/src/python/_framework/base/packets/_cancellation.py
new file mode 100644
index 0000000000..49172d1b97
--- /dev/null
+++ b/src/python/_framework/base/packets/_cancellation.py
@@ -0,0 +1,64 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""State and behavior for operation cancellation."""
+
+from _framework.base.packets import _interfaces
+from _framework.base.packets import packets
+
+
+class CancellationManager(_interfaces.CancellationManager):
+ """An implementation of _interfaces.CancellationManager."""
+
+ def __init__(
+ self, lock, termination_manager, transmission_manager, ingestion_manager,
+ expiration_manager):
+ """Constructor.
+
+ Args:
+ lock: The operation-wide lock.
+ termination_manager: The _interfaces.TerminationManager for the operation.
+ transmission_manager: The _interfaces.TransmissionManager for the
+ operation.
+ ingestion_manager: The _interfaces.IngestionManager for the operation.
+ expiration_manager: The _interfaces.ExpirationManager for the operation.
+ """
+ self._lock = lock
+ self._termination_manager = termination_manager
+ self._transmission_manager = transmission_manager
+ self._ingestion_manager = ingestion_manager
+ self._expiration_manager = expiration_manager
+
+ def cancel(self):
+ """See _interfaces.CancellationManager.cancel for specification."""
+ with self._lock:
+ self._termination_manager.abort(packets.Kind.CANCELLATION)
+ self._transmission_manager.abort(packets.Kind.CANCELLATION)
+ self._ingestion_manager.abort()
+ self._expiration_manager.abort()
diff --git a/src/python/_framework/base/packets/_constants.py b/src/python/_framework/base/packets/_constants.py
new file mode 100644
index 0000000000..8fbdc82782
--- /dev/null
+++ b/src/python/_framework/base/packets/_constants.py
@@ -0,0 +1,32 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Private constants for the package."""
+
+INTERNAL_ERROR_LOG_MESSAGE = ':-( RPC Framework (Base) internal error! :-('
diff --git a/src/python/_framework/base/packets/_context.py b/src/python/_framework/base/packets/_context.py
new file mode 100644
index 0000000000..be390364b0
--- /dev/null
+++ b/src/python/_framework/base/packets/_context.py
@@ -0,0 +1,99 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""State and behavior for operation context."""
+
+import time
+
+# _interfaces and packets are referenced from specification in this module.
+from _framework.base import interfaces as base_interfaces
+from _framework.base.packets import _interfaces # pylint: disable=unused-import
+from _framework.base.packets import packets # pylint: disable=unused-import
+
+
+class OperationContext(base_interfaces.OperationContext):
+ """An implementation of base_interfaces.OperationContext."""
+
+ def __init__(
+ self, lock, operation_id, local_failure, termination_manager,
+ transmission_manager):
+ """Constructor.
+
+ Args:
+ lock: The operation-wide lock.
+ operation_id: An object identifying the operation.
+ local_failure: Whichever one of packets.Kind.SERVICED_FAILURE or
+ packets.Kind.SERVICER_FAILURE describes local failure of customer code.
+ termination_manager: The _interfaces.TerminationManager for the operation.
+ transmission_manager: The _interfaces.TransmissionManager for the
+ operation.
+ """
+ self._lock = lock
+ self._local_failure = local_failure
+ self._termination_manager = termination_manager
+ self._transmission_manager = transmission_manager
+ self._ingestion_manager = None
+ self._expiration_manager = None
+
+ self.operation_id = operation_id
+
+ def set_ingestion_and_expiration_managers(
+ self, ingestion_manager, expiration_manager):
+ """Sets managers with which this OperationContext cooperates.
+
+ Args:
+ ingestion_manager: The _interfaces.IngestionManager for the operation.
+ expiration_manager: The _interfaces.ExpirationManager for the operation.
+ """
+ self._ingestion_manager = ingestion_manager
+ self._expiration_manager = expiration_manager
+
+ def is_active(self):
+ """See base_interfaces.OperationContext.is_active for specification."""
+ with self._lock:
+ return self._termination_manager.is_active()
+
+ def add_termination_callback(self, callback):
+ """See base_interfaces.OperationContext.add_termination_callback."""
+ with self._lock:
+ self._termination_manager.add_callback(callback)
+
+ def time_remaining(self):
+ """See interfaces.OperationContext.time_remaining for specification."""
+ with self._lock:
+ deadline = self._expiration_manager.deadline()
+ return max(0.0, deadline - time.time())
+
+ def fail(self, exception):
+ """See interfaces.OperationContext.fail for specification."""
+ with self._lock:
+ self._termination_manager.abort(self._local_failure)
+ self._transmission_manager.abort(self._local_failure)
+ self._ingestion_manager.abort()
+ self._expiration_manager.abort()
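For orientation, a minimal sketch, not part of this commit, of how servicer-side customer code might consult the OperationContext it is handed; the handle and _note_outcome names and the 0.5-second threshold are hypothetical.

def _note_outcome(outcome):
  # outcome will be one of the operation outcome constants (COMPLETED,
  # CANCELLED, EXPIRED, and so on) defined in the base interfaces module.
  pass


def handle(context, output_consumer, value):
  """Illustrative only: echoes one value if enough time remains."""
  if not context.is_active():
    return
  context.add_termination_callback(_note_outcome)
  if context.time_remaining() < 0.5:
    # Too little time is left to do useful work; report failure to both sides.
    context.fail(None)
  else:
    output_consumer.consume_and_terminate(value)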
diff --git a/src/python/_framework/base/packets/_emission.py b/src/python/_framework/base/packets/_emission.py
new file mode 100644
index 0000000000..b4be5eb0ff
--- /dev/null
+++ b/src/python/_framework/base/packets/_emission.py
@@ -0,0 +1,126 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""State and behavior for handling emitted values."""
+
+# packets is referenced from specifications in this module.
+from _framework.base.packets import _interfaces
+from _framework.base.packets import packets # pylint: disable=unused-import
+
+
+class _EmissionManager(_interfaces.EmissionManager):
+ """An implementation of _interfaces.EmissionManager."""
+
+ def __init__(
+ self, lock, failure_kind, termination_manager, transmission_manager):
+ """Constructor.
+
+ Args:
+ lock: The operation-wide lock.
+ failure_kind: Whichever one of packets.Kind.SERVICED_FAILURE or
+ packets.Kind.SERVICER_FAILURE describes this object's methods being
+ called inappropriately by customer code.
+ termination_manager: The _interfaces.TerminationManager for the operation.
+ transmission_manager: The _interfaces.TransmissionManager for the
+ operation.
+ """
+ self._lock = lock
+ self._failure_kind = failure_kind
+ self._termination_manager = termination_manager
+ self._transmission_manager = transmission_manager
+ self._ingestion_manager = None
+ self._expiration_manager = None
+
+ self._emission_complete = False
+
+ def set_ingestion_manager_and_expiration_manager(
+ self, ingestion_manager, expiration_manager):
+ self._ingestion_manager = ingestion_manager
+ self._expiration_manager = expiration_manager
+
+ def _abort(self):
+ self._termination_manager.abort(self._failure_kind)
+ self._transmission_manager.abort(self._failure_kind)
+ self._ingestion_manager.abort()
+ self._expiration_manager.abort()
+
+ def consume(self, value):
+ with self._lock:
+ if self._emission_complete:
+ self._abort()
+ else:
+ self._transmission_manager.inmit(value, False)
+
+ def terminate(self):
+ with self._lock:
+ if not self._emission_complete:
+ self._termination_manager.emission_complete()
+ self._transmission_manager.inmit(None, True)
+ self._emission_complete = True
+
+ def consume_and_terminate(self, value):
+ with self._lock:
+ if self._emission_complete:
+ self._abort()
+ else:
+ self._termination_manager.emission_complete()
+ self._transmission_manager.inmit(value, True)
+ self._emission_complete = True
+
+
+def front_emission_manager(lock, termination_manager, transmission_manager):
+ """Creates an _interfaces.EmissionManager appropriate for front-side use.
+
+ Args:
+ lock: The operation-wide lock.
+ termination_manager: The _interfaces.TerminationManager for the operation.
+ transmission_manager: The _interfaces.TransmissionManager for the operation.
+
+ Returns:
+ An _interfaces.EmissionManager appropriate for front-side use.
+ """
+ return _EmissionManager(
+ lock, packets.Kind.SERVICED_FAILURE, termination_manager,
+ transmission_manager)
+
+
+def back_emission_manager(lock, termination_manager, transmission_manager):
+ """Creates an _interfaces.EmissionManager appropriate for back-side use.
+
+ Args:
+ lock: The operation-wide lock.
+ termination_manager: The _interfaces.TerminationManager for the operation.
+ transmission_manager: The _interfaces.TransmissionManager for the operation.
+
+ Returns:
+ An _interfaces.EmissionManager appropriate for back-side use.
+ """
+ return _EmissionManager(
+ lock, packets.Kind.SERVICER_FAILURE, termination_manager,
+ transmission_manager)
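The _EmissionManager above is what front-side customer code receives as the consumer attribute of an Operation (see _EasyOperation in _ends.py later in this diff). A brief sketch of the resulting usage pattern, mirroring the bidirectional streaming test earlier in this diff; the stream_payloads helper is hypothetical and assumes the operation was started with complete=False and a non-empty payload list.

def stream_payloads(operation, payloads):
  """Sketch only: feed payloads into an in-progress operation, then finish."""
  for payload in payloads[:-1]:
    operation.consumer.consume(payload)
  operation.consumer.consume_and_terminate(payloads[-1])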
diff --git a/src/python/_framework/base/packets/_ends.py b/src/python/_framework/base/packets/_ends.py
new file mode 100644
index 0000000000..baaf5cacf9
--- /dev/null
+++ b/src/python/_framework/base/packets/_ends.py
@@ -0,0 +1,408 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Implementations of Fronts and Backs."""
+
+import collections
+import threading
+import uuid
+
+# _interfaces and packets are referenced from specification in this module.
+from _framework.base import interfaces as base_interfaces
+from _framework.base.packets import _cancellation
+from _framework.base.packets import _context
+from _framework.base.packets import _emission
+from _framework.base.packets import _expiration
+from _framework.base.packets import _ingestion
+from _framework.base.packets import _interfaces # pylint: disable=unused-import
+from _framework.base.packets import _reception
+from _framework.base.packets import _termination
+from _framework.base.packets import _transmission
+from _framework.base.packets import interfaces
+from _framework.base.packets import packets # pylint: disable=unused-import
+from _framework.foundation import callable_util
+
+_IDLE_ACTION_EXCEPTION_LOG_MESSAGE = 'Exception calling idle action!'
+
+_OPERATION_OUTCOMES = (
+ base_interfaces.COMPLETED,
+ base_interfaces.CANCELLED,
+ base_interfaces.EXPIRED,
+ base_interfaces.RECEPTION_FAILURE,
+ base_interfaces.TRANSMISSION_FAILURE,
+ base_interfaces.SERVICER_FAILURE,
+ base_interfaces.SERVICED_FAILURE,
+ )
+
+
+class _EasyOperation(base_interfaces.Operation):
+ """A trivial implementation of base_interfaces.Operation."""
+
+ def __init__(self, emission_manager, context, cancellation_manager):
+ """Constructor.
+
+ Args:
+ emission_manager: The _interfaces.EmissionManager for the operation that
+ will accept values emitted by customer code.
+ context: The base_interfaces.OperationContext for use by the customer
+ during the operation.
+ cancellation_manager: The _interfaces.CancellationManager for the
+ operation.
+ """
+ self.consumer = emission_manager
+ self.context = context
+ self._cancellation_manager = cancellation_manager
+
+ def cancel(self):
+ self._cancellation_manager.cancel()
+
+
+class _Endlette(object):
+ """Utility for stateful behavior common to Fronts and Backs."""
+
+ def __init__(self, pool):
+ """Constructor.
+
+ Args:
+ pool: A thread pool to use when calling registered idle actions.
+ """
+ self._lock = threading.Lock()
+ self._pool = pool
+ # Dictionary from operation IDs to ReceptionManager-or-None. A None value
+ # indicates an in-progress fire-and-forget operation for which the customer
+ # has chosen to ignore results.
+ self._operations = {}
+ self._stats = {outcome: 0 for outcome in _OPERATION_OUTCOMES}
+ self._idle_actions = []
+
+ def terminal_action(self, operation_id):
+ """Constructs the termination action for a single operation.
+
+ Args:
+ operation_id: An operation ID.
+
+ Returns:
+ A callable that takes an operation outcome for an argument to be used as
+ the termination action for the operation associated with the given
+ operation ID.
+ """
+ def termination_action(outcome):
+ with self._lock:
+ self._stats[outcome] += 1
+ self._operations.pop(operation_id, None)
+ if not self._operations:
+ for action in self._idle_actions:
+ self._pool.submit(callable_util.with_exceptions_logged(
+ action, _IDLE_ACTION_EXCEPTION_LOG_MESSAGE))
+ self._idle_actions = []
+ return termination_action
+
+ def __enter__(self):
+ self._lock.acquire()
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self._lock.release()
+
+ def get_operation(self, operation_id):
+ return self._operations.get(operation_id, None)
+
+ def add_operation(self, operation_id, operation_reception_manager):
+ self._operations[operation_id] = operation_reception_manager
+
+ def operation_stats(self):
+ with self._lock:
+ return dict(self._stats)
+
+ def add_idle_action(self, action):
+ with self._lock:
+ if self._operations:
+ self._idle_actions.append(action)
+ else:
+ self._pool.submit(callable_util.with_exceptions_logged(
+ action, _IDLE_ACTION_EXCEPTION_LOG_MESSAGE))
+
+
+class _FrontManagement(
+ collections.namedtuple(
+ '_FrontManagement',
+ ('reception', 'emission', 'operation', 'cancellation'))):
+ """Just a trivial helper class to bundle four fellow-traveling objects."""
+
+
+def _front_operate(
+ callback, work_pool, transmission_pool, utility_pool,
+ termination_action, operation_id, name, payload, complete, timeout,
+ subscription, trace_id):
+ """Constructs objects necessary for front-side operation management.
+
+ Args:
+ callback: A callable that accepts packets.FrontToBackPackets and delivers
+ them to the other side of the operation. Execution of this callable may
+ take any arbitrary length of time.
+ work_pool: A thread pool in which to execute customer code.
+ transmission_pool: A thread pool to use for transmitting to the other side
+ of the operation.
+ utility_pool: A thread pool for utility tasks.
+ termination_action: A no-arg behavior to be called upon operation
+ completion.
+ operation_id: An object identifying the operation.
+ name: The name of the method being called during the operation.
+ payload: The first customer-significant value to be transmitted to the other
+ side. May be None if there is no such value or if the customer chose not
+ to pass it at operation invocation.
+ complete: A boolean indicating whether or not additional payloads will be
+ supplied by the customer.
+ timeout: A length of time in seconds to allow for the operation.
+ subscription: A base_interfaces.ServicedSubscription describing the
+ customer's interest in the results of the operation.
+ trace_id: A uuid.UUID identifying a set of related operations to which this
+ operation belongs. May be None.
+
+ Returns:
+ A _FrontManagement object bundling together the
+ _interfaces.ReceptionManager, _interfaces.EmissionManager,
+ _context.OperationContext, and _interfaces.CancellationManager for the
+ operation.
+ """
+ lock = threading.Lock()
+ with lock:
+ termination_manager = _termination.front_termination_manager(
+ work_pool, utility_pool, termination_action, subscription.category)
+ transmission_manager = _transmission.front_transmission_manager(
+ lock, transmission_pool, callback, operation_id, name,
+ subscription.category, trace_id, timeout, termination_manager)
+ operation_context = _context.OperationContext(
+ lock, operation_id, packets.Kind.SERVICED_FAILURE,
+ termination_manager, transmission_manager)
+ emission_manager = _emission.front_emission_manager(
+ lock, termination_manager, transmission_manager)
+ ingestion_manager = _ingestion.front_ingestion_manager(
+ lock, work_pool, subscription, termination_manager,
+ transmission_manager, operation_context)
+ expiration_manager = _expiration.front_expiration_manager(
+ lock, termination_manager, transmission_manager, ingestion_manager,
+ timeout)
+ reception_manager = _reception.front_reception_manager(
+ lock, termination_manager, transmission_manager, ingestion_manager,
+ expiration_manager)
+ cancellation_manager = _cancellation.CancellationManager(
+ lock, termination_manager, transmission_manager, ingestion_manager,
+ expiration_manager)
+
+ transmission_manager.set_ingestion_and_expiration_managers(
+ ingestion_manager, expiration_manager)
+ operation_context.set_ingestion_and_expiration_managers(
+ ingestion_manager, expiration_manager)
+ emission_manager.set_ingestion_manager_and_expiration_manager(
+ ingestion_manager, expiration_manager)
+ ingestion_manager.set_expiration_manager(expiration_manager)
+
+ transmission_manager.inmit(payload, complete)
+
+ returned_reception_manager = (
+ None if subscription.category == base_interfaces.NONE
+ else reception_manager)
+
+ return _FrontManagement(
+ returned_reception_manager, emission_manager, operation_context,
+ cancellation_manager)
+
+
+class Front(interfaces.Front):
+ """An implementation of interfaces.Front."""
+
+ def __init__(self, work_pool, transmission_pool, utility_pool):
+ """Constructor.
+
+ Args:
+ work_pool: A thread pool to be used for executing customer code.
+ transmission_pool: A thread pool to be used for transmitting values to
+ the other side of the operation.
+ utility_pool: A thread pool to be used for utility tasks.
+ """
+ self._endlette = _Endlette(utility_pool)
+ self._work_pool = work_pool
+ self._transmission_pool = transmission_pool
+ self._utility_pool = utility_pool
+ self._callback = None
+
+ self._operations = {}
+
+ def join_rear_link(self, rear_link):
+ """See interfaces.ForeLink.join_rear_link for specification."""
+ with self._endlette:
+ self._callback = rear_link.accept_front_to_back_ticket
+
+ def operation_stats(self):
+ """See base_interfaces.End.operation_stats for specification."""
+ return self._endlette.operation_stats()
+
+ def add_idle_action(self, action):
+ """See base_interfaces.End.add_idle_action for specification."""
+ self._endlette.add_idle_action(action)
+
+ def operate(
+ self, name, payload, complete, timeout, subscription, trace_id):
+ """See base_interfaces.Front.operate for specification."""
+ operation_id = uuid.uuid4()
+ with self._endlette:
+ management = _front_operate(
+ self._callback, self._work_pool, self._transmission_pool,
+ self._utility_pool, self._endlette.terminal_action(operation_id),
+ operation_id, name, payload, complete, timeout, subscription,
+ trace_id)
+ self._endlette.add_operation(operation_id, management.reception)
+ return _EasyOperation(
+ management.emission, management.operation, management.cancellation)
+
+ def accept_back_to_front_ticket(self, ticket):
+ """See interfaces.End.act for specification."""
+ with self._endlette:
+ reception_manager = self._endlette.get_operation(ticket.operation_id)
+ if reception_manager:
+ reception_manager.receive_packet(ticket)
+
+
+def _back_operate(
+ servicer, callback, work_pool, transmission_pool, utility_pool,
+ termination_action, ticket, default_timeout, maximum_timeout):
+ """Constructs objects necessary for back-side operation management.
+
+ Also begins back-side operation by feeding the first received ticket into the
+ constructed _interfaces.ReceptionManager.
+
+ Args:
+ servicer: An interfaces.Servicer for servicing operations.
+ callback: A callable that accepts packets.BackToFrontPackets and delivers
+ them to the other side of the operation. Execution of this callable may
+ take any arbitrary length of time.
+ work_pool: A thread pool in which to execute customer code.
+ transmission_pool: A thread pool to use for transmitting to the other side
+ of the operation.
+ utility_pool: A thread pool for utility tasks.
+ termination_action: A no-arg behavior to be called upon operation
+ completion.
+ ticket: The first packets.FrontToBackPacket received for the operation.
+    default_timeout: A length of time in seconds to be used as the default
+        time allotted for a single operation.
+    maximum_timeout: A length of time in seconds to be used as the maximum
+        time allotted for a single operation.
+
+ Returns:
+ The _interfaces.ReceptionManager to be used for the operation.
+ """
+ lock = threading.Lock()
+ with lock:
+ termination_manager = _termination.back_termination_manager(
+ work_pool, utility_pool, termination_action, ticket.subscription)
+ transmission_manager = _transmission.back_transmission_manager(
+ lock, transmission_pool, callback, ticket.operation_id,
+ termination_manager, ticket.subscription)
+ operation_context = _context.OperationContext(
+ lock, ticket.operation_id, packets.Kind.SERVICER_FAILURE,
+ termination_manager, transmission_manager)
+ emission_manager = _emission.back_emission_manager(
+ lock, termination_manager, transmission_manager)
+ ingestion_manager = _ingestion.back_ingestion_manager(
+ lock, work_pool, servicer, termination_manager,
+ transmission_manager, operation_context, emission_manager)
+ expiration_manager = _expiration.back_expiration_manager(
+ lock, termination_manager, transmission_manager, ingestion_manager,
+ ticket.timeout, default_timeout, maximum_timeout)
+ reception_manager = _reception.back_reception_manager(
+ lock, termination_manager, transmission_manager, ingestion_manager,
+ expiration_manager)
+
+ transmission_manager.set_ingestion_and_expiration_managers(
+ ingestion_manager, expiration_manager)
+ operation_context.set_ingestion_and_expiration_managers(
+ ingestion_manager, expiration_manager)
+ emission_manager.set_ingestion_manager_and_expiration_manager(
+ ingestion_manager, expiration_manager)
+ ingestion_manager.set_expiration_manager(expiration_manager)
+
+ reception_manager.receive_packet(ticket)
+
+ return reception_manager
+
+
+class Back(interfaces.Back):
+ """An implementation of interfaces.Back."""
+
+ def __init__(
+ self, servicer, work_pool, transmission_pool, utility_pool,
+ default_timeout, maximum_timeout):
+ """Constructor.
+
+ Args:
+ servicer: An interfaces.Servicer for servicing operations.
+ work_pool: A thread pool in which to execute customer code.
+ transmission_pool: A thread pool to use for transmitting to the other side
+ of the operation.
+ utility_pool: A thread pool for utility tasks.
+      default_timeout: A length of time in seconds to be used as the default
+          time allotted for a single operation.
+      maximum_timeout: A length of time in seconds to be used as the maximum
+          time allotted for a single operation.
+ """
+ self._endlette = _Endlette(utility_pool)
+ self._servicer = servicer
+ self._work_pool = work_pool
+ self._transmission_pool = transmission_pool
+ self._utility_pool = utility_pool
+ self._default_timeout = default_timeout
+ self._maximum_timeout = maximum_timeout
+ self._callback = None
+
+ def join_fore_link(self, fore_link):
+ """See interfaces.RearLink.join_fore_link for specification."""
+ with self._endlette:
+ self._callback = fore_link.accept_back_to_front_ticket
+
+ def accept_front_to_back_ticket(self, ticket):
+ """See interfaces.RearLink.accept_front_to_back_ticket for specification."""
+ with self._endlette:
+ reception_manager = self._endlette.get_operation(ticket.operation_id)
+ if reception_manager is None:
+ reception_manager = _back_operate(
+ self._servicer, self._callback, self._work_pool,
+ self._transmission_pool, self._utility_pool,
+ self._endlette.terminal_action(ticket.operation_id), ticket,
+ self._default_timeout, self._maximum_timeout)
+ self._endlette.add_operation(ticket.operation_id, reception_manager)
+ else:
+ reception_manager.receive_packet(ticket)
+
+ def operation_stats(self):
+ """See base_interfaces.End.operation_stats for specification."""
+ return self._endlette.operation_stats()
+
+ def add_idle_action(self, action):
+ """See base_interfaces.End.add_idle_action for specification."""
+ self._endlette.add_idle_action(action)
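+
+
+# A minimal wiring sketch, assuming thread pools, a timeout in seconds, a
+# base_interfaces.ServicedSubscription named subscription, and an
+# interfaces.Servicer named servicer constructed elsewhere (all names here
+# are placeholders):
+#
+#   front = Front(work_pool, transmission_pool, utility_pool)
+#   back = Back(
+#       servicer, work_pool, transmission_pool, utility_pool,
+#       default_timeout, maximum_timeout)
+#   front.join_rear_link(back)
+#   back.join_fore_link(front)
+#   operation = front.operate(
+#       'method_name', first_payload, True, timeout, subscription, None)
+#
+# The returned operation bundles the emission, context, and cancellation
+# objects for the call; passing True for complete indicates that no further
+# payloads will be emitted by the customer.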
diff --git a/src/python/_framework/base/packets/_expiration.py b/src/python/_framework/base/packets/_expiration.py
new file mode 100644
index 0000000000..772e15f08c
--- /dev/null
+++ b/src/python/_framework/base/packets/_expiration.py
@@ -0,0 +1,158 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""State and behavior for operation expiration."""
+
+import time
+
+from _framework.base.packets import _interfaces
+from _framework.base.packets import packets
+from _framework.foundation import later
+
+
+class _ExpirationManager(_interfaces.ExpirationManager):
+ """An implementation of _interfaces.ExpirationManager."""
+
+ def __init__(
+ self, lock, termination_manager, transmission_manager, ingestion_manager,
+ commencement, timeout, maximum_timeout):
+ """Constructor.
+
+ Args:
+ lock: The operation-wide lock.
+ termination_manager: The _interfaces.TerminationManager for the operation.
+ transmission_manager: The _interfaces.TransmissionManager for the
+ operation.
+ ingestion_manager: The _interfaces.IngestionManager for the operation.
+ commencement: The time in seconds since the epoch at which the operation
+ began.
+ timeout: A length of time in seconds to allow for the operation to run.
+ maximum_timeout: The maximum length of time in seconds to allow for the
+ operation to run despite what is requested via this object's
+        change_timeout method.
+ """
+ self._lock = lock
+ self._termination_manager = termination_manager
+ self._transmission_manager = transmission_manager
+ self._ingestion_manager = ingestion_manager
+ self._commencement = commencement
+ self._maximum_timeout = maximum_timeout
+
+ self._timeout = timeout
+ self._deadline = commencement + timeout
+ self._index = None
+ self._future = None
+
+ def _expire(self, index):
+ with self._lock:
+ if self._future is not None and index == self._index:
+ self._future = None
+ self._termination_manager.abort(packets.Kind.EXPIRATION)
+ self._transmission_manager.abort(packets.Kind.EXPIRATION)
+ self._ingestion_manager.abort()
+
+ def start(self):
+ self._index = 0
+ self._future = later.later(self._timeout, lambda: self._expire(0))
+
+ def change_timeout(self, timeout):
+ if self._future is not None and timeout != self._timeout:
+ self._future.cancel()
+ new_timeout = min(timeout, self._maximum_timeout)
+ new_index = self._index + 1
+ self._timeout = new_timeout
+ self._deadline = self._commencement + new_timeout
+ self._index = new_index
+ delay = self._deadline - time.time()
+ self._future = later.later(
+ delay, lambda: self._expire(new_index))
+
+ def deadline(self):
+ return self._deadline
+
+ def abort(self):
+ if self._future:
+ self._future.cancel()
+ self._future = None
+    self._index = None
+
+
+def front_expiration_manager(
+ lock, termination_manager, transmission_manager, ingestion_manager,
+ timeout):
+ """Creates an _interfaces.ExpirationManager appropriate for front-side use.
+
+ Args:
+ lock: The operation-wide lock.
+ termination_manager: The _interfaces.TerminationManager for the operation.
+ transmission_manager: The _interfaces.TransmissionManager for the
+ operation.
+ ingestion_manager: The _interfaces.IngestionManager for the operation.
+ timeout: A length of time in seconds to allow for the operation to run.
+
+ Returns:
+ An _interfaces.ExpirationManager appropriate for front-side use.
+ """
+ commencement = time.time()
+ expiration_manager = _ExpirationManager(
+ lock, termination_manager, transmission_manager, ingestion_manager,
+ commencement, timeout, timeout)
+ expiration_manager.start()
+ return expiration_manager
+
+
+def back_expiration_manager(
+ lock, termination_manager, transmission_manager, ingestion_manager,
+ timeout, default_timeout, maximum_timeout):
+ """Creates an _interfaces.ExpirationManager appropriate for back-side use.
+
+ Args:
+ lock: The operation-wide lock.
+ termination_manager: The _interfaces.TerminationManager for the operation.
+ transmission_manager: The _interfaces.TransmissionManager for the
+ operation.
+ ingestion_manager: The _interfaces.IngestionManager for the operation.
+    timeout: A length of time in seconds to allow for the operation to run. May
+      be None, in which case default_timeout will be used.
+ default_timeout: The default length of time in seconds to allow for the
+ operation to run if the front-side customer has not specified such a value
+ (or if the value they specified is not yet known).
+ maximum_timeout: The maximum length of time in seconds to allow for the
+ operation to run.
+
+ Returns:
+ An _interfaces.ExpirationManager appropriate for back-side use.
+ """
+ commencement = time.time()
+ expiration_manager = _ExpirationManager(
+ lock, termination_manager, transmission_manager, ingestion_manager,
+ commencement, default_timeout if timeout is None else timeout,
+ maximum_timeout)
+ expiration_manager.start()
+ return expiration_manager
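+
+
+# A small worked example of the timeout arithmetic above, with illustrative
+# numbers only: with commencement at time t, timeout 10, and maximum_timeout
+# 30, the initial deadline is t + 10. A later change_timeout(60) is clamped
+# to the maximum, so the deadline becomes t + 30 (measured from commencement,
+# not from the time of the call), and the previously scheduled expiration is
+# cancelled and rescheduled under a new index.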
diff --git a/src/python/_framework/base/packets/_ingestion.py b/src/python/_framework/base/packets/_ingestion.py
new file mode 100644
index 0000000000..ad5ed4cada
--- /dev/null
+++ b/src/python/_framework/base/packets/_ingestion.py
@@ -0,0 +1,440 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""State and behavior for ingestion during an operation."""
+
+import abc
+import collections
+
+from _framework.base import exceptions
+from _framework.base import interfaces
+from _framework.base.packets import _constants
+from _framework.base.packets import _interfaces
+from _framework.base.packets import packets
+from _framework.foundation import abandonment
+from _framework.foundation import callable_util
+from _framework.foundation import stream
+
+_CREATE_CONSUMER_EXCEPTION_LOG_MESSAGE = 'Exception initializing ingestion!'
+_CONSUME_EXCEPTION_LOG_MESSAGE = 'Exception during ingestion!'
+
+
+class _ConsumerCreation(collections.namedtuple(
+ '_ConsumerCreation', ('consumer', 'remote_error', 'abandoned'))):
+ """A sum type for the outcome of ingestion initialization.
+
+ Either consumer will be non-None, remote_error will be True, or abandoned will
+ be True.
+
+ Attributes:
+ consumer: A stream.Consumer for ingesting payloads.
+ remote_error: A boolean indicating that the consumer could not be created
+ due to an error on the remote side of the operation.
+ abandoned: A boolean indicating that the consumer creation was abandoned.
+ """
+
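+# For orientation, the three variants of this sum type look like the
+# following (sketch only; some_consumer stands in for a real stream.Consumer):
+#
+#   _ConsumerCreation(some_consumer, False, False)  # successful creation
+#   _ConsumerCreation(None, True, False)            # remote error
+#   _ConsumerCreation(None, False, True)            # creation abandoned
+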
+
+class _EmptyConsumer(stream.Consumer):
+ """A no-operative stream.Consumer that ignores all inputs and calls."""
+
+ def consume(self, value):
+ """See stream.Consumer.consume for specification."""
+
+ def terminate(self):
+ """See stream.Consumer.terminate for specification."""
+
+ def consume_and_terminate(self, value):
+ """See stream.Consumer.consume_and_terminate for specification."""
+
+
+class _ConsumerCreator(object):
+ """Common specification of different consumer-creating behavior."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def create_consumer(self, requirement):
+ """Creates the stream.Consumer to which customer payloads will be delivered.
+
+ Any exceptions raised by this method should be attributed to and treated as
+ defects in the serviced or servicer code called by this method.
+
+ Args:
+ requirement: A value required by this _ConsumerCreator for consumer
+ creation.
+
+ Returns:
+ A _ConsumerCreation describing the result of consumer creation.
+ """
+ raise NotImplementedError()
+
+
+class _FrontConsumerCreator(_ConsumerCreator):
+ """A _ConsumerCreator appropriate for front-side use."""
+
+ def __init__(self, subscription, operation_context):
+ """Constructor.
+
+ Args:
+ subscription: The serviced's interfaces.ServicedSubscription for the
+ operation.
+ operation_context: The interfaces.OperationContext object for the
+ operation.
+ """
+ self._subscription = subscription
+ self._operation_context = operation_context
+
+ def create_consumer(self, requirement):
+ """See _ConsumerCreator.create_consumer for specification."""
+ if self._subscription.category == interfaces.FULL:
+ try:
+ return _ConsumerCreation(
+ self._subscription.ingestor.consumer(self._operation_context),
+ False, False)
+ except abandonment.Abandoned:
+ return _ConsumerCreation(None, False, True)
+ else:
+ return _ConsumerCreation(_EmptyConsumer(), False, False)
+
+
+class _BackConsumerCreator(_ConsumerCreator):
+ """A _ConsumerCreator appropriate for back-side use."""
+
+ def __init__(self, servicer, operation_context, emission_consumer):
+ """Constructor.
+
+ Args:
+ servicer: The interfaces.Servicer that will service the operation.
+ operation_context: The interfaces.OperationContext object for the
+ operation.
+ emission_consumer: The stream.Consumer object to which payloads emitted
+ from the operation will be passed.
+ """
+ self._servicer = servicer
+ self._operation_context = operation_context
+ self._emission_consumer = emission_consumer
+
+ def create_consumer(self, requirement):
+ """See _ConsumerCreator.create_consumer for full specification.
+
+ Args:
+ requirement: The name of the Servicer method to be called during this
+ operation.
+
+ Returns:
+ A _ConsumerCreation describing the result of consumer creation.
+ """
+ try:
+ return _ConsumerCreation(
+ self._servicer.service(
+ requirement, self._operation_context, self._emission_consumer),
+ False, False)
+ except exceptions.NoSuchMethodError:
+ return _ConsumerCreation(None, True, False)
+ except abandonment.Abandoned:
+ return _ConsumerCreation(None, False, True)
+
+
+class _WrappedConsumer(object):
+ """Wraps a consumer to catch the exceptions that it is allowed to throw."""
+
+ def __init__(self, consumer):
+ """Constructor.
+
+ Args:
+ consumer: A stream.Consumer that may raise abandonment.Abandoned from any
+ of its methods.
+ """
+ self._consumer = consumer
+
+ def moar(self, payload, complete):
+ """Makes progress with the wrapped consumer.
+
+ This method catches all exceptions allowed to be thrown by the wrapped
+ consumer. Any exceptions raised by this method should be blamed on the
+ customer-supplied consumer.
+
+ Args:
+ payload: A customer-significant payload object. May be None only if
+ complete is True.
+ complete: Whether or not the end of the payload sequence has been reached.
+ May be False only if payload is not None.
+
+ Returns:
+ True if the wrapped consumer made progress or False if the wrapped
+ consumer raised abandonment.Abandoned to indicate its abandonment of
+ progress.
+ """
+ try:
+      if payload is not None:
+ if complete:
+ self._consumer.consume_and_terminate(payload)
+ else:
+ self._consumer.consume(payload)
+ else:
+ self._consumer.terminate()
+ return True
+ except abandonment.Abandoned:
+ return False
+
+
+class _IngestionManager(_interfaces.IngestionManager):
+ """An implementation of _interfaces.IngestionManager."""
+
+ def __init__(
+ self, lock, pool, consumer_creator, failure_kind, termination_manager,
+ transmission_manager):
+ """Constructor.
+
+ Args:
+ lock: The operation-wide lock.
+ pool: A thread pool in which to execute customer code.
+ consumer_creator: A _ConsumerCreator wrapping the portion of customer code
+ that when called returns the stream.Consumer with which the customer
+ code will ingest payload values.
+ failure_kind: Whichever one of packets.Kind.SERVICED_FAILURE or
+ packets.Kind.SERVICER_FAILURE describes local failure of customer code.
+ termination_manager: The _interfaces.TerminationManager for the operation.
+ transmission_manager: The _interfaces.TransmissionManager for the
+ operation.
+ """
+ self._lock = lock
+ self._pool = pool
+ self._consumer_creator = consumer_creator
+ self._failure_kind = failure_kind
+ self._termination_manager = termination_manager
+ self._transmission_manager = transmission_manager
+ self._expiration_manager = None
+
+ self._wrapped_ingestion_consumer = None
+ self._pending_ingestion = []
+ self._ingestion_complete = False
+ self._processing = False
+
+ def set_expiration_manager(self, expiration_manager):
+ self._expiration_manager = expiration_manager
+
+ def _abort_internal_only(self):
+ self._wrapped_ingestion_consumer = None
+ self._pending_ingestion = None
+
+ def _abort_and_notify(self, outcome):
+ self._abort_internal_only()
+ self._termination_manager.abort(outcome)
+ self._transmission_manager.abort(outcome)
+ self._expiration_manager.abort()
+
+ def _next(self):
+ """Computes the next step for ingestion.
+
+ Returns:
+      A (payload, complete, moar) triplet indicating what payload (if any) is
+      available to feed into customer code, whether or not the sequence of
+      payloads has terminated, and whether or not there is anything
+      immediately actionable for customer code to do.
+ """
+ if self._pending_ingestion is None:
+ return None, False, False
+ elif self._pending_ingestion:
+ payload = self._pending_ingestion.pop(0)
+ complete = self._ingestion_complete and not self._pending_ingestion
+ return payload, complete, True
+ elif self._ingestion_complete:
+ return None, True, True
+ else:
+ return None, False, False
+
+ def _process(self, wrapped_ingestion_consumer, payload, complete):
+ """A method to call to execute customer code.
+
+ This object's lock must *not* be held when calling this method.
+
+ Args:
+ wrapped_ingestion_consumer: The _WrappedConsumer with which to pass
+ payloads to customer code.
+ payload: A customer payload. May be None only if complete is True.
+ complete: Whether or not the sequence of payloads to pass to the customer
+ has concluded.
+ """
+ while True:
+ consumption_outcome = callable_util.call_logging_exceptions(
+ wrapped_ingestion_consumer.moar, _CONSUME_EXCEPTION_LOG_MESSAGE,
+ payload, complete)
+ if consumption_outcome.exception is None:
+ if consumption_outcome.return_value:
+ with self._lock:
+ if complete:
+ self._pending_ingestion = None
+ self._termination_manager.ingestion_complete()
+ return
+ else:
+ payload, complete, moar = self._next()
+ if not moar:
+ self._processing = False
+ return
+ else:
+ with self._lock:
+ if self._pending_ingestion is not None:
+ self._abort_and_notify(self._failure_kind)
+ self._processing = False
+ return
+ else:
+ with self._lock:
+ self._abort_and_notify(self._failure_kind)
+ self._processing = False
+ return
+
+ def start(self, requirement):
+ if self._pending_ingestion is not None:
+ def initialize():
+ consumer_creation_outcome = callable_util.call_logging_exceptions(
+ self._consumer_creator.create_consumer,
+ _CREATE_CONSUMER_EXCEPTION_LOG_MESSAGE, requirement)
+ if consumer_creation_outcome.return_value is None:
+ with self._lock:
+ self._abort_and_notify(self._failure_kind)
+ self._processing = False
+ elif consumer_creation_outcome.return_value.remote_error:
+ with self._lock:
+ self._abort_and_notify(packets.Kind.RECEPTION_FAILURE)
+ self._processing = False
+ elif consumer_creation_outcome.return_value.abandoned:
+ with self._lock:
+ if self._pending_ingestion is not None:
+ self._abort_and_notify(self._failure_kind)
+ self._processing = False
+ else:
+ wrapped_ingestion_consumer = _WrappedConsumer(
+ consumer_creation_outcome.return_value.consumer)
+ with self._lock:
+ self._wrapped_ingestion_consumer = wrapped_ingestion_consumer
+ payload, complete, moar = self._next()
+ if not moar:
+ self._processing = False
+ return
+
+ self._process(wrapped_ingestion_consumer, payload, complete)
+
+ self._pool.submit(
+ callable_util.with_exceptions_logged(
+ initialize, _constants.INTERNAL_ERROR_LOG_MESSAGE))
+ self._processing = True
+
+ def consume(self, payload):
+ if self._ingestion_complete:
+ self._abort_and_notify(self._failure_kind)
+ elif self._pending_ingestion is not None:
+ if self._processing:
+ self._pending_ingestion.append(payload)
+ else:
+ self._pool.submit(
+ callable_util.with_exceptions_logged(
+ self._process, _constants.INTERNAL_ERROR_LOG_MESSAGE),
+ self._wrapped_ingestion_consumer, payload, False)
+ self._processing = True
+
+ def terminate(self):
+ if self._ingestion_complete:
+ self._abort_and_notify(self._failure_kind)
+ else:
+ self._ingestion_complete = True
+ if self._pending_ingestion is not None and not self._processing:
+ self._pool.submit(
+ callable_util.with_exceptions_logged(
+ self._process, _constants.INTERNAL_ERROR_LOG_MESSAGE),
+ self._wrapped_ingestion_consumer, None, True)
+ self._processing = True
+
+ def consume_and_terminate(self, payload):
+ if self._ingestion_complete:
+ self._abort_and_notify(self._failure_kind)
+ else:
+ self._ingestion_complete = True
+ if self._pending_ingestion is not None:
+ if self._processing:
+ self._pending_ingestion.append(payload)
+ else:
+ self._pool.submit(
+ callable_util.with_exceptions_logged(
+ self._process, _constants.INTERNAL_ERROR_LOG_MESSAGE),
+ self._wrapped_ingestion_consumer, payload, True)
+ self._processing = True
+
+ def abort(self):
+ """See _interfaces.IngestionManager.abort for specification."""
+ self._abort_internal_only()
+
+
+def front_ingestion_manager(
+ lock, pool, subscription, termination_manager, transmission_manager,
+ operation_context):
+ """Creates an IngestionManager appropriate for front-side use.
+
+ Args:
+ lock: The operation-wide lock.
+ pool: A thread pool in which to execute customer code.
+ subscription: A base_interfaces.ServicedSubscription indicating the
+ customer's interest in the results of the operation.
+ termination_manager: The _interfaces.TerminationManager for the operation.
+ transmission_manager: The _interfaces.TransmissionManager for the
+ operation.
+ operation_context: A base_interfaces.OperationContext for the operation.
+
+ Returns:
+ An IngestionManager appropriate for front-side use.
+ """
+ ingestion_manager = _IngestionManager(
+ lock, pool, _FrontConsumerCreator(subscription, operation_context),
+ packets.Kind.SERVICED_FAILURE, termination_manager, transmission_manager)
+ ingestion_manager.start(None)
+ return ingestion_manager
+
+
+def back_ingestion_manager(
+ lock, pool, servicer, termination_manager, transmission_manager,
+ operation_context, emission_consumer):
+ """Creates an IngestionManager appropriate for back-side use.
+
+ Args:
+ lock: The operation-wide lock.
+ pool: A thread pool in which to execute customer code.
+ servicer: A base_interfaces.Servicer for servicing the operation.
+ termination_manager: The _interfaces.TerminationManager for the operation.
+ transmission_manager: The _interfaces.TransmissionManager for the
+ operation.
+ operation_context: A base_interfaces.OperationContext for the operation.
+ emission_consumer: The _interfaces.EmissionConsumer for the operation.
+
+ Returns:
+ An IngestionManager appropriate for back-side use.
+ """
+ ingestion_manager = _IngestionManager(
+ lock, pool, _BackConsumerCreator(
+ servicer, operation_context, emission_consumer),
+ packets.Kind.SERVICER_FAILURE, termination_manager, transmission_manager)
+ return ingestion_manager
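+
+
+# A minimal sketch of the kind of customer-supplied stream.Consumer these
+# managers drive; PrintingConsumer is a hypothetical name used only for
+# illustration:
+#
+#   class PrintingConsumer(stream.Consumer):
+#
+#     def consume(self, value):
+#       print(value)
+#
+#     def terminate(self):
+#       print('done')
+#
+#     def consume_and_terminate(self, value):
+#       self.consume(value)
+#       self.terminate()
+#
+# Payloads handed to an _IngestionManager are either delivered straight to
+# such a consumer on the supplied pool or, if a delivery is already in
+# flight, appended to _pending_ingestion and drained by the in-flight
+# _process call.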
diff --git a/src/python/_framework/base/packets/_interfaces.py b/src/python/_framework/base/packets/_interfaces.py
new file mode 100644
index 0000000000..5f6c0593d0
--- /dev/null
+++ b/src/python/_framework/base/packets/_interfaces.py
@@ -0,0 +1,269 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Package-internal interfaces."""
+
+import abc
+
+# base_interfaces and packets are referenced from specification in this module.
+from _framework.base import interfaces as base_interfaces # pylint: disable=unused-import
+from _framework.base.packets import packets # pylint: disable=unused-import
+from _framework.foundation import stream
+
+
+class TerminationManager(object):
+ """An object responsible for handling the termination of an operation."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def is_active(self):
+ """Reports whether or not the operation is active.
+
+ Returns:
+ True if the operation is active or False if the operation has terminated.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_callback(self, callback):
+ """Registers a callback to be called on operation termination.
+
+ If the operation has already terminated, the callback will be called
+ immediately.
+
+ Args:
+ callback: A callable that will be passed one of base_interfaces.COMPLETED,
+ base_interfaces.CANCELLED, base_interfaces.EXPIRED,
+ base_interfaces.RECEPTION_FAILURE, base_interfaces.TRANSMISSION_FAILURE,
+ base_interfaces.SERVICER_FAILURE, or base_interfaces.SERVICED_FAILURE.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def emission_complete(self):
+ """Indicates that emissions from customer code have completed."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def transmission_complete(self):
+ """Indicates that transmissions to the remote end are complete."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def ingestion_complete(self):
+ """Indicates that customer code ingestion of received values is complete."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def abort(self, kind):
+ """Indicates that the operation must abort for the indicated reason.
+
+ Args:
+ kind: A value of packets.Kind indicating operation abortion.
+ """
+ raise NotImplementedError()
+
+
+class TransmissionManager(object):
+ """A manager responsible for transmitting to the other end of an operation."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def inmit(self, emission, complete):
+ """Accepts a value for transmission to the other end of the operation.
+
+ Args:
+ emission: A value of some significance to the customer to be transmitted
+ to the other end of the operation. May be None only if complete is True.
+ complete: A boolean that if True indicates that customer code has emitted
+ all values it intends to emit.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def abort(self, kind):
+ """Indicates that the operation has aborted for the indicated reason.
+
+ Args:
+ kind: A value of packets.Kind indicating operation abortion.
+ """
+ raise NotImplementedError()
+
+
+class EmissionManager(stream.Consumer):
+ """A manager of values emitted by customer code."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def set_ingestion_manager_and_expiration_manager(
+ self, ingestion_manager, expiration_manager):
+ """Sets two other objects with which this EmissionManager will cooperate.
+
+ Args:
+ ingestion_manager: The IngestionManager for the operation.
+ expiration_manager: The ExpirationManager for the operation.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def consume(self, value):
+ """Accepts a value emitted by customer code.
+
+ This method should only be called by customer code.
+
+ Args:
+ value: Any value of significance to the customer.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def terminate(self):
+ """Indicates that no more values will be emitted by customer code.
+
+ This method should only be called by customer code.
+
+ Implementations of this method may be idempotent and forgive customer code
+ calling this method more than once.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def consume_and_terminate(self, value):
+ """Accepts the last value emitted by customer code.
+
+ This method should only be called by customer code.
+
+ Args:
+ value: Any value of significance to the customer.
+ """
+ raise NotImplementedError()
+
+
+class IngestionManager(stream.Consumer):
+ """A manager responsible for executing customer code."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def set_expiration_manager(self, expiration_manager):
+ """Sets the ExpirationManager with which this object will cooperate."""
+
+ @abc.abstractmethod
+ def start(self, requirement):
+ """Commences execution of customer code.
+
+ Args:
+ requirement: Some value unavailable at the time of this object's
+ construction that is required to begin executing customer code.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def consume(self, payload):
+ """Accepts a customer-significant value to be supplied to customer code.
+
+ Args:
+ payload: Some customer-significant value.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def terminate(self):
+ """Indicates the end of values to be supplied to customer code."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def consume_and_terminate(self, payload):
+ """Accepts the last value to be supplied to customer code.
+
+ Args:
+ payload: Some customer-significant value (and the last such value).
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def abort(self):
+ """Indicates to this manager that the operation has aborted."""
+ raise NotImplementedError()
+
+
+class ExpirationManager(object):
+ """A manager responsible for aborting the operation if it runs out of time."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def change_timeout(self, timeout):
+ """Changes the timeout allotted for the operation.
+
+    Operation duration is always measured from the beginning of the operation;
+ calling this method changes the operation's allotted time to timeout total
+ seconds, not timeout seconds from the time of this method call.
+
+ Args:
+ timeout: A length of time in seconds to allow for the operation.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def deadline(self):
+ """Returns the time until which the operation is allowed to run.
+
+ Returns:
+ The time (seconds since the epoch) at which the operation will expire.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def abort(self):
+ """Indicates to this manager that the operation has aborted."""
+ raise NotImplementedError()
+
+
+class ReceptionManager(object):
+ """A manager responsible for receiving packets from the other end."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def receive_packet(self, packet):
+ """Handle a packet from the other side of the operation.
+
+ Args:
+ packet: A packets.BackToFrontPacket or packets.FrontToBackPacket
+ appropriate to this end of the operation and this object.
+ """
+ raise NotImplementedError()
+
+
+class CancellationManager(object):
+ """A manager of operation cancellation."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def cancel(self):
+ """Cancels the operation."""
+ raise NotImplementedError()
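+
+
+# A brief sketch of the TerminationManager callback contract above, assuming
+# a hypothetical log_outcome function:
+#
+#   def log_outcome(outcome):
+#     # outcome is one of the base_interfaces termination constants, e.g.
+#     # base_interfaces.COMPLETED or base_interfaces.EXPIRED.
+#     print(outcome)
+#
+#   termination_manager.add_callback(log_outcome)
+#
+# If the operation has already terminated, the callback is invoked
+# immediately with the recorded outcome.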
diff --git a/src/python/_framework/base/packets/_reception.py b/src/python/_framework/base/packets/_reception.py
new file mode 100644
index 0000000000..a2a3823d28
--- /dev/null
+++ b/src/python/_framework/base/packets/_reception.py
@@ -0,0 +1,394 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""State and behavior for packet reception."""
+
+import abc
+
+from _framework.base.packets import _interfaces
+from _framework.base.packets import packets
+
+
+class _Receiver(object):
+ """Common specification of different packet-handling behavior."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def abort_if_abortive(self, packet):
+ """Aborts the operation if the packet is abortive.
+
+ Args:
+ packet: A just-arrived packet.
+
+ Returns:
+ A boolean indicating whether or not this Receiver aborted the operation
+ based on the packet.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def receive(self, packet):
+ """Handles a just-arrived packet.
+
+ Args:
+ packet: A just-arrived packet.
+
+ Returns:
+      A boolean indicating whether or not the packet was terminal (i.e. True if
+      no further non-abortive packets are legal after this one).
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def reception_failure(self):
+ """Aborts the operation with an indication of reception failure."""
+ raise NotImplementedError()
+
+
+def _abort(
+ category, termination_manager, transmission_manager, ingestion_manager,
+ expiration_manager):
+ """Indicates abortion with the given category to the given managers."""
+ termination_manager.abort(category)
+ transmission_manager.abort(category)
+ ingestion_manager.abort()
+ expiration_manager.abort()
+
+
+def _abort_if_abortive(
+ packet, abortive, termination_manager, transmission_manager,
+ ingestion_manager, expiration_manager):
+ """Determines a packet's being abortive and if so aborts the operation.
+
+ Args:
+ packet: A just-arrived packet.
+    abortive: A callable that takes a packet and returns either an operation
+      category indicating that the operation should be aborted or None
+      indicating that the operation should not be aborted.
+ termination_manager: The operation's _interfaces.TerminationManager.
+ transmission_manager: The operation's _interfaces.TransmissionManager.
+ ingestion_manager: The operation's _interfaces.IngestionManager.
+ expiration_manager: The operation's _interfaces.ExpirationManager.
+
+ Returns:
+ True if the operation was aborted; False otherwise.
+ """
+ abort_category = abortive(packet)
+ if abort_category is None:
+ return False
+ else:
+ _abort(
+ abort_category, termination_manager, transmission_manager,
+ ingestion_manager, expiration_manager)
+ return True
+
+
+def _reception_failure(
+ termination_manager, transmission_manager, ingestion_manager,
+ expiration_manager):
+ """Aborts the operation with an indication of reception failure."""
+ _abort(
+ packets.Kind.RECEPTION_FAILURE, termination_manager, transmission_manager,
+ ingestion_manager, expiration_manager)
+
+
+class _BackReceiver(_Receiver):
+ """Packet-handling specific to the back side of an operation."""
+
+ def __init__(
+ self, termination_manager, transmission_manager, ingestion_manager,
+ expiration_manager):
+ """Constructor.
+
+ Args:
+ termination_manager: The operation's _interfaces.TerminationManager.
+ transmission_manager: The operation's _interfaces.TransmissionManager.
+ ingestion_manager: The operation's _interfaces.IngestionManager.
+ expiration_manager: The operation's _interfaces.ExpirationManager.
+ """
+ self._termination_manager = termination_manager
+ self._transmission_manager = transmission_manager
+ self._ingestion_manager = ingestion_manager
+ self._expiration_manager = expiration_manager
+
+ self._first_packet_seen = False
+ self._last_packet_seen = False
+
+ def _abortive(self, packet):
+ """Determines whether or not (and if so, how) a packet is abortive.
+
+ Args:
+ packet: A just-arrived packet.
+
+ Returns:
+      One of packets.Kind.CANCELLATION, packets.Kind.EXPIRATION,
+      packets.Kind.SERVICED_FAILURE, or packets.Kind.RECEPTION_FAILURE,
+      indicating that the packet is abortive and how, or None, indicating that
+      the packet is not abortive.
+ """
+ if packet.kind is packets.Kind.CANCELLATION:
+ return packets.Kind.CANCELLATION
+ elif packet.kind is packets.Kind.EXPIRATION:
+ return packets.Kind.EXPIRATION
+ elif packet.kind is packets.Kind.SERVICED_FAILURE:
+ return packets.Kind.SERVICED_FAILURE
+ elif packet.kind is packets.Kind.RECEPTION_FAILURE:
+ return packets.Kind.SERVICED_FAILURE
+ elif (packet.kind in (packets.Kind.COMMENCEMENT, packets.Kind.ENTIRE) and
+ self._first_packet_seen):
+ return packets.Kind.RECEPTION_FAILURE
+ elif self._last_packet_seen:
+ return packets.Kind.RECEPTION_FAILURE
+ else:
+ return None
+
+ def abort_if_abortive(self, packet):
+ """See _Receiver.abort_if_abortive for specification."""
+ return _abort_if_abortive(
+ packet, self._abortive, self._termination_manager,
+ self._transmission_manager, self._ingestion_manager,
+ self._expiration_manager)
+
+ def receive(self, packet):
+ """See _Receiver.receive for specification."""
+ if packet.timeout is not None:
+ self._expiration_manager.change_timeout(packet.timeout)
+
+ if packet.kind is packets.Kind.COMMENCEMENT:
+ self._first_packet_seen = True
+ self._ingestion_manager.start(packet.name)
+ if packet.payload is not None:
+ self._ingestion_manager.consume(packet.payload)
+ elif packet.kind is packets.Kind.CONTINUATION:
+ self._ingestion_manager.consume(packet.payload)
+ elif packet.kind is packets.Kind.COMPLETION:
+ self._last_packet_seen = True
+ if packet.payload is None:
+ self._ingestion_manager.terminate()
+ else:
+ self._ingestion_manager.consume_and_terminate(packet.payload)
+ else:
+ self._first_packet_seen = True
+ self._last_packet_seen = True
+ self._ingestion_manager.start(packet.name)
+ if packet.payload is None:
+ self._ingestion_manager.terminate()
+ else:
+        self._ingestion_manager.consume_and_terminate(packet.payload)
+    return self._last_packet_seen
+
+ def reception_failure(self):
+ """See _Receiver.reception_failure for specification."""
+ _reception_failure(
+ self._termination_manager, self._transmission_manager,
+ self._ingestion_manager, self._expiration_manager)
+
+
+class _FrontReceiver(_Receiver):
+ """Packet-handling specific to the front side of an operation."""
+
+ def __init__(
+ self, termination_manager, transmission_manager, ingestion_manager,
+ expiration_manager):
+ """Constructor.
+
+ Args:
+ termination_manager: The operation's _interfaces.TerminationManager.
+ transmission_manager: The operation's _interfaces.TransmissionManager.
+ ingestion_manager: The operation's _interfaces.IngestionManager.
+ expiration_manager: The operation's _interfaces.ExpirationManager.
+ """
+ self._termination_manager = termination_manager
+ self._transmission_manager = transmission_manager
+ self._ingestion_manager = ingestion_manager
+ self._expiration_manager = expiration_manager
+
+ self._last_packet_seen = False
+
+ def _abortive(self, packet):
+ """Determines whether or not (and if so, how) a packet is abortive.
+
+ Args:
+ packet: A just-arrived packet.
+
+ Returns:
+ One of packets.Kind.EXPIRATION, packets.Kind.SERVICER_FAILURE, or
+ packets.Kind.RECEPTION_FAILURE, indicating that the packet is abortive
+ and how, or None, indicating that the packet is not abortive.
+ """
+ if packet.kind is packets.Kind.EXPIRATION:
+ return packets.Kind.EXPIRATION
+ elif packet.kind is packets.Kind.SERVICER_FAILURE:
+ return packets.Kind.SERVICER_FAILURE
+ elif packet.kind is packets.Kind.RECEPTION_FAILURE:
+ return packets.Kind.SERVICER_FAILURE
+ elif self._last_packet_seen:
+ return packets.Kind.RECEPTION_FAILURE
+ else:
+ return None
+
+ def abort_if_abortive(self, packet):
+ """See _Receiver.abort_if_abortive for specification."""
+ return _abort_if_abortive(
+ packet, self._abortive, self._termination_manager,
+ self._transmission_manager, self._ingestion_manager,
+ self._expiration_manager)
+
+ def receive(self, packet):
+ """See _Receiver.receive for specification."""
+ if packet.kind is packets.Kind.CONTINUATION:
+ self._ingestion_manager.consume(packet.payload)
+ elif packet.kind is packets.Kind.COMPLETION:
+ self._last_packet_seen = True
+ if packet.payload is None:
+ self._ingestion_manager.terminate()
+ else:
+        self._ingestion_manager.consume_and_terminate(packet.payload)
+    return self._last_packet_seen
+
+ def reception_failure(self):
+ """See _Receiver.reception_failure for specification."""
+ _reception_failure(
+ self._termination_manager, self._transmission_manager,
+ self._ingestion_manager, self._expiration_manager)
+
+
+class _ReceptionManager(_interfaces.ReceptionManager):
+ """A ReceptionManager based around a _Receiver passed to it."""
+
+ def __init__(self, lock, receiver):
+ """Constructor.
+
+ Args:
+ lock: The operation-servicing-wide lock object.
+ receiver: A _Receiver responsible for handling received packets.
+ """
+ self._lock = lock
+ self._receiver = receiver
+
+ self._lowest_unseen_sequence_number = 0
+ self._out_of_sequence_packets = {}
+ self._completed_sequence_number = None
+ self._aborted = False
+
+ def _sequence_failure(self, packet):
+ """Determines a just-arrived packet's sequential legitimacy.
+
+ Args:
+ packet: A just-arrived packet.
+
+ Returns:
+      True if the packet is sequentially illegitimate; False otherwise.
+ """
+ if packet.sequence_number < self._lowest_unseen_sequence_number:
+ return True
+ elif packet.sequence_number in self._out_of_sequence_packets:
+ return True
+ elif (self._completed_sequence_number is not None and
+ self._completed_sequence_number <= packet.sequence_number):
+ return True
+ else:
+ return False
+
+ def _process(self, packet):
+ """Process those packets ready to be processed.
+
+ Args:
+ packet: A just-arrived packet the sequence number of which matches this
+ _ReceptionManager's _lowest_unseen_sequence_number field.
+ """
+ while True:
+ completed = self._receiver.receive(packet)
+ if completed:
+ self._out_of_sequence_packets.clear()
+ self._completed_sequence_number = packet.sequence_number
+ self._lowest_unseen_sequence_number = packet.sequence_number + 1
+ return
+ else:
+ next_packet = self._out_of_sequence_packets.pop(
+ packet.sequence_number + 1, None)
+ if next_packet is None:
+ self._lowest_unseen_sequence_number = packet.sequence_number + 1
+ return
+ else:
+ packet = next_packet
+
+ def receive_packet(self, packet):
+ """See _interfaces.ReceptionManager.receive_packet for specification."""
+ with self._lock:
+ if self._aborted:
+ return
+ elif self._sequence_failure(packet):
+ self._receiver.reception_failure()
+ self._aborted = True
+ elif self._receiver.abort_if_abortive(packet):
+ self._aborted = True
+ elif packet.sequence_number == self._lowest_unseen_sequence_number:
+ self._process(packet)
+ else:
+ self._out_of_sequence_packets[packet.sequence_number] = packet
+
+
+def front_reception_manager(
+ lock, termination_manager, transmission_manager, ingestion_manager,
+ expiration_manager):
+ """Creates a _interfaces.ReceptionManager for front-side use.
+
+ Args:
+ lock: The operation-servicing-wide lock object.
+ termination_manager: The operation's _interfaces.TerminationManager.
+ transmission_manager: The operation's _interfaces.TransmissionManager.
+ ingestion_manager: The operation's _interfaces.IngestionManager.
+ expiration_manager: The operation's _interfaces.ExpirationManager.
+
+ Returns:
+ A _interfaces.ReceptionManager appropriate for front-side use.
+ """
+ return _ReceptionManager(
+ lock, _FrontReceiver(
+ termination_manager, transmission_manager, ingestion_manager,
+ expiration_manager))
+
+
+def back_reception_manager(
+ lock, termination_manager, transmission_manager, ingestion_manager,
+ expiration_manager):
+ """Creates a _interfaces.ReceptionManager for back-side use.
+
+ Args:
+ lock: The operation-servicing-wide lock object.
+ termination_manager: The operation's _interfaces.TerminationManager.
+ transmission_manager: The operation's _interfaces.TransmissionManager.
+ ingestion_manager: The operation's _interfaces.IngestionManager.
+ expiration_manager: The operation's _interfaces.ExpirationManager.
+
+ Returns:
+ A _interfaces.ReceptionManager appropriate for back-side use.
+ """
+ return _ReceptionManager(
+ lock, _BackReceiver(
+ termination_manager, transmission_manager, ingestion_manager,
+ expiration_manager))
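+
+
+# A short illustration of the sequencing behavior above (sketch only): with
+# _lowest_unseen_sequence_number at 0, a packet numbered 2 is parked in
+# _out_of_sequence_packets; packet 0 is processed on arrival, and when packet
+# 1 arrives it is processed and immediately followed by the parked packet 2.
+# A duplicate sequence number, or any sequence number at or past a recorded
+# completion, is treated as a reception failure and aborts the operation.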
diff --git a/src/python/_framework/base/packets/_termination.py b/src/python/_framework/base/packets/_termination.py
new file mode 100644
index 0000000000..d586c2167b
--- /dev/null
+++ b/src/python/_framework/base/packets/_termination.py
@@ -0,0 +1,201 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""State and behavior for operation termination."""
+
+from _framework.base import interfaces
+from _framework.base.packets import _constants
+from _framework.base.packets import _interfaces
+from _framework.base.packets import packets
+from _framework.foundation import callable_util
+
+_CALLBACK_EXCEPTION_LOG_MESSAGE = 'Exception calling termination callback!'
+
+# TODO(nathaniel): enum module.
+_EMISSION = 'emission'
+_TRANSMISSION = 'transmission'
+_INGESTION = 'ingestion'
+
+_FRONT_NOT_LISTENING_REQUIREMENTS = (_TRANSMISSION,)
+_BACK_NOT_LISTENING_REQUIREMENTS = (_EMISSION, _INGESTION,)
+_LISTENING_REQUIREMENTS = (_TRANSMISSION, _INGESTION,)
+
+_KINDS_TO_OUTCOMES = {
+ packets.Kind.COMPLETION: interfaces.COMPLETED,
+ packets.Kind.CANCELLATION: interfaces.CANCELLED,
+ packets.Kind.EXPIRATION: interfaces.EXPIRED,
+ packets.Kind.RECEPTION_FAILURE: interfaces.RECEPTION_FAILURE,
+ packets.Kind.TRANSMISSION_FAILURE: interfaces.TRANSMISSION_FAILURE,
+ packets.Kind.SERVICER_FAILURE: interfaces.SERVICER_FAILURE,
+ packets.Kind.SERVICED_FAILURE: interfaces.SERVICED_FAILURE,
+ }
+
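+# For orientation (sketch only): a front side that is not listening for
+# results needs only its transmission to finish before the operation counts
+# as completed, a listening side additionally needs ingestion to finish, and
+# a back side that is not listening needs both emission and ingestion. Each
+# *_complete method below discards one requirement, and the operation
+# terminates with packets.Kind.COMPLETION once the requirement set is empty.
+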
+
+class _TerminationManager(_interfaces.TerminationManager):
+ """An implementation of _interfaces.TerminationManager."""
+
+ def __init__(
+ self, work_pool, utility_pool, action, requirements, local_failure):
+ """Constructor.
+
+ Args:
+ work_pool: A thread pool in which customer work will be done.
+      utility_pool: A thread pool in which utility work will be done.
+ action: An action to call on operation termination.
+ requirements: A combination of _EMISSION, _TRANSMISSION, and _INGESTION
+ identifying what must finish for the operation to be considered
+ completed.
+ local_failure: A packets.Kind specifying what constitutes local failure of
+ customer work.
+ """
+ self._work_pool = work_pool
+ self._utility_pool = utility_pool
+ self._action = action
+ self._local_failure = local_failure
+ self._has_locally_failed = False
+
+ self._outstanding_requirements = set(requirements)
+ self._kind = None
+ self._callbacks = []
+
+ def _terminate(self, kind):
+ """Terminates the operation.
+
+ Args:
+ kind: One of packets.Kind.COMPLETION, packets.Kind.CANCELLATION,
+ packets.Kind.EXPIRATION, packets.Kind.RECEPTION_FAILURE,
+ packets.Kind.TRANSMISSION_FAILURE, packets.Kind.SERVICER_FAILURE, or
+ packets.Kind.SERVICED_FAILURE.
+ """
+ self._outstanding_requirements = None
+ callbacks = list(self._callbacks)
+ self._callbacks = None
+ self._kind = kind
+ outcome = _KINDS_TO_OUTCOMES[kind]
+
+ act = callable_util.with_exceptions_logged(
+ self._action, _constants.INTERNAL_ERROR_LOG_MESSAGE)
+
+ if self._has_locally_failed:
+ self._utility_pool.submit(act, outcome)
+ else:
+ def call_callbacks_and_act(callbacks, outcome):
+ for callback in callbacks:
+ callback_outcome = callable_util.call_logging_exceptions(
+ callback, _CALLBACK_EXCEPTION_LOG_MESSAGE, outcome)
+ if callback_outcome.exception is not None:
+ outcome = _KINDS_TO_OUTCOMES[self._local_failure]
+ break
+ self._utility_pool.submit(act, outcome)
+
+ self._work_pool.submit(callable_util.with_exceptions_logged(
+ call_callbacks_and_act,
+ _constants.INTERNAL_ERROR_LOG_MESSAGE),
+ callbacks, outcome)
+
+ def is_active(self):
+ """See _interfaces.TerminationManager.is_active for specification."""
+ return self._outstanding_requirements is not None
+
+ def add_callback(self, callback):
+ """See _interfaces.TerminationManager.add_callback for specification."""
+ if not self._has_locally_failed:
+ if self._outstanding_requirements is None:
+ self._work_pool.submit(
+ callable_util.with_exceptions_logged(
+ callback, _CALLBACK_EXCEPTION_LOG_MESSAGE),
+ _KINDS_TO_OUTCOMES[self._kind])
+ else:
+ self._callbacks.append(callback)
+
+ def emission_complete(self):
+ """See superclass method for specification."""
+ if self._outstanding_requirements is not None:
+ self._outstanding_requirements.discard(_EMISSION)
+ if not self._outstanding_requirements:
+ self._terminate(packets.Kind.COMPLETION)
+
+ def transmission_complete(self):
+ """See superclass method for specification."""
+ if self._outstanding_requirements is not None:
+ self._outstanding_requirements.discard(_TRANSMISSION)
+ if not self._outstanding_requirements:
+ self._terminate(packets.Kind.COMPLETION)
+
+ def ingestion_complete(self):
+ """See superclass method for specification."""
+ if self._outstanding_requirements is not None:
+ self._outstanding_requirements.discard(_INGESTION)
+ if not self._outstanding_requirements:
+ self._terminate(packets.Kind.COMPLETION)
+
+ def abort(self, kind):
+ """See _interfaces.TerminationManager.abort for specification."""
+ if kind == self._local_failure:
+ self._has_locally_failed = True
+ if self._outstanding_requirements is not None:
+ self._terminate(kind)
+
+
+def front_termination_manager(work_pool, utility_pool, action, subscription):
+ """Creates a TerminationManager appropriate for front-side use.
+
+ Args:
+ work_pool: A thread pool in which customer work will be done.
+ utility_pool: A thread pool in which utility work will be done.
+ action: An action to call on operation termination.
+ subscription: One of interfaces.FULL, interfaces.TERMINATION_ONLY, or
+ interfaces.NONE.
+
+ Returns:
+ A TerminationManager appropriate for front-side use.
+ """
+ return _TerminationManager(
+ work_pool, utility_pool, action,
+ _FRONT_NOT_LISTENING_REQUIREMENTS if subscription == interfaces.NONE else
+ _LISTENING_REQUIREMENTS, packets.Kind.SERVICED_FAILURE)
+
+
+def back_termination_manager(work_pool, utility_pool, action, subscription):
+ """Creates a TerminationManager appropriate for back-side use.
+
+ Args:
+ work_pool: A thread pool in which customer work will be done.
+ utility_pool: A thread pool in which utility work will be done.
+ action: An action to call on operation termination.
+ subscription: One of interfaces.FULL, interfaces.TERMINATION_ONLY, or
+ interfaces.NONE.
+
+ Returns:
+ A TerminationManager appropriate for back-side use.
+ """
+ return _TerminationManager(
+ work_pool, utility_pool, action,
+ _BACK_NOT_LISTENING_REQUIREMENTS if subscription == interfaces.NONE else
+ _LISTENING_REQUIREMENTS, packets.Kind.SERVICER_FAILURE)
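
The termination manager above reduces to a small pattern: keep a set of outstanding requirements, discard one as each phase reports completion, and run the termination action exactly once when the set empties (or when an abort arrives first). A minimal standalone sketch of that pattern, using only the standard library and hypothetical names rather than the framework's own classes:

import threading

class RequirementTracker(object):
  """Illustrative stand-in for the requirement-set idea used above."""

  def __init__(self, requirements, on_termination):
    self._lock = threading.Lock()
    self._outstanding = set(requirements)  # e.g. {'transmission', 'ingestion'}
    self._on_termination = on_termination
    self._terminated = False

  def complete(self, requirement):
    """Marks one requirement satisfied; terminates when none remain."""
    with self._lock:
      if self._terminated:
        return
      self._outstanding.discard(requirement)
      if not self._outstanding:
        self._terminated = True
        self._on_termination('completion')

  def abort(self, outcome):
    """Terminates immediately with an abortive outcome, if not already done."""
    with self._lock:
      if not self._terminated:
        self._terminated = True
        self._on_termination(outcome)

def report(outcome):
  print('terminated with: ' + outcome)

tracker = RequirementTracker(('transmission', 'ingestion'), report)
tracker.complete('transmission')
tracker.complete('ingestion')  # prints 'terminated with: completion'

Unlike the real manager, this sketch runs the action inline rather than on a utility pool and omits callback handling entirely.
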
diff --git a/src/python/_framework/base/packets/_transmission.py b/src/python/_framework/base/packets/_transmission.py
new file mode 100644
index 0000000000..006128774d
--- /dev/null
+++ b/src/python/_framework/base/packets/_transmission.py
@@ -0,0 +1,393 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""State and behavior for packet transmission during an operation."""
+
+import abc
+
+from _framework.base import interfaces
+from _framework.base.packets import _constants
+from _framework.base.packets import _interfaces
+from _framework.base.packets import packets
+from _framework.foundation import callable_util
+
+_TRANSMISSION_EXCEPTION_LOG_MESSAGE = 'Exception during transmission!'
+
+_FRONT_TO_BACK_NO_TRANSMISSION_KINDS = (
+ packets.Kind.SERVICER_FAILURE,
+ )
+_BACK_TO_FRONT_NO_TRANSMISSION_KINDS = (
+ packets.Kind.CANCELLATION,
+ packets.Kind.SERVICED_FAILURE,
+ )
+
+
+class _Packetizer(object):
+ """Common specification of different packet-creating behavior."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def packetize(self, operation_id, sequence_number, payload, complete):
+ """Creates a packet indicating ordinary operation progress.
+
+ Args:
+ operation_id: The operation ID for the current operation.
+ sequence_number: A sequence number for the packet.
+ payload: A customer payload object. May be None if sequence_number is
+ zero or complete is true.
+ complete: A boolean indicating whether or not the packet should describe
+ itself as (but for a later indication of operation abortion) the last
+ packet to be sent.
+
+ Returns:
+ An object of an appropriate type suitable for transmission to the other
+ side of the operation.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def packetize_abortion(self, operation_id, sequence_number, kind):
+ """Creates a packet indicating that the operation is aborted.
+
+ Args:
+ operation_id: The operation ID for the current operation.
+ sequence_number: A sequence number for the packet.
+ kind: One of the values of packets.Kind indicating operational abortion.
+
+ Returns:
+ An object of an appropriate type suitable for transmission to the other
+ side of the operation, or None if transmission is not appropriate for
+ the given kind.
+ """
+ raise NotImplementedError()
+
+
+class _FrontPacketizer(_Packetizer):
+ """Front-side packet-creating behavior."""
+
+ def __init__(self, name, subscription, trace_id, timeout):
+ """Constructor.
+
+ Args:
+ name: The name of the operation.
+ subscription: One of interfaces.FULL, interfaces.TERMINATION_ONLY, or
+ interfaces.NONE describing the interest the front has in packets sent
+ from the back.
+ trace_id: A uuid.UUID identifying a set of related operations to which
+ this operation belongs.
+ timeout: A length of time in seconds to allow for the entire operation.
+ """
+ self._name = name
+ self._subscription = subscription
+ self._trace_id = trace_id
+ self._timeout = timeout
+
+ def packetize(self, operation_id, sequence_number, payload, complete):
+ """See _Packetizer.packetize for specification."""
+ if sequence_number:
+ return packets.FrontToBackPacket(
+ operation_id, sequence_number,
+ packets.Kind.COMPLETION if complete else packets.Kind.CONTINUATION,
+ self._name, self._subscription, self._trace_id, payload,
+ self._timeout)
+ else:
+ return packets.FrontToBackPacket(
+ operation_id, 0,
+ packets.Kind.ENTIRE if complete else packets.Kind.COMMENCEMENT,
+ self._name, self._subscription, self._trace_id, payload,
+ self._timeout)
+
+ def packetize_abortion(self, operation_id, sequence_number, kind):
+ """See _Packetizer.packetize_abortion for specification."""
+ if kind in _FRONT_TO_BACK_NO_TRANSMISSION_KINDS:
+ return None
+ else:
+ return packets.FrontToBackPacket(
+ operation_id, sequence_number, kind, None, None, None, None, None)
+
+
+class _BackPacketizer(_Packetizer):
+ """Back-side packet-creating behavior."""
+
+ def packetize(self, operation_id, sequence_number, payload, complete):
+ """See _Packetizer.packetize for specification."""
+ return packets.BackToFrontPacket(
+ operation_id, sequence_number,
+ packets.Kind.COMPLETION if complete else packets.Kind.CONTINUATION,
+ payload)
+
+ def packetize_abortion(self, operation_id, sequence_number, kind):
+ """See _Packetizer.packetize_abortion for specification."""
+ if kind in _BACK_TO_FRONT_NO_TRANSMISSION_KINDS:
+ return None
+ else:
+ return packets.BackToFrontPacket(
+ operation_id, sequence_number, kind, None)
+
+
+class TransmissionManager(_interfaces.TransmissionManager):
+ """A _interfaces.TransmissionManager on which other managers may be set."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def set_ingestion_and_expiration_managers(
+ self, ingestion_manager, expiration_manager):
+ """Sets two of the other managers with which this manager may interact.
+
+ Args:
+ ingestion_manager: The _interfaces.IngestionManager associated with the
+ current operation.
+ expiration_manager: The _interfaces.ExpirationManager associated with the
+ current operation.
+ """
+ raise NotImplementedError()
+
+
+class _EmptyTransmissionManager(TransmissionManager):
+ """A completely no-operative _interfaces.TransmissionManager."""
+
+ def set_ingestion_and_expiration_managers(
+ self, ingestion_manager, expiration_manager):
+ """See overriden method for specification."""
+
+ def inmit(self, emission, complete):
+ """See _interfaces.TransmissionManager.inmit for specification."""
+
+ def abort(self, category):
+ """See _interfaces.TransmissionManager.abort for specification."""
+
+
+class _TransmittingTransmissionManager(TransmissionManager):
+ """A TransmissionManager implementation that sends packets."""
+
+ def __init__(
+ self, lock, pool, callback, operation_id, packetizer,
+ termination_manager):
+ """Constructor.
+
+ Args:
+ lock: The operation-servicing-wide lock object.
+ pool: A thread pool in which the work of transmitting packets will be
+ performed.
+ callback: A callable that accepts packets and sends them to the other side
+ of the operation.
+ operation_id: The operation's ID.
+ packetizer: A _Packetizer for packet creation.
+ termination_manager: The _interfaces.TerminationManager associated with
+ this operation.
+ """
+ self._lock = lock
+ self._pool = pool
+ self._callback = callback
+ self._operation_id = operation_id
+ self._packetizer = packetizer
+ self._termination_manager = termination_manager
+ self._ingestion_manager = None
+ self._expiration_manager = None
+
+ self._emissions = []
+ self._emission_complete = False
+ self._kind = None
+ self._lowest_unused_sequence_number = 0
+ self._transmitting = False
+
+ def set_ingestion_and_expiration_managers(
+ self, ingestion_manager, expiration_manager):
+ """See overridden method for specification."""
+ self._ingestion_manager = ingestion_manager
+ self._expiration_manager = expiration_manager
+
+ def _lead_packet(self, emission, complete):
+ """Creates a packet suitable for leading off the transmission loop.
+
+ Args:
+ emission: A customer payload object to be sent to the other side of the
+ operation.
+ complete: Whether or not the sequence of customer payloads ends with
+ the passed object.
+
+ Returns:
+ A packet with which to lead off the transmission loop.
+ """
+ sequence_number = self._lowest_unused_sequence_number
+ self._lowest_unused_sequence_number += 1
+ return self._packetizer.packetize(
+ self._operation_id, sequence_number, emission, complete)
+
+ def _abortive_response_packet(self, kind):
+ """Creates a packet indicating operation abortion.
+
+ Args:
+ kind: One of the values of packets.Kind indicating operational abortion.
+
+ Returns:
+ A packet indicating operation abortion.
+ """
+ packet = self._packetizer.packetize_abortion(
+ self._operation_id, self._lowest_unused_sequence_number, kind)
+ if packet is None:
+ return None
+ else:
+ self._lowest_unused_sequence_number += 1
+ return packet
+
+ def _next_packet(self):
+ """Creates the next packet to be sent to the other side of the operation.
+
+ Returns:
+ A (completed, packet) tuple comprised of a boolean indicating whether or
+ not the sequence of packets has completed normally and the next packet to
+ send to the other side of the operation, or None if there is nothing
+ left to send.
+ """
+ if self._emissions is None:
+ return False, None
+ elif self._kind is None:
+ if self._emissions:
+ payload = self._emissions.pop(0)
+ complete = self._emission_complete and not self._emissions
+ sequence_number = self._lowest_unused_sequence_number
+ self._lowest_unused_sequence_number += 1
+ return complete, self._packetizer.packetize(
+ self._operation_id, sequence_number, payload, complete)
+ else:
+ return self._emission_complete, None
+ else:
+ packet = self._abortive_response_packet(self._kind)
+ self._emissions = None
+ return False, None if packet is None else packet
+
+ def _transmit(self, packet):
+ """Commences the transmission loop sending packets.
+
+ Args:
+ packet: A packet to be sent to the other side of the operation.
+ """
+ def transmit(packet):
+ while True:
+ transmission_outcome = callable_util.call_logging_exceptions(
+ self._callback, _TRANSMISSION_EXCEPTION_LOG_MESSAGE, packet)
+ if transmission_outcome.exception is None:
+ with self._lock:
+ complete, packet = self._next_packet()
+ if packet is None:
+ if complete:
+ self._termination_manager.transmission_complete()
+ self._transmitting = False
+ return
+ else:
+ with self._lock:
+ self._emissions = None
+ self._termination_manager.abort(packets.Kind.TRANSMISSION_FAILURE)
+ self._ingestion_manager.abort()
+ self._expiration_manager.abort()
+ self._transmitting = False
+ return
+
+ self._pool.submit(callable_util.with_exceptions_logged(
+ transmit, _constants.INTERNAL_ERROR_LOG_MESSAGE), packet)
+ self._transmitting = True
+
+ def inmit(self, emission, complete):
+ """See _interfaces.TransmissionManager.inmit for specification."""
+ if self._emissions is not None and self._kind is None:
+ self._emission_complete = complete
+ if self._transmitting:
+ self._emissions.append(emission)
+ else:
+ self._transmit(self._lead_packet(emission, complete))
+
+ def abort(self, kind):
+ """See _interfaces.TransmissionManager.abort for specification."""
+ if self._emissions is not None and self._kind is None:
+ self._kind = kind
+ if not self._transmitting:
+ packet = self._abortive_response_packet(kind)
+ self._emissions = None
+ if packet is not None:
+ self._transmit(packet)
+
+
+def front_transmission_manager(
+ lock, pool, callback, operation_id, name, subscription, trace_id, timeout,
+ termination_manager):
+ """Creates a TransmissionManager appropriate for front-side use.
+
+ Args:
+ lock: The operation-servicing-wide lock object.
+ pool: A thread pool in which the work of transmitting packets will be
+ performed.
+ callback: A callable that accepts packets and sends them to the other side
+ of the operation.
+ operation_id: The operation's ID.
+ name: The name of the operation.
+ subscription: One of interfaces.FULL, interfaces.TERMINATION_ONLY, or
+ interfaces.NONE describing the interest the front has in packets sent
+ from the back.
+ trace_id: A uuid.UUID identifying a set of related operations to which
+ this operation belongs.
+ timeout: A length of time in seconds to allow for the entire operation.
+ termination_manager: The _interfaces.TerminationManager associated with
+ this operation.
+
+ Returns:
+ A TransmissionManager appropriate for front-side use.
+ """
+ return _TransmittingTransmissionManager(
+ lock, pool, callback, operation_id, _FrontPacketizer(
+ name, subscription, trace_id, timeout),
+ termination_manager)
+
+
+def back_transmission_manager(
+ lock, pool, callback, operation_id, termination_manager, subscription):
+ """Creates a TransmissionManager appropriate for back-side use.
+
+ Args:
+ lock: The operation-servicing-wide lock object.
+ pool: A thread pool in which the work of transmitting packets will be
+ performed.
+ callback: A callable that accepts packets and sends them to the other side
+ of the operation.
+ operation_id: The operation's ID.
+ termination_manager: The _interfaces.TerminationManager associated with
+ this operation.
+ subscription: One of interfaces.FULL, interfaces.TERMINATION_ONLY, or
+ interfaces.NONE describing the interest the front has in packets sent from
+ the back.
+
+ Returns:
+ A TransmissionManager appropriate for back-side use.
+ """
+ if subscription == interfaces.NONE:
+ return _EmptyTransmissionManager()
+ else:
+ return _TransmittingTransmissionManager(
+ lock, pool, callback, operation_id, _BackPacketizer(),
+ termination_manager)
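
Stripped of packets and managers, the transmitting manager's core mechanic is to buffer emissions under a lock and keep at most one drain loop in flight on the pool at a time. A self-contained sketch of that mechanic follows; the names are hypothetical and not part of the framework, and it assumes a concurrent.futures-style pool, which is the kind of executor used elsewhere in this tree via logging_pool:

import threading
from concurrent import futures

class SingleWriterSender(object):
  """Illustrative sketch: buffer payloads, keep at most one sender running."""

  def __init__(self, pool, send):
    self._lock = threading.Lock()
    self._pool = pool    # a futures.ThreadPoolExecutor (or similar)
    self._send = send    # callable that actually transmits one payload
    self._pending = []
    self._sending = False

  def _drain(self, payload):
    while True:
      self._send(payload)
      with self._lock:
        if not self._pending:
          self._sending = False
          return
        payload = self._pending.pop(0)

  def submit(self, payload):
    with self._lock:
      if self._sending:
        self._pending.append(payload)
      else:
        self._sending = True
        self._pool.submit(self._drain, payload)

pool = futures.ThreadPoolExecutor(max_workers=1)
sender = SingleWriterSender(pool, lambda payload: None)  # discards payloads
for value in range(3):
  sender.submit(value)
pool.shutdown(wait=True)

The real manager layers packet creation, abortion packets, and coordination with the termination, ingestion, and expiration managers on top of this loop.
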
diff --git a/src/python/_framework/base/packets/implementations.py b/src/python/_framework/base/packets/implementations.py
new file mode 100644
index 0000000000..2f07054d4d
--- /dev/null
+++ b/src/python/_framework/base/packets/implementations.py
@@ -0,0 +1,77 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Entry points into the packet-exchange-based implementation the base layer."""
+
+# interfaces is referenced from specification in this module.
+from _framework.base.packets import _ends
+from _framework.base.packets import interfaces # pylint: disable=unused-import
+
+
+def front(work_pool, transmission_pool, utility_pool):
+ """Factory function for creating interfaces.Fronts.
+
+ Args:
+ work_pool: A thread pool to be used for doing work within the created Front
+ object.
+ transmission_pool: A thread pool to be used within the created Front object
+ for transmitting values to some Back object.
+ utility_pool: A thread pool to be used within the created Front object for
+ utility tasks.
+
+ Returns:
+ An interfaces.Front.
+ """
+ return _ends.Front(work_pool, transmission_pool, utility_pool)
+
+
+def back(
+ servicer, work_pool, transmission_pool, utility_pool, default_timeout,
+ maximum_timeout):
+ """Factory function for creating interfaces.Backs.
+
+ Args:
+ servicer: An interfaces.Servicer for servicing operations.
+ work_pool: A thread pool to be used for doing work within the created Back
+ object.
+ transmission_pool: A thread pool to be used within the created Back object
+ for transmitting values to some Front object.
+ utility_pool: A thread pool to be used within the created Back object for
+ utility tasks.
+ default_timeout: A length of time in seconds to be used as the default
+ time allotted for a single operation.
+ maximum_timeout: A length of time in seconds to be used as the maximum
+ time allotted for a single operation.
+
+ Returns:
+ An interfaces.Back.
+ """
+ return _ends.Back(
+ servicer, work_pool, transmission_pool, utility_pool, default_timeout,
+ maximum_timeout)
diff --git a/src/python/_framework/base/packets/implementations_test.py b/src/python/_framework/base/packets/implementations_test.py
new file mode 100644
index 0000000000..8bb5353176
--- /dev/null
+++ b/src/python/_framework/base/packets/implementations_test.py
@@ -0,0 +1,80 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Tests for _framework.base.packets.implementations."""
+
+import unittest
+
+from _framework.base import interfaces_test
+from _framework.base import util
+from _framework.base.packets import implementations
+from _framework.foundation import logging_pool
+
+POOL_MAX_WORKERS = 100
+DEFAULT_TIMEOUT = 30
+MAXIMUM_TIMEOUT = 60
+
+
+class ImplementationsTest(
+ interfaces_test.FrontAndBackTest, unittest.TestCase):
+
+ def setUp(self):
+ self.memory_transmission_pool = logging_pool.pool(POOL_MAX_WORKERS)
+ self.front_work_pool = logging_pool.pool(POOL_MAX_WORKERS)
+ self.front_transmission_pool = logging_pool.pool(POOL_MAX_WORKERS)
+ self.front_utility_pool = logging_pool.pool(POOL_MAX_WORKERS)
+ self.back_work_pool = logging_pool.pool(POOL_MAX_WORKERS)
+ self.back_transmission_pool = logging_pool.pool(POOL_MAX_WORKERS)
+ self.back_utility_pool = logging_pool.pool(POOL_MAX_WORKERS)
+ self.test_pool = logging_pool.pool(POOL_MAX_WORKERS)
+ self.test_servicer = interfaces_test.TestServicer(self.test_pool)
+ self.front = implementations.front(
+ self.front_work_pool, self.front_transmission_pool,
+ self.front_utility_pool)
+ self.back = implementations.back(
+ self.test_servicer, self.back_work_pool, self.back_transmission_pool,
+ self.back_utility_pool, DEFAULT_TIMEOUT, MAXIMUM_TIMEOUT)
+ self.front.join_rear_link(self.back)
+ self.back.join_fore_link(self.front)
+
+ def tearDown(self):
+ util.wait_for_idle(self.back)
+ util.wait_for_idle(self.front)
+ self.memory_transmission_pool.shutdown(wait=True)
+ self.front_work_pool.shutdown(wait=True)
+ self.front_transmission_pool.shutdown(wait=True)
+ self.front_utility_pool.shutdown(wait=True)
+ self.back_work_pool.shutdown(wait=True)
+ self.back_transmission_pool.shutdown(wait=True)
+ self.back_utility_pool.shutdown(wait=True)
+ self.test_pool.shutdown(wait=True)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/src/python/_framework/base/packets/in_memory.py b/src/python/_framework/base/packets/in_memory.py
new file mode 100644
index 0000000000..17daf3acf7
--- /dev/null
+++ b/src/python/_framework/base/packets/in_memory.py
@@ -0,0 +1,108 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Entry points into the packet-exchange-based implementation the base layer."""
+
+import threading
+
+from _framework.base.packets import _constants
+from _framework.base.packets import interfaces
+from _framework.foundation import callable_util
+
+
+class _Serializer(object):
+ """A utility for serializing values that may arrive concurrently."""
+
+ def __init__(self, pool):
+ self._lock = threading.Lock()
+ self._pool = pool
+ self._sink = None
+ self._spinning = False
+ self._values = []
+
+ def _spin(self, sink, value):
+ while True:
+ sink(value)
+ with self._lock:
+ if self._sink is None or not self._values:
+ self._spinning = False
+ return
+ else:
+ sink, value = self._sink, self._values.pop(0)
+
+ def set_sink(self, sink):
+ with self._lock:
+ self._sink = sink
+ if sink is not None and self._values and not self._spinning:
+ self._spinning = True
+ self._pool.submit(
+ callable_util.with_exceptions_logged(
+ self._spin, _constants.INTERNAL_ERROR_LOG_MESSAGE),
+ sink, self._values.pop(0))
+
+ def add_value(self, value):
+ with self._lock:
+ if self._sink and not self._spinning:
+ self._spinning = True
+ self._pool.submit(
+ callable_util.with_exceptions_logged(
+ self._spin, _constants.INTERNAL_ERROR_LOG_MESSAGE),
+ self._sink, value)
+ else:
+ self._values.append(value)
+
+
+class Link(interfaces.ForeLink, interfaces.RearLink):
+ """A trivial implementation of interfaces.ForeLink and interfaces.RearLink."""
+
+ def __init__(self, pool):
+ """Constructor.
+
+ Args:
+ pool: A thread pool to be used for serializing ticket exchange in each
+ direction.
+ """
+ self._front_to_back = _Serializer(pool)
+ self._back_to_front = _Serializer(pool)
+
+ def join_fore_link(self, fore_link):
+ """See interfaces.RearLink.join_fore_link for specification."""
+ self._back_to_front.set_sink(fore_link.accept_back_to_front_ticket)
+
+ def join_rear_link(self, rear_link):
+ """See interfaces.ForeLink.join_rear_link for specification."""
+ self._front_to_back.set_sink(rear_link.accept_front_to_back_ticket)
+
+ def accept_front_to_back_ticket(self, ticket):
+ """See interfaces.ForeLink.accept_front_to_back_ticket for specification."""
+ self._front_to_back.add_value(ticket)
+
+ def accept_back_to_front_ticket(self, ticket):
+ """See interfaces.RearLink.accept_back_to_front_ticket for specification."""
+ self._back_to_front.add_value(ticket)
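
A hedged usage sketch of the Link above: two toy endpoints (placeholders, not framework classes) are joined to a single Link, and a ticket handed to the front-to-back side is echoed back and delivered on the pool's threads. It assumes src/python is on the import path and a concurrent.futures-compatible pool:

import threading
from concurrent import futures

from _framework.base.packets import in_memory

class ToyFore(object):
  """Placeholder fore link: records the back-to-front ticket it receives."""

  def __init__(self):
    self.received = threading.Event()

  def accept_back_to_front_ticket(self, ticket):
    print('front received: %s' % (ticket,))
    self.received.set()

class ToyRear(object):
  """Placeholder rear link: echoes every front-to-back ticket back."""

  def __init__(self, link):
    self._link = link

  def accept_front_to_back_ticket(self, ticket):
    self._link.accept_back_to_front_ticket(ticket)

pool = futures.ThreadPoolExecutor(max_workers=2)
link = in_memory.Link(pool)
fore = ToyFore()
link.join_fore_link(fore)
link.join_rear_link(ToyRear(link))
link.accept_front_to_back_ticket('a ticket')
fore.received.wait()
pool.shutdown(wait=True)
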
diff --git a/src/python/_framework/base/packets/interfaces.py b/src/python/_framework/base/packets/interfaces.py
new file mode 100644
index 0000000000..99f9e87772
--- /dev/null
+++ b/src/python/_framework/base/packets/interfaces.py
@@ -0,0 +1,84 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Interfaces defined and used by the base layer of RPC Framework."""
+
+import abc
+
+# packets is referenced from specifications in this module.
+from _framework.base import interfaces
+from _framework.base.packets import packets # pylint: disable=unused-import
+
+
+class ForeLink(object):
+ """Accepts back-to-front tickets and emits front-to-back tickets."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def accept_back_to_front_ticket(self, ticket):
+ """Accept a packets.BackToFrontPacket.
+
+ Args:
+ ticket: Any packets.BackToFrontPacket.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def join_rear_link(self, rear_link):
+ """Mates this object with a peer with which it will exchange tickets."""
+ raise NotImplementedError()
+
+
+class RearLink(object):
+ """Accepts front-to-back tickets and emits back-to-front tickets."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def accept_front_to_back_ticket(self, ticket):
+ """Accepts a packets.FrontToBackPacket.
+
+ Args:
+ ticket: Any packets.FrontToBackPacket.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def join_fore_link(self, fore_link):
+ """Mates this object with a peer with which it will exchange tickets."""
+ raise NotImplementedError()
+
+
+class Front(ForeLink, interfaces.Front):
+ """Clientish objects that operate by sending and receiving tickets."""
+ __metaclass__ = abc.ABCMeta
+
+
+class Back(RearLink, interfaces.Back):
+ """Serverish objects that operate by sending and receiving tickets."""
+ __metaclass__ = abc.ABCMeta
diff --git a/src/python/_framework/base/packets/null.py b/src/python/_framework/base/packets/null.py
new file mode 100644
index 0000000000..9b40a00505
--- /dev/null
+++ b/src/python/_framework/base/packets/null.py
@@ -0,0 +1,56 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Null links that ignore tickets passed to them."""
+
+from _framework.base.packets import interfaces
+
+
+class _NullForeLink(interfaces.ForeLink):
+ """A do-nothing ForeLink."""
+
+ def accept_back_to_front_ticket(self, ticket):
+ pass
+
+ def join_rear_link(self, rear_link):
+ raise NotImplementedError()
+
+
+class _NullRearLink(interfaces.RearLink):
+ """A do-nothing RearLink."""
+
+ def accept_front_to_back_ticket(self, ticket):
+ pass
+
+ def join_fore_link(self, fore_link):
+ raise NotImplementedError()
+
+
+NULL_FORE_LINK = _NullForeLink()
+NULL_REAR_LINK = _NullRearLink()
diff --git a/src/python/_framework/base/packets/packets.py b/src/python/_framework/base/packets/packets.py
new file mode 100644
index 0000000000..1315ca650e
--- /dev/null
+++ b/src/python/_framework/base/packets/packets.py
@@ -0,0 +1,112 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Packets used between fronts and backs."""
+
+import collections
+import enum
+
+# interfaces is referenced from specifications in this module.
+from _framework.base import interfaces # pylint: disable=unused-import
+
+
+@enum.unique
+class Kind(enum.Enum):
+ """Identifies the overall kind of a ticket."""
+
+ COMMENCEMENT = 'commencement'
+ CONTINUATION = 'continuation'
+ COMPLETION = 'completion'
+ ENTIRE = 'entire'
+ CANCELLATION = 'cancellation'
+ EXPIRATION = 'expiration'
+ SERVICER_FAILURE = 'servicer failure'
+ SERVICED_FAILURE = 'serviced failure'
+ RECEPTION_FAILURE = 'reception failure'
+ TRANSMISSION_FAILURE = 'transmission failure'
+
+
+class FrontToBackPacket(
+ collections.namedtuple(
+ 'FrontToBackPacket',
+ ['operation_id', 'sequence_number', 'kind', 'name', 'subscription',
+ 'trace_id', 'payload', 'timeout'])):
+ """A sum type for all values sent from a front to a back.
+
+ Attributes:
+ operation_id: A unique-with-respect-to-equality hashable object identifying
+ a particular operation.
+ sequence_number: A zero-indexed integer sequence number identifying the
+ packet's place among all the packets sent from front to back for this
+ particular operation. Must be zero if kind is Kind.COMMENCEMENT or
+ Kind.ENTIRE. Must be positive for any other kind.
+ kind: One of Kind.COMMENCEMENT, Kind.CONTINUATION, Kind.COMPLETION,
+ Kind.ENTIRE, Kind.CANCELLATION, Kind.EXPIRATION, Kind.SERVICED_FAILURE,
+ Kind.RECEPTION_FAILURE, or Kind.TRANSMISSION_FAILURE.
+ name: The name of an operation. Must be present if kind is Kind.COMMENCEMENT
+ or Kind.ENTIRE. Must be None for any other kind.
+ subscription: One of interfaces.FULL, interfaces.TERMINATION_ONLY, or
+ interfaces.NONE describing the interest the front has in packets sent from
+ the back. Must be present if kind is Kind.COMMENCEMENT or Kind.ENTIRE.
+ Must be None for any other kind.
+ trace_id: A uuid.UUID identifying a set of related operations to which this
+ operation belongs. May be None.
+ payload: A customer payload object. Must be present if kind is
+ Kind.CONTINUATION. Must be None if kind is Kind.CANCELLATION. May be None
+ for any other kind.
+ timeout: An optional length of time (measured from the beginning of the
+ operation) to allow for the entire operation. If None, a default value on
+ the back will be used. If present and excessively large, the back may
+ limit the operation to a smaller duration of its choice. May be present
+ for any ticket kind; setting a value on a later ticket allows fronts
+ to request time extensions (or even time reductions!) on in-progress
+ operations.
+ """
+
+
+class BackToFrontPacket(
+ collections.namedtuple(
+ 'BackToFrontPacket',
+ ['operation_id', 'sequence_number', 'kind', 'payload'])):
+ """A sum type for all values sent from a back to a front.
+
+ Attributes:
+ operation_id: A unique-with-respect-to-equality hashable object identifying
+ a particular operation.
+ sequence_number: A zero-indexed integer sequence number identifying the
+ packet's place among all the packets sent from back to front for this
+ particular operation.
+ kind: One of Kind.CONTINUATION, Kind.COMPLETION, Kind.EXPIRATION,
+ Kind.SERVICER_FAILURE, Kind.RECEPTION_FAILURE, or
+ Kind.TRANSMISSION_FAILURE.
+ payload: A customer payload object. Must be present if kind is
+ Kind.CONTINUATION. May be None if kind is Kind.COMPLETION. Must be None if
+ kind is Kind.EXPIRATION, Kind.SERVICER_FAILURE, Kind.RECEPTION_FAILURE, or
+ Kind.TRANSMISSION_FAILURE.
+ """
diff --git a/src/python/_framework/base/util.py b/src/python/_framework/base/util.py
new file mode 100644
index 0000000000..6bbd18a59a
--- /dev/null
+++ b/src/python/_framework/base/util.py
@@ -0,0 +1,91 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Utilities helpful for working with the base layer of RPC Framework."""
+
+import collections
+import threading
+
+from _framework.base import interfaces
+
+
+class _ServicedSubscription(
+ collections.namedtuple('_ServicedSubscription', ['category', 'ingestor']),
+ interfaces.ServicedSubscription):
+ """See interfaces.ServicedSubscription for specification."""
+
+_NONE_SUBSCRIPTION = _ServicedSubscription(interfaces.NONE, None)
+_TERMINATION_ONLY_SUBSCRIPTION = _ServicedSubscription(
+ interfaces.TERMINATION_ONLY, None)
+
+
+def none_serviced_subscription():
+ """Creates a "none" interfaces.ServicedSubscription object.
+
+ Returns:
+ An interfaces.ServicedSubscription indicating no subscription to an
+ operation's results (such as would be the case for a fire-and-forget
+ operation invocation).
+ """
+ return _NONE_SUBSCRIPTION
+
+
+def termination_only_serviced_subscription():
+ """Creates a "termination only" interfaces.ServicedSubscription object.
+
+ Returns:
+ An interfaces.ServicedSubscription indicating that the front-side customer
+ is interested only in the overall termination outcome of the operation
+ (such as completion or expiration) and would ignore the actual results of
+ the operation.
+ """
+ return _TERMINATION_ONLY_SUBSCRIPTION
+
+
+def full_serviced_subscription(ingestor):
+ """Creates a "full" interfaces.ServicedSubscription object.
+
+ Args:
+ ingestor: A ServicedIngestor.
+
+ Returns:
+ A ServicedSubscription object indicating a full subscription.
+ """
+ return _ServicedSubscription(interfaces.FULL, ingestor)
+
+
+def wait_for_idle(end):
+ """Waits for an interfaces.End to complete all operations.
+
+ Args:
+ end: Any interfaces.End.
+ """
+ event = threading.Event()
+ end.add_idle_action(event.set)
+ event.wait()
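
A small hedged sketch of wait_for_idle's contract: it blocks until the passed End invokes the idle action registered with it. The End stand-in below is a placeholder, not a framework class, and the final assertion relies on the ingestor field of the _ServicedSubscription tuple defined above:

from _framework.base import util

class AlreadyIdleEnd(object):
  """Placeholder for an interfaces.End that is idle at registration time."""

  def add_idle_action(self, action):
    # A real End would defer this call until no operations were in progress.
    action()

util.wait_for_idle(AlreadyIdleEnd())  # returns without blocking

subscription = util.none_serviced_subscription()
assert subscription.ingestor is None
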
diff --git a/src/python/_framework/common/__init__.py b/src/python/_framework/common/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/src/python/_framework/common/__init__.py
diff --git a/src/python/_framework/common/cardinality.py b/src/python/_framework/common/cardinality.py
new file mode 100644
index 0000000000..610425e803
--- /dev/null
+++ b/src/python/_framework/common/cardinality.py
@@ -0,0 +1,42 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Defines an enum for classifying RPC methods by streaming semantics."""
+
+import enum
+
+
+@enum.unique
+class Cardinality(enum.Enum):
+ """Describes the streaming semantics of an RPC method."""
+
+ UNARY_UNARY = 'request-unary/response-unary'
+ UNARY_STREAM = 'request-unary/response-streaming'
+ STREAM_UNARY = 'request-streaming/response-unary'
+ STREAM_STREAM = 'request-streaming/response-streaming'
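
One common use of such an enum is as a dispatch key. A hedged sketch (the table contents are illustrative only, and the import assumes src/python is on the path with the enum dependency installed):

from _framework.common import cardinality

_DESCRIPTIONS = {
    cardinality.Cardinality.UNARY_UNARY: 'single request, single response',
    cardinality.Cardinality.UNARY_STREAM: 'single request, response stream',
    cardinality.Cardinality.STREAM_UNARY: 'request stream, single response',
    cardinality.Cardinality.STREAM_STREAM: 'request stream, response stream',
}

print(_DESCRIPTIONS[cardinality.Cardinality.UNARY_STREAM])
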
diff --git a/src/python/_framework/face/__init__.py b/src/python/_framework/face/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/src/python/_framework/face/__init__.py
diff --git a/src/python/_framework/face/_calls.py b/src/python/_framework/face/_calls.py
new file mode 100644
index 0000000000..ab58e6378b
--- /dev/null
+++ b/src/python/_framework/face/_calls.py
@@ -0,0 +1,310 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Utility functions for invoking RPCs."""
+
+import threading
+
+from _framework.base import interfaces as base_interfaces
+from _framework.base import util as base_util
+from _framework.face import _control
+from _framework.face import interfaces
+from _framework.foundation import callable_util
+from _framework.foundation import future
+
+_ITERATOR_EXCEPTION_LOG_MESSAGE = 'Exception iterating over requests!'
+_DONE_CALLBACK_LOG_MESSAGE = 'Exception calling Future "done" callback!'
+
+
+class _RendezvousServicedIngestor(base_interfaces.ServicedIngestor):
+
+ def __init__(self, rendezvous):
+ self._rendezvous = rendezvous
+
+ def consumer(self, operation_context):
+ return self._rendezvous
+
+
+class _EventServicedIngestor(base_interfaces.ServicedIngestor):
+
+ def __init__(self, result_consumer, abortion_callback):
+ self._result_consumer = result_consumer
+ self._abortion_callback = abortion_callback
+
+ def consumer(self, operation_context):
+ operation_context.add_termination_callback(
+ _control.as_operation_termination_callback(self._abortion_callback))
+ return self._result_consumer
+
+
+def _rendezvous_subscription(rendezvous):
+ return base_util.full_serviced_subscription(
+ _RendezvousServicedIngestor(rendezvous))
+
+
+def _unary_event_subscription(completion_callback, abortion_callback):
+ return base_util.full_serviced_subscription(
+ _EventServicedIngestor(
+ _control.UnaryConsumer(completion_callback), abortion_callback))
+
+
+def _stream_event_subscription(result_consumer, abortion_callback):
+ return base_util.full_serviced_subscription(
+ _EventServicedIngestor(result_consumer, abortion_callback))
+
+
+class _OperationCancellableIterator(interfaces.CancellableIterator):
+ """An interfaces.CancellableIterator for response-streaming operations."""
+
+ def __init__(self, rendezvous, operation):
+ self._rendezvous = rendezvous
+ self._operation = operation
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ return next(self._rendezvous)
+
+ def cancel(self):
+ self._operation.cancel()
+ self._rendezvous.set_outcome(base_interfaces.CANCELLED)
+
+
+class _OperationFuture(future.Future):
+ """A future.Future interface to an operation."""
+
+ def __init__(self, rendezvous, operation):
+ self._condition = threading.Condition()
+ self._rendezvous = rendezvous
+ self._operation = operation
+
+ self._outcome = None
+ self._callbacks = []
+
+ def cancel(self):
+ """See future.Future.cancel for specification."""
+ with self._condition:
+ if self._outcome is None:
+ self._operation.cancel()
+ self._outcome = future.aborted()
+ self._condition.notify_all()
+ return False
+
+ def cancelled(self):
+ """See future.Future.cancelled for specification."""
+ return False
+
+ def done(self):
+ """See future.Future.done for specification."""
+ with self._condition:
+ return (self._outcome is not None and
+ self._outcome.category is not future.ABORTED)
+
+ def outcome(self):
+ """See future.Future.outcome for specification."""
+ with self._condition:
+ while self._outcome is None:
+ self._condition.wait()
+ return self._outcome
+
+ def add_done_callback(self, callback):
+ """See future.Future.add_done_callback for specification."""
+ with self._condition:
+ if self._callbacks is not None:
+ self._callbacks.append(callback)
+ return
+
+ outcome = self._outcome
+
+ callable_util.call_logging_exceptions(
+ callback, _DONE_CALLBACK_LOG_MESSAGE, outcome)
+
+ def on_operation_termination(self, operation_outcome):
+ """Indicates to this object that the operation has terminated.
+
+ Args:
+ operation_outcome: One of base_interfaces.COMPLETED,
+ base_interfaces.CANCELLED, base_interfaces.EXPIRED,
+ base_interfaces.RECEPTION_FAILURE, base_interfaces.TRANSMISSION_FAILURE,
+ base_interfaces.SERVICED_FAILURE, or base_interfaces.SERVICER_FAILURE
+ indicating the categorical outcome of the operation.
+ """
+ with self._condition:
+ if (self._outcome is None and
+ operation_outcome != base_interfaces.COMPLETED):
+ self._outcome = future.raised(
+ _control.abortion_outcome_to_exception(operation_outcome))
+ self._condition.notify_all()
+
+ outcome = self._outcome
+ rendezvous = self._rendezvous
+ callbacks = list(self._callbacks)
+ self._callbacks = None
+
+ if outcome is None:
+ try:
+ return_value = next(rendezvous)
+ except Exception as e: # pylint: disable=broad-except
+ outcome = future.raised(e)
+ else:
+ outcome = future.returned(return_value)
+ with self._condition:
+ if self._outcome is None:
+ self._outcome = outcome
+ self._condition.notify_all()
+ else:
+ outcome = self._outcome
+
+ for callback in callbacks:
+ callable_util.call_logging_exceptions(
+ callback, _DONE_CALLBACK_LOG_MESSAGE, outcome)
+
+
+class _Call(interfaces.Call):
+
+ def __init__(self, operation):
+ self._operation = operation
+ self.context = _control.RpcContext(operation.context)
+
+ def cancel(self):
+ self._operation.cancel()
+
+
+def blocking_value_in_value_out(front, name, payload, timeout, trace_id):
+ """Services in a blocking fashion a value-in value-out servicer method."""
+ rendezvous = _control.Rendezvous()
+ subscription = _rendezvous_subscription(rendezvous)
+ operation = front.operate(
+ name, payload, True, timeout, subscription, trace_id)
+ operation.context.add_termination_callback(rendezvous.set_outcome)
+ return next(rendezvous)
+
+
+def future_value_in_value_out(front, name, payload, timeout, trace_id):
+ """Services a value-in value-out servicer method by returning a Future."""
+ rendezvous = _control.Rendezvous()
+ subscription = _rendezvous_subscription(rendezvous)
+ operation = front.operate(
+ name, payload, True, timeout, subscription, trace_id)
+ operation.context.add_termination_callback(rendezvous.set_outcome)
+ operation_future = _OperationFuture(rendezvous, operation)
+ operation.context.add_termination_callback(
+ operation_future.on_operation_termination)
+ return operation_future
+
+
+def inline_value_in_stream_out(front, name, payload, timeout, trace_id):
+ """Services a value-in stream-out servicer method."""
+ rendezvous = _control.Rendezvous()
+ subscription = _rendezvous_subscription(rendezvous)
+ operation = front.operate(
+ name, payload, True, timeout, subscription, trace_id)
+ operation.context.add_termination_callback(rendezvous.set_outcome)
+ return _OperationCancellableIterator(rendezvous, operation)
+
+
+def blocking_stream_in_value_out(
+ front, name, payload_iterator, timeout, trace_id):
+ """Services in a blocking fashion a stream-in value-out servicer method."""
+ rendezvous = _control.Rendezvous()
+ subscription = _rendezvous_subscription(rendezvous)
+ operation = front.operate(name, None, False, timeout, subscription, trace_id)
+ operation.context.add_termination_callback(rendezvous.set_outcome)
+ for payload in payload_iterator:
+ operation.consumer.consume(payload)
+ operation.consumer.terminate()
+ return next(rendezvous)
+
+
+def future_stream_in_value_out(
+ front, name, payload_iterator, timeout, trace_id, pool):
+ """Services a stream-in value-out servicer method by returning a Future."""
+ rendezvous = _control.Rendezvous()
+ subscription = _rendezvous_subscription(rendezvous)
+ operation = front.operate(name, None, False, timeout, subscription, trace_id)
+ operation.context.add_termination_callback(rendezvous.set_outcome)
+ pool.submit(
+ callable_util.with_exceptions_logged(
+ _control.pipe_iterator_to_consumer, _ITERATOR_EXCEPTION_LOG_MESSAGE),
+ payload_iterator, operation.consumer, lambda: True, True)
+ operation_future = _OperationFuture(rendezvous, operation)
+ operation.context.add_termination_callback(
+ operation_future.on_operation_termination)
+ return operation_future
+
+
+def inline_stream_in_stream_out(
+ front, name, payload_iterator, timeout, trace_id, pool):
+ """Services a stream-in stream-out servicer method."""
+ rendezvous = _control.Rendezvous()
+ subscription = _rendezvous_subscription(rendezvous)
+ operation = front.operate(name, None, False, timeout, subscription, trace_id)
+ operation.context.add_termination_callback(rendezvous.set_outcome)
+ pool.submit(
+ callable_util.with_exceptions_logged(
+ _control.pipe_iterator_to_consumer, _ITERATOR_EXCEPTION_LOG_MESSAGE),
+ payload_iterator, operation.consumer, lambda: True, True)
+ return _OperationCancellableIterator(rendezvous, operation)
+
+
+def event_value_in_value_out(
+ front, name, payload, completion_callback, abortion_callback, timeout,
+ trace_id):
+ subscription = _unary_event_subscription(
+ completion_callback, abortion_callback)
+ operation = front.operate(
+ name, payload, True, timeout, subscription, trace_id)
+ return _Call(operation)
+
+
+def event_value_in_stream_out(
+ front, name, payload, result_payload_consumer, abortion_callback, timeout,
+ trace_id):
+ subscription = _stream_event_subscription(
+ result_payload_consumer, abortion_callback)
+ operation = front.operate(
+ name, payload, True, timeout, subscription, trace_id)
+ return _Call(operation)
+
+
+def event_stream_in_value_out(
+ front, name, completion_callback, abortion_callback, timeout, trace_id):
+ subscription = _unary_event_subscription(
+ completion_callback, abortion_callback)
+ operation = front.operate(name, None, False, timeout, subscription, trace_id)
+ return _Call(operation), operation.consumer
+
+
+def event_stream_in_stream_out(
+ front, name, result_payload_consumer, abortion_callback, timeout, trace_id):
+ subscription = _stream_event_subscription(
+ result_payload_consumer, abortion_callback)
+ operation = front.operate(name, None, False, timeout, subscription, trace_id)
+ return _Call(operation), operation.consumer
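
future_stream_in_value_out and inline_stream_in_stream_out above hand the request iterator to a pool thread, which drains it into the operation's consumer via _control.pipe_iterator_to_consumer while the caller keeps a future or a response iterator. The self-contained sketch below reproduces that shape with only the standard library; SimpleConsumer, feed, and the queue-based wait are illustrative stand-ins, not part of this commit.

    import queue
    from concurrent import futures


    class SimpleConsumer(object):
        """Accumulates consumed values and reports a result on termination."""

        def __init__(self, result_queue):
            self._values = []
            self._result_queue = result_queue

        def consume(self, value):
            self._values.append(value)

        def terminate(self):
            # Hand the aggregate to whoever is waiting on the queue.
            self._result_queue.put(sum(self._values))


    def feed(iterator, consumer):
        """Pipes every value from iterator into consumer, then terminates it."""
        for value in iterator:
            consumer.consume(value)
        consumer.terminate()


    if __name__ == '__main__':
        results = queue.Queue()
        pool = futures.ThreadPoolExecutor(max_workers=1)
        # The request stream is drained on a pool thread while this thread
        # blocks on the result, as the stream-in helpers above do.
        pool.submit(feed, iter([1, 2, 3]), SimpleConsumer(results))
        print(results.get())  # 6
        pool.shutdown(wait=True)
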
diff --git a/src/python/_framework/face/_control.py b/src/python/_framework/face/_control.py
new file mode 100644
index 0000000000..2c221321d6
--- /dev/null
+++ b/src/python/_framework/face/_control.py
@@ -0,0 +1,194 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""State and behavior for translating between sync and async control flow."""
+
+import threading
+
+from _framework.base import interfaces as base_interfaces
+from _framework.face import exceptions
+from _framework.face import interfaces
+from _framework.foundation import abandonment
+from _framework.foundation import stream
+
+INTERNAL_ERROR_LOG_MESSAGE = ':-( RPC Framework (Face) Internal Error! :-('
+
+_OPERATION_OUTCOME_TO_RPC_ABORTION = {
+ base_interfaces.CANCELLED: interfaces.CANCELLED,
+ base_interfaces.EXPIRED: interfaces.EXPIRED,
+ base_interfaces.RECEPTION_FAILURE: interfaces.NETWORK_FAILURE,
+ base_interfaces.TRANSMISSION_FAILURE: interfaces.NETWORK_FAILURE,
+ base_interfaces.SERVICED_FAILURE: interfaces.SERVICED_FAILURE,
+ base_interfaces.SERVICER_FAILURE: interfaces.SERVICER_FAILURE,
+ }
+
+
+def _as_operation_termination_callback(rpc_abortion_callback):
+ def operation_termination_callback(operation_outcome):
+ rpc_abortion = _OPERATION_OUTCOME_TO_RPC_ABORTION.get(
+ operation_outcome, None)
+ if rpc_abortion is not None:
+ rpc_abortion_callback(rpc_abortion)
+ return operation_termination_callback
+
+
+def _abortion_outcome_to_exception(abortion_outcome):
+ if abortion_outcome == base_interfaces.CANCELLED:
+ return exceptions.CancellationError()
+ elif abortion_outcome == base_interfaces.EXPIRED:
+ return exceptions.ExpirationError()
+ elif abortion_outcome == base_interfaces.SERVICER_FAILURE:
+ return exceptions.ServicerError()
+ elif abortion_outcome == base_interfaces.SERVICED_FAILURE:
+ return exceptions.ServicedError()
+ else:
+ return exceptions.NetworkError()
+
+
+class UnaryConsumer(stream.Consumer):
+ """A stream.Consumer that should only ever be passed one value."""
+
+ def __init__(self, on_termination):
+ self._on_termination = on_termination
+ self._value = None
+
+ def consume(self, value):
+ self._value = value
+
+ def terminate(self):
+ self._on_termination(self._value)
+
+ def consume_and_terminate(self, value):
+ self._on_termination(value)
+
+
+class Rendezvous(stream.Consumer):
+ """A rendez-vous with stream.Consumer and iterator interfaces."""
+
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._values = []
+ self._values_completed = False
+ self._abortion = None
+
+ def consume(self, value):
+ with self._condition:
+ self._values.append(value)
+ self._condition.notify()
+
+ def terminate(self):
+ with self._condition:
+ self._values_completed = True
+ self._condition.notify()
+
+ def consume_and_terminate(self, value):
+ with self._condition:
+ self._values.append(value)
+ self._values_completed = True
+ self._condition.notify()
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ with self._condition:
+ while ((self._abortion is None) and
+ (not self._values) and
+ (not self._values_completed)):
+ self._condition.wait()
+ if self._abortion is not None:
+ raise _abortion_outcome_to_exception(self._abortion)
+ elif self._values:
+ return self._values.pop(0)
+ elif self._values_completed:
+ raise StopIteration()
+ else:
+ raise AssertionError('Unreachable code reached!')
+
+ def set_outcome(self, outcome):
+ with self._condition:
+ if outcome != base_interfaces.COMPLETED:
+ self._abortion = outcome
+ self._condition.notify()
+
+
+class RpcContext(interfaces.RpcContext):
+ """A wrapped base_interfaces.OperationContext."""
+
+ def __init__(self, operation_context):
+ self._operation_context = operation_context
+
+ def is_active(self):
+ return self._operation_context.is_active()
+
+ def time_remaining(self):
+ return self._operation_context.time_remaining()
+
+ def add_abortion_callback(self, abortion_callback):
+ self._operation_context.add_termination_callback(
+ _as_operation_termination_callback(abortion_callback))
+
+
+def pipe_iterator_to_consumer(iterator, consumer, active, terminate):
+ """Pipes values emitted from an iterator to a stream.Consumer.
+
+ Args:
+ iterator: An iterator from which values will be emitted.
+ consumer: A stream.Consumer to which values will be passed.
+    active: A no-argument callable that returns True while the work being done
+      by this function is still valid and should continue, and False once that
+      work should be abandoned.
+ terminate: A boolean indicating whether or not this function should
+ terminate the given consumer after passing to it all values emitted by the
+ given iterator.
+
+ Raises:
+ abandonment.Abandoned: If this function quits early after seeing False
+ returned by the active function passed to it.
+ Exception: This function raises whatever exceptions are raised by iterating
+ over the given iterator.
+ """
+ for element in iterator:
+ if not active():
+ raise abandonment.Abandoned()
+
+ consumer.consume(element)
+
+ if not active():
+ raise abandonment.Abandoned()
+ if terminate:
+ consumer.terminate()
+
+
+def abortion_outcome_to_exception(abortion_outcome):
+ return _abortion_outcome_to_exception(abortion_outcome)
+
+
+def as_operation_termination_callback(rpc_abortion_callback):
+ return _as_operation_termination_callback(rpc_abortion_callback)
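
Rendezvous above is the synchronization hinge of the module: a producer thread feeds it through the stream.Consumer methods while another thread blocks in next() until a value, termination, or an abortion outcome arrives. A minimal, standard-library-only restatement of that idea follows; MiniRendezvous is a hypothetical name, and it omits the abortion path of the real class.

    import threading


    class MiniRendezvous(object):
        """A consumer that is also a blocking iterator, guarded by one Condition."""

        def __init__(self):
            self._condition = threading.Condition()
            self._values = []
            self._done = False

        def consume(self, value):
            with self._condition:
                self._values.append(value)
                self._condition.notify()

        def terminate(self):
            with self._condition:
                self._done = True
                self._condition.notify()

        def __iter__(self):
            return self

        def __next__(self):
            with self._condition:
                while not self._values and not self._done:
                    self._condition.wait()
                if self._values:
                    return self._values.pop(0)
                raise StopIteration()


    if __name__ == '__main__':
        rendezvous = MiniRendezvous()

        def produce():
            for value in ('a', 'b', 'c'):
                rendezvous.consume(value)
            rendezvous.terminate()

        threading.Thread(target=produce).start()
        print(list(rendezvous))  # ['a', 'b', 'c']
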
diff --git a/src/python/_framework/face/_service.py b/src/python/_framework/face/_service.py
new file mode 100644
index 0000000000..d758c2f148
--- /dev/null
+++ b/src/python/_framework/face/_service.py
@@ -0,0 +1,189 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Behaviors for servicing RPCs."""
+
+# base_interfaces and interfaces are referenced only in specification
+# (docstrings) in this module.
+from _framework.base import interfaces as base_interfaces # pylint: disable=unused-import
+from _framework.face import _control
+from _framework.face import exceptions
+from _framework.face import interfaces # pylint: disable=unused-import
+from _framework.foundation import abandonment
+from _framework.foundation import callable_util
+from _framework.foundation import stream
+from _framework.foundation import stream_util
+
+
+class _ValueInStreamOutConsumer(stream.Consumer):
+ """A stream.Consumer that maps inputs one-to-many onto outputs."""
+
+ def __init__(self, behavior, context, downstream):
+ """Constructor.
+
+ Args:
+ behavior: A callable that takes a single value and an
+ interfaces.RpcContext and returns a generator of arbitrarily many
+ values.
+ context: An interfaces.RpcContext.
+ downstream: A stream.Consumer to which to pass the values generated by the
+ given behavior.
+ """
+ self._behavior = behavior
+ self._context = context
+ self._downstream = downstream
+
+ def consume(self, value):
+ _control.pipe_iterator_to_consumer(
+ self._behavior(value, self._context), self._downstream,
+ self._context.is_active, False)
+
+ def terminate(self):
+ self._downstream.terminate()
+
+ def consume_and_terminate(self, value):
+ _control.pipe_iterator_to_consumer(
+ self._behavior(value, self._context), self._downstream,
+ self._context.is_active, True)
+
+
+def _pool_wrap(behavior, operation_context):
+ """Wraps an operation-related behavior so that it may be called in a pool.
+
+ Args:
+ behavior: A callable related to carrying out an operation.
+ operation_context: A base_interfaces.OperationContext for the operation.
+
+ Returns:
+    A callable that, when called, carries out the behavior of the given
+    callable and appropriately handles whatever exceptions it raises.
+ """
+ def translation(*args):
+ try:
+ behavior(*args)
+ except (
+ abandonment.Abandoned,
+ exceptions.ExpirationError,
+ exceptions.CancellationError,
+ exceptions.ServicedError,
+ exceptions.NetworkError) as e:
+ if operation_context.is_active():
+ operation_context.fail(e)
+ except Exception as e:
+ operation_context.fail(e)
+ return callable_util.with_exceptions_logged(
+ translation, _control.INTERNAL_ERROR_LOG_MESSAGE)
+
+
+def adapt_inline_value_in_value_out(method):
+ def adaptation(response_consumer, operation_context):
+ rpc_context = _control.RpcContext(operation_context)
+ return stream_util.TransformingConsumer(
+ lambda request: method.service(request, rpc_context), response_consumer)
+ return adaptation
+
+
+def adapt_inline_value_in_stream_out(method):
+ def adaptation(response_consumer, operation_context):
+ rpc_context = _control.RpcContext(operation_context)
+ return _ValueInStreamOutConsumer(
+ method.service, rpc_context, response_consumer)
+ return adaptation
+
+
+def adapt_inline_stream_in_value_out(method, pool):
+ def adaptation(response_consumer, operation_context):
+ rendezvous = _control.Rendezvous()
+ operation_context.add_termination_callback(rendezvous.set_outcome)
+ def in_pool_thread():
+ response_consumer.consume_and_terminate(
+ method.service(rendezvous, _control.RpcContext(operation_context)))
+ pool.submit(_pool_wrap(in_pool_thread, operation_context))
+ return rendezvous
+ return adaptation
+
+
+def adapt_inline_stream_in_stream_out(method, pool):
+ """Adapts an interfaces.InlineStreamInStreamOutMethod for use with Consumers.
+
+ RPCs may be serviced by calling the return value of this function, passing
+ request values to the stream.Consumer returned from that call, and receiving
+ response values from the stream.Consumer passed to that call.
+
+ Args:
+ method: An interfaces.InlineStreamInStreamOutMethod.
+ pool: A thread pool.
+
+ Returns:
+ A callable that takes a stream.Consumer and a
+ base_interfaces.OperationContext and returns a stream.Consumer.
+ """
+ def adaptation(response_consumer, operation_context):
+ rendezvous = _control.Rendezvous()
+ operation_context.add_termination_callback(rendezvous.set_outcome)
+ def in_pool_thread():
+ _control.pipe_iterator_to_consumer(
+ method.service(rendezvous, _control.RpcContext(operation_context)),
+ response_consumer, operation_context.is_active, True)
+ pool.submit(_pool_wrap(in_pool_thread, operation_context))
+ return rendezvous
+ return adaptation
+
+
+def adapt_event_value_in_value_out(method):
+ def adaptation(response_consumer, operation_context):
+ def on_payload(payload):
+ method.service(
+ payload, response_consumer.consume_and_terminate,
+ _control.RpcContext(operation_context))
+ return _control.UnaryConsumer(on_payload)
+ return adaptation
+
+
+def adapt_event_value_in_stream_out(method):
+ def adaptation(response_consumer, operation_context):
+ def on_payload(payload):
+ method.service(
+ payload, response_consumer, _control.RpcContext(operation_context))
+ return _control.UnaryConsumer(on_payload)
+ return adaptation
+
+
+def adapt_event_stream_in_value_out(method):
+ def adaptation(response_consumer, operation_context):
+ rpc_context = _control.RpcContext(operation_context)
+ return method.service(response_consumer.consume_and_terminate, rpc_context)
+ return adaptation
+
+
+def adapt_event_stream_in_stream_out(method):
+ def adaptation(response_consumer, operation_context):
+ return method.service(
+ response_consumer, _control.RpcContext(operation_context))
+ return adaptation
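
Every adapt_* function above turns an inline request-to-response behavior into a stream.Consumer that RPC Framework can drive one event at a time. The sketch below shows that transformation in isolation; TransformingConsumer here is a plain-Python stand-in for the stream_util.TransformingConsumer imported above, and PrintingConsumer is purely illustrative.

    class PrintingConsumer(object):
        """A downstream consumer that just prints whatever it receives."""

        def consume(self, value):
            print('response:', value)

        def terminate(self):
            print('done')


    class TransformingConsumer(object):
        """Applies a transformation to each value before passing it downstream."""

        def __init__(self, transformation, downstream):
            self._transformation = transformation
            self._downstream = downstream

        def consume(self, value):
            self._downstream.consume(self._transformation(value))

        def terminate(self):
            self._downstream.terminate()

        def consume_and_terminate(self, value):
            self._downstream.consume(self._transformation(value))
            self._downstream.terminate()


    if __name__ == '__main__':
        # An inline unary behavior (here: doubling) adapted so that it can be
        # driven through the consumer interface instead of called directly.
        request_consumer = TransformingConsumer(
            lambda request: request * 2, PrintingConsumer())
        request_consumer.consume_and_terminate(21)  # prints 42, then done
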
diff --git a/src/python/_framework/face/_test_case.py b/src/python/_framework/face/_test_case.py
new file mode 100644
index 0000000000..50b55c389f
--- /dev/null
+++ b/src/python/_framework/face/_test_case.py
@@ -0,0 +1,81 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Common lifecycle code for in-memory-ticket-exchange Face-layer tests."""
+
+from _framework.face import implementations
+from _framework.face.testing import base_util
+from _framework.face.testing import test_case
+from _framework.foundation import logging_pool
+
+_TIMEOUT = 3
+_MAXIMUM_POOL_SIZE = 100
+
+
+class FaceTestCase(test_case.FaceTestCase):
+ """Provides abstract Face-layer tests an in-memory implementation."""
+
+ def set_up_implementation(
+ self,
+ name,
+ methods,
+ inline_value_in_value_out_methods,
+ inline_value_in_stream_out_methods,
+ inline_stream_in_value_out_methods,
+ inline_stream_in_stream_out_methods,
+ event_value_in_value_out_methods,
+ event_value_in_stream_out_methods,
+ event_stream_in_value_out_methods,
+ event_stream_in_stream_out_methods,
+ multi_method):
+ servicer_pool = logging_pool.pool(_MAXIMUM_POOL_SIZE)
+ stub_pool = logging_pool.pool(_MAXIMUM_POOL_SIZE)
+
+ servicer = implementations.servicer(
+ servicer_pool,
+ inline_value_in_value_out_methods=inline_value_in_value_out_methods,
+ inline_value_in_stream_out_methods=inline_value_in_stream_out_methods,
+ inline_stream_in_value_out_methods=inline_stream_in_value_out_methods,
+ inline_stream_in_stream_out_methods=inline_stream_in_stream_out_methods,
+ event_value_in_value_out_methods=event_value_in_value_out_methods,
+ event_value_in_stream_out_methods=event_value_in_stream_out_methods,
+ event_stream_in_value_out_methods=event_stream_in_value_out_methods,
+ event_stream_in_stream_out_methods=event_stream_in_stream_out_methods,
+ multi_method=multi_method)
+
+ linked_pair = base_util.linked_pair(servicer, _TIMEOUT)
+ server = implementations.server()
+ stub = implementations.stub(linked_pair.front, stub_pool)
+ return server, stub, (servicer_pool, stub_pool, linked_pair)
+
+ def tear_down_implementation(self, memo):
+ servicer_pool, stub_pool, linked_pair = memo
+ linked_pair.shut_down()
+ stub_pool.shutdown(wait=True)
+ servicer_pool.shutdown(wait=True)
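
The pools obtained from logging_pool.pool above exist so that work submitted to them cannot fail silently: exceptions raised on pool threads are logged instead of vanishing. A self-contained sketch of that idea under hypothetical names (LoggingPool and _with_exceptions_logged are not the module's real implementation):

    import logging
    from concurrent import futures


    def _with_exceptions_logged(behavior, message):
        """Wraps a callable so any exception it raises is logged, then re-raised."""
        def wrapped(*args, **kwargs):
            try:
                return behavior(*args, **kwargs)
            except Exception:
                logging.exception(message)
                raise
        return wrapped


    class LoggingPool(object):
        """A ThreadPoolExecutor wrapper that logs exceptions from submitted work."""

        def __init__(self, max_workers):
            self._executor = futures.ThreadPoolExecutor(max_workers=max_workers)

        def submit(self, fn, *args, **kwargs):
            return self._executor.submit(
                _with_exceptions_logged(fn, 'Exception in pooled work!'),
                *args, **kwargs)

        def shutdown(self, wait=True):
            self._executor.shutdown(wait=wait)


    if __name__ == '__main__':
        pool = LoggingPool(4)
        pool.submit(lambda: 1 / 0)  # the traceback is logged rather than lost
        pool.shutdown(wait=True)
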
diff --git a/src/python/_framework/face/blocking_invocation_inline_service_test.py b/src/python/_framework/face/blocking_invocation_inline_service_test.py
new file mode 100644
index 0000000000..96563c94ee
--- /dev/null
+++ b/src/python/_framework/face/blocking_invocation_inline_service_test.py
@@ -0,0 +1,46 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""One of the tests of the Face layer of RPC Framework."""
+
+import unittest
+
+from _framework.face import _test_case
+from _framework.face.testing import blocking_invocation_inline_service_test_case as test_case
+
+
+class BlockingInvocationInlineServiceTest(
+ _test_case.FaceTestCase,
+ test_case.BlockingInvocationInlineServiceTestCase,
+ unittest.TestCase):
+ pass
+
+
+if __name__ == '__main__':
+ unittest.main()
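
This test (and the two sibling *_test.py modules added below) is pure composition: an implementation-supplying mixin, an abstract suite of test methods, and unittest.TestCase combined through multiple inheritance. A minimal sketch of the same pattern with hypothetical names:

    import unittest


    class ArithmeticTestCaseMixin(object):
        """Abstract tests; concrete subclasses supply make_adder()."""

        def test_addition(self):
            adder = self.make_adder()
            self.assertEqual(5, adder(2, 3))


    class SimpleAdderTest(ArithmeticTestCaseMixin, unittest.TestCase):
        """Binds the abstract tests to one concrete implementation."""

        def make_adder(self):
            return lambda a, b: a + b


    if __name__ == '__main__':
        unittest.main()
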
diff --git a/src/python/_framework/face/demonstration.py b/src/python/_framework/face/demonstration.py
new file mode 100644
index 0000000000..501ec6b3f8
--- /dev/null
+++ b/src/python/_framework/face/demonstration.py
@@ -0,0 +1,118 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Demonstration-suitable implementation of the face layer of RPC Framework."""
+
+from _framework.base import util as _base_util
+from _framework.base.packets import implementations as _tickets_implementations
+from _framework.face import implementations
+from _framework.foundation import logging_pool
+
+_POOL_SIZE_LIMIT = 20
+
+_MAXIMUM_TIMEOUT = 90
+
+
+class LinkedPair(object):
+ """A Server and Stub that are linked to one another.
+
+ Attributes:
+ server: A Server.
+ stub: A Stub.
+ """
+
+ def shut_down(self):
+ """Shuts down this object and releases its resources."""
+ raise NotImplementedError()
+
+
+class _LinkedPair(LinkedPair):
+
+ def __init__(self, server, stub, front, back, pools):
+ self.server = server
+ self.stub = stub
+ self._front = front
+ self._back = back
+ self._pools = pools
+
+ def shut_down(self):
+ _base_util.wait_for_idle(self._front)
+ _base_util.wait_for_idle(self._back)
+
+ for pool in self._pools:
+ pool.shutdown(wait=True)
+
+
+def server_and_stub(
+ default_timeout,
+ inline_value_in_value_out_methods=None,
+ inline_value_in_stream_out_methods=None,
+ inline_stream_in_value_out_methods=None,
+ inline_stream_in_stream_out_methods=None,
+ event_value_in_value_out_methods=None,
+ event_value_in_stream_out_methods=None,
+ event_stream_in_value_out_methods=None,
+ event_stream_in_stream_out_methods=None,
+ multi_method=None):
+ """Creates a Server and Stub linked together for use."""
+ front_work_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
+ front_transmission_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
+ front_utility_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
+ back_work_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
+ back_transmission_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
+ back_utility_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
+ stub_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
+ pools = (
+ front_work_pool, front_transmission_pool, front_utility_pool,
+ back_work_pool, back_transmission_pool, back_utility_pool,
+ stub_pool)
+
+ servicer = implementations.servicer(
+ back_work_pool,
+ inline_value_in_value_out_methods=inline_value_in_value_out_methods,
+ inline_value_in_stream_out_methods=inline_value_in_stream_out_methods,
+ inline_stream_in_value_out_methods=inline_stream_in_value_out_methods,
+ inline_stream_in_stream_out_methods=inline_stream_in_stream_out_methods,
+ event_value_in_value_out_methods=event_value_in_value_out_methods,
+ event_value_in_stream_out_methods=event_value_in_stream_out_methods,
+ event_stream_in_value_out_methods=event_stream_in_value_out_methods,
+ event_stream_in_stream_out_methods=event_stream_in_stream_out_methods,
+ multi_method=multi_method)
+
+ front = _tickets_implementations.front(
+ front_work_pool, front_transmission_pool, front_utility_pool)
+ back = _tickets_implementations.back(
+ servicer, back_work_pool, back_transmission_pool, back_utility_pool,
+ default_timeout, _MAXIMUM_TIMEOUT)
+ front.join_rear_link(back)
+ back.join_fore_link(front)
+
+ stub = implementations.stub(front, stub_pool)
+
+ return _LinkedPair(implementations.server(), stub, front, back, pools)
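
Taken together with the rest of this commit, server_and_stub is the intended entry point for in-memory experimentation: register servicer methods, make calls through the returned stub, then shut the pair down. The untested sketch below shows what that might look like, assuming src/python is on sys.path; Doubler and the method name 'double' are hypothetical, and whether the payload round-trips exactly as written depends on the base packets layer, which is outside this excerpt.

    from _framework.face import demonstration
    from _framework.face import interfaces


    class Doubler(interfaces.InlineValueInValueOutMethod):
        """Answers each request with twice its value."""

        def service(self, request, context):
            return request * 2


    link = demonstration.server_and_stub(
        3,  # default timeout, in seconds
        inline_value_in_value_out_methods={'double': Doubler()})
    try:
        # Blocks until the in-memory 'RPC' completes or the timeout elapses.
        print(link.stub.blocking_value_in_value_out('double', 21, 3))
    finally:
        link.shut_down()
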
diff --git a/src/python/_framework/face/event_invocation_synchronous_event_service_test.py b/src/python/_framework/face/event_invocation_synchronous_event_service_test.py
new file mode 100644
index 0000000000..48e05b2478
--- /dev/null
+++ b/src/python/_framework/face/event_invocation_synchronous_event_service_test.py
@@ -0,0 +1,46 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""One of the tests of the Face layer of RPC Framework."""
+
+import unittest
+
+from _framework.face import _test_case
+from _framework.face.testing import event_invocation_synchronous_event_service_test_case as test_case
+
+
+class EventInvocationSynchronousEventServiceTest(
+ _test_case.FaceTestCase,
+ test_case.EventInvocationSynchronousEventServiceTestCase,
+ unittest.TestCase):
+ pass
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/src/python/_framework/face/exceptions.py b/src/python/_framework/face/exceptions.py
new file mode 100644
index 0000000000..f112df70bc
--- /dev/null
+++ b/src/python/_framework/face/exceptions.py
@@ -0,0 +1,77 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Exceptions used in the Face layer of RPC Framework."""
+
+import abc
+
+
+class NoSuchMethodError(Exception):
+ """Raised by customer code to indicate an unrecognized RPC method name.
+
+ Attributes:
+ name: The unrecognized name.
+ """
+
+ def __init__(self, name):
+ """Constructor.
+
+ Args:
+ name: The unrecognized RPC method name.
+ """
+ super(NoSuchMethodError, self).__init__()
+ self.name = name
+
+
+class RpcError(Exception):
+ """Common super type for all exceptions raised by the Face layer.
+
+ Only RPC Framework should instantiate and raise these exceptions.
+ """
+ __metaclass__ = abc.ABCMeta
+
+
+class CancellationError(RpcError):
+ """Indicates that an RPC has been cancelled."""
+
+
+class ExpirationError(RpcError):
+ """Indicates that an RPC has expired ("timed out")."""
+
+
+class NetworkError(RpcError):
+ """Indicates that some error occurred on the network."""
+
+
+class ServicedError(RpcError):
+ """Indicates that the Serviced failed in the course of an RPC."""
+
+
+class ServicerError(RpcError):
+ """Indicates that the Servicer failed in the course of servicing an RPC."""
diff --git a/src/python/_framework/face/future_invocation_asynchronous_event_service_test.py b/src/python/_framework/face/future_invocation_asynchronous_event_service_test.py
new file mode 100644
index 0000000000..96f5fe85d3
--- /dev/null
+++ b/src/python/_framework/face/future_invocation_asynchronous_event_service_test.py
@@ -0,0 +1,46 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""One of the tests of the Face layer of RPC Framework."""
+
+import unittest
+
+from _framework.face import _test_case
+from _framework.face.testing import future_invocation_asynchronous_event_service_test_case as test_case
+
+
+class FutureInvocationAsynchronousEventServiceTest(
+ _test_case.FaceTestCase,
+ test_case.FutureInvocationAsynchronousEventServiceTestCase,
+ unittest.TestCase):
+ pass
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/src/python/_framework/face/implementations.py b/src/python/_framework/face/implementations.py
new file mode 100644
index 0000000000..94362e2007
--- /dev/null
+++ b/src/python/_framework/face/implementations.py
@@ -0,0 +1,246 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Entry points into the Face layer of RPC Framework."""
+
+from _framework.base import exceptions as _base_exceptions
+from _framework.base import interfaces as base_interfaces
+from _framework.face import _calls
+from _framework.face import _service
+from _framework.face import exceptions
+from _framework.face import interfaces
+
+
+class _BaseServicer(base_interfaces.Servicer):
+
+ def __init__(self, methods, multi_method):
+ self._methods = methods
+ self._multi_method = multi_method
+
+ def service(self, name, context, output_consumer):
+ method = self._methods.get(name, None)
+ if method is not None:
+ return method(output_consumer, context)
+ elif self._multi_method is not None:
+ try:
+ return self._multi_method.service(name, output_consumer, context)
+ except exceptions.NoSuchMethodError:
+ raise _base_exceptions.NoSuchMethodError()
+ else:
+ raise _base_exceptions.NoSuchMethodError()
+
+
+class _Server(interfaces.Server):
+ """An interfaces.Server implementation."""
+
+
+class _Stub(interfaces.Stub):
+ """An interfaces.Stub implementation."""
+
+ def __init__(self, front, pool):
+ self._front = front
+ self._pool = pool
+
+ def blocking_value_in_value_out(self, name, request, timeout):
+ return _calls.blocking_value_in_value_out(
+ self._front, name, request, timeout, 'unused trace ID')
+
+ def future_value_in_value_out(self, name, request, timeout):
+ return _calls.future_value_in_value_out(
+ self._front, name, request, timeout, 'unused trace ID')
+
+ def inline_value_in_stream_out(self, name, request, timeout):
+ return _calls.inline_value_in_stream_out(
+ self._front, name, request, timeout, 'unused trace ID')
+
+ def blocking_stream_in_value_out(self, name, request_iterator, timeout):
+ return _calls.blocking_stream_in_value_out(
+ self._front, name, request_iterator, timeout, 'unused trace ID')
+
+ def future_stream_in_value_out(self, name, request_iterator, timeout):
+ return _calls.future_stream_in_value_out(
+ self._front, name, request_iterator, timeout, 'unused trace ID',
+ self._pool)
+
+ def inline_stream_in_stream_out(self, name, request_iterator, timeout):
+ return _calls.inline_stream_in_stream_out(
+ self._front, name, request_iterator, timeout, 'unused trace ID',
+ self._pool)
+
+ def event_value_in_value_out(
+ self, name, request, response_callback, abortion_callback, timeout):
+ return _calls.event_value_in_value_out(
+ self._front, name, request, response_callback, abortion_callback,
+ timeout, 'unused trace ID')
+
+ def event_value_in_stream_out(
+ self, name, request, response_consumer, abortion_callback, timeout):
+ return _calls.event_value_in_stream_out(
+ self._front, name, request, response_consumer, abortion_callback,
+ timeout, 'unused trace ID')
+
+ def event_stream_in_value_out(
+ self, name, response_callback, abortion_callback, timeout):
+ return _calls.event_stream_in_value_out(
+ self._front, name, response_callback, abortion_callback, timeout,
+ 'unused trace ID')
+
+ def event_stream_in_stream_out(
+ self, name, response_consumer, abortion_callback, timeout):
+ return _calls.event_stream_in_stream_out(
+ self._front, name, response_consumer, abortion_callback, timeout,
+ 'unused trace ID')
+
+
+def _aggregate_methods(
+ pool,
+ inline_value_in_value_out_methods,
+ inline_value_in_stream_out_methods,
+ inline_stream_in_value_out_methods,
+ inline_stream_in_stream_out_methods,
+ event_value_in_value_out_methods,
+ event_value_in_stream_out_methods,
+ event_stream_in_value_out_methods,
+ event_stream_in_stream_out_methods):
+ """Aggregates methods coded in according to different interfaces."""
+ methods = {}
+
+ def adapt_unpooled_methods(adapted_methods, unadapted_methods, adaptation):
+ if unadapted_methods is not None:
+ for name, unadapted_method in unadapted_methods.iteritems():
+ adapted_methods[name] = adaptation(unadapted_method)
+
+ def adapt_pooled_methods(adapted_methods, unadapted_methods, adaptation):
+ if unadapted_methods is not None:
+ for name, unadapted_method in unadapted_methods.iteritems():
+ adapted_methods[name] = adaptation(unadapted_method, pool)
+
+ adapt_unpooled_methods(
+ methods, inline_value_in_value_out_methods,
+ _service.adapt_inline_value_in_value_out)
+ adapt_unpooled_methods(
+ methods, inline_value_in_stream_out_methods,
+ _service.adapt_inline_value_in_stream_out)
+ adapt_pooled_methods(
+ methods, inline_stream_in_value_out_methods,
+ _service.adapt_inline_stream_in_value_out)
+ adapt_pooled_methods(
+ methods, inline_stream_in_stream_out_methods,
+ _service.adapt_inline_stream_in_stream_out)
+ adapt_unpooled_methods(
+ methods, event_value_in_value_out_methods,
+ _service.adapt_event_value_in_value_out)
+ adapt_unpooled_methods(
+ methods, event_value_in_stream_out_methods,
+ _service.adapt_event_value_in_stream_out)
+ adapt_unpooled_methods(
+ methods, event_stream_in_value_out_methods,
+ _service.adapt_event_stream_in_value_out)
+ adapt_unpooled_methods(
+ methods, event_stream_in_stream_out_methods,
+ _service.adapt_event_stream_in_stream_out)
+
+ return methods
+
+
+def servicer(
+ pool,
+ inline_value_in_value_out_methods=None,
+ inline_value_in_stream_out_methods=None,
+ inline_stream_in_value_out_methods=None,
+ inline_stream_in_stream_out_methods=None,
+ event_value_in_value_out_methods=None,
+ event_value_in_stream_out_methods=None,
+ event_stream_in_value_out_methods=None,
+ event_stream_in_stream_out_methods=None,
+ multi_method=None):
+ """Creates a base_interfaces.Servicer.
+
+ The key sets of the passed dictionaries must be disjoint. It is guaranteed
+ that any passed MultiMethod implementation will only be called to service an
+ RPC if the RPC method name is not present in the key sets of the passed
+ dictionaries.
+
+ Args:
+ pool: A thread pool.
+ inline_value_in_value_out_methods: A dictionary mapping method names to
+ interfaces.InlineValueInValueOutMethod implementations.
+ inline_value_in_stream_out_methods: A dictionary mapping method names to
+ interfaces.InlineValueInStreamOutMethod implementations.
+ inline_stream_in_value_out_methods: A dictionary mapping method names to
+ interfaces.InlineStreamInValueOutMethod implementations.
+ inline_stream_in_stream_out_methods: A dictionary mapping method names to
+ interfaces.InlineStreamInStreamOutMethod implementations.
+ event_value_in_value_out_methods: A dictionary mapping method names to
+ interfaces.EventValueInValueOutMethod implementations.
+ event_value_in_stream_out_methods: A dictionary mapping method names to
+ interfaces.EventValueInStreamOutMethod implementations.
+ event_stream_in_value_out_methods: A dictionary mapping method names to
+ interfaces.EventStreamInValueOutMethod implementations.
+ event_stream_in_stream_out_methods: A dictionary mapping method names to
+ interfaces.EventStreamInStreamOutMethod implementations.
+ multi_method: An implementation of interfaces.MultiMethod.
+
+ Returns:
+ A base_interfaces.Servicer that services RPCs via the given implementations.
+ """
+ methods = _aggregate_methods(
+ pool,
+ inline_value_in_value_out_methods,
+ inline_value_in_stream_out_methods,
+ inline_stream_in_value_out_methods,
+ inline_stream_in_stream_out_methods,
+ event_value_in_value_out_methods,
+ event_value_in_stream_out_methods,
+ event_stream_in_value_out_methods,
+ event_stream_in_stream_out_methods)
+
+ return _BaseServicer(methods, multi_method)
+
+
+def server():
+ """Creates an interfaces.Server.
+
+ Returns:
+ An interfaces.Server.
+ """
+ return _Server()
+
+
+def stub(front, pool):
+ """Creates an interfaces.Stub.
+
+ Args:
+ front: A base_interfaces.Front.
+ pool: A futures.ThreadPoolExecutor.
+
+ Returns:
+ An interfaces.Stub that performs RPCs via the given base_interfaces.Front.
+ """
+ return _Stub(front, pool)
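
_BaseServicer above is essentially a name-keyed dispatch table with a catch-all fallback: exact-name methods win, the multi_method is consulted only for unknown names, and an unknown name with no fallback is an error. The standalone sketch below restates that lookup order with hypothetical names (Dispatcher, NoSuchMethodError); the real servicer additionally translates the Face-layer NoSuchMethodError into the base layer's.

    class NoSuchMethodError(Exception):
        """Raised when neither the table nor the fallback recognizes the name."""


    class Dispatcher(object):
        """Name-keyed dispatch with an optional catch-all fallback."""

        def __init__(self, methods, fallback=None):
            self._methods = methods
            self._fallback = fallback

        def dispatch(self, name, *args):
            method = self._methods.get(name)
            if method is not None:
                return method(*args)  # an exact-name method wins
            elif self._fallback is not None:
                return self._fallback(name, *args)  # unknown names fall through
            else:
                raise NoSuchMethodError(name)


    if __name__ == '__main__':
        dispatcher = Dispatcher(
            {'square': lambda x: x * x},
            fallback=lambda name, x: '%s is not registered (argument: %s)' % (
                name, x))
        print(dispatcher.dispatch('square', 7))  # 49
        print(dispatcher.dispatch('cube', 7))    # handled by the fallback
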
diff --git a/src/python/_framework/face/interfaces.py b/src/python/_framework/face/interfaces.py
new file mode 100644
index 0000000000..0cc7c70df3
--- /dev/null
+++ b/src/python/_framework/face/interfaces.py
@@ -0,0 +1,545 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Interfaces for the face layer of RPC Framework."""
+
+import abc
+
+# exceptions, abandonment, and future are referenced only in specification
+# (docstrings) in this module.
+from _framework.face import exceptions # pylint: disable=unused-import
+from _framework.foundation import abandonment # pylint: disable=unused-import
+from _framework.foundation import future # pylint: disable=unused-import
+
+
+class CancellableIterator(object):
+ """Implements the Iterator protocol and affords a cancel method."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def __iter__(self):
+ """Returns the self object in accordance with the Iterator protocol."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def next(self):
+ """Returns a value or raises StopIteration per the Iterator protocol."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def cancel(self):
+ """Requests cancellation of whatever computation underlies this iterator."""
+ raise NotImplementedError()
+
+
+# Constants that categorize RPC abortion.
+# TODO(nathaniel): Learn and use Python's enum library for this de facto
+# enumerated type
+CANCELLED = 'abortion: cancelled'
+EXPIRED = 'abortion: expired'
+NETWORK_FAILURE = 'abortion: network failure'
+SERVICED_FAILURE = 'abortion: serviced failure'
+SERVICER_FAILURE = 'abortion: servicer failure'
+
+
+class RpcContext(object):
+ """Provides RPC-related information and action."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def is_active(self):
+ """Describes whether the RPC is active or has terminated."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def time_remaining(self):
+ """Describes the length of allowed time remaining for the RPC.
+
+ Returns:
+ A nonnegative float indicating the length of allowed time in seconds
+ remaining for the RPC to complete before it is considered to have timed
+ out.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_abortion_callback(self, abortion_callback):
+ """Registers a callback to be called if the RPC is aborted.
+
+ Args:
+ abortion_callback: A callable to be called and passed one of CANCELLED,
+ EXPIRED, NETWORK_FAILURE, SERVICED_FAILURE, or SERVICER_FAILURE in the
+ event of RPC abortion.
+ """
+ raise NotImplementedError()
+
+
+class InlineValueInValueOutMethod(object):
+ """A type for inline unary-request-unary-response RPC methods."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def service(self, request, context):
+ """Services an RPC that accepts one value and produces one value.
+
+ Args:
+ request: The single request value for the RPC.
+ context: An RpcContext object.
+
+ Returns:
+ The single response value for the RPC.
+
+ Raises:
+ abandonment.Abandoned: If no response is necessary because the RPC has
+ been aborted.
+ """
+ raise NotImplementedError()
+
+
+class InlineValueInStreamOutMethod(object):
+ """A type for inline unary-request-stream-response RPC methods."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def service(self, request, context):
+ """Services an RPC that accepts one value and produces a stream of values.
+
+ Args:
+ request: The single request value for the RPC.
+ context: An RpcContext object.
+
+ Yields:
+ The values that comprise the response stream of the RPC.
+
+ Raises:
+ abandonment.Abandoned: If completing the response stream is not necessary
+ because the RPC has been aborted.
+ """
+ raise NotImplementedError()
+
+
+class InlineStreamInValueOutMethod(object):
+ """A type for inline stream-request-unary-response RPC methods."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def service(self, request_iterator, context):
+ """Services an RPC that accepts a stream of values and produces one value.
+
+ Args:
+ request_iterator: An iterator that yields the request values of the RPC.
+ Drawing values from this iterator may also raise exceptions.RpcError to
+ indicate abortion of the RPC.
+ context: An RpcContext object.
+
+ Yields:
+ The values that comprise the response stream of the RPC.
+
+ Raises:
+ abandonment.Abandoned: If no response is necessary because the RPC has
+ been aborted.
+ exceptions.RpcError: Implementations of this method must not deliberately
+ raise exceptions.RpcError but may allow such errors raised by the
+ request_iterator passed to them to propagate through their bodies
+ uncaught.
+ """
+ raise NotImplementedError()
+
+
+class InlineStreamInStreamOutMethod(object):
+ """A type for inline stream-request-stream-response RPC methods."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def service(self, request_iterator, context):
+ """Services an RPC that accepts and produces streams of values.
+
+ Args:
+ request_iterator: An iterator that yields the request values of the RPC.
+ Drawing values from this iterator may also raise exceptions.RpcError to
+ indicate abortion of the RPC.
+ context: An RpcContext object.
+
+ Yields:
+ The values that comprise the response stream of the RPC.
+
+ Raises:
+ abandonment.Abandoned: If completing the response stream is not necessary
+ because the RPC has been aborted.
+ exceptions.RpcError: Implementations of this method must not deliberately
+ raise exceptions.RpcError but may allow such errors raised by the
+ request_iterator passed to them to propagate through their bodies
+ uncaught.
+ """
+ raise NotImplementedError()
+
+
+class EventValueInValueOutMethod(object):
+ """A type for event-driven unary-request-unary-response RPC methods."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def service(self, request, response_callback, context):
+ """Services an RPC that accepts one value and produces one value.
+
+ Args:
+ request: The single request value for the RPC.
+ response_callback: A callback to be called to accept the response value of
+ the RPC.
+ context: An RpcContext object.
+
+ Raises:
+ abandonment.Abandoned: May or may not be raised when the RPC has been
+ aborted.
+ """
+ raise NotImplementedError()
+
+
+class EventValueInStreamOutMethod(object):
+ """A type for event-driven unary-request-stream-response RPC methods."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def service(self, request, response_consumer, context):
+ """Services an RPC that accepts one value and produces a stream of values.
+
+ Args:
+ request: The single request value for the RPC.
+ response_consumer: A stream.Consumer to be called to accept the response
+ values of the RPC.
+ context: An RpcContext object.
+
+ Raises:
+ abandonment.Abandoned: May or may not be raised when the RPC has been
+ aborted.
+ """
+ raise NotImplementedError()
+
+
+class EventStreamInValueOutMethod(object):
+ """A type for event-driven stream-request-unary-response RPC methods."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def service(self, response_callback, context):
+ """Services an RPC that accepts a stream of values and produces one value.
+
+ Args:
+ response_callback: A callback to be called to accept the response value of
+ the RPC.
+ context: An RpcContext object.
+
+ Returns:
+ A stream.Consumer with which to accept the request values of the RPC. The
+ consumer returned from this method may or may not be invoked to
+ completion: in the case of RPC abortion, RPC Framework will simply stop
+ passing values to this object. Implementations must not assume that this
+ object will be called to completion of the request stream or even called
+ at all.
+
+ Raises:
+ abandonment.Abandoned: May or may not be raised when the RPC has been
+ aborted.
+ """
+ raise NotImplementedError()
+
+
+class EventStreamInStreamOutMethod(object):
+ """A type for event-driven stream-request-stream-response RPC methods."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def service(self, response_consumer, context):
+ """Services an RPC that accepts and produces streams of values.
+
+ Args:
+ response_consumer: A stream.Consumer to be called to accept the response
+ values of the RPC.
+ context: An RpcContext object.
+
+ Returns:
+ A stream.Consumer with which to accept the request values of the RPC. The
+ consumer returned from this method may or may not be invoked to
+ completion: in the case of RPC abortion, RPC Framework will simply stop
+ passing values to this object. Implementations must not assume that this
+ object will be called to completion of the request stream or even called
+ at all.
+
+ Raises:
+ abandonment.Abandoned: May or may not be raised when the RPC has been
+ aborted.
+ """
+ raise NotImplementedError()
+
+
+class MultiMethod(object):
+ """A general type able to service many RPC methods."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def service(self, name, response_consumer, context):
+ """Services an RPC.
+
+ Args:
+ name: The RPC method name.
+ response_consumer: A stream.Consumer to be called to accept the response
+ values of the RPC.
+ context: An RpcContext object.
+
+ Returns:
+ A stream.Consumer with which to accept the request values of the RPC. The
+ consumer returned from this method may or may not be invoked to
+ completion: in the case of RPC abortion, RPC Framework will simply stop
+ passing values to this object. Implementations must not assume that this
+ object will be called to completion of the request stream or even called
+ at all.
+
+ Raises:
+ abandonment.Abandoned: May or may not be raised when the RPC has been
+ aborted.
+ exceptions.NoSuchMethodError: If this MultiMethod does not recognize the
+ given RPC method name and is not able to service the RPC.
+ """
+ raise NotImplementedError()
+
+
+class Server(object):
+ """Specification of a running server that services RPCs."""
+ __metaclass__ = abc.ABCMeta
+
+
+class Call(object):
+ """Invocation-side representation of an RPC.
+
+ Attributes:
+ context: An RpcContext affording information about the RPC.
+ """
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def cancel(self):
+ """Requests cancellation of the RPC."""
+ raise NotImplementedError()
+
+
+class Stub(object):
+ """Affords RPC methods to callers."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def blocking_value_in_value_out(self, name, request, timeout):
+ """Invokes a unary-request-unary-response RPC method.
+
+ This method blocks until either returning the response value of the RPC
+ (in the event of RPC completion) or raising an exception (in the event of
+ RPC abortion).
+
+ Args:
+ name: The RPC method name.
+ request: The request value for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+
+ Returns:
+ The response value for the RPC.
+
+ Raises:
+ exceptions.RpcError: Indicating that the RPC was aborted.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def future_value_in_value_out(self, name, request, timeout):
+ """Invokes a unary-request-unary-response RPC method.
+
+ Args:
+ name: The RPC method name.
+ request: The request value for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+
+ Returns:
+ A future.Future representing the RPC. In the event of RPC completion, the
+ returned Future will return an outcome indicating that the RPC returned
+ the response value of the RPC. In the event of RPC abortion, the
+ returned Future will return an outcome indicating that the RPC raised
+ an exceptions.RpcError.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def inline_value_in_stream_out(self, name, request, timeout):
+ """Invokes a unary-request-stream-response RPC method.
+
+ Args:
+ name: The RPC method name.
+ request: The request value for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+
+ Returns:
+ A CancellableIterator that yields the response values of the RPC and
+ affords RPC cancellation. Drawing response values from the returned
+ CancellableIterator may raise exceptions.RpcError indicating abortion of
+ the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def blocking_stream_in_value_out(self, name, request_iterator, timeout):
+ """Invokes a stream-request-unary-response RPC method.
+
+ This method blocks until either returning the response value of the RPC
+ (in the event of RPC completion) or raising an exception (in the event of
+ RPC abortion).
+
+ Args:
+ name: The RPC method name.
+ request_iterator: An iterator that yields the request values of the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+
+ Returns:
+ The response value for the RPC.
+
+ Raises:
+ exceptions.RpcError: Indicating that the RPC was aborted.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def future_stream_in_value_out(self, name, request_iterator, timeout):
+ """Invokes a stream-request-unary-response RPC method.
+
+ Args:
+ name: The RPC method name.
+ request_iterator: An iterator that yields the request values of the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+
+ Returns:
+      A future.Future representing the RPC. If the RPC completes, the returned
+      Future's outcome will carry the response value of the RPC; if the RPC is
+      aborted, the outcome will indicate that the RPC raised an
+      exceptions.RpcError.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def inline_stream_in_stream_out(self, name, request_iterator, timeout):
+ """Invokes a stream-request-stream-response RPC method.
+
+ Args:
+ name: The RPC method name.
+ request_iterator: An iterator that yields the request values of the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+
+ Returns:
+ A CancellableIterator that yields the response values of the RPC and
+ affords RPC cancellation. Drawing response values from the returned
+ CancellableIterator may raise exceptions.RpcError indicating abortion of
+ the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event_value_in_value_out(
+ self, name, request, response_callback, abortion_callback, timeout):
+ """Event-driven invocation of a unary-request-unary-response RPC method.
+
+ Args:
+ name: The RPC method name.
+ request: The request value for the RPC.
+ response_callback: A callback to be called to accept the response value
+ of the RPC.
+ abortion_callback: A callback to be called to accept one of CANCELLED,
+ EXPIRED, NETWORK_FAILURE, or SERVICER_FAILURE in the event of RPC
+ abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+
+ Returns:
+ A Call object for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event_value_in_stream_out(
+ self, name, request, response_consumer, abortion_callback, timeout):
+ """Event-driven invocation of a unary-request-stream-response RPC method.
+
+ Args:
+ name: The RPC method name.
+ request: The request value for the RPC.
+ response_consumer: A stream.Consumer to be called to accept the response
+ values of the RPC.
+ abortion_callback: A callback to be called to accept one of CANCELLED,
+ EXPIRED, NETWORK_FAILURE, or SERVICER_FAILURE in the event of RPC
+ abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+
+ Returns:
+ A Call object for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event_stream_in_value_out(
+ self, name, response_callback, abortion_callback, timeout):
+ """Event-driven invocation of a unary-request-unary-response RPC method.
+
+ Args:
+ name: The RPC method name.
+ response_callback: A callback to be called to accept the response value
+ of the RPC.
+ abortion_callback: A callback to be called to accept one of CANCELLED,
+ EXPIRED, NETWORK_FAILURE, or SERVICER_FAILURE in the event of RPC
+ abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+
+ Returns:
+ A pair of a Call object for the RPC and a stream.Consumer to which the
+ request values of the RPC should be passed.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event_stream_in_stream_out(
+ self, name, response_consumer, abortion_callback, timeout):
+ """Event-driven invocation of a unary-request-stream-response RPC method.
+
+ Args:
+ name: The RPC method name.
+ response_consumer: A stream.Consumer to be called to accept the response
+ values of the RPC.
+ abortion_callback: A callback to be called to accept one of CANCELLED,
+ EXPIRED, NETWORK_FAILURE, or SERVICER_FAILURE in the event of RPC
+ abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+
+ Returns:
+ A pair of a Call object for the RPC and a stream.Consumer to which the
+ request values of the RPC should be passed.
+ """
+ raise NotImplementedError()
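
The Stub interface above is the whole invocation-side surface of the Face layer, so a brief sketch may help contrast its blocking and event-driven entry points from a caller's perspective. Everything named below ('Divide', my_stub, the request value) is a placeholder, and the sketch assumes a concrete Stub implementation and the _framework package are already in hand.

    # Hypothetical sketch only: 'Divide', my_stub, and request are placeholders.
    import threading

    def call_divide_blocking(my_stub, request):
      # Blocks until the RPC completes or aborts; abortion surfaces as an
      # exceptions.RpcError (e.g. ExpirationError once the 5-second timeout
      # elapses).
      return my_stub.blocking_value_in_value_out('Divide', request, 5)

    def call_divide_event_driven(my_stub, request):
      # Event-driven form: callbacks fire on completion or abortion, and the
      # returned Call affords cancellation while the RPC is in flight.
      done = threading.Event()
      outcome = {}

      def on_response(response):
        outcome['response'] = response
        done.set()

      def on_abortion(abortion):  # one of CANCELLED, EXPIRED, etc.
        outcome['abortion'] = abortion
        done.set()

      call = my_stub.event_value_in_value_out(
          'Divide', request, on_response, on_abortion, 5)
      if not done.wait(10):
        call.cancel()
      return outcome
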
diff --git a/src/python/_framework/face/testing/__init__.py b/src/python/_framework/face/testing/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/src/python/_framework/face/testing/__init__.py
diff --git a/src/python/_framework/face/testing/base_util.py b/src/python/_framework/face/testing/base_util.py
new file mode 100644
index 0000000000..d9ccb3af8f
--- /dev/null
+++ b/src/python/_framework/face/testing/base_util.py
@@ -0,0 +1,102 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Utilities for creating Base-layer objects for use in Face-layer tests."""
+
+import abc
+
+# interfaces is referenced from specification in this module.
+from _framework.base import util as _base_util
+from _framework.base.packets import implementations
+from _framework.base.packets import in_memory
+from _framework.base.packets import interfaces # pylint: disable=unused-import
+from _framework.foundation import logging_pool
+
+_POOL_SIZE_LIMIT = 20
+
+_MAXIMUM_TIMEOUT = 90
+
+
+class LinkedPair(object):
+ """A Front and Back that are linked to one another.
+
+ Attributes:
+ front: An interfaces.Front.
+ back: An interfaces.Back.
+ """
+
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def shut_down(self):
+ """Shuts down this object and releases its resources."""
+ raise NotImplementedError()
+
+
+class _LinkedPair(LinkedPair):
+
+ def __init__(self, front, back, pools):
+ self.front = front
+ self.back = back
+ self._pools = pools
+
+ def shut_down(self):
+ _base_util.wait_for_idle(self.front)
+ _base_util.wait_for_idle(self.back)
+
+ for pool in self._pools:
+ pool.shutdown(wait=True)
+
+
+def linked_pair(servicer, default_timeout):
+ """Creates a Server and Stub linked together for use."""
+ link_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
+ front_work_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
+ front_transmission_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
+ front_utility_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
+ back_work_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
+ back_transmission_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
+ back_utility_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
+ pools = (
+ link_pool,
+ front_work_pool, front_transmission_pool, front_utility_pool,
+ back_work_pool, back_transmission_pool, back_utility_pool)
+
+ link = in_memory.Link(link_pool)
+ front = implementations.front(
+ front_work_pool, front_transmission_pool, front_utility_pool)
+ back = implementations.back(
+ servicer, back_work_pool, back_transmission_pool, back_utility_pool,
+ default_timeout, _MAXIMUM_TIMEOUT)
+ front.join_rear_link(link)
+ link.join_fore_link(front)
+ back.join_fore_link(link)
+ link.join_rear_link(back)
+
+ return _LinkedPair(front, back, pools)
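
linked_pair wires seven logging pools, an in-memory Link, and a Front/Back pair together, which makes the shut-down ordering above easy to miss. The following is a minimal usage sketch under stated assumptions: my_servicer and the exercise callable stand in for whatever the test supplies, the 6-second default timeout is arbitrary, and the Face-layer wrapping done by the other test modules is omitted.

    # Minimal sketch; my_servicer and the exercise callable are placeholders.
    from _framework.face.testing import base_util

    def run_with_linked_pair(my_servicer, exercise):
      pair = base_util.linked_pair(my_servicer, 6)
      try:
        # The Front is the invocation-side object and the Back the
        # service-side one; Face-layer servers and stubs are layered on top.
        exercise(pair.front, pair.back)
      finally:
        # Waits for both sides to go idle, then shuts down all of the pools.
        pair.shut_down()
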
diff --git a/src/python/_framework/face/testing/blocking_invocation_inline_service_test_case.py b/src/python/_framework/face/testing/blocking_invocation_inline_service_test_case.py
new file mode 100644
index 0000000000..0b1a2f0bd2
--- /dev/null
+++ b/src/python/_framework/face/testing/blocking_invocation_inline_service_test_case.py
@@ -0,0 +1,223 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A test to verify an implementation of the Face layer of RPC Framework."""
+
+# unittest is referenced from specification in this module.
+import abc
+import unittest # pylint: disable=unused-import
+
+from _framework.face import exceptions
+from _framework.face.testing import control
+from _framework.face.testing import coverage
+from _framework.face.testing import digest
+from _framework.face.testing import stock_service
+from _framework.face.testing import test_case
+
+_TIMEOUT = 3
+
+
+class BlockingInvocationInlineServiceTestCase(
+ test_case.FaceTestCase, coverage.BlockingCoverage):
+ """A test of the Face layer of RPC Framework.
+
+ Concrete subclasses must also extend unittest.TestCase.
+ """
+ __metaclass__ = abc.ABCMeta
+
+ def setUp(self):
+ """See unittest.TestCase.setUp for full specification.
+
+ Overriding implementations must call this implementation.
+ """
+ self.control = control.PauseFailControl()
+ self.digest = digest.digest(
+ stock_service.STOCK_TEST_SERVICE, self.control, None)
+
+ self.server, self.stub, self.memo = self.set_up_implementation(
+ self.digest.name, self.digest.methods,
+ self.digest.inline_unary_unary_methods,
+ self.digest.inline_unary_stream_methods,
+ self.digest.inline_stream_unary_methods,
+ self.digest.inline_stream_stream_methods,
+ {}, {}, {}, {}, None)
+
+ def tearDown(self):
+ """See unittest.TestCase.tearDown for full specification.
+
+ Overriding implementations must call this implementation.
+ """
+ self.tear_down_implementation(self.memo)
+
+ def testSuccessfulUnaryRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ response = self.stub.blocking_value_in_value_out(
+ name, request, _TIMEOUT)
+
+ test_messages.verify(request, response, self)
+
+ def testSuccessfulUnaryRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ response_iterator = self.stub.inline_value_in_stream_out(
+ name, request, _TIMEOUT)
+ responses = list(response_iterator)
+
+ test_messages.verify(request, responses, self)
+
+ def testSuccessfulStreamRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ response = self.stub.blocking_stream_in_value_out(
+ name, iter(requests), _TIMEOUT)
+
+ test_messages.verify(requests, response, self)
+
+ def testSuccessfulStreamRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ response_iterator = self.stub.inline_stream_in_stream_out(
+ name, iter(requests), _TIMEOUT)
+ responses = list(response_iterator)
+
+ test_messages.verify(requests, responses, self)
+
+ def testSequentialInvocations(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ first_request = test_messages.request()
+ second_request = test_messages.request()
+
+ first_response = self.stub.blocking_value_in_value_out(
+ name, first_request, _TIMEOUT)
+
+ test_messages.verify(first_request, first_response, self)
+
+ second_response = self.stub.blocking_value_in_value_out(
+ name, second_request, _TIMEOUT)
+
+ test_messages.verify(second_request, second_response, self)
+
+ def testExpiredUnaryRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ with self.control.pause(), self.assertRaises(
+ exceptions.ExpirationError):
+ self.stub.blocking_value_in_value_out(name, request, _TIMEOUT)
+
+ def testExpiredUnaryRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ with self.control.pause(), self.assertRaises(
+ exceptions.ExpirationError):
+ response_iterator = self.stub.inline_value_in_stream_out(
+ name, request, _TIMEOUT)
+ list(response_iterator)
+
+ def testExpiredStreamRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ with self.control.pause(), self.assertRaises(
+ exceptions.ExpirationError):
+ self.stub.blocking_stream_in_value_out(name, iter(requests), _TIMEOUT)
+
+ def testExpiredStreamRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ with self.control.pause(), self.assertRaises(
+ exceptions.ExpirationError):
+ response_iterator = self.stub.inline_stream_in_stream_out(
+ name, iter(requests), _TIMEOUT)
+ list(response_iterator)
+
+ def testFailedUnaryRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ with self.control.fail(), self.assertRaises(exceptions.ServicerError):
+ self.stub.blocking_value_in_value_out(name, request, _TIMEOUT)
+
+ def testFailedUnaryRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ with self.control.fail(), self.assertRaises(exceptions.ServicerError):
+ response_iterator = self.stub.inline_value_in_stream_out(
+ name, request, _TIMEOUT)
+ list(response_iterator)
+
+ def testFailedStreamRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ with self.control.fail(), self.assertRaises(exceptions.ServicerError):
+ self.stub.blocking_stream_in_value_out(name, iter(requests), _TIMEOUT)
+
+ def testFailedStreamRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ with self.control.fail(), self.assertRaises(exceptions.ServicerError):
+ response_iterator = self.stub.inline_stream_in_stream_out(
+ name, iter(requests), _TIMEOUT)
+ list(response_iterator)
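
BlockingInvocationInlineServiceTestCase is deliberately abstract: it supplies the test bodies but leaves set_up_implementation and tear_down_implementation (declared on test_case.FaceTestCase, which is not shown in this section) to a concrete subclass that also extends unittest.TestCase. A hypothetical concrete binding would look roughly like the following; the class and module names are illustrative.

    # Hypothetical concrete test module; the FaceTestCase overrides are elided.
    import unittest

    from _framework.face.testing import (
        blocking_invocation_inline_service_test_case as blocking_test_case)


    class BlockingInvocationInlineServiceTest(
        blocking_test_case.BlockingInvocationInlineServiceTestCase,
        unittest.TestCase):
      # Override set_up_implementation(...) and tear_down_implementation(...)
      # here to bind the inherited tests to one Face-layer implementation.
      pass


    if __name__ == '__main__':
      unittest.main()
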
diff --git a/src/python/_framework/face/testing/callback.py b/src/python/_framework/face/testing/callback.py
new file mode 100644
index 0000000000..7a20869abe
--- /dev/null
+++ b/src/python/_framework/face/testing/callback.py
@@ -0,0 +1,94 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A utility useful in tests of asynchronous, event-driven interfaces."""
+
+import threading
+
+from _framework.foundation import stream
+
+
+class Callback(stream.Consumer):
+ """A utility object useful in tests of asynchronous code."""
+
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._unary_response = None
+ self._streamed_responses = []
+ self._completed = False
+ self._abortion = None
+
+ def abort(self, abortion):
+ with self._condition:
+ self._abortion = abortion
+ self._condition.notify_all()
+
+ def complete(self, unary_response):
+ with self._condition:
+ self._unary_response = unary_response
+ self._completed = True
+ self._condition.notify_all()
+
+ def consume(self, streamed_response):
+ with self._condition:
+ self._streamed_responses.append(streamed_response)
+
+ def terminate(self):
+ with self._condition:
+ self._completed = True
+ self._condition.notify_all()
+
+ def consume_and_terminate(self, streamed_response):
+ with self._condition:
+ self._streamed_responses.append(streamed_response)
+ self._completed = True
+ self._condition.notify_all()
+
+ def block_until_terminated(self):
+ with self._condition:
+ while self._abortion is None and not self._completed:
+ self._condition.wait()
+
+ def response(self):
+ with self._condition:
+ if self._abortion is None:
+ return self._unary_response
+ else:
+ raise AssertionError('Aborted with abortion "%s"!' % self._abortion)
+
+ def responses(self):
+ with self._condition:
+ if self._abortion is None:
+ return list(self._streamed_responses)
+ else:
+ raise AssertionError('Aborted with abortion "%s"!' % self._abortion)
+
+ def abortion(self):
+ with self._condition:
+ return self._abortion
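
The Callback above is handed to the stub by the event-driven test cases later in this diff both as the stream.Consumer for responses and as the abortion callback. A condensed sketch of that pattern follows; my_stub, 'Sum', and the request value are placeholders.

    # Condensed sketch of the pattern used by the event-driven test cases.
    from _framework.face.testing import callback as testing_callback

    def invoke_and_collect(my_stub, request):
      callback = testing_callback.Callback()
      # The Callback serves as both the response stream.Consumer and the
      # abortion callback for the three-second RPC.
      my_stub.event_value_in_stream_out(
          'Sum', request, callback, callback.abort, 3)
      callback.block_until_terminated()
      if callback.abortion() is None:
        return callback.responses()  # the streamed responses, in order
      raise RuntimeError('RPC aborted: %s' % callback.abortion())
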
diff --git a/src/python/_framework/face/testing/control.py b/src/python/_framework/face/testing/control.py
new file mode 100644
index 0000000000..3960c4e649
--- /dev/null
+++ b/src/python/_framework/face/testing/control.py
@@ -0,0 +1,87 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Code for instructing systems under test to block or fail."""
+
+import abc
+import contextlib
+import threading
+
+
+class Control(object):
+ """An object that accepts program control from a system under test.
+
+ Systems under test passed a Control should call its control() method
+ frequently during execution. The control() method may block, raise an
+ exception, or do nothing, all according to the enclosing test's desire for
+ the system under test to simulate hanging, failing, or functioning.
+ """
+
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def control(self):
+ """Potentially does anything."""
+ raise NotImplementedError()
+
+
+class PauseFailControl(Control):
+ """A Control that can be used to pause or fail code under control."""
+
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._paused = False
+ self._fail = False
+
+ def control(self):
+ with self._condition:
+ if self._fail:
+ raise ValueError()
+
+ while self._paused:
+ self._condition.wait()
+
+ @contextlib.contextmanager
+ def pause(self):
+ """Pauses code under control while controlling code is in context."""
+ with self._condition:
+ self._paused = True
+ yield
+ with self._condition:
+ self._paused = False
+ self._condition.notify_all()
+
+ @contextlib.contextmanager
+ def fail(self):
+ """Fails code under control while controlling code is in context."""
+ with self._condition:
+ self._fail = True
+ yield
+ with self._condition:
+ self._fail = False
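
The Control docstring above describes a cooperative protocol: servicing code calls control() at a convenient point, and the enclosing test decides whether that call blocks, raises, or returns. A minimal sketch of both sides is below; slow_square is a placeholder behavior under test, and fail() is used because, unlike pause(), it can be exercised safely from a single thread.

    # Minimal sketch of the Control protocol; slow_square is a placeholder.
    from _framework.face.testing import control as testing_control

    def slow_square(value, program_control):
      # The system under test cedes control at a convenient point; this call
      # may block, raise, or return immediately, at the test's discretion.
      program_control.control()
      return value * value

    pause_fail_control = testing_control.PauseFailControl()

    with pause_fail_control.fail():
      try:
        slow_square(3, pause_fail_control)  # raises ValueError while failing
      except ValueError:
        pass

    assert slow_square(3, pause_fail_control) == 9  # runs normally afterward
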
diff --git a/src/python/_framework/face/testing/coverage.py b/src/python/_framework/face/testing/coverage.py
new file mode 100644
index 0000000000..f3aca113fe
--- /dev/null
+++ b/src/python/_framework/face/testing/coverage.py
@@ -0,0 +1,123 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Governs coverage for the tests of the Face layer of RPC Framework."""
+
+import abc
+
+# These classes are only valid when inherited by unittest.TestCases.
+# pylint: disable=invalid-name
+
+
+class BlockingCoverage(object):
+ """Specification of test coverage for blocking behaviors."""
+
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def testSuccessfulUnaryRequestUnaryResponse(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def testSuccessfulUnaryRequestStreamResponse(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def testSuccessfulStreamRequestUnaryResponse(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def testSuccessfulStreamRequestStreamResponse(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def testSequentialInvocations(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def testExpiredUnaryRequestUnaryResponse(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def testExpiredUnaryRequestStreamResponse(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def testExpiredStreamRequestUnaryResponse(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def testExpiredStreamRequestStreamResponse(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def testFailedUnaryRequestUnaryResponse(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def testFailedUnaryRequestStreamResponse(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def testFailedStreamRequestUnaryResponse(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def testFailedStreamRequestStreamResponse(self):
+ raise NotImplementedError()
+
+
+class FullCoverage(BlockingCoverage):
+ """Specification of test coverage for non-blocking behaviors."""
+
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def testParallelInvocations(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def testWaitingForSomeButNotAllParallelInvocations(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def testCancelledUnaryRequestUnaryResponse(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def testCancelledUnaryRequestStreamResponse(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def testCancelledStreamRequestUnaryResponse(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def testCancelledStreamRequestStreamResponse(self):
+ raise NotImplementedError()
diff --git a/src/python/_framework/face/testing/digest.py b/src/python/_framework/face/testing/digest.py
new file mode 100644
index 0000000000..8d1291c975
--- /dev/null
+++ b/src/python/_framework/face/testing/digest.py
@@ -0,0 +1,446 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Code for making a service.TestService more amenable to use in tests."""
+
+import collections
+import threading
+
+# testing_control, interfaces, and testing_service are referenced from
+# specification in this module.
+from _framework.face import exceptions
+from _framework.face import interfaces as face_interfaces
+from _framework.face.testing import control as testing_control # pylint: disable=unused-import
+from _framework.face.testing import interfaces # pylint: disable=unused-import
+from _framework.face.testing import service as testing_service # pylint: disable=unused-import
+from _framework.foundation import stream
+from _framework.foundation import stream_util
+
+_IDENTITY = lambda x: x
+
+
+class TestServiceDigest(
+ collections.namedtuple(
+ 'TestServiceDigest',
+ ['name',
+ 'methods',
+ 'inline_unary_unary_methods',
+ 'inline_unary_stream_methods',
+ 'inline_stream_unary_methods',
+ 'inline_stream_stream_methods',
+ 'event_unary_unary_methods',
+ 'event_unary_stream_methods',
+ 'event_stream_unary_methods',
+ 'event_stream_stream_methods',
+ 'multi_method',
+ 'unary_unary_messages_sequences',
+ 'unary_stream_messages_sequences',
+ 'stream_unary_messages_sequences',
+ 'stream_stream_messages_sequences'])):
+ """A transformation of a service.TestService.
+
+ Attributes:
+ name: The RPC service name to be used in the test.
+ methods: A sequence of interfaces.Method objects describing the RPC
+ methods that will be called during the test.
+ inline_unary_unary_methods: A dict from method name to
+ face_interfaces.InlineValueInValueOutMethod object to be used in tests of
+ in-line calls to behaviors under test.
+ inline_unary_stream_methods: A dict from method name to
+ face_interfaces.InlineValueInStreamOutMethod object to be used in tests of
+ in-line calls to behaviors under test.
+ inline_stream_unary_methods: A dict from method name to
+ face_interfaces.InlineStreamInValueOutMethod object to be used in tests of
+ in-line calls to behaviors under test.
+ inline_stream_stream_methods: A dict from method name to
+ face_interfaces.InlineStreamInStreamOutMethod object to be used in tests
+ of in-line calls to behaviors under test.
+ event_unary_unary_methods: A dict from method name to
+ face_interfaces.EventValueInValueOutMethod object to be used in tests of
+ event-driven calls to behaviors under test.
+ event_unary_stream_methods: A dict from method name to
+ face_interfaces.EventValueInStreamOutMethod object to be used in tests of
+ event-driven calls to behaviors under test.
+ event_stream_unary_methods: A dict from method name to
+ face_interfaces.EventStreamInValueOutMethod object to be used in tests of
+ event-driven calls to behaviors under test.
+ event_stream_stream_methods: A dict from method name to
+ face_interfaces.EventStreamInStreamOutMethod object to be used in tests of
+ event-driven calls to behaviors under test.
+ multi_method: A face_interfaces.MultiMethod to be used in tests of generic
+ calls to behaviors under test.
+ unary_unary_messages_sequences: A dict from method name to sequence of
+ service.UnaryUnaryTestMessages objects to be used to test the method
+ with the given name.
+ unary_stream_messages_sequences: A dict from method name to sequence of
+ service.UnaryStreamTestMessages objects to be used to test the method
+ with the given name.
+ stream_unary_messages_sequences: A dict from method name to sequence of
+ service.StreamUnaryTestMessages objects to be used to test the method
+ with the given name.
+ stream_stream_messages_sequences: A dict from method name to sequence of
+ service.StreamStreamTestMessages objects to be used to test the
+ method with the given name.
+ """
+
+
+class _BufferingConsumer(stream.Consumer):
+ """A trivial Consumer that dumps what it consumes in a user-mutable buffer."""
+
+ def __init__(self):
+ self.consumed = []
+ self.terminated = False
+
+ def consume(self, value):
+ self.consumed.append(value)
+
+ def terminate(self):
+ self.terminated = True
+
+ def consume_and_terminate(self, value):
+ self.consumed.append(value)
+ self.terminated = True
+
+
+class _InlineUnaryUnaryMethod(face_interfaces.InlineValueInValueOutMethod):
+
+ def __init__(self, unary_unary_test_method, control):
+ self._test_method = unary_unary_test_method
+ self._control = control
+
+ def service(self, request, context):
+ response_list = []
+ self._test_method.service(
+ request, response_list.append, context, self._control)
+ return response_list.pop(0)
+
+
+class _EventUnaryUnaryMethod(face_interfaces.EventValueInValueOutMethod):
+
+ def __init__(self, unary_unary_test_method, control, pool):
+ self._test_method = unary_unary_test_method
+ self._control = control
+ self._pool = pool
+
+ def service(self, request, response_callback, context):
+ if self._pool is None:
+ self._test_method.service(
+ request, response_callback, context, self._control)
+ else:
+ self._pool.submit(
+ self._test_method.service, request, response_callback, context,
+ self._control)
+
+
+class _InlineUnaryStreamMethod(face_interfaces.InlineValueInStreamOutMethod):
+
+ def __init__(self, unary_stream_test_method, control):
+ self._test_method = unary_stream_test_method
+ self._control = control
+
+ def service(self, request, context):
+ response_consumer = _BufferingConsumer()
+ self._test_method.service(
+ request, response_consumer, context, self._control)
+ for response in response_consumer.consumed:
+ yield response
+
+
+class _EventUnaryStreamMethod(face_interfaces.EventValueInStreamOutMethod):
+
+ def __init__(self, unary_stream_test_method, control, pool):
+ self._test_method = unary_stream_test_method
+ self._control = control
+ self._pool = pool
+
+ def service(self, request, response_consumer, context):
+ if self._pool is None:
+ self._test_method.service(
+ request, response_consumer, context, self._control)
+ else:
+ self._pool.submit(
+ self._test_method.service, request, response_consumer, context,
+ self._control)
+
+
+class _InlineStreamUnaryMethod(face_interfaces.InlineStreamInValueOutMethod):
+
+ def __init__(self, stream_unary_test_method, control):
+ self._test_method = stream_unary_test_method
+ self._control = control
+
+ def service(self, request_iterator, context):
+ response_list = []
+ request_consumer = self._test_method.service(
+ response_list.append, context, self._control)
+ for request in request_iterator:
+ request_consumer.consume(request)
+ request_consumer.terminate()
+ return response_list.pop(0)
+
+
+class _EventStreamUnaryMethod(face_interfaces.EventStreamInValueOutMethod):
+
+ def __init__(self, stream_unary_test_method, control, pool):
+ self._test_method = stream_unary_test_method
+ self._control = control
+ self._pool = pool
+
+ def service(self, response_callback, context):
+ request_consumer = self._test_method.service(
+ response_callback, context, self._control)
+ if self._pool is None:
+ return request_consumer
+ else:
+ return stream_util.ThreadSwitchingConsumer(request_consumer, self._pool)
+
+
+class _InlineStreamStreamMethod(face_interfaces.InlineStreamInStreamOutMethod):
+
+ def __init__(self, stream_stream_test_method, control):
+ self._test_method = stream_stream_test_method
+ self._control = control
+
+ def service(self, request_iterator, context):
+ response_consumer = _BufferingConsumer()
+ request_consumer = self._test_method.service(
+ response_consumer, context, self._control)
+
+ for request in request_iterator:
+ request_consumer.consume(request)
+ while response_consumer.consumed:
+ yield response_consumer.consumed.pop(0)
+ response_consumer.terminate()
+
+
+class _EventStreamStreamMethod(face_interfaces.EventStreamInStreamOutMethod):
+
+ def __init__(self, stream_stream_test_method, control, pool):
+ self._test_method = stream_stream_test_method
+ self._control = control
+ self._pool = pool
+
+ def service(self, response_consumer, context):
+ request_consumer = self._test_method.service(
+ response_consumer, context, self._control)
+ if self._pool is None:
+ return request_consumer
+ else:
+ return stream_util.ThreadSwitchingConsumer(request_consumer, self._pool)
+
+
+class _UnaryConsumer(stream.Consumer):
+ """A Consumer that only allows consumption of exactly one value."""
+
+ def __init__(self, action):
+ self._lock = threading.Lock()
+ self._action = action
+ self._consumed = False
+ self._terminated = False
+
+ def consume(self, value):
+ with self._lock:
+ if self._consumed:
+ raise ValueError('Unary consumer already consumed!')
+ elif self._terminated:
+ raise ValueError('Unary consumer already terminated!')
+ else:
+ self._consumed = True
+
+ self._action(value)
+
+ def terminate(self):
+ with self._lock:
+ if not self._consumed:
+ raise ValueError('Unary consumer hasn\'t yet consumed!')
+ elif self._terminated:
+ raise ValueError('Unary consumer already terminated!')
+ else:
+ self._terminated = True
+
+ def consume_and_terminate(self, value):
+ with self._lock:
+ if self._consumed:
+ raise ValueError('Unary consumer already consumed!')
+ elif self._terminated:
+ raise ValueError('Unary consumer already terminated!')
+ else:
+ self._consumed = True
+ self._terminated = True
+
+ self._action(value)
+
+
+class _UnaryUnaryAdaptation(object):
+
+ def __init__(self, unary_unary_test_method):
+ self._method = unary_unary_test_method
+
+ def service(self, response_consumer, context, control):
+ def action(request):
+ self._method.service(
+ request, response_consumer.consume_and_terminate, context, control)
+ return _UnaryConsumer(action)
+
+
+class _UnaryStreamAdaptation(object):
+
+ def __init__(self, unary_stream_test_method):
+ self._method = unary_stream_test_method
+
+ def service(self, response_consumer, context, control):
+ def action(request):
+ self._method.service(request, response_consumer, context, control)
+ return _UnaryConsumer(action)
+
+
+class _StreamUnaryAdaptation(object):
+
+ def __init__(self, stream_unary_test_method):
+ self._method = stream_unary_test_method
+
+ def service(self, response_consumer, context, control):
+ return self._method.service(
+ response_consumer.consume_and_terminate, context, control)
+
+
+class _MultiMethod(face_interfaces.MultiMethod):
+
+ def __init__(self, methods, control, pool):
+ self._methods = methods
+ self._control = control
+ self._pool = pool
+
+ def service(self, name, response_consumer, context):
+ method = self._methods.get(name, None)
+ if method is None:
+ raise exceptions.NoSuchMethodError(name)
+ elif self._pool is None:
+ return method(response_consumer, context, self._control)
+ else:
+ request_consumer = method(response_consumer, context, self._control)
+ return stream_util.ThreadSwitchingConsumer(request_consumer, self._pool)
+
+
+class _Assembly(
+ collections.namedtuple(
+ '_Assembly',
+ ['methods', 'inlines', 'events', 'adaptations', 'messages'])):
+ """An intermediate structure created when creating a TestServiceDigest."""
+
+
+def _assemble(
+ scenarios, names, inline_method_constructor, event_method_constructor,
+ adapter, control, pool):
+ """Creates an _Assembly from the given scenarios."""
+ methods = []
+ inlines = {}
+ events = {}
+ adaptations = {}
+ messages = {}
+ for name, scenario in scenarios.iteritems():
+ if name in names:
+ raise ValueError('Repeated name "%s"!' % name)
+
+ test_method = scenario[0]
+ inline_method = inline_method_constructor(test_method, control)
+ event_method = event_method_constructor(test_method, control, pool)
+ adaptation = adapter(test_method)
+
+ methods.append(test_method)
+ inlines[name] = inline_method
+ events[name] = event_method
+ adaptations[name] = adaptation
+ messages[name] = scenario[1]
+
+ return _Assembly(methods, inlines, events, adaptations, messages)
+
+
+def digest(service, control, pool):
+ """Creates a TestServiceDigest from a TestService.
+
+ Args:
+ service: A testing_service.TestService.
+ control: A testing_control.Control.
+    pool: A thread pool in which to service RPC methods, or None to service
+      them in the thread that calls for their service.
+
+ Returns:
+ A TestServiceDigest synthesized from the given service.TestService.
+ """
+ names = set()
+
+ unary_unary = _assemble(
+ service.unary_unary_scenarios(), names, _InlineUnaryUnaryMethod,
+ _EventUnaryUnaryMethod, _UnaryUnaryAdaptation, control, pool)
+ names.update(set(unary_unary.inlines))
+
+ unary_stream = _assemble(
+ service.unary_stream_scenarios(), names, _InlineUnaryStreamMethod,
+ _EventUnaryStreamMethod, _UnaryStreamAdaptation, control, pool)
+ names.update(set(unary_stream.inlines))
+
+ stream_unary = _assemble(
+ service.stream_unary_scenarios(), names, _InlineStreamUnaryMethod,
+ _EventStreamUnaryMethod, _StreamUnaryAdaptation, control, pool)
+ names.update(set(stream_unary.inlines))
+
+ stream_stream = _assemble(
+ service.stream_stream_scenarios(), names, _InlineStreamStreamMethod,
+ _EventStreamStreamMethod, _IDENTITY, control, pool)
+ names.update(set(stream_stream.inlines))
+
+ methods = list(unary_unary.methods)
+ methods.extend(unary_stream.methods)
+ methods.extend(stream_unary.methods)
+ methods.extend(stream_stream.methods)
+ adaptations = dict(unary_unary.adaptations)
+ adaptations.update(unary_stream.adaptations)
+ adaptations.update(stream_unary.adaptations)
+ adaptations.update(stream_stream.adaptations)
+
+ return TestServiceDigest(
+ service.name(),
+ methods,
+ unary_unary.inlines,
+ unary_stream.inlines,
+ stream_unary.inlines,
+ stream_stream.inlines,
+ unary_unary.events,
+ unary_stream.events,
+ stream_unary.events,
+ stream_stream.events,
+ _MultiMethod(adaptations, control, pool),
+ unary_unary.messages,
+ unary_stream.messages,
+ stream_unary.messages,
+ stream_stream.messages)
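
digest() is the glue the test cases above rely on: it converts a single TestService into all of the inline and event-driven method dicts that set_up_implementation expects, plus the per-method test-message sequences. The condensed sketch below mirrors the setUp methods earlier in this diff, with the pool left as None so that test methods are serviced inline.

    # Condensed sketch mirroring the test-case setUp methods above.
    from _framework.face.testing import control
    from _framework.face.testing import digest
    from _framework.face.testing import stock_service

    pause_fail_control = control.PauseFailControl()
    service_digest = digest.digest(
        stock_service.STOCK_TEST_SERVICE, pause_fail_control, None)

    # Per-cardinality method dicts and the matching test-message sequences are
    # all keyed by RPC method name.
    inline_methods = service_digest.inline_unary_unary_methods
    event_methods = service_digest.event_unary_unary_methods
    message_sequences = service_digest.unary_unary_messages_sequences
    assert set(inline_methods) == set(event_methods) == set(message_sequences)
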
diff --git a/src/python/_framework/face/testing/event_invocation_synchronous_event_service_test_case.py b/src/python/_framework/face/testing/event_invocation_synchronous_event_service_test_case.py
new file mode 100644
index 0000000000..dba73a9368
--- /dev/null
+++ b/src/python/_framework/face/testing/event_invocation_synchronous_event_service_test_case.py
@@ -0,0 +1,367 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A test to verify an implementation of the Face layer of RPC Framework."""
+
+import abc
+import unittest
+
+from _framework.face import interfaces
+from _framework.face.testing import callback as testing_callback
+from _framework.face.testing import control
+from _framework.face.testing import coverage
+from _framework.face.testing import digest
+from _framework.face.testing import stock_service
+from _framework.face.testing import test_case
+
+_TIMEOUT = 3
+
+
+class EventInvocationSynchronousEventServiceTestCase(
+ test_case.FaceTestCase, coverage.FullCoverage):
+ """A test of the Face layer of RPC Framework.
+
+ Concrete subclasses must also extend unittest.TestCase.
+ """
+ __metaclass__ = abc.ABCMeta
+
+ def setUp(self):
+ """See unittest.TestCase.setUp for full specification.
+
+ Overriding implementations must call this implementation.
+ """
+ self.control = control.PauseFailControl()
+ self.digest = digest.digest(
+ stock_service.STOCK_TEST_SERVICE, self.control, None)
+
+ self.server, self.stub, self.memo = self.set_up_implementation(
+ self.digest.name, self.digest.methods,
+ {}, {}, {}, {},
+ self.digest.event_unary_unary_methods,
+ self.digest.event_unary_stream_methods,
+ self.digest.event_stream_unary_methods,
+ self.digest.event_stream_stream_methods,
+ None)
+
+ def tearDown(self):
+ """See unittest.TestCase.tearDown for full specification.
+
+ Overriding implementations must call this implementation.
+ """
+ self.tear_down_implementation(self.memo)
+
+ def testSuccessfulUnaryRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+ callback = testing_callback.Callback()
+
+ self.stub.event_value_in_value_out(
+ name, request, callback.complete, callback.abort, _TIMEOUT)
+ callback.block_until_terminated()
+ response = callback.response()
+
+ test_messages.verify(request, response, self)
+
+ def testSuccessfulUnaryRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+ callback = testing_callback.Callback()
+
+ self.stub.event_value_in_stream_out(
+ name, request, callback, callback.abort, _TIMEOUT)
+ callback.block_until_terminated()
+ responses = callback.responses()
+
+ test_messages.verify(request, responses, self)
+
+ def testSuccessfulStreamRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+ callback = testing_callback.Callback()
+
+ unused_call, request_consumer = self.stub.event_stream_in_value_out(
+ name, callback.complete, callback.abort, _TIMEOUT)
+ for request in requests:
+ request_consumer.consume(request)
+ request_consumer.terminate()
+ callback.block_until_terminated()
+ response = callback.response()
+
+ test_messages.verify(requests, response, self)
+
+ def testSuccessfulStreamRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+ callback = testing_callback.Callback()
+
+ unused_call, request_consumer = self.stub.event_stream_in_stream_out(
+ name, callback, callback.abort, _TIMEOUT)
+ for request in requests:
+ request_consumer.consume(request)
+ request_consumer.terminate()
+ callback.block_until_terminated()
+ responses = callback.responses()
+
+ test_messages.verify(requests, responses, self)
+
+ def testSequentialInvocations(self):
+ # pylint: disable=cell-var-from-loop
+ for name, test_messages_sequence in (
+ self.digest.unary_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ first_request = test_messages.request()
+ second_request = test_messages.request()
+ first_callback = testing_callback.Callback()
+ second_callback = testing_callback.Callback()
+
+ def make_second_invocation(first_response):
+ first_callback.complete(first_response)
+ self.stub.event_value_in_value_out(
+ name, second_request, second_callback.complete,
+ second_callback.abort, _TIMEOUT)
+
+ self.stub.event_value_in_value_out(
+ name, first_request, make_second_invocation, first_callback.abort,
+ _TIMEOUT)
+ second_callback.block_until_terminated()
+
+ first_response = first_callback.response()
+ second_response = second_callback.response()
+ test_messages.verify(first_request, first_response, self)
+ test_messages.verify(second_request, second_response, self)
+
+ def testExpiredUnaryRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+ callback = testing_callback.Callback()
+
+ with self.control.pause():
+ self.stub.event_value_in_value_out(
+ name, request, callback.complete, callback.abort, _TIMEOUT)
+ callback.block_until_terminated()
+
+ self.assertEqual(interfaces.EXPIRED, callback.abortion())
+
+ def testExpiredUnaryRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+ callback = testing_callback.Callback()
+
+ with self.control.pause():
+ self.stub.event_value_in_stream_out(
+ name, request, callback, callback.abort, _TIMEOUT)
+ callback.block_until_terminated()
+
+ self.assertEqual(interfaces.EXPIRED, callback.abortion())
+
+ def testExpiredStreamRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_unary_messages_sequences.iteritems()):
+ for unused_test_messages in test_messages_sequence:
+ callback = testing_callback.Callback()
+
+ self.stub.event_stream_in_value_out(
+ name, callback.complete, callback.abort, _TIMEOUT)
+ callback.block_until_terminated()
+
+ self.assertEqual(interfaces.EXPIRED, callback.abortion())
+
+ def testExpiredStreamRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+ callback = testing_callback.Callback()
+
+ unused_call, request_consumer = self.stub.event_stream_in_stream_out(
+ name, callback, callback.abort, _TIMEOUT)
+ for request in requests:
+ request_consumer.consume(request)
+ callback.block_until_terminated()
+
+ self.assertEqual(interfaces.EXPIRED, callback.abortion())
+
+ def testFailedUnaryRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+ callback = testing_callback.Callback()
+
+ with self.control.fail():
+ self.stub.event_value_in_value_out(
+ name, request, callback.complete, callback.abort, _TIMEOUT)
+ callback.block_until_terminated()
+
+ self.assertEqual(interfaces.SERVICER_FAILURE, callback.abortion())
+
+ def testFailedUnaryRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+ callback = testing_callback.Callback()
+
+ with self.control.fail():
+ self.stub.event_value_in_stream_out(
+ name, request, callback, callback.abort, _TIMEOUT)
+ callback.block_until_terminated()
+
+ self.assertEqual(interfaces.SERVICER_FAILURE, callback.abortion())
+
+ def testFailedStreamRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+ callback = testing_callback.Callback()
+
+ with self.control.fail():
+ unused_call, request_consumer = self.stub.event_stream_in_value_out(
+ name, callback.complete, callback.abort, _TIMEOUT)
+ for request in requests:
+ request_consumer.consume(request)
+ request_consumer.terminate()
+ callback.block_until_terminated()
+
+ self.assertEqual(interfaces.SERVICER_FAILURE, callback.abortion())
+
+ def testFailedStreamRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+ callback = testing_callback.Callback()
+
+ with self.control.fail():
+ unused_call, request_consumer = self.stub.event_stream_in_stream_out(
+ name, callback, callback.abort, _TIMEOUT)
+ for request in requests:
+ request_consumer.consume(request)
+ request_consumer.terminate()
+ callback.block_until_terminated()
+
+ self.assertEqual(interfaces.SERVICER_FAILURE, callback.abortion())
+
+ def testParallelInvocations(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ first_request = test_messages.request()
+ first_callback = testing_callback.Callback()
+ second_request = test_messages.request()
+ second_callback = testing_callback.Callback()
+
+ self.stub.event_value_in_value_out(
+ name, first_request, first_callback.complete, first_callback.abort,
+ _TIMEOUT)
+ self.stub.event_value_in_value_out(
+ name, second_request, second_callback.complete,
+ second_callback.abort, _TIMEOUT)
+ first_callback.block_until_terminated()
+ second_callback.block_until_terminated()
+
+ first_response = first_callback.response()
+ second_response = second_callback.response()
+ test_messages.verify(first_request, first_response, self)
+ test_messages.verify(second_request, second_response, self)
+
+ @unittest.skip('TODO(nathaniel): implement.')
+ def testWaitingForSomeButNotAllParallelInvocations(self):
+ raise NotImplementedError()
+
+ def testCancelledUnaryRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+ callback = testing_callback.Callback()
+
+ with self.control.pause():
+ call = self.stub.event_value_in_value_out(
+ name, request, callback.complete, callback.abort, _TIMEOUT)
+ call.cancel()
+ callback.block_until_terminated()
+
+ self.assertEqual(interfaces.CANCELLED, callback.abortion())
+
+ def testCancelledUnaryRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+ callback = testing_callback.Callback()
+
+ call = self.stub.event_value_in_stream_out(
+ name, request, callback, callback.abort, _TIMEOUT)
+ call.cancel()
+ callback.block_until_terminated()
+
+ self.assertEqual(interfaces.CANCELLED, callback.abortion())
+
+ def testCancelledStreamRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+ callback = testing_callback.Callback()
+
+ call, request_consumer = self.stub.event_stream_in_value_out(
+ name, callback.complete, callback.abort, _TIMEOUT)
+ for request in requests:
+ request_consumer.consume(request)
+ call.cancel()
+ callback.block_until_terminated()
+
+ self.assertEqual(interfaces.CANCELLED, callback.abortion())
+
+ def testCancelledStreamRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_stream_messages_sequences.iteritems()):
+ for unused_test_messages in test_messages_sequence:
+ callback = testing_callback.Callback()
+
+ call, unused_request_consumer = self.stub.event_stream_in_stream_out(
+ name, callback, callback.abort, _TIMEOUT)
+ call.cancel()
+ callback.block_until_terminated()
+
+ self.assertEqual(interfaces.CANCELLED, callback.abortion())
diff --git a/src/python/_framework/face/testing/future_invocation_asynchronous_event_service_test_case.py b/src/python/_framework/face/testing/future_invocation_asynchronous_event_service_test_case.py
new file mode 100644
index 0000000000..cf8b2eeb95
--- /dev/null
+++ b/src/python/_framework/face/testing/future_invocation_asynchronous_event_service_test_case.py
@@ -0,0 +1,377 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A test to verify an implementation of the Face layer of RPC Framework."""
+
+import abc
+import contextlib
+import threading
+import unittest
+
+from _framework.face import exceptions
+from _framework.face.testing import control
+from _framework.face.testing import coverage
+from _framework.face.testing import digest
+from _framework.face.testing import stock_service
+from _framework.face.testing import test_case
+from _framework.foundation import future
+from _framework.foundation import logging_pool
+
+_TIMEOUT = 3
+_MAXIMUM_POOL_SIZE = 100
+
+
+class _PauseableIterator(object):
+
+ def __init__(self, upstream):
+ self._upstream = upstream
+ self._condition = threading.Condition()
+ self._paused = False
+
+ @contextlib.contextmanager
+ def pause(self):
+ with self._condition:
+ self._paused = True
+ yield
+ with self._condition:
+ self._paused = False
+ self._condition.notify_all()
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ with self._condition:
+ while self._paused:
+ self._condition.wait()
+ return next(self._upstream)
+
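+
+# A minimal sketch (never invoked by the tests below) of the pause semantics
+# that the stream-request tests rely on: while pause() is held, next() calls
+# made from other threads block; when it exits, iteration resumes upstream.
+def _example_pauseable_iteration():
+  iterator = _PauseableIterator(iter([1, 2, 3]))
+  consumed = []
+  consumer = threading.Thread(target=lambda: consumed.extend(iterator))
+  with iterator.pause():
+    consumer.start()  # The thread blocks in next() until pause() exits.
+  consumer.join()
+  return consumed  # [1, 2, 3]
+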
+
+class FutureInvocationAsynchronousEventServiceTestCase(
+ test_case.FaceTestCase, coverage.FullCoverage):
+ """A test of the Face layer of RPC Framework.
+
+ Concrete subclasses must also extend unittest.TestCase.
+ """
+ __metaclass__ = abc.ABCMeta
+
+ def setUp(self):
+ """See unittest.TestCase.setUp for full specification.
+
+ Overriding implementations must call this implementation.
+ """
+ self.control = control.PauseFailControl()
+ self.digest_pool = logging_pool.pool(_MAXIMUM_POOL_SIZE)
+ self.digest = digest.digest(
+ stock_service.STOCK_TEST_SERVICE, self.control, self.digest_pool)
+
+ self.server, self.stub, self.memo = self.set_up_implementation(
+ self.digest.name, self.digest.methods,
+ {}, {}, {}, {},
+ self.digest.event_unary_unary_methods,
+ self.digest.event_unary_stream_methods,
+ self.digest.event_stream_unary_methods,
+ self.digest.event_stream_stream_methods,
+ None)
+
+ def tearDown(self):
+ """See unittest.TestCase.tearDown for full specification.
+
+ Overriding implementations must call this implementation.
+ """
+ self.tear_down_implementation(self.memo)
+ self.digest_pool.shutdown(wait=True)
+
+ def testSuccessfulUnaryRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ response_future = self.stub.future_value_in_value_out(
+ name, request, _TIMEOUT)
+ response = response_future.outcome().return_value
+
+ test_messages.verify(request, response, self)
+
+ def testSuccessfulUnaryRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ response_iterator = self.stub.inline_value_in_stream_out(
+ name, request, _TIMEOUT)
+ responses = list(response_iterator)
+
+ test_messages.verify(request, responses, self)
+
+ def testSuccessfulStreamRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+ request_iterator = _PauseableIterator(iter(requests))
+
+ # Use of a paused iterator of requests allows us to test that control is
+ # returned to calling code before the iterator yields any requests.
+ with request_iterator.pause():
+ response_future = self.stub.future_stream_in_value_out(
+ name, request_iterator, _TIMEOUT)
+ response = response_future.outcome().return_value
+
+ test_messages.verify(requests, response, self)
+
+ def testSuccessfulStreamRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+ request_iterator = _PauseableIterator(iter(requests))
+
+ # Use of a paused iterator of requests allows us to test that control is
+ # returned to calling code before the iterator yields any requests.
+ with request_iterator.pause():
+ response_iterator = self.stub.inline_stream_in_stream_out(
+ name, request_iterator, _TIMEOUT)
+ responses = list(response_iterator)
+
+ test_messages.verify(requests, responses, self)
+
+ def testSequentialInvocations(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ first_request = test_messages.request()
+ second_request = test_messages.request()
+
+ first_response_future = self.stub.future_value_in_value_out(
+ name, first_request, _TIMEOUT)
+ first_response = first_response_future.outcome().return_value
+
+ test_messages.verify(first_request, first_response, self)
+
+ second_response_future = self.stub.future_value_in_value_out(
+ name, second_request, _TIMEOUT)
+ second_response = second_response_future.outcome().return_value
+
+ test_messages.verify(second_request, second_response, self)
+
+ def testExpiredUnaryRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ with self.control.pause():
+ response_future = self.stub.future_value_in_value_out(
+ name, request, _TIMEOUT)
+ outcome = response_future.outcome()
+
+ self.assertIsInstance(
+ outcome.exception, exceptions.ExpirationError)
+
+ def testExpiredUnaryRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ with self.control.pause(), self.assertRaises(
+ exceptions.ExpirationError):
+ response_iterator = self.stub.inline_value_in_stream_out(
+ name, request, _TIMEOUT)
+ list(response_iterator)
+
+ def testExpiredStreamRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ with self.control.pause():
+ response_future = self.stub.future_stream_in_value_out(
+ name, iter(requests), _TIMEOUT)
+ outcome = response_future.outcome()
+
+ self.assertIsInstance(
+ outcome.exception, exceptions.ExpirationError)
+
+ def testExpiredStreamRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ with self.control.pause(), self.assertRaises(
+ exceptions.ExpirationError):
+ response_iterator = self.stub.inline_stream_in_stream_out(
+ name, iter(requests), _TIMEOUT)
+ list(response_iterator)
+
+ def testFailedUnaryRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ with self.control.fail():
+ response_future = self.stub.future_value_in_value_out(
+ name, request, _TIMEOUT)
+ outcome = response_future.outcome()
+
+        # Because the servicer fails outside of the thread from which the
+        # servicer-side runtime called into it, its failure is
+        # indistinguishable from simply not having called its
+        # response_callback before the expiration of the RPC.
+ self.assertIsInstance(outcome.exception, exceptions.ExpirationError)
+
+ def testFailedUnaryRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+        # Because the servicer fails outside of the thread from which the
+        # servicer-side runtime called into it, its failure is
+        # indistinguishable from simply not having called its
+        # response_consumer before the expiration of the RPC.
+ with self.control.fail(), self.assertRaises(exceptions.ExpirationError):
+ response_iterator = self.stub.inline_value_in_stream_out(
+ name, request, _TIMEOUT)
+ list(response_iterator)
+
+ def testFailedStreamRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ with self.control.fail():
+ response_future = self.stub.future_stream_in_value_out(
+ name, iter(requests), _TIMEOUT)
+ outcome = response_future.outcome()
+
+        # Because the servicer fails outside of the thread from which the
+        # servicer-side runtime called into it, its failure is
+        # indistinguishable from simply not having called its
+        # response_callback before the expiration of the RPC.
+ self.assertIsInstance(outcome.exception, exceptions.ExpirationError)
+
+ def testFailedStreamRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+        # Because the servicer fails outside of the thread from which the
+        # servicer-side runtime called into it, its failure is
+        # indistinguishable from simply not having called its
+        # response_consumer before the expiration of the RPC.
+ with self.control.fail(), self.assertRaises(exceptions.ExpirationError):
+ response_iterator = self.stub.inline_stream_in_stream_out(
+ name, iter(requests), _TIMEOUT)
+ list(response_iterator)
+
+ def testParallelInvocations(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ first_request = test_messages.request()
+ second_request = test_messages.request()
+
+ first_response_future = self.stub.future_value_in_value_out(
+ name, first_request, _TIMEOUT)
+ second_response_future = self.stub.future_value_in_value_out(
+ name, second_request, _TIMEOUT)
+ first_response = first_response_future.outcome().return_value
+ second_response = second_response_future.outcome().return_value
+
+ test_messages.verify(first_request, first_response, self)
+ test_messages.verify(second_request, second_response, self)
+
+ @unittest.skip('TODO(nathaniel): implement.')
+ def testWaitingForSomeButNotAllParallelInvocations(self):
+ raise NotImplementedError()
+
+ def testCancelledUnaryRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ with self.control.pause():
+ response_future = self.stub.future_value_in_value_out(
+ name, request, _TIMEOUT)
+ cancelled = response_future.cancel()
+
+ self.assertFalse(cancelled)
+ self.assertEqual(future.ABORTED, response_future.outcome().category)
+
+ def testCancelledUnaryRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.unary_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ with self.control.pause():
+ response_iterator = self.stub.inline_value_in_stream_out(
+ name, request, _TIMEOUT)
+ response_iterator.cancel()
+
+ with self.assertRaises(exceptions.CancellationError):
+ next(response_iterator)
+
+ def testCancelledStreamRequestUnaryResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_unary_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ with self.control.pause():
+ response_future = self.stub.future_stream_in_value_out(
+ name, iter(requests), _TIMEOUT)
+ cancelled = response_future.cancel()
+
+ self.assertFalse(cancelled)
+ self.assertEqual(future.ABORTED, response_future.outcome().category)
+
+ def testCancelledStreamRequestStreamResponse(self):
+ for name, test_messages_sequence in (
+ self.digest.stream_stream_messages_sequences.iteritems()):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ with self.control.pause():
+ response_iterator = self.stub.inline_stream_in_stream_out(
+ name, iter(requests), _TIMEOUT)
+ response_iterator.cancel()
+
+ with self.assertRaises(exceptions.CancellationError):
+ next(response_iterator)
diff --git a/src/python/_framework/face/testing/interfaces.py b/src/python/_framework/face/testing/interfaces.py
new file mode 100644
index 0000000000..253f6f118d
--- /dev/null
+++ b/src/python/_framework/face/testing/interfaces.py
@@ -0,0 +1,117 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Interfaces implemented by data sets used in Face-layer tests."""
+
+import abc
+
+# cardinality is referenced from specification in this module.
+from _framework.common import cardinality # pylint: disable=unused-import
+
+
+class Method(object):
+ """An RPC method to be used in tests of RPC implementations."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def name(self):
+ """Identify the name of the method.
+
+ Returns:
+ The name of the method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def cardinality(self):
+ """Identify the cardinality of the method.
+
+ Returns:
+ A cardinality.Cardinality value describing the streaming semantics of the
+ method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def request_class(self):
+ """Identify the class used for the method's request objects.
+
+ Returns:
+ The class object of the class to which the method's request objects
+ belong.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def response_class(self):
+ """Identify the class used for the method's response objects.
+
+ Returns:
+ The class object of the class to which the method's response objects
+ belong.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def serialize_request(self, request):
+ """Serialize the given request object.
+
+ Args:
+ request: A request object appropriate for this method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def deserialize_request(self, serialized_request):
+ """Synthesize a request object from a given bytestring.
+
+ Args:
+ serialized_request: A bytestring deserializable into a request object
+ appropriate for this method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def serialize_response(self, response):
+ """Serialize the given response object.
+
+ Args:
+ response: A response object appropriate for this method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def deserialize_response(self, serialized_response):
+ """Synthesize a response object from a given bytestring.
+
+ Args:
+ serialized_response: A bytestring deserializable into a response object
+ appropriate for this method.
+ """
+ raise NotImplementedError()
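+
+
+class _ExampleIdentityMethod(Method):
+  """A minimal sketch of a Method implementation, using plain strings.
+
+  Illustrative only: the concrete test data sets in this package (such as the
+  stock service) implement this interface with protocol buffer messages and
+  their generated serialization routines.
+  """
+
+  def name(self):
+    return 'ExampleIdentityMethod'
+
+  def cardinality(self):
+    return cardinality.Cardinality.UNARY_UNARY
+
+  def request_class(self):
+    return str
+
+  def response_class(self):
+    return str
+
+  def serialize_request(self, request):
+    return request
+
+  def deserialize_request(self, serialized_request):
+    return serialized_request
+
+  def serialize_response(self, response):
+    return response
+
+  def deserialize_response(self, serialized_response):
+    return serialized_response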
diff --git a/src/python/_framework/face/testing/serial.py b/src/python/_framework/face/testing/serial.py
new file mode 100644
index 0000000000..47fc5822de
--- /dev/null
+++ b/src/python/_framework/face/testing/serial.py
@@ -0,0 +1,70 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Utility for serialization in the context of test RPC services."""
+
+import collections
+
+
+class Serialization(
+ collections.namedtuple(
+ '_Serialization',
+ ['request_serializers',
+ 'request_deserializers',
+ 'response_serializers',
+ 'response_deserializers'])):
+ """An aggregation of serialization behaviors for an RPC service.
+
+ Attributes:
+ request_serializers: A dict from method name to request object serializer
+ behavior.
+ request_deserializers: A dict from method name to request object
+ deserializer behavior.
+ response_serializers: A dict from method name to response object serializer
+ behavior.
+ response_deserializers: A dict from method name to response object
+ deserializer behavior.
+ """
+
+
+def serialization(methods):
+  """Creates a Serialization from a sequence of interfaces.Method objects."""
+ request_serializers = {}
+ request_deserializers = {}
+ response_serializers = {}
+ response_deserializers = {}
+ for method in methods:
+ name = method.name()
+ request_serializers[name] = method.serialize_request
+ request_deserializers[name] = method.deserialize_request
+ response_serializers[name] = method.serialize_response
+ response_deserializers[name] = method.deserialize_response
+ return Serialization(
+ request_serializers, request_deserializers, response_serializers,
+ response_deserializers)
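+
+
+def _example_serialization_use():
+  """A minimal sketch of how the maps built by serialization() are used.
+
+  Illustrative only: assumes the stock test service defined in
+  stock_service.py in this package and its _junkdrawer.stock_pb2 messages.
+  """
+  from _framework.face.testing import stock_service
+  from _junkdrawer import stock_pb2
+
+  method, unused_messages = (
+      stock_service.STOCK_TEST_SERVICE.unary_unary_scenarios()[
+          'GetLastTradePrice'])
+  serializations = serialization([method])
+  # Both maps are keyed by method name, matching interfaces.Method.name().
+  request_bytes = serializations.request_serializers['GetLastTradePrice'](
+      stock_pb2.StockRequest(symbol='GOOG'))
+  return serializations.request_deserializers['GetLastTradePrice'](
+      request_bytes)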
diff --git a/src/python/_framework/face/testing/service.py b/src/python/_framework/face/testing/service.py
new file mode 100644
index 0000000000..771346ec2e
--- /dev/null
+++ b/src/python/_framework/face/testing/service.py
@@ -0,0 +1,337 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Private interfaces implemented by data sets used in Face-layer tests."""
+
+import abc
+
+# interfaces is referenced from specification in this module.
+from _framework.face import interfaces as face_interfaces # pylint: disable=unused-import
+from _framework.face.testing import interfaces
+
+
+class UnaryUnaryTestMethod(interfaces.Method):
+ """Like face_interfaces.EventValueInValueOutMethod but with a control."""
+
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def service(self, request, response_callback, context, control):
+ """Services an RPC that accepts one message and produces one message.
+
+ Args:
+ request: The single request message for the RPC.
+ response_callback: A callback to be called to accept the response message
+ of the RPC.
+      context: A face_interfaces.RpcContext object.
+ control: A test_control.Control to control execution of this method.
+
+ Raises:
+ abandonment.Abandoned: May or may not be raised when the RPC has been
+ aborted.
+ """
+ raise NotImplementedError()
+
+
+class UnaryUnaryTestMessages(object):
+ """A type for unary-request-unary-response message pairings."""
+
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def request(self):
+ """Affords a request message.
+
+ Implementations of this method should return a different message with each
+ call so that multiple test executions of the test method may be made with
+ different inputs.
+
+ Returns:
+ A request message.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def verify(self, request, response, test_case):
+ """Verifies that the computed response matches the given request.
+
+ Args:
+ request: A request message.
+ response: A response message.
+ test_case: A unittest.TestCase object affording useful assertion methods.
+
+ Raises:
+ AssertionError: If the request and response do not match, indicating that
+ there was some problem executing the RPC under test.
+ """
+ raise NotImplementedError()
+
+
+class UnaryStreamTestMethod(interfaces.Method):
+ """Like face_interfaces.EventValueInStreamOutMethod but with a control."""
+
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def service(self, request, response_consumer, context, control):
+ """Services an RPC that takes one message and produces a stream of messages.
+
+ Args:
+ request: The single request message for the RPC.
+ response_consumer: A stream.Consumer to be called to accept the response
+ messages of the RPC.
+ context: An RpcContext object.
+ control: A test_control.Control to control execution of this method.
+
+ Raises:
+ abandonment.Abandoned: May or may not be raised when the RPC has been
+ aborted.
+ """
+ raise NotImplementedError()
+
+
+class UnaryStreamTestMessages(object):
+ """A type for unary-request-stream-response message pairings."""
+
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def request(self):
+ """Affords a request message.
+
+ Implementations of this method should return a different message with each
+ call so that multiple test executions of the test method may be made with
+ different inputs.
+
+ Returns:
+ A request message.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def verify(self, request, responses, test_case):
+ """Verifies that the computed responses match the given request.
+
+ Args:
+ request: A request message.
+ responses: A sequence of response messages.
+ test_case: A unittest.TestCase object affording useful assertion methods.
+
+ Raises:
+ AssertionError: If the request and responses do not match, indicating that
+ there was some problem executing the RPC under test.
+ """
+ raise NotImplementedError()
+
+
+class StreamUnaryTestMethod(interfaces.Method):
+ """Like face_interfaces.EventStreamInValueOutMethod but with a control."""
+
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def service(self, response_callback, context, control):
+ """Services an RPC that takes a stream of messages and produces one message.
+
+ Args:
+ response_callback: A callback to be called to accept the response message
+ of the RPC.
+ context: An RpcContext object.
+ control: A test_control.Control to control execution of this method.
+
+ Returns:
+ A stream.Consumer with which to accept the request messages of the RPC.
+ The consumer returned from this method may or may not be invoked to
+ completion: in the case of RPC abortion, RPC Framework will simply stop
+ passing messages to this object. Implementations must not assume that
+ this object will be called to completion of the request stream or even
+ called at all.
+
+ Raises:
+ abandonment.Abandoned: May or may not be raised when the RPC has been
+ aborted.
+ """
+ raise NotImplementedError()
+
+
+class StreamUnaryTestMessages(object):
+ """A type for stream-request-unary-response message pairings."""
+
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def requests(self):
+ """Affords a sequence of request messages.
+
+    Implementations of this method should return a different sequence with each
+ call so that multiple test executions of the test method may be made with
+ different inputs.
+
+ Returns:
+ A sequence of request messages.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def verify(self, requests, response, test_case):
+ """Verifies that the computed response matches the given requests.
+
+ Args:
+ requests: A sequence of request messages.
+ response: A response message.
+ test_case: A unittest.TestCase object affording useful assertion methods.
+
+ Raises:
+ AssertionError: If the requests and response do not match, indicating that
+ there was some problem executing the RPC under test.
+ """
+ raise NotImplementedError()
+
+
+class StreamStreamTestMethod(interfaces.Method):
+ """Like face_interfaces.EventStreamInStreamOutMethod but with a control."""
+
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def service(self, response_consumer, context, control):
+ """Services an RPC that accepts and produces streams of messages.
+
+ Args:
+ response_consumer: A stream.Consumer to be called to accept the response
+ messages of the RPC.
+ context: An RpcContext object.
+ control: A test_control.Control to control execution of this method.
+
+ Returns:
+ A stream.Consumer with which to accept the request messages of the RPC.
+ The consumer returned from this method may or may not be invoked to
+ completion: in the case of RPC abortion, RPC Framework will simply stop
+ passing messages to this object. Implementations must not assume that
+ this object will be called to completion of the request stream or even
+ called at all.
+
+ Raises:
+ abandonment.Abandoned: May or may not be raised when the RPC has been
+ aborted.
+ """
+ raise NotImplementedError()
+
+
+class StreamStreamTestMessages(object):
+ """A type for stream-request-stream-response message pairings."""
+
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def requests(self):
+ """Affords a sequence of request messages.
+
+    Implementations of this method should return a different sequence with each
+ call so that multiple test executions of the test method may be made with
+ different inputs.
+
+ Returns:
+ A sequence of request messages.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+  def verify(self, requests, responses, test_case):
+    """Verifies that the computed responses match the given requests.
+
+ Args:
+ requests: A sequence of request messages.
+ responses: A sequence of response messages.
+ test_case: A unittest.TestCase object affording useful assertion methods.
+
+ Raises:
+ AssertionError: If the requests and responses do not match, indicating
+ that there was some problem executing the RPC under test.
+ """
+ raise NotImplementedError()
+
+
+class TestService(object):
+ """A specification of implemented RPC methods to use in tests."""
+
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def name(self):
+ """Identifies the RPC service name used during the test.
+
+ Returns:
+ The RPC service name to be used for the test.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def unary_unary_scenarios(self):
+ """Affords unary-request-unary-response test methods and their messages.
+
+ Returns:
+      A dict from method name to a pair. The first element of the pair
+      is a UnaryUnaryTestMethod object and the second element is a sequence
+      of UnaryUnaryTestMessages objects.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def unary_stream_scenarios(self):
+ """Affords unary-request-stream-response test methods and their messages.
+
+ Returns:
+      A dict from method name to a pair. The first element of the pair is a
+      UnaryStreamTestMethod object and the second element is a sequence of
+      UnaryStreamTestMessages objects.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def stream_unary_scenarios(self):
+ """Affords stream-request-unary-response test methods and their messages.
+
+ Returns:
+      A dict from method name to a pair. The first element of the pair is a
+      StreamUnaryTestMethod object and the second element is a sequence of
+      StreamUnaryTestMessages objects.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def stream_stream_scenarios(self):
+ """Affords stream-request-stream-response test methods and their messages.
+
+ Returns:
+      A dict from method name to a pair. The first element of the pair is a
+      StreamStreamTestMethod object and the second element is a sequence of
+      StreamStreamTestMessages objects.
+ """
+ raise NotImplementedError()
diff --git a/src/python/_framework/face/testing/stock_service.py b/src/python/_framework/face/testing/stock_service.py
new file mode 100644
index 0000000000..bd82877e83
--- /dev/null
+++ b/src/python/_framework/face/testing/stock_service.py
@@ -0,0 +1,374 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Examples of Python implementations of the stock.proto Stock service."""
+
+from _framework.common import cardinality
+from _framework.face.testing import service
+from _framework.foundation import abandonment
+from _framework.foundation import stream
+from _framework.foundation import stream_util
+from _junkdrawer import stock_pb2
+
+SYMBOL_FORMAT = 'test symbol:%03d'
+STREAM_LENGTH = 400
+
+# A test-appropriate security-pricing function. :-P
+_price = lambda symbol_name: float(hash(symbol_name) % 4096)
+
+
+def _get_last_trade_price(stock_request, stock_reply_callback, control, active):
+ """A unary-request, unary-response test method."""
+ control.control()
+ if active():
+ stock_reply_callback(
+ stock_pb2.StockReply(
+ symbol=stock_request.symbol, price=_price(stock_request.symbol)))
+ else:
+ raise abandonment.Abandoned()
+
+
+def _get_last_trade_price_multiple(stock_reply_consumer, control, active):
+ """A stream-request, stream-response test method."""
+ def stock_reply_for_stock_request(stock_request):
+ control.control()
+ if active():
+ return stock_pb2.StockReply(
+ symbol=stock_request.symbol, price=_price(stock_request.symbol))
+ else:
+ raise abandonment.Abandoned()
+ return stream_util.TransformingConsumer(
+ stock_reply_for_stock_request, stock_reply_consumer)
+
+
+def _watch_future_trades(stock_request, stock_reply_consumer, control, active):
+ """A unary-request, stream-response test method."""
+ base_price = _price(stock_request.symbol)
+ for index in range(stock_request.num_trades_to_watch):
+ control.control()
+ if active():
+ stock_reply_consumer.consume(
+ stock_pb2.StockReply(
+ symbol=stock_request.symbol, price=base_price + index))
+ else:
+ raise abandonment.Abandoned()
+ stock_reply_consumer.terminate()
+
+
+def _get_highest_trade_price(stock_reply_callback, control, active):
+ """A stream-request, unary-response test method."""
+
+ class StockRequestConsumer(stream.Consumer):
+ """Keeps an ongoing record of the most valuable symbol yet consumed."""
+
+ def __init__(self):
+ self._symbol = None
+ self._price = None
+
+ def consume(self, stock_request):
+ control.control()
+ if active():
+ if self._price is None:
+ self._symbol = stock_request.symbol
+ self._price = _price(stock_request.symbol)
+ else:
+ candidate_price = _price(stock_request.symbol)
+ if self._price < candidate_price:
+ self._symbol = stock_request.symbol
+ self._price = candidate_price
+
+ def terminate(self):
+ control.control()
+ if active():
+ if self._symbol is None:
+ raise ValueError()
+ else:
+ stock_reply_callback(
+ stock_pb2.StockReply(symbol=self._symbol, price=self._price))
+ self._symbol = None
+ self._price = None
+
+ def consume_and_terminate(self, stock_request):
+ control.control()
+ if active():
+ if self._price is None:
+ stock_reply_callback(
+ stock_pb2.StockReply(
+ symbol=stock_request.symbol,
+ price=_price(stock_request.symbol)))
+ else:
+ candidate_price = _price(stock_request.symbol)
+ if self._price < candidate_price:
+ stock_reply_callback(
+ stock_pb2.StockReply(
+ symbol=stock_request.symbol, price=candidate_price))
+ else:
+ stock_reply_callback(
+ stock_pb2.StockReply(
+ symbol=self._symbol, price=self._price))
+
+ self._symbol = None
+ self._price = None
+
+ return StockRequestConsumer()
+
+
+class GetLastTradePrice(service.UnaryUnaryTestMethod):
+ """GetLastTradePrice for use in tests."""
+
+ def name(self):
+ return 'GetLastTradePrice'
+
+ def cardinality(self):
+ return cardinality.Cardinality.UNARY_UNARY
+
+ def request_class(self):
+ return stock_pb2.StockRequest
+
+ def response_class(self):
+ return stock_pb2.StockReply
+
+ def serialize_request(self, request):
+ return request.SerializeToString()
+
+ def deserialize_request(self, serialized_request):
+ return stock_pb2.StockRequest.FromString(serialized_request)
+
+ def serialize_response(self, response):
+ return response.SerializeToString()
+
+ def deserialize_response(self, serialized_response):
+ return stock_pb2.StockReply.FromString(serialized_response)
+
+ def service(self, request, response_callback, context, control):
+ _get_last_trade_price(
+ request, response_callback, control, context.is_active)
+
+
+class GetLastTradePriceMessages(service.UnaryUnaryTestMessages):
+
+ def __init__(self):
+ self._index = 0
+
+ def request(self):
+ symbol = SYMBOL_FORMAT % self._index
+ self._index += 1
+ return stock_pb2.StockRequest(symbol=symbol)
+
+ def verify(self, request, response, test_case):
+ test_case.assertEqual(request.symbol, response.symbol)
+ test_case.assertEqual(_price(request.symbol), response.price)
+
+
+class GetLastTradePriceMultiple(service.StreamStreamTestMethod):
+ """GetLastTradePriceMultiple for use in tests."""
+
+ def name(self):
+ return 'GetLastTradePriceMultiple'
+
+ def cardinality(self):
+ return cardinality.Cardinality.STREAM_STREAM
+
+ def request_class(self):
+ return stock_pb2.StockRequest
+
+ def response_class(self):
+ return stock_pb2.StockReply
+
+ def serialize_request(self, request):
+ return request.SerializeToString()
+
+ def deserialize_request(self, serialized_request):
+ return stock_pb2.StockRequest.FromString(serialized_request)
+
+ def serialize_response(self, response):
+ return response.SerializeToString()
+
+ def deserialize_response(self, serialized_response):
+ return stock_pb2.StockReply.FromString(serialized_response)
+
+ def service(self, response_consumer, context, control):
+ return _get_last_trade_price_multiple(
+ response_consumer, control, context.is_active)
+
+
+class GetLastTradePriceMultipleMessages(service.StreamStreamTestMessages):
+ """Pairs of message streams for use with GetLastTradePriceMultiple."""
+
+ def __init__(self):
+ self._index = 0
+
+ def requests(self):
+ base_index = self._index
+ self._index += 1
+ return [
+ stock_pb2.StockRequest(symbol=SYMBOL_FORMAT % (base_index + index))
+ for index in range(STREAM_LENGTH)]
+
+ def verify(self, requests, responses, test_case):
+ test_case.assertEqual(len(requests), len(responses))
+ for stock_request, stock_reply in zip(requests, responses):
+ test_case.assertEqual(stock_request.symbol, stock_reply.symbol)
+ test_case.assertEqual(_price(stock_request.symbol), stock_reply.price)
+
+
+class WatchFutureTrades(service.UnaryStreamTestMethod):
+ """WatchFutureTrades for use in tests."""
+
+ def name(self):
+ return 'WatchFutureTrades'
+
+ def cardinality(self):
+ return cardinality.Cardinality.UNARY_STREAM
+
+ def request_class(self):
+ return stock_pb2.StockRequest
+
+ def response_class(self):
+ return stock_pb2.StockReply
+
+ def serialize_request(self, request):
+ return request.SerializeToString()
+
+ def deserialize_request(self, serialized_request):
+ return stock_pb2.StockRequest.FromString(serialized_request)
+
+ def serialize_response(self, response):
+ return response.SerializeToString()
+
+ def deserialize_response(self, serialized_response):
+ return stock_pb2.StockReply.FromString(serialized_response)
+
+ def service(self, request, response_consumer, context, control):
+ _watch_future_trades(request, response_consumer, control, context.is_active)
+
+
+class WatchFutureTradesMessages(service.UnaryStreamTestMessages):
+ """Pairs of a single request message and a sequence of response messages."""
+
+ def __init__(self):
+ self._index = 0
+
+ def request(self):
+ symbol = SYMBOL_FORMAT % self._index
+ self._index += 1
+ return stock_pb2.StockRequest(
+ symbol=symbol, num_trades_to_watch=STREAM_LENGTH)
+
+ def verify(self, request, responses, test_case):
+ test_case.assertEqual(STREAM_LENGTH, len(responses))
+ base_price = _price(request.symbol)
+ for index, response in enumerate(responses):
+ test_case.assertEqual(base_price + index, response.price)
+
+
+class GetHighestTradePrice(service.StreamUnaryTestMethod):
+ """GetHighestTradePrice for use in tests."""
+
+ def name(self):
+ return 'GetHighestTradePrice'
+
+ def cardinality(self):
+ return cardinality.Cardinality.STREAM_UNARY
+
+ def request_class(self):
+ return stock_pb2.StockRequest
+
+ def response_class(self):
+ return stock_pb2.StockReply
+
+ def serialize_request(self, request):
+ return request.SerializeToString()
+
+ def deserialize_request(self, serialized_request):
+ return stock_pb2.StockRequest.FromString(serialized_request)
+
+ def serialize_response(self, response):
+ return response.SerializeToString()
+
+ def deserialize_response(self, serialized_response):
+ return stock_pb2.StockReply.FromString(serialized_response)
+
+ def service(self, response_callback, context, control):
+ return _get_highest_trade_price(
+ response_callback, control, context.is_active)
+
+
+class GetHighestTradePriceMessages(service.StreamUnaryTestMessages):
+
+ def requests(self):
+ return [
+ stock_pb2.StockRequest(symbol=SYMBOL_FORMAT % index)
+ for index in range(STREAM_LENGTH)]
+
+ def verify(self, requests, response, test_case):
+ price = None
+ symbol = None
+ for stock_request in requests:
+ current_symbol = stock_request.symbol
+ current_price = _price(current_symbol)
+ if price is None or price < current_price:
+ price = current_price
+ symbol = current_symbol
+ test_case.assertEqual(price, response.price)
+ test_case.assertEqual(symbol, response.symbol)
+
+
+class StockTestService(service.TestService):
+ """A corpus of test data with one method of each RPC cardinality."""
+
+ def name(self):
+ return 'Stock'
+
+ def unary_unary_scenarios(self):
+ return {
+ 'GetLastTradePrice': (
+ GetLastTradePrice(), [GetLastTradePriceMessages()]),
+ }
+
+ def unary_stream_scenarios(self):
+ return {
+ 'WatchFutureTrades': (
+ WatchFutureTrades(), [WatchFutureTradesMessages()]),
+ }
+
+ def stream_unary_scenarios(self):
+ return {
+ 'GetHighestTradePrice': (
+ GetHighestTradePrice(), [GetHighestTradePriceMessages()])
+ }
+
+ def stream_stream_scenarios(self):
+ return {
+ 'GetLastTradePriceMultiple': (
+ GetLastTradePriceMultiple(), [GetLastTradePriceMultipleMessages()]),
+ }
+
+
+STOCK_TEST_SERVICE = StockTestService()
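+
+
+def _example_scenario_round_trip():
+  """A minimal sketch of how the scenario pairs above are consumed by tests.
+
+  Illustrative only: a test case looks up a (method, messages) pair, draws a
+  request from the messages object, and round-trips it through the method's
+  own serialization before eventually verifying the servicer's response.
+  """
+  method, messages_sequence = (
+      STOCK_TEST_SERVICE.unary_unary_scenarios()['GetLastTradePrice'])
+  test_messages = messages_sequence[0]
+  request = test_messages.request()
+  return method.deserialize_request(method.serialize_request(request))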
diff --git a/src/python/_framework/face/testing/test_case.py b/src/python/_framework/face/testing/test_case.py
new file mode 100644
index 0000000000..09b5a67f5a
--- /dev/null
+++ b/src/python/_framework/face/testing/test_case.py
@@ -0,0 +1,111 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Tools for creating tests of implementations of the Face layer."""
+
+import abc
+
+# face_interfaces and interfaces are referenced in specification in this module.
+from _framework.face import interfaces as face_interfaces # pylint: disable=unused-import
+from _framework.face.testing import interfaces # pylint: disable=unused-import
+
+
+class FaceTestCase(object):
+ """Describes a test of the Face Layer of RPC Framework.
+
+ Concrete subclasses must also inherit from unittest.TestCase and from at least
+ one class that defines test methods.
+ """
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def set_up_implementation(
+ self,
+ name,
+ methods,
+ inline_value_in_value_out_methods,
+ inline_value_in_stream_out_methods,
+ inline_stream_in_value_out_methods,
+ inline_stream_in_stream_out_methods,
+ event_value_in_value_out_methods,
+ event_value_in_stream_out_methods,
+ event_stream_in_value_out_methods,
+ event_stream_in_stream_out_methods,
+ multi_method):
+ """Instantiates the Face Layer implementation under test.
+
+ Args:
+ name: The service name to be used in the test.
+ methods: A sequence of interfaces.Method objects describing the RPC
+ methods that will be called during the test.
+ inline_value_in_value_out_methods: A dictionary from string method names
+ to face_interfaces.InlineValueInValueOutMethod implementations of those
+ methods.
+ inline_value_in_stream_out_methods: A dictionary from string method names
+ to face_interfaces.InlineValueInStreamOutMethod implementations of those
+ methods.
+ inline_stream_in_value_out_methods: A dictionary from string method names
+ to face_interfaces.InlineStreamInValueOutMethod implementations of those
+ methods.
+ inline_stream_in_stream_out_methods: A dictionary from string method names
+ to face_interfaces.InlineStreamInStreamOutMethod implementations of
+ those methods.
+ event_value_in_value_out_methods: A dictionary from string method names
+ to face_interfaces.EventValueInValueOutMethod implementations of those
+ methods.
+ event_value_in_stream_out_methods: A dictionary from string method names
+ to face_interfaces.EventValueInStreamOutMethod implementations of those
+ methods.
+ event_stream_in_value_out_methods: A dictionary from string method names
+ to face_interfaces.EventStreamInValueOutMethod implementations of those
+ methods.
+ event_stream_in_stream_out_methods: A dictionary from string method names
+ to face_interfaces.EventStreamInStreamOutMethod implementations of those
+ methods.
+      multi_method: A face_interfaces.MultiMethod, or None.
+
+ Returns:
+ A sequence of length three the first element of which is a
+ face_interfaces.Server, the second element of which is a
+ face_interfaces.Stub, (both of which are backed by the given method
+ implementations), and the third element of which is an arbitrary memo
+ object to be kept and passed to tearDownImplementation at the conclusion
+ of the test.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def tear_down_implementation(self, memo):
+ """Destroys the Face layer implementation under test.
+
+ Args:
+ memo: The object from the third position of the return value of
+ set_up_implementation.
+ """
+ raise NotImplementedError()
diff --git a/src/python/_framework/foundation/_later_test.py b/src/python/_framework/foundation/_later_test.py
new file mode 100644
index 0000000000..fbd17a4ad9
--- /dev/null
+++ b/src/python/_framework/foundation/_later_test.py
@@ -0,0 +1,145 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Tests of the later module."""
+
+import threading
+import time
+import unittest
+
+from _framework.foundation import future
+from _framework.foundation import later
+
+TICK = 0.1
+
+
+class LaterTest(unittest.TestCase):
+
+ def test_simple_delay(self):
+ lock = threading.Lock()
+ cell = [0]
+ def increment_cell():
+ with lock:
+ cell[0] += 1
+ computation_future = later.later(TICK * 2, increment_cell)
+ self.assertFalse(computation_future.done())
+ self.assertFalse(computation_future.cancelled())
+ time.sleep(TICK)
+ self.assertFalse(computation_future.done())
+ self.assertFalse(computation_future.cancelled())
+ with lock:
+ self.assertEqual(0, cell[0])
+ time.sleep(TICK * 2)
+ self.assertTrue(computation_future.done())
+ self.assertFalse(computation_future.cancelled())
+ with lock:
+ self.assertEqual(1, cell[0])
+ outcome = computation_future.outcome()
+ self.assertEqual(future.RETURNED, outcome.category)
+
+ def test_callback(self):
+ lock = threading.Lock()
+ cell = [0]
+ callback_called = [False]
+ outcome_passed_to_callback = [None]
+ def increment_cell():
+ with lock:
+ cell[0] += 1
+ computation_future = later.later(TICK * 2, increment_cell)
+ def callback(outcome):
+ with lock:
+ callback_called[0] = True
+ outcome_passed_to_callback[0] = outcome
+ computation_future.add_done_callback(callback)
+ time.sleep(TICK)
+ with lock:
+ self.assertFalse(callback_called[0])
+ time.sleep(TICK * 2)
+ with lock:
+ self.assertTrue(callback_called[0])
+ self.assertEqual(future.RETURNED, outcome_passed_to_callback[0].category)
+
+ callback_called[0] = False
+ outcome_passed_to_callback[0] = None
+
+ computation_future.add_done_callback(callback)
+ with lock:
+ self.assertTrue(callback_called[0])
+ self.assertEqual(future.RETURNED, outcome_passed_to_callback[0].category)
+
+ def test_cancel(self):
+ lock = threading.Lock()
+ cell = [0]
+ callback_called = [False]
+ outcome_passed_to_callback = [None]
+ def increment_cell():
+ with lock:
+ cell[0] += 1
+ computation_future = later.later(TICK * 2, increment_cell)
+ def callback(outcome):
+ with lock:
+ callback_called[0] = True
+ outcome_passed_to_callback[0] = outcome
+ computation_future.add_done_callback(callback)
+ time.sleep(TICK)
+ with lock:
+ self.assertFalse(callback_called[0])
+ computation_future.cancel()
+ self.assertTrue(computation_future.cancelled())
+ self.assertFalse(computation_future.done())
+ self.assertEqual(future.ABORTED, computation_future.outcome().category)
+ with lock:
+ self.assertTrue(callback_called[0])
+ self.assertEqual(future.ABORTED, outcome_passed_to_callback[0].category)
+
+ def test_outcome(self):
+ lock = threading.Lock()
+ cell = [0]
+ callback_called = [False]
+ outcome_passed_to_callback = [None]
+ def increment_cell():
+ with lock:
+ cell[0] += 1
+ computation_future = later.later(TICK * 2, increment_cell)
+ def callback(outcome):
+ with lock:
+ callback_called[0] = True
+ outcome_passed_to_callback[0] = outcome
+ computation_future.add_done_callback(callback)
+ returned_outcome = computation_future.outcome()
+ self.assertEqual(future.RETURNED, returned_outcome.category)
+
+ # The callback may not yet have been called! Sleep a tick.
+ time.sleep(TICK)
+ with lock:
+ self.assertTrue(callback_called[0])
+ self.assertEqual(future.RETURNED, outcome_passed_to_callback[0].category)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/src/python/_framework/foundation/_logging_pool_test.py b/src/python/_framework/foundation/_logging_pool_test.py
index ffe07c788d..f2224d80e5 100644
--- a/src/python/_framework/foundation/_logging_pool_test.py
+++ b/src/python/_framework/foundation/_logging_pool_test.py
@@ -27,7 +27,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-"""Tests for google3.net.rpc.python.framework.foundation.logging_pool."""
+"""Tests for _framework.foundation.logging_pool."""
import unittest
diff --git a/src/python/_framework/foundation/_timer_future.py b/src/python/_framework/foundation/_timer_future.py
new file mode 100644
index 0000000000..86bc073d56
--- /dev/null
+++ b/src/python/_framework/foundation/_timer_future.py
@@ -0,0 +1,156 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Affords a Future implementation based on Python's threading.Timer."""
+
+import threading
+import time
+
+from _framework.foundation import future
+
+
+class TimerFuture(future.Future):
+ """A Future implementation based around Timer objects."""
+
+ def __init__(self, compute_time, computation):
+ """Constructor.
+
+ Args:
+ compute_time: The time after which to begin this future's computation.
+ computation: The computation to be performed within this Future.
+ """
+ self._lock = threading.Lock()
+ self._compute_time = compute_time
+ self._computation = computation
+ self._timer = None
+ self._computing = False
+ self._computed = False
+ self._cancelled = False
+ self._outcome = None
+ self._waiting = []
+
+ def _compute(self):
+ """Performs the computation embedded in this Future.
+
+ Or doesn't, if the time to perform it has not yet arrived.
+ """
+ with self._lock:
+ time_remaining = self._compute_time - time.time()
+ if 0 < time_remaining:
+ self._timer = threading.Timer(time_remaining, self._compute)
+ self._timer.start()
+ return
+ else:
+ self._computing = True
+
+ try:
+ returned_value = self._computation()
+ outcome = future.returned(returned_value)
+ except Exception as e: # pylint: disable=broad-except
+ outcome = future.raised(e)
+
+ with self._lock:
+ self._computing = False
+ self._computed = True
+ self._outcome = outcome
+ waiting = self._waiting
+
+ for callback in waiting:
+ callback(outcome)
+
+ def start(self):
+ """Starts this Future.
+
+ This must be called exactly once, immediately after construction.
+ """
+ with self._lock:
+ self._timer = threading.Timer(
+ self._compute_time - time.time(), self._compute)
+ self._timer.start()
+
+ def cancel(self):
+ """See future.Future.cancel for specification."""
+ with self._lock:
+ if self._computing or self._computed:
+ return False
+ elif self._cancelled:
+ return True
+ else:
+ self._timer.cancel()
+ self._cancelled = True
+ self._outcome = future.aborted()
+ outcome = self._outcome
+ waiting = self._waiting
+
+ for callback in waiting:
+ try:
+ callback(outcome)
+ except Exception: # pylint: disable=broad-except
+ pass
+
+ return True
+
+ def cancelled(self):
+ """See future.Future.cancelled for specification."""
+ with self._lock:
+ return self._cancelled
+
+ def done(self):
+ """See future.Future.done for specification."""
+ with self._lock:
+ return self._computed
+
+ def outcome(self):
+ """See future.Future.outcome for specification."""
+ with self._lock:
+ if self._computed or self._cancelled:
+ return self._outcome
+
+ condition = threading.Condition()
+ def notify_condition(unused_outcome):
+ with condition:
+ condition.notify()
+ self._waiting.append(notify_condition)
+
+ with condition:
+ condition.wait()
+
+ with self._lock:
+ return self._outcome
+
+ def add_done_callback(self, callback):
+ """See future.Future.add_done_callback for specification."""
+ with self._lock:
+ if not self._computed and not self._cancelled:
+ self._waiting.append(callback)
+ return
+ else:
+ outcome = self._outcome
+
+ callback(outcome)
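A minimal sketch of driving TimerFuture directly under the same package layout; note that start() must be called exactly once, immediately after construction:

import time

from _framework.foundation import _timer_future

def work():
  return 'done'

timer_future = _timer_future.TimerFuture(time.time() + 1.0, work)
timer_future.start()  # arms the underlying threading.Timer
timer_future.add_done_callback(
    lambda outcome: None)  # invoked with the Outcome once work() completes
print(timer_future.outcome().return_value)  # blocks, then prints 'done'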
diff --git a/src/python/_framework/foundation/abandonment.py b/src/python/_framework/foundation/abandonment.py
new file mode 100644
index 0000000000..960b4d06b4
--- /dev/null
+++ b/src/python/_framework/foundation/abandonment.py
@@ -0,0 +1,38 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Utilities for indicating abandonment of computation."""
+
+
+class Abandoned(Exception):
+ """Indicates that some computation is being abandoned.
+
+ Abandoning a computation is different from returning a value or raising
+ an exception that indicates some operational or programming defect.
+ """
diff --git a/src/python/_framework/foundation/callable_util.py b/src/python/_framework/foundation/callable_util.py
new file mode 100644
index 0000000000..1f7546cb76
--- /dev/null
+++ b/src/python/_framework/foundation/callable_util.py
@@ -0,0 +1,78 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Utilities for working with callables."""
+
+import functools
+import logging
+
+from _framework.foundation import future
+
+
+def _call_logging_exceptions(behavior, message, *args, **kwargs):
+ try:
+ return future.returned(behavior(*args, **kwargs))
+ except Exception as e: # pylint: disable=broad-except
+ logging.exception(message)
+ return future.raised(e)
+
+
+def with_exceptions_logged(behavior, message):
+ """Wraps a callable in a try-except that logs any exceptions it raises.
+
+ Args:
+ behavior: Any callable.
+ message: A string to log if the behavior raises an exception.
+
+ Returns:
+ A callable that when executed invokes the given behavior. The returned
+ callable takes the same arguments as the given behavior but returns a
+ future.Outcome describing whether the given behavior returned a value or
+ raised an exception.
+ """
+ @functools.wraps(behavior)
+ def wrapped_behavior(*args, **kwargs):
+ return _call_logging_exceptions(behavior, message, *args, **kwargs)
+ return wrapped_behavior
+
+
+def call_logging_exceptions(behavior, message, *args, **kwargs):
+ """Calls a behavior in a try-except that logs any exceptions it raises.
+
+ Args:
+ behavior: Any callable.
+ message: A string to log if the behavior raises an exception.
+ *args: Positional arguments to pass to the given behavior.
+ **kwargs: Keyword arguments to pass to the given behavior.
+
+ Returns:
+ A future.Outcome describing whether the given behavior returned a value or
+ raised an exception.
+ """
+ return _call_logging_exceptions(behavior, message, *args, **kwargs)
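A minimal sketch of with_exceptions_logged; divide and the log message are illustrative only:

from _framework.foundation import callable_util
from _framework.foundation import future

def divide(dividend, divisor):
  return dividend / divisor

safe_divide = callable_util.with_exceptions_logged(
    divide, 'Exception dividing!')

ok = safe_divide(6, 3)   # ok.category is future.RETURNED, ok.return_value == 2
bad = safe_divide(6, 0)  # the ZeroDivisionError is logged, not propagated
assert bad.category is future.RAISED
assert isinstance(bad.exception, ZeroDivisionError)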
diff --git a/src/python/_framework/foundation/future.py b/src/python/_framework/foundation/future.py
new file mode 100644
index 0000000000..f00c503257
--- /dev/null
+++ b/src/python/_framework/foundation/future.py
@@ -0,0 +1,172 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""The Future interface missing from Python's standard library.
+
+Python's concurrent.futures library defines a Future class very much like the
+Future defined here, but since that class is concrete and without construction
+semantics it is only available within the concurrent.futures library itself.
+The Future class defined here is an entirely abstract interface that anyone may
+implement and use.
+"""
+
+import abc
+import collections
+
+RETURNED = object()
+RAISED = object()
+ABORTED = object()
+
+
+class Outcome(object):
+ """A sum type describing the outcome of some computation.
+
+ Attributes:
+ category: One of RETURNED, RAISED, or ABORTED, respectively indicating
+ that the computation returned a value, raised an exception, or was
+ aborted.
+ return_value: The value returned by the computation. Must be present if
+ category is RETURNED.
+ exception: The exception raised by the computation. Must be present if
+ category is RAISED.
+ """
+ __metaclass__ = abc.ABCMeta
+
+
+class _EasyOutcome(
+ collections.namedtuple('_EasyOutcome',
+ ['category', 'return_value', 'exception']),
+ Outcome):
+ """A trivial implementation of Outcome."""
+
+# All Outcomes describing abortion are indistinguishable so there might as well
+# be only one.
+_ABORTED_OUTCOME = _EasyOutcome(ABORTED, None, None)
+
+
+def aborted():
+ """Returns an Outcome indicating that a computation was aborted.
+
+ Returns:
+ An Outcome indicating that a computation was aborted.
+ """
+ return _ABORTED_OUTCOME
+
+
+def raised(exception):
+ """Returns an Outcome indicating that a computation raised an exception.
+
+ Args:
+ exception: The exception raised by the computation.
+
+ Returns:
+ An Outcome indicating that a computation raised the given exception.
+ """
+ return _EasyOutcome(RAISED, None, exception)
+
+
+def returned(value):
+ """Returns an Outcome indicating that a computation returned a value.
+
+ Args:
+ value: The value returned by the computation.
+
+ Returns:
+ An Outcome indicating that a computation returned the given value.
+ """
+ return _EasyOutcome(RETURNED, value, None)
+
+
+class Future(object):
+ """A representation of a computation happening in another control flow.
+
+ Computations represented by a Future may have already completed, may be
+ ongoing, or may not yet have begun.
+
+ Computations represented by a Future are considered uninterruptable; once
+ started they will be allowed to terminate either by returning or raising
+ an exception.
+ """
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def cancel(self):
+ """Attempts to cancel the computation.
+
+ Returns:
+ True if the computation will not be allowed to take place or False if
+ the computation has already taken place or is currently taking place.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def cancelled(self):
+ """Describes whether the computation was cancelled.
+
+ Returns:
+ True if the computation was cancelled and did not take place or False
+ if the computation took place, is taking place, or is scheduled to
+ take place in the future.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def done(self):
+ """Describes whether the computation has taken place.
+
+ Returns:
+ True if the computation took place; False otherwise.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def outcome(self):
+ """Accesses the outcome of the computation.
+
+ If the computation has not yet completed, this method blocks until it has.
+
+ Returns:
+ An Outcome describing the outcome of the computation.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_done_callback(self, callback):
+ """Adds a function to be called at completion of the computation.
+
+ The callback will be passed an Outcome object describing the outcome of
+ the computation.
+
+ If the computation has already completed, the callback will be called
+ immediately.
+
+ Args:
+ callback: A callable taking an Outcome as its single parameter.
+ """
+ raise NotImplementedError()
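A sketch of one possible implementation of this interface: an already-completed future built from the Outcome helpers above. ImmediateFuture is illustrative and not part of the package:

from _framework.foundation import future

class ImmediateFuture(future.Future):
  """A Future for a computation that has already returned a value."""

  def __init__(self, value):
    self._outcome = future.returned(value)

  def cancel(self):
    return False  # the computation has already taken place

  def cancelled(self):
    return False

  def done(self):
    return True

  def outcome(self):
    return self._outcome

  def add_done_callback(self, callback):
    callback(self._outcome)  # already complete, so call back immediately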
diff --git a/src/python/_framework/foundation/later.py b/src/python/_framework/foundation/later.py
new file mode 100644
index 0000000000..fc2cf578d0
--- /dev/null
+++ b/src/python/_framework/foundation/later.py
@@ -0,0 +1,51 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Enables scheduling execution at a later time."""
+
+import time
+
+from _framework.foundation import _timer_future
+
+
+def later(delay, computation):
+ """Schedules later execution of a callable.
+
+ Args:
+ delay: Any numeric value. Represents the minimum length of time in seconds
+ to allow to pass before beginning the computation. No guarantees are made
+ about the maximum length of time that will pass.
+ computation: A callable that accepts no arguments.
+
+ Returns:
+ A Future representing the scheduled computation.
+ """
+ timer_future = _timer_future.TimerFuture(time.time() + delay, computation)
+ timer_future.start()
+ return timer_future
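A minimal sketch of the cancellation side of later(); fire() is an illustrative callable:

from _framework.foundation import later

def fire():
  print('fired')

# fire() will run no sooner than two seconds from now; until the computation
# actually starts, the returned Future can still be cancelled.
scheduled = later.later(2.0, fire)
if scheduled.cancel():
  print('cancelled before it ran')  # cancel() returns True on success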
diff --git a/src/python/_framework/foundation/stream.py b/src/python/_framework/foundation/stream.py
new file mode 100644
index 0000000000..75c0cf145b
--- /dev/null
+++ b/src/python/_framework/foundation/stream.py
@@ -0,0 +1,60 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Interfaces related to streams of values or objects."""
+
+import abc
+
+
+class Consumer(object):
+ """Interface for consumers of finite streams of values or objects."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def consume(self, value):
+ """Accepts a value.
+
+ Args:
+ value: Any value accepted by this Consumer.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def terminate(self):
+ """Indicates to this Consumer that no more values will be supplied."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def consume_and_terminate(self, value):
+ """Supplies a value and signals that no more values will be supplied.
+
+ Args:
+ value: Any value accepted by this Consumer.
+ """
+ raise NotImplementedError()
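A minimal concrete Consumer, illustrative only, showing the three methods an implementation must provide:

from _framework.foundation import stream

class PrintingConsumer(stream.Consumer):
  """A Consumer that simply prints every value it is fed."""

  def consume(self, value):
    print(value)

  def terminate(self):
    print('<end of stream>')

  def consume_and_terminate(self, value):
    self.consume(value)
    self.terminate()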
diff --git a/src/python/_framework/foundation/stream_testing.py b/src/python/_framework/foundation/stream_testing.py
new file mode 100644
index 0000000000..c1acedc5c6
--- /dev/null
+++ b/src/python/_framework/foundation/stream_testing.py
@@ -0,0 +1,73 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Utilities for testing stream-related code."""
+
+from _framework.foundation import stream
+
+
+class TestConsumer(stream.Consumer):
+ """A stream.Consumer instrumented for testing.
+
+ Attributes:
+ calls: A sequence of value-termination pairs describing the history of calls
+ made on this object.
+ """
+
+ def __init__(self):
+ self.calls = []
+
+ def consume(self, value):
+ """See stream.Consumer.consume for specification."""
+ self.calls.append((value, False))
+
+ def terminate(self):
+ """See stream.Consumer.terminate for specification."""
+ self.calls.append((None, True))
+
+ def consume_and_terminate(self, value):
+ """See stream.Consumer.consume_and_terminate for specification."""
+ self.calls.append((value, True))
+
+ def is_legal(self):
+ """Reports whether or not a legal sequence of calls has been made."""
+ terminated = False
+ for value, terminal in self.calls:
+ if terminated:
+ return False
+ elif terminal:
+ terminated = True
+ elif value is None:
+ return False
+ else: # pylint: disable=useless-else-on-loop
+ return True
+
+ def values(self):
+ """Returns the sequence of values that have been passed to this Consumer."""
+ return [value for value, _ in self.calls if value]
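A sketch of how a test might use TestConsumer to assert on the call history produced by code under test:

from _framework.foundation import stream_testing

consumer = stream_testing.TestConsumer()
consumer.consume('a')
consumer.consume_and_terminate('b')

assert consumer.is_legal()  # no values were supplied after termination
assert consumer.values() == ['a', 'b']
assert consumer.calls == [('a', False), ('b', True)]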
diff --git a/src/python/_framework/foundation/stream_util.py b/src/python/_framework/foundation/stream_util.py
new file mode 100644
index 0000000000..3a9c043316
--- /dev/null
+++ b/src/python/_framework/foundation/stream_util.py
@@ -0,0 +1,160 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Helpful utilities related to the stream module."""
+
+import logging
+import threading
+
+from _framework.foundation import stream
+
+_NO_VALUE = object()
+
+
+class TransformingConsumer(stream.Consumer):
+ """A stream.Consumer that passes a transformation of its input to another."""
+
+ def __init__(self, transformation, downstream):
+ self._transformation = transformation
+ self._downstream = downstream
+
+ def consume(self, value):
+ self._downstream.consume(self._transformation(value))
+
+ def terminate(self):
+ self._downstream.terminate()
+
+ def consume_and_terminate(self, value):
+ self._downstream.consume_and_terminate(self._transformation(value))
+
+
+class IterableConsumer(stream.Consumer):
+ """A Consumer that when iterated over emits the values it has consumed."""
+
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._values = []
+ self._active = True
+
+ def consume(self, stock_reply):
+ with self._condition:
+ if self._active:
+ self._values.append(stock_reply)
+ self._condition.notify()
+
+ def terminate(self):
+ with self._condition:
+ self._active = False
+ self._condition.notify()
+
+ def consume_and_terminate(self, stock_reply):
+ with self._condition:
+ if self._active:
+ self._values.append(stock_reply)
+ self._active = False
+ self._condition.notify()
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ with self._condition:
+ while self._active and not self._values:
+ self._condition.wait()
+ if self._values:
+ return self._values.pop(0)
+ else:
+ raise StopIteration()
+
+
+class ThreadSwitchingConsumer(stream.Consumer):
+ """A Consumer decorator that affords serialization and asynchrony."""
+
+ def __init__(self, sink, pool):
+ self._lock = threading.Lock()
+ self._sink = sink
+ self._pool = pool
+ # True if self._spin has been submitted to the pool to be called once and
+ # that call has not yet returned, False otherwise.
+ self._spinning = False
+ self._values = []
+ self._active = True
+
+ def _spin(self, sink, value, terminate):
+ while True:
+ try:
+ if value is _NO_VALUE:
+ sink.terminate()
+ elif terminate:
+ sink.consume_and_terminate(value)
+ else:
+ sink.consume(value)
+ except Exception as e: # pylint:disable=broad-except
+ logging.exception(e)
+
+ with self._lock:
+ if terminate:
+ self._spinning = False
+ return
+ elif self._values:
+ value = self._values.pop(0)
+ terminate = not self._values and not self._active
+ elif not self._active:
+ value = _NO_VALUE
+ terminate = True
+ else:
+ self._spinning = False
+ return
+
+ def consume(self, value):
+ with self._lock:
+ if self._active:
+ if self._spinning:
+ self._values.append(value)
+ else:
+ self._pool.submit(self._spin, self._sink, value, False)
+ self._spinning = True
+
+ def terminate(self):
+ with self._lock:
+ if self._active:
+ self._active = False
+ if not self._spinning:
+ self._pool.submit(self._spin, self._sink, _NO_VALUE, True)
+ self._spinning = True
+
+ def consume_and_terminate(self, value):
+ with self._lock:
+ if self._active:
+ self._active = False
+ if self._spinning:
+ self._values.append(value)
+ else:
+ self._pool.submit(self._spin, self._sink, value, True)
+ self._spinning = True
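A minimal sketch composing TransformingConsumer with the TestConsumer shown earlier; the doubling transformation is illustrative:

from _framework.foundation import stream_testing
from _framework.foundation import stream_util

downstream = stream_testing.TestConsumer()
doubler = stream_util.TransformingConsumer(lambda x: x * 2, downstream)
doubler.consume(3)
doubler.consume_and_terminate(5)
assert downstream.values() == [6, 10]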
diff --git a/src/python/_junkdrawer/__init__.py b/src/python/_junkdrawer/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/src/python/_junkdrawer/__init__.py
diff --git a/src/python/_junkdrawer/stock_pb2.py b/src/python/_junkdrawer/stock_pb2.py
new file mode 100644
index 0000000000..eef18f82d6
--- /dev/null
+++ b/src/python/_junkdrawer/stock_pb2.py
@@ -0,0 +1,152 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# TODO(nathaniel): Remove this from source control after having made
+# generation from the stock.proto source part of GRPC's build-and-test
+# process.
+
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: stock.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='stock.proto',
+ package='stock',
+ serialized_pb=_b('\n\x0bstock.proto\x12\x05stock\">\n\x0cStockRequest\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12\x1e\n\x13num_trades_to_watch\x18\x02 \x01(\x05:\x01\x30\"+\n\nStockReply\x12\r\n\x05price\x18\x01 \x01(\x02\x12\x0e\n\x06symbol\x18\x02 \x01(\t2\x96\x02\n\x05Stock\x12=\n\x11GetLastTradePrice\x12\x13.stock.StockRequest\x1a\x11.stock.StockReply\"\x00\x12I\n\x19GetLastTradePriceMultiple\x12\x13.stock.StockRequest\x1a\x11.stock.StockReply\"\x00(\x01\x30\x01\x12?\n\x11WatchFutureTrades\x12\x13.stock.StockRequest\x1a\x11.stock.StockReply\"\x00\x30\x01\x12\x42\n\x14GetHighestTradePrice\x12\x13.stock.StockRequest\x1a\x11.stock.StockReply\"\x00(\x01')
+)
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+
+
+
+_STOCKREQUEST = _descriptor.Descriptor(
+ name='StockRequest',
+ full_name='stock.StockRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='symbol', full_name='stock.StockRequest.symbol', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='num_trades_to_watch', full_name='stock.StockRequest.num_trades_to_watch', index=1,
+ number=2, type=5, cpp_type=1, label=1,
+ has_default_value=True, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=22,
+ serialized_end=84,
+)
+
+
+_STOCKREPLY = _descriptor.Descriptor(
+ name='StockReply',
+ full_name='stock.StockReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='price', full_name='stock.StockReply.price', index=0,
+ number=1, type=2, cpp_type=6, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='symbol', full_name='stock.StockReply.symbol', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=86,
+ serialized_end=129,
+)
+
+DESCRIPTOR.message_types_by_name['StockRequest'] = _STOCKREQUEST
+DESCRIPTOR.message_types_by_name['StockReply'] = _STOCKREPLY
+
+StockRequest = _reflection.GeneratedProtocolMessageType('StockRequest', (_message.Message,), dict(
+ DESCRIPTOR = _STOCKREQUEST,
+ __module__ = 'stock_pb2'
+ # @@protoc_insertion_point(class_scope:stock.StockRequest)
+ ))
+_sym_db.RegisterMessage(StockRequest)
+
+StockReply = _reflection.GeneratedProtocolMessageType('StockReply', (_message.Message,), dict(
+ DESCRIPTOR = _STOCKREPLY,
+ __module__ = 'stock_pb2'
+ # @@protoc_insertion_point(class_scope:stock.StockReply)
+ ))
+_sym_db.RegisterMessage(StockReply)
+
+
+# @@protoc_insertion_point(module_scope)
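A minimal sketch of using these generated messages, assuming the protobuf runtime is installed and src/python is on the import path:

from _junkdrawer import stock_pb2

request = stock_pb2.StockRequest(symbol='GOOG', num_trades_to_watch=3)
serialized = request.SerializeToString()
parsed = stock_pb2.StockRequest.FromString(serialized)
assert parsed.symbol == 'GOOG'
assert parsed.num_trades_to_watch == 3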
diff --git a/src/ruby/README.md b/src/ruby/README.md
index 7f7558dc67..7ece7e2706 100755
--- a/src/ruby/README.md
+++ b/src/ruby/README.md
@@ -14,9 +14,10 @@ INSTALLING
----------
- Install the gRPC core library
-TODO: describe this, once the core distribution mechanism is defined.
-
+ TODO: describe this, once the core distribution mechanism is defined.
+```
$ gem install grpc
+```
Installing from source
@@ -24,37 +25,47 @@ Installing from source
- Build or Install the gRPC core
E.g, from the root of the grpc [git repo](https://github.com/google/grpc)
+```
$ cd ../..
$ make && sudo make install
+```
- Install Ruby 2.x. Consider doing this with [RVM](http://rvm.io), it's a nice way of controlling
the exact ruby version that's used.
+```
$ command curl -sSL https://rvm.io/mpapis.asc | gpg --import -
$ \curl -sSL https://get.rvm.io | bash -s stable --ruby
$
$ # follow the instructions to ensure that you're using the latest stable version of Ruby
$ # and that the rvm command is installed
+```
- Install [bundler](http://bundler.io/)
+```
$ gem install bundler
+```
- Finally, install grpc ruby locally.
+```
$ cd <install_dir>
$ bundle install
$ rake # compiles the extension, runs the unit tests, see rake -T for other options
-
+```
CONTENTS
--------
Directory structure is the layout for [ruby extensions](http://guides.rubygems.org/gems-with-extensions/)
- * ext: the extension code
- * lib: the entrypoint grpc ruby library to be used in a 'require' statement
- * spec: tests
- * bin: example gRPC clients and servers, e.g,
+- ext:
+ the gRPC ruby extension
+- lib:
+ the entrypoint grpc ruby library to be used in a 'require' statement
+- spec:
+ RSpec unit tests
+- bin:
+ example gRPC clients and servers, e.g.,
```ruby
-# client
stub = Math::Math::Stub.new('my.test.math.server.com:8080')
req = Math::DivArgs.new(dividend: 7, divisor: 3)
logger.info("div(7/3): req=#{req.inspect}")
diff --git a/src/ruby/bin/interop/interop_client.rb b/src/ruby/bin/interop/interop_client.rb
index 0ea7f376be..86739b7b67 100755
--- a/src/ruby/bin/interop/interop_client.rb
+++ b/src/ruby/bin/interop/interop_client.rb
@@ -54,6 +54,8 @@ require 'test/cpp/interop/test_services'
require 'test/cpp/interop/messages'
require 'test/cpp/interop/empty'
+require 'signet/ssl_config'
+
# loads the certificates used to access the test server securely.
def load_test_certs
this_dir = File.expand_path(File.dirname(__FILE__))
@@ -62,21 +64,49 @@ def load_test_certs
files.map { |f| File.open(File.join(data_dir, f)).read }
end
+# loads the certificates used to access the test server securely.
+def load_prod_cert
+ fail 'could not find a production cert' if ENV['SSL_CERT_FILE'].nil?
+ p "loading prod certs from #{ENV['SSL_CERT_FILE']}"
+ File.open(ENV['SSL_CERT_FILE']).read
+end
+
# creates a Credentials from the test certificates.
def test_creds
certs = load_test_certs
GRPC::Core::Credentials.new(certs[0])
end
+RX_CERT = /-----BEGIN CERTIFICATE-----\n.*?-----END CERTIFICATE-----\n/m
+
+
+# creates a Credentials from the production certificates.
+def prod_creds
+ cert_text = load_prod_cert
+ GRPC::Core::Credentials.new(cert_text)
+end
+
# creates a test stub that accesses host:port securely.
-def create_stub(host, port)
+def create_stub(host, port, is_secure, host_override, use_test_ca)
address = "#{host}:#{port}"
- stub_opts = {
- :creds => test_creds,
- GRPC::Core::Channel::SSL_TARGET => 'foo.test.google.com'
- }
- logger.info("... connecting securely to #{address}")
- Grpc::Testing::TestService::Stub.new(address, **stub_opts)
+ if is_secure
+ creds = nil
+ if use_test_ca
+ creds = test_creds
+ else
+ creds = prod_creds
+ end
+
+ stub_opts = {
+ :creds => creds,
+ GRPC::Core::Channel::SSL_TARGET => host_override
+ }
+ logger.info("... connecting securely to #{address}")
+ Grpc::Testing::TestService::Stub.new(address, **stub_opts)
+ else
+ logger.info("... connecting insecurely to #{address}")
+ Grpc::Testing::TestService::Stub.new(address)
+ end
end
# produces a string of null chars (\0) of length l.
@@ -133,20 +163,12 @@ class NamedTests
@stub = stub
end
- # TESTING
- # PASSED
- # FAIL
- # ruby server: fails protobuf-ruby can't pass an empty message
def empty_unary
resp = @stub.empty_call(Empty.new)
assert resp.is_a?(Empty), 'empty_unary: invalid response'
p 'OK: empty_unary'
end
- # TESTING
- # PASSED
- # ruby server
- # FAILED
def large_unary
req_size, wanted_response_size = 271_828, 314_159
payload = Payload.new(type: :COMPRESSABLE, body: nulls(req_size))
@@ -163,10 +185,6 @@ class NamedTests
p 'OK: large_unary'
end
- # TESTING:
- # PASSED
- # ruby server
- # FAILED
def client_streaming
msg_sizes = [27_182, 8, 1828, 45_904]
wanted_aggregate_size = 74_922
@@ -180,10 +198,6 @@ class NamedTests
p 'OK: client_streaming'
end
- # TESTING:
- # PASSED
- # ruby server
- # FAILED
def server_streaming
msg_sizes = [31_415, 9, 2653, 58_979]
response_spec = msg_sizes.map { |s| ResponseParameters.new(size: s) }
@@ -200,10 +214,6 @@ class NamedTests
p 'OK: server_streaming'
end
- # TESTING:
- # PASSED
- # ruby server
- # FAILED
def ping_pong
msg_sizes = [[27_182, 31_415], [8, 9], [1828, 2653], [45_904, 58_979]]
ppp = PingPongPlayer.new(msg_sizes)
@@ -211,12 +221,23 @@ class NamedTests
resps.each { |r| ppp.queue.push(r) }
p 'OK: ping_pong'
end
+
+ def all
+ all_methods = NamedTests.instance_methods(false).map(&:to_s)
+ all_methods.each do |m|
+ next if m == 'all' || m.start_with?('assert')
+ p "TESTCASE: #{m}"
+ method(m).call
+ end
+ end
end
# validates the command line options, returning them as a Hash.
def parse_options
options = {
+ 'secure' => false,
'server_host' => nil,
+ 'server_host_override' => nil,
'server_port' => nil,
'test_case' => nil
}
@@ -225,6 +246,10 @@ def parse_options
opts.on('--server_host SERVER_HOST', 'server hostname') do |v|
options['server_host'] = v
end
+ opts.on('--server_host_override HOST_OVERRIDE',
+ 'override host via a HTTP header') do |v|
+ options['server_host_override'] = v
+ end
opts.on('--server_port SERVER_PORT', 'server port') do |v|
options['server_port'] = v
end
@@ -235,19 +260,33 @@ def parse_options
" (#{test_case_list})") do |v|
options['test_case'] = v
end
+ opts.on('-s', '--use_tls', 'require a secure connection?') do |v|
+ options['secure'] = v
+ end
+ opts.on('-t', '--use_test_ca',
+ 'if secure, use the test certificate?') do |v|
+ options['use_test_ca'] = v
+ end
end.parse!
+ _check_options(options)
+end
+def _check_options(opts)
%w(server_host server_port test_case).each do |arg|
- if options[arg].nil?
+ if opts[arg].nil?
fail(OptionParser::MissingArgument, "please specify --#{arg}")
end
end
- options
+ if opts['server_host_override'].nil?
+ opts['server_host_override'] = opts['server_host']
+ end
+ opts
end
def main
opts = parse_options
- stub = create_stub(opts['server_host'], opts['server_port'])
+ stub = create_stub(opts['server_host'], opts['server_port'], opts['secure'],
+ opts['server_host_override'], opts['use_test_ca'])
NamedTests.new(stub).method(opts['test_case']).call
end
diff --git a/src/ruby/bin/interop/interop_server.rb b/src/ruby/bin/interop/interop_server.rb
index 83212823f6..cc4d260879 100755
--- a/src/ruby/bin/interop/interop_server.rb
+++ b/src/ruby/bin/interop/interop_server.rb
@@ -154,13 +154,17 @@ end
# validates the command line options, returning them as a Hash.
def parse_options
options = {
- 'port' => nil
+ 'port' => nil,
+ 'secure' => false
}
OptionParser.new do |opts|
opts.banner = 'Usage: --port port'
opts.on('--port PORT', 'server port') do |v|
options['port'] = v
end
+ opts.on('-s', '--use_tls', 'require a secure connection?') do |v|
+ options['secure'] = v
+ end
end.parse!
if options['port'].nil?
@@ -172,10 +176,15 @@ end
def main
opts = parse_options
host = "0.0.0.0:#{opts['port']}"
- s = GRPC::RpcServer.new(creds: test_server_creds)
- s.add_http2_port(host, true)
- logger.info("... running securely on #{host}")
-
+ if opts['secure']
+ s = GRPC::RpcServer.new(creds: test_server_creds)
+ s.add_http2_port(host, true)
+ logger.info("... running securely on #{host}")
+ else
+ s = GRPC::RpcServer.new
+ s.add_http2_port(host)
+ logger.info("... running insecurely on #{host}")
+ end
s.handle(TestTarget)
s.run
end
diff --git a/src/ruby/ext/grpc/rb_call.c b/src/ruby/ext/grpc/rb_call.c
index 76b80bcaa1..1b6565f729 100644
--- a/src/ruby/ext/grpc/rb_call.c
+++ b/src/ruby/ext/grpc/rb_call.c
@@ -153,7 +153,7 @@ int grpc_rb_call_add_metadata_hash_cb(VALUE key, VALUE val, VALUE call_obj) {
Add metadata elements to the call from a ruby hash, to be sent upon
invocation. flags is a bit-field combination of the write flags defined
- above. REQUIRES: grpc_call_start_invoke/grpc_call_accept have not been
+ above. REQUIRES: grpc_call_invoke/grpc_call_accept have not been
called on this call. Produces no events. */
static VALUE grpc_rb_call_add_metadata(int argc, VALUE *argv, VALUE self) {
@@ -196,16 +196,15 @@ static VALUE grpc_rb_call_cancel(VALUE self) {
/*
call-seq:
- call.start_invoke(completion_queue, tag, flags=nil)
+ call.invoke(completion_queue, tag, flags=nil)
Invoke the RPC. Starts sending metadata and request headers on the wire.
flags is a bit-field combination of the write flags defined above.
REQUIRES: Can be called at most once per call.
Can only be called on the client.
Produces a GRPC_INVOKE_ACCEPTED event on completion. */
-static VALUE grpc_rb_call_start_invoke(int argc, VALUE *argv, VALUE self) {
+static VALUE grpc_rb_call_invoke(int argc, VALUE *argv, VALUE self) {
VALUE cqueue = Qnil;
- VALUE invoke_accepted_tag = Qnil;
VALUE metadata_read_tag = Qnil;
VALUE finished_tag = Qnil;
VALUE flags = Qnil;
@@ -213,17 +212,16 @@ static VALUE grpc_rb_call_start_invoke(int argc, VALUE *argv, VALUE self) {
grpc_completion_queue *cq = NULL;
grpc_call_error err;
- /* "41" == 4 mandatory args, 1 (flags) is optional */
- rb_scan_args(argc, argv, "41", &cqueue, &invoke_accepted_tag,
- &metadata_read_tag, &finished_tag, &flags);
+ /* "31" == 3 mandatory args, 1 (flags) is optional */
+ rb_scan_args(argc, argv, "31", &cqueue, &metadata_read_tag, &finished_tag,
+ &flags);
if (NIL_P(flags)) {
flags = UINT2NUM(0); /* Default to no flags */
}
cq = grpc_rb_get_wrapped_completion_queue(cqueue);
Data_Get_Struct(self, grpc_call, call);
- err = grpc_call_start_invoke(call, cq, ROBJECT(invoke_accepted_tag),
- ROBJECT(metadata_read_tag),
- ROBJECT(finished_tag), NUM2UINT(flags));
+ err = grpc_call_invoke(call, cq, ROBJECT(metadata_read_tag),
+ ROBJECT(finished_tag), NUM2UINT(flags));
if (err != GRPC_CALL_OK) {
rb_raise(rb_eCallError, "invoke failed: %s (code=%d)",
grpc_call_error_detail_of(err), err);
@@ -519,7 +517,7 @@ void Init_google_rpc_call() {
grpc_rb_call_server_end_initial_metadata, -1);
rb_define_method(rb_cCall, "add_metadata", grpc_rb_call_add_metadata, -1);
rb_define_method(rb_cCall, "cancel", grpc_rb_call_cancel, 0);
- rb_define_method(rb_cCall, "start_invoke", grpc_rb_call_start_invoke, -1);
+ rb_define_method(rb_cCall, "invoke", grpc_rb_call_invoke, -1);
rb_define_method(rb_cCall, "start_read", grpc_rb_call_start_read, 1);
rb_define_method(rb_cCall, "start_write", grpc_rb_call_start_write, -1);
rb_define_method(rb_cCall, "start_write_status",
diff --git a/src/ruby/ext/grpc/rb_credentials.c b/src/ruby/ext/grpc/rb_credentials.c
index 31f47f3b76..43cc21aeca 100644
--- a/src/ruby/ext/grpc/rb_credentials.c
+++ b/src/ruby/ext/grpc/rb_credentials.c
@@ -214,6 +214,8 @@ static VALUE grpc_rb_credentials_init(int argc, VALUE *argv, VALUE self) {
VALUE pem_cert_chain = Qnil;
grpc_rb_credentials *wrapper = NULL;
grpc_credentials *creds = NULL;
+ grpc_ssl_pem_key_cert_pair key_cert_pair;
+ MEMZERO(&key_cert_pair, grpc_ssl_pem_key_cert_pair, 1);
/* TODO: Remove mandatory arg when we support default roots. */
/* "12" == 1 mandatory arg, 2 (credentials) is optional */
rb_scan_args(argc, argv, "12", &pem_root_certs, &pem_private_key,
@@ -228,8 +230,8 @@ static VALUE grpc_rb_credentials_init(int argc, VALUE *argv, VALUE self) {
if (pem_private_key == Qnil && pem_cert_chain == Qnil) {
creds = grpc_ssl_credentials_create(RSTRING_PTR(pem_root_certs), NULL);
} else {
- grpc_ssl_pem_key_cert_pair key_cert_pair = {RSTRING_PTR(pem_private_key),
- RSTRING_PTR(pem_cert_chain)};
+ key_cert_pair.private_key = RSTRING_PTR(pem_private_key);
+ key_cert_pair.cert_chain = RSTRING_PTR(pem_cert_chain);
creds = grpc_ssl_credentials_create(
RSTRING_PTR(pem_root_certs), &key_cert_pair);
}
diff --git a/src/ruby/ext/grpc/rb_event.c b/src/ruby/ext/grpc/rb_event.c
index 0fae9502c3..a1ab6251c8 100644
--- a/src/ruby/ext/grpc/rb_event.c
+++ b/src/ruby/ext/grpc/rb_event.c
@@ -105,10 +105,6 @@ static VALUE grpc_rb_event_type(VALUE self) {
case GRPC_READ:
return rb_const_get(rb_mCompletionType, rb_intern("READ"));
- case GRPC_INVOKE_ACCEPTED:
- grpc_rb_event_result(self); /* validates the result */
- return rb_const_get(rb_mCompletionType, rb_intern("INVOKE_ACCEPTED"));
-
case GRPC_WRITE_ACCEPTED:
grpc_rb_event_result(self); /* validates the result */
return rb_const_get(rb_mCompletionType, rb_intern("WRITE_ACCEPTED"));
@@ -359,6 +355,8 @@ void Init_google_rpc_event() {
rb_define_const(rb_mCompletionType, "FINISHED", INT2NUM(GRPC_FINISHED));
rb_define_const(rb_mCompletionType, "SERVER_RPC_NEW",
INT2NUM(GRPC_SERVER_RPC_NEW));
+ rb_define_const(rb_mCompletionType, "SERVER_SHUTDOWN",
+ INT2NUM(GRPC_SERVER_SHUTDOWN));
rb_define_const(rb_mCompletionType, "RESERVED",
INT2NUM(GRPC_COMPLETION_DO_NOT_USE));
}
diff --git a/src/ruby/ext/grpc/rb_server.c b/src/ruby/ext/grpc/rb_server.c
index ef2a9f107b..436d006760 100644
--- a/src/ruby/ext/grpc/rb_server.c
+++ b/src/ruby/ext/grpc/rb_server.c
@@ -223,7 +223,7 @@ static VALUE grpc_rb_server_add_http2_port(int argc, VALUE *argv, VALUE self) {
VALUE port = Qnil;
VALUE is_secure = Qnil;
grpc_rb_server *s = NULL;
- int added_ok = 0;
+ int recvd_port = 0;
/* "11" == 1 mandatory args, 1 (is_secure) is optional */
rb_scan_args(argc, argv, "11", &port, &is_secure);
@@ -233,22 +233,22 @@ static VALUE grpc_rb_server_add_http2_port(int argc, VALUE *argv, VALUE self) {
rb_raise(rb_eRuntimeError, "closed!");
return Qnil;
} else if (is_secure == Qnil || TYPE(is_secure) != T_TRUE) {
- added_ok = grpc_server_add_http2_port(s->wrapped, StringValueCStr(port));
- if (added_ok == 0) {
+ recvd_port = grpc_server_add_http2_port(s->wrapped, StringValueCStr(port));
+ if (recvd_port == 0) {
rb_raise(rb_eRuntimeError,
"could not add port %s to server, not sure why",
StringValueCStr(port));
}
} else if (TYPE(is_secure) != T_FALSE) {
- added_ok =
+ recvd_port =
grpc_server_add_secure_http2_port(s->wrapped, StringValueCStr(port));
- if (added_ok == 0) {
+ if (recvd_port == 0) {
rb_raise(rb_eRuntimeError,
"could not add secure port %s to server, not sure why",
StringValueCStr(port));
}
}
- return Qnil;
+ return INT2NUM(recvd_port);
}
void Init_google_rpc_server() {
diff --git a/src/ruby/grpc.gemspec b/src/ruby/grpc.gemspec
index 450362f5a8..ffd084dc91 100755
--- a/src/ruby/grpc.gemspec
+++ b/src/ruby/grpc.gemspec
@@ -22,6 +22,7 @@ Gem::Specification.new do |s|
s.add_dependency 'xray'
s.add_dependency 'logging', '~> 1.8'
s.add_dependency 'google-protobuf', '~> 3.0.0alpha.1.1'
+ s.add_dependency 'signet', '~> 0.5.1'
s.add_dependency 'minitest', '~> 5.4' # reqd for interop tests
s.add_development_dependency 'bundler', '~> 1.7'
diff --git a/src/ruby/lib/grpc/generic/active_call.rb b/src/ruby/lib/grpc/generic/active_call.rb
index bd684a8d07..6c2b6e91c2 100644
--- a/src/ruby/lib/grpc/generic/active_call.rb
+++ b/src/ruby/lib/grpc/generic/active_call.rb
@@ -47,7 +47,7 @@ module Google
include Core::TimeConsts
attr_reader(:deadline)
- # client_start_invoke begins a client invocation.
+ # client_invoke begins a client invocation.
#
# Flow Control note: this blocks until flow control accepts that client
# request can go ahead.
@@ -59,34 +59,26 @@ module Google
# if a keyword value is a list, multiple metadata for it's key are sent
#
# @param call [Call] a call on which to start and invocation
- # @param q [CompletionQueue] used to wait for INVOKE_ACCEPTED
- # @param deadline [Fixnum,TimeSpec] the deadline for INVOKE_ACCEPTED
- def self.client_start_invoke(call, q, _deadline, **kw)
+ # @param q [CompletionQueue] the completion queue
+ # @param deadline [Fixnum,TimeSpec] the deadline
+ def self.client_invoke(call, q, _deadline, **kw)
fail(ArgumentError, 'not a call') unless call.is_a? Core::Call
unless q.is_a? Core::CompletionQueue
fail(ArgumentError, 'not a CompletionQueue')
end
call.add_metadata(kw) if kw.length > 0
- invoke_accepted, client_metadata_read = Object.new, Object.new
+ client_metadata_read = Object.new
finished_tag = Object.new
- call.start_invoke(q, invoke_accepted, client_metadata_read,
- finished_tag)
-
- # wait for the invocation to be accepted
- ev = q.pluck(invoke_accepted, INFINITE_FUTURE)
- fail OutOfTime if ev.nil?
- ev.close
-
+ call.invoke(q, client_metadata_read, finished_tag)
[finished_tag, client_metadata_read]
end
# Creates an ActiveCall.
#
- # ActiveCall should only be created after a call is accepted. That means
- # different things on a client and a server. On the client, the call is
- # accepted after call.start_invoke followed by receipt of the
- # corresponding INVOKE_ACCEPTED. on the server, this is after
- # call.accept.
+ # ActiveCall should only be created after a call is accepted. That
+ # means different things on a client and a server. On the client, the
+ # call is accepted after calling call.invoke. On the server, this is
+ # after call.accept.
#
# #initialize cannot determine if the call is accepted or not; so if a
# call that's not accepted is used here, the error won't be visible until
@@ -495,7 +487,7 @@ module Google
private
def start_call(**kw)
- tags = ActiveCall.client_start_invoke(@call, @cq, @deadline, **kw)
+ tags = ActiveCall.client_invoke(@call, @cq, @deadline, **kw)
@finished_tag, @read_metadata_tag = tags
@started = true
end
diff --git a/src/ruby/lib/grpc/generic/bidi_call.rb b/src/ruby/lib/grpc/generic/bidi_call.rb
index 36877dc648..099d57151c 100644
--- a/src/ruby/lib/grpc/generic/bidi_call.rb
+++ b/src/ruby/lib/grpc/generic/bidi_call.rb
@@ -50,9 +50,7 @@ module Google
#
# BidiCall should only be created after a call is accepted. That means
# different things on a client and a server. On the client, the call is
- # accepted after call.start_invoke followed by receipt of the
- # corresponding INVOKE_ACCEPTED. On the server, this is after
- # call.accept.
+ # accepted after call.invoke. On the server, this is after call.accept.
#
# #initialize cannot determine if the call is accepted or not; so if a
# call that's not accepted is used here, the error won't be visible until
diff --git a/src/ruby/spec/call_spec.rb b/src/ruby/spec/call_spec.rb
index b8ecd64f39..c793284488 100644
--- a/src/ruby/spec/call_spec.rb
+++ b/src/ruby/spec/call_spec.rb
@@ -28,7 +28,6 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require 'grpc'
-require 'port_picker'
include GRPC::Core::StatusCodes
@@ -71,16 +70,8 @@ describe GRPC::Core::Call do
before(:each) do
@tag = Object.new
@client_queue = GRPC::Core::CompletionQueue.new
- @server_queue = GRPC::Core::CompletionQueue.new
- port = find_unused_tcp_port
- host = "localhost:#{port}"
- @server = GRPC::Core::Server.new(@server_queue, nil)
- @server.add_http2_port(host)
- @ch = GRPC::Core::Channel.new(host, nil)
- end
-
- after(:each) do
- @server.close
+ fake_host = 'localhost:10101'
+ @ch = GRPC::Core::Channel.new(fake_host, nil)
end
describe '#start_read' do
@@ -122,33 +113,6 @@ describe GRPC::Core::Call do
end
end
- describe '#start_invoke' do
- it 'should cause the INVOKE_ACCEPTED event' do
- call = make_test_call
- expect(call.start_invoke(@client_queue, @tag, @tag, @tag)).to be_nil
- ev = @client_queue.next(deadline)
- expect(ev.call).to be_a(GRPC::Core::Call)
- expect(ev.tag).to be(@tag)
- expect(ev.type).to be(GRPC::Core::CompletionType::INVOKE_ACCEPTED)
- expect(ev.call).to_not be(call)
- end
- end
-
- describe '#start_write' do
- it 'should cause the WRITE_ACCEPTED event' do
- call = make_test_call
- call.start_invoke(@client_queue, @tag, @tag, @tag)
- ev = @client_queue.next(deadline)
- expect(ev.type).to be(GRPC::Core::CompletionType::INVOKE_ACCEPTED)
- expect(call.start_write(GRPC::Core::ByteBuffer.new('test_start_write'),
- @tag)).to be_nil
- ev = @client_queue.next(deadline)
- expect(ev.call).to be_a(GRPC::Core::Call)
- expect(ev.type).to be(GRPC::Core::CompletionType::WRITE_ACCEPTED)
- expect(ev.tag).to be(@tag)
- end
- end
-
describe '#status' do
it 'can save the status and read it back' do
call = make_test_call
diff --git a/src/ruby/spec/channel_spec.rb b/src/ruby/spec/channel_spec.rb
index 820dbd39e9..189d1c67ab 100644
--- a/src/ruby/spec/channel_spec.rb
+++ b/src/ruby/spec/channel_spec.rb
@@ -28,7 +28,8 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require 'grpc'
-require 'port_picker'
+
+FAKE_HOST='localhost:0'
def load_test_certs
test_root = File.join(File.dirname(__FILE__), 'testdata')
@@ -114,8 +115,7 @@ describe GRPC::Core::Channel do
describe '#create_call' do
it 'creates a call OK' do
- port = find_unused_tcp_port
- host = "localhost:#{port}"
+ host = FAKE_HOST
ch = GRPC::Core::Channel.new(host, nil)
deadline = Time.now + 5
@@ -127,8 +127,7 @@ describe GRPC::Core::Channel do
end
it 'raises an error if called on a closed channel' do
- port = find_unused_tcp_port
- host = "localhost:#{port}"
+ host = FAKE_HOST
ch = GRPC::Core::Channel.new(host, nil)
ch.close
@@ -142,16 +141,14 @@ describe GRPC::Core::Channel do
describe '#destroy' do
it 'destroys a channel ok' do
- port = find_unused_tcp_port
- host = "localhost:#{port}"
+ host = FAKE_HOST
ch = GRPC::Core::Channel.new(host, nil)
blk = proc { ch.destroy }
expect(&blk).to_not raise_error
end
it 'can be called more than once without error' do
- port = find_unused_tcp_port
- host = "localhost:#{port}"
+ host = FAKE_HOST
ch = GRPC::Core::Channel.new(host, nil)
blk = proc { ch.destroy }
blk.call
@@ -167,16 +164,14 @@ describe GRPC::Core::Channel do
describe '#close' do
it 'closes a channel ok' do
- port = find_unused_tcp_port
- host = "localhost:#{port}"
+ host = FAKE_HOST
ch = GRPC::Core::Channel.new(host, nil)
blk = proc { ch.close }
expect(&blk).to_not raise_error
end
it 'can be called more than once without error' do
- port = find_unused_tcp_port
- host = "localhost:#{port}"
+ host = FAKE_HOST
ch = GRPC::Core::Channel.new(host, nil)
blk = proc { ch.close }
blk.call
diff --git a/src/ruby/spec/client_server_spec.rb b/src/ruby/spec/client_server_spec.rb
index df70e56bca..96b8ef4300 100644
--- a/src/ruby/spec/client_server_spec.rb
+++ b/src/ruby/spec/client_server_spec.rb
@@ -28,7 +28,6 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require 'grpc'
-require 'port_picker'
require 'spec_helper'
include GRPC::Core::CompletionType
@@ -44,12 +43,13 @@ shared_context 'setup: tags' do
before(:example) do
@server_finished_tag = Object.new
@client_finished_tag = Object.new
+ @client_metadata_tag = Object.new
@server_tag = Object.new
@tag = Object.new
end
def deadline
- Time.now + 0.05
+ Time.now + 2
end
def expect_next_event_on(queue, type, tag)
@@ -63,30 +63,30 @@ shared_context 'setup: tags' do
ev
end
- def server_receives_and_responds_with(reply_text)
- reply = ByteBuffer.new(reply_text)
+ def server_allows_client_to_proceed
@server.request_call(@server_tag)
- ev = @server_queue.pluck(@server_tag, TimeConsts::INFINITE_FUTURE)
+ ev = @server_queue.pluck(@server_tag, deadline)
expect(ev).not_to be_nil
expect(ev.type).to be(SERVER_RPC_NEW)
- ev.call.server_accept(@server_queue, @server_finished_tag)
- ev.call.server_end_initial_metadata
- ev.call.start_read(@server_tag)
+ server_call = ev.call
+ server_call.server_accept(@server_queue, @server_finished_tag)
+ server_call.server_end_initial_metadata
+ server_call
+ end
+
+ def server_responds_with(server_call, reply_text)
+ reply = ByteBuffer.new(reply_text)
+ server_call.start_read(@server_tag)
ev = @server_queue.pluck(@server_tag, TimeConsts::INFINITE_FUTURE)
expect(ev.type).to be(READ)
- ev.call.start_write(reply, @server_tag)
+ server_call.start_write(reply, @server_tag)
ev = @server_queue.pluck(@server_tag, TimeConsts::INFINITE_FUTURE)
expect(ev).not_to be_nil
expect(ev.type).to be(WRITE_ACCEPTED)
- ev.call
end
def client_sends(call, sent = 'a message')
req = ByteBuffer.new(sent)
- call.start_invoke(@client_queue, @tag, @tag, @client_finished_tag)
- ev = @client_queue.pluck(@tag, TimeConsts::INFINITE_FUTURE)
- expect(ev).not_to be_nil
- expect(ev.type).to be(INVOKE_ACCEPTED)
call.start_write(req, @tag)
ev = @client_queue.pluck(@tag, TimeConsts::INFINITE_FUTURE)
expect(ev).not_to be_nil
@@ -105,16 +105,20 @@ shared_examples 'basic GRPC message delivery is OK' do
it 'servers receive requests from clients and start responding' do
reply = ByteBuffer.new('the server payload')
call = new_client_call
- msg = client_sends(call)
+ call.invoke(@client_queue, @client_metadata_tag, @client_finished_tag)
# check the server rpc new was received
- @server.request_call(@server_tag)
- ev = expect_next_event_on(@server_queue, SERVER_RPC_NEW, @server_tag)
+ # @server.request_call(@server_tag)
+ # ev = expect_next_event_on(@server_queue, SERVER_RPC_NEW, @server_tag)
# accept the call
- server_call = ev.call
- server_call.server_accept(@server_queue, @server_finished_tag)
- server_call.server_end_initial_metadata
+ # server_call = ev.call
+ # server_call.server_accept(@server_queue, @server_finished_tag)
+ # server_call.server_end_initial_metadata
+ server_call = server_allows_client_to_proceed
+
+ # client sends a message
+ msg = client_sends(call)
# confirm the server can read the inbound message
server_call.start_read(@server_tag)
@@ -128,18 +132,19 @@ shared_examples 'basic GRPC message delivery is OK' do
it 'responses written by servers are received by the client' do
call = new_client_call
+ call.invoke(@client_queue, @client_metadata_tag, @client_finished_tag)
+ server_call = server_allows_client_to_proceed
client_sends(call)
- server_receives_and_responds_with('server_response')
+ server_responds_with(server_call, 'server_response')
call.start_read(@tag)
- expect_next_event_on(@client_queue, CLIENT_METADATA_READ, @tag)
ev = expect_next_event_on(@client_queue, READ, @tag)
expect(ev.result.to_s).to eq('server_response')
end
it 'servers can ignore a client write and send a status' do
call = new_client_call
- client_sends(call)
+ call.invoke(@client_queue, @client_metadata_tag, @client_finished_tag)
# check the server rpc new was received
@server.request_call(@server_tag)
@@ -153,9 +158,13 @@ shared_examples 'basic GRPC message delivery is OK' do
server_call.start_write_status(StatusCodes::NOT_FOUND, 'not found',
@server_tag)
+ # Client sends some data
+ client_sends(call)
+
      # client gets an empty response for the read, preceded by some metadata.
call.start_read(@tag)
- expect_next_event_on(@client_queue, CLIENT_METADATA_READ, @tag)
+ expect_next_event_on(@client_queue, CLIENT_METADATA_READ,
+ @client_metadata_tag)
ev = expect_next_event_on(@client_queue, READ, @tag)
expect(ev.tag).to be(@tag)
expect(ev.result.to_s).to eq('')
@@ -169,13 +178,14 @@ shared_examples 'basic GRPC message delivery is OK' do
it 'completes calls by sending status to client and server' do
call = new_client_call
+ call.invoke(@client_queue, @client_metadata_tag, @client_finished_tag)
+ server_call = server_allows_client_to_proceed
client_sends(call)
- server_call = server_receives_and_responds_with('server_response')
+ server_responds_with(server_call, 'server_response')
server_call.start_write_status(10_101, 'status code is 10101', @server_tag)
# first the client says writes are done
call.start_read(@tag)
- expect_next_event_on(@client_queue, CLIENT_METADATA_READ, @tag)
expect_next_event_on(@client_queue, READ, @tag)
call.writes_done(@tag)
@@ -218,23 +228,13 @@ shared_examples 'GRPC metadata delivery works OK' do
end
end
- it 'sends an empty hash when no metadata is added' do
- call = new_client_call
- client_sends(call)
-
- # Server gets a response
- @server.request_call(@server_tag)
- expect_next_event_on(@server_queue, SERVER_RPC_NEW, @server_tag)
- end
-
it 'sends all the metadata pairs when keys and values are valid' do
@valid_metadata.each do |md|
call = new_client_call
call.add_metadata(md)
# Client begins a call OK
- call.start_invoke(@client_queue, @tag, @tag, @client_finished_tag)
- expect_next_event_on(@client_queue, INVOKE_ACCEPTED, @tag)
+ call.invoke(@client_queue, @client_metadata_tag, @client_finished_tag)
# ... server has all metadata available even though the client did not
# send a write
@@ -266,7 +266,7 @@ shared_examples 'GRPC metadata delivery works OK' do
it 'raises an exception if a metadata key is invalid' do
@bad_keys.each do |md|
call = new_client_call
- client_sends(call)
+ call.invoke(@client_queue, @client_metadata_tag, @client_finished_tag)
# server gets the invocation
@server.request_call(@server_tag)
@@ -277,7 +277,7 @@ shared_examples 'GRPC metadata delivery works OK' do
it 'sends a hash that contains the status when no metadata is added' do
call = new_client_call
- client_sends(call)
+ call.invoke(@client_queue, @client_metadata_tag, @client_finished_tag)
# server gets the invocation
@server.request_call(@server_tag)
@@ -288,21 +288,17 @@ shared_examples 'GRPC metadata delivery works OK' do
server_call.server_accept(@server_queue, @server_finished_tag)
server_call.server_end_initial_metadata
- # ... these server sends some data, allowing the metadata read
- server_call.start_write(ByteBuffer.new('reply with metadata'),
- @server_tag)
- expect_next_event_on(@server_queue, WRITE_ACCEPTED, @server_tag)
-
# there is the HTTP status metadata, though there should not be any
# TODO: update this with the bug number to be resolved
- ev = expect_next_event_on(@client_queue, CLIENT_METADATA_READ, @tag)
+ ev = expect_next_event_on(@client_queue, CLIENT_METADATA_READ,
+ @client_metadata_tag)
expect(ev.result).to eq(':status' => '200')
end
it 'sends all the pairs and status:200 when keys and values are valid' do
@valid_metadata.each do |md|
call = new_client_call
- client_sends(call)
+ call.invoke(@client_queue, @client_metadata_tag, @client_finished_tag)
# server gets the invocation
@server.request_call(@server_tag)
@@ -315,7 +311,8 @@ shared_examples 'GRPC metadata delivery works OK' do
server_call.server_end_initial_metadata
# Now the client can read the metadata
- ev = expect_next_event_on(@client_queue, CLIENT_METADATA_READ, @tag)
+ ev = expect_next_event_on(@client_queue, CLIENT_METADATA_READ,
+ @client_metadata_tag)
replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
replace_symbols[':status'] = '200'
expect(ev.result).to eq(replace_symbols)
@@ -326,17 +323,17 @@ end
describe 'the http client/server' do
before(:example) do
- port = find_unused_tcp_port
- host = "localhost:#{port}"
+ server_host = '0.0.0.0:0'
@client_queue = GRPC::Core::CompletionQueue.new
@server_queue = GRPC::Core::CompletionQueue.new
@server = GRPC::Core::Server.new(@server_queue, nil)
- @server.add_http2_port(host)
+ server_port = @server.add_http2_port(server_host)
@server.start
- @ch = Channel.new(host, nil)
+ @ch = Channel.new("0.0.0.0:#{server_port}", nil)
end
after(:example) do
+ @ch.close
@server.close
end
@@ -350,16 +347,15 @@ end
describe 'the secure http client/server' do
before(:example) do
certs = load_test_certs
- port = find_unused_tcp_port
- host = "localhost:#{port}"
+ server_host = 'localhost:0'
@client_queue = GRPC::Core::CompletionQueue.new
@server_queue = GRPC::Core::CompletionQueue.new
server_creds = GRPC::Core::ServerCredentials.new(nil, certs[1], certs[2])
@server = GRPC::Core::Server.new(@server_queue, nil, server_creds)
- @server.add_http2_port(host, true)
+ server_port = @server.add_http2_port(server_host, true)
@server.start
args = { Channel::SSL_TARGET => 'foo.test.google.com' }
- @ch = Channel.new(host, args,
+ @ch = Channel.new("0.0.0.0:#{server_port}", args,
GRPC::Core::Credentials.new(certs[0], nil, nil))
end
diff --git a/src/ruby/spec/event_spec.rb b/src/ruby/spec/event_spec.rb
index 5dec07e1ed..7ef08d021b 100644
--- a/src/ruby/spec/event_spec.rb
+++ b/src/ruby/spec/event_spec.rb
@@ -40,7 +40,8 @@ describe GRPC::Core::CompletionType do
CLIENT_METADATA_READ: 5,
FINISHED: 6,
SERVER_RPC_NEW: 7,
- RESERVED: 8
+ SERVER_SHUTDOWN: 8,
+ RESERVED: 9
}
end
diff --git a/src/ruby/spec/generic/active_call_spec.rb b/src/ruby/spec/generic/active_call_spec.rb
index 898022f185..e81b2168b0 100644
--- a/src/ruby/spec/generic/active_call_spec.rb
+++ b/src/ruby/spec/generic/active_call_spec.rb
@@ -28,7 +28,6 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require 'grpc'
-require_relative '../port_picker'
include GRPC::Core::StatusCodes
@@ -45,12 +44,11 @@ describe GRPC::ActiveCall do
@client_queue = GRPC::Core::CompletionQueue.new
@server_queue = GRPC::Core::CompletionQueue.new
- port = find_unused_tcp_port
- host = "localhost:#{port}"
+ host = '0.0.0.0:0'
@server = GRPC::Core::Server.new(@server_queue, nil)
- @server.add_http2_port(host)
+ server_port = @server.add_http2_port(host)
@server.start
- @ch = GRPC::Core::Channel.new(host, nil)
+ @ch = GRPC::Core::Channel.new("localhost:#{server_port}", nil)
end
after(:each) do
@@ -60,8 +58,8 @@ describe GRPC::ActiveCall do
describe 'restricted view methods' do
before(:each) do
call = make_test_call
- done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
- deadline)
+ done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue,
+ deadline)
@client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline,
finished_tag: done_tag,
@@ -92,8 +90,8 @@ describe GRPC::ActiveCall do
describe '#remote_send' do
it 'allows a client to send a payload to the server' do
call = make_test_call
- done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
- deadline)
+ done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue,
+ deadline)
@client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline,
finished_tag: done_tag,
@@ -118,8 +116,8 @@ describe GRPC::ActiveCall do
it 'marshals the payload using the marshal func' do
call = make_test_call
- done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
- deadline)
+ done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue,
+ deadline)
marshal = proc { |x| 'marshalled:' + x }
client_call = ActiveCall.new(call, @client_queue, marshal,
@pass_through, deadline,
@@ -139,11 +137,11 @@ describe GRPC::ActiveCall do
end
end
- describe '#client_start_invoke' do
+ describe '#client_invoke' do
    it 'sends keywords as metadata to the server when they are present' do
call = make_test_call
- ActiveCall.client_start_invoke(call, @client_queue, deadline,
- k1: 'v1', k2: 'v2')
+ ActiveCall.client_invoke(call, @client_queue, deadline,
+ k1: 'v1', k2: 'v2')
@server.request_call(@server_tag)
ev = @server_queue.next(deadline)
expect(ev).to_not be_nil
@@ -155,8 +153,8 @@ describe GRPC::ActiveCall do
describe '#remote_read' do
it 'reads the response sent by a server' do
call = make_test_call
- done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
- deadline)
+ done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue,
+ deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline,
finished_tag: done_tag,
@@ -170,8 +168,8 @@ describe GRPC::ActiveCall do
it 'saves metadata { status=200 } when the server adds no metadata' do
call = make_test_call
- done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
- deadline)
+ done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue,
+ deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline,
finished_tag: done_tag,
@@ -187,8 +185,8 @@ describe GRPC::ActiveCall do
    it 'saves metadata added by the server' do
call = make_test_call
- done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
- deadline)
+ done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue,
+ deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline,
finished_tag: done_tag,
@@ -205,8 +203,8 @@ describe GRPC::ActiveCall do
    it 'gets a nil msg before a status when an OK status is sent' do
call = make_test_call
- done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
- deadline)
+ done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue,
+ deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline,
finished_tag: done_tag,
@@ -224,8 +222,8 @@ describe GRPC::ActiveCall do
it 'unmarshals the response using the unmarshal func' do
call = make_test_call
- done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
- deadline)
+ done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue,
+ deadline)
unmarshal = proc { |x| 'unmarshalled:' + x }
client_call = ActiveCall.new(call, @client_queue, @pass_through,
unmarshal, deadline,
@@ -251,8 +249,8 @@ describe GRPC::ActiveCall do
    it 'returns an enumerator that can read n responses' do
call = make_test_call
- done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
- deadline)
+ done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue,
+ deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline,
finished_tag: done_tag,
@@ -271,8 +269,8 @@ describe GRPC::ActiveCall do
    it 'returns an enumerator that stops after an OK Status' do
call = make_test_call
- done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
- deadline)
+ done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue,
+ deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline,
read_metadata_tag: meta_tag,
@@ -296,8 +294,8 @@ describe GRPC::ActiveCall do
describe '#writes_done' do
it 'finishes ok if the server sends a status response' do
call = make_test_call
- done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
- deadline)
+ done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue,
+ deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline,
finished_tag: done_tag,
@@ -315,8 +313,8 @@ describe GRPC::ActiveCall do
it 'finishes ok if the server sends an early status response' do
call = make_test_call
- done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
- deadline)
+ done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue,
+ deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline,
read_metadata_tag: meta_tag,
@@ -334,8 +332,8 @@ describe GRPC::ActiveCall do
it 'finishes ok if writes_done is true' do
call = make_test_call
- done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
- deadline)
+ done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue,
+ deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline,
read_metadata_tag: meta_tag,
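
The hunks above are a mechanical rename of ActiveCall.client_start_invoke to ActiveCall.client_invoke; the helper still returns the finished and metadata tags that the ActiveCall constructor consumes. Condensed from the spec code above (make_test_call, @pass_through and deadline are the spec's own helpers; combining the two usages shown, since keywords travel as metadata):

    call = make_test_call
    done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue, deadline,
                                                  k1: 'v1')
    client_call = ActiveCall.new(call, @client_queue, @pass_through,
                                 @pass_through, deadline,
                                 finished_tag: done_tag,
                                 read_metadata_tag: meta_tag)
    client_call.remote_send('a message')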
diff --git a/src/ruby/spec/generic/client_stub_spec.rb b/src/ruby/spec/generic/client_stub_spec.rb
index 8ebe48bc4c..f1500fbd44 100644
--- a/src/ruby/spec/generic/client_stub_spec.rb
+++ b/src/ruby/spec/generic/client_stub_spec.rb
@@ -29,9 +29,9 @@
require 'grpc'
require 'xray/thread_dump_signal_handler'
-require_relative '../port_picker'
NOOP = proc { |x| x }
+FAKE_HOST = 'localhost:0'
def wakey_thread(&blk)
awake_mutex, awake_cond = Mutex.new, ConditionVariable.new
@@ -67,7 +67,7 @@ describe 'ClientStub' do
describe '#new' do
it 'can be created from a host and args' do
- host = new_test_host
+ host = FAKE_HOST
opts = { a_channel_arg: 'an_arg' }
blk = proc do
GRPC::ClientStub.new(host, @cq, **opts)
@@ -76,7 +76,7 @@ describe 'ClientStub' do
end
it 'can be created with a default deadline' do
- host = new_test_host
+ host = FAKE_HOST
opts = { a_channel_arg: 'an_arg', deadline: 5 }
blk = proc do
GRPC::ClientStub.new(host, @cq, **opts)
@@ -85,7 +85,7 @@ describe 'ClientStub' do
end
    it 'can be created with a channel override' do
- host = new_test_host
+ host = FAKE_HOST
opts = { a_channel_arg: 'an_arg', channel_override: @ch }
blk = proc do
GRPC::ClientStub.new(host, @cq, **opts)
@@ -94,7 +94,7 @@ describe 'ClientStub' do
end
it 'cannot be created with a bad channel override' do
- host = new_test_host
+ host = FAKE_HOST
blk = proc do
opts = { a_channel_arg: 'an_arg', channel_override: Object.new }
GRPC::ClientStub.new(host, @cq, **opts)
@@ -103,7 +103,7 @@ describe 'ClientStub' do
end
it 'cannot be created with bad credentials' do
- host = new_test_host
+ host = FAKE_HOST
blk = proc do
opts = { a_channel_arg: 'an_arg', creds: Object.new }
GRPC::ClientStub.new(host, @cq, **opts)
@@ -113,7 +113,7 @@ describe 'ClientStub' do
    it 'can be created with test credentials' do
certs = load_test_certs
- host = new_test_host
+ host = FAKE_HOST
blk = proc do
opts = {
GRPC::Core::Channel::SSL_TARGET => 'foo.test.google.com',
@@ -133,16 +133,17 @@ describe 'ClientStub' do
shared_examples 'request response' do
it 'should send a request to/receive a reply from a server' do
- host = new_test_host
- th = run_request_response(host, @sent_msg, @resp, @pass)
- stub = GRPC::ClientStub.new(host, @cq)
+ server_port = create_test_server
+ th = run_request_response(@sent_msg, @resp, @pass)
+ stub = GRPC::ClientStub.new("localhost:#{server_port}", @cq)
expect(get_response(stub)).to eq(@resp)
th.join
end
it 'should send metadata to the server ok' do
- host = new_test_host
- th = run_request_response(host, @sent_msg, @resp, @pass,
+ server_port = create_test_server
+ host = "localhost:#{server_port}"
+ th = run_request_response(@sent_msg, @resp, @pass,
k1: 'v1', k2: 'v2')
stub = GRPC::ClientStub.new(host, @cq)
expect(get_response(stub)).to eq(@resp)
@@ -150,8 +151,9 @@ describe 'ClientStub' do
end
it 'should update the sent metadata with a provided metadata updater' do
- host = new_test_host
- th = run_request_response(host, @sent_msg, @resp, @pass,
+ server_port = create_test_server
+ host = "localhost:#{server_port}"
+ th = run_request_response(@sent_msg, @resp, @pass,
k1: 'updated-v1', k2: 'v2')
update_md = proc do |md|
md[:k1] = 'updated-v1'
@@ -163,8 +165,9 @@ describe 'ClientStub' do
end
it 'should send a request when configured using an override channel' do
- alt_host = new_test_host
- th = run_request_response(alt_host, @sent_msg, @resp, @pass)
+ server_port = create_test_server
+ alt_host = "localhost:#{server_port}"
+ th = run_request_response(@sent_msg, @resp, @pass)
ch = GRPC::Core::Channel.new(alt_host, nil)
stub = GRPC::ClientStub.new('ignored-host', @cq, channel_override: ch)
expect(get_response(stub)).to eq(@resp)
@@ -172,8 +175,9 @@ describe 'ClientStub' do
end
it 'should raise an error if the status is not OK' do
- host = new_test_host
- th = run_request_response(host, @sent_msg, @resp, @fail)
+ server_port = create_test_server
+ host = "localhost:#{server_port}"
+ th = run_request_response(@sent_msg, @resp, @fail)
stub = GRPC::ClientStub.new(host, @cq)
blk = proc { get_response(stub) }
expect(&blk).to raise_error(GRPC::BadStatus)
@@ -210,16 +214,18 @@ describe 'ClientStub' do
end
it 'should send requests to/receive a reply from a server' do
- host = new_test_host
- th = run_client_streamer(host, @sent_msgs, @resp, @pass)
+ server_port = create_test_server
+ host = "localhost:#{server_port}"
+ th = run_client_streamer(@sent_msgs, @resp, @pass)
stub = GRPC::ClientStub.new(host, @cq)
expect(get_response(stub)).to eq(@resp)
th.join
end
it 'should send metadata to the server ok' do
- host = new_test_host
- th = run_client_streamer(host, @sent_msgs, @resp, @pass,
+ server_port = create_test_server
+ host = "localhost:#{server_port}"
+ th = run_client_streamer(@sent_msgs, @resp, @pass,
k1: 'v1', k2: 'v2')
stub = GRPC::ClientStub.new(host, @cq)
expect(get_response(stub)).to eq(@resp)
@@ -227,8 +233,9 @@ describe 'ClientStub' do
end
it 'should update the sent metadata with a provided metadata updater' do
- host = new_test_host
- th = run_client_streamer(host, @sent_msgs, @resp, @pass,
+ server_port = create_test_server
+ host = "localhost:#{server_port}"
+ th = run_client_streamer(@sent_msgs, @resp, @pass,
k1: 'updated-v1', k2: 'v2')
update_md = proc do |md|
md[:k1] = 'updated-v1'
@@ -240,8 +247,9 @@ describe 'ClientStub' do
end
it 'should raise an error if the status is not ok' do
- host = new_test_host
- th = run_client_streamer(host, @sent_msgs, @resp, @fail)
+ server_port = create_test_server
+ host = "localhost:#{server_port}"
+ th = run_client_streamer(@sent_msgs, @resp, @fail)
stub = GRPC::ClientStub.new(host, @cq)
blk = proc { get_response(stub) }
expect(&blk).to raise_error(GRPC::BadStatus)
@@ -278,16 +286,18 @@ describe 'ClientStub' do
end
it 'should send a request to/receive replies from a server' do
- host = new_test_host
- th = run_server_streamer(host, @sent_msg, @replys, @pass)
+ server_port = create_test_server
+ host = "localhost:#{server_port}"
+ th = run_server_streamer(@sent_msg, @replys, @pass)
stub = GRPC::ClientStub.new(host, @cq)
expect(get_responses(stub).collect { |r| r }).to eq(@replys)
th.join
end
it 'should raise an error if the status is not ok' do
- host = new_test_host
- th = run_server_streamer(host, @sent_msg, @replys, @fail)
+ server_port = create_test_server
+ host = "localhost:#{server_port}"
+ th = run_server_streamer(@sent_msg, @replys, @fail)
stub = GRPC::ClientStub.new(host, @cq)
e = get_responses(stub)
expect { e.collect { |r| r } }.to raise_error(GRPC::BadStatus)
@@ -295,8 +305,9 @@ describe 'ClientStub' do
end
it 'should send metadata to the server ok' do
- host = new_test_host
- th = run_server_streamer(host, @sent_msg, @replys, @fail,
+ server_port = create_test_server
+ host = "localhost:#{server_port}"
+ th = run_server_streamer(@sent_msg, @replys, @fail,
k1: 'v1', k2: 'v2')
stub = GRPC::ClientStub.new(host, @cq)
e = get_responses(stub)
@@ -305,8 +316,9 @@ describe 'ClientStub' do
end
it 'should update the sent metadata with a provided metadata updater' do
- host = new_test_host
- th = run_server_streamer(host, @sent_msg, @replys, @pass,
+ server_port = create_test_server
+ host = "localhost:#{server_port}"
+ th = run_server_streamer(@sent_msg, @replys, @pass,
k1: 'updated-v1', k2: 'v2')
update_md = proc do |md|
md[:k1] = 'updated-v1'
@@ -352,8 +364,9 @@ describe 'ClientStub' do
end
it 'supports sending all the requests first', bidi: true do
- host = new_test_host
- th = run_bidi_streamer_handle_inputs_first(host, @sent_msgs, @replys,
+ server_port = create_test_server
+ host = "localhost:#{server_port}"
+ th = run_bidi_streamer_handle_inputs_first(@sent_msgs, @replys,
@pass)
stub = GRPC::ClientStub.new(host, @cq)
e = get_responses(stub)
@@ -362,8 +375,9 @@ describe 'ClientStub' do
end
it 'supports client-initiated ping pong', bidi: true do
- host = new_test_host
- th = run_bidi_streamer_echo_ping_pong(host, @sent_msgs, @pass, true)
+ server_port = create_test_server
+ host = "localhost:#{server_port}"
+ th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @pass, true)
stub = GRPC::ClientStub.new(host, @cq)
e = get_responses(stub)
expect(e.collect { |r| r }).to eq(@sent_msgs)
@@ -377,8 +391,9 @@ describe 'ClientStub' do
# they receive a message from the client. Without receiving all the
# metadata, the server does not accept the call, so this test hangs.
xit 'supports a server-initiated ping pong', bidi: true do
- host = new_test_host
- th = run_bidi_streamer_echo_ping_pong(host, @sent_msgs, @pass, false)
+ server_port = create_test_server
+ host = "localhost:#{server_port}"
+ th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @pass, false)
stub = GRPC::ClientStub.new(host, @cq)
e = get_responses(stub)
expect(e.collect { |r| r }).to eq(@sent_msgs)
@@ -410,10 +425,10 @@ describe 'ClientStub' do
end
end
- def run_server_streamer(hostname, expected_input, replys, status, **kw)
+ def run_server_streamer(expected_input, replys, status, **kw)
wanted_metadata = kw.clone
wakey_thread do |mtx, cnd|
- c = expect_server_to_be_invoked(hostname, mtx, cnd)
+ c = expect_server_to_be_invoked(mtx, cnd)
wanted_metadata.each do |k, v|
expect(c.metadata[k.to_s]).to eq(v)
end
@@ -423,20 +438,19 @@ describe 'ClientStub' do
end
end
- def run_bidi_streamer_handle_inputs_first(hostname, expected_inputs, replys,
+ def run_bidi_streamer_handle_inputs_first(expected_inputs, replys,
status)
wakey_thread do |mtx, cnd|
- c = expect_server_to_be_invoked(hostname, mtx, cnd)
+ c = expect_server_to_be_invoked(mtx, cnd)
expected_inputs.each { |i| expect(c.remote_read).to eq(i) }
replys.each { |r| c.remote_send(r) }
c.send_status(status, status == @pass ? 'OK' : 'NOK', true)
end
end
- def run_bidi_streamer_echo_ping_pong(hostname, expected_inputs, status,
- client_starts)
+ def run_bidi_streamer_echo_ping_pong(expected_inputs, status, client_starts)
wakey_thread do |mtx, cnd|
- c = expect_server_to_be_invoked(hostname, mtx, cnd)
+ c = expect_server_to_be_invoked(mtx, cnd)
expected_inputs.each do |i|
if client_starts
expect(c.remote_read).to eq(i)
@@ -450,10 +464,10 @@ describe 'ClientStub' do
end
end
- def run_client_streamer(hostname, expected_inputs, resp, status, **kw)
+ def run_client_streamer(expected_inputs, resp, status, **kw)
wanted_metadata = kw.clone
wakey_thread do |mtx, cnd|
- c = expect_server_to_be_invoked(hostname, mtx, cnd)
+ c = expect_server_to_be_invoked(mtx, cnd)
expected_inputs.each { |i| expect(c.remote_read).to eq(i) }
wanted_metadata.each do |k, v|
expect(c.metadata[k.to_s]).to eq(v)
@@ -463,10 +477,10 @@ describe 'ClientStub' do
end
end
- def run_request_response(hostname, expected_input, resp, status, **kw)
+ def run_request_response(expected_input, resp, status, **kw)
wanted_metadata = kw.clone
wakey_thread do |mtx, cnd|
- c = expect_server_to_be_invoked(hostname, mtx, cnd)
+ c = expect_server_to_be_invoked(mtx, cnd)
expect(c.remote_read).to eq(expected_input)
wanted_metadata.each do |k, v|
expect(c.metadata[k.to_s]).to eq(v)
@@ -476,32 +490,30 @@ describe 'ClientStub' do
end
end
- def start_test_server(hostname, awake_mutex, awake_cond)
- server_queue = GRPC::Core::CompletionQueue.new
- @server = GRPC::Core::Server.new(server_queue, nil)
- @server.add_http2_port(hostname)
+ def create_test_server
+ @server_queue = GRPC::Core::CompletionQueue.new
+ @server = GRPC::Core::Server.new(@server_queue, nil)
+ @server.add_http2_port('0.0.0.0:0')
+ end
+
+ def start_test_server(awake_mutex, awake_cond)
@server.start
@server_tag = Object.new
@server.request_call(@server_tag)
awake_mutex.synchronize { awake_cond.signal }
- server_queue
end
- def expect_server_to_be_invoked(hostname, awake_mutex, awake_cond)
- server_queue = start_test_server(hostname, awake_mutex, awake_cond)
- ev = server_queue.pluck(@server_tag, INFINITE_FUTURE)
+ def expect_server_to_be_invoked(awake_mutex, awake_cond)
+ start_test_server(awake_mutex, awake_cond)
+ ev = @server_queue.pluck(@server_tag, INFINITE_FUTURE)
fail OutOfTime if ev.nil?
server_call = ev.call
server_call.metadata = ev.result.metadata
finished_tag = Object.new
- server_call.server_accept(server_queue, finished_tag)
+ server_call.server_accept(@server_queue, finished_tag)
server_call.server_end_initial_metadata
- GRPC::ActiveCall.new(server_call, server_queue, NOOP, NOOP, INFINITE_FUTURE,
+ GRPC::ActiveCall.new(server_call, @server_queue, NOOP, NOOP,
+ INFINITE_FUTURE,
finished_tag: finished_tag)
end
-
- def new_test_host
- port = find_unused_tcp_port
- "localhost:#{port}"
- end
end
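
With the helpers above no longer taking a hostname, a typical example in this file reduces to binding the shared server to an ephemeral port, running the server side in a thread, and pointing the stub at the returned port. Condensed from the 'request response' example above:

    server_port = create_test_server                      # binds @server to '0.0.0.0:0'
    th = run_request_response(@sent_msg, @resp, @pass)    # server side runs in a wakey_thread
    stub = GRPC::ClientStub.new("localhost:#{server_port}", @cq)
    expect(get_response(stub)).to eq(@resp)
    th.join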
diff --git a/src/ruby/spec/generic/rpc_server_spec.rb b/src/ruby/spec/generic/rpc_server_spec.rb
index cd4888a3b4..e083bc1e9d 100644
--- a/src/ruby/spec/generic/rpc_server_spec.rb
+++ b/src/ruby/spec/generic/rpc_server_spec.rb
@@ -29,7 +29,6 @@
require 'grpc'
require 'xray/thread_dump_signal_handler'
-require_relative '../port_picker'
def load_test_certs
test_root = File.join(File.dirname(File.dirname(__FILE__)), 'testdata')
@@ -104,10 +103,10 @@ describe GRPC::RpcServer do
@noop = proc { |x| x }
@server_queue = GRPC::Core::CompletionQueue.new
- port = find_unused_tcp_port
- @host = "localhost:#{port}"
+ server_host = '0.0.0.0:0'
@server = GRPC::Core::Server.new(@server_queue, nil)
- @server.add_http2_port(@host)
+ server_port = @server.add_http2_port(server_host)
+ @host = "localhost:#{server_port}"
@ch = GRPC::Core::Channel.new(@host, nil)
end
diff --git a/src/ruby/spec/server_spec.rb b/src/ruby/spec/server_spec.rb
index 6e5bb523de..1550ba6566 100644
--- a/src/ruby/spec/server_spec.rb
+++ b/src/ruby/spec/server_spec.rb
@@ -28,7 +28,6 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require 'grpc'
-require 'port_picker'
def load_test_certs
test_root = File.join(File.dirname(__FILE__), 'testdata')
@@ -205,10 +204,8 @@ describe Server do
end
def start_a_server
- port = find_unused_tcp_port
- host = "localhost:#{port}"
s = Server.new(@cq, nil)
- s.add_http2_port(host)
+ s.add_http2_port('0.0.0.0:0')
s.start
s
end