| author | Craig Tiller <ctiller@google.com> | 2015-08-21 10:45:48 -0700 |
|---|---|---|
| committer | Craig Tiller <ctiller@google.com> | 2015-08-21 10:45:48 -0700 |
| commit | 592e7f2dd0c059468de6377e8d6bc0d61fe2dd2c (patch) | |
| tree | 264e969d33b3fcc36bedc35d3f5b92a5285242a3 /src/core/iomgr | |
| parent | 4f21d3549c8f652a152ae086a74dd8415ecca8fa (diff) | |
Refactor Endpoint API
- Allow reads to complete immediately
- Unify read/write signatures
- Simplify memory management to allow future optimization work
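To make the unified contract concrete, here is a minimal caller sketch based only on the signatures introduced by this change (`grpc_endpoint_read`, `grpc_endpoint_op_status`, and the `cb`/`cb_arg` fields of `grpc_iomgr_closure` as used in the diff below). The wrapper `start_read` and the callback `on_read_done` are illustrative names, not part of the commit.

```c
#include <grpc/support/slice_buffer.h>
#include "src/core/iomgr/endpoint.h"
#include "src/core/iomgr/iomgr.h"

/* Illustrative callback: invoked only when the operation was left PENDING. */
static void on_read_done(void *arg, int success) {
  gpr_slice_buffer *incoming = arg;
  /* consume incoming->slices here; success == 0 means the endpoint is closed */
  (void)incoming;
  (void)success;
}

/* Illustrative wrapper: one code path handles both immediate and deferred
   completion, per the grpc_endpoint_op_status semantics in endpoint.h. */
static void start_read(grpc_endpoint *ep, gpr_slice_buffer *incoming,
                       grpc_iomgr_closure *closure) {
  closure->cb = on_read_done;
  closure->cb_arg = incoming;
  switch (grpc_endpoint_read(ep, incoming, closure)) {
    case GRPC_ENDPOINT_DONE: /* completed immediately, cb won't be called */
      on_read_done(incoming, 1);
      break;
    case GRPC_ENDPOINT_PENDING: /* cb will be called when completed */
      break;
    case GRPC_ENDPOINT_ERROR: /* errored out, cb won't be called */
      on_read_done(incoming, 0);
      break;
  }
}
```

Note that the TCP implementation in this commit still returns GRPC_ENDPOINT_PENDING unconditionally from its read path (see the "TODO(ctiller): immediate return" in tcp_posix.c), but callers written against the new API are expected to handle the immediate-completion case as sketched above.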
Diffstat (limited to 'src/core/iomgr')
| -rw-r--r-- | src/core/iomgr/endpoint.c | 17 |
| -rw-r--r-- | src/core/iomgr/endpoint.h | 51 |
| -rw-r--r-- | src/core/iomgr/tcp_posix.c | 520 |
3 files changed, 206 insertions, 382 deletions
diff --git a/src/core/iomgr/endpoint.c b/src/core/iomgr/endpoint.c
index 8ee14bce9b..a7878e31dd 100644
--- a/src/core/iomgr/endpoint.c
+++ b/src/core/iomgr/endpoint.c
@@ -33,17 +33,16 @@
 
 #include "src/core/iomgr/endpoint.h"
 
-void grpc_endpoint_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb,
-                                  void *user_data) {
-  ep->vtable->notify_on_read(ep, cb, user_data);
+grpc_endpoint_op_status grpc_endpoint_read(grpc_endpoint *ep,
+                                           gpr_slice_buffer *slices,
+                                           grpc_iomgr_closure *cb) {
+  return ep->vtable->read(ep, slices, cb);
 }
 
-grpc_endpoint_write_status grpc_endpoint_write(grpc_endpoint *ep,
-                                               gpr_slice *slices,
-                                               size_t nslices,
-                                               grpc_endpoint_write_cb cb,
-                                               void *user_data) {
-  return ep->vtable->write(ep, slices, nslices, cb, user_data);
+grpc_endpoint_op_status grpc_endpoint_write(grpc_endpoint *ep,
+                                            gpr_slice_buffer *slices,
+                                            grpc_iomgr_closure *cb) {
+  return ep->vtable->write(ep, slices, cb);
 }
 
 void grpc_endpoint_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
diff --git a/src/core/iomgr/endpoint.h b/src/core/iomgr/endpoint.h
index ea92a500e8..38f1e46d67 100644
--- a/src/core/iomgr/endpoint.h
+++ b/src/core/iomgr/endpoint.h
@@ -37,6 +37,7 @@
 #include "src/core/iomgr/pollset.h"
 #include "src/core/iomgr/pollset_set.h"
 #include <grpc/support/slice.h>
+#include <grpc/support/slice_buffer.h>
 #include <grpc/support/time.h>
 
 /* An endpoint caps a streaming channel between two communicating processes.
@@ -45,31 +46,17 @@
 typedef struct grpc_endpoint grpc_endpoint;
 typedef struct grpc_endpoint_vtable grpc_endpoint_vtable;
 
-typedef enum grpc_endpoint_cb_status {
-  GRPC_ENDPOINT_CB_OK = 0,   /* Call completed successfully */
-  GRPC_ENDPOINT_CB_EOF,      /* Call completed successfully, end of file reached */
-  GRPC_ENDPOINT_CB_SHUTDOWN, /* Call interrupted by shutdown */
-  GRPC_ENDPOINT_CB_ERROR     /* Call interrupted by socket error */
-} grpc_endpoint_cb_status;
-
-typedef enum grpc_endpoint_write_status {
-  GRPC_ENDPOINT_WRITE_DONE,    /* completed immediately, cb won't be called */
-  GRPC_ENDPOINT_WRITE_PENDING, /* cb will be called when completed */
-  GRPC_ENDPOINT_WRITE_ERROR    /* write errored out, cb won't be called */
-} grpc_endpoint_write_status;
-
-typedef void (*grpc_endpoint_read_cb)(void *user_data, gpr_slice *slices,
-                                      size_t nslices,
-                                      grpc_endpoint_cb_status error);
-typedef void (*grpc_endpoint_write_cb)(void *user_data,
-                                       grpc_endpoint_cb_status error);
+typedef enum grpc_endpoint_op_status {
+  GRPC_ENDPOINT_DONE,    /* completed immediately, cb won't be called */
+  GRPC_ENDPOINT_PENDING, /* cb will be called when completed */
+  GRPC_ENDPOINT_ERROR    /* write errored out, cb won't be called */
+} grpc_endpoint_op_status;
 
 struct grpc_endpoint_vtable {
-  void (*notify_on_read)(grpc_endpoint *ep, grpc_endpoint_read_cb cb,
-                         void *user_data);
-  grpc_endpoint_write_status (*write)(grpc_endpoint *ep, gpr_slice *slices,
-                                      size_t nslices, grpc_endpoint_write_cb cb,
-                                      void *user_data);
+  grpc_endpoint_op_status (*read)(grpc_endpoint *ep, gpr_slice_buffer *slices,
+                                  grpc_iomgr_closure *cb);
+  grpc_endpoint_op_status (*write)(grpc_endpoint *ep, gpr_slice_buffer *slices,
+                                   grpc_iomgr_closure *cb);
   void (*add_to_pollset)(grpc_endpoint *ep, grpc_pollset *pollset);
   void (*add_to_pollset_set)(grpc_endpoint *ep, grpc_pollset_set *pollset);
   void (*shutdown)(grpc_endpoint *ep);
@@ -77,9 +64,13 @@ struct grpc_endpoint_vtable {
   char *(*get_peer)(grpc_endpoint *ep);
 };
 
-/* When data is available on the connection, calls the callback with slices. */
-void grpc_endpoint_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb,
-                                  void *user_data);
+/* When data is available on the connection, calls the callback with slices.
+   Callback success indicates that the endpoint can accept more reads, failure
+   indicates the endpoint is closed.
+   Valid slices may be placed into \a slices even on callback success == 0. */
+grpc_endpoint_op_status grpc_endpoint_read(
+    grpc_endpoint *ep, gpr_slice_buffer *slices,
+    grpc_iomgr_closure *cb) GRPC_MUST_USE_RESULT;
 
 char *grpc_endpoint_get_peer(grpc_endpoint *ep);
 
@@ -89,11 +80,9 @@ char *grpc_endpoint_get_peer(grpc_endpoint *ep);
    returns GRPC_ENDPOINT_WRITE_DONE.
    Otherwise it returns GRPC_ENDPOINT_WRITE_PENDING and calls cb when the
    connection is ready for more data. */
-grpc_endpoint_write_status grpc_endpoint_write(grpc_endpoint *ep,
-                                               gpr_slice *slices,
-                                               size_t nslices,
-                                               grpc_endpoint_write_cb cb,
-                                               void *user_data);
+grpc_endpoint_op_status grpc_endpoint_write(
+    grpc_endpoint *ep, gpr_slice_buffer *slices,
+    grpc_iomgr_closure *cb) GRPC_MUST_USE_RESULT;
 
 /* Causes any pending read/write callbacks to run immediately with
    GRPC_ENDPOINT_CB_SHUTDOWN status */
diff --git a/src/core/iomgr/tcp_posix.c b/src/core/iomgr/tcp_posix.c
index 360e6ebd8c..36ba3a7606 100644
--- a/src/core/iomgr/tcp_posix.c
+++ b/src/core/iomgr/tcp_posix.c
@@ -61,209 +61,8 @@
 #define SENDMSG_FLAGS 0
 #endif
 
-/* Holds a slice array and associated state. */
-typedef struct grpc_tcp_slice_state {
-  gpr_slice *slices;       /* Array of slices */
-  size_t nslices;          /* Size of slices array. */
-  ssize_t first_slice;     /* First valid slice in array */
-  ssize_t last_slice;      /* Last valid slice in array */
-  gpr_slice working_slice; /* pointer to original final slice */
-  int working_slice_valid; /* True if there is a working slice */
-  int memory_owned;        /* True if slices array is owned */
-} grpc_tcp_slice_state;
-
 int grpc_tcp_trace = 0;
 
-static void slice_state_init(grpc_tcp_slice_state *state, gpr_slice *slices,
-                             size_t nslices, size_t valid_slices) {
-  state->slices = slices;
-  state->nslices = nslices;
-  if (valid_slices == 0) {
-    state->first_slice = -1;
-  } else {
-    state->first_slice = 0;
-  }
-  state->last_slice = valid_slices - 1;
-  state->working_slice_valid = 0;
-  state->memory_owned = 0;
-}
-
-/* Returns true if there is still available data */
-static int slice_state_has_available(grpc_tcp_slice_state *state) {
-  return state->first_slice != -1 && state->last_slice >= state->first_slice;
-}
-
-static ssize_t slice_state_slices_allocated(grpc_tcp_slice_state *state) {
-  if (state->first_slice == -1) {
-    return 0;
-  } else {
-    return state->last_slice - state->first_slice + 1;
-  }
-}
-
-static void slice_state_realloc(grpc_tcp_slice_state *state, size_t new_size) {
-  /* TODO(klempner): use realloc instead when first_slice is 0 */
-  /* TODO(klempner): Avoid a realloc in cases where it is unnecessary */
-  gpr_slice *slices = state->slices;
-  size_t original_size = slice_state_slices_allocated(state);
-  size_t i;
-  gpr_slice *new_slices = gpr_malloc(sizeof(gpr_slice) * new_size);
-
-  for (i = 0; i < original_size; ++i) {
-    new_slices[i] = slices[i + state->first_slice];
-  }
-
-  state->slices = new_slices;
-  state->last_slice = original_size - 1;
-  if (original_size > 0) {
-    state->first_slice = 0;
-  } else {
-    state->first_slice = -1;
-  }
-  state->nslices = new_size;
-
-  if (state->memory_owned) {
-    gpr_free(slices);
-  }
-  state->memory_owned = 1;
-}
-
-static void slice_state_remove_prefix(grpc_tcp_slice_state *state,
-                                      size_t prefix_bytes) {
-  gpr_slice *current_slice = &state->slices[state->first_slice];
-  size_t current_slice_size;
-
-  while (slice_state_has_available(state)) {
-    current_slice_size = GPR_SLICE_LENGTH(*current_slice);
-    if (current_slice_size > prefix_bytes) {
-      /* TODO(klempner): Get rid of the extra refcount created here by adding a
-         native "trim the first N bytes" operation to splice */
-      /* TODO(klempner): This really shouldn't be modifying the current slice
-         unless we own the slices array. */
-      gpr_slice tail;
-      tail = gpr_slice_split_tail(current_slice, prefix_bytes);
-      gpr_slice_unref(*current_slice);
-      *current_slice = tail;
-      return;
-    } else {
-      gpr_slice_unref(*current_slice);
-      ++state->first_slice;
-      ++current_slice;
-      prefix_bytes -= current_slice_size;
-    }
-  }
-}
-
-static void slice_state_destroy(grpc_tcp_slice_state *state) {
-  while (slice_state_has_available(state)) {
-    gpr_slice_unref(state->slices[state->first_slice]);
-    ++state->first_slice;
-  }
-
-  if (state->memory_owned) {
-    gpr_free(state->slices);
-    state->memory_owned = 0;
-  }
-}
-
-void slice_state_transfer_ownership(grpc_tcp_slice_state *state,
-                                    gpr_slice **slices, size_t *nslices) {
-  *slices = state->slices + state->first_slice;
-  *nslices = state->last_slice - state->first_slice + 1;
-
-  state->first_slice = -1;
-  state->last_slice = -1;
-}
-
-/* Fills iov with the first min(iov_size, available) slices, returns number
-   filled */
-static size_t slice_state_to_iovec(grpc_tcp_slice_state *state,
-                                   struct iovec *iov, size_t iov_size) {
-  size_t nslices = state->last_slice - state->first_slice + 1;
-  gpr_slice *slices = state->slices + state->first_slice;
-  size_t i;
-  if (nslices < iov_size) {
-    iov_size = nslices;
-  }
-
-  for (i = 0; i < iov_size; ++i) {
-    iov[i].iov_base = GPR_SLICE_START_PTR(slices[i]);
-    iov[i].iov_len = GPR_SLICE_LENGTH(slices[i]);
-  }
-  return iov_size;
-}
-
-/* Makes n blocks available at the end of state, writes them into iov, and
-   returns the number of bytes allocated */
-static size_t slice_state_append_blocks_into_iovec(grpc_tcp_slice_state *state,
-                                                   struct iovec *iov, size_t n,
-                                                   size_t slice_size) {
-  size_t target_size;
-  size_t i;
-  size_t allocated_bytes;
-  ssize_t allocated_slices = slice_state_slices_allocated(state);
-
-  if (n - state->working_slice_valid >= state->nslices - state->last_slice) {
-    /* Need to grow the slice array */
-    target_size = state->nslices;
-    do {
-      target_size = target_size * 2;
-    } while (target_size < allocated_slices + n - state->working_slice_valid);
-    /* TODO(klempner): If this ever needs to support both prefix removal and
-       append, we should be smarter about the growth logic here */
-    slice_state_realloc(state, target_size);
-  }
-
-  i = 0;
-  allocated_bytes = 0;
-
-  if (state->working_slice_valid) {
-    iov[0].iov_base = GPR_SLICE_END_PTR(state->slices[state->last_slice]);
-    iov[0].iov_len = GPR_SLICE_LENGTH(state->working_slice) -
-                     GPR_SLICE_LENGTH(state->slices[state->last_slice]);
-    allocated_bytes += iov[0].iov_len;
-    ++i;
-    state->slices[state->last_slice] = state->working_slice;
-    state->working_slice_valid = 0;
-  }
-
-  for (; i < n; ++i) {
-    ++state->last_slice;
-    state->slices[state->last_slice] = gpr_slice_malloc(slice_size);
-    iov[i].iov_base = GPR_SLICE_START_PTR(state->slices[state->last_slice]);
-    iov[i].iov_len = slice_size;
-    allocated_bytes += slice_size;
-  }
-  if (state->first_slice == -1) {
-    state->first_slice = 0;
-  }
-  return allocated_bytes;
-}
-
-/* Remove the last n bytes from state */
-/* TODO(klempner): Consider having this defer actual deletion until later */
-static void slice_state_remove_last(grpc_tcp_slice_state *state, size_t bytes) {
-  while (bytes > 0 && slice_state_has_available(state)) {
-    if (GPR_SLICE_LENGTH(state->slices[state->last_slice]) > bytes) {
-      state->working_slice = state->slices[state->last_slice];
-      state->working_slice_valid = 1;
-      /* TODO(klempner): Combine these into a single operation that doesn't need
-         to refcount */
-      gpr_slice_unref(gpr_slice_split_tail(
-          &state->slices[state->last_slice],
-          GPR_SLICE_LENGTH(state->slices[state->last_slice]) - bytes));
-      bytes = 0;
-    } else {
-      bytes -= GPR_SLICE_LENGTH(state->slices[state->last_slice]);
-      gpr_slice_unref(state->slices[state->last_slice]);
-      --state->last_slice;
-      if (state->last_slice == -1) {
-        state->first_slice = -1;
-      }
-    }
-  }
-}
-
 typedef struct {
   grpc_endpoint base;
   grpc_fd *em_fd;
@@ -273,12 +72,15 @@ typedef struct {
   size_t slice_size;
   gpr_refcount refcount;
 
-  grpc_endpoint_read_cb read_cb;
-  void *read_user_data;
-  grpc_endpoint_write_cb write_cb;
-  void *write_user_data;
+  gpr_slice_buffer *incoming_buffer;
+  gpr_slice_buffer *outgoing_buffer;
+  /** slice within outgoing_buffer to write next */
+  size_t outgoing_slice_idx;
+  /** byte within outgoing_buffer->slices[outgoing_slice_idx] to write next */
+  size_t outgoing_byte_idx;
 
-  grpc_tcp_slice_state write_state;
+  grpc_iomgr_closure *read_cb;
+  grpc_iomgr_closure *write_cb;
 
   grpc_iomgr_closure read_closure;
   grpc_iomgr_closure write_closure;
@@ -288,65 +90,95 @@ typedef struct {
   char *peer_string;
 } grpc_tcp;
 
-static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success);
-static void grpc_tcp_handle_write(void *arg /* grpc_tcp */, int success);
+static void tcp_handle_read(void *arg /* grpc_tcp */, int success);
+static void tcp_handle_write(void *arg /* grpc_tcp */, int success);
 
-static void grpc_tcp_shutdown(grpc_endpoint *ep) {
+static void tcp_shutdown(grpc_endpoint *ep) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
   grpc_fd_shutdown(tcp->em_fd);
 }
 
-static void grpc_tcp_unref(grpc_tcp *tcp) {
-  int refcount_zero = gpr_unref(&tcp->refcount);
-  if (refcount_zero) {
-    grpc_fd_orphan(tcp->em_fd, NULL, "tcp_unref_orphan");
-    gpr_free(tcp->peer_string);
-    gpr_free(tcp);
+static void tcp_free(grpc_tcp *tcp) {
+  grpc_fd_orphan(tcp->em_fd, NULL, "tcp_unref_orphan");
+  gpr_free(tcp->peer_string);
+  gpr_free(tcp);
+}
+
+#define GRPC_TCP_REFCOUNT_DEBUG
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
+#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
+static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file,
+                      int line) {
+  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp,
+          reason, tcp->refcount.count, tcp->refcount.count - 1);
+  if (gpr_unref(&tcp->refcount)) {
+    tcp_free(tcp);
+  }
+}
+
+static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
+                    int line) {
+  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP ref %p : %s %d -> %d", tcp,
+          reason, tcp->refcount.count, tcp->refcount.count + 1);
+  gpr_ref(&tcp->refcount);
+}
+#ifdef GRPC_TCP_REFCOUNT_DEBUG
+#else
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
+#define TCP_REF(tcp, reason) tcp_ref((tcp))
+static void tcp_unref(grpc_tcp *tcp) {
+  if (gpr_unref(&tcp->refcount)) {
+    tcp_free(tcp);
   }
 }
 
-static void grpc_tcp_destroy(grpc_endpoint *ep) {
+static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
+#endif
+
+static void tcp_destroy(grpc_endpoint *ep) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
-  grpc_tcp_unref(tcp);
+  TCP_UNREF(tcp, "destroy");
 }
 
-static void call_read_cb(grpc_tcp *tcp, gpr_slice *slices, size_t nslices,
-                         grpc_endpoint_cb_status status) {
-  grpc_endpoint_read_cb cb = tcp->read_cb;
+static void call_read_cb(grpc_tcp *tcp, int success) {
+  grpc_iomgr_closure *cb = tcp->read_cb;
 
   if (grpc_tcp_trace) {
     size_t i;
-    gpr_log(GPR_DEBUG, "read: status=%d", status);
-    for (i = 0; i < nslices; i++) {
-      char *dump = gpr_dump_slice(slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
+    gpr_log(GPR_DEBUG, "read: success=%d", success);
+    for (i = 0; i < tcp->incoming_buffer->count; i++) {
+      char *dump = gpr_dump_slice(tcp->incoming_buffer->slices[i],
+                                  GPR_DUMP_HEX | GPR_DUMP_ASCII);
       gpr_log(GPR_DEBUG, "READ %p: %s", tcp, dump);
       gpr_free(dump);
    }
  }
 
  tcp->read_cb = NULL;
-  cb(tcp->read_user_data, slices, nslices, status);
+  tcp->incoming_buffer = NULL;
+  cb->cb(cb->cb_arg, success);
 }
 
-#define INLINE_SLICE_BUFFER_SIZE 8
 #define MAX_READ_IOVEC 4
-static void grpc_tcp_continue_read(grpc_tcp *tcp) {
-  gpr_slice static_read_slices[INLINE_SLICE_BUFFER_SIZE];
+static void tcp_continue_read(grpc_tcp *tcp) {
   struct msghdr msg;
   struct iovec iov[MAX_READ_IOVEC];
   ssize_t read_bytes;
-  ssize_t allocated_bytes;
-  struct grpc_tcp_slice_state read_state;
-  gpr_slice *final_slices;
-  size_t final_nslices;
+  size_t i;
 
   GPR_ASSERT(!tcp->finished_edge);
+  GPR_ASSERT(tcp->iov_size <= MAX_READ_IOVEC);
+  GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
   GRPC_TIMER_BEGIN(GRPC_PTAG_HANDLE_READ, 0);
 
-  slice_state_init(&read_state, static_read_slices, INLINE_SLICE_BUFFER_SIZE,
-                   0);
-
-  allocated_bytes = slice_state_append_blocks_into_iovec(
-      &read_state, iov, tcp->iov_size, tcp->slice_size);
+  while (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
+    gpr_slice_buffer_add_indexed(tcp->incoming_buffer,
+                                 gpr_slice_malloc(tcp->slice_size));
+  }
+  for (i = 0; i < tcp->incoming_buffer->count; i++) {
+    iov[i].iov_base = GPR_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
+    iov[i].iov_len = GPR_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
  }
 
   msg.msg_name = NULL;
   msg.msg_namelen = 0;
@@ -362,87 +194,63 @@ static void grpc_tcp_continue_read(grpc_tcp *tcp) {
   } while (read_bytes < 0 && errno == EINTR);
   GRPC_TIMER_END(GRPC_PTAG_RECVMSG, 0);
 
-  if (read_bytes < allocated_bytes) {
-    /* TODO(klempner): Consider a second read first, in hopes of getting a
-     * quick EAGAIN and saving a bunch of allocations. */
-    slice_state_remove_last(&read_state, read_bytes < 0
-                                             ? allocated_bytes
-                                             : allocated_bytes - read_bytes);
-  }
-
   if (read_bytes < 0) {
-    /* NB: After calling the user_cb a parallel call of the read handler may
+    /* NB: After calling call_read_cb a parallel call of the read handler may
      * be running. */
     if (errno == EAGAIN) {
       if (tcp->iov_size > 1) {
         tcp->iov_size /= 2;
       }
-      if (slice_state_has_available(&read_state)) {
-        /* TODO(klempner): We should probably do the call into the application
-           without all this junk on the stack */
-        /* FIXME(klempner): Refcount properly */
-        slice_state_transfer_ownership(&read_state, &final_slices,
-                                       &final_nslices);
-        tcp->finished_edge = 1;
-        call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_OK);
-        slice_state_destroy(&read_state);
-        grpc_tcp_unref(tcp);
-      } else {
-        /* We've consumed the edge, request a new one */
-        slice_state_destroy(&read_state);
-        grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);
-      }
+      /* We've consumed the edge, request a new one */
+      grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);
     } else {
       /* TODO(klempner): Log interesting errors */
-      call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_ERROR);
-      slice_state_destroy(&read_state);
-      grpc_tcp_unref(tcp);
+      gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+      call_read_cb(tcp, 0);
+      TCP_UNREF(tcp, "read");
     }
   } else if (read_bytes == 0) {
     /* 0 read size ==> end of stream */
-    if (slice_state_has_available(&read_state)) {
-      /* there were bytes already read: pass them up to the application */
-      slice_state_transfer_ownership(&read_state, &final_slices,
-                                     &final_nslices);
-      call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_EOF);
-    } else {
-      call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_EOF);
-    }
-    slice_state_destroy(&read_state);
-    grpc_tcp_unref(tcp);
+    gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+    call_read_cb(tcp, 0);
+    TCP_UNREF(tcp, "read");
   } else {
-    if (tcp->iov_size < MAX_READ_IOVEC) {
+    GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
+    if ((size_t)read_bytes < tcp->incoming_buffer->length) {
+      gpr_slice_buffer_trim_end(tcp->incoming_buffer,
+                                tcp->incoming_buffer->length - read_bytes);
+    } else if (tcp->iov_size < MAX_READ_IOVEC) {
       ++tcp->iov_size;
     }
-    GPR_ASSERT(slice_state_has_available(&read_state));
-    slice_state_transfer_ownership(&read_state, &final_slices, &final_nslices);
-    call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_OK);
-    slice_state_destroy(&read_state);
-    grpc_tcp_unref(tcp);
+    GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length);
+    call_read_cb(tcp, 1);
+    TCP_UNREF(tcp, "read");
   }
 
   GRPC_TIMER_END(GRPC_PTAG_HANDLE_READ, 0);
 }
 
-static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success) {
+static void tcp_handle_read(void *arg /* grpc_tcp */, int success) {
   grpc_tcp *tcp = (grpc_tcp *)arg;
   GPR_ASSERT(!tcp->finished_edge);
 
   if (!success) {
-    call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_SHUTDOWN);
-    grpc_tcp_unref(tcp);
+    call_read_cb(tcp, 0);
+    TCP_UNREF(tcp, "read");
   } else {
-    grpc_tcp_continue_read(tcp);
+    tcp_continue_read(tcp);
  }
 }
 
-static void grpc_tcp_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb,
-                                    void *user_data) {
+static grpc_endpoint_op_status tcp_read(grpc_endpoint *ep,
+                                        gpr_slice_buffer *incoming_buffer,
+                                        grpc_iomgr_closure *cb) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
   GPR_ASSERT(tcp->read_cb == NULL);
   tcp->read_cb = cb;
-  tcp->read_user_data = user_data;
-  gpr_ref(&tcp->refcount);
+  tcp->incoming_buffer = incoming_buffer;
+  gpr_slice_buffer_reset_and_unref(incoming_buffer);
+  TCP_REF(tcp, "read");
   if (tcp->finished_edge) {
     tcp->finished_edge = 0;
     grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);
@@ -450,18 +258,41 @@ static void grpc_tcp_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb,
     tcp->handle_read_closure.cb_arg = tcp;
     grpc_iomgr_add_delayed_callback(&tcp->handle_read_closure, 1);
   }
+  /* TODO(ctiller): immediate return */
+  return GRPC_ENDPOINT_PENDING;
 }
 
 #define MAX_WRITE_IOVEC 16
-static grpc_endpoint_write_status grpc_tcp_flush(grpc_tcp *tcp) {
+static grpc_endpoint_op_status tcp_flush(grpc_tcp *tcp) {
   struct msghdr msg;
   struct iovec iov[MAX_WRITE_IOVEC];
   int iov_size;
   ssize_t sent_length;
-  grpc_tcp_slice_state *state = &tcp->write_state;
+  ssize_t sending_length;
+  ssize_t trailing;
+  ssize_t unwind_slice_idx;
+  ssize_t unwind_byte_idx;
 
   for (;;) {
-    iov_size = slice_state_to_iovec(state, iov, MAX_WRITE_IOVEC);
+    sending_length = 0;
+    unwind_slice_idx = tcp->outgoing_slice_idx;
+    unwind_byte_idx = tcp->outgoing_byte_idx;
+    for (iov_size = 0; tcp->outgoing_slice_idx != tcp->outgoing_buffer->count &&
+                       iov_size != MAX_WRITE_IOVEC;
+         iov_size++) {
+      iov[iov_size].iov_base =
+          GPR_SLICE_START_PTR(
+              tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]) +
+          tcp->outgoing_byte_idx;
+      iov[iov_size].iov_len =
+          GPR_SLICE_LENGTH(
+              tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]) -
+          tcp->outgoing_byte_idx;
+      sending_length += iov[iov_size].iov_len;
+      tcp->outgoing_slice_idx++;
+      tcp->outgoing_byte_idx = 0;
+    }
+    GPR_ASSERT(iov_size > 0);
 
     msg.msg_name = NULL;
     msg.msg_namelen = 0;
@@ -480,70 +311,75 @@ static grpc_endpoint_write_status grpc_tcp_flush(grpc_tcp *tcp) {
 
     if (sent_length < 0) {
       if (errno == EAGAIN) {
-        return GRPC_ENDPOINT_WRITE_PENDING;
+        tcp->outgoing_slice_idx = unwind_slice_idx;
+        tcp->outgoing_byte_idx = unwind_byte_idx;
+        return GRPC_ENDPOINT_PENDING;
      } else {
         /* TODO(klempner): Log some of these */
-        slice_state_destroy(state);
-        return GRPC_ENDPOINT_WRITE_ERROR;
+        return GRPC_ENDPOINT_ERROR;
      }
    }
 
-    /* TODO(klempner): Probably better to batch this after we finish flushing */
-    slice_state_remove_prefix(state, sent_length);
+    GPR_ASSERT(tcp->outgoing_byte_idx == 0);
+    trailing = sending_length - sent_length;
+    while (trailing > 0) {
+      ssize_t slice_length;
+
+      tcp->outgoing_slice_idx--;
+      slice_length = GPR_SLICE_LENGTH(
+          tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]);
+      if (slice_length > trailing) {
+        tcp->outgoing_byte_idx = slice_length - trailing;
+        break;
+      } else {
+        trailing -= slice_length;
+      }
+    }
 
-    if (!slice_state_has_available(state)) {
-      return GRPC_ENDPOINT_WRITE_DONE;
+    if (tcp->outgoing_slice_idx == tcp->outgoing_buffer->count) {
+      return GRPC_ENDPOINT_DONE;
    }
  };
 }
 
-static void grpc_tcp_handle_write(void *arg /* grpc_tcp */, int success) {
+static void tcp_handle_write(void *arg /* grpc_tcp */, int success) {
   grpc_tcp *tcp = (grpc_tcp *)arg;
-  grpc_endpoint_write_status write_status;
-  grpc_endpoint_cb_status cb_status;
-  grpc_endpoint_write_cb cb;
+  grpc_endpoint_op_status status;
+  grpc_iomgr_closure *cb;
 
   if (!success) {
-    slice_state_destroy(&tcp->write_state);
     cb = tcp->write_cb;
     tcp->write_cb = NULL;
-    cb(tcp->write_user_data, GRPC_ENDPOINT_CB_SHUTDOWN);
-    grpc_tcp_unref(tcp);
+    cb->cb(cb->cb_arg, 0);
+    TCP_UNREF(tcp, "write");
     return;
  }
 
   GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_CB_WRITE, 0);
-  write_status = grpc_tcp_flush(tcp);
-  if (write_status == GRPC_ENDPOINT_WRITE_PENDING) {
+  status = tcp_flush(tcp);
+  if (status == GRPC_ENDPOINT_PENDING) {
     grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_closure);
  } else {
-    slice_state_destroy(&tcp->write_state);
-    if (write_status == GRPC_ENDPOINT_WRITE_DONE) {
-      cb_status = GRPC_ENDPOINT_CB_OK;
-    } else {
-      cb_status = GRPC_ENDPOINT_CB_ERROR;
-    }
     cb = tcp->write_cb;
     tcp->write_cb = NULL;
-    cb(tcp->write_user_data, cb_status);
-    grpc_tcp_unref(tcp);
+    cb->cb(cb->cb_arg, status == GRPC_ENDPOINT_DONE);
+    TCP_UNREF(tcp, "write");
  }
 
   GRPC_TIMER_END(GRPC_PTAG_TCP_CB_WRITE, 0);
 }
 
-static grpc_endpoint_write_status grpc_tcp_write(grpc_endpoint *ep,
-                                                 gpr_slice *slices,
-                                                 size_t nslices,
-                                                 grpc_endpoint_write_cb cb,
-                                                 void *user_data) {
+static grpc_endpoint_op_status tcp_write(grpc_endpoint *ep,
+                                         gpr_slice_buffer *buf,
+                                         grpc_iomgr_closure *cb) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
-  grpc_endpoint_write_status status;
+  grpc_endpoint_op_status status;
 
   if (grpc_tcp_trace) {
     size_t i;
 
-    for (i = 0; i < nslices; i++) {
-      char *data = gpr_dump_slice(slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
+    for (i = 0; i < buf->count; i++) {
+      char *data =
+          gpr_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
       gpr_log(GPR_DEBUG, "WRITE %p: %s", tcp, data);
       gpr_free(data);
    }
@@ -551,15 +387,19 @@ static grpc_endpoint_write_status grpc_tcp_write(grpc_endpoint *ep,
 
   GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_WRITE, 0);
   GPR_ASSERT(tcp->write_cb == NULL);
-  slice_state_init(&tcp->write_state, slices, nslices, nslices);
 
-  status = grpc_tcp_flush(tcp);
-  if (status == GRPC_ENDPOINT_WRITE_PENDING) {
-    /* TODO(klempner): Consider inlining rather than malloc for small nslices */
-    slice_state_realloc(&tcp->write_state, nslices);
-    gpr_ref(&tcp->refcount);
+  if (buf->length == 0) {
+    GRPC_TIMER_END(GRPC_PTAG_TCP_WRITE, 0);
+    return GRPC_ENDPOINT_DONE;
+  }
+  tcp->outgoing_buffer = buf;
+  tcp->outgoing_slice_idx = 0;
+  tcp->outgoing_byte_idx = 0;
+
+  status = tcp_flush(tcp);
+  if (status == GRPC_ENDPOINT_PENDING) {
+    TCP_REF(tcp, "write");
     tcp->write_cb = cb;
-    tcp->write_user_data = user_data;
     grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_closure);
  }
 
@@ -567,27 +407,25 @@ static grpc_endpoint_write_status grpc_tcp_write(grpc_endpoint *ep,
   return status;
 }
 
-static void grpc_tcp_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
+static void tcp_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
   grpc_pollset_add_fd(pollset, tcp->em_fd);
 }
 
-static void grpc_tcp_add_to_pollset_set(grpc_endpoint *ep,
-                                        grpc_pollset_set *pollset_set) {
+static void tcp_add_to_pollset_set(grpc_endpoint *ep,
+                                   grpc_pollset_set *pollset_set) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
   grpc_pollset_set_add_fd(pollset_set, tcp->em_fd);
 }
 
-static char *grpc_tcp_get_peer(grpc_endpoint *ep) {
+static char *tcp_get_peer(grpc_endpoint *ep) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
   return gpr_strdup(tcp->peer_string);
 }
 
 static const grpc_endpoint_vtable vtable = {
-    grpc_tcp_notify_on_read, grpc_tcp_write,
-    grpc_tcp_add_to_pollset, grpc_tcp_add_to_pollset_set,
-    grpc_tcp_shutdown, grpc_tcp_destroy,
-    grpc_tcp_get_peer};
+    tcp_read, tcp_write, tcp_add_to_pollset, tcp_add_to_pollset_set,
+    tcp_shutdown, tcp_destroy, tcp_get_peer};
 
 grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
                                const char *peer_string) {
@@ -597,21 +435,19 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
   tcp->fd = em_fd->fd;
   tcp->read_cb = NULL;
   tcp->write_cb = NULL;
-  tcp->read_user_data = NULL;
-  tcp->write_user_data = NULL;
+  tcp->incoming_buffer = NULL;
   tcp->slice_size = slice_size;
   tcp->iov_size = 1;
   tcp->finished_edge = 1;
-  slice_state_init(&tcp->write_state, NULL, 0, 0);
   /* paired with unref in grpc_tcp_destroy */
   gpr_ref_init(&tcp->refcount, 1);
   tcp->em_fd = em_fd;
-  tcp->read_closure.cb = grpc_tcp_handle_read;
+  tcp->read_closure.cb = tcp_handle_read;
   tcp->read_closure.cb_arg = tcp;
-  tcp->write_closure.cb = grpc_tcp_handle_write;
+  tcp->write_closure.cb = tcp_handle_write;
   tcp->write_closure.cb_arg = tcp;
 
-  tcp->handle_read_closure.cb = grpc_tcp_handle_read;
+  tcp->handle_read_closure.cb = tcp_handle_read;
 
   return &tcp->base;
 }
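For reference, a hypothetical endpoint implementation against the refactored vtable might look like the sketch below. Everything here (`null_endpoint`, the `null_*` functions, `null_endpoint_create`) is invented for illustration and assumes `struct grpc_endpoint` still carries only a `vtable` pointer; only the vtable member order and the operation signatures come from this commit.

```c
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include "src/core/iomgr/endpoint.h"

typedef struct {
  grpc_endpoint base; /* must be first so grpc_endpoint* casts back to this */
} null_endpoint;

static grpc_endpoint_op_status null_read(grpc_endpoint *ep,
                                         gpr_slice_buffer *slices,
                                         grpc_iomgr_closure *cb) {
  /* no data will ever arrive: report the endpoint as broken, cb not called */
  return GRPC_ENDPOINT_ERROR;
}

static grpc_endpoint_op_status null_write(grpc_endpoint *ep,
                                          gpr_slice_buffer *slices,
                                          grpc_iomgr_closure *cb) {
  /* pretend the bytes were written synchronously: cb won't be called */
  return GRPC_ENDPOINT_DONE;
}

static void null_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {}
static void null_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *ps) {}
static void null_shutdown(grpc_endpoint *ep) {}
static void null_destroy(grpc_endpoint *ep) { gpr_free(ep); }
static char *null_get_peer(grpc_endpoint *ep) { return gpr_strdup("null"); }

/* same member order as the vtable initializer in tcp_posix.c above */
static const grpc_endpoint_vtable null_vtable = {
    null_read,     null_write,   null_add_to_pollset, null_add_to_pollset_set,
    null_shutdown, null_destroy, null_get_peer};

grpc_endpoint *null_endpoint_create(void) {
  null_endpoint *ep = gpr_malloc(sizeof(*ep));
  ep->base.vtable = &null_vtable;
  return &ep->base;
}
```

The read and write entry points now share one shape (`gpr_slice_buffer` in, `grpc_iomgr_closure` for completion, `grpc_endpoint_op_status` out), which is what lets callers such as the transport handle immediate and deferred completion through a single code path.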