Diffstat (limited to 'src/core')
-rw-r--r-- | src/core/ext/census/intrusive_hash_map.c | 302
-rw-r--r-- | src/core/ext/census/intrusive_hash_map.h | 150
-rw-r--r-- | src/core/ext/census/intrusive_hash_map_internal.h | 46
-rw-r--r-- | src/core/lib/http/httpcli.c | 2
-rw-r--r-- | src/core/lib/http/httpcli_security_connector.c | 4
-rw-r--r-- | src/core/lib/security/transport/security_connector.c | 19
-rw-r--r-- | src/core/lib/security/transport/security_handshaker.c | 220
7 files changed, 638 insertions(+), 105 deletions(-)
diff --git a/src/core/ext/census/intrusive_hash_map.c b/src/core/ext/census/intrusive_hash_map.c new file mode 100644 index 0000000000..e64e8086ca --- /dev/null +++ b/src/core/ext/census/intrusive_hash_map.c @@ -0,0 +1,302 @@ +/* + * Copyright 2017 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "src/core/ext/census/intrusive_hash_map.h" +#include <string.h> + +extern bool hm_index_compare(const hm_index *A, const hm_index *B); + +/* Simple hashing function that takes lower 32 bits. */ +static inline uint32_t chunked_vector_hasher(uint64_t key) { + return (uint32_t)key; +} + +/* Vector chunks are 1MiB divided by pointer size. */ +static const size_t VECTOR_CHUNK_SIZE = (1 << 20) / sizeof(void *); + +/* Helper functions which return buckets from the chunked vector. */ +static inline void **get_mutable_bucket(const chunked_vector *buckets, + uint32_t index) { + if (index < VECTOR_CHUNK_SIZE) { + return &buckets->first_[index]; + } + size_t rest_index = (index - VECTOR_CHUNK_SIZE) / VECTOR_CHUNK_SIZE; + return &buckets->rest_[rest_index][index % VECTOR_CHUNK_SIZE]; +} + +static inline void *get_bucket(const chunked_vector *buckets, uint32_t index) { + if (index < VECTOR_CHUNK_SIZE) { + return buckets->first_[index]; + } + size_t rest_index = (index - VECTOR_CHUNK_SIZE) / VECTOR_CHUNK_SIZE; + return buckets->rest_[rest_index][index % VECTOR_CHUNK_SIZE]; +} + +/* Helper function. */ +static inline size_t RestSize(const chunked_vector *vec) { + return (vec->size_ <= VECTOR_CHUNK_SIZE) + ? 0 + : (vec->size_ - VECTOR_CHUNK_SIZE - 1) / VECTOR_CHUNK_SIZE + 1; +} + +/* Initialize chunked vector to size of 0. */ +static void chunked_vector_init(chunked_vector *vec) { + vec->size_ = 0; + vec->first_ = NULL; + vec->rest_ = NULL; +} + +/* Clear chunked vector and free all memory that has been allocated then + initialize chunked vector. */ +static void chunked_vector_clear(chunked_vector *vec) { + if (vec->first_ != NULL) { + gpr_free(vec->first_); + } + if (vec->rest_ != NULL) { + size_t rest_size = RestSize(vec); + for (size_t i = 0; i < rest_size; ++i) { + if (vec->rest_[i] != NULL) { + gpr_free(vec->rest_[i]); + } + } + gpr_free(vec->rest_); + } + chunked_vector_init(vec); +} + +/* Clear chunked vector and then resize it to n entries. Allow the first 1MB to + be read w/o an extra cache miss. The rest of the elements are stored in an + array of arrays to avoid large mallocs. 
*/ +static void chunked_vector_reset(chunked_vector *vec, size_t n) { + chunked_vector_clear(vec); + vec->size_ = n; + if (n <= VECTOR_CHUNK_SIZE) { + vec->first_ = (void **)gpr_malloc(sizeof(void *) * n); + memset(vec->first_, 0, sizeof(void *) * n); + } else { + vec->first_ = (void **)gpr_malloc(sizeof(void *) * VECTOR_CHUNK_SIZE); + memset(vec->first_, 0, sizeof(void *) * VECTOR_CHUNK_SIZE); + size_t rest_size = RestSize(vec); + vec->rest_ = (void ***)gpr_malloc(sizeof(void **) * rest_size); + memset(vec->rest_, 0, sizeof(void **) * rest_size); + int i = 0; + n -= VECTOR_CHUNK_SIZE; + while (n > 0) { + size_t this_size = GPR_MIN(n, VECTOR_CHUNK_SIZE); + vec->rest_[i] = (void **)gpr_malloc(sizeof(void *) * this_size); + memset(vec->rest_[i], 0, sizeof(void *) * this_size); + n -= this_size; + ++i; + } + } +} + +void intrusive_hash_map_init(intrusive_hash_map *hash_map, + uint32_t initial_log2_table_size) { + hash_map->log2_num_buckets = initial_log2_table_size; + hash_map->num_items = 0; + uint32_t num_buckets = (uint32_t)1 << hash_map->log2_num_buckets; + hash_map->extend_threshold = num_buckets >> 1; + chunked_vector_init(&hash_map->buckets); + chunked_vector_reset(&hash_map->buckets, num_buckets); + hash_map->hash_mask = num_buckets - 1; +} + +bool intrusive_hash_map_empty(const intrusive_hash_map *hash_map) { + return hash_map->num_items == 0; +} + +size_t intrusive_hash_map_size(const intrusive_hash_map *hash_map) { + return hash_map->num_items; +} + +void intrusive_hash_map_end(const intrusive_hash_map *hash_map, hm_index *idx) { + idx->bucket_index = (uint32_t)hash_map->buckets.size_; + GPR_ASSERT(idx->bucket_index <= UINT32_MAX); + idx->item = NULL; +} + +void intrusive_hash_map_next(const intrusive_hash_map *hash_map, + hm_index *idx) { + idx->item = idx->item->hash_link; + while (idx->item == NULL) { + idx->bucket_index++; + if (idx->bucket_index >= hash_map->buckets.size_) { + /* Reached end of table. */ + idx->item = NULL; + return; + } + idx->item = (hm_item *)get_bucket(&hash_map->buckets, idx->bucket_index); + } +} + +void intrusive_hash_map_begin(const intrusive_hash_map *hash_map, + hm_index *idx) { + for (uint32_t i = 0; i < hash_map->buckets.size_; ++i) { + if (get_bucket(&hash_map->buckets, i) != NULL) { + idx->bucket_index = i; + idx->item = (hm_item *)get_bucket(&hash_map->buckets, i); + return; + } + } + intrusive_hash_map_end(hash_map, idx); +} + +hm_item *intrusive_hash_map_find(const intrusive_hash_map *hash_map, + uint64_t key) { + uint32_t index = chunked_vector_hasher(key) & hash_map->hash_mask; + + hm_item *p = (hm_item *)get_bucket(&hash_map->buckets, index); + while (p != NULL) { + if (key == p->key) { + return p; + } + p = p->hash_link; + } + return NULL; +} + +hm_item *intrusive_hash_map_erase(intrusive_hash_map *hash_map, uint64_t key) { + uint32_t index = chunked_vector_hasher(key) & hash_map->hash_mask; + + hm_item **slot = (hm_item **)get_mutable_bucket(&hash_map->buckets, index); + hm_item *p = *slot; + if (p == NULL) { + return NULL; + } + + if (key == p->key) { + *slot = p->hash_link; + p->hash_link = NULL; + hash_map->num_items--; + return p; + } + + hm_item *prev = p; + p = p->hash_link; + + while (p) { + if (key == p->key) { + prev->hash_link = p->hash_link; + p->hash_link = NULL; + hash_map->num_items--; + return p; + } + prev = p; + p = p->hash_link; + } + return NULL; +} + +/* Insert an hm_item* into the underlying chunked vector. hash_mask is + * array_size-1. Returns true if it is a new hm_item and false if the hm_item + * already existed. 
+ */ +static inline bool intrusive_hash_map_internal_insert(chunked_vector *buckets, + uint32_t hash_mask, + hm_item *item) { + const uint64_t key = item->key; + uint32_t index = chunked_vector_hasher(key) & hash_mask; + hm_item **slot = (hm_item **)get_mutable_bucket(buckets, index); + hm_item *p = *slot; + item->hash_link = p; + + /* Check to see if key already exists. */ + while (p) { + if (p->key == key) { + return false; + } + p = p->hash_link; + } + + /* Otherwise add new entry. */ + *slot = item; + return true; +} + +/* Extend the allocated number of elements in the hash map by a factor of 2. */ +void intrusive_hash_map_extend(intrusive_hash_map *hash_map) { + uint32_t new_log2_num_buckets = 1 + hash_map->log2_num_buckets; + uint32_t new_num_buckets = (uint32_t)1 << new_log2_num_buckets; + GPR_ASSERT(new_num_buckets <= UINT32_MAX && new_num_buckets > 0); + chunked_vector new_buckets; + chunked_vector_init(&new_buckets); + chunked_vector_reset(&new_buckets, new_num_buckets); + uint32_t new_hash_mask = new_num_buckets - 1; + + hm_index cur_idx; + hm_index end_idx; + intrusive_hash_map_end(hash_map, &end_idx); + intrusive_hash_map_begin(hash_map, &cur_idx); + while (!hm_index_compare(&cur_idx, &end_idx)) { + hm_item *new_item = cur_idx.item; + intrusive_hash_map_next(hash_map, &cur_idx); + intrusive_hash_map_internal_insert(&new_buckets, new_hash_mask, new_item); + } + + /* Set values for new chunked_vector. extend_threshold is set to half of + * new_num_buckets. */ + hash_map->log2_num_buckets = new_log2_num_buckets; + chunked_vector_clear(&hash_map->buckets); + hash_map->buckets = new_buckets; + hash_map->hash_mask = new_hash_mask; + hash_map->extend_threshold = new_num_buckets >> 1; +} + +/* Insert a hm_item. The hm_item must remain live until it is removed from the + table. This object does not take the ownership of hm_item. The caller must + remove this hm_item from the table and delete it before this table is + deleted. If hm_item exists already num_items is not changed. */ +bool intrusive_hash_map_insert(intrusive_hash_map *hash_map, hm_item *item) { + if (hash_map->num_items >= hash_map->extend_threshold) { + intrusive_hash_map_extend(hash_map); + } + if (intrusive_hash_map_internal_insert(&hash_map->buckets, + hash_map->hash_mask, item)) { + hash_map->num_items++; + return true; + } + return false; +} + +void intrusive_hash_map_clear(intrusive_hash_map *hash_map, + void (*free_object)(void *)) { + hm_index cur; + hm_index end; + intrusive_hash_map_end(hash_map, &end); + intrusive_hash_map_begin(hash_map, &cur); + + while (!hm_index_compare(&cur, &end)) { + hm_index next = cur; + intrusive_hash_map_next(hash_map, &next); + if (cur.item != NULL) { + hm_item *item = intrusive_hash_map_erase(hash_map, cur.item->key); + (*free_object)((void *)item); + gpr_free(item); + } + cur = next; + } +} + +void intrusive_hash_map_free(intrusive_hash_map *hash_map, + void (*free_object)(void *)) { + intrusive_hash_map_clear(hash_map, (*free_object)); + hash_map->num_items = 0; + hash_map->extend_threshold = 0; + hash_map->log2_num_buckets = 0; + hash_map->hash_mask = 0; + chunked_vector_clear(&hash_map->buckets); +} diff --git a/src/core/ext/census/intrusive_hash_map.h b/src/core/ext/census/intrusive_hash_map.h new file mode 100644 index 0000000000..9a7fd07f20 --- /dev/null +++ b/src/core/ext/census/intrusive_hash_map.h @@ -0,0 +1,150 @@ +/* + * Copyright 2017 Google Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_H +#define GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_H + +#include "src/core/ext/census/intrusive_hash_map_internal.h" + +/* intrusive_hash_map is a fast chained hash table. This hash map is faster than + * a dense hash map when the application calls insert and erase more often than + * find. When the workload is dominated by find() a dense hash map may be + * faster. + * + * intrusive_hash_map uses an intrusive header placed within a user defined + * struct. The header field IHM_key MUST be set to a valid value before + * insertion into the hash map or undefined behavior may occur. The header field + * IHM_hash_link MUST to be set to NULL initially. + * + * EXAMPLE USAGE: + * + * typedef struct string_item { + * INTRUSIVE_HASH_MAP_HEADER; + * // User data. + * char *str_buf; + * uint16_t len; + * } string_item; + * + * static string_item *make_string_item(uint64_t key, const char *buf, + * uint16_t len) { + * string_item *item = (string_item *)gpr_malloc(sizeof(string_item)); + * item->IHM_key = key; + * item->IHM_hash_link = NULL; + * item->len = len; + * item->str_buf = (char *)malloc(len); + * memcpy(item->str_buf, buf, len); + * return item; + * } + * + * intrusive_hash_map hash_map; + * intrusive_hash_map_init(&hash_map, 4); + * string_item *new_item1 = make_string_item(10, "test1", 5); + * bool ok = intrusive_hash_map_insert(&hash_map, (hm_item *)new_item1); + * + * string_item *item1 = + * (string_item *)intrusive_hash_map_find(&hash_map, 10); + */ + +/* Hash map item. Stores key and a pointer to the actual object. A user defined + * version of this can be passed in provided the first 2 entries (key and + * hash_link) are the same. These entries must be first in the user defined + * struct. Pointer to struct will need to be cast as (hm_item *) when passed to + * hash map. This allows it to be intrusive. */ +typedef struct hm_item { + uint64_t key; + struct hm_item *hash_link; + /* Optional user defined data after this. */ +} hm_item; + +/* Macro provided for ease of use. This must be first in the user defined + * struct (i.e. uint64_t key and hm_item * must be the first two elements in + * that order). */ +#define INTRUSIVE_HASH_MAP_HEADER \ + uint64_t IHM_key; \ + struct hm_item *IHM_hash_link + +/* Index struct which acts as a pseudo-iterator within the hash map. */ +typedef struct hm_index { + uint32_t bucket_index; // hash map bucket index. + hm_item *item; // Pointer to hm_item within the hash map. +} hm_index; + +/* Returns true if two hm_indices point to the same object within the hash map + * and false otherwise. */ +inline bool hm_index_compare(const hm_index *A, const hm_index *B) { + return (A->item == B->item && A->bucket_index == B->bucket_index); +} + +/* + * Helper functions for iterating over the hash map. 
+ */ + +/* On return idx will contain an invalid index which is always equal to + * hash_map->buckets.size_ */ +void intrusive_hash_map_end(const intrusive_hash_map *hash_map, hm_index *idx); + +/* Iterates index to the next valid entry in the hash map and stores the + * index within idx. If end of table is reached, idx will contain the same + * values as if intrusive_hash_map_end() was called. */ +void intrusive_hash_map_next(const intrusive_hash_map *hash_map, hm_index *idx); + +/* On return, idx will contain the index of the first non-null entry in the hash + * map. If the hash map is empty, idx will contain the same values as if + * intrusive_hash_map_end() was called. */ +void intrusive_hash_map_begin(const intrusive_hash_map *hash_map, + hm_index *idx); + +/* Initialize intrusive hash map data structure. This must be called before + * the hash map can be used. The initial size of an intrusive hash map will be + * 2^initial_log2_map_size (valid range is [0, 31]). */ +void intrusive_hash_map_init(intrusive_hash_map *hash_map, + uint32_t initial_log2_map_size); + +/* Returns true if the hash map is empty and false otherwise. */ +bool intrusive_hash_map_empty(const intrusive_hash_map *hash_map); + +/* Returns the number of elements currently in the hash map. */ +size_t intrusive_hash_map_size(const intrusive_hash_map *hash_map); + +/* Find a hm_item within the hash map by key. Returns NULL if item was not + * found. */ +hm_item *intrusive_hash_map_find(const intrusive_hash_map *hash_map, + uint64_t key); + +/* Erase the hm_item that corresponds with key. If the hm_item is found, return + * the pointer to the hm_item. Else returns NULL. */ +hm_item *intrusive_hash_map_erase(intrusive_hash_map *hash_map, uint64_t key); + +/* Attempts to insert a new hm_item into the hash map. If an element with the + * same key already exists, it will not insert the new item and return false. + * Otherwise, it will insert the new item and return true. */ +bool intrusive_hash_map_insert(intrusive_hash_map *hash_map, hm_item *item); + +/* Clears entire contents of the hash map, but leaves internal data structure + * untouched. Second argument takes a function pointer to a function that will + * free the object designated by the user and pointed to by hash_map->value. */ +void intrusive_hash_map_clear(intrusive_hash_map *hash_map, + void (*free_object)(void *)); + +/* Erase all contents of hash map and free the memory. Hash map is invalid + * after calling this function and cannot be used until it has been + * reinitialized (intrusive_hash_map_init()). This function takes a function + * pointer to a function that will free the object designated by the user and + * pointed to by hash_map->value. */ +void intrusive_hash_map_free(intrusive_hash_map *hash_map, + void (*free_object)(void *)); + +#endif // GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_H diff --git a/src/core/ext/census/intrusive_hash_map_internal.h b/src/core/ext/census/intrusive_hash_map_internal.h new file mode 100644 index 0000000000..c0a3c6f59a --- /dev/null +++ b/src/core/ext/census/intrusive_hash_map_internal.h @@ -0,0 +1,46 @@ +/* + * Copyright 2017 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_INTERNAL_H +#define GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_INTERNAL_H + +#include <grpc/support/alloc.h> +#include <grpc/support/log.h> +#include <grpc/support/useful.h> +#include <stdbool.h> + +/* The chunked vector is a data structure that allocates buckets for use in the + * hash map. ChunkedVector is logically equivalent to T*[N] (cast void* as + * T*). It's internally implemented as an array of 1MB arrays to avoid + * allocating large consecutive memory chunks. This is an internal data + * structure that should never be accessed directly. */ +typedef struct chunked_vector { + size_t size_; + void **first_; + void ***rest_; +} chunked_vector; + +/* Core intrusive hash map data structure. All internal elements are managed by + * functions and should not be altered manually. */ +typedef struct intrusive_hash_map { + uint32_t num_items; + uint32_t extend_threshold; + uint32_t log2_num_buckets; + uint32_t hash_mask; + chunked_vector buckets; +} intrusive_hash_map; + +#endif // GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_INTERNAL_H diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.c index c3421e1b55..f5588a9a76 100644 --- a/src/core/lib/http/httpcli.c +++ b/src/core/lib/http/httpcli.c @@ -245,7 +245,7 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req, static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { internal_request *req = arg; if (error != GRPC_ERROR_NONE) { - finish(exec_ctx, req, error); + finish(exec_ctx, req, GRPC_ERROR_REF(error)); return; } req->next_address = 0; diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c index 76946434f0..ea7c1122c1 100644 --- a/src/core/lib/http/httpcli_security_connector.c +++ b/src/core/lib/http/httpcli_security_connector.c @@ -44,6 +44,7 @@ #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/support/string.h" #include "src/core/tsi/ssl_transport_security.h" +#include "src/core/tsi/transport_security_adapter.h" typedef struct { grpc_channel_security_connector base; @@ -78,7 +79,8 @@ static void httpcli_ssl_add_handshakers(grpc_exec_ctx *exec_ctx, } grpc_handshake_manager_add( handshake_mgr, - grpc_security_handshaker_create(exec_ctx, handshaker, &sc->base)); + grpc_security_handshaker_create( + exec_ctx, tsi_create_adapter_handshaker(handshaker), &sc->base)); } static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx, diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c index 30431a4e4a..416a3bdb35 100644 --- a/src/core/lib/security/transport/security_connector.c +++ b/src/core/lib/security/transport/security_connector.c @@ -56,6 +56,7 @@ #include "src/core/lib/support/string.h" #include "src/core/tsi/fake_transport_security.h" #include "src/core/tsi/ssl_transport_security.h" +#include "src/core/tsi/transport_security_adapter.h" /* -- Constants. 
-- */ @@ -390,7 +391,8 @@ static void fake_channel_add_handshakers( grpc_handshake_manager_add( handshake_mgr, grpc_security_handshaker_create( - exec_ctx, tsi_create_fake_handshaker(true /* is_client */), + exec_ctx, tsi_create_adapter_handshaker( + tsi_create_fake_handshaker(true /* is_client */)), &sc->base)); } @@ -400,7 +402,8 @@ static void fake_server_add_handshakers(grpc_exec_ctx *exec_ctx, grpc_handshake_manager_add( handshake_mgr, grpc_security_handshaker_create( - exec_ctx, tsi_create_fake_handshaker(false /* is_client */), + exec_ctx, tsi_create_adapter_handshaker( + tsi_create_fake_handshaker(false /* is_client */)), &sc->base)); } @@ -495,8 +498,10 @@ static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx, } // Create handshakers. - grpc_handshake_manager_add(handshake_mgr, grpc_security_handshaker_create( - exec_ctx, tsi_hs, &sc->base)); + grpc_handshake_manager_add( + handshake_mgr, + grpc_security_handshaker_create( + exec_ctx, tsi_create_adapter_handshaker(tsi_hs), &sc->base)); } static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx, @@ -515,8 +520,10 @@ static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx, } // Create handshakers. - grpc_handshake_manager_add(handshake_mgr, grpc_security_handshaker_create( - exec_ctx, tsi_hs, &sc->base)); + grpc_handshake_manager_add( + handshake_mgr, + grpc_security_handshaker_create( + exec_ctx, tsi_create_adapter_handshaker(tsi_hs), &sc->base)); } static int ssl_host_matches_name(const tsi_peer *peer, const char *peer_name) { diff --git a/src/core/lib/security/transport/security_handshaker.c b/src/core/lib/security/transport/security_handshaker.c index 509b4b556d..3bc113e20f 100644 --- a/src/core/lib/security/transport/security_handshaker.c +++ b/src/core/lib/security/transport/security_handshaker.c @@ -71,12 +71,12 @@ typedef struct { unsigned char *handshake_buffer; size_t handshake_buffer_size; - grpc_slice_buffer left_overs; grpc_slice_buffer outgoing; grpc_closure on_handshake_data_sent_to_peer; grpc_closure on_handshake_data_received_from_peer; grpc_closure on_peer_checked; grpc_auth_context *auth_context; + tsi_handshaker_result *handshaker_result; } security_handshaker; static void security_handshaker_unref(grpc_exec_ctx *exec_ctx, @@ -84,6 +84,7 @@ static void security_handshaker_unref(grpc_exec_ctx *exec_ctx, if (gpr_unref(&h->refs)) { gpr_mu_destroy(&h->mu); tsi_handshaker_destroy(h->handshaker); + tsi_handshaker_result_destroy(h->handshaker_result); if (h->endpoint_to_destroy != NULL) { grpc_endpoint_destroy(exec_ctx, h->endpoint_to_destroy); } @@ -92,7 +93,6 @@ static void security_handshaker_unref(grpc_exec_ctx *exec_ctx, gpr_free(h->read_buffer_to_destroy); } gpr_free(h->handshake_buffer); - grpc_slice_buffer_destroy_internal(exec_ctx, &h->left_overs); grpc_slice_buffer_destroy_internal(exec_ctx, &h->outgoing); GRPC_AUTH_CONTEXT_UNREF(h->auth_context, "handshake"); GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, h->connector, "handshake"); @@ -150,10 +150,10 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg, security_handshake_failed_locked(exec_ctx, h, GRPC_ERROR_REF(error)); goto done; } - // Get frame protector. + // Create frame protector. 
tsi_frame_protector *protector; - tsi_result result = - tsi_handshaker_create_frame_protector(h->handshaker, NULL, &protector); + tsi_result result = tsi_handshaker_result_create_frame_protector( + h->handshaker_result, NULL, &protector); if (result != TSI_OK) { error = grpc_set_tsi_error_result( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Frame protector creation failed"), @@ -161,14 +161,25 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg, security_handshake_failed_locked(exec_ctx, h, error); goto done; } - // Success. + // Get unused bytes. + unsigned char *unused_bytes = NULL; + size_t unused_bytes_size = 0; + result = tsi_handshaker_result_get_unused_bytes( + h->handshaker_result, &unused_bytes, &unused_bytes_size); // Create secure endpoint. - h->args->endpoint = grpc_secure_endpoint_create( - protector, h->args->endpoint, h->left_overs.slices, h->left_overs.count); - h->left_overs.count = 0; - h->left_overs.length = 0; - // Clear out the read buffer before it gets passed to the transport, - // since any excess bytes were already copied to h->left_overs. + if (unused_bytes_size > 0) { + grpc_slice slice = + grpc_slice_from_copied_buffer((char *)unused_bytes, unused_bytes_size); + h->args->endpoint = + grpc_secure_endpoint_create(protector, h->args->endpoint, &slice, 1); + grpc_slice_unref_internal(exec_ctx, slice); + } else { + h->args->endpoint = + grpc_secure_endpoint_create(protector, h->args->endpoint, NULL, 0); + } + tsi_handshaker_result_destroy(h->handshaker_result); + h->handshaker_result = NULL; + // Clear out the read buffer before it gets passed to the transport. grpc_slice_buffer_reset_and_unref_internal(exec_ctx, h->args->read_buffer); // Add auth context to channel args. grpc_arg auth_context_arg = grpc_auth_context_to_arg(h->auth_context); @@ -189,7 +200,8 @@ done: static grpc_error *check_peer_locked(grpc_exec_ctx *exec_ctx, security_handshaker *h) { tsi_peer peer; - tsi_result result = tsi_handshaker_extract_peer(h->handshaker, &peer); + tsi_result result = + tsi_handshaker_result_extract_peer(h->handshaker_result, &peer); if (result != TSI_OK) { return grpc_set_tsi_error_result( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Peer extraction failed"), result); @@ -199,34 +211,87 @@ static grpc_error *check_peer_locked(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -static grpc_error *send_handshake_bytes_to_peer_locked(grpc_exec_ctx *exec_ctx, - security_handshaker *h) { - // Get data to send. - tsi_result result = TSI_OK; - size_t offset = 0; - do { - size_t to_send_size = h->handshake_buffer_size - offset; - result = tsi_handshaker_get_bytes_to_send_to_peer( - h->handshaker, h->handshake_buffer + offset, &to_send_size); - offset += to_send_size; - if (result == TSI_INCOMPLETE_DATA) { - h->handshake_buffer_size *= 2; - h->handshake_buffer = - gpr_realloc(h->handshake_buffer, h->handshake_buffer_size); - } - } while (result == TSI_INCOMPLETE_DATA); +static grpc_error *on_handshake_next_done_locked( + grpc_exec_ctx *exec_ctx, security_handshaker *h, tsi_result result, + const unsigned char *bytes_to_send, size_t bytes_to_send_size, + tsi_handshaker_result *handshaker_result) { + grpc_error *error = GRPC_ERROR_NONE; + // Read more if we need to. 
+ if (result == TSI_INCOMPLETE_DATA) { + GPR_ASSERT(bytes_to_send_size == 0); + grpc_endpoint_read(exec_ctx, h->args->endpoint, h->args->read_buffer, + &h->on_handshake_data_received_from_peer); + return error; + } if (result != TSI_OK) { return grpc_set_tsi_error_result( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshake failed"), result); } - // Send data. - grpc_slice to_send = - grpc_slice_from_copied_buffer((const char *)h->handshake_buffer, offset); - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &h->outgoing); - grpc_slice_buffer_add(&h->outgoing, to_send); - grpc_endpoint_write(exec_ctx, h->args->endpoint, &h->outgoing, - &h->on_handshake_data_sent_to_peer); - return GRPC_ERROR_NONE; + // Update handshaker result. + if (handshaker_result != NULL) { + GPR_ASSERT(h->handshaker_result == NULL); + h->handshaker_result = handshaker_result; + } + if (bytes_to_send_size > 0) { + // Send data to peer, if needed. + grpc_slice to_send = grpc_slice_from_copied_buffer( + (const char *)bytes_to_send, bytes_to_send_size); + grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &h->outgoing); + grpc_slice_buffer_add(&h->outgoing, to_send); + grpc_endpoint_write(exec_ctx, h->args->endpoint, &h->outgoing, + &h->on_handshake_data_sent_to_peer); + } else if (handshaker_result == NULL) { + // There is nothing to send, but need to read from peer. + grpc_endpoint_read(exec_ctx, h->args->endpoint, h->args->read_buffer, + &h->on_handshake_data_received_from_peer); + } else { + // Handshake has finished, check peer and so on. + error = check_peer_locked(exec_ctx, h); + } + return error; +} + +static void on_handshake_next_done_grpc_wrapper( + tsi_result result, void *user_data, const unsigned char *bytes_to_send, + size_t bytes_to_send_size, tsi_handshaker_result *handshaker_result) { + security_handshaker *h = user_data; + // This callback will be invoked by TSI in a non-grpc thread, so it's + // safe to create our own exec_ctx here. + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + gpr_mu_lock(&h->mu); + grpc_error *error = + on_handshake_next_done_locked(&exec_ctx, h, result, bytes_to_send, + bytes_to_send_size, handshaker_result); + if (error != GRPC_ERROR_NONE) { + security_handshake_failed_locked(&exec_ctx, h, error); + gpr_mu_unlock(&h->mu); + security_handshaker_unref(&exec_ctx, h); + } else { + gpr_mu_unlock(&h->mu); + } + grpc_exec_ctx_finish(&exec_ctx); +} + +static grpc_error *do_handshaker_next_locked( + grpc_exec_ctx *exec_ctx, security_handshaker *h, + const unsigned char *bytes_received, size_t bytes_received_size) { + // Invoke TSI handshaker. + unsigned char *bytes_to_send = NULL; + size_t bytes_to_send_size = 0; + tsi_handshaker_result *handshaker_result = NULL; + tsi_result result = tsi_handshaker_next( + h->handshaker, bytes_received, bytes_received_size, &bytes_to_send, + &bytes_to_send_size, &handshaker_result, + &on_handshake_next_done_grpc_wrapper, h); + if (result == TSI_ASYNC) { + // Handshaker operating asynchronously. Nothing else to do here; + // callback will be invoked in a TSI thread. + return GRPC_ERROR_NONE; + } + // Handshaker returned synchronously. Invoke callback directly in + // this thread with our existing exec_ctx. 
+ return on_handshake_next_done_locked(exec_ctx, h, result, bytes_to_send, + bytes_to_send_size, handshaker_result); } static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx, @@ -241,72 +306,34 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx, security_handshaker_unref(exec_ctx, h); return; } - // Process received data. - tsi_result result = TSI_OK; - size_t consumed_slice_size = 0; + // Copy all slices received. size_t i; + size_t bytes_received_size = 0; for (i = 0; i < h->args->read_buffer->count; i++) { - consumed_slice_size = GRPC_SLICE_LENGTH(h->args->read_buffer->slices[i]); - result = tsi_handshaker_process_bytes_from_peer( - h->handshaker, GRPC_SLICE_START_PTR(h->args->read_buffer->slices[i]), - &consumed_slice_size); - if (!tsi_handshaker_is_in_progress(h->handshaker)) break; + bytes_received_size += GRPC_SLICE_LENGTH(h->args->read_buffer->slices[i]); } - if (tsi_handshaker_is_in_progress(h->handshaker)) { - /* We may need more data. */ - if (result == TSI_INCOMPLETE_DATA) { - grpc_endpoint_read(exec_ctx, h->args->endpoint, h->args->read_buffer, - &h->on_handshake_data_received_from_peer); - goto done; - } else { - error = send_handshake_bytes_to_peer_locked(exec_ctx, h); - if (error != GRPC_ERROR_NONE) { - security_handshake_failed_locked(exec_ctx, h, error); - gpr_mu_unlock(&h->mu); - security_handshaker_unref(exec_ctx, h); - return; - } - goto done; - } + if (bytes_received_size > h->handshake_buffer_size) { + h->handshake_buffer = gpr_realloc(h->handshake_buffer, bytes_received_size); + h->handshake_buffer_size = bytes_received_size; } - if (result != TSI_OK) { - security_handshake_failed_locked( - exec_ctx, h, - grpc_set_tsi_error_result( - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshake failed"), result)); - gpr_mu_unlock(&h->mu); - security_handshaker_unref(exec_ctx, h); - return; - } - /* Handshake is done and successful this point. */ - bool has_left_overs_in_current_slice = - (consumed_slice_size < - GRPC_SLICE_LENGTH(h->args->read_buffer->slices[i])); - size_t num_left_overs = (has_left_overs_in_current_slice ? 1 : 0) + - h->args->read_buffer->count - i - 1; - if (num_left_overs > 0) { - /* Put the leftovers in our buffer (ownership transfered). */ - if (has_left_overs_in_current_slice) { - grpc_slice tail = grpc_slice_split_tail(&h->args->read_buffer->slices[i], - consumed_slice_size); - grpc_slice_buffer_add(&h->left_overs, tail); - /* split_tail above increments refcount. */ - grpc_slice_unref_internal(exec_ctx, tail); - } - grpc_slice_buffer_addn( - &h->left_overs, &h->args->read_buffer->slices[i + 1], - num_left_overs - (size_t)has_left_overs_in_current_slice); + size_t offset = 0; + for (i = 0; i < h->args->read_buffer->count; i++) { + size_t slice_size = GPR_SLICE_LENGTH(h->args->read_buffer->slices[i]); + memcpy(h->handshake_buffer + offset, + GRPC_SLICE_START_PTR(h->args->read_buffer->slices[i]), slice_size); + offset += slice_size; } - // Check peer. - error = check_peer_locked(exec_ctx, h); + // Call TSI handshaker. 
+ error = do_handshaker_next_locked(exec_ctx, h, h->handshake_buffer, + bytes_received_size); + if (error != GRPC_ERROR_NONE) { security_handshake_failed_locked(exec_ctx, h, error); gpr_mu_unlock(&h->mu); security_handshaker_unref(exec_ctx, h); - return; + } else { + gpr_mu_unlock(&h->mu); } -done: - gpr_mu_unlock(&h->mu); } static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg, @@ -321,8 +348,8 @@ static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg, security_handshaker_unref(exec_ctx, h); return; } - /* We may be done. */ - if (tsi_handshaker_is_in_progress(h->handshaker)) { + // We may be done. + if (h->handshaker_result == NULL) { grpc_endpoint_read(exec_ctx, h->args->endpoint, h->args->read_buffer, &h->on_handshake_data_received_from_peer); } else { @@ -371,7 +398,7 @@ static void security_handshaker_do_handshake(grpc_exec_ctx *exec_ctx, h->args = args; h->on_handshake_done = on_handshake_done; gpr_ref(&h->refs); - grpc_error *error = send_handshake_bytes_to_peer_locked(exec_ctx, h); + grpc_error *error = do_handshaker_next_locked(exec_ctx, h, NULL, 0); if (error != GRPC_ERROR_NONE) { security_handshake_failed_locked(exec_ctx, h, error); gpr_mu_unlock(&h->mu); @@ -404,7 +431,6 @@ static grpc_handshaker *security_handshaker_create( grpc_schedule_on_exec_ctx); grpc_closure_init(&h->on_peer_checked, on_peer_checked, h, grpc_schedule_on_exec_ctx); - grpc_slice_buffer_init(&h->left_overs); grpc_slice_buffer_init(&h->outgoing); return &h->base; } |
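
For reviewers who want to see the new census hash map in use, below is a minimal, self-contained usage sketch written against the API added by this change; it follows the example given in intrusive_hash_map.h. The counter_item struct, free_counter callback, and main driver are hypothetical illustrations and are not part of the patch. Note that intrusive_hash_map_clear()/intrusive_hash_map_free() call gpr_free on each item after invoking the supplied callback, so items must be allocated with gpr_malloc and the callback should only release memory the item itself owns.

#include <stdio.h>
#include <grpc/support/alloc.h>
#include "src/core/ext/census/intrusive_hash_map.h"

/* Hypothetical user-defined item. The intrusive header must come first so the
   struct can be cast to hm_item*. */
typedef struct counter_item {
  INTRUSIVE_HASH_MAP_HEADER;
  uint64_t count;
} counter_item;

/* counter_item owns no extra memory; the map itself gpr_free's each item
   after calling this. */
static void free_counter(void *obj) { (void)obj; }

int main(void) {
  intrusive_hash_map map;
  intrusive_hash_map_init(&map, 4); /* 2^4 = 16 initial buckets */

  for (uint64_t k = 0; k < 10; ++k) {
    counter_item *item = (counter_item *)gpr_malloc(sizeof(counter_item));
    item->IHM_key = k;          /* must be set before insertion */
    item->IHM_hash_link = NULL; /* must start out NULL */
    item->count = k * k;
    intrusive_hash_map_insert(&map, (hm_item *)item);
  }

  /* Lookup by key. */
  counter_item *found = (counter_item *)intrusive_hash_map_find(&map, 7);
  if (found != NULL) {
    printf("key 7 -> %llu\n", (unsigned long long)found->count);
  }

  /* Iterate with the hm_index pseudo-iterator. */
  hm_index cur, end;
  intrusive_hash_map_end(&map, &end);
  for (intrusive_hash_map_begin(&map, &cur); !hm_index_compare(&cur, &end);
       intrusive_hash_map_next(&map, &cur)) {
    counter_item *it = (counter_item *)cur.item;
    printf("%llu -> %llu\n", (unsigned long long)it->IHM_key,
           (unsigned long long)it->count);
  }

  /* Releases every item (free_counter, then gpr_free) and the table memory. */
  intrusive_hash_map_free(&map, free_counter);
  return 0;
}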