Diffstat (limited to 'test')
-rw-r--r--  test/core/client_channel/resolvers/fake_resolver_test.cc    5
-rw-r--r--  test/core/end2end/cq_verifier_uv.cc                          4
-rw-r--r--  test/core/end2end/tests/bad_ping.cc                          9
-rw-r--r--  test/core/end2end/tests/shutdown_finishes_calls.cc           7
-rw-r--r--  test/core/iomgr/udp_server_test.cc                         107
-rw-r--r--  test/core/surface/completion_queue_threading_test.cc         2
-rw-r--r--  test/core/util/mock_endpoint.cc                             19
-rw-r--r--  test/cpp/qps/client_async.cc                                 4
-rw-r--r--  test/cpp/qps/server_async.cc                                 6
-rw-r--r--  test/cpp/util/grpc_tool.cc                                   2
-rw-r--r--  test/cpp/util/slice_test.cc                                  4
11 files changed, 103 insertions, 66 deletions
diff --git a/test/core/client_channel/resolvers/fake_resolver_test.cc b/test/core/client_channel/resolvers/fake_resolver_test.cc
index 03af8954e1..14caa3ea5d 100644
--- a/test/core/client_channel/resolvers/fake_resolver_test.cc
+++ b/test/core/client_channel/resolvers/fake_resolver_test.cc
@@ -234,6 +234,11 @@ static void test_fake_resolver() {
grpc_timeout_milliseconds_to_deadline(100)) ==
nullptr);
// Clean up.
+ // Note: Need to explicitly unref the resolver and flush the exec_ctx
+ // to make sure that the final resolver callback (with error set to
+ // "Resolver Shutdown") is invoked before on_res_arg goes out of scope.
+ resolver.reset();
+ grpc_core::ExecCtx::Get()->Flush();
GRPC_COMBINER_UNREF(combiner, "test_fake_resolver");
}
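
A minimal sketch of the teardown ordering this hunk relies on, assuming the test holds the resolver in a grpc_core smart pointer and has an ExecCtx on the stack (both true elsewhere in this file):

    // Closures queued during shutdown only run when the ExecCtx is flushed,
    // so flush before locals they reference (here: on_res_arg) go away.
    {
      grpc_core::ExecCtx exec_ctx;         // collects closures scheduled below
      resolver.reset();                    // queues the "Resolver Shutdown" callback
      grpc_core::ExecCtx::Get()->Flush();  // runs it now, while on_res_arg is alive
    }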
diff --git a/test/core/end2end/cq_verifier_uv.cc b/test/core/end2end/cq_verifier_uv.cc
index e23b3ae2a0..45d827ef61 100644
--- a/test/core/end2end/cq_verifier_uv.cc
+++ b/test/core/end2end/cq_verifier_uv.cc
@@ -58,7 +58,7 @@ static void timer_close_cb(uv_handle_t* handle) {
void cq_verifier_destroy(cq_verifier* v) {
cq_verify(v);
uv_close((uv_handle_t*)&v->timer, timer_close_cb);
- while (reinterpret_cast<timer_state>(v->timer.data) != TIMER_CLOSED) {
+ while (static_cast<timer_state>(v->timer.data) != TIMER_CLOSED) {
uv_run(uv_default_loop(), UV_RUN_NOWAIT);
}
gpr_free(v);
@@ -85,7 +85,7 @@ grpc_event cq_verifier_next_event(cq_verifier* v, int timeout_seconds) {
ev = grpc_completion_queue_next(v->cq, gpr_inf_past(GPR_CLOCK_MONOTONIC),
NULL);
// Stop the loop if the timer goes off or we get a non-timeout event
- while ((reinterpret_cast<timer_state>(v->timer.data) != TIMER_TRIGGERED) &&
+ while ((static_cast<timer_state>(v->timer.data) != TIMER_TRIGGERED) &&
ev.type == GRPC_QUEUE_TIMEOUT) {
uv_run(uv_default_loop(), UV_RUN_ONCE);
ev = grpc_completion_queue_next(v->cq, gpr_inf_past(GPR_CLOCK_MONOTONIC),
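
Storing an enum by value in libuv's void* data field, and reading it back, is usually done portably by round-tripping through an integral type. A hypothetical sketch, assuming timer_state is a plain enum as in this file:

    // Round-trip a plain enum through libuv's void* `data` field via
    // intptr_t, a conversion both cast operators support on all compilers.
    v->timer.data = reinterpret_cast<void*>(static_cast<intptr_t>(TIMER_STARTED));
    timer_state state =
        static_cast<timer_state>(reinterpret_cast<intptr_t>(v->timer.data));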
diff --git a/test/core/end2end/tests/bad_ping.cc b/test/core/end2end/tests/bad_ping.cc
index f305ea5703..95f72ca4c8 100644
--- a/test/core/end2end/tests/bad_ping.cc
+++ b/test/core/end2end/tests/bad_ping.cc
@@ -155,14 +155,15 @@ static void test_bad_ping(grpc_end2end_test_config config) {
cq_verify(cqv);
// Send too many pings to the server to trigger the punishment:
- // Each ping will trigger a ping strike, and we need at least MAX_PING_STRIKES
- // strikes to trigger the punishment. So (MAX_PING_STRIKES + 1) pings are
+ // The first ping will let the server mark its last_recv time. Afterwards, each
+ // ping will trigger a ping strike, and we need at least MAX_PING_STRIKES
+ // strikes to trigger the punishment. So (MAX_PING_STRIKES + 2) pings are
// needed here.
int i;
- for (i = 1; i <= MAX_PING_STRIKES + 1; i++) {
+ for (i = 1; i <= MAX_PING_STRIKES + 2; i++) {
grpc_channel_ping(f.client, f.cq, tag(200 + i), nullptr);
CQ_EXPECT_COMPLETION(cqv, tag(200 + i), 1);
- if (i == MAX_PING_STRIKES + 1) {
+ if (i == MAX_PING_STRIKES + 2) {
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
}
cq_verify(cqv);
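
Worked through with this test's MAX_PING_STRIKES of 2, and assuming the server punishes once the strike count exceeds that limit: the loop sends four pings. Ping 1 only establishes the server's last_recv time, pings 2 through 4 each arrive too soon and draw a strike, and the third strike on ping 4 (i == MAX_PING_STRIKES + 2) prompts the GOAWAY that completes tag(1).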
diff --git a/test/core/end2end/tests/shutdown_finishes_calls.cc b/test/core/end2end/tests/shutdown_finishes_calls.cc
index 34c4ebbf0a..28728ef46c 100644
--- a/test/core/end2end/tests/shutdown_finishes_calls.cc
+++ b/test/core/end2end/tests/shutdown_finishes_calls.cc
@@ -150,6 +150,13 @@ static void test_early_server_shutdown_finishes_inflight_calls(
nullptr);
GPR_ASSERT(GRPC_CALL_OK == error);
+ /* Make sure we don't shut down the server while HTTP/2 PING frames are
+ * still being exchanged on the newly established connection. This can lead
+ * to failures when testing with an HTTP proxy. See
+ * https://github.com/grpc/grpc/issues/14471
+ */
+ gpr_sleep_until(n_seconds_from_now(1));
+
/* shutdown and destroy the server */
grpc_server_shutdown_and_notify(f.server, f.cq, tag(1000));
grpc_server_cancel_all_calls(f.server);
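
The one-second pause leans on helpers these end2end tests already define; a minimal sketch, assuming the usual n_seconds_from_now wrapper over the test-config deadline helper:

    /* grpc_timeout_seconds_to_deadline scales by the test slowdown factor,
     * so "1 second" stretches under sanitizers and slow platforms instead of
     * reintroducing the race. */
    static gpr_timespec n_seconds_from_now(int n) {
      return grpc_timeout_seconds_to_deadline(n);
    }
    /* ... */
    gpr_sleep_until(n_seconds_from_now(1));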
diff --git a/test/core/iomgr/udp_server_test.cc b/test/core/iomgr/udp_server_test.cc
index 13cbf2f6df..60f293972e 100644
--- a/test/core/iomgr/udp_server_test.cc
+++ b/test/core/iomgr/udp_server_test.cc
@@ -36,6 +36,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/useful.h"
+#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/socket_factory_posix.h"
@@ -54,42 +55,70 @@ static int g_number_of_starts = 0;
int rcv_buf_size = 1024;
int snd_buf_size = 1024;
-static void on_start(grpc_fd* emfd, void* user_data) { g_number_of_starts++; }
+class TestGrpcUdpHandler : public GrpcUdpHandler {
+ public:
+ TestGrpcUdpHandler(grpc_fd* emfd, void* user_data)
+ : GrpcUdpHandler(emfd, user_data), emfd_(emfd) {
+ g_number_of_starts++;
+ }
+ ~TestGrpcUdpHandler() override {}
-static bool on_read(grpc_fd* emfd) {
- char read_buffer[512];
- ssize_t byte_count;
+ protected:
+ bool Read() override {
+ char read_buffer[512];
+ ssize_t byte_count;
- gpr_mu_lock(g_mu);
- byte_count =
- recv(grpc_fd_wrapped_fd(emfd), read_buffer, sizeof(read_buffer), 0);
+ gpr_mu_lock(g_mu);
+ byte_count =
+ recv(grpc_fd_wrapped_fd(emfd()), read_buffer, sizeof(read_buffer), 0);
- g_number_of_reads++;
- g_number_of_bytes_read += static_cast<int>(byte_count);
+ g_number_of_reads++;
+ g_number_of_bytes_read += static_cast<int>(byte_count);
- GPR_ASSERT(
- GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, nullptr)));
- gpr_mu_unlock(g_mu);
- return false;
-}
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("pollset_kick",
+ grpc_pollset_kick(g_pollset, nullptr)));
+ gpr_mu_unlock(g_mu);
+ return false;
+ }
-static void on_write(grpc_fd* emfd, void* user_data,
- grpc_closure* notify_on_write_closure) {
- gpr_mu_lock(g_mu);
- g_number_of_writes++;
+ void OnCanWrite(void* user_data,
+ grpc_closure* notify_on_write_closure) override {
+ gpr_mu_lock(g_mu);
+ g_number_of_writes++;
- GPR_ASSERT(
- GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, nullptr)));
- gpr_mu_unlock(g_mu);
-}
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("pollset_kick",
+ grpc_pollset_kick(g_pollset, nullptr)));
+ gpr_mu_unlock(g_mu);
+ }
-static void on_fd_orphaned(grpc_fd* emfd, grpc_closure* closure,
- void* user_data) {
- gpr_log(GPR_INFO, "gRPC FD about to be orphaned: %d",
- grpc_fd_wrapped_fd(emfd));
- GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
- g_number_of_orphan_calls++;
-}
+ void OnFdAboutToOrphan(grpc_closure* orphan_fd_closure,
+ void* user_data) override {
+ gpr_log(GPR_INFO, "gRPC FD about to be orphaned: %d",
+ grpc_fd_wrapped_fd(emfd()));
+ GRPC_CLOSURE_SCHED(orphan_fd_closure, GRPC_ERROR_NONE);
+ g_number_of_orphan_calls++;
+ }
+
+ grpc_fd* emfd() { return emfd_; }
+
+ private:
+ grpc_fd* emfd_;
+};
+
+class TestGrpcUdpHandlerFactory : public GrpcUdpHandlerFactory {
+ public:
+ GrpcUdpHandler* CreateUdpHandler(grpc_fd* emfd, void* user_data) override {
+ gpr_log(GPR_INFO, "create udp handler for fd %d", grpc_fd_wrapped_fd(emfd));
+ return grpc_core::New<TestGrpcUdpHandler>(emfd, user_data);
+ }
+
+ void DestroyUdpHandler(GrpcUdpHandler* handler) override {
+ gpr_log(GPR_INFO, "Destroy handler");
+ grpc_core::Delete(reinterpret_cast<TestGrpcUdpHandler*>(handler));
+ }
+};
+
+TestGrpcUdpHandlerFactory handler_factory;
struct test_socket_factory {
grpc_socket_factory base;
@@ -184,13 +213,12 @@ static void test_no_op_with_port(void) {
resolved_addr.len = sizeof(struct sockaddr_in);
addr->sin_family = AF_INET;
GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, rcv_buf_size,
- snd_buf_size, on_start, on_read, on_write,
- on_fd_orphaned));
+ snd_buf_size, &handler_factory));
grpc_udp_server_destroy(s, nullptr);
- /* The server had a single FD, which should have been orphaned. */
- GPR_ASSERT(g_number_of_orphan_calls == 1);
+ /* The server hasn't started listening, so no udp handler gets notified. */
+ GPR_ASSERT(g_number_of_orphan_calls == 0);
shutdown_and_destroy_pollset();
}
@@ -216,8 +244,7 @@ static void test_no_op_with_port_and_socket_factory(void) {
resolved_addr.len = sizeof(struct sockaddr_in);
addr->sin_family = AF_INET;
GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, rcv_buf_size,
- snd_buf_size, on_start, on_read, on_write,
- on_fd_orphaned));
+ snd_buf_size, &handler_factory));
GPR_ASSERT(socket_factory->number_of_socket_calls == 1);
GPR_ASSERT(socket_factory->number_of_bind_calls == 1);
@@ -225,8 +252,8 @@ static void test_no_op_with_port_and_socket_factory(void) {
grpc_socket_factory_unref(&socket_factory->base);
- /* The server had a single FD, which should have been orphaned. */
- GPR_ASSERT(g_number_of_orphan_calls == 1);
+ /* The server hasn't started listening, so no udp handler gets notified. */
+ GPR_ASSERT(g_number_of_orphan_calls == 0);
shutdown_and_destroy_pollset();
}
@@ -244,8 +271,7 @@ static void test_no_op_with_port_and_start(void) {
resolved_addr.len = sizeof(struct sockaddr_in);
addr->sin_family = AF_INET;
GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, rcv_buf_size,
- snd_buf_size, on_start, on_read, on_write,
- on_fd_orphaned));
+ snd_buf_size, &handler_factory));
grpc_udp_server_start(s, nullptr, 0, nullptr);
GPR_ASSERT(g_number_of_starts == 1);
@@ -278,8 +304,7 @@ static void test_receive(int number_of_clients) {
resolved_addr.len = sizeof(struct sockaddr_storage);
addr->ss_family = AF_INET;
GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, rcv_buf_size,
- snd_buf_size, on_start, on_read, on_write,
- on_fd_orphaned));
+ snd_buf_size, &handler_factory));
svrfd = grpc_udp_server_get_fd(s, 0);
GPR_ASSERT(svrfd >= 0);
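
Taken together, the new API threads one factory object through grpc_udp_server_add_port and leaves handler lifetimes to the server. A minimal usage sketch with the names from this diff (address and pollset setup elided):

    // The factory is consulted once the server starts listening; each
    // listening fd gets its own TestGrpcUdpHandler, released through
    // DestroyUdpHandler when the fd is orphaned.
    grpc_udp_server* s = grpc_udp_server_create(nullptr);
    grpc_udp_server_add_port(s, &resolved_addr, rcv_buf_size, snd_buf_size,
                             &handler_factory);
    grpc_udp_server_start(s, nullptr, 0, nullptr);  // handlers created here
    // ... traffic drives Read() / OnCanWrite() on the handler ...
    grpc_udp_server_destroy(s, nullptr);            // OnFdAboutToOrphan() fires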
diff --git a/test/core/surface/completion_queue_threading_test.cc b/test/core/surface/completion_queue_threading_test.cc
index 9c8d8d8395..0b82803af6 100644
--- a/test/core/surface/completion_queue_threading_test.cc
+++ b/test/core/surface/completion_queue_threading_test.cc
@@ -219,7 +219,7 @@ static void test_threading(size_t producers, size_t consumers) {
"test_threading", producers, consumers);
/* start all threads: they will wait for phase1 */
- grpc_core::Thread* threads = reinterpret_cast<grpc_core::Thread*>(
+ grpc_core::Thread* threads = static_cast<grpc_core::Thread*>(
gpr_malloc(sizeof(*threads) * (producers + consumers)));
for (i = 0; i < producers + consumers; i++) {
gpr_event_init(&options[i].on_started);
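
Same cast cleanup as elsewhere in this change: a void* fresh from an allocator converts with static_cast, keeping reinterpret_cast for genuinely unrelated pointer types. A tiny self-contained illustration of the rule:

    // gpr_malloc returns void*; static_cast is the standard conversion back
    // to the object type. (The test placement-constructs the Threads later.)
    void* raw = gpr_malloc(sizeof(int) * 4);
    int* xs = static_cast<int*>(raw);  // void* -> T*: static_cast suffices
    gpr_free(raw);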
diff --git a/test/core/util/mock_endpoint.cc b/test/core/util/mock_endpoint.cc
index adeff18ef0..1156cd5fc5 100644
--- a/test/core/util/mock_endpoint.cc
+++ b/test/core/util/mock_endpoint.cc
@@ -30,7 +30,7 @@
#include <grpc/support/string_util.h>
#include "src/core/lib/iomgr/sockaddr.h"
-typedef struct grpc_mock_endpoint {
+typedef struct mock_endpoint {
grpc_endpoint base;
gpr_mu mu;
void (*on_write)(grpc_slice slice);
@@ -38,11 +38,11 @@ typedef struct grpc_mock_endpoint {
grpc_slice_buffer* on_read_out;
grpc_closure* on_read;
grpc_resource_user* resource_user;
-} grpc_mock_endpoint;
+} mock_endpoint;
static void me_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
grpc_closure* cb) {
- grpc_mock_endpoint* m = reinterpret_cast<grpc_mock_endpoint*>(ep);
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
gpr_mu_lock(&m->mu);
if (m->read_buffer.count > 0) {
grpc_slice_buffer_swap(&m->read_buffer, slices);
@@ -56,7 +56,7 @@ static void me_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
static void me_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
grpc_closure* cb) {
- grpc_mock_endpoint* m = reinterpret_cast<grpc_mock_endpoint*>(ep);
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
for (size_t i = 0; i < slices->count; i++) {
m->on_write(slices->slices[i]);
}
@@ -72,7 +72,7 @@ static void me_delete_from_pollset_set(grpc_endpoint* ep,
grpc_pollset_set* pollset) {}
static void me_shutdown(grpc_endpoint* ep, grpc_error* why) {
- grpc_mock_endpoint* m = reinterpret_cast<grpc_mock_endpoint*>(ep);
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
gpr_mu_lock(&m->mu);
if (m->on_read) {
GRPC_CLOSURE_SCHED(m->on_read,
@@ -86,7 +86,7 @@ static void me_shutdown(grpc_endpoint* ep, grpc_error* why) {
}
static void me_destroy(grpc_endpoint* ep) {
- grpc_mock_endpoint* m = reinterpret_cast<grpc_mock_endpoint*>(ep);
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
grpc_slice_buffer_destroy(&m->read_buffer);
grpc_resource_user_unref(m->resource_user);
gpr_free(m);
@@ -97,7 +97,7 @@ static char* me_get_peer(grpc_endpoint* ep) {
}
static grpc_resource_user* me_get_resource_user(grpc_endpoint* ep) {
- grpc_mock_endpoint* m = reinterpret_cast<grpc_mock_endpoint*>(ep);
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
return m->resource_user;
}
@@ -118,8 +118,7 @@ static const grpc_endpoint_vtable vtable = {
grpc_endpoint* grpc_mock_endpoint_create(void (*on_write)(grpc_slice slice),
grpc_resource_quota* resource_quota) {
- grpc_mock_endpoint* m =
- static_cast<grpc_mock_endpoint*>(gpr_malloc(sizeof(*m)));
+ mock_endpoint* m = static_cast<mock_endpoint*>(gpr_malloc(sizeof(*m)));
m->base.vtable = &vtable;
char* name;
gpr_asprintf(&name, "mock_endpoint_%" PRIxPTR, (intptr_t)m);
@@ -133,7 +132,7 @@ grpc_endpoint* grpc_mock_endpoint_create(void (*on_write)(grpc_slice slice),
}
void grpc_mock_endpoint_put_read(grpc_endpoint* ep, grpc_slice slice) {
- grpc_mock_endpoint* m = reinterpret_cast<grpc_mock_endpoint*>(ep);
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
gpr_mu_lock(&m->mu);
if (m->on_read != nullptr) {
grpc_slice_buffer_add(m->on_read_out, slice);
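
One family of reinterpret_casts survives in this file, and for good reason: mock_endpoint embeds grpc_endpoint as its first member (C-style inheritance), a relationship static_cast cannot navigate. A sketch of the layout assumption:

    /* Standard-layout struct: a pointer to the wrapper and a pointer to its
     * first member alias the same address, which makes the down-cast safe. */
    typedef struct mock_endpoint {
      grpc_endpoint base; /* must stay the first member */
      /* ... */
    } mock_endpoint;

    static void me_destroy(grpc_endpoint* ep) {
      mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
      /* ... */
    }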
diff --git a/test/cpp/qps/client_async.cc b/test/cpp/qps/client_async.cc
index e3fba36a7a..8215ecbf55 100644
--- a/test/cpp/qps/client_async.cc
+++ b/test/cpp/qps/client_async.cc
@@ -50,9 +50,9 @@ class ClientRpcContext {
// next state, return false if done. Collect stats when appropriate
virtual bool RunNextState(bool, HistogramEntry* entry) = 0;
virtual void StartNewClone(CompletionQueue* cq) = 0;
- static void* tag(ClientRpcContext* c) { return reinterpret_cast<void*>(c); }
+ static void* tag(ClientRpcContext* c) { return static_cast<void*>(c); }
static ClientRpcContext* detag(void* t) {
- return reinterpret_cast<ClientRpcContext*>(t);
+ return static_cast<ClientRpcContext*>(t);
}
virtual void Start(CompletionQueue* cq, const ClientConfig& config) = 0;
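
The tag/detag pair is the usual trick of smuggling an object pointer through the completion queue's opaque void* tag; since detag restores the exact original type, static_cast is sufficient. A hypothetical drain loop using it:

    void* got_tag;
    bool ok;
    while (cq->Next(&got_tag, &ok)) {
      ClientRpcContext* ctx = ClientRpcContext::detag(got_tag);
      // entry is a HistogramEntry assumed to be in scope in this sketch.
      if (!ctx->RunNextState(ok, &entry)) {
        // context finished; clone-or-delete logic elided
      }
    }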
diff --git a/test/cpp/qps/server_async.cc b/test/cpp/qps/server_async.cc
index b88b88445c..f1dfea24d8 100644
--- a/test/cpp/qps/server_async.cc
+++ b/test/cpp/qps/server_async.cc
@@ -240,11 +240,9 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
private:
std::mutex mu_;
};
- static void* tag(ServerRpcContext* func) {
- return reinterpret_cast<void*>(func);
- }
+ static void* tag(ServerRpcContext* func) { return static_cast<void*>(func); }
static ServerRpcContext* detag(void* tag) {
- return reinterpret_cast<ServerRpcContext*>(tag);
+ return static_cast<ServerRpcContext*>(tag);
}
class ServerRpcContextUnaryImpl final : public ServerRpcContext {
diff --git a/test/cpp/util/grpc_tool.cc b/test/cpp/util/grpc_tool.cc
index 30c43b206f..e9dd7512c4 100644
--- a/test/cpp/util/grpc_tool.cc
+++ b/test/cpp/util/grpc_tool.cc
@@ -747,6 +747,8 @@ bool GrpcTool::CallMethod(int argc, const char** argv,
}
}
Status status = call.Finish(&server_trailing_metadata);
+ PrintMetadata(server_trailing_metadata,
+ "Received trailing metadata from server:");
if (status.ok()) {
fprintf(stderr, "Rpc succeeded with OK status\n");
return true;
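
With this addition grpc_cli surfaces the server's trailing metadata for successful and failed RPCs alike, mirroring the PrintMetadata calls the tool already makes for initial metadata, before the status-dependent branches decide the exit path.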
diff --git a/test/cpp/util/slice_test.cc b/test/cpp/util/slice_test.cc
index c2e55f3374..5f0b9c17cc 100644
--- a/test/cpp/util/slice_test.cc
+++ b/test/cpp/util/slice_test.cc
@@ -67,7 +67,7 @@ TEST_F(SliceTest, StaticBuf) {
TEST_F(SliceTest, SliceNew) {
char* x = new char[strlen(kContent) + 1];
strcpy(x, kContent);
- Slice spp(x, strlen(x), [](void* p) { delete[] reinterpret_cast<char*>(p); });
+ Slice spp(x, strlen(x), [](void* p) { delete[] static_cast<char*>(p); });
CheckSlice(spp, kContent);
}
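
The deleter lambda receives the buffer back as a void*, so static_cast (not reinterpret_cast) restores the original char* before delete[]. A self-contained sketch of the same ownership hand-off:

    // Hand a heap buffer to grpc::Slice; the slice frees it via the callback.
    char* buf = new char[6];
    memcpy(buf, "hello", 6);
    Slice s(buf, 5, [](void* p) { delete[] static_cast<char*>(p); });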
@@ -86,7 +86,7 @@ TEST_F(SliceTest, SliceNewWithUserData) {
strcpy(t->x, kContent);
Slice spp(t->x, strlen(t->x),
[](void* p) {
- auto* t = reinterpret_cast<stest*>(p);
+ auto* t = static_cast<stest*>(p);
delete[] t->x;
delete t;
},