Diffstat (limited to 'test/cpp')
-rw-r--r--  test/cpp/client/BUILD  51
-rw-r--r--  test/cpp/client/client_channel_stress_test.cc  329
-rw-r--r--  test/cpp/codegen/BUILD  1
-rw-r--r--  test/cpp/codegen/compiler_test_golden  17
-rw-r--r--  test/cpp/codegen/golden_file_test.cc  2
-rw-r--r--  test/cpp/codegen/proto_utils_test.cc  112
-rw-r--r--  test/cpp/common/auth_property_iterator_test.cc  4
-rw-r--r--  test/cpp/common/channel_arguments_test.cc  4
-rw-r--r--  test/cpp/common/secure_auth_context_test.cc  6
-rw-r--r--  test/cpp/end2end/async_end2end_test.cc  136
-rw-r--r--  test/cpp/end2end/client_crash_test.cc  3
-rw-r--r--  test/cpp/end2end/client_crash_test_server.cc  4
-rw-r--r--  test/cpp/end2end/client_lb_end2end_test.cc  22
-rw-r--r--  test/cpp/end2end/end2end_test.cc  24
-rw-r--r--  test/cpp/end2end/generic_end2end_test.cc  12
-rw-r--r--  test/cpp/end2end/grpclb_end2end_test.cc  63
-rw-r--r--  test/cpp/end2end/mock_test.cc  16
-rw-r--r--  test/cpp/end2end/thread_stress_test.cc  40
-rw-r--r--  test/cpp/grpclb/grpclb_api_test.cc  2
-rw-r--r--  test/cpp/grpclb/grpclb_test.cc  240
-rw-r--r--  test/cpp/interop/http2_client.cc  2
-rw-r--r--  test/cpp/interop/interop_server.cc  4
-rw-r--r--  test/cpp/interop/interop_test.cc  2
-rw-r--r--  test/cpp/interop/reconnect_interop_server.cc  2
-rw-r--r--  test/cpp/interop/stress_test.cc  4
-rw-r--r--  test/cpp/microbenchmarks/bm_arena.cc  2
-rw-r--r--  test/cpp/microbenchmarks/bm_call_create.cc  345
-rw-r--r--  test/cpp/microbenchmarks/bm_chttp2_hpack.cc  188
-rw-r--r--  test/cpp/microbenchmarks/bm_chttp2_transport.cc  204
-rw-r--r--  test/cpp/microbenchmarks/bm_closure.cc  72
-rw-r--r--  test/cpp/microbenchmarks/bm_cq.cc  35
-rw-r--r--  test/cpp/microbenchmarks/bm_cq_multiple_threads.cc  10
-rw-r--r--  test/cpp/microbenchmarks/bm_error.cc  10
-rw-r--r--  test/cpp/microbenchmarks/bm_fullstack_trickle.cc  34
-rw-r--r--  test/cpp/microbenchmarks/bm_metadata.cc  20
-rw-r--r--  test/cpp/microbenchmarks/bm_pollset.cc  13
-rw-r--r--  test/cpp/microbenchmarks/fullstack_fixtures.h  16
-rw-r--r--  test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h  47
-rw-r--r--  test/cpp/microbenchmarks/helpers.cc  29
-rw-r--r--  test/cpp/microbenchmarks/helpers.h  2
-rw-r--r--  test/cpp/naming/README.md  2
-rwxr-xr-x  test/cpp/naming/create_private_dns_zone.sh  4
-rwxr-xr-x  test/cpp/naming/private_dns_zone_init.sh  152
-rw-r--r--  test/cpp/naming/resolver_component_test.cc  90
-rwxr-xr-x  test/cpp/naming/resolver_component_tests_runner.sh  26
-rw-r--r--  test/cpp/naming/resolver_component_tests_runner_invoker.cc  16
-rwxr-xr-x  test/cpp/naming/resolver_gce_integration_tests_runner.sh  156
-rw-r--r--  test/cpp/naming/resolver_test_record_groups.yaml  10
-rw-r--r--  test/cpp/performance/writes_per_rpc_test.cc  9
-rw-r--r--  test/cpp/qps/BUILD  12
-rw-r--r--  test/cpp/qps/client.h  77
-rw-r--r--  test/cpp/qps/client_async.cc  70
-rw-r--r--  test/cpp/qps/client_sync.cc  39
-rw-r--r--  test/cpp/qps/driver.cc  55
-rw-r--r--  test/cpp/qps/driver.h  2
-rwxr-xr-x  test/cpp/qps/gen_build_yaml.py  18
-rw-r--r--  test/cpp/qps/histogram.h  4
-rw-r--r--  test/cpp/qps/inproc_sync_unary_ping_pong_test.cc  66
-rw-r--r--  test/cpp/qps/interarrival.h  4
-rw-r--r--  test/cpp/qps/json_run_localhost.cc  18
-rw-r--r--  test/cpp/qps/parse_json.cc  4
-rw-r--r--  test/cpp/qps/parse_json.h  4
-rw-r--r--  test/cpp/qps/qps_interarrival_test.cc  8
-rw-r--r--  test/cpp/qps/qps_json_driver.cc  9
-rw-r--r--  test/cpp/qps/qps_openloop_test.cc  6
-rw-r--r--  test/cpp/qps/qps_worker.cc  21
-rw-r--r--  test/cpp/qps/qps_worker.h  14
-rw-r--r--  test/cpp/qps/secure_sync_unary_ping_pong_test.cc  6
-rw-r--r--  test/cpp/qps/server.h  8
-rw-r--r--  test/cpp/qps/server_async.cc  192
-rw-r--r--  test/cpp/qps/server_sync.cc  20
-rw-r--r--  test/cpp/qps/worker.cc  3
-rw-r--r--  test/cpp/test/server_context_test_spouse_test.cc  2
-rw-r--r--  test/cpp/thread_manager/thread_manager_test.cc  14
-rw-r--r--  test/cpp/util/create_test_channel.cc  4
-rw-r--r--  test/cpp/util/error_details_test.cc  20
-rw-r--r--  test/cpp/util/grpc_tool.cc  10
-rw-r--r--  test/cpp/util/grpc_tool.h  4
-rw-r--r--  test/cpp/util/grpc_tool_test.cc  32
-rw-r--r--  test/cpp/util/proto_reflection_descriptor_database.cc  4
-rw-r--r--  test/cpp/util/service_describer.h  2
-rw-r--r--  test/cpp/util/test_config_cc.cc  2
82 files changed, 2133 insertions(+), 1215 deletions(-)
diff --git a/test/cpp/client/BUILD b/test/cpp/client/BUILD
new file mode 100644
index 0000000000..12825e88c2
--- /dev/null
+++ b/test/cpp/client/BUILD
@@ -0,0 +1,51 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+licenses(["notice"]) # Apache v2
+
+load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_package")
+
+grpc_package(name = "test/cpp/client")
+
+grpc_cc_test(
+ name = "credentials_test",
+ srcs = ["credentials_test.cc"],
+ external_deps = [
+ "gtest",
+ ],
+ deps = [
+ "//:gpr",
+ "//:grpc",
+ "//:grpc++",
+ ],
+)
+
+grpc_cc_test(
+ name = "client_channel_stress_test",
+ srcs = ["client_channel_stress_test.cc"],
+ deps = [
+ "//:gpr",
+ "//:grpc",
+ "//:grpc++",
+ "//:grpc_resolver_fake",
+ "//src/proto/grpc/lb/v1:load_balancer_proto",
+ "//src/proto/grpc/testing:echo_messages_proto",
+ "//src/proto/grpc/testing:echo_proto",
+ "//src/proto/grpc/testing/duplicate:echo_duplicate_proto",
+ "//test/core/util:gpr_test_util",
+ "//test/core/util:grpc_test_util",
+ "//test/cpp/end2end:test_service_impl",
+ "//test/cpp/util:test_util",
+ ],
+)
diff --git a/test/cpp/client/client_channel_stress_test.cc b/test/cpp/client/client_channel_stress_test.cc
new file mode 100644
index 0000000000..8940f6ff9e
--- /dev/null
+++ b/test/cpp/client/client_channel_stress_test.cc
@@ -0,0 +1,329 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <atomic>
+#include <memory>
+#include <mutex>
+#include <sstream>
+#include <thread>
+
+#include <grpc++/channel.h>
+#include <grpc++/client_context.h>
+#include <grpc++/create_channel.h>
+#include <grpc++/server.h>
+#include <grpc++/server_builder.h>
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/thd.h>
+#include <grpc/support/time.h>
+
+extern "C" {
+#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
+#include "src/core/lib/iomgr/sockaddr.h"
+}
+
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/end2end/test_service_impl.h"
+
+#include "src/proto/grpc/lb/v1/load_balancer.grpc.pb.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+
+using grpc::lb::v1::LoadBalanceRequest;
+using grpc::lb::v1::LoadBalanceResponse;
+using grpc::lb::v1::LoadBalancer;
+
+namespace grpc {
+namespace testing {
+namespace {
+
+const size_t kNumBackends = 10;
+const size_t kNumBalancers = 5;
+const size_t kNumClientThreads = 100;
+const int kResolutionUpdateIntervalMs = 50;
+const int kServerlistUpdateIntervalMs = 10;
+const int kTestDurationSec = 30;
+
+using BackendServiceImpl = TestServiceImpl;
+
+class BalancerServiceImpl : public LoadBalancer::Service {
+ public:
+ using Stream = ServerReaderWriter<LoadBalanceResponse, LoadBalanceRequest>;
+
+ explicit BalancerServiceImpl(const std::vector<int>& all_backend_ports)
+ : all_backend_ports_(all_backend_ports) {}
+
+ Status BalanceLoad(ServerContext* context, Stream* stream) override {
+ gpr_log(GPR_INFO, "LB[%p]: Start BalanceLoad.", this);
+ LoadBalanceRequest request;
+ stream->Read(&request);
+ while (!shutdown_) {
+ stream->Write(BuildRandomResponseForBackends());
+ std::this_thread::sleep_for(
+ std::chrono::milliseconds(kServerlistUpdateIntervalMs));
+ }
+ gpr_log(GPR_INFO, "LB[%p]: Finish BalanceLoad.", this);
+ return Status::OK;
+ }
+
+ void Shutdown() { shutdown_ = true; }
+
+ private:
+ grpc::string Ip4ToPackedString(const char* ip_str) {
+ struct in_addr ip4;
+ GPR_ASSERT(inet_pton(AF_INET, ip_str, &ip4) == 1);
+ return grpc::string(reinterpret_cast<const char*>(&ip4), sizeof(ip4));
+ }
+
+ LoadBalanceResponse BuildRandomResponseForBackends() {
+ // Generate a random serverlist with varying size (if N =
+ // all_backend_ports_.size(), num_non_drop_entry is in [0, 2N],
+ // num_drop_entry is in [0, N]), order, duplicates, and drop rate.
+ size_t num_non_drop_entry =
+ std::rand() % (all_backend_ports_.size() * 2 + 1);
+ size_t num_drop_entry = std::rand() % (all_backend_ports_.size() + 1);
+ std::vector<int> random_backend_indices;
+ for (size_t i = 0; i < num_non_drop_entry; ++i) {
+ random_backend_indices.push_back(std::rand() % all_backend_ports_.size());
+ }
+ for (size_t i = 0; i < num_drop_entry; ++i) {
+ random_backend_indices.push_back(-1);
+ }
+ std::random_shuffle(random_backend_indices.begin(),
+ random_backend_indices.end());
+ // Build the response according to the random list generated above.
+ LoadBalanceResponse response;
+ for (int index : random_backend_indices) {
+ auto* server = response.mutable_server_list()->add_servers();
+ if (index < 0) {
+ server->set_drop(true);
+ server->set_load_balance_token("load_balancing");
+ } else {
+ server->set_ip_address(Ip4ToPackedString("127.0.0.1"));
+ server->set_port(all_backend_ports_[index]);
+ }
+ }
+ return response;
+ }
+
+ std::atomic_bool shutdown_{false};
+ const std::vector<int> all_backend_ports_;
+};
+
+class ClientChannelStressTest {
+ public:
+ void Run() {
+ Start();
+ // Keep updating resolution for the test duration.
+ gpr_log(GPR_INFO, "Start updating resolution.");
+ const auto wait_duration =
+ std::chrono::milliseconds(kResolutionUpdateIntervalMs);
+ std::vector<AddressData> addresses;
+ auto start_time = std::chrono::steady_clock::now();
+ while (true) {
+ if (std::chrono::duration_cast<std::chrono::seconds>(
+ std::chrono::steady_clock::now() - start_time)
+ .count() > kTestDurationSec) {
+ break;
+ }
+ // Generate a random subset of balancers.
+ addresses.clear();
+ for (const auto& balancer_server : balancer_servers_) {
+ // Select each address with probability of 0.8.
+ if (std::rand() % 10 < 8) {
+ addresses.emplace_back(AddressData{balancer_server.port_, true, ""});
+ }
+ }
+ std::random_shuffle(addresses.begin(), addresses.end());
+ SetNextResolution(addresses);
+ std::this_thread::sleep_for(wait_duration);
+ }
+ gpr_log(GPR_INFO, "Finish updating resolution.");
+ Shutdown();
+ }
+
+ private:
+ template <typename T>
+ struct ServerThread {
+ explicit ServerThread(const grpc::string& type,
+ const grpc::string& server_host, T* service)
+ : type_(type), service_(service) {
+ std::mutex mu;
+ // We need to acquire the lock here in order to prevent the notify_one
+ // by ServerThread::Start from firing before the wait below is hit.
+ std::unique_lock<std::mutex> lock(mu);
+ port_ = grpc_pick_unused_port_or_die();
+ gpr_log(GPR_INFO, "starting %s server on port %d", type_.c_str(), port_);
+ std::condition_variable cond;
+ thread_.reset(new std::thread(
+ std::bind(&ServerThread::Start, this, server_host, &mu, &cond)));
+ cond.wait(lock);
+ gpr_log(GPR_INFO, "%s server startup complete", type_.c_str());
+ }
+
+ void Start(const grpc::string& server_host, std::mutex* mu,
+ std::condition_variable* cond) {
+ // We need to acquire the lock here in order to prevent the notify_one
+ // below from firing before its corresponding wait is executed.
+ std::lock_guard<std::mutex> lock(*mu);
+ std::ostringstream server_address;
+ server_address << server_host << ":" << port_;
+ ServerBuilder builder;
+ builder.AddListeningPort(server_address.str(),
+ InsecureServerCredentials());
+ builder.RegisterService(service_);
+ server_ = builder.BuildAndStart();
+ cond->notify_one();
+ }
+
+ void Shutdown() {
+ gpr_log(GPR_INFO, "%s about to shutdown", type_.c_str());
+ server_->Shutdown(grpc_timeout_milliseconds_to_deadline(0));
+ thread_->join();
+ gpr_log(GPR_INFO, "%s shutdown completed", type_.c_str());
+ }
+
+ int port_;
+ grpc::string type_;
+ std::unique_ptr<Server> server_;
+ T* service_;
+ std::unique_ptr<std::thread> thread_;
+ };
+
+ struct AddressData {
+ int port;
+ bool is_balancer;
+ grpc::string balancer_name;
+ };
+
+ void SetNextResolution(const std::vector<AddressData>& address_data) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_lb_addresses* addresses =
+ grpc_lb_addresses_create(address_data.size(), nullptr);
+ for (size_t i = 0; i < address_data.size(); ++i) {
+ char* lb_uri_str;
+ gpr_asprintf(&lb_uri_str, "ipv4:127.0.0.1:%d", address_data[i].port);
+ grpc_uri* lb_uri = grpc_uri_parse(&exec_ctx, lb_uri_str, true);
+ GPR_ASSERT(lb_uri != nullptr);
+ grpc_lb_addresses_set_address_from_uri(
+ addresses, i, lb_uri, address_data[i].is_balancer,
+ address_data[i].balancer_name.c_str(), nullptr);
+ grpc_uri_destroy(lb_uri);
+ gpr_free(lb_uri_str);
+ }
+ grpc_arg fake_addresses = grpc_lb_addresses_create_channel_arg(addresses);
+ grpc_channel_args fake_result = {1, &fake_addresses};
+ grpc_fake_resolver_response_generator_set_response(
+ &exec_ctx, response_generator_, &fake_result);
+ grpc_lb_addresses_destroy(&exec_ctx, addresses);
+ grpc_exec_ctx_finish(&exec_ctx);
+ }
+
+ void KeepSendingRequests() {
+ gpr_log(GPR_INFO, "Start sending requests.");
+ while (!shutdown_) {
+ ClientContext context;
+ context.set_deadline(grpc_timeout_milliseconds_to_deadline(1000));
+ EchoRequest request;
+ request.set_message("test");
+ EchoResponse response;
+ {
+ std::lock_guard<std::mutex> lock(stub_mutex_);
+ stub_->Echo(&context, request, &response);
+ }
+ }
+ gpr_log(GPR_INFO, "Finish sending requests.");
+ }
+
+ void CreateStub() {
+ ChannelArguments args;
+ response_generator_ = grpc_fake_resolver_response_generator_create();
+ args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
+ response_generator_);
+ std::ostringstream uri;
+ uri << "fake:///servername_not_used";
+ channel_ =
+ CreateCustomChannel(uri.str(), InsecureChannelCredentials(), args);
+ stub_ = grpc::testing::EchoTestService::NewStub(channel_);
+ }
+
+ void Start() {
+ // Start the backends.
+ std::vector<int> backend_ports;
+ for (size_t i = 0; i < kNumBackends; ++i) {
+ backends_.emplace_back(new BackendServiceImpl());
+ backend_servers_.emplace_back(ServerThread<BackendServiceImpl>(
+ "backend", server_host_, backends_.back().get()));
+ backend_ports.push_back(backend_servers_.back().port_);
+ }
+ // Start the load balancers.
+ for (size_t i = 0; i < kNumBalancers; ++i) {
+ balancers_.emplace_back(new BalancerServiceImpl(backend_ports));
+ balancer_servers_.emplace_back(ServerThread<BalancerServiceImpl>(
+ "balancer", server_host_, balancers_.back().get()));
+ }
+ // Start sending RPCs in multiple threads.
+ CreateStub();
+ for (size_t i = 0; i < kNumClientThreads; ++i) {
+ client_threads_.emplace_back(
+ std::thread(&ClientChannelStressTest::KeepSendingRequests, this));
+ }
+ }
+
+ void Shutdown() {
+ shutdown_ = true;
+ for (size_t i = 0; i < client_threads_.size(); ++i) {
+ client_threads_[i].join();
+ }
+ for (size_t i = 0; i < balancers_.size(); ++i) {
+ balancers_[i]->Shutdown();
+ balancer_servers_[i].Shutdown();
+ }
+ for (size_t i = 0; i < backends_.size(); ++i) {
+ backend_servers_[i].Shutdown();
+ }
+ grpc_fake_resolver_response_generator_unref(response_generator_);
+ }
+
+ std::atomic_bool shutdown_{false};
+ const grpc::string server_host_ = "localhost";
+ std::shared_ptr<Channel> channel_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::mutex stub_mutex_;
+ std::vector<std::unique_ptr<BackendServiceImpl>> backends_;
+ std::vector<std::unique_ptr<BalancerServiceImpl>> balancers_;
+ std::vector<ServerThread<BackendServiceImpl>> backend_servers_;
+ std::vector<ServerThread<BalancerServiceImpl>> balancer_servers_;
+ grpc_fake_resolver_response_generator* response_generator_;
+ std::vector<std::thread> client_threads_;
+};
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc_init();
+ grpc_test_init(argc, argv);
+ grpc::testing::ClientChannelStressTest test;
+ test.Run();
+ grpc_shutdown();
+ return 0;
+}
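The ServerThread helper above blocks its constructor until the spawned thread has called BuildAndStart(), using the lock-before-spawn / notify-under-lock handshake described in its comments. A minimal standalone sketch of that handshake, in plain C++11 with illustrative names and no gRPC dependency:

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    int main() {
      std::mutex mu;
      std::condition_variable cond;
      bool started = false;
      // Hold the lock before spawning, so the worker's notify_one()
      // cannot fire before the wait() below is entered.
      std::unique_lock<std::mutex> lock(mu);
      std::thread worker([&] {
        // Blocks until wait() releases mu; stands in for BuildAndStart().
        std::lock_guard<std::mutex> guard(mu);
        started = true;
        cond.notify_one();
      });
      // The predicate also guards against spurious wakeups, which the
      // bare cond.wait(lock) in ServerThread does not.
      cond.wait(lock, [&] { return started; });
      std::cout << "startup complete" << std::endl;
      worker.join();
      return 0;
    }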
diff --git a/test/cpp/codegen/BUILD b/test/cpp/codegen/BUILD
index 8de46be816..6cc81e3398 100644
--- a/test/cpp/codegen/BUILD
+++ b/test/cpp/codegen/BUILD
@@ -51,6 +51,7 @@ grpc_cc_test(
],
external_deps = [
"gtest",
+ "protobuf",
],
)
diff --git a/test/cpp/codegen/compiler_test_golden b/test/cpp/codegen/compiler_test_golden
index 3d664e8825..026a94112a 100644
--- a/test/cpp/codegen/compiler_test_golden
+++ b/test/cpp/codegen/compiler_test_golden
@@ -39,7 +39,6 @@
namespace grpc {
class CompletionQueue;
class Channel;
-class RpcService;
class ServerCompletionQueue;
class ServerContext;
} // namespace grpc
@@ -169,10 +168,10 @@ class ServiceA final {
::grpc::ClientReaderWriter< ::grpc::testing::Request, ::grpc::testing::Response>* MethodA4Raw(::grpc::ClientContext* context) override;
::grpc::ClientAsyncReaderWriter< ::grpc::testing::Request, ::grpc::testing::Response>* AsyncMethodA4Raw(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq, void* tag) override;
::grpc::ClientAsyncReaderWriter< ::grpc::testing::Request, ::grpc::testing::Response>* PrepareAsyncMethodA4Raw(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq) override;
- const ::grpc::RpcMethod rpcmethod_MethodA1_;
- const ::grpc::RpcMethod rpcmethod_MethodA2_;
- const ::grpc::RpcMethod rpcmethod_MethodA3_;
- const ::grpc::RpcMethod rpcmethod_MethodA4_;
+ const ::grpc::internal::RpcMethod rpcmethod_MethodA1_;
+ const ::grpc::internal::RpcMethod rpcmethod_MethodA2_;
+ const ::grpc::internal::RpcMethod rpcmethod_MethodA3_;
+ const ::grpc::internal::RpcMethod rpcmethod_MethodA4_;
};
static std::unique_ptr<Stub> NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options = ::grpc::StubOptions());
@@ -352,7 +351,7 @@ class ServiceA final {
public:
WithStreamedUnaryMethod_MethodA1() {
::grpc::Service::MarkMethodStreamed(0,
- new ::grpc::StreamedUnaryHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithStreamedUnaryMethod_MethodA1<BaseClass>::StreamedMethodA1, this, std::placeholders::_1, std::placeholders::_2)));
+ new ::grpc::internal::StreamedUnaryHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithStreamedUnaryMethod_MethodA1<BaseClass>::StreamedMethodA1, this, std::placeholders::_1, std::placeholders::_2)));
}
~WithStreamedUnaryMethod_MethodA1() override {
BaseClassMustBeDerivedFromService(this);
@@ -373,7 +372,7 @@ class ServiceA final {
public:
WithSplitStreamingMethod_MethodA3() {
::grpc::Service::MarkMethodStreamed(2,
- new ::grpc::SplitServerStreamingHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithSplitStreamingMethod_MethodA3<BaseClass>::StreamedMethodA3, this, std::placeholders::_1, std::placeholders::_2)));
+ new ::grpc::internal::SplitServerStreamingHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithSplitStreamingMethod_MethodA3<BaseClass>::StreamedMethodA3, this, std::placeholders::_1, std::placeholders::_2)));
}
~WithSplitStreamingMethod_MethodA3() override {
BaseClassMustBeDerivedFromService(this);
@@ -427,7 +426,7 @@ class ServiceB final {
std::shared_ptr< ::grpc::ChannelInterface> channel_;
::grpc::ClientAsyncResponseReader< ::grpc::testing::Response>* AsyncMethodB1Raw(::grpc::ClientContext* context, const ::grpc::testing::Request& request, ::grpc::CompletionQueue* cq) override;
::grpc::ClientAsyncResponseReader< ::grpc::testing::Response>* PrepareAsyncMethodB1Raw(::grpc::ClientContext* context, const ::grpc::testing::Request& request, ::grpc::CompletionQueue* cq) override;
- const ::grpc::RpcMethod rpcmethod_MethodB1_;
+ const ::grpc::internal::RpcMethod rpcmethod_MethodB1_;
};
static std::unique_ptr<Stub> NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options = ::grpc::StubOptions());
@@ -484,7 +483,7 @@ class ServiceB final {
public:
WithStreamedUnaryMethod_MethodB1() {
::grpc::Service::MarkMethodStreamed(0,
- new ::grpc::StreamedUnaryHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithStreamedUnaryMethod_MethodB1<BaseClass>::StreamedMethodB1, this, std::placeholders::_1, std::placeholders::_2)));
+ new ::grpc::internal::StreamedUnaryHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithStreamedUnaryMethod_MethodB1<BaseClass>::StreamedMethodB1, this, std::placeholders::_1, std::placeholders::_2)));
}
~WithStreamedUnaryMethod_MethodB1() override {
BaseClassMustBeDerivedFromService(this);
diff --git a/test/cpp/codegen/golden_file_test.cc b/test/cpp/codegen/golden_file_test.cc
index 5df1b8a16b..14880982b5 100644
--- a/test/cpp/codegen/golden_file_test.cc
+++ b/test/cpp/codegen/golden_file_test.cc
@@ -58,7 +58,7 @@ TEST(GoldenMockFileTest, TestGeneratedMockFile) {
kMockGoldenFilePath);
}
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
::google::ParseCommandLineFlags(&argc, &argv, true);
if (FLAGS_generated_file_path.empty()) {
diff --git a/test/cpp/codegen/proto_utils_test.cc b/test/cpp/codegen/proto_utils_test.cc
index fd05c90e9d..cc355bb24a 100644
--- a/test/cpp/codegen/proto_utils_test.cc
+++ b/test/cpp/codegen/proto_utils_test.cc
@@ -16,15 +16,16 @@
*
*/
+#include <grpc++/impl/codegen/grpc_library.h>
#include <grpc++/impl/codegen/proto_utils.h>
#include <grpc++/impl/grpc_library.h>
+#include <grpc/impl/codegen/byte_buffer.h>
+#include <grpc/slice.h>
#include <gtest/gtest.h>
namespace grpc {
namespace internal {
-static GrpcLibraryInitializer g_gli_initializer;
-
// Provide access to GrpcBufferWriter internals.
class GrpcBufferWriterPeer {
public:
@@ -44,35 +45,120 @@ class ProtoUtilsTest : public ::testing::Test {};
// GrpcBufferWriter Next()/Backup() invocations could result in a dangling
// pointer returned by Next() due to the interaction between grpc_slice inlining
// and GRPC_SLICE_START_PTR.
-TEST_F(ProtoUtilsTest, BackupNext) {
- // Ensure the GrpcBufferWriter internals are initialized.
- g_gli_initializer.summon();
-
+TEST_F(ProtoUtilsTest, TinyBackupThenNext) {
grpc_byte_buffer* bp;
- GrpcBufferWriter writer(&bp, 8192);
+ const int block_size = 1024;
+ GrpcBufferWriter writer(&bp, block_size, 8192);
GrpcBufferWriterPeer peer(&writer);
void* data;
int size;
// Allocate a slice.
ASSERT_TRUE(writer.Next(&data, &size));
- EXPECT_EQ(8192, size);
- // Return a single byte. Before the fix that this test acts as a regression
- // for, this would have resulted in an inlined backup slice.
+ EXPECT_EQ(block_size, size);
+ // Return a single byte.
writer.BackUp(1);
- EXPECT_TRUE(!peer.have_backup());
- // On the next allocation, the slice is non-inlined.
+ EXPECT_FALSE(peer.have_backup());
+ // On the next allocation, the returned slice is non-inlined.
ASSERT_TRUE(writer.Next(&data, &size));
- EXPECT_TRUE(peer.slice().refcount != NULL);
+ EXPECT_TRUE(peer.slice().refcount != nullptr);
+ EXPECT_EQ(block_size, size);
// Cleanup.
g_core_codegen_interface->grpc_byte_buffer_destroy(bp);
}
+namespace {
+
+// Set backup_size to 0 to indicate no backup is needed.
+void BufferWriterTest(int block_size, int total_size, int backup_size) {
+ grpc_byte_buffer* bp;
+ GrpcBufferWriter writer(&bp, block_size, total_size);
+
+ int written_size = 0;
+ void* data;
+ int size = 0;
+ bool backed_up_entire_slice = false;
+
+ while (written_size < total_size) {
+ EXPECT_TRUE(writer.Next(&data, &size));
+ EXPECT_GT(size, 0);
+ EXPECT_TRUE(data);
+ int write_size = size;
+ bool should_backup = false;
+ if (backup_size > 0 && size > backup_size) {
+ write_size = size - backup_size;
+ should_backup = true;
+ } else if (size == backup_size && !backed_up_entire_slice) {
+ // only backup entire slice once.
+ backed_up_entire_slice = true;
+ should_backup = true;
+ write_size = 0;
+ }
+ // May need a last backup.
+ if (write_size + written_size > total_size) {
+ write_size = total_size - written_size;
+ should_backup = true;
+ backup_size = size - write_size;
+ ASSERT_GT(backup_size, 0);
+ }
+ for (int i = 0; i < write_size; i++) {
+ ((uint8_t*)data)[i] = written_size % 128;
+ written_size++;
+ }
+ if (should_backup) {
+ writer.BackUp(backup_size);
+ }
+ }
+ EXPECT_EQ(grpc_byte_buffer_length(bp), (size_t)total_size);
+
+ grpc_byte_buffer_reader reader;
+ grpc_byte_buffer_reader_init(&reader, bp);
+ int read_bytes = 0;
+ while (read_bytes < total_size) {
+ grpc_slice s;
+ EXPECT_TRUE(grpc_byte_buffer_reader_next(&reader, &s));
+ for (size_t i = 0; i < GRPC_SLICE_LENGTH(s); i++) {
+ EXPECT_EQ(GRPC_SLICE_START_PTR(s)[i], read_bytes % 128);
+ read_bytes++;
+ }
+ grpc_slice_unref(s);
+ }
+ EXPECT_EQ(read_bytes, total_size);
+ grpc_byte_buffer_reader_destroy(&reader);
+ grpc_byte_buffer_destroy(bp);
+}
+
+TEST(WriterTest, TinyBlockTinyBackup) {
+ for (int i = 2; i < (int)GRPC_SLICE_INLINED_SIZE; i++) {
+ BufferWriterTest(i, 256, 1);
+ }
+}
+
+TEST(WriterTest, SmallBlockTinyBackup) { BufferWriterTest(64, 256, 1); }
+
+TEST(WriterTest, SmallBlockNoBackup) { BufferWriterTest(64, 256, 0); }
+
+TEST(WriterTest, SmallBlockFullBackup) { BufferWriterTest(64, 256, 64); }
+
+TEST(WriterTest, LargeBlockTinyBackup) { BufferWriterTest(4096, 8192, 1); }
+
+TEST(WriterTest, LargeBlockNoBackup) { BufferWriterTest(4096, 8192, 0); }
+
+TEST(WriterTest, LargeBlockFullBackup) { BufferWriterTest(4096, 8192, 4096); }
+
+TEST(WriterTest, LargeBlockLargeBackup) { BufferWriterTest(4096, 8192, 4095); }
+
+} // namespace
} // namespace internal
} // namespace grpc
int main(int argc, char** argv) {
+ // Ensure the GrpcBufferWriter internals are initialized.
+ grpc::internal::GrpcLibraryInitializer init;
+ init.summon();
+ grpc::GrpcLibraryCodegen lib;
+
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
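The BufferWriterTest loop above exercises the ZeroCopyOutputStream contract that GrpcBufferWriter implements: Next() hands out a writable block and reports its size, BackUp(n) returns the unused tail of that block, and only the bytes actually written end up in the buffer. A minimal sketch of the same contract using protobuf's StringOutputStream rather than the gRPC writer (illustrative only):

    #include <google/protobuf/io/zero_copy_stream_impl_lite.h>
    #include <cstring>
    #include <iostream>
    #include <string>

    int main() {
      std::string sink;
      google::protobuf::io::StringOutputStream out(&sink);
      void* data = nullptr;
      int size = 0;
      // Next() exposes a writable block of `size` bytes backed by `sink`.
      if (!out.Next(&data, &size) || size < 2) return 1;
      std::memcpy(data, "hi", 2);
      // Return everything we did not write; only the 2-byte prefix is kept.
      out.BackUp(size - 2);
      std::cout << out.ByteCount() << " bytes kept: " << sink << std::endl;
      return 0;
    }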
diff --git a/test/cpp/common/auth_property_iterator_test.cc b/test/cpp/common/auth_property_iterator_test.cc
index e25d2a7597..fce409aa2f 100644
--- a/test/cpp/common/auth_property_iterator_test.cc
+++ b/test/cpp/common/auth_property_iterator_test.cc
@@ -22,9 +22,7 @@
#include "src/cpp/common/secure_auth_context.h"
#include "test/cpp/util/string_ref_helper.h"
-extern "C" {
#include "src/core/lib/security/context/security_context.h"
-}
using ::grpc::testing::ToString;
@@ -42,7 +40,7 @@ class TestAuthPropertyIterator : public AuthPropertyIterator {
class AuthPropertyIteratorTest : public ::testing::Test {
protected:
void SetUp() override {
- ctx_ = grpc_auth_context_create(NULL);
+ ctx_ = grpc_auth_context_create(nullptr);
grpc_auth_context_add_cstring_property(ctx_, "name", "chapi");
grpc_auth_context_add_cstring_property(ctx_, "name", "chapo");
grpc_auth_context_add_cstring_property(ctx_, "foo", "bar");
diff --git a/test/cpp/common/channel_arguments_test.cc b/test/cpp/common/channel_arguments_test.cc
index cfe64f11b1..d6ed2e5aa2 100644
--- a/test/cpp/common/channel_arguments_test.cc
+++ b/test/cpp/common/channel_arguments_test.cc
@@ -23,10 +23,8 @@
#include <grpc/support/useful.h>
#include <gtest/gtest.h>
-extern "C" {
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/socket_mutator.h"
-}
namespace grpc {
namespace testing {
@@ -72,7 +70,7 @@ grpc_socket_mutator_vtable test_mutator_vtable = {
TestSocketMutator::TestSocketMutator() {
grpc_socket_mutator_init(this, &test_mutator_vtable);
}
-}
+} // namespace
class ChannelArgumentsTest : public ::testing::Test {
protected:
diff --git a/test/cpp/common/secure_auth_context_test.cc b/test/cpp/common/secure_auth_context_test.cc
index 91c7a3b5df..7a0530c20a 100644
--- a/test/cpp/common/secure_auth_context_test.cc
+++ b/test/cpp/common/secure_auth_context_test.cc
@@ -22,9 +22,7 @@
#include <gtest/gtest.h>
#include "test/cpp/util/string_ref_helper.h"
-extern "C" {
#include "src/core/lib/security/context/security_context.h"
-}
using grpc::testing::ToString;
@@ -44,7 +42,7 @@ TEST_F(SecureAuthContextTest, EmptyContext) {
}
TEST_F(SecureAuthContextTest, Properties) {
- grpc_auth_context* ctx = grpc_auth_context_create(NULL);
+ grpc_auth_context* ctx = grpc_auth_context_create(nullptr);
SecureAuthContext context(ctx, true);
context.AddProperty("name", "chapi");
context.AddProperty("name", "chapo");
@@ -62,7 +60,7 @@ TEST_F(SecureAuthContextTest, Properties) {
}
TEST_F(SecureAuthContextTest, Iterators) {
- grpc_auth_context* ctx = grpc_auth_context_create(NULL);
+ grpc_auth_context* ctx = grpc_auth_context_create(nullptr);
SecureAuthContext context(ctx, true);
context.AddProperty("name", "chapi");
context.AddProperty("name", "chapo");
diff --git a/test/cpp/end2end/async_end2end_test.cc b/test/cpp/end2end/async_end2end_test.cc
index 2a33e8ae11..1ea087e706 100644
--- a/test/cpp/end2end/async_end2end_test.cc
+++ b/test/cpp/end2end/async_end2end_test.cc
@@ -28,12 +28,14 @@
#include <grpc++/server_builder.h>
#include <grpc++/server_context.h>
#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
#include <grpc/support/tls.h>
#include "src/core/lib/iomgr/port.h"
+#include "src/core/lib/support/env.h"
#include "src/proto/grpc/health/v1/health.grpc.pb.h"
#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
@@ -99,7 +101,7 @@ class PollingOverrider {
class Verifier {
public:
- explicit Verifier(bool spin) : spin_(spin) {}
+ explicit Verifier(bool spin) : spin_(spin), lambda_run_(false) {}
// Expect sets the expected ok value for a specific tag
Verifier& Expect(int i, bool expect_ok) {
return ExpectUnless(i, expect_ok, false);
@@ -142,6 +144,18 @@ class Verifier {
return detag(got_tag);
}
+ template <typename T>
+ CompletionQueue::NextStatus DoOnceThenAsyncNext(
+ CompletionQueue* cq, void** got_tag, bool* ok, T deadline,
+ std::function<void(void)> lambda) {
+ if (lambda_run_) {
+ return cq->AsyncNext(got_tag, ok, deadline);
+ } else {
+ lambda_run_ = true;
+ return cq->DoThenAsyncNext(lambda, got_tag, ok, deadline);
+ }
+ }
+
// Verify keeps calling Next until all currently set
// expected tags are complete
void Verify(CompletionQueue* cq) { Verify(cq, false); }
@@ -154,6 +168,7 @@ class Verifier {
Next(cq, ignore_ok);
}
}
+
// This version of Verify stops after a certain deadline
void Verify(CompletionQueue* cq,
std::chrono::system_clock::time_point deadline) {
@@ -193,6 +208,47 @@ class Verifier {
}
}
+ // This version of Verify stops after a certain deadline, and uses the
+ // DoThenAsyncNext API
+ // to call the lambda
+ void Verify(CompletionQueue* cq,
+ std::chrono::system_clock::time_point deadline,
+ std::function<void(void)> lambda) {
+ if (expectations_.empty()) {
+ bool ok;
+ void* got_tag;
+ if (spin_) {
+ while (std::chrono::system_clock::now() < deadline) {
+ EXPECT_EQ(DoOnceThenAsyncNext(cq, &got_tag, &ok, deadline, lambda),
+ CompletionQueue::TIMEOUT);
+ }
+ } else {
+ EXPECT_EQ(DoOnceThenAsyncNext(cq, &got_tag, &ok, deadline, lambda),
+ CompletionQueue::TIMEOUT);
+ }
+ } else {
+ while (!expectations_.empty()) {
+ bool ok;
+ void* got_tag;
+ if (spin_) {
+ for (;;) {
+ GPR_ASSERT(std::chrono::system_clock::now() < deadline);
+ auto r = DoOnceThenAsyncNext(
+ cq, &got_tag, &ok, gpr_time_0(GPR_CLOCK_REALTIME), lambda);
+ if (r == CompletionQueue::TIMEOUT) continue;
+ if (r == CompletionQueue::GOT_EVENT) break;
+ gpr_log(GPR_ERROR, "unexpected result from AsyncNext");
+ abort();
+ }
+ } else {
+ EXPECT_EQ(DoOnceThenAsyncNext(cq, &got_tag, &ok, deadline, lambda),
+ CompletionQueue::GOT_EVENT);
+ }
+ GotTag(got_tag, ok, false);
+ }
+ }
+ }
+
private:
void GotTag(void* got_tag, bool ok, bool ignore_ok) {
auto it = expectations_.find(got_tag);
@@ -226,6 +282,7 @@ class Verifier {
std::map<void*, bool> expectations_;
std::map<void*, MaybeExpect> maybe_expectations_;
bool spin_;
+ bool lambda_run_;
};
bool plugin_has_sync_methods(std::unique_ptr<ServerBuilderPlugin>& plugin) {
@@ -401,9 +458,18 @@ TEST_P(AsyncEnd2endTest, SequentialRpcs) {
}
TEST_P(AsyncEnd2endTest, ReconnectChannel) {
+ // GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS is set to 100ms in main()
if (GetParam().inproc) {
return;
}
+ int poller_slowdown_factor = 1;
+ // It needs 2 pollset_works to reconnect the channel with polling engine
+ // "poll"
+ char* s = gpr_getenv("GRPC_POLL_STRATEGY");
+ if (s != nullptr && 0 == strcmp(s, "poll")) {
+ poller_slowdown_factor = 2;
+ }
+ gpr_free(s);
ResetStub();
SendRpc(1);
server_->Shutdown();
@@ -413,10 +479,13 @@ TEST_P(AsyncEnd2endTest, ReconnectChannel) {
while (cq_->Next(&ignored_tag, &ignored_ok))
;
BuildAndStartServer();
- // It needs more than kConnectivityCheckIntervalMsec time to reconnect the
- // channel.
- gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_millis(1600, GPR_TIMESPAN)));
+ // It needs more than GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS time to
+ // reconnect the channel.
+ gpr_sleep_until(gpr_time_add(
+ gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_millis(
+ 300 * poller_slowdown_factor * grpc_test_slowdown_factor(),
+ GPR_TIMESPAN)));
SendRpc(1);
}
@@ -490,6 +559,60 @@ TEST_P(AsyncEnd2endTest, AsyncNextRpc) {
EXPECT_TRUE(recv_status.ok());
}
+// Test a simple RPC using the async version of Next
+TEST_P(AsyncEnd2endTest, DoThenAsyncNextRpc) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
+
+ std::chrono::system_clock::time_point time_now(
+ std::chrono::system_clock::now());
+ std::chrono::system_clock::time_point time_limit(
+ std::chrono::system_clock::now() + std::chrono::seconds(10));
+ Verifier(GetParam().disable_blocking).Verify(cq_.get(), time_now);
+ Verifier(GetParam().disable_blocking).Verify(cq_.get(), time_now);
+
+ auto resp_writer_ptr = &response_writer;
+ auto lambda_2 = [&, this, resp_writer_ptr]() {
+ gpr_log(GPR_ERROR, "CALLED");
+ service_->RequestEcho(&srv_ctx, &recv_request, resp_writer_ptr, cq_.get(),
+ cq_.get(), tag(2));
+ };
+
+ Verifier(GetParam().disable_blocking)
+ .Expect(2, true)
+ .Verify(cq_.get(), time_limit, lambda_2);
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ auto recv_resp_ptr = &recv_response;
+ auto status_ptr = &recv_status;
+ send_response.set_message(recv_request.message());
+ auto lambda_3 = [&, this, resp_writer_ptr, send_response]() {
+ resp_writer_ptr->Finish(send_response, Status::OK, tag(3));
+ };
+ response_reader->Finish(recv_resp_ptr, status_ptr, tag(4));
+ Verifier(GetParam().disable_blocking)
+ .Expect(3, true)
+ .Expect(4, true)
+ .Verify(cq_.get(), std::chrono::system_clock::time_point::max(),
+ lambda_3);
+
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+}
+
// Two pings and a final pong.
TEST_P(AsyncEnd2endTest, SimpleClientStreaming) {
ResetStub();
@@ -1890,6 +2013,9 @@ INSTANTIATE_TEST_CASE_P(AsyncEnd2endServerTryCancel,
} // namespace grpc
int main(int argc, char** argv) {
+ // Change the backup poll interval from 5s to 100ms to speed up the
+ // ReconnectChannel test
+ gpr_setenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS", "100");
grpc_test_init(argc, argv);
gpr_tls_init(&g_is_async_end2end_test);
::testing::InitGoogleTest(&argc, argv);
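The deadline-taking Verify() overloads above are thin wrappers around CompletionQueue::AsyncNext(), which polls with a deadline and reports one of three outcomes instead of blocking indefinitely. A minimal sketch of that polling pattern (no RPC is started, so the poll simply times out; the 100 ms deadline is arbitrary):

    #include <grpc++/grpc++.h>
    #include <chrono>
    #include <iostream>

    int main() {
      grpc::CompletionQueue cq;
      void* tag;
      bool ok;
      auto deadline =
          std::chrono::system_clock::now() + std::chrono::milliseconds(100);
      switch (cq.AsyncNext(&tag, &ok, deadline)) {
        case grpc::CompletionQueue::GOT_EVENT:
          std::cout << "got event, ok=" << ok << std::endl;
          break;
        case grpc::CompletionQueue::TIMEOUT:
          std::cout << "deadline hit with no event" << std::endl;
          break;
        case grpc::CompletionQueue::SHUTDOWN:
          std::cout << "queue drained after Shutdown()" << std::endl;
          break;
      }
      cq.Shutdown();
      while (cq.Next(&tag, &ok)) {
      }  // drain before destruction
      return 0;
    }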
diff --git a/test/cpp/end2end/client_crash_test.cc b/test/cpp/end2end/client_crash_test.cc
index 4d2304feca..f34b27511b 100644
--- a/test/cpp/end2end/client_crash_test.cc
+++ b/test/cpp/end2end/client_crash_test.cc
@@ -56,7 +56,8 @@ class CrashTest : public ::testing::Test {
addr_stream << "localhost:" << port;
auto addr = addr_stream.str();
server_.reset(new SubProcess({
- g_root + "/client_crash_test_server", "--address=" + addr,
+ g_root + "/client_crash_test_server",
+ "--address=" + addr,
}));
GPR_ASSERT(server_);
return grpc::testing::EchoTestService::NewStub(
diff --git a/test/cpp/end2end/client_crash_test_server.cc b/test/cpp/end2end/client_crash_test_server.cc
index 01dcd40f9a..887504d308 100644
--- a/test/cpp/end2end/client_crash_test_server.cc
+++ b/test/cpp/end2end/client_crash_test_server.cc
@@ -68,8 +68,8 @@ void RunServer() {
std::cout << "Server listening on " << FLAGS_address << std::endl;
server->Wait();
}
-}
-}
+} // namespace testing
+} // namespace grpc
int main(int argc, char** argv) {
ParseCommandLineFlags(&argc, &argv, true);
diff --git a/test/cpp/end2end/client_lb_end2end_test.cc b/test/cpp/end2end/client_lb_end2end_test.cc
index c236f76e89..f8bb12fde1 100644
--- a/test/cpp/end2end/client_lb_end2end_test.cc
+++ b/test/cpp/end2end/client_lb_end2end_test.cc
@@ -33,10 +33,9 @@
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
-extern "C" {
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h"
-}
+#include "src/core/lib/support/env.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
@@ -86,7 +85,11 @@ class MyTestServiceImpl : public TestServiceImpl {
class ClientLbEnd2endTest : public ::testing::Test {
protected:
ClientLbEnd2endTest()
- : server_host_("localhost"), kRequestMessage_("Live long and prosper.") {}
+ : server_host_("localhost"), kRequestMessage_("Live long and prosper.") {
+ // Make the backup poller poll very frequently in order to pick up
+ // updates from all the subchannels's FDs.
+ gpr_setenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS", "1");
+ }
void SetUp() override {
response_generator_ = grpc_fake_resolver_response_generator_create();
@@ -110,22 +113,23 @@ class ClientLbEnd2endTest : public ::testing::Test {
void SetNextResolution(const std::vector<int>& ports) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_lb_addresses* addresses = grpc_lb_addresses_create(ports.size(), NULL);
+ grpc_lb_addresses* addresses =
+ grpc_lb_addresses_create(ports.size(), nullptr);
for (size_t i = 0; i < ports.size(); ++i) {
char* lb_uri_str;
gpr_asprintf(&lb_uri_str, "ipv4:127.0.0.1:%d", ports[i]);
grpc_uri* lb_uri = grpc_uri_parse(&exec_ctx, lb_uri_str, true);
- GPR_ASSERT(lb_uri != NULL);
+ GPR_ASSERT(lb_uri != nullptr);
grpc_lb_addresses_set_address_from_uri(addresses, i, lb_uri,
false /* is balancer */,
- "" /* balancer name */, NULL);
+ "" /* balancer name */, nullptr);
grpc_uri_destroy(lb_uri);
gpr_free(lb_uri_str);
}
const grpc_arg fake_addresses =
grpc_lb_addresses_create_channel_arg(addresses);
grpc_channel_args* fake_result =
- grpc_channel_args_copy_and_add(NULL, &fake_addresses, 1);
+ grpc_channel_args_copy_and_add(nullptr, &fake_addresses, 1);
grpc_fake_resolver_response_generator_set_response(
&exec_ctx, response_generator_, fake_result);
grpc_channel_args_destroy(&exec_ctx, fake_result);
@@ -305,7 +309,7 @@ TEST_F(ClientLbEnd2endTest, PickFirstUpdates) {
ports.clear();
SetNextResolution(ports);
gpr_log(GPR_INFO, "****** SET none *******");
- grpc_connectivity_state channel_state = GRPC_CHANNEL_INIT;
+ grpc_connectivity_state channel_state;
do {
channel_state = channel_->GetState(true /* try to connect */);
} while (channel_state == GRPC_CHANNEL_READY);
@@ -481,7 +485,7 @@ TEST_F(ClientLbEnd2endTest, RoundRobinUpdates) {
// An empty update will result in the channel going into TRANSIENT_FAILURE.
ports.clear();
SetNextResolution(ports);
- grpc_connectivity_state channel_state = GRPC_CHANNEL_INIT;
+ grpc_connectivity_state channel_state;
do {
channel_state = channel_->GetState(true /* try to connect */);
} while (channel_state == GRPC_CHANNEL_READY);
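The loops above spin on GetState(true) until the channel falls out of READY after the empty resolver update. A rough equivalent that blocks on WaitForStateChange() between reads instead of busy-polling; the target address is a placeholder and nothing needs to be listening there:

    #include <grpc++/grpc++.h>
    #include <chrono>
    #include <iostream>

    int main() {
      auto channel = grpc::CreateChannel("localhost:50051",
                                         grpc::InsecureChannelCredentials());
      grpc_connectivity_state state =
          channel->GetState(true /* try to connect */);
      while (state == GRPC_CHANNEL_READY) {
        // Wait (with a deadline) for the state to move, then re-read it.
        channel->WaitForStateChange(
            state, std::chrono::system_clock::now() + std::chrono::seconds(1));
        state = channel->GetState(true /* try to connect */);
      }
      std::cout << "channel left READY, state=" << state << std::endl;
      return 0;
    }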
diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc
index 1aa547d4e3..c71034bbe8 100644
--- a/test/cpp/end2end/end2end_test.cc
+++ b/test/cpp/end2end/end2end_test.cc
@@ -30,11 +30,13 @@
#include <grpc++/server_builder.h>
#include <grpc++/server_context.h>
#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
#include "src/core/lib/security/credentials/credentials.h"
+#include "src/core/lib/support/env.h"
#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
@@ -704,13 +706,25 @@ TEST_P(End2endTest, ReconnectChannel) {
if (GetParam().inproc) {
return;
}
+ gpr_setenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS", "200");
+ int poller_slowdown_factor = 1;
+ // It needs 2 pollset_works to reconnect the channel with polling engine
+ // "poll"
+ char* s = gpr_getenv("GRPC_POLL_STRATEGY");
+ if (s != nullptr && 0 == strcmp(s, "poll")) {
+ poller_slowdown_factor = 2;
+ }
+ gpr_free(s);
ResetStub();
SendRpc(stub_.get(), 1, false);
RestartServer(std::shared_ptr<AuthMetadataProcessor>());
- // It needs more than kConnectivityCheckIntervalMsec time to reconnect the
- // channel.
- gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_millis(1600, GPR_TIMESPAN)));
+ // It needs more than GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS time to
+ // reconnect the channel.
+ gpr_sleep_until(gpr_time_add(
+ gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_millis(
+ 300 * poller_slowdown_factor * grpc_test_slowdown_factor(),
+ GPR_TIMESPAN)));
SendRpc(stub_.get(), 1, false);
}
@@ -1134,7 +1148,7 @@ TEST_P(End2endTest, ChannelState) {
CompletionQueue cq;
std::chrono::system_clock::time_point deadline =
std::chrono::system_clock::now() + std::chrono::milliseconds(10);
- channel_->NotifyOnStateChange(GRPC_CHANNEL_IDLE, deadline, &cq, NULL);
+ channel_->NotifyOnStateChange(GRPC_CHANNEL_IDLE, deadline, &cq, nullptr);
void* tag;
bool ok = true;
cq.Next(&tag, &ok);
diff --git a/test/cpp/end2end/generic_end2end_test.cc b/test/cpp/end2end/generic_end2end_test.cc
index 9450182302..40949e8f3a 100644
--- a/test/cpp/end2end/generic_end2end_test.cc
+++ b/test/cpp/end2end/generic_end2end_test.cc
@@ -216,9 +216,10 @@ TEST_F(GenericEnd2endTest, SequentialUnaryRpcs) {
std::unique_ptr<ByteBuffer> cli_send_buffer =
SerializeToByteBuffer(&send_request);
+ // Use the same cq as server so that events can be polled in time.
std::unique_ptr<GenericClientAsyncResponseReader> call =
generic_stub_->PrepareUnaryCall(&cli_ctx, kMethodName,
- *cli_send_buffer.get(), &cli_cq_);
+ *cli_send_buffer.get(), srv_cq_.get());
call->StartCall();
ByteBuffer cli_recv_buffer;
call->Finish(&cli_recv_buffer, &recv_status, tag(1));
@@ -226,7 +227,7 @@ TEST_F(GenericEnd2endTest, SequentialUnaryRpcs) {
generic_service_.RequestCall(&srv_ctx, &stream, srv_cq_.get(),
srv_cq_.get(), tag(4));
- verify_ok(srv_cq_.get(), 4, true);
+ server_ok(4);
EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length()));
EXPECT_EQ(kMethodName, srv_ctx.method());
@@ -245,7 +246,7 @@ TEST_F(GenericEnd2endTest, SequentialUnaryRpcs) {
stream.Finish(Status::OK, tag(7));
server_ok(7);
- client_ok(1);
+ verify_ok(srv_cq_.get(), 1, true);
EXPECT_TRUE(ParseFromByteBuffer(&cli_recv_buffer, &recv_response));
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.ok());
@@ -321,8 +322,9 @@ TEST_F(GenericEnd2endTest, SimpleBidiStreaming) {
TEST_F(GenericEnd2endTest, Deadline) {
ResetStub();
- SendRpc(1, true, gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_seconds(10, GPR_TIMESPAN)));
+ SendRpc(1, true,
+ gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
+ gpr_time_from_seconds(10, GPR_TIMESPAN)));
}
} // namespace
diff --git a/test/cpp/end2end/grpclb_end2end_test.cc b/test/cpp/end2end/grpclb_end2end_test.cc
index f73a9c1791..c15ab88da1 100644
--- a/test/cpp/end2end/grpclb_end2end_test.cc
+++ b/test/cpp/end2end/grpclb_end2end_test.cc
@@ -33,10 +33,9 @@
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
-extern "C" {
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/lib/iomgr/sockaddr.h"
-}
+#include "src/core/lib/support/env.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
@@ -332,8 +331,11 @@ class GrpclbEnd2endTest : public ::testing::Test {
num_backends_(num_backends),
num_balancers_(num_balancers),
client_load_reporting_interval_seconds_(
- client_load_reporting_interval_seconds),
- kRequestMessage_("Live long and prosper.") {}
+ client_load_reporting_interval_seconds) {
+ // Make the backup poller poll very frequently in order to pick up
+ // updates from all the subchannels's FDs.
+ gpr_setenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS", "1");
+ }
void SetUp() override {
response_generator_ = grpc_fake_resolver_response_generator_create();
@@ -559,7 +561,6 @@ class GrpclbEnd2endTest : public ::testing::Test {
std::unique_ptr<std::thread> thread_;
};
- const grpc::string kMessage_ = "Live long and prosper.";
const grpc::string server_host_;
const size_t num_backends_;
const size_t num_balancers_;
@@ -571,7 +572,7 @@ class GrpclbEnd2endTest : public ::testing::Test {
std::vector<ServerThread<BackendService>> backend_servers_;
std::vector<ServerThread<BalancerService>> balancer_servers_;
grpc_fake_resolver_response_generator* response_generator_;
- const grpc::string kRequestMessage_;
+ const grpc::string kRequestMessage_ = "Live long and prosper.";
};
class SingleBalancerTest : public GrpclbEnd2endTest {
@@ -658,8 +659,9 @@ TEST_F(SingleBalancerTest, Fallback) {
// Send non-empty serverlist only after kServerlistDelayMs.
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumBackendInResolution /* start_index */), {}),
+ 0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ GetBackendPorts(kNumBackendInResolution /* start_index */), {}),
kServerlistDelayMs);
// Wait until all the fallback backends are reachable.
@@ -724,10 +726,11 @@ TEST_F(SingleBalancerTest, FallbackUpdate) {
// Send non-empty serverlist only after kServerlistDelayMs.
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumBackendInResolution +
- kNumBackendInResolutionUpdate /* start_index */),
- {}),
+ 0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ GetBackendPorts(kNumBackendInResolution +
+ kNumBackendInResolutionUpdate /* start_index */),
+ {}),
kServerlistDelayMs);
// Wait until all the fallback backends are reachable.
@@ -1068,10 +1071,11 @@ TEST_F(SingleBalancerTest, Drop) {
num_of_drop_by_load_balancing_addresses;
const int num_total_addresses = num_backends_ + num_of_drop_addresses;
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(),
- {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
- {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
+ 0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ GetBackendPorts(),
+ {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
+ {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
0);
// Wait until all backends are ready.
WaitForAllBackends();
@@ -1086,7 +1090,7 @@ TEST_F(SingleBalancerTest, Drop) {
} else {
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
- EXPECT_EQ(response.message(), kMessage_);
+ EXPECT_EQ(response.message(), kRequestMessage_);
}
}
EXPECT_EQ(kNumRpcsPerAddress * num_of_drop_addresses, num_drops);
@@ -1107,9 +1111,10 @@ TEST_F(SingleBalancerTest, DropAllFirst) {
const int num_of_drop_by_rate_limiting_addresses = 1;
const int num_of_drop_by_load_balancing_addresses = 1;
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(
- {}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
- {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
+ 0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ {}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
+ {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
0);
const Status status = SendRpc();
EXPECT_FALSE(status.ok());
@@ -1123,9 +1128,10 @@ TEST_F(SingleBalancerTest, DropAll) {
const int num_of_drop_by_rate_limiting_addresses = 1;
const int num_of_drop_by_load_balancing_addresses = 1;
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(
- {}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
- {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
+ 0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ {}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
+ {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
1000);
// First call succeeds.
@@ -1187,10 +1193,11 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) {
num_of_drop_by_load_balancing_addresses;
const int num_total_addresses = num_backends_ + num_of_drop_addresses;
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(),
- {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
- {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
+ 0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ GetBackendPorts(),
+ {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
+ {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
0);
// Wait until all backends are ready.
int num_warmup_ok = 0;
@@ -1210,7 +1217,7 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) {
} else {
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
- EXPECT_EQ(response.message(), kMessage_);
+ EXPECT_EQ(response.message(), kRequestMessage_);
}
}
EXPECT_EQ(kNumRpcsPerAddress * num_of_drop_addresses, num_drops);
diff --git a/test/cpp/end2end/mock_test.cc b/test/cpp/end2end/mock_test.cc
index 0b63c25055..61f4111e3b 100644
--- a/test/cpp/end2end/mock_test.cc
+++ b/test/cpp/end2end/mock_test.cc
@@ -44,19 +44,19 @@
#include <iostream>
using namespace std;
+using ::testing::AtLeast;
+using ::testing::DoAll;
+using ::testing::Invoke;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArgPointee;
+using ::testing::WithArg;
+using ::testing::_;
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
using grpc::testing::EchoTestService;
using grpc::testing::MockClientReaderWriter;
using std::chrono::system_clock;
-using ::testing::AtLeast;
-using ::testing::SetArgPointee;
-using ::testing::SaveArg;
-using ::testing::_;
-using ::testing::Return;
-using ::testing::Invoke;
-using ::testing::WithArg;
-using ::testing::DoAll;
namespace grpc {
namespace testing {
diff --git a/test/cpp/end2end/thread_stress_test.cc b/test/cpp/end2end/thread_stress_test.cc
index f990a7ed9d..90b2eddbbb 100644
--- a/test/cpp/end2end/thread_stress_test.cc
+++ b/test/cpp/end2end/thread_stress_test.cc
@@ -50,23 +50,6 @@ const int kNumRpcs = 1000; // Number of RPCs per thread
namespace grpc {
namespace testing {
-namespace {
-
-// When echo_deadline is requested, deadline seen in the ServerContext is set in
-// the response in seconds.
-void MaybeEchoDeadline(ServerContext* context, const EchoRequest* request,
- EchoResponse* response) {
- if (request->has_param() && request->param().echo_deadline()) {
- gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
- if (context->deadline() != system_clock::time_point::max()) {
- Timepoint2Timespec(context->deadline(), &deadline);
- }
- response->mutable_param()->set_request_deadline(deadline.tv_sec);
- }
-}
-
-} // namespace
-
class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
public:
TestServiceImpl() : signal_client_(false) {}
@@ -74,29 +57,6 @@ class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
Status Echo(ServerContext* context, const EchoRequest* request,
EchoResponse* response) override {
response->set_message(request->message());
- MaybeEchoDeadline(context, request, response);
- if (request->has_param() && request->param().client_cancel_after_us()) {
- {
- std::unique_lock<std::mutex> lock(mu_);
- signal_client_ = true;
- }
- while (!context->IsCancelled()) {
- gpr_sleep_until(gpr_time_add(
- gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_micros(request->param().client_cancel_after_us(),
- GPR_TIMESPAN)));
- }
- return Status::CANCELLED;
- } else if (request->has_param() &&
- request->param().server_cancel_after_us()) {
- gpr_sleep_until(gpr_time_add(
- gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_micros(request->param().server_cancel_after_us(),
- GPR_TIMESPAN)));
- return Status::CANCELLED;
- } else {
- EXPECT_FALSE(context->IsCancelled());
- }
return Status::OK;
}
diff --git a/test/cpp/grpclb/grpclb_api_test.cc b/test/cpp/grpclb/grpclb_api_test.cc
index 6b0350e1f9..7b62080b49 100644
--- a/test/cpp/grpclb/grpclb_api_test.cc
+++ b/test/cpp/grpclb/grpclb_api_test.cc
@@ -48,7 +48,7 @@ grpc::string PackedStringToIp(const grpc_grpclb_ip_address& pb_ip) {
} else {
abort();
}
- GPR_ASSERT(inet_ntop(af, (void*)pb_ip.bytes, ip_str, 46) != NULL);
+ GPR_ASSERT(inet_ntop(af, (void*)pb_ip.bytes, ip_str, 46) != nullptr);
return ip_str;
}
diff --git a/test/cpp/grpclb/grpclb_test.cc b/test/cpp/grpclb/grpclb_test.cc
index e740ea513a..ca846c72fd 100644
--- a/test/cpp/grpclb/grpclb_test.cc
+++ b/test/cpp/grpclb/grpclb_test.cc
@@ -35,13 +35,14 @@
#include <grpc/support/time.h>
#include <grpc++/impl/codegen/config.h>
-extern "C" {
+
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
+#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/support/tmpfile.h"
#include "src/core/lib/surface/channel.h"
@@ -49,7 +50,6 @@ extern "C" {
#include "test/core/end2end/cq_verifier.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
-}
#include "src/proto/grpc/lb/v1/load_balancer.pb.h"
@@ -87,19 +87,19 @@ namespace grpc {
namespace {
typedef struct client_fixture {
- grpc_channel *client;
- char *server_uri;
- grpc_completion_queue *cq;
+ grpc_channel* client;
+ char* server_uri;
+ grpc_completion_queue* cq;
} client_fixture;
typedef struct server_fixture {
- grpc_server *server;
- grpc_call *server_call;
- grpc_completion_queue *cq;
- char *servers_hostport;
- const char *balancer_name;
+ grpc_server* server;
+ grpc_call* server_call;
+ grpc_completion_queue* cq;
+ char* servers_hostport;
+ const char* balancer_name;
int port;
- const char *lb_token_prefix;
+ const char* lb_token_prefix;
gpr_thd_id tid;
int num_calls_serviced;
} server_fixture;
@@ -111,12 +111,12 @@ typedef struct test_fixture {
int lb_server_update_delay_ms;
} test_fixture;
-static void *tag(intptr_t t) { return (void *)t; }
+static void* tag(intptr_t t) { return (void*)t; }
static grpc_slice build_response_payload_slice(
- const char *host, int *ports, size_t nports,
+ const char* host, int* ports, size_t nports,
int64_t expiration_interval_secs, int32_t expiration_interval_nanos,
- const char *token_prefix) {
+ const char* token_prefix) {
// server_list {
// servers {
// ip_address: <in_addr/6 bytes of an IP>
@@ -126,10 +126,10 @@ static grpc_slice build_response_payload_slice(
// ...
// }
grpc::lb::v1::LoadBalanceResponse response;
- auto *serverlist = response.mutable_server_list();
+ auto* serverlist = response.mutable_server_list();
if (expiration_interval_secs > 0 || expiration_interval_nanos > 0) {
- auto *expiration_interval = serverlist->mutable_expiration_interval();
+ auto* expiration_interval = serverlist->mutable_expiration_interval();
if (expiration_interval_secs > 0) {
expiration_interval->set_seconds(expiration_interval_secs);
}
@@ -138,12 +138,12 @@ static grpc_slice build_response_payload_slice(
}
}
for (size_t i = 0; i < nports; i++) {
- auto *server = serverlist->add_servers();
+ auto* server = serverlist->add_servers();
// TODO(dgq): test ipv6
struct in_addr ip4;
GPR_ASSERT(inet_pton(AF_INET, host, &ip4) == 1);
server->set_ip_address(
- string(reinterpret_cast<const char *>(&ip4), sizeof(ip4)));
+ string(reinterpret_cast<const char*>(&ip4), sizeof(ip4)));
server->set_port(ports[i]);
// Missing tokens are acceptable. Test that path.
if (strlen(token_prefix) > 0) {
@@ -151,15 +151,15 @@ static grpc_slice build_response_payload_slice(
server->set_load_balance_token(token_data);
}
}
- const string &enc_resp = response.SerializeAsString();
+ const string& enc_resp = response.SerializeAsString();
return grpc_slice_from_copied_buffer(enc_resp.data(), enc_resp.size());
}
-static void drain_cq(grpc_completion_queue *cq) {
+static void drain_cq(grpc_completion_queue* cq) {
grpc_event ev;
do {
ev = grpc_completion_queue_next(cq, grpc_timeout_seconds_to_deadline(5),
- NULL);
+ nullptr);
} while (ev.type != GRPC_QUEUE_SHUTDOWN);
}
@@ -168,18 +168,18 @@ static void sleep_ms(int delay_ms) {
gpr_time_from_millis(delay_ms, GPR_TIMESPAN)));
}
-static void start_lb_server(server_fixture *sf, int *ports, size_t nports,
+static void start_lb_server(server_fixture* sf, int* ports, size_t nports,
int update_delay_ms) {
- grpc_call *s;
- cq_verifier *cqv = cq_verifier_create(sf->cq);
+ grpc_call* s;
+ cq_verifier* cqv = cq_verifier_create(sf->cq);
grpc_op ops[6];
- grpc_op *op;
+ grpc_op* op;
grpc_metadata_array request_metadata_recv;
grpc_call_details call_details;
grpc_call_error error;
int was_cancelled = 2;
- grpc_byte_buffer *request_payload_recv;
- grpc_byte_buffer *response_payload;
+ grpc_byte_buffer* request_payload_recv;
+ grpc_byte_buffer* response_payload;
memset(ops, 0, sizeof(ops));
grpc_metadata_array_init(&request_metadata_recv);
@@ -198,16 +198,16 @@ static void start_lb_server(server_fixture *sf, int *ports, size_t nports,
// make sure we've received the initial metadata from the grpclb request.
GPR_ASSERT(request_metadata_recv.count > 0);
- GPR_ASSERT(request_metadata_recv.metadata != NULL);
+ GPR_ASSERT(request_metadata_recv.metadata != nullptr);
// receive request for backends
op = ops;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message.recv_message = &request_payload_recv;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
- error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(202), NULL);
+ error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(202), nullptr);
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(202), 1);
cq_verify(cqv);
@@ -232,14 +232,14 @@ static void start_lb_server(server_fixture *sf, int *ports, size_t nports,
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
- error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(201), NULL);
+ error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(201), nullptr);
GPR_ASSERT(GRPC_CALL_OK == error);
gpr_log(GPR_INFO, "LB Server[%s](%s) after tag 201", sf->servers_hostport,
sf->balancer_name);
@@ -262,9 +262,10 @@ static void start_lb_server(server_fixture *sf, int *ports, size_t nports,
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message.send_message = response_payload;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
- error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(203), NULL);
+ error =
+ grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(203), nullptr);
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(203), 1);
cq_verify(cqv);
@@ -284,9 +285,9 @@ static void start_lb_server(server_fixture *sf, int *ports, size_t nports,
grpc_slice status_details = grpc_slice_from_static_string("xyz");
op->data.send_status_from_server.status_details = &status_details;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
- error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(204), NULL);
+ error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(204), nullptr);
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(201), 1);
@@ -303,17 +304,17 @@ static void start_lb_server(server_fixture *sf, int *ports, size_t nports,
grpc_call_details_destroy(&call_details);
}
-static void start_backend_server(server_fixture *sf) {
- grpc_call *s;
- cq_verifier *cqv;
+static void start_backend_server(server_fixture* sf) {
+ grpc_call* s;
+ cq_verifier* cqv;
grpc_op ops[6];
- grpc_op *op;
+ grpc_op* op;
grpc_metadata_array request_metadata_recv;
grpc_call_details call_details;
grpc_call_error error;
int was_cancelled;
- grpc_byte_buffer *request_payload_recv;
- grpc_byte_buffer *response_payload;
+ grpc_byte_buffer* request_payload_recv;
+ grpc_byte_buffer* response_payload;
grpc_event ev;
while (true) {
@@ -328,8 +329,8 @@ static void start_backend_server(server_fixture *sf) {
tag(100));
GPR_ASSERT(GRPC_CALL_OK == error);
gpr_log(GPR_INFO, "Server[%s] up", sf->servers_hostport);
- ev = grpc_completion_queue_next(sf->cq,
- grpc_timeout_seconds_to_deadline(60), NULL);
+ ev = grpc_completion_queue_next(
+ sf->cq, grpc_timeout_seconds_to_deadline(60), nullptr);
if (!ev.success) {
gpr_log(GPR_INFO, "Server[%s] being torn down", sf->servers_hostport);
cq_verifier_destroy(cqv);
@@ -339,8 +340,9 @@ static void start_backend_server(server_fixture *sf) {
}
GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
const string expected_token =
- strlen(sf->lb_token_prefix) == 0 ? "" : sf->lb_token_prefix +
- std::to_string(sf->port);
+ strlen(sf->lb_token_prefix) == 0
+ ? ""
+ : sf->lb_token_prefix + std::to_string(sf->port);
GPR_ASSERT(contains_metadata(&request_metadata_recv, "lb-token",
expected_token.c_str()));
@@ -350,14 +352,15 @@ static void start_backend_server(server_fixture *sf) {
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
- error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(101), NULL);
+ error =
+ grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(101), nullptr);
GPR_ASSERT(GRPC_CALL_OK == error);
gpr_log(GPR_INFO, "Server[%s] after tag 101", sf->servers_hostport);
@@ -368,15 +371,16 @@ static void start_backend_server(server_fixture *sf) {
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message.recv_message = &request_payload_recv;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
- error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
+ error =
+ grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), nullptr);
GPR_ASSERT(GRPC_CALL_OK == error);
ev = grpc_completion_queue_next(
- sf->cq, grpc_timeout_seconds_to_deadline(3), NULL);
+ sf->cq, grpc_timeout_seconds_to_deadline(3), nullptr);
if (ev.type == GRPC_OP_COMPLETE && ev.success) {
GPR_ASSERT(ev.tag == tag(102));
- if (request_payload_recv == NULL) {
+ if (request_payload_recv == nullptr) {
exit = true;
gpr_log(GPR_INFO,
"Server[%s] recv \"close\" from client, exiting. Call #%d",
@@ -397,13 +401,13 @@ static void start_backend_server(server_fixture *sf) {
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message.send_message = response_payload;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
- error =
- grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103), NULL);
+ error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103),
+ nullptr);
GPR_ASSERT(GRPC_CALL_OK == error);
ev = grpc_completion_queue_next(
- sf->cq, grpc_timeout_seconds_to_deadline(3), NULL);
+ sf->cq, grpc_timeout_seconds_to_deadline(3), nullptr);
if (ev.type == GRPC_OP_COMPLETE && ev.success) {
GPR_ASSERT(ev.tag == tag(103));
} else {
@@ -431,9 +435,10 @@ static void start_backend_server(server_fixture *sf) {
grpc_slice_from_static_string("Backend server out a-ok");
op->data.send_status_from_server.status_details = &status_details;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
- error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(104), NULL);
+ error =
+ grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(104), nullptr);
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
@@ -449,18 +454,18 @@ static void start_backend_server(server_fixture *sf) {
}
}
-static void perform_request(client_fixture *cf) {
- grpc_call *c;
- cq_verifier *cqv = cq_verifier_create(cf->cq);
+static void perform_request(client_fixture* cf) {
+ grpc_call* c;
+ cq_verifier* cqv = cq_verifier_create(cf->cq);
grpc_op ops[6];
- grpc_op *op;
+ grpc_op* op;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_status_code status;
grpc_call_error error;
grpc_slice details;
- grpc_byte_buffer *request_payload;
- grpc_byte_buffer *response_payload_recv;
+ grpc_byte_buffer* request_payload;
+ grpc_byte_buffer* response_payload_recv;
int i;
memset(ops, 0, sizeof(ops));
@@ -468,13 +473,13 @@ static void perform_request(client_fixture *cf) {
grpc_slice_from_copied_string("hello world");
grpc_slice host = grpc_slice_from_static_string("foo.test.google.fr:1234");
- c = grpc_channel_create_call(cf->client, NULL, GRPC_PROPAGATE_DEFAULTS,
+ c = grpc_channel_create_call(cf->client, nullptr, GRPC_PROPAGATE_DEFAULTS,
cf->cq, grpc_slice_from_static_string("/foo"),
&host, grpc_timeout_seconds_to_deadline(5),
- NULL);
+ nullptr);
gpr_log(GPR_INFO, "Call 0x%" PRIxPTR " created", (intptr_t)c);
GPR_ASSERT(c);
- char *peer;
+ char* peer;
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
@@ -483,21 +488,21 @@ static void perform_request(client_fixture *cf) {
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
- error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL);
+ error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), nullptr);
GPR_ASSERT(GRPC_CALL_OK == error);
for (i = 0; i < 4; i++) {
@@ -507,14 +512,14 @@ static void perform_request(client_fixture *cf) {
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message.send_message = request_payload;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message.recv_message = &response_payload_recv;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
- error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(2), NULL);
+ error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(2), nullptr);
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(2), 1);
@@ -531,9 +536,9 @@ static void perform_request(client_fixture *cf) {
op = ops;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
- error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(3), NULL);
+ error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(3), nullptr);
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
@@ -555,47 +560,47 @@ static void perform_request(client_fixture *cf) {
}
#define BALANCERS_NAME "lb.name"
-static void setup_client(const server_fixture *lb_server,
- const server_fixture *backends, client_fixture *cf) {
+static void setup_client(const server_fixture* lb_server,
+ const server_fixture* backends, client_fixture* cf) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- char *expected_target_names = NULL;
- const char *backends_name = lb_server->servers_hostport;
+ char* expected_target_names = nullptr;
+ const char* backends_name = lb_server->servers_hostport;
gpr_asprintf(&expected_target_names, "%s;%s", backends_name, BALANCERS_NAME);
- grpc_fake_resolver_response_generator *response_generator =
+ grpc_fake_resolver_response_generator* response_generator =
grpc_fake_resolver_response_generator_create();
- grpc_lb_addresses *addresses = grpc_lb_addresses_create(1, NULL);
- char *lb_uri_str;
+ grpc_lb_addresses* addresses = grpc_lb_addresses_create(1, nullptr);
+ char* lb_uri_str;
gpr_asprintf(&lb_uri_str, "ipv4:%s", lb_server->servers_hostport);
- grpc_uri *lb_uri = grpc_uri_parse(&exec_ctx, lb_uri_str, true);
- GPR_ASSERT(lb_uri != NULL);
+ grpc_uri* lb_uri = grpc_uri_parse(&exec_ctx, lb_uri_str, true);
+ GPR_ASSERT(lb_uri != nullptr);
grpc_lb_addresses_set_address_from_uri(addresses, 0, lb_uri, true,
- lb_server->balancer_name, NULL);
+ lb_server->balancer_name, nullptr);
grpc_uri_destroy(lb_uri);
gpr_free(lb_uri_str);
gpr_asprintf(&cf->server_uri, "fake:///%s", lb_server->servers_hostport);
const grpc_arg fake_addresses =
grpc_lb_addresses_create_channel_arg(addresses);
- grpc_channel_args *fake_result =
- grpc_channel_args_copy_and_add(NULL, &fake_addresses, 1);
+ grpc_channel_args* fake_result =
+ grpc_channel_args_copy_and_add(nullptr, &fake_addresses, 1);
grpc_lb_addresses_destroy(&exec_ctx, addresses);
const grpc_arg new_args[] = {
grpc_fake_transport_expected_targets_arg(expected_target_names),
grpc_fake_resolver_response_generator_arg(response_generator)};
- grpc_channel_args *args =
- grpc_channel_args_copy_and_add(NULL, new_args, GPR_ARRAY_SIZE(new_args));
+ grpc_channel_args* args = grpc_channel_args_copy_and_add(
+ nullptr, new_args, GPR_ARRAY_SIZE(new_args));
gpr_free(expected_target_names);
- cf->cq = grpc_completion_queue_create_for_next(NULL);
- grpc_channel_credentials *fake_creds =
+ cf->cq = grpc_completion_queue_create_for_next(nullptr);
+ grpc_channel_credentials* fake_creds =
grpc_fake_transport_security_credentials_create();
cf->client =
- grpc_secure_channel_create(fake_creds, cf->server_uri, args, NULL);
+ grpc_secure_channel_create(fake_creds, cf->server_uri, args, nullptr);
grpc_fake_resolver_response_generator_set_response(
&exec_ctx, response_generator, fake_result);
grpc_channel_args_destroy(&exec_ctx, fake_result);
@@ -605,23 +610,23 @@ static void setup_client(const server_fixture *lb_server,
grpc_exec_ctx_finish(&exec_ctx);
}
-static void teardown_client(client_fixture *cf) {
+static void teardown_client(client_fixture* cf) {
grpc_completion_queue_shutdown(cf->cq);
drain_cq(cf->cq);
grpc_completion_queue_destroy(cf->cq);
- cf->cq = NULL;
+ cf->cq = nullptr;
grpc_channel_destroy(cf->client);
- cf->client = NULL;
+ cf->client = nullptr;
gpr_free(cf->server_uri);
}
-static void setup_server(const char *host, server_fixture *sf) {
+static void setup_server(const char* host, server_fixture* sf) {
int assigned_port;
- sf->cq = grpc_completion_queue_create_for_next(NULL);
- const char *colon_idx = strchr(host, ':');
+ sf->cq = grpc_completion_queue_create_for_next(nullptr);
+ const char* colon_idx = strchr(host, ':');
if (colon_idx) {
- const char *port_str = colon_idx + 1;
+ const char* port_str = colon_idx + 1;
sf->port = atoi(port_str);
sf->servers_hostport = gpr_strdup(host);
} else {
@@ -629,11 +634,11 @@ static void setup_server(const char *host, server_fixture *sf) {
gpr_join_host_port(&sf->servers_hostport, host, sf->port);
}
- grpc_server_credentials *server_creds =
+ grpc_server_credentials* server_creds =
grpc_fake_transport_security_server_credentials_create();
- sf->server = grpc_server_create(NULL, NULL);
- grpc_server_register_completion_queue(sf->server, sf->cq, NULL);
+ sf->server = grpc_server_create(nullptr, nullptr);
+ grpc_server_register_completion_queue(sf->server, sf->cq, nullptr);
GPR_ASSERT((assigned_port = grpc_server_add_secure_http2_port(
sf->server, sf->servers_hostport, server_creds)) > 0);
grpc_server_credentials_release(server_creds);
@@ -641,23 +646,23 @@ static void setup_server(const char *host, server_fixture *sf) {
grpc_server_start(sf->server);
}
-static void teardown_server(server_fixture *sf) {
+static void teardown_server(server_fixture* sf) {
if (!sf->server) return;
gpr_log(GPR_INFO, "Server[%s] shutting down", sf->servers_hostport);
- grpc_completion_queue *shutdown_cq =
- grpc_completion_queue_create_for_pluck(NULL);
+ grpc_completion_queue* shutdown_cq =
+ grpc_completion_queue_create_for_pluck(nullptr);
grpc_server_shutdown_and_notify(sf->server, shutdown_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(shutdown_cq, tag(1000),
grpc_timeout_seconds_to_deadline(5),
- NULL)
+ nullptr)
.type == GRPC_OP_COMPLETE);
grpc_completion_queue_destroy(shutdown_cq);
grpc_server_destroy(sf->server);
gpr_thd_join(sf->tid);
- sf->server = NULL;
+ sf->server = nullptr;
grpc_completion_queue_shutdown(sf->cq);
drain_cq(sf->cq);
grpc_completion_queue_destroy(sf->cq);
@@ -666,13 +671,13 @@ static void teardown_server(server_fixture *sf) {
gpr_free(sf->servers_hostport);
}
-static void fork_backend_server(void *arg) {
- server_fixture *sf = static_cast<server_fixture *>(arg);
+static void fork_backend_server(void* arg) {
+ server_fixture* sf = static_cast<server_fixture*>(arg);
start_backend_server(sf);
}
-static void fork_lb_server(void *arg) {
- test_fixture *tf = static_cast<test_fixture *>(arg);
+static void fork_lb_server(void* arg) {
+ test_fixture* tf = static_cast<test_fixture*>(arg);
int ports[NUM_BACKENDS];
for (int i = 0; i < NUM_BACKENDS; i++) {
ports[i] = tf->lb_backends[i].port;
@@ -710,7 +715,7 @@ static test_fixture setup_test_fixture(int lb_server_update_delay_ms) {
return tf;
}
-static void teardown_test_fixture(test_fixture *tf) {
+static void teardown_test_fixture(test_fixture* tf) {
teardown_client(&tf->client);
for (int i = 0; i < NUM_BACKENDS; ++i) {
teardown_server(&tf->lb_backends[i]);
@@ -787,9 +792,12 @@ TEST(GrpclbTest, InvalidAddressInServerlist) {}
} // namespace
} // namespace grpc
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
grpc_test_init(argc, argv);
+ // Make the backup poller poll very frequently in order to pick up
+ // updates from all the subchannels' FDs.
+ gpr_setenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS", "1");
grpc_init();
const auto result = RUN_ALL_TESTS();
grpc_shutdown();
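For readers skimming the grpclb_test.cc hunks, the new gpr_setenv call in main() appears to be the only functional change in this file; the extern "C" removal, the env.h include, and the pointer-style churn are build and formatting updates. A consolidated sketch of the resulting main() (the trailing return statement is assumed, since it falls outside the hunk): the environment variable is set before grpc_init(), presumably so the client channel reads it during library initialization.

#include <grpc/grpc.h>
#include <gtest/gtest.h>

#include "src/core/lib/support/env.h"    // gpr_setenv
#include "test/core/util/test_config.h"  // grpc_test_init

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  grpc_test_init(argc, argv);
  // Make the backup poller poll very frequently in order to pick up
  // updates from all the subchannels' FDs.
  gpr_setenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS", "1");
  grpc_init();
  const auto result = RUN_ALL_TESTS();
  grpc_shutdown();
  return result;
}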
diff --git a/test/cpp/interop/http2_client.cc b/test/cpp/interop/http2_client.cc
index 1e04e57d09..2de7abcf17 100644
--- a/test/cpp/interop/http2_client.cc
+++ b/test/cpp/interop/http2_client.cc
@@ -217,7 +217,7 @@ int main(int argc, char** argv) {
"goaway", "max_streams", "ping",
"rst_after_data", "rst_after_header", "rst_during_data"};
char* joined_testcases =
- gpr_strjoin_sep(testcases, GPR_ARRAY_SIZE(testcases), "\n", NULL);
+ gpr_strjoin_sep(testcases, GPR_ARRAY_SIZE(testcases), "\n", nullptr);
gpr_log(GPR_ERROR, "Unsupported test case %s. Valid options are\n%s",
FLAGS_test_case.c_str(), joined_testcases);
diff --git a/test/cpp/interop/interop_server.cc b/test/cpp/interop/interop_server.cc
index 4149724b1e..a24cdc7d2d 100644
--- a/test/cpp/interop/interop_server.cc
+++ b/test/cpp/interop/interop_server.cc
@@ -51,8 +51,9 @@ using grpc::ServerCredentials;
using grpc::ServerReader;
using grpc::ServerReaderWriter;
using grpc::ServerWriter;
-using grpc::WriteOptions;
using grpc::SslServerCredentialsOptions;
+using grpc::Status;
+using grpc::WriteOptions;
using grpc::testing::InteropServerContextInspector;
using grpc::testing::Payload;
using grpc::testing::SimpleRequest;
@@ -62,7 +63,6 @@ using grpc::testing::StreamingInputCallResponse;
using grpc::testing::StreamingOutputCallRequest;
using grpc::testing::StreamingOutputCallResponse;
using grpc::testing::TestService;
-using grpc::Status;
const char kEchoInitialMetadataKey[] = "x-grpc-test-echo-initial";
const char kEchoTrailingBinMetadataKey[] = "x-grpc-test-echo-trailing-bin";
diff --git a/test/cpp/interop/interop_test.cc b/test/cpp/interop/interop_test.cc
index c18fe1ec1f..1bf0d8d10f 100644
--- a/test/cpp/interop/interop_test.cc
+++ b/test/cpp/interop/interop_test.cc
@@ -37,10 +37,8 @@
#include "test/core/util/port.h"
#include "test/cpp/util/test_config.h"
-extern "C" {
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include "src/core/lib/support/string.h"
-}
DEFINE_string(extra_server_flags, "", "Extra flags to pass to server.");
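Several files in this diff drop extern "C" wrappers around core headers (grpclb_test.cc above, interop_test.cc here, and the microbenchmarks below). The sketch that follows uses hypothetical names rather than gRPC APIs; it only illustrates the include-site effect: once a C header guards its own declarations, or the core is compiled as C++, callers can include it directly.

// core_util.h (hypothetical) would guard its own declarations:
//
//   #ifdef __cplusplus
//   extern "C" {
//   #endif
//   int core_util_add(int a, int b);
//   #ifdef __cplusplus
//   }
//   #endif
//
// so a C++ caller goes from
//
//   extern "C" {
//   #include "core_util.h"
//   }
//
// to simply
//
//   #include "core_util.h"
extern "C" int core_util_add(int a, int b) { return a + b; }  // stand-in definition with C linkage
int main() { return core_util_add(2, 2) == 4 ? 0 : 1; }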
diff --git a/test/cpp/interop/reconnect_interop_server.cc b/test/cpp/interop/reconnect_interop_server.cc
index 50af8fcc8a..5e257e1b38 100644
--- a/test/cpp/interop/reconnect_interop_server.cc
+++ b/test/cpp/interop/reconnect_interop_server.cc
@@ -53,9 +53,9 @@ using grpc::ServerWriter;
using grpc::SslServerCredentialsOptions;
using grpc::Status;
using grpc::testing::Empty;
-using grpc::testing::ReconnectService;
using grpc::testing::ReconnectInfo;
using grpc::testing::ReconnectParams;
+using grpc::testing::ReconnectService;
static bool got_sigint = false;
diff --git a/test/cpp/interop/stress_test.cc b/test/cpp/interop/stress_test.cc
index c6d3600be8..028ff11b20 100644
--- a/test/cpp/interop/stress_test.cc
+++ b/test/cpp/interop/stress_test.cc
@@ -106,13 +106,13 @@ DEFINE_bool(use_test_ca, false, "False to use SSL roots for google");
DEFINE_string(server_host_override, "foo.test.google.fr",
"Override the server host which is sent in HTTP header");
-using grpc::testing::kTestCaseList;
using grpc::testing::MetricsService;
using grpc::testing::MetricsServiceImpl;
using grpc::testing::StressTestInteropClient;
using grpc::testing::TestCaseType;
using grpc::testing::UNKNOWN_TEST;
using grpc::testing::WeightedRandomTestSelector;
+using grpc::testing::kTestCaseList;
static int log_level = GPR_LOG_SEVERITY_DEBUG;
@@ -230,7 +230,7 @@ int main(int argc, char** argv) {
log_level = FLAGS_log_level;
gpr_set_log_function(TestLogFunction);
- srand(time(NULL));
+ srand(time(nullptr));
// Parse the server addresses
std::vector<grpc::string> server_addresses;
diff --git a/test/cpp/microbenchmarks/bm_arena.cc b/test/cpp/microbenchmarks/bm_arena.cc
index 165b74670d..5b7c611919 100644
--- a/test/cpp/microbenchmarks/bm_arena.cc
+++ b/test/cpp/microbenchmarks/bm_arena.cc
@@ -18,9 +18,7 @@
/* Benchmark arenas */
-extern "C" {
#include "src/core/lib/support/arena.h"
-}
#include "test/cpp/microbenchmarks/helpers.h"
#include "third_party/benchmark/include/benchmark/benchmark.h"
diff --git a/test/cpp/microbenchmarks/bm_call_create.cc b/test/cpp/microbenchmarks/bm_call_create.cc
index cf9a42e8c6..a45c577320 100644
--- a/test/cpp/microbenchmarks/bm_call_create.cc
+++ b/test/cpp/microbenchmarks/bm_call_create.cc
@@ -29,7 +29,6 @@
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
-extern "C" {
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/deadline/deadline_filter.h"
#include "src/core/ext/filters/http/client/http_client_filter.h"
@@ -43,15 +42,14 @@ extern "C" {
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/transport_impl.h"
-}
#include "src/cpp/client/create_channel_internal.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/cpp/microbenchmarks/helpers.h"
-auto &force_library_initialization = Library::get();
+auto& force_library_initialization = Library::get();
-void BM_Zalloc(benchmark::State &state) {
+void BM_Zalloc(benchmark::State& state) {
// speed of light for call creation is zalloc, so benchmark a few interesting
// sizes
TrackCounters track_counters;
@@ -80,20 +78,20 @@ BENCHMARK(BM_Zalloc)
class BaseChannelFixture {
public:
- BaseChannelFixture(grpc_channel *channel) : channel_(channel) {}
+ BaseChannelFixture(grpc_channel* channel) : channel_(channel) {}
~BaseChannelFixture() { grpc_channel_destroy(channel_); }
- grpc_channel *channel() const { return channel_; }
+ grpc_channel* channel() const { return channel_; }
private:
- grpc_channel *const channel_;
+ grpc_channel* const channel_;
};
class InsecureChannel : public BaseChannelFixture {
public:
InsecureChannel()
: BaseChannelFixture(
- grpc_insecure_channel_create("localhost:1234", NULL, NULL)) {}
+ grpc_insecure_channel_create("localhost:1234", nullptr, nullptr)) {}
};
class LameChannel : public BaseChannelFixture {
@@ -104,17 +102,17 @@ class LameChannel : public BaseChannelFixture {
};
template <class Fixture>
-static void BM_CallCreateDestroy(benchmark::State &state) {
+static void BM_CallCreateDestroy(benchmark::State& state) {
TrackCounters track_counters;
Fixture fixture;
- grpc_completion_queue *cq = grpc_completion_queue_create_for_next(NULL);
+ grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
- void *method_hdl =
- grpc_channel_register_call(fixture.channel(), "/foo/bar", NULL, NULL);
+ void* method_hdl = grpc_channel_register_call(fixture.channel(), "/foo/bar",
+ nullptr, nullptr);
while (state.KeepRunning()) {
grpc_call_unref(grpc_channel_create_registered_call(
- fixture.channel(), NULL, GRPC_PROPAGATE_DEFAULTS, cq, method_hdl,
- deadline, NULL));
+ fixture.channel(), nullptr, GRPC_PROPAGATE_DEFAULTS, cq, method_hdl,
+ deadline, nullptr));
}
grpc_completion_queue_destroy(cq);
track_counters.Finish(state);
@@ -126,11 +124,11 @@ BENCHMARK_TEMPLATE(BM_CallCreateDestroy, LameChannel);
////////////////////////////////////////////////////////////////////////////////
// Benchmarks isolating individual filters
-static void *tag(int i) {
- return reinterpret_cast<void *>(static_cast<intptr_t>(i));
+static void* tag(int i) {
+ return reinterpret_cast<void*>(static_cast<intptr_t>(i));
}
-static void BM_LameChannelCallCreateCpp(benchmark::State &state) {
+static void BM_LameChannelCallCreateCpp(benchmark::State& state) {
TrackCounters track_counters;
auto stub =
grpc::testing::EchoTestService::NewStub(grpc::CreateChannelInternal(
@@ -145,7 +143,7 @@ static void BM_LameChannelCallCreateCpp(benchmark::State &state) {
grpc::ClientContext cli_ctx;
auto reader = stub->AsyncEcho(&cli_ctx, send_request, &cq);
reader->Finish(&recv_response, &recv_status, tag(0));
- void *t;
+ void* t;
bool ok;
GPR_ASSERT(cq.Next(&t, &ok));
GPR_ASSERT(ok);
@@ -154,16 +152,16 @@ static void BM_LameChannelCallCreateCpp(benchmark::State &state) {
}
BENCHMARK(BM_LameChannelCallCreateCpp);
-static void do_nothing(void *ignored) {}
+static void do_nothing(void* ignored) {}
-static void BM_LameChannelCallCreateCore(benchmark::State &state) {
+static void BM_LameChannelCallCreateCore(benchmark::State& state) {
TrackCounters track_counters;
- grpc_channel *channel;
- grpc_completion_queue *cq;
+ grpc_channel* channel;
+ grpc_completion_queue* cq;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
- grpc_byte_buffer *response_payload_recv = NULL;
+ grpc_byte_buffer* response_payload_recv = nullptr;
grpc_status_code status;
grpc_slice details;
grpc::testing::EchoRequest send_request;
@@ -172,23 +170,23 @@ static void BM_LameChannelCallCreateCore(benchmark::State &state) {
channel = grpc_lame_client_channel_create(
"localhost:1234", GRPC_STATUS_UNAUTHENTICATED, "blah");
- cq = grpc_completion_queue_create_for_next(NULL);
- void *rc = grpc_channel_register_call(
- channel, "/grpc.testing.EchoTestService/Echo", NULL, NULL);
+ cq = grpc_completion_queue_create_for_next(nullptr);
+ void* rc = grpc_channel_register_call(
+ channel, "/grpc.testing.EchoTestService/Echo", nullptr, nullptr);
while (state.KeepRunning()) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
- grpc_call *call = grpc_channel_create_registered_call(
- channel, NULL, GRPC_PROPAGATE_DEFAULTS, cq, rc,
- gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+ grpc_call* call = grpc_channel_create_registered_call(
+ channel, nullptr, GRPC_PROPAGATE_DEFAULTS, cq, rc,
+ gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
- grpc_byte_buffer *request_payload_send =
+ grpc_byte_buffer* request_payload_send =
grpc_raw_byte_buffer_create(&send_request_slice, 1);
// Fill in call ops
grpc_op ops[6];
memset(ops, 0, sizeof(ops));
- grpc_op *op = ops;
+ grpc_op* op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op++;
@@ -212,9 +210,9 @@ static void BM_LameChannelCallCreateCore(benchmark::State &state) {
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call, ops,
(size_t)(op - ops),
- (void *)1, NULL));
+ (void*)1, nullptr));
grpc_event ev = grpc_completion_queue_next(
- cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+ cq, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
GPR_ASSERT(ev.type != GRPC_QUEUE_SHUTDOWN);
GPR_ASSERT(ev.success != 0);
grpc_call_unref(call);
@@ -230,14 +228,14 @@ static void BM_LameChannelCallCreateCore(benchmark::State &state) {
}
BENCHMARK(BM_LameChannelCallCreateCore);
-static void BM_LameChannelCallCreateCoreSeparateBatch(benchmark::State &state) {
+static void BM_LameChannelCallCreateCoreSeparateBatch(benchmark::State& state) {
TrackCounters track_counters;
- grpc_channel *channel;
- grpc_completion_queue *cq;
+ grpc_channel* channel;
+ grpc_completion_queue* cq;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
- grpc_byte_buffer *response_payload_recv = NULL;
+ grpc_byte_buffer* response_payload_recv = nullptr;
grpc_status_code status;
grpc_slice details;
grpc::testing::EchoRequest send_request;
@@ -246,23 +244,23 @@ static void BM_LameChannelCallCreateCoreSeparateBatch(benchmark::State &state) {
channel = grpc_lame_client_channel_create(
"localhost:1234", GRPC_STATUS_UNAUTHENTICATED, "blah");
- cq = grpc_completion_queue_create_for_next(NULL);
- void *rc = grpc_channel_register_call(
- channel, "/grpc.testing.EchoTestService/Echo", NULL, NULL);
+ cq = grpc_completion_queue_create_for_next(nullptr);
+ void* rc = grpc_channel_register_call(
+ channel, "/grpc.testing.EchoTestService/Echo", nullptr, nullptr);
while (state.KeepRunning()) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
- grpc_call *call = grpc_channel_create_registered_call(
- channel, NULL, GRPC_PROPAGATE_DEFAULTS, cq, rc,
- gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+ grpc_call* call = grpc_channel_create_registered_call(
+ channel, nullptr, GRPC_PROPAGATE_DEFAULTS, cq, rc,
+ gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
- grpc_byte_buffer *request_payload_send =
+ grpc_byte_buffer* request_payload_send =
grpc_raw_byte_buffer_create(&send_request_slice, 1);
// Fill in call ops
grpc_op ops[3];
memset(ops, 0, sizeof(ops));
- grpc_op *op = ops;
+ grpc_op* op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op++;
@@ -273,7 +271,7 @@ static void BM_LameChannelCallCreateCoreSeparateBatch(benchmark::State &state) {
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call, ops,
(size_t)(op - ops),
- (void *)0, NULL));
+ (void*)nullptr, nullptr));
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
@@ -291,13 +289,13 @@ static void BM_LameChannelCallCreateCoreSeparateBatch(benchmark::State &state) {
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call, ops,
(size_t)(op - ops),
- (void *)1, NULL));
+ (void*)1, nullptr));
grpc_event ev = grpc_completion_queue_next(
- cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+ cq, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
GPR_ASSERT(ev.type != GRPC_QUEUE_SHUTDOWN);
GPR_ASSERT(ev.success == 0);
ev = grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
- NULL);
+ nullptr);
GPR_ASSERT(ev.type != GRPC_QUEUE_SHUTDOWN);
GPR_ASSERT(ev.success != 0);
grpc_call_unref(call);
@@ -313,31 +311,31 @@ static void BM_LameChannelCallCreateCoreSeparateBatch(benchmark::State &state) {
}
BENCHMARK(BM_LameChannelCallCreateCoreSeparateBatch);
-static void FilterDestroy(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void FilterDestroy(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
gpr_free(arg);
}
-static void DoNothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}
+static void DoNothing(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {}
class FakeClientChannelFactory : public grpc_client_channel_factory {
public:
FakeClientChannelFactory() { vtable = &vtable_; }
private:
- static void NoRef(grpc_client_channel_factory *factory) {}
- static void NoUnref(grpc_exec_ctx *exec_ctx,
- grpc_client_channel_factory *factory) {}
- static grpc_subchannel *CreateSubchannel(grpc_exec_ctx *exec_ctx,
- grpc_client_channel_factory *factory,
- const grpc_subchannel_args *args) {
+ static void NoRef(grpc_client_channel_factory* factory) {}
+ static void NoUnref(grpc_exec_ctx* exec_ctx,
+ grpc_client_channel_factory* factory) {}
+ static grpc_subchannel* CreateSubchannel(grpc_exec_ctx* exec_ctx,
+ grpc_client_channel_factory* factory,
+ const grpc_subchannel_args* args) {
return nullptr;
}
- static grpc_channel *CreateClientChannel(grpc_exec_ctx *exec_ctx,
- grpc_client_channel_factory *factory,
- const char *target,
+ static grpc_channel* CreateClientChannel(grpc_exec_ctx* exec_ctx,
+ grpc_client_channel_factory* factory,
+ const char* target,
grpc_client_channel_type type,
- const grpc_channel_args *args) {
+ const grpc_channel_args* args) {
return nullptr;
}
@@ -347,11 +345,11 @@ class FakeClientChannelFactory : public grpc_client_channel_factory {
const grpc_client_channel_factory_vtable FakeClientChannelFactory::vtable_ = {
NoRef, NoUnref, CreateSubchannel, CreateClientChannel};
-static grpc_arg StringArg(const char *key, const char *value) {
+static grpc_arg StringArg(const char* key, const char* value) {
grpc_arg a;
a.type = GRPC_ARG_STRING;
- a.key = const_cast<char *>(key);
- a.value.string = const_cast<char *>(value);
+ a.key = const_cast<char*>(key);
+ a.value.string = const_cast<char*>(value);
return a;
}
@@ -360,45 +358,45 @@ enum FixtureFlags : uint32_t {
REQUIRES_TRANSPORT = 2,
};
-template <const grpc_channel_filter *kFilter, uint32_t kFlags>
+template <const grpc_channel_filter* kFilter, uint32_t kFlags>
struct Fixture {
- const grpc_channel_filter *filter = kFilter;
+ const grpc_channel_filter* filter = kFilter;
const uint32_t flags = kFlags;
};
namespace dummy_filter {
-static void StartTransportStreamOp(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {}
+static void StartTransportStreamOp(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op) {}
-static void StartTransportOp(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_transport_op *op) {}
+static void StartTransportOp(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_transport_op* op) {}
-static grpc_error *InitCallElem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
+static grpc_error* InitCallElem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
return GRPC_ERROR_NONE;
}
-static void SetPollsetOrPollsetSet(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_polling_entity *pollent) {}
+static void SetPollsetOrPollsetSet(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_polling_entity* pollent) {}
-static void DestroyCallElem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *then_sched_closure) {}
+static void DestroyCallElem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* then_sched_closure) {}
-grpc_error *InitChannelElem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+grpc_error* InitChannelElem(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
return GRPC_ERROR_NONE;
}
-void DestroyChannelElem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem) {}
+void DestroyChannelElem(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem) {}
-void GetChannelInfo(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
- const grpc_channel_info *channel_info) {}
+void GetChannelInfo(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+ const grpc_channel_info* channel_info) {}
static const grpc_channel_filter dummy_filter = {StartTransportStreamOp,
StartTransportOp,
@@ -421,42 +419,42 @@ namespace dummy_transport {
size_t sizeof_stream; /* = sizeof(transport stream) */
/* name of this transport implementation */
-const char *name;
+const char* name;
/* implementation of grpc_transport_init_stream */
-int InitStream(grpc_exec_ctx *exec_ctx, grpc_transport *self,
- grpc_stream *stream, grpc_stream_refcount *refcount,
- const void *server_data, gpr_arena *arena) {
+int InitStream(grpc_exec_ctx* exec_ctx, grpc_transport* self,
+ grpc_stream* stream, grpc_stream_refcount* refcount,
+ const void* server_data, gpr_arena* arena) {
return 0;
}
/* implementation of grpc_transport_set_pollset */
-void SetPollset(grpc_exec_ctx *exec_ctx, grpc_transport *self,
- grpc_stream *stream, grpc_pollset *pollset) {}
+void SetPollset(grpc_exec_ctx* exec_ctx, grpc_transport* self,
+ grpc_stream* stream, grpc_pollset* pollset) {}
/* implementation of grpc_transport_set_pollset_set */
-void SetPollsetSet(grpc_exec_ctx *exec_ctx, grpc_transport *self,
- grpc_stream *stream, grpc_pollset_set *pollset_set) {}
+void SetPollsetSet(grpc_exec_ctx* exec_ctx, grpc_transport* self,
+ grpc_stream* stream, grpc_pollset_set* pollset_set) {}
/* implementation of grpc_transport_perform_stream_op */
-void PerformStreamOp(grpc_exec_ctx *exec_ctx, grpc_transport *self,
- grpc_stream *stream, grpc_transport_stream_op_batch *op) {
+void PerformStreamOp(grpc_exec_ctx* exec_ctx, grpc_transport* self,
+ grpc_stream* stream, grpc_transport_stream_op_batch* op) {
GRPC_CLOSURE_SCHED(exec_ctx, op->on_complete, GRPC_ERROR_NONE);
}
/* implementation of grpc_transport_perform_op */
-void PerformOp(grpc_exec_ctx *exec_ctx, grpc_transport *self,
- grpc_transport_op *op) {}
+void PerformOp(grpc_exec_ctx* exec_ctx, grpc_transport* self,
+ grpc_transport_op* op) {}
/* implementation of grpc_transport_destroy_stream */
-void DestroyStream(grpc_exec_ctx *exec_ctx, grpc_transport *self,
- grpc_stream *stream, grpc_closure *then_sched_closure) {}
+void DestroyStream(grpc_exec_ctx* exec_ctx, grpc_transport* self,
+ grpc_stream* stream, grpc_closure* then_sched_closure) {}
/* implementation of grpc_transport_destroy */
-void Destroy(grpc_exec_ctx *exec_ctx, grpc_transport *self) {}
+void Destroy(grpc_exec_ctx* exec_ctx, grpc_transport* self) {}
/* implementation of grpc_transport_get_endpoint */
-grpc_endpoint *GetEndpoint(grpc_exec_ctx *exec_ctx, grpc_transport *self) {
+grpc_endpoint* GetEndpoint(grpc_exec_ctx* exec_ctx, grpc_transport* self) {
return nullptr;
}
@@ -474,8 +472,8 @@ class NoOp {
public:
class Op {
public:
- Op(grpc_exec_ctx *exec_ctx, NoOp *p, grpc_call_stack *s) {}
- void Finish(grpc_exec_ctx *exec_ctx) {}
+ Op(grpc_exec_ctx* exec_ctx, NoOp* p, grpc_call_stack* s) {}
+ void Finish(grpc_exec_ctx* exec_ctx) {}
};
};
@@ -491,11 +489,11 @@ class SendEmptyMetadata {
class Op {
public:
- Op(grpc_exec_ctx *exec_ctx, SendEmptyMetadata *p, grpc_call_stack *s) {
+ Op(grpc_exec_ctx* exec_ctx, SendEmptyMetadata* p, grpc_call_stack* s) {
grpc_metadata_batch_init(&batch_);
p->op_payload_.send_initial_metadata.send_initial_metadata = &batch_;
}
- void Finish(grpc_exec_ctx *exec_ctx) {
+ void Finish(grpc_exec_ctx* exec_ctx) {
grpc_metadata_batch_destroy(exec_ctx, &batch_);
}
@@ -516,7 +514,7 @@ class SendEmptyMetadata {
// Fixture<> template to specify this), and TestOp defines some unit of work to
// perform on said filter.
template <class Fixture, class TestOp>
-static void BM_IsolatedFilter(benchmark::State &state) {
+static void BM_IsolatedFilter(benchmark::State& state) {
TrackCounters track_counters;
Fixture fixture;
std::ostringstream label;
@@ -529,7 +527,7 @@ static void BM_IsolatedFilter(benchmark::State &state) {
grpc_channel_args channel_args = {args.size(), &args[0]};
- std::vector<const grpc_channel_filter *> filters;
+ std::vector<const grpc_channel_filter*> filters;
if (fixture.filter != nullptr) {
filters.push_back(fixture.filter);
}
@@ -540,9 +538,9 @@ static void BM_IsolatedFilter(benchmark::State &state) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
size_t channel_size = grpc_channel_stack_size(
- filters.size() == 0 ? NULL : &filters[0], filters.size());
- grpc_channel_stack *channel_stack =
- static_cast<grpc_channel_stack *>(gpr_zalloc(channel_size));
+ filters.size() == 0 ? nullptr : &filters[0], filters.size());
+ grpc_channel_stack* channel_stack =
+ static_cast<grpc_channel_stack*>(gpr_zalloc(channel_size));
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"channel_stack_init",
grpc_channel_stack_init(&exec_ctx, 1, FilterDestroy, channel_stack,
@@ -552,8 +550,8 @@ static void BM_IsolatedFilter(benchmark::State &state) {
: nullptr,
"CHANNEL", channel_stack)));
grpc_exec_ctx_flush(&exec_ctx);
- grpc_call_stack *call_stack = static_cast<grpc_call_stack *>(
- gpr_zalloc(channel_stack->call_stack_size));
+ grpc_call_stack* call_stack =
+ static_cast<grpc_call_stack*>(gpr_zalloc(channel_stack->call_stack_size));
grpc_millis deadline = GRPC_MILLIS_INF_FUTURE;
gpr_timespec start_time = gpr_now(GPR_CLOCK_MONOTONIC);
grpc_slice method = grpc_slice_from_static_string("/foo/bar");
@@ -561,8 +559,8 @@ static void BM_IsolatedFilter(benchmark::State &state) {
TestOp test_op_data;
grpc_call_element_args call_args;
call_args.call_stack = call_stack;
- call_args.server_transport_data = NULL;
- call_args.context = NULL;
+ call_args.server_transport_data = nullptr;
+ call_args.context = nullptr;
call_args.path = method;
call_args.start_time = start_time;
call_args.deadline = deadline;
@@ -571,9 +569,9 @@ static void BM_IsolatedFilter(benchmark::State &state) {
while (state.KeepRunning()) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
GRPC_ERROR_UNREF(grpc_call_stack_init(&exec_ctx, channel_stack, 1,
- DoNothing, NULL, &call_args));
+ DoNothing, nullptr, &call_args));
typename TestOp::Op op(&exec_ctx, &test_op_data, call_stack);
- grpc_call_stack_destroy(&exec_ctx, call_stack, &final_info, NULL);
+ grpc_call_stack_destroy(&exec_ctx, call_stack, &final_info, nullptr);
op.Finish(&exec_ctx);
grpc_exec_ctx_flush(&exec_ctx);
// recreate arena every 64k iterations to avoid oom
@@ -630,12 +628,14 @@ BENCHMARK_TEMPLATE(BM_IsolatedFilter, LoadReportingFilter, SendEmptyMetadata);
namespace isolated_call_filter {
-typedef struct { grpc_call_combiner *call_combiner; } call_data;
+typedef struct {
+ grpc_call_combiner* call_combiner;
+} call_data;
-static void StartTransportStreamOp(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- call_data *calld = static_cast<call_data *>(elem->call_data);
+static void StartTransportStreamOp(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op) {
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (op->recv_initial_metadata) {
GRPC_CALL_COMBINER_START(
exec_ctx, calld->call_combiner,
@@ -650,42 +650,42 @@ static void StartTransportStreamOp(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, op->on_complete, GRPC_ERROR_NONE);
}
-static void StartTransportOp(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_transport_op *op) {
+static void StartTransportOp(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_transport_op* op) {
if (op->disconnect_with_error != GRPC_ERROR_NONE) {
GRPC_ERROR_UNREF(op->disconnect_with_error);
}
GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
}
-static grpc_error *InitCallElem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *calld = static_cast<call_data *>(elem->call_data);
+static grpc_error* InitCallElem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ call_data* calld = static_cast<call_data*>(elem->call_data);
calld->call_combiner = args->call_combiner;
return GRPC_ERROR_NONE;
}
-static void SetPollsetOrPollsetSet(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_polling_entity *pollent) {}
+static void SetPollsetOrPollsetSet(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_polling_entity* pollent) {}
-static void DestroyCallElem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *then_sched_closure) {
+static void DestroyCallElem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* then_sched_closure) {
GRPC_CLOSURE_SCHED(exec_ctx, then_sched_closure, GRPC_ERROR_NONE);
}
-grpc_error *InitChannelElem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+grpc_error* InitChannelElem(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
return GRPC_ERROR_NONE;
}
-void DestroyChannelElem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem) {}
+void DestroyChannelElem(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem) {}
-void GetChannelInfo(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
- const grpc_channel_info *channel_info) {}
+void GetChannelInfo(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+ const grpc_channel_info* channel_info) {}
static const grpc_channel_filter isolated_call_filter = {
StartTransportStreamOp,
@@ -704,57 +704,58 @@ static const grpc_channel_filter isolated_call_filter = {
class IsolatedCallFixture : public TrackCounters {
public:
IsolatedCallFixture() {
- grpc_channel_stack_builder *builder = grpc_channel_stack_builder_create();
+ grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create();
grpc_channel_stack_builder_set_name(builder, "dummy");
grpc_channel_stack_builder_set_target(builder, "dummy_target");
GPR_ASSERT(grpc_channel_stack_builder_append_filter(
- builder, &isolated_call_filter::isolated_call_filter, NULL, NULL));
+ builder, &isolated_call_filter::isolated_call_filter, nullptr,
+ nullptr));
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
channel_ = grpc_channel_create_with_builder(&exec_ctx, builder,
GRPC_CLIENT_CHANNEL);
grpc_exec_ctx_finish(&exec_ctx);
}
- cq_ = grpc_completion_queue_create_for_next(NULL);
+ cq_ = grpc_completion_queue_create_for_next(nullptr);
}
- void Finish(benchmark::State &state) {
+ void Finish(benchmark::State& state) {
grpc_completion_queue_destroy(cq_);
grpc_channel_destroy(channel_);
TrackCounters::Finish(state);
}
- grpc_channel *channel() const { return channel_; }
- grpc_completion_queue *cq() const { return cq_; }
+ grpc_channel* channel() const { return channel_; }
+ grpc_completion_queue* cq() const { return cq_; }
private:
- grpc_completion_queue *cq_;
- grpc_channel *channel_;
+ grpc_completion_queue* cq_;
+ grpc_channel* channel_;
};
-static void BM_IsolatedCall_NoOp(benchmark::State &state) {
+static void BM_IsolatedCall_NoOp(benchmark::State& state) {
IsolatedCallFixture fixture;
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
- void *method_hdl =
- grpc_channel_register_call(fixture.channel(), "/foo/bar", NULL, NULL);
+ void* method_hdl = grpc_channel_register_call(fixture.channel(), "/foo/bar",
+ nullptr, nullptr);
while (state.KeepRunning()) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
grpc_call_unref(grpc_channel_create_registered_call(
fixture.channel(), nullptr, GRPC_PROPAGATE_DEFAULTS, fixture.cq(),
- method_hdl, deadline, NULL));
+ method_hdl, deadline, nullptr));
}
fixture.Finish(state);
}
BENCHMARK(BM_IsolatedCall_NoOp);
-static void BM_IsolatedCall_Unary(benchmark::State &state) {
+static void BM_IsolatedCall_Unary(benchmark::State& state) {
IsolatedCallFixture fixture;
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
- void *method_hdl =
- grpc_channel_register_call(fixture.channel(), "/foo/bar", NULL, NULL);
+ void* method_hdl = grpc_channel_register_call(fixture.channel(), "/foo/bar",
+ nullptr, nullptr);
grpc_slice slice = grpc_slice_from_static_string("hello world");
- grpc_byte_buffer *send_message = grpc_raw_byte_buffer_create(&slice, 1);
- grpc_byte_buffer *recv_message = NULL;
+ grpc_byte_buffer* send_message = grpc_raw_byte_buffer_create(&slice, 1);
+ grpc_byte_buffer* recv_message = nullptr;
grpc_status_code status_code;
grpc_slice status_details = grpc_empty_slice();
grpc_metadata_array recv_initial_metadata;
@@ -778,12 +779,12 @@ static void BM_IsolatedCall_Unary(benchmark::State &state) {
ops[5].data.recv_status_on_client.trailing_metadata = &recv_trailing_metadata;
while (state.KeepRunning()) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
- grpc_call *call = grpc_channel_create_registered_call(
+ grpc_call* call = grpc_channel_create_registered_call(
fixture.channel(), nullptr, GRPC_PROPAGATE_DEFAULTS, fixture.cq(),
- method_hdl, deadline, NULL);
- grpc_call_start_batch(call, ops, 6, tag(1), NULL);
+ method_hdl, deadline, nullptr);
+ grpc_call_start_batch(call, ops, 6, tag(1), nullptr);
grpc_completion_queue_next(fixture.cq(),
- gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL);
+ gpr_inf_future(GPR_CLOCK_MONOTONIC), nullptr);
grpc_call_unref(call);
}
fixture.Finish(state);
@@ -793,13 +794,13 @@ static void BM_IsolatedCall_Unary(benchmark::State &state) {
}
BENCHMARK(BM_IsolatedCall_Unary);
-static void BM_IsolatedCall_StreamingSend(benchmark::State &state) {
+static void BM_IsolatedCall_StreamingSend(benchmark::State& state) {
IsolatedCallFixture fixture;
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
- void *method_hdl =
- grpc_channel_register_call(fixture.channel(), "/foo/bar", NULL, NULL);
+ void* method_hdl = grpc_channel_register_call(fixture.channel(), "/foo/bar",
+ nullptr, nullptr);
grpc_slice slice = grpc_slice_from_static_string("hello world");
- grpc_byte_buffer *send_message = grpc_raw_byte_buffer_create(&slice, 1);
+ grpc_byte_buffer* send_message = grpc_raw_byte_buffer_create(&slice, 1);
grpc_metadata_array recv_initial_metadata;
grpc_metadata_array_init(&recv_initial_metadata);
grpc_metadata_array recv_trailing_metadata;
@@ -810,20 +811,20 @@ static void BM_IsolatedCall_StreamingSend(benchmark::State &state) {
ops[1].op = GRPC_OP_RECV_INITIAL_METADATA;
ops[1].data.recv_initial_metadata.recv_initial_metadata =
&recv_initial_metadata;
- grpc_call *call = grpc_channel_create_registered_call(
+ grpc_call* call = grpc_channel_create_registered_call(
fixture.channel(), nullptr, GRPC_PROPAGATE_DEFAULTS, fixture.cq(),
- method_hdl, deadline, NULL);
- grpc_call_start_batch(call, ops, 2, tag(1), NULL);
+ method_hdl, deadline, nullptr);
+ grpc_call_start_batch(call, ops, 2, tag(1), nullptr);
grpc_completion_queue_next(fixture.cq(), gpr_inf_future(GPR_CLOCK_MONOTONIC),
- NULL);
+ nullptr);
memset(ops, 0, sizeof(ops));
ops[0].op = GRPC_OP_SEND_MESSAGE;
ops[0].data.send_message.send_message = send_message;
while (state.KeepRunning()) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
- grpc_call_start_batch(call, ops, 1, tag(2), NULL);
+ grpc_call_start_batch(call, ops, 1, tag(2), nullptr);
grpc_completion_queue_next(fixture.cq(),
- gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL);
+ gpr_inf_future(GPR_CLOCK_MONOTONIC), nullptr);
}
grpc_call_unref(call);
fixture.Finish(state);
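Formatting aside, bm_call_create.cc keeps the usual google-benchmark shape: fixtures and registered-call handles are built once outside the timed loop, and only the operation under test runs inside state.KeepRunning(). A trivial stand-in sketch of that shape (no gRPC calls; the benchmark name and body are illustrative only):

#include "third_party/benchmark/include/benchmark/benchmark.h"

static void BM_Example(benchmark::State& state) {
  // One-time setup (channel, completion queue, registered method in the real benchmarks).
  int counter = 0;
  while (state.KeepRunning()) {
    // Timed region: the call create/destroy work in the real benchmarks.
    benchmark::DoNotOptimize(counter += 1);
  }
  // Teardown and per-run labels (track_counters.Finish(state)) go here.
}
BENCHMARK(BM_Example);

BENCHMARK_MAIN();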
diff --git a/test/cpp/microbenchmarks/bm_chttp2_hpack.cc b/test/cpp/microbenchmarks/bm_chttp2_hpack.cc
index 5428cc47e7..3fff8b02d6 100644
--- a/test/cpp/microbenchmarks/bm_chttp2_hpack.cc
+++ b/test/cpp/microbenchmarks/bm_chttp2_hpack.cc
@@ -22,21 +22,22 @@
#include <grpc/support/log.h>
#include <string.h>
#include <sstream>
-extern "C" {
+
#include "src/core/ext/transport/chttp2/transport/hpack_encoder.h"
#include "src/core/ext/transport/chttp2/transport/hpack_parser.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/transport/static_metadata.h"
-}
+#include "src/core/lib/transport/timeout_encoding.h"
+
#include "test/cpp/microbenchmarks/helpers.h"
#include "third_party/benchmark/include/benchmark/benchmark.h"
-auto &force_library_initialization = Library::get();
+auto& force_library_initialization = Library::get();
static grpc_slice MakeSlice(std::vector<uint8_t> bytes) {
grpc_slice s = grpc_slice_malloc(bytes.size());
- uint8_t *p = GRPC_SLICE_START_PTR(s);
+ uint8_t* p = GRPC_SLICE_START_PTR(s);
for (auto b : bytes) {
*p++ = b;
}
@@ -47,7 +48,7 @@ static grpc_slice MakeSlice(std::vector<uint8_t> bytes) {
// HPACK encoder
//
-static void BM_HpackEncoderInitDestroy(benchmark::State &state) {
+static void BM_HpackEncoderInitDestroy(benchmark::State& state) {
TrackCounters track_counters;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_chttp2_hpack_compressor c;
@@ -61,7 +62,7 @@ static void BM_HpackEncoderInitDestroy(benchmark::State &state) {
}
BENCHMARK(BM_HpackEncoderInitDestroy);
-static void BM_HpackEncoderEncodeDeadline(benchmark::State &state) {
+static void BM_HpackEncoderEncodeDeadline(benchmark::State& state) {
TrackCounters track_counters;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_millis saved_now = grpc_exec_ctx_now(&exec_ctx);
@@ -84,7 +85,7 @@ static void BM_HpackEncoderEncodeDeadline(benchmark::State &state) {
(size_t)1024,
&stats,
};
- grpc_chttp2_encode_header(&exec_ctx, &c, NULL, 0, &b, &hopt, &outbuf);
+ grpc_chttp2_encode_header(&exec_ctx, &c, nullptr, 0, &b, &hopt, &outbuf);
grpc_slice_buffer_reset_and_unref_internal(&exec_ctx, &outbuf);
grpc_exec_ctx_flush(&exec_ctx);
}
@@ -94,17 +95,19 @@ static void BM_HpackEncoderEncodeDeadline(benchmark::State &state) {
grpc_exec_ctx_finish(&exec_ctx);
std::ostringstream label;
- label << "framing_bytes/iter:" << (static_cast<double>(stats.framing_bytes) /
- static_cast<double>(state.iterations()))
- << " header_bytes/iter:" << (static_cast<double>(stats.header_bytes) /
- static_cast<double>(state.iterations()));
+ label << "framing_bytes/iter:"
+ << (static_cast<double>(stats.framing_bytes) /
+ static_cast<double>(state.iterations()))
+ << " header_bytes/iter:"
+ << (static_cast<double>(stats.header_bytes) /
+ static_cast<double>(state.iterations()));
track_counters.AddLabel(label.str());
track_counters.Finish(state);
}
BENCHMARK(BM_HpackEncoderEncodeDeadline);
template <class Fixture>
-static void BM_HpackEncoderEncodeHeader(benchmark::State &state) {
+static void BM_HpackEncoderEncodeHeader(benchmark::State& state) {
TrackCounters track_counters;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
static bool logged_representative_output = false;
@@ -133,11 +136,11 @@ static void BM_HpackEncoderEncodeHeader(benchmark::State &state) {
(size_t)state.range(1),
&stats,
};
- grpc_chttp2_encode_header(&exec_ctx, &c, NULL, 0, &b, &hopt, &outbuf);
+ grpc_chttp2_encode_header(&exec_ctx, &c, nullptr, 0, &b, &hopt, &outbuf);
if (!logged_representative_output && state.iterations() > 3) {
logged_representative_output = true;
for (size_t i = 0; i < outbuf.count; i++) {
- char *s = grpc_dump_slice(outbuf.slices[i], GPR_DUMP_HEX);
+ char* s = grpc_dump_slice(outbuf.slices[i], GPR_DUMP_HEX);
gpr_log(GPR_DEBUG, "%" PRIdPTR ": %s", i, s);
gpr_free(s);
}
@@ -151,10 +154,12 @@ static void BM_HpackEncoderEncodeHeader(benchmark::State &state) {
grpc_exec_ctx_finish(&exec_ctx);
std::ostringstream label;
- label << "framing_bytes/iter:" << (static_cast<double>(stats.framing_bytes) /
- static_cast<double>(state.iterations()))
- << " header_bytes/iter:" << (static_cast<double>(stats.header_bytes) /
- static_cast<double>(state.iterations()));
+ label << "framing_bytes/iter:"
+ << (static_cast<double>(stats.framing_bytes) /
+ static_cast<double>(state.iterations()))
+ << " header_bytes/iter:"
+ << (static_cast<double>(stats.header_bytes) /
+ static_cast<double>(state.iterations()));
track_counters.AddLabel(label.str());
track_counters.Finish(state);
}
@@ -164,7 +169,7 @@ namespace hpack_encoder_fixtures {
class EmptyBatch {
public:
static constexpr bool kEnableTrueBinary = false;
- static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) {
+ static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
return {};
}
};
@@ -172,7 +177,7 @@ class EmptyBatch {
class SingleStaticElem {
public:
static constexpr bool kEnableTrueBinary = false;
- static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) {
+ static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
return {GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE};
}
};
@@ -180,7 +185,7 @@ class SingleStaticElem {
class SingleInternedElem {
public:
static constexpr bool kEnableTrueBinary = false;
- static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) {
+ static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
return {grpc_mdelem_from_slices(
exec_ctx, grpc_slice_intern(grpc_slice_from_static_string("abc")),
grpc_slice_intern(grpc_slice_from_static_string("def")))};
@@ -191,7 +196,7 @@ template <int kLength, bool kTrueBinary>
class SingleInternedBinaryElem {
public:
static constexpr bool kEnableTrueBinary = kTrueBinary;
- static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) {
+ static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
grpc_slice bytes = MakeBytes();
std::vector<grpc_mdelem> out = {grpc_mdelem_from_slices(
exec_ctx, grpc_slice_intern(grpc_slice_from_static_string("abc-bin")),
@@ -213,7 +218,7 @@ class SingleInternedBinaryElem {
class SingleInternedKeyElem {
public:
static constexpr bool kEnableTrueBinary = false;
- static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) {
+ static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
return {grpc_mdelem_from_slices(
exec_ctx, grpc_slice_intern(grpc_slice_from_static_string("abc")),
grpc_slice_from_static_string("def"))};
@@ -223,7 +228,7 @@ class SingleInternedKeyElem {
class SingleNonInternedElem {
public:
static constexpr bool kEnableTrueBinary = false;
- static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) {
+ static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
return {grpc_mdelem_from_slices(exec_ctx,
grpc_slice_from_static_string("abc"),
grpc_slice_from_static_string("def"))};
@@ -234,7 +239,7 @@ template <int kLength, bool kTrueBinary>
class SingleNonInternedBinaryElem {
public:
static constexpr bool kEnableTrueBinary = kTrueBinary;
- static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) {
+ static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
return {grpc_mdelem_from_slices(
exec_ctx, grpc_slice_from_static_string("abc-bin"), MakeBytes())};
}
@@ -252,9 +257,10 @@ class SingleNonInternedBinaryElem {
class RepresentativeClientInitialMetadata {
public:
static constexpr bool kEnableTrueBinary = true;
- static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) {
+ static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
return {
- GRPC_MDELEM_SCHEME_HTTP, GRPC_MDELEM_METHOD_POST,
+ GRPC_MDELEM_SCHEME_HTTP,
+ GRPC_MDELEM_METHOD_POST,
grpc_mdelem_from_slices(
exec_ctx, GRPC_MDSTR_PATH,
grpc_slice_intern(grpc_slice_from_static_string("/foo/bar"))),
@@ -277,9 +283,10 @@ class RepresentativeClientInitialMetadata {
class MoreRepresentativeClientInitialMetadata {
public:
static constexpr bool kEnableTrueBinary = true;
- static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) {
+ static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
return {
- GRPC_MDELEM_SCHEME_HTTP, GRPC_MDELEM_METHOD_POST,
+ GRPC_MDELEM_SCHEME_HTTP,
+ GRPC_MDELEM_METHOD_POST,
grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_PATH,
grpc_slice_intern(grpc_slice_from_static_string(
"/grpc.test.FooService/BarMethod"))),
@@ -313,7 +320,7 @@ class MoreRepresentativeClientInitialMetadata {
class RepresentativeServerInitialMetadata {
public:
static constexpr bool kEnableTrueBinary = true;
- static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) {
+ static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
return {GRPC_MDELEM_STATUS_200,
GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC,
GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE_COMMA_GZIP};
@@ -323,7 +330,7 @@ class RepresentativeServerInitialMetadata {
class RepresentativeServerTrailingMetadata {
public:
static constexpr bool kEnableTrueBinary = true;
- static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) {
+ static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
return {GRPC_MDELEM_GRPC_STATUS_0};
}
};
@@ -422,7 +429,7 @@ BENCHMARK_TEMPLATE(BM_HpackEncoderEncodeHeader,
// HPACK parser
//
-static void BM_HpackParserInitDestroy(benchmark::State &state) {
+static void BM_HpackParserInitDestroy(benchmark::State& state) {
TrackCounters track_counters;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_chttp2_hpack_parser p;
@@ -436,20 +443,20 @@ static void BM_HpackParserInitDestroy(benchmark::State &state) {
}
BENCHMARK(BM_HpackParserInitDestroy);
-static void UnrefHeader(grpc_exec_ctx *exec_ctx, void *user_data,
+static void UnrefHeader(grpc_exec_ctx* exec_ctx, void* user_data,
grpc_mdelem md) {
GRPC_MDELEM_UNREF(exec_ctx, md);
}
-template <class Fixture>
-static void BM_HpackParserParseHeader(benchmark::State &state) {
+template <class Fixture, void (*OnHeader)(grpc_exec_ctx*, void*, grpc_mdelem)>
+static void BM_HpackParserParseHeader(benchmark::State& state) {
TrackCounters track_counters;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
std::vector<grpc_slice> init_slices = Fixture::GetInitSlices();
std::vector<grpc_slice> benchmark_slices = Fixture::GetBenchmarkSlices();
grpc_chttp2_hpack_parser p;
grpc_chttp2_hpack_parser_init(&exec_ctx, &p);
- p.on_header = UnrefHeader;
+ p.on_header = OnHeader;
p.on_header_user_data = nullptr;
for (auto slice : init_slices) {
GPR_ASSERT(GRPC_ERROR_NONE ==
@@ -759,32 +766,97 @@ class RepresentativeServerTrailingMetadata {
}
};
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, EmptyBatch);
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, IndexedSingleStaticElem);
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, AddIndexedSingleStaticElem);
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, KeyIndexedSingleStaticElem);
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, IndexedSingleInternedElem);
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, AddIndexedSingleInternedElem);
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, KeyIndexedSingleInternedElem);
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedElem);
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<1, false>);
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<3, false>);
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<10, false>);
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<31, false>);
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<100, false>);
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<1, true>);
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<3, true>);
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<10, true>);
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<31, true>);
-BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<100, true>);
+static void free_timeout(void* p) { gpr_free(p); }
+
+// New implementation.
+static void OnHeaderNew(grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_mdelem md) {
+ if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_GRPC_TIMEOUT)) {
+ grpc_millis* cached_timeout =
+ static_cast<grpc_millis*>(grpc_mdelem_get_user_data(md, free_timeout));
+ grpc_millis timeout;
+ if (cached_timeout != nullptr) {
+ timeout = *cached_timeout;
+ } else {
+ if (!grpc_http2_decode_timeout(GRPC_MDVALUE(md), &timeout)) {
+ char* val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'", val);
+ gpr_free(val);
+ timeout = GRPC_MILLIS_INF_FUTURE;
+ }
+ if (GRPC_MDELEM_IS_INTERNED(md)) {
+ /* not already parsed: parse it now, and store the
+ * result away */
+ cached_timeout = (grpc_millis*)gpr_malloc(sizeof(grpc_millis));
+ *cached_timeout = timeout;
+ grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
+ }
+ }
+ benchmark::DoNotOptimize(timeout);
+ GRPC_MDELEM_UNREF(exec_ctx, md);
+ } else {
+ GPR_ASSERT(0);
+ }
+}
+
+// Send the same deadline repeatedly
+class SameDeadline {
+ public:
+ static std::vector<grpc_slice> GetInitSlices() {
+ return {
+ grpc_slice_from_static_string("@\x0cgrpc-timeout\x03"
+ "30S")};
+ }
+ static std::vector<grpc_slice> GetBenchmarkSlices() {
+ // Use saved key and literal value.
+ return {MakeSlice({0x0f, 0x2f, 0x03, '3', '0', 'S'})};
+ }
+};
+
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, EmptyBatch, UnrefHeader);
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, IndexedSingleStaticElem,
+ UnrefHeader);
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, AddIndexedSingleStaticElem,
+ UnrefHeader);
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, KeyIndexedSingleStaticElem,
+ UnrefHeader);
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, IndexedSingleInternedElem,
+ UnrefHeader);
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, AddIndexedSingleInternedElem,
+ UnrefHeader);
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, KeyIndexedSingleInternedElem,
+ UnrefHeader);
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedElem, UnrefHeader);
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<1, false>,
+ UnrefHeader);
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<3, false>,
+ UnrefHeader);
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<10, false>,
+ UnrefHeader);
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<31, false>,
+ UnrefHeader);
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<100, false>,
+ UnrefHeader);
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<1, true>,
+ UnrefHeader);
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<3, true>,
+ UnrefHeader);
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<10, true>,
+ UnrefHeader);
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<31, true>,
+ UnrefHeader);
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, NonIndexedBinaryElem<100, true>,
+ UnrefHeader);
BENCHMARK_TEMPLATE(BM_HpackParserParseHeader,
- RepresentativeClientInitialMetadata);
+ RepresentativeClientInitialMetadata, UnrefHeader);
BENCHMARK_TEMPLATE(BM_HpackParserParseHeader,
- MoreRepresentativeClientInitialMetadata);
+ MoreRepresentativeClientInitialMetadata, UnrefHeader);
BENCHMARK_TEMPLATE(BM_HpackParserParseHeader,
- RepresentativeServerInitialMetadata);
+ RepresentativeServerInitialMetadata, UnrefHeader);
BENCHMARK_TEMPLATE(BM_HpackParserParseHeader,
- RepresentativeServerTrailingMetadata);
+ RepresentativeServerTrailingMetadata, UnrefHeader);
+
+BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, SameDeadline, OnHeaderNew);
} // namespace hpack_parser_fixtures
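
For context, BM_HpackParserParseHeader is now templated on both the fixture and the on_header callback, so new fixture/callback pairs can be registered without touching the benchmark body. A minimal sketch of two extra registrations, assuming they sit inside the hpack_parser_fixtures namespace above; OnHeaderTouchKey is a hypothetical callback added here purely for illustration and is not part of this change:

// Hypothetical callback: inspect the key before releasing the element.
static void OnHeaderTouchKey(grpc_exec_ctx* exec_ctx, void* user_data,
                             grpc_mdelem md) {
  benchmark::DoNotOptimize(GRPC_SLICE_LENGTH(GRPC_MDKEY(md)));
  GRPC_MDELEM_UNREF(exec_ctx, md);
}

// Re-run the deadline fixture with the plain unref callback for comparison,
// and exercise the hypothetical callback against a representative batch.
BENCHMARK_TEMPLATE(BM_HpackParserParseHeader, SameDeadline, UnrefHeader);
BENCHMARK_TEMPLATE(BM_HpackParserParseHeader,
                   RepresentativeClientInitialMetadata, OnHeaderTouchKey);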
diff --git a/test/cpp/microbenchmarks/bm_chttp2_transport.cc b/test/cpp/microbenchmarks/bm_chttp2_transport.cc
index 3a484bb790..b95ee0cc01 100644
--- a/test/cpp/microbenchmarks/bm_chttp2_transport.cc
+++ b/test/cpp/microbenchmarks/bm_chttp2_transport.cc
@@ -35,7 +35,7 @@
#include "test/cpp/microbenchmarks/helpers.h"
#include "third_party/benchmark/include/benchmark/benchmark.h"
-auto &force_library_initialization = Library::get();
+auto& force_library_initialization = Library::get();
////////////////////////////////////////////////////////////////////////////////
// Helper classes
@@ -44,15 +44,21 @@ auto &force_library_initialization = Library::get();
class DummyEndpoint : public grpc_endpoint {
public:
DummyEndpoint() {
- static const grpc_endpoint_vtable my_vtable = {
- read, write, add_to_pollset, add_to_pollset_set,
- shutdown, destroy, get_resource_user, get_peer,
- get_fd};
+ static const grpc_endpoint_vtable my_vtable = {read,
+ write,
+ add_to_pollset,
+ add_to_pollset_set,
+ delete_from_pollset_set,
+ shutdown,
+ destroy,
+ get_resource_user,
+ get_peer,
+ get_fd};
grpc_endpoint::vtable = &my_vtable;
ru_ = grpc_resource_user_create(Library::get().rq(), "dummy_endpoint");
}
- void PushInput(grpc_exec_ctx *exec_ctx, grpc_slice slice) {
+ void PushInput(grpc_exec_ctx* exec_ctx, grpc_slice slice) {
if (read_cb_ == nullptr) {
GPR_ASSERT(!have_slice_);
buffered_slice_ = slice;
@@ -65,14 +71,14 @@ class DummyEndpoint : public grpc_endpoint {
}
private:
- grpc_resource_user *ru_;
- grpc_closure *read_cb_ = nullptr;
- grpc_slice_buffer *slices_ = nullptr;
+ grpc_resource_user* ru_;
+ grpc_closure* read_cb_ = nullptr;
+ grpc_slice_buffer* slices_ = nullptr;
bool have_slice_ = false;
grpc_slice buffered_slice_;
- void QueueRead(grpc_exec_ctx *exec_ctx, grpc_slice_buffer *slices,
- grpc_closure *cb) {
+ void QueueRead(grpc_exec_ctx* exec_ctx, grpc_slice_buffer* slices,
+ grpc_closure* cb) {
GPR_ASSERT(read_cb_ == nullptr);
if (have_slice_) {
have_slice_ = false;
@@ -84,51 +90,54 @@ class DummyEndpoint : public grpc_endpoint {
slices_ = slices;
}
- static void read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_slice_buffer *slices, grpc_closure *cb) {
- static_cast<DummyEndpoint *>(ep)->QueueRead(exec_ctx, slices, cb);
+ static void read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* slices, grpc_closure* cb) {
+ static_cast<DummyEndpoint*>(ep)->QueueRead(exec_ctx, slices, cb);
}
- static void write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_slice_buffer *slices, grpc_closure *cb) {
+ static void write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* slices, grpc_closure* cb) {
GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_NONE);
}
- static grpc_workqueue *get_workqueue(grpc_endpoint *ep) { return NULL; }
+ static grpc_workqueue* get_workqueue(grpc_endpoint* ep) { return nullptr; }
- static void add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_pollset *pollset) {}
+ static void add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset* pollset) {}
- static void add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_pollset_set *pollset) {}
+ static void add_to_pollset_set(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset_set* pollset) {}
- static void shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_error *why) {
- grpc_resource_user_shutdown(exec_ctx,
- static_cast<DummyEndpoint *>(ep)->ru_);
- GRPC_CLOSURE_SCHED(exec_ctx, static_cast<DummyEndpoint *>(ep)->read_cb_,
+ static void delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* ep,
+ grpc_pollset_set* pollset) {}
+
+ static void shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_error* why) {
+ grpc_resource_user_shutdown(exec_ctx, static_cast<DummyEndpoint*>(ep)->ru_);
+ GRPC_CLOSURE_SCHED(exec_ctx, static_cast<DummyEndpoint*>(ep)->read_cb_,
why);
}
- static void destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
- grpc_resource_user_unref(exec_ctx, static_cast<DummyEndpoint *>(ep)->ru_);
- delete static_cast<DummyEndpoint *>(ep);
+ static void destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
+ grpc_resource_user_unref(exec_ctx, static_cast<DummyEndpoint*>(ep)->ru_);
+ delete static_cast<DummyEndpoint*>(ep);
}
- static grpc_resource_user *get_resource_user(grpc_endpoint *ep) {
- return static_cast<DummyEndpoint *>(ep)->ru_;
+ static grpc_resource_user* get_resource_user(grpc_endpoint* ep) {
+ return static_cast<DummyEndpoint*>(ep)->ru_;
}
- static char *get_peer(grpc_endpoint *ep) { return gpr_strdup("test"); }
- static int get_fd(grpc_endpoint *ep) { return 0; }
+ static char* get_peer(grpc_endpoint* ep) { return gpr_strdup("test"); }
+ static int get_fd(grpc_endpoint* ep) { return 0; }
};
class Fixture {
public:
- Fixture(const grpc::ChannelArguments &args, bool client) {
+ Fixture(const grpc::ChannelArguments& args, bool client) {
grpc_channel_args c_args = args.c_channel_args();
ep_ = new DummyEndpoint;
t_ = grpc_create_chttp2_transport(exec_ctx(), &c_args, ep_, client);
- grpc_chttp2_transport_start_reading(exec_ctx(), t_, NULL);
+ grpc_chttp2_transport_start_reading(exec_ctx(), t_, nullptr);
FlushExecCtx();
}
@@ -139,18 +148,18 @@ class Fixture {
grpc_exec_ctx_finish(&exec_ctx_);
}
- grpc_chttp2_transport *chttp2_transport() {
- return reinterpret_cast<grpc_chttp2_transport *>(t_);
+ grpc_chttp2_transport* chttp2_transport() {
+ return reinterpret_cast<grpc_chttp2_transport*>(t_);
}
- grpc_transport *transport() { return t_; }
- grpc_exec_ctx *exec_ctx() { return &exec_ctx_; }
+ grpc_transport* transport() { return t_; }
+ grpc_exec_ctx* exec_ctx() { return &exec_ctx_; }
void PushInput(grpc_slice slice) { ep_->PushInput(exec_ctx(), slice); }
private:
- DummyEndpoint *ep_;
+ DummyEndpoint* ep_;
grpc_exec_ctx exec_ctx_ = GRPC_EXEC_CTX_INIT;
- grpc_transport *t_;
+ grpc_transport* t_;
};
class Closure : public grpc_closure {
@@ -160,37 +169,37 @@ class Closure : public grpc_closure {
template <class F>
std::unique_ptr<Closure> MakeClosure(
- F f, grpc_closure_scheduler *sched = grpc_schedule_on_exec_ctx) {
+ F f, grpc_closure_scheduler* sched = grpc_schedule_on_exec_ctx) {
struct C : public Closure {
- C(const F &f, grpc_closure_scheduler *sched) : f_(f) {
+ C(const F& f, grpc_closure_scheduler* sched) : f_(f) {
GRPC_CLOSURE_INIT(this, Execute, this, sched);
}
F f_;
- static void Execute(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- static_cast<C *>(arg)->f_(exec_ctx, error);
+ static void Execute(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ static_cast<C*>(arg)->f_(exec_ctx, error);
}
};
return std::unique_ptr<Closure>(new C(f, sched));
}
template <class F>
-grpc_closure *MakeOnceClosure(
- F f, grpc_closure_scheduler *sched = grpc_schedule_on_exec_ctx) {
+grpc_closure* MakeOnceClosure(
+ F f, grpc_closure_scheduler* sched = grpc_schedule_on_exec_ctx) {
struct C : public grpc_closure {
- C(const F &f) : f_(f) {}
+ C(const F& f) : f_(f) {}
F f_;
- static void Execute(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- static_cast<C *>(arg)->f_(exec_ctx, error);
- delete static_cast<C *>(arg);
+ static void Execute(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ static_cast<C*>(arg)->f_(exec_ctx, error);
+ delete static_cast<C*>(arg);
}
};
- auto *c = new C{f};
+ auto* c = new C{f};
return GRPC_CLOSURE_INIT(c, C::Execute, c, sched);
}
class Stream {
public:
- Stream(Fixture *f) : f_(f) {
+ Stream(Fixture* f) : f_(f) {
stream_size_ = grpc_transport_stream_size(f->transport());
stream_ = gpr_malloc(stream_size_);
arena_ = gpr_arena_create(4096);
@@ -202,7 +211,7 @@ class Stream {
gpr_arena_destroy(arena_);
}
- void Init(benchmark::State &state) {
+ void Init(benchmark::State& state) {
GRPC_STREAM_REF_INIT(&refcount_, 1, &Stream::FinishDestroy, this,
"test_stream");
gpr_event_init(&done_);
@@ -212,11 +221,11 @@ class Stream {
arena_ = gpr_arena_create(4096);
}
grpc_transport_init_stream(f_->exec_ctx(), f_->transport(),
- static_cast<grpc_stream *>(stream_), &refcount_,
- NULL, arena_);
+ static_cast<grpc_stream*>(stream_), &refcount_,
+ nullptr, arena_);
}
- void DestroyThen(grpc_exec_ctx *exec_ctx, grpc_closure *closure) {
+ void DestroyThen(grpc_exec_ctx* exec_ctx, grpc_closure* closure) {
destroy_closure_ = closure;
#ifndef NDEBUG
grpc_stream_unref(exec_ctx, &refcount_, "DestroyThen");
@@ -225,31 +234,31 @@ class Stream {
#endif
}
- void Op(grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op) {
+ void Op(grpc_exec_ctx* exec_ctx, grpc_transport_stream_op_batch* op) {
grpc_transport_perform_stream_op(exec_ctx, f_->transport(),
- static_cast<grpc_stream *>(stream_), op);
+ static_cast<grpc_stream*>(stream_), op);
}
- grpc_chttp2_stream *chttp2_stream() {
- return static_cast<grpc_chttp2_stream *>(stream_);
+ grpc_chttp2_stream* chttp2_stream() {
+ return static_cast<grpc_chttp2_stream*>(stream_);
}
private:
- static void FinishDestroy(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- auto stream = static_cast<Stream *>(arg);
+ static void FinishDestroy(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ auto stream = static_cast<Stream*>(arg);
grpc_transport_destroy_stream(exec_ctx, stream->f_->transport(),
- static_cast<grpc_stream *>(stream->stream_),
+ static_cast<grpc_stream*>(stream->stream_),
stream->destroy_closure_);
- gpr_event_set(&stream->done_, (void *)1);
+ gpr_event_set(&stream->done_, (void*)1);
}
- Fixture *f_;
+ Fixture* f_;
grpc_stream_refcount refcount_;
- gpr_arena *arena_;
+ gpr_arena* arena_;
size_t stream_size_;
- void *stream_;
- grpc_closure *destroy_closure_ = nullptr;
+ void* stream_;
+ grpc_closure* destroy_closure_ = nullptr;
gpr_event done_;
};
@@ -257,7 +266,7 @@ class Stream {
// Benchmarks
//
-static void BM_StreamCreateDestroy(benchmark::State &state) {
+static void BM_StreamCreateDestroy(benchmark::State& state) {
TrackCounters track_counters;
Fixture f(grpc::ChannelArguments(), true);
Stream s(&f);
@@ -268,7 +277,7 @@ static void BM_StreamCreateDestroy(benchmark::State &state) {
op.payload = &op_payload;
op_payload.cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
std::unique_ptr<Closure> next =
- MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
+ MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
if (!state.KeepRunning()) return;
s.Init(state);
s.Op(exec_ctx, &op);
@@ -282,9 +291,10 @@ BENCHMARK(BM_StreamCreateDestroy);
class RepresentativeClientInitialMetadata {
public:
- static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) {
+ static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
return {
- GRPC_MDELEM_SCHEME_HTTP, GRPC_MDELEM_METHOD_POST,
+ GRPC_MDELEM_SCHEME_HTTP,
+ GRPC_MDELEM_METHOD_POST,
grpc_mdelem_from_slices(
exec_ctx, GRPC_MDSTR_PATH,
grpc_slice_intern(grpc_slice_from_static_string("/foo/bar"))),
@@ -302,7 +312,7 @@ class RepresentativeClientInitialMetadata {
};
template <class Metadata>
-static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State &state) {
+static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State& state) {
TrackCounters track_counters;
Fixture f(grpc::ChannelArguments(), true);
Stream s(&f);
@@ -329,7 +339,7 @@ static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State &state) {
}
f.FlushExecCtx();
- start = MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
+ start = MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
if (!state.KeepRunning()) return;
s.Init(state);
reset_op();
@@ -338,7 +348,7 @@ static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State &state) {
op.payload->send_initial_metadata.send_initial_metadata = &b;
s.Op(exec_ctx, &op);
});
- done = MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
+ done = MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
reset_op();
op.cancel_stream = true;
op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
@@ -353,7 +363,7 @@ static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State &state) {
BENCHMARK_TEMPLATE(BM_StreamCreateSendInitialMetadataDestroy,
RepresentativeClientInitialMetadata);
-static void BM_TransportEmptyOp(benchmark::State &state) {
+static void BM_TransportEmptyOp(benchmark::State& state) {
TrackCounters track_counters;
Fixture f(grpc::ChannelArguments(), true);
Stream s(&f);
@@ -366,7 +376,7 @@ static void BM_TransportEmptyOp(benchmark::State &state) {
op.payload = &op_payload;
};
std::unique_ptr<Closure> c =
- MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
+ MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
if (!state.KeepRunning()) return;
reset_op();
op.on_complete = c.get();
@@ -378,8 +388,8 @@ static void BM_TransportEmptyOp(benchmark::State &state) {
op.cancel_stream = true;
op_payload.cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
s.Op(f.exec_ctx(), &op);
- s.DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx *exec_ctx,
- grpc_error *error) {}));
+ s.DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx* exec_ctx,
+ grpc_error* error) {}));
f.FlushExecCtx();
track_counters.Finish(state);
}
@@ -387,7 +397,7 @@ BENCHMARK(BM_TransportEmptyOp);
std::vector<std::unique_ptr<gpr_event>> done_events;
-static void BM_TransportStreamSend(benchmark::State &state) {
+static void BM_TransportStreamSend(benchmark::State& state) {
TrackCounters track_counters;
Fixture f(grpc::ChannelArguments(), true);
auto s = std::unique_ptr<Stream>(new Stream(&f));
@@ -418,13 +428,13 @@ static void BM_TransportStreamSend(benchmark::State &state) {
grpc_metadata_batch_add_tail(f.exec_ctx(), &b, &storage[i], elems[i])));
}
- gpr_event *bm_done = new gpr_event;
+ gpr_event* bm_done = new gpr_event;
gpr_event_init(bm_done);
std::unique_ptr<Closure> c =
- MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
+ MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
if (!state.KeepRunning()) {
- gpr_event_set(bm_done, (void *)1);
+ gpr_event_set(bm_done, (void*)1);
return;
}
// force outgoing window to be yuge
@@ -452,8 +462,8 @@ static void BM_TransportStreamSend(benchmark::State &state) {
op.cancel_stream = true;
op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
s->Op(f.exec_ctx(), &op);
- s->DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx *exec_ctx,
- grpc_error *error) {}));
+ s->DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx* exec_ctx,
+ grpc_error* error) {}));
f.FlushExecCtx();
s.reset();
track_counters.Finish(state);
@@ -519,7 +529,7 @@ static grpc_slice CreateIncomingDataSlice(size_t length, size_t frame_size) {
return grpc_slice_from_copied_buffer(framed.data(), framed.size());
}
-static void BM_TransportStreamRecv(benchmark::State &state) {
+static void BM_TransportStreamRecv(benchmark::State& state) {
TrackCounters track_counters;
Fixture f(grpc::ChannelArguments(), true);
Stream s(&f);
@@ -527,7 +537,7 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
grpc_transport_stream_op_batch_payload op_payload;
memset(&op_payload, 0, sizeof(op_payload));
grpc_transport_stream_op_batch op;
- grpc_byte_stream *recv_stream;
+ grpc_byte_stream* recv_stream;
grpc_slice incoming_data = CreateIncomingDataSlice(state.range(0), 16384);
auto reset_op = [&]() {
@@ -550,7 +560,7 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
}
std::unique_ptr<Closure> do_nothing =
- MakeClosure([](grpc_exec_ctx *exec_ctx, grpc_error *error) {});
+ MakeClosure([](grpc_exec_ctx* exec_ctx, grpc_error* error) {});
uint32_t received;
@@ -560,7 +570,7 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
grpc_slice recv_slice;
std::unique_ptr<Closure> c =
- MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
+ MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
if (!state.KeepRunning()) return;
// force outgoing window to be yuge
s.chttp2_stream()->flow_control->TestOnlyForceHugeWindow();
@@ -575,15 +585,15 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
f.PushInput(grpc_slice_ref(incoming_data));
});
- drain_start = MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
- if (recv_stream == NULL) {
+ drain_start = MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
+ if (recv_stream == nullptr) {
GPR_ASSERT(!state.KeepRunning());
return;
}
GRPC_CLOSURE_RUN(exec_ctx, drain.get(), GRPC_ERROR_NONE);
});
- drain = MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
+ drain = MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
do {
if (received == recv_stream->length) {
grpc_byte_stream_destroy(exec_ctx, recv_stream);
@@ -599,7 +609,7 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
grpc_slice_unref_internal(exec_ctx, recv_slice), true));
});
- drain_continue = MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
+ drain_continue = MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
grpc_byte_stream_pull(exec_ctx, recv_stream, &recv_slice);
received += GRPC_SLICE_LENGTH(recv_slice);
grpc_slice_unref_internal(exec_ctx, recv_slice);
@@ -633,8 +643,8 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
op.cancel_stream = true;
op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
s.Op(f.exec_ctx(), &op);
- s.DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx *exec_ctx,
- grpc_error *error) {}));
+ s.DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx* exec_ctx,
+ grpc_error* error) {}));
f.FlushExecCtx();
track_counters.Finish(state);
grpc_metadata_batch_destroy(f.exec_ctx(), &b);
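
As a usage note, the MakeClosure/MakeOnceClosure helpers above are what let these transport benchmarks express callbacks as lambdas. A minimal sketch of the once-closure variant, assuming the Fixture class and helpers defined in this file; ExampleOnce is illustrative only:

// Illustrative: schedule a heap-allocated one-shot closure; it deletes itself
// after Execute runs, so no manual cleanup is needed.
static void ExampleOnce(Fixture* f) {
  bool fired = false;
  grpc_closure* done = MakeOnceClosure(
      [&fired](grpc_exec_ctx* exec_ctx, grpc_error* error) { fired = true; });
  GRPC_CLOSURE_SCHED(f->exec_ctx(), done, GRPC_ERROR_NONE);
  f->FlushExecCtx();  // drains the exec_ctx, running (and freeing) the closure
  GPR_ASSERT(fired);
}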
diff --git a/test/cpp/microbenchmarks/bm_closure.cc b/test/cpp/microbenchmarks/bm_closure.cc
index 41649b8a73..2434d4e84e 100644
--- a/test/cpp/microbenchmarks/bm_closure.cc
+++ b/test/cpp/microbenchmarks/bm_closure.cc
@@ -22,12 +22,10 @@
#include <grpc/grpc.h>
#include <sstream>
-extern "C" {
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/support/spinlock.h"
-}
#include "test/cpp/microbenchmarks/helpers.h"
@@ -61,7 +59,7 @@ static void BM_ClosureInitAgainstExecCtx(benchmark::State& state) {
grpc_closure c;
while (state.KeepRunning()) {
benchmark::DoNotOptimize(
- GRPC_CLOSURE_INIT(&c, DoNothing, NULL, grpc_schedule_on_exec_ctx));
+ GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_schedule_on_exec_ctx));
}
track_counters.Finish(state);
}
@@ -74,7 +72,7 @@ static void BM_ClosureInitAgainstCombiner(benchmark::State& state) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
benchmark::DoNotOptimize(GRPC_CLOSURE_INIT(
- &c, DoNothing, NULL, grpc_combiner_scheduler(combiner)));
+ &c, DoNothing, nullptr, grpc_combiner_scheduler(combiner)));
}
GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished");
grpc_exec_ctx_finish(&exec_ctx);
@@ -85,7 +83,7 @@ BENCHMARK(BM_ClosureInitAgainstCombiner);
static void BM_ClosureRunOnExecCtx(benchmark::State& state) {
TrackCounters track_counters;
grpc_closure c;
- GRPC_CLOSURE_INIT(&c, DoNothing, NULL, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
GRPC_CLOSURE_RUN(&exec_ctx, &c, GRPC_ERROR_NONE);
@@ -100,9 +98,10 @@ static void BM_ClosureCreateAndRun(benchmark::State& state) {
TrackCounters track_counters;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
- GRPC_CLOSURE_RUN(&exec_ctx, GRPC_CLOSURE_CREATE(DoNothing, NULL,
- grpc_schedule_on_exec_ctx),
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_RUN(
+ &exec_ctx,
+ GRPC_CLOSURE_CREATE(DoNothing, nullptr, grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_NONE);
}
grpc_exec_ctx_finish(&exec_ctx);
track_counters.Finish(state);
@@ -114,9 +113,10 @@ static void BM_ClosureInitAndRun(benchmark::State& state) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_closure c;
while (state.KeepRunning()) {
- GRPC_CLOSURE_RUN(&exec_ctx, GRPC_CLOSURE_INIT(&c, DoNothing, NULL,
- grpc_schedule_on_exec_ctx),
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_RUN(
+ &exec_ctx,
+ GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_NONE);
}
grpc_exec_ctx_finish(&exec_ctx);
track_counters.Finish(state);
@@ -126,7 +126,7 @@ BENCHMARK(BM_ClosureInitAndRun);
static void BM_ClosureSchedOnExecCtx(benchmark::State& state) {
TrackCounters track_counters;
grpc_closure c;
- GRPC_CLOSURE_INIT(&c, DoNothing, NULL, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
GRPC_CLOSURE_SCHED(&exec_ctx, &c, GRPC_ERROR_NONE);
@@ -141,8 +141,8 @@ static void BM_ClosureSched2OnExecCtx(benchmark::State& state) {
TrackCounters track_counters;
grpc_closure c1;
grpc_closure c2;
- GRPC_CLOSURE_INIT(&c1, DoNothing, NULL, grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&c2, DoNothing, NULL, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
GRPC_CLOSURE_SCHED(&exec_ctx, &c1, GRPC_ERROR_NONE);
@@ -159,9 +159,9 @@ static void BM_ClosureSched3OnExecCtx(benchmark::State& state) {
grpc_closure c1;
grpc_closure c2;
grpc_closure c3;
- GRPC_CLOSURE_INIT(&c1, DoNothing, NULL, grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&c2, DoNothing, NULL, grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&c3, DoNothing, NULL, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&c3, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
GRPC_CLOSURE_SCHED(&exec_ctx, &c1, GRPC_ERROR_NONE);
@@ -182,7 +182,7 @@ static void BM_AcquireMutex(benchmark::State& state) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
gpr_mu_lock(&mu);
- DoNothing(&exec_ctx, NULL, GRPC_ERROR_NONE);
+ DoNothing(&exec_ctx, nullptr, GRPC_ERROR_NONE);
gpr_mu_unlock(&mu);
}
grpc_exec_ctx_finish(&exec_ctx);
@@ -198,7 +198,7 @@ static void BM_TryAcquireMutex(benchmark::State& state) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
if (gpr_mu_trylock(&mu)) {
- DoNothing(&exec_ctx, NULL, GRPC_ERROR_NONE);
+ DoNothing(&exec_ctx, nullptr, GRPC_ERROR_NONE);
gpr_mu_unlock(&mu);
} else {
abort();
@@ -216,7 +216,7 @@ static void BM_AcquireSpinlock(benchmark::State& state) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
gpr_spinlock_lock(&mu);
- DoNothing(&exec_ctx, NULL, GRPC_ERROR_NONE);
+ DoNothing(&exec_ctx, nullptr, GRPC_ERROR_NONE);
gpr_spinlock_unlock(&mu);
}
grpc_exec_ctx_finish(&exec_ctx);
@@ -231,7 +231,7 @@ static void BM_TryAcquireSpinlock(benchmark::State& state) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
if (gpr_spinlock_trylock(&mu)) {
- DoNothing(&exec_ctx, NULL, GRPC_ERROR_NONE);
+ DoNothing(&exec_ctx, nullptr, GRPC_ERROR_NONE);
gpr_spinlock_unlock(&mu);
} else {
abort();
@@ -246,7 +246,7 @@ static void BM_ClosureSchedOnCombiner(benchmark::State& state) {
TrackCounters track_counters;
grpc_combiner* combiner = grpc_combiner_create();
grpc_closure c;
- GRPC_CLOSURE_INIT(&c, DoNothing, NULL, grpc_combiner_scheduler(combiner));
+ GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
GRPC_CLOSURE_SCHED(&exec_ctx, &c, GRPC_ERROR_NONE);
@@ -263,8 +263,8 @@ static void BM_ClosureSched2OnCombiner(benchmark::State& state) {
grpc_combiner* combiner = grpc_combiner_create();
grpc_closure c1;
grpc_closure c2;
- GRPC_CLOSURE_INIT(&c1, DoNothing, NULL, grpc_combiner_scheduler(combiner));
- GRPC_CLOSURE_INIT(&c2, DoNothing, NULL, grpc_combiner_scheduler(combiner));
+ GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
+ GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
GRPC_CLOSURE_SCHED(&exec_ctx, &c1, GRPC_ERROR_NONE);
@@ -283,9 +283,9 @@ static void BM_ClosureSched3OnCombiner(benchmark::State& state) {
grpc_closure c1;
grpc_closure c2;
grpc_closure c3;
- GRPC_CLOSURE_INIT(&c1, DoNothing, NULL, grpc_combiner_scheduler(combiner));
- GRPC_CLOSURE_INIT(&c2, DoNothing, NULL, grpc_combiner_scheduler(combiner));
- GRPC_CLOSURE_INIT(&c3, DoNothing, NULL, grpc_combiner_scheduler(combiner));
+ GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
+ GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
+ GRPC_CLOSURE_INIT(&c3, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
GRPC_CLOSURE_SCHED(&exec_ctx, &c1, GRPC_ERROR_NONE);
@@ -305,8 +305,10 @@ static void BM_ClosureSched2OnTwoCombiners(benchmark::State& state) {
grpc_combiner* combiner2 = grpc_combiner_create();
grpc_closure c1;
grpc_closure c2;
- GRPC_CLOSURE_INIT(&c1, DoNothing, NULL, grpc_combiner_scheduler(combiner1));
- GRPC_CLOSURE_INIT(&c2, DoNothing, NULL, grpc_combiner_scheduler(combiner2));
+ GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr,
+ grpc_combiner_scheduler(combiner1));
+ GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr,
+ grpc_combiner_scheduler(combiner2));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
GRPC_CLOSURE_SCHED(&exec_ctx, &c1, GRPC_ERROR_NONE);
@@ -328,10 +330,14 @@ static void BM_ClosureSched4OnTwoCombiners(benchmark::State& state) {
grpc_closure c2;
grpc_closure c3;
grpc_closure c4;
- GRPC_CLOSURE_INIT(&c1, DoNothing, NULL, grpc_combiner_scheduler(combiner1));
- GRPC_CLOSURE_INIT(&c2, DoNothing, NULL, grpc_combiner_scheduler(combiner2));
- GRPC_CLOSURE_INIT(&c3, DoNothing, NULL, grpc_combiner_scheduler(combiner1));
- GRPC_CLOSURE_INIT(&c4, DoNothing, NULL, grpc_combiner_scheduler(combiner2));
+ GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr,
+ grpc_combiner_scheduler(combiner1));
+ GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr,
+ grpc_combiner_scheduler(combiner2));
+ GRPC_CLOSURE_INIT(&c3, DoNothing, nullptr,
+ grpc_combiner_scheduler(combiner1));
+ GRPC_CLOSURE_INIT(&c4, DoNothing, nullptr,
+ grpc_combiner_scheduler(combiner2));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
GRPC_CLOSURE_SCHED(&exec_ctx, &c1, GRPC_ERROR_NONE);
diff --git a/test/cpp/microbenchmarks/bm_cq.cc b/test/cpp/microbenchmarks/bm_cq.cc
index a0c0414f2f..f0dede7333 100644
--- a/test/cpp/microbenchmarks/bm_cq.cc
+++ b/test/cpp/microbenchmarks/bm_cq.cc
@@ -26,9 +26,7 @@
#include <grpc/support/log.h>
#include "test/cpp/microbenchmarks/helpers.h"
-extern "C" {
#include "src/core/lib/surface/completion_queue.h"
-}
namespace grpc {
namespace testing {
@@ -49,7 +47,7 @@ static void BM_CreateDestroyCpp2(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
grpc_completion_queue* core_cq =
- grpc_completion_queue_create_for_next(NULL);
+ grpc_completion_queue_create_for_next(nullptr);
CompletionQueue cq(core_cq);
}
track_counters.Finish(state);
@@ -61,7 +59,8 @@ static void BM_CreateDestroyCore(benchmark::State& state) {
while (state.KeepRunning()) {
// TODO: sreek Templatize this benchmark and pass completion type and
// polling type as parameters
- grpc_completion_queue_destroy(grpc_completion_queue_create_for_next(NULL));
+ grpc_completion_queue_destroy(
+ grpc_completion_queue_create_for_next(nullptr));
}
track_counters.Finish(state);
}
@@ -70,7 +69,7 @@ BENCHMARK(BM_CreateDestroyCore);
static void DoneWithCompletionOnStack(grpc_exec_ctx* exec_ctx, void* arg,
grpc_cq_completion* completion) {}
-class DummyTag final : public CompletionQueueTag {
+class DummyTag final : public internal::CompletionQueueTag {
public:
bool FinalizeResult(void** tag, bool* status) override { return true; }
};
@@ -85,7 +84,7 @@ static void BM_Pass1Cpp(benchmark::State& state) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_ASSERT(grpc_cq_begin_op(c_cq, &dummy_tag));
grpc_cq_end_op(&exec_ctx, c_cq, &dummy_tag, GRPC_ERROR_NONE,
- DoneWithCompletionOnStack, NULL, &completion);
+ DoneWithCompletionOnStack, nullptr, &completion);
grpc_exec_ctx_finish(&exec_ctx);
void* tag;
bool ok;
@@ -98,16 +97,16 @@ BENCHMARK(BM_Pass1Cpp);
static void BM_Pass1Core(benchmark::State& state) {
TrackCounters track_counters;
// TODO: sreek Templatize this benchmark and pass polling_type as a param
- grpc_completion_queue* cq = grpc_completion_queue_create_for_next(NULL);
+ grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
while (state.KeepRunning()) {
grpc_cq_completion completion;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- GPR_ASSERT(grpc_cq_begin_op(cq, NULL));
- grpc_cq_end_op(&exec_ctx, cq, NULL, GRPC_ERROR_NONE,
- DoneWithCompletionOnStack, NULL, &completion);
+ GPR_ASSERT(grpc_cq_begin_op(cq, nullptr));
+ grpc_cq_end_op(&exec_ctx, cq, nullptr, GRPC_ERROR_NONE,
+ DoneWithCompletionOnStack, nullptr, &completion);
grpc_exec_ctx_finish(&exec_ctx);
- grpc_completion_queue_next(cq, deadline, NULL);
+ grpc_completion_queue_next(cq, deadline, nullptr);
}
grpc_completion_queue_destroy(cq);
track_counters.Finish(state);
@@ -117,16 +116,16 @@ BENCHMARK(BM_Pass1Core);
static void BM_Pluck1Core(benchmark::State& state) {
TrackCounters track_counters;
// TODO: sreek Templatize this benchmark and pass polling_type as a param
- grpc_completion_queue* cq = grpc_completion_queue_create_for_pluck(NULL);
+ grpc_completion_queue* cq = grpc_completion_queue_create_for_pluck(nullptr);
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
while (state.KeepRunning()) {
grpc_cq_completion completion;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- GPR_ASSERT(grpc_cq_begin_op(cq, NULL));
- grpc_cq_end_op(&exec_ctx, cq, NULL, GRPC_ERROR_NONE,
- DoneWithCompletionOnStack, NULL, &completion);
+ GPR_ASSERT(grpc_cq_begin_op(cq, nullptr));
+ grpc_cq_end_op(&exec_ctx, cq, nullptr, GRPC_ERROR_NONE,
+ DoneWithCompletionOnStack, nullptr, &completion);
grpc_exec_ctx_finish(&exec_ctx);
- grpc_completion_queue_pluck(cq, NULL, deadline, NULL);
+ grpc_completion_queue_pluck(cq, nullptr, deadline, nullptr);
}
grpc_completion_queue_destroy(cq);
track_counters.Finish(state);
@@ -136,10 +135,10 @@ BENCHMARK(BM_Pluck1Core);
static void BM_EmptyCore(benchmark::State& state) {
TrackCounters track_counters;
// TODO: sreek Templatize this benchmark and pass polling_type as a param
- grpc_completion_queue* cq = grpc_completion_queue_create_for_next(NULL);
+ grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
gpr_timespec deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
while (state.KeepRunning()) {
- grpc_completion_queue_next(cq, deadline, NULL);
+ grpc_completion_queue_next(cq, deadline, nullptr);
}
grpc_completion_queue_destroy(cq);
track_counters.Finish(state);
diff --git a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
index 57a69acf01..7ccebb55ee 100644
--- a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
+++ b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
@@ -25,11 +25,9 @@
#include <grpc/support/log.h>
#include "test/cpp/microbenchmarks/helpers.h"
-extern "C" {
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/port.h"
#include "src/core/lib/surface/completion_queue.h"
-}
struct grpc_pollset {
gpr_mu mu;
@@ -82,7 +80,7 @@ static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
gpr_mu_unlock(&ps->mu);
GPR_ASSERT(grpc_cq_begin_op(g_cq, g_tag));
- grpc_cq_end_op(exec_ctx, g_cq, g_tag, GRPC_ERROR_NONE, cq_done_cb, NULL,
+ grpc_cq_end_op(exec_ctx, g_cq, g_tag, GRPC_ERROR_NONE, cq_done_cb, nullptr,
(grpc_cq_completion*)gpr_malloc(sizeof(grpc_cq_completion)));
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&ps->mu);
@@ -110,7 +108,7 @@ static void setup() {
g_old_vtable = grpc_get_event_engine_test_only();
grpc_set_event_engine_test_only(&g_vtable);
- g_cq = grpc_completion_queue_create_for_next(NULL);
+ g_cq = grpc_completion_queue_create_for_next(nullptr);
}
static void teardown() {
@@ -118,7 +116,7 @@ static void teardown() {
/* Drain any events */
gpr_timespec deadline = gpr_time_0(GPR_CLOCK_MONOTONIC);
- while (grpc_completion_queue_next(g_cq, deadline, NULL).type !=
+ while (grpc_completion_queue_next(g_cq, deadline, nullptr).type !=
GRPC_QUEUE_SHUTDOWN) {
/* Do nothing */
}
@@ -153,7 +151,7 @@ static void BM_Cq_Throughput(benchmark::State& state) {
}
while (state.KeepRunning()) {
- GPR_ASSERT(grpc_completion_queue_next(g_cq, deadline, NULL).type ==
+ GPR_ASSERT(grpc_completion_queue_next(g_cq, deadline, nullptr).type ==
GRPC_OP_COMPLETE);
}
diff --git a/test/cpp/microbenchmarks/bm_error.cc b/test/cpp/microbenchmarks/bm_error.cc
index 56b80dfcf6..bbd8b3c339 100644
--- a/test/cpp/microbenchmarks/bm_error.cc
+++ b/test/cpp/microbenchmarks/bm_error.cc
@@ -21,10 +21,8 @@
#include <benchmark/benchmark.h>
#include <memory>
-extern "C" {
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/transport/error_utils.h"
-}
#include "test/cpp/microbenchmarks/helpers.h"
@@ -253,7 +251,7 @@ static void BM_ErrorGetStatus(benchmark::State& state) {
grpc_status_code status;
grpc_slice slice;
grpc_error_get_status(&exec_ctx, fixture.error(), fixture.deadline(),
- &status, &slice, NULL);
+ &status, &slice, nullptr, nullptr);
}
grpc_exec_ctx_finish(&exec_ctx);
track_counters.Finish(state);
@@ -267,7 +265,7 @@ static void BM_ErrorGetStatusCode(benchmark::State& state) {
while (state.KeepRunning()) {
grpc_status_code status;
grpc_error_get_status(&exec_ctx, fixture.error(), fixture.deadline(),
- &status, NULL, NULL);
+ &status, nullptr, nullptr, nullptr);
}
grpc_exec_ctx_finish(&exec_ctx);
track_counters.Finish(state);
@@ -280,8 +278,8 @@ static void BM_ErrorHttpError(benchmark::State& state) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
grpc_http2_error_code error;
- grpc_error_get_status(&exec_ctx, fixture.error(), fixture.deadline(), NULL,
- NULL, &error);
+ grpc_error_get_status(&exec_ctx, fixture.error(), fixture.deadline(),
+ nullptr, nullptr, &error, nullptr);
}
grpc_exec_ctx_finish(&exec_ctx);
track_counters.Finish(state);
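
Note on the bm_error.cc hunks above: every call to grpc_error_get_status gains one trailing argument, which indicates the function picked up an additional out-parameter. Comparing the three benchmarks, the output positions are (status code, message slice, http2 error, new trailing out-parameter), and callers pass nullptr for whatever they do not need. An annotated fragment mirroring BM_ErrorGetStatusCode above; the name of the final parameter is not shown in this diff, so it is left unlabeled:

// Illustrative: request only the status code and pass nullptr for the rest,
// including the new trailing out-parameter.
grpc_status_code status;
grpc_error_get_status(&exec_ctx, fixture.error(), fixture.deadline(), &status,
                      /*slice=*/nullptr, /*http2_error=*/nullptr,
                      /*new out-param=*/nullptr);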
diff --git a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc
index 389b8c90ab..bb974fad50 100644
--- a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc
+++ b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc
@@ -21,6 +21,7 @@
#include <benchmark/benchmark.h>
#include <gflags/gflags.h>
#include <fstream>
+
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/lib/iomgr/timer_manager.h"
@@ -134,25 +135,26 @@ class TrickledCHTTP2 : public EndpointPairFixture {
? static_cast<grpc_chttp2_stream*>(server->stream_map.values[0])
: nullptr;
write_csv(
- log_.get(), static_cast<double>(now.tv_sec) +
- 1e-9 * static_cast<double>(now.tv_nsec),
+ log_.get(),
+ static_cast<double>(now.tv_sec) +
+ 1e-9 * static_cast<double>(now.tv_nsec),
iteration, grpc_trickle_get_backlog(endpoint_pair_.client),
grpc_trickle_get_backlog(endpoint_pair_.server),
client->lists[GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT].head != nullptr,
client->lists[GRPC_CHTTP2_LIST_STALLED_BY_STREAM].head != nullptr,
server->lists[GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT].head != nullptr,
server->lists[GRPC_CHTTP2_LIST_STALLED_BY_STREAM].head != nullptr,
- client->flow_control->remote_window(),
- server->flow_control->remote_window(),
- client->flow_control->announced_window(),
- server->flow_control->announced_window(),
- client_stream ? client_stream->flow_control->remote_window_delta() : -1,
- server_stream ? server_stream->flow_control->remote_window_delta() : -1,
- client_stream ? client_stream->flow_control->local_window_delta() : -1,
- server_stream ? server_stream->flow_control->local_window_delta() : -1,
- client_stream ? client_stream->flow_control->announced_window_delta()
+ client->flow_control->remote_window_,
+ server->flow_control->remote_window_,
+ client->flow_control->announced_window_,
+ server->flow_control->announced_window_,
+ client_stream ? client_stream->flow_control->remote_window_delta_ : -1,
+ server_stream ? server_stream->flow_control->remote_window_delta_ : -1,
+ client_stream ? client_stream->flow_control->local_window_delta_ : -1,
+ server_stream ? server_stream->flow_control->local_window_delta_ : -1,
+ client_stream ? client_stream->flow_control->announced_window_delta_
: -1,
- server_stream ? server_stream->flow_control->announced_window_delta()
+ server_stream ? server_stream->flow_control->announced_window_delta_
: -1,
client->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
@@ -214,10 +216,10 @@ class TrickledCHTTP2 : public EndpointPairFixture {
void UpdateStats(grpc_chttp2_transport* t, Stats* s,
size_t backlog) GPR_ATTRIBUTE_NO_TSAN {
if (backlog == 0) {
- if (t->lists[GRPC_CHTTP2_LIST_STALLED_BY_STREAM].head != NULL) {
+ if (t->lists[GRPC_CHTTP2_LIST_STALLED_BY_STREAM].head != nullptr) {
s->streams_stalled_due_to_stream_flow_control++;
}
- if (t->lists[GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT].head != NULL) {
+ if (t->lists[GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT].head != nullptr) {
s->streams_stalled_due_to_transport_flow_control++;
}
}
@@ -440,8 +442,8 @@ static void UnaryTrickleArgs(benchmark::internal::Benchmark* b) {
}
}
BENCHMARK(BM_PumpUnbalancedUnary_Trickle)->Apply(UnaryTrickleArgs);
-}
-}
+} // namespace testing
+} // namespace grpc
extern "C" gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type);
diff --git a/test/cpp/microbenchmarks/bm_metadata.cc b/test/cpp/microbenchmarks/bm_metadata.cc
index 360bbabe13..73bce08466 100644
--- a/test/cpp/microbenchmarks/bm_metadata.cc
+++ b/test/cpp/microbenchmarks/bm_metadata.cc
@@ -21,10 +21,8 @@
#include <benchmark/benchmark.h>
#include <grpc/grpc.h>
-extern "C" {
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/static_metadata.h"
-}
#include "test/cpp/microbenchmarks/helpers.h"
@@ -94,7 +92,7 @@ static void BM_MetadataFromNonInternedSlices(benchmark::State& state) {
gpr_slice v = grpc_slice_from_static_string("value");
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
- GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, NULL));
+ GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, nullptr));
}
grpc_exec_ctx_finish(&exec_ctx);
track_counters.Finish(state);
@@ -107,7 +105,7 @@ static void BM_MetadataFromInternedSlices(benchmark::State& state) {
gpr_slice v = grpc_slice_intern(grpc_slice_from_static_string("value"));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
- GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, NULL));
+ GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, nullptr));
}
grpc_exec_ctx_finish(&exec_ctx);
grpc_slice_unref(k);
@@ -122,9 +120,9 @@ static void BM_MetadataFromInternedSlicesAlreadyInIndex(
gpr_slice k = grpc_slice_intern(grpc_slice_from_static_string("key"));
gpr_slice v = grpc_slice_intern(grpc_slice_from_static_string("value"));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_mdelem seed = grpc_mdelem_create(&exec_ctx, k, v, NULL);
+ grpc_mdelem seed = grpc_mdelem_create(&exec_ctx, k, v, nullptr);
while (state.KeepRunning()) {
- GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, NULL));
+ GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, nullptr));
}
GRPC_MDELEM_UNREF(&exec_ctx, seed);
grpc_exec_ctx_finish(&exec_ctx);
@@ -140,7 +138,7 @@ static void BM_MetadataFromInternedKey(benchmark::State& state) {
gpr_slice v = grpc_slice_from_static_string("value");
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
- GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, NULL));
+ GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, nullptr));
}
grpc_exec_ctx_finish(&exec_ctx);
grpc_slice_unref(k);
@@ -211,7 +209,7 @@ static void BM_MetadataFromStaticMetadataStrings(benchmark::State& state) {
gpr_slice v = GRPC_MDSTR_200;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
- GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, NULL));
+ GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, nullptr));
}
grpc_exec_ctx_finish(&exec_ctx);
grpc_slice_unref(k);
@@ -226,7 +224,7 @@ static void BM_MetadataFromStaticMetadataStringsNotIndexed(
gpr_slice v = GRPC_MDSTR_GZIP;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
- GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, NULL));
+ GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, nullptr));
}
grpc_exec_ctx_finish(&exec_ctx);
grpc_slice_unref(k);
@@ -275,7 +273,7 @@ static void BM_MetadataRefUnrefAllocated(benchmark::State& state) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_mdelem el =
grpc_mdelem_create(&exec_ctx, grpc_slice_from_static_string("a"),
- grpc_slice_from_static_string("b"), NULL);
+ grpc_slice_from_static_string("b"), nullptr);
while (state.KeepRunning()) {
GRPC_MDELEM_UNREF(&exec_ctx, GRPC_MDELEM_REF(el));
}
@@ -289,7 +287,7 @@ static void BM_MetadataRefUnrefStatic(benchmark::State& state) {
TrackCounters track_counters;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_mdelem el =
- grpc_mdelem_create(&exec_ctx, GRPC_MDSTR_STATUS, GRPC_MDSTR_200, NULL);
+ grpc_mdelem_create(&exec_ctx, GRPC_MDSTR_STATUS, GRPC_MDSTR_200, nullptr);
while (state.KeepRunning()) {
GRPC_MDELEM_UNREF(&exec_ctx, GRPC_MDELEM_REF(el));
}
diff --git a/test/cpp/microbenchmarks/bm_pollset.cc b/test/cpp/microbenchmarks/bm_pollset.cc
index eab1e89480..4da79693f1 100644
--- a/test/cpp/microbenchmarks/bm_pollset.cc
+++ b/test/cpp/microbenchmarks/bm_pollset.cc
@@ -23,12 +23,10 @@
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
-extern "C" {
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/port.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-}
#include "test/cpp/microbenchmarks/helpers.h"
#include "third_party/benchmark/include/benchmark/benchmark.h"
@@ -119,7 +117,7 @@ static void BM_PollEmptyPollset(benchmark::State& state) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(mu);
while (state.KeepRunning()) {
- GRPC_ERROR_UNREF(grpc_pollset_work(&exec_ctx, ps, NULL, 0));
+ GRPC_ERROR_UNREF(grpc_pollset_work(&exec_ctx, ps, nullptr, 0));
}
grpc_closure shutdown_ps_closure;
GRPC_CLOSURE_INIT(&shutdown_ps_closure, shutdown_ps, ps,
@@ -147,7 +145,8 @@ static void BM_PollAddFd(benchmark::State& state) {
grpc_pollset_add_fd(&exec_ctx, ps, fd);
grpc_exec_ctx_flush(&exec_ctx);
}
- grpc_fd_orphan(&exec_ctx, fd, NULL, NULL, false /* already_closed */, "xxx");
+ grpc_fd_orphan(&exec_ctx, fd, nullptr, nullptr, false /* already_closed */,
+ "xxx");
grpc_closure shutdown_ps_closure;
GRPC_CLOSURE_INIT(&shutdown_ps_closure, shutdown_ps, ps,
grpc_schedule_on_exec_ctx);
@@ -242,10 +241,10 @@ static void BM_SingleThreadPollOneFd(benchmark::State& state) {
gpr_mu_lock(mu);
while (!done) {
GRPC_ERROR_UNREF(
- grpc_pollset_work(&exec_ctx, ps, NULL, GRPC_MILLIS_INF_FUTURE));
+ grpc_pollset_work(&exec_ctx, ps, nullptr, GRPC_MILLIS_INF_FUTURE));
}
- grpc_fd_orphan(&exec_ctx, wakeup, NULL, NULL, false /* already_closed */,
- "done");
+ grpc_fd_orphan(&exec_ctx, wakeup, nullptr, nullptr,
+ false /* already_closed */, "done");
wakeup_fd.read_fd = 0;
grpc_closure shutdown_ps_closure;
GRPC_CLOSURE_INIT(&shutdown_ps_closure, shutdown_ps, ps,
diff --git a/test/cpp/microbenchmarks/fullstack_fixtures.h b/test/cpp/microbenchmarks/fullstack_fixtures.h
index a7f8504505..7f1aa48b56 100644
--- a/test/cpp/microbenchmarks/fullstack_fixtures.h
+++ b/test/cpp/microbenchmarks/fullstack_fixtures.h
@@ -25,9 +25,9 @@
#include <grpc++/security/server_credentials.h>
#include <grpc++/server.h>
#include <grpc++/server_builder.h>
+#include <grpc/support/atm.h>
#include <grpc/support/log.h>
-extern "C" {
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/endpoint.h"
@@ -39,7 +39,6 @@ extern "C" {
#include "src/core/lib/surface/server.h"
#include "test/core/util/passthru_endpoint.h"
#include "test/core/util/port.h"
-}
#include "src/cpp/client/create_channel_internal.h"
#include "test/cpp/microbenchmarks/helpers.h"
@@ -186,8 +185,9 @@ class EndpointPairFixture : public BaseFixture {
}
grpc_server_setup_transport(&exec_ctx, server_->c_server(),
- server_transport_, NULL, server_args);
- grpc_chttp2_transport_start_reading(&exec_ctx, server_transport_, NULL);
+ server_transport_, nullptr, server_args);
+ grpc_chttp2_transport_start_reading(&exec_ctx, server_transport_,
+ nullptr);
}
/* create channel */
@@ -203,7 +203,8 @@ class EndpointPairFixture : public BaseFixture {
grpc_channel* channel =
grpc_channel_create(&exec_ctx, "target", &c_args,
GRPC_CLIENT_DIRECT_CHANNEL, client_transport_);
- grpc_chttp2_transport_start_reading(&exec_ctx, client_transport_, NULL);
+ grpc_chttp2_transport_start_reading(&exec_ctx, client_transport_,
+ nullptr);
channel_ = CreateChannelInternal("", channel);
}
@@ -245,7 +246,7 @@ class SockPair : public EndpointPairFixture {
SockPair(Service* service, const FixtureConfiguration& fixture_configuration =
FixtureConfiguration())
: EndpointPairFixture(service,
- grpc_iomgr_create_endpoint_pair("test", NULL),
+ grpc_iomgr_create_endpoint_pair("test", nullptr),
fixture_configuration) {}
};
@@ -259,7 +260,8 @@ class InProcessCHTTP2 : public EndpointPairFixture {
void AddToLabel(std::ostream& out, benchmark::State& state) {
EndpointPairFixture::AddToLabel(out, state);
out << " writes/iter:"
- << (double)stats_.num_writes / (double)state.iterations();
+ << static_cast<double>(gpr_atm_no_barrier_load(&stats_.num_writes)) /
+ static_cast<double>(state.iterations());
}
private:
diff --git a/test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h b/test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h
index 6df044f344..0763d07855 100644
--- a/test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h
+++ b/test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h
@@ -278,7 +278,7 @@ static void BM_StreamingPingPongWithCoalescingApi(benchmark::State& state) {
void* t;
bool ok;
- int need_tags;
+ int expect_tags = 0;
// Send 'max_ping_pongs' number of ping pong messages
int ping_pong_cnt = 0;
@@ -289,7 +289,7 @@ static void BM_StreamingPingPongWithCoalescingApi(benchmark::State& state) {
request_rw->Write(send_request, tag(2)); // Start client send
}
- need_tags = (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5);
+ int await_tags = (1 << 2);
if (ping_pong_cnt == 0) {
// wait for the server call structure (call_hook, etc.) to be
@@ -301,8 +301,8 @@ static void BM_StreamingPingPongWithCoalescingApi(benchmark::State& state) {
// In some cases tag:2 comes before tag:0 (write tag comes out
// first), this while loop is to make sure get tag:0.
int i = (int)(intptr_t)t;
- GPR_ASSERT(need_tags & (1 << i));
- need_tags &= ~(1 << i);
+ GPR_ASSERT(await_tags & (1 << i));
+ await_tags &= ~(1 << i);
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
}
}
@@ -310,7 +310,11 @@ static void BM_StreamingPingPongWithCoalescingApi(benchmark::State& state) {
response_rw.Read(&recv_request, tag(3)); // Start server recv
request_rw->Read(&recv_response, tag(4)); // Start client recv
- while (need_tags) {
+ await_tags |= (1 << 3) | (1 << 4);
+ expect_tags = await_tags;
+ await_tags |= (1 << 5);
+
+ while (await_tags != 0) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
GPR_ASSERT(ok);
int i = (int)(intptr_t)t;
@@ -321,34 +325,39 @@ static void BM_StreamingPingPongWithCoalescingApi(benchmark::State& state) {
if (write_and_finish == 1) {
response_rw.WriteAndFinish(send_response, WriteOptions(),
Status::OK, tag(5));
+ expect_tags |= (1 << 5);
} else {
response_rw.WriteLast(send_response, WriteOptions(), tag(5));
- // WriteLast buffers the write, so neither server write op nor
- // client read op will finish inside the while loop.
- need_tags &= ~(1 << 4);
- need_tags &= ~(1 << 5);
+ // WriteLast buffers the write, so it's possible neither server
+ // write op nor client read op will finish inside the while
+ // loop.
+ await_tags &= ~(1 << 4);
+ await_tags &= ~(1 << 5);
+ expect_tags |= (1 << 5);
}
} else {
response_rw.Write(send_response, tag(5));
+ expect_tags |= (1 << 5);
}
}
- GPR_ASSERT(need_tags & (1 << i));
- need_tags &= ~(1 << i);
+ GPR_ASSERT(expect_tags & (1 << i));
+ expect_tags &= ~(1 << i);
+ await_tags &= ~(1 << i);
}
ping_pong_cnt++;
}
if (max_ping_pongs == 0) {
- need_tags = (1 << 6) | (1 << 7) | (1 << 8);
+ expect_tags |= (1 << 6) | (1 << 7) | (1 << 8);
} else {
if (write_and_finish == 1) {
- need_tags = (1 << 8);
+ expect_tags |= (1 << 8);
} else {
// server's buffered write and the client's read of the buffered write
// tags should come up.
- need_tags = (1 << 4) | (1 << 5) | (1 << 7) | (1 << 8);
+ expect_tags |= (1 << 7) | (1 << 8);
}
}
@@ -360,8 +369,8 @@ static void BM_StreamingPingPongWithCoalescingApi(benchmark::State& state) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
while ((int)(intptr_t)t != 0) {
int i = (int)(intptr_t)t;
- GPR_ASSERT(need_tags & (1 << i));
- need_tags &= ~(1 << i);
+ GPR_ASSERT(expect_tags & (1 << i));
+ expect_tags &= ~(1 << i);
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
}
response_rw.Finish(Status::OK, tag(7));
@@ -374,11 +383,11 @@ static void BM_StreamingPingPongWithCoalescingApi(benchmark::State& state) {
Status recv_status;
request_rw->Finish(&recv_status, tag(8));
- while (need_tags) {
+ while (expect_tags) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
int i = (int)(intptr_t)t;
- GPR_ASSERT(need_tags & (1 << i));
- need_tags &= ~(1 << i);
+ GPR_ASSERT(expect_tags & (1 << i));
+ expect_tags &= ~(1 << i);
}
GPR_ASSERT(recv_status.ok());
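
The rework above replaces the single `need_tags` bitmask with two masks: `await_tags`, the completions the loop must drain before continuing, and `expect_tags`, the completions that are allowed to arrive later (for example a buffered `WriteLast`). The underlying pattern is draining expected completion-queue tags tracked as bits; here is a minimal sketch, assuming a hypothetical `DrainTags` helper (`CompletionQueue::Next` and `GPR_ASSERT` are the real APIs the benchmark uses):

```
// Sketch of the tag-bitmask pattern used above; DrainTags is a made-up
// helper, CompletionQueue::Next and GPR_ASSERT are the real APIs.
#include <grpc++/grpc++.h>
#include <grpc/support/log.h>

#include <cstdint>

// Blocks until every tag bit in `await_tags` has completed successfully.
// Any completion whose bit is not set is treated as a bug.
void DrainTags(grpc::CompletionQueue* cq, int await_tags) {
  while (await_tags != 0) {
    void* got_tag;
    bool ok;
    GPR_ASSERT(cq->Next(&got_tag, &ok));
    GPR_ASSERT(ok);
    int i = static_cast<int>(reinterpret_cast<intptr_t>(got_tag));
    GPR_ASSERT(await_tags & (1 << i));  // must be a completion we asked for
    await_tags &= ~(1 << i);
  }
}
```

The benchmark layers `expect_tags` on top of this: each completion is asserted against the larger `expect_tags` mask, while the loop only blocks until `await_tags` is empty, so a buffered write is permitted to complete in a later drain.
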
diff --git a/test/cpp/microbenchmarks/helpers.cc b/test/cpp/microbenchmarks/helpers.cc
index 6802a0aa99..a4c0a3a0ce 100644
--- a/test/cpp/microbenchmarks/helpers.cc
+++ b/test/cpp/microbenchmarks/helpers.cc
@@ -16,11 +16,13 @@
*
*/
+#include <string.h>
+
#include "test/cpp/microbenchmarks/helpers.h"
-void TrackCounters::Finish(benchmark::State &state) {
+void TrackCounters::Finish(benchmark::State& state) {
std::ostringstream out;
- for (const auto &l : labels_) {
+ for (const auto& l : labels_) {
out << l << ' ';
}
AddToLabel(out, state);
@@ -31,11 +33,11 @@ void TrackCounters::Finish(benchmark::State &state) {
state.SetLabel(label.c_str());
}
-void TrackCounters::AddLabel(const grpc::string &label) {
+void TrackCounters::AddLabel(const grpc::string& label) {
labels_.push_back(label);
}
-void TrackCounters::AddToLabel(std::ostream &out, benchmark::State &state) {
+void TrackCounters::AddToLabel(std::ostream& out, benchmark::State& state) {
grpc_stats_data stats_end;
grpc_stats_collect(&stats_end);
grpc_stats_data stats;
@@ -45,16 +47,21 @@ void TrackCounters::AddToLabel(std::ostream &out, benchmark::State &state) {
<< "/iter:" << ((double)stats.counters[i] / (double)state.iterations());
}
for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) {
- out << " " << grpc_stats_histogram_name[i] << "-median:"
- << grpc_stats_histo_percentile(&stats, (grpc_stats_histograms)i, 50.0)
- << " " << grpc_stats_histogram_name[i] << "-99p:"
- << grpc_stats_histo_percentile(&stats, (grpc_stats_histograms)i, 99.0);
+ std::ostringstream median_ss;
+ median_ss << grpc_stats_histogram_name[i] << "-median";
+ state.counters[median_ss.str()] = benchmark::Counter(
+ grpc_stats_histo_percentile(&stats, (grpc_stats_histograms)i, 50.0));
+ std::ostringstream tail_ss;
+ tail_ss << grpc_stats_histogram_name[i] << "-99p";
+ state.counters[tail_ss.str()] = benchmark::Counter(
+ grpc_stats_histo_percentile(&stats, (grpc_stats_histograms)i, 99.0));
}
#ifdef GPR_LOW_LEVEL_COUNTERS
grpc_memory_counters counters_at_end = grpc_memory_counters_snapshot();
- out << " locks/iter:" << ((double)(gpr_atm_no_barrier_load(&gpr_mu_locks) -
- mu_locks_at_start_) /
- (double)state.iterations())
+ out << " locks/iter:"
+ << ((double)(gpr_atm_no_barrier_load(&gpr_mu_locks) -
+ mu_locks_at_start_) /
+ (double)state.iterations())
<< " atm_cas/iter:"
<< ((double)(gpr_atm_no_barrier_load(&gpr_counter_atm_cas) -
atm_cas_at_start_) /
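
The helpers.cc change stops packing the histogram percentiles into the free-form label string and instead reports them through `state.counters`, google/benchmark's per-run counter map. A minimal sketch of that API, assuming a made-up `BM_Sketch` benchmark and counter names (the `benchmark::Counter` calls are the real library interface):

```
// Minimal sketch of the benchmark counters API the diff above switches to.
// BM_Sketch and the counter names are invented for illustration.
#include <benchmark/benchmark.h>

static void BM_Sketch(benchmark::State& state) {
  double work_done = 0;
  while (state.KeepRunning()) {
    work_done += 1.0;  // stand-in for the measured operation
  }
  // Counters appear as named columns in the benchmark output rather than
  // being appended to the label, which is what the diff does for the gRPC
  // stats histogram percentiles.
  state.counters["work"] = benchmark::Counter(work_done);
  state.counters["work_rate"] =
      benchmark::Counter(work_done, benchmark::Counter::kIsRate);
}
BENCHMARK(BM_Sketch);
BENCHMARK_MAIN();
```
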
diff --git a/test/cpp/microbenchmarks/helpers.h b/test/cpp/microbenchmarks/helpers.h
index b6cea7c317..07be589df6 100644
--- a/test/cpp/microbenchmarks/helpers.h
+++ b/test/cpp/microbenchmarks/helpers.h
@@ -22,11 +22,9 @@
#include <sstream>
#include <vector>
-extern "C" {
#include <grpc/support/port_platform.h>
#include "src/core/lib/debug/stats.h"
#include "test/core/util/memory_counters.h"
-}
#include <benchmark/benchmark.h>
#include <grpc++/impl/grpc_library.h>
diff --git a/test/cpp/naming/README.md b/test/cpp/naming/README.md
index e33184620c..e0dd208465 100644
--- a/test/cpp/naming/README.md
+++ b/test/cpp/naming/README.md
@@ -31,7 +31,7 @@ After making a change to `resolver_test_record_groups.yaml`:
3. From the repo root, run:
```
-$ test/cpp/naming/create_dns_private_zone.sh
+$ test/cpp/naming/create_private_dns_zone.sh
$ test/cpp/naming/private_dns_zone_init.sh
```
diff --git a/test/cpp/naming/create_private_dns_zone.sh b/test/cpp/naming/create_private_dns_zone.sh
index 3d7520b90a..55a4cfe36e 100755
--- a/test/cpp/naming/create_private_dns_zone.sh
+++ b/test/cpp/naming/create_private_dns_zone.sh
@@ -20,8 +20,8 @@ set -ex
cd $(dirname $0)/../../..
gcloud alpha dns managed-zones create \
- resolver-tests-version-1-grpctestingexp-zone-id \
- --dns-name=resolver-tests-version-1.grpctestingexp. \
+ resolver-tests-version-4-grpctestingexp-zone-id \
+ --dns-name=resolver-tests-version-4.grpctestingexp. \
--description="GCE-DNS-private-zone-for-GRPC-testing" \
--visibility=private \
--networks=default
diff --git a/test/cpp/naming/private_dns_zone_init.sh b/test/cpp/naming/private_dns_zone_init.sh
index 4eaf750ab7..8fa5a8a475 100755
--- a/test/cpp/naming/private_dns_zone_init.sh
+++ b/test/cpp/naming/private_dns_zone_init.sh
@@ -19,197 +19,197 @@ set -ex
cd $(dirname $0)/../../..
-gcloud dns record-sets transaction start -z=resolver-tests-version-1-grpctestingexp-zone-id
+gcloud dns record-sets transaction start -z=resolver-tests-version-4-grpctestingexp-zone-id
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=_grpclb._tcp.srv-ipv4-single-target.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=_grpclb._tcp.srv-ipv4-single-target.resolver-tests-version-4.grpctestingexp. \
--type=SRV \
--ttl=2100 \
- "0 0 1234 ipv4-single-target.resolver-tests-version-1.grpctestingexp."
+ "0 0 1234 ipv4-single-target.resolver-tests-version-4.grpctestingexp."
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=ipv4-single-target.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=ipv4-single-target.resolver-tests-version-4.grpctestingexp. \
--type=A \
--ttl=2100 \
"1.2.3.4"
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=_grpclb._tcp.srv-ipv4-multi-target.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=_grpclb._tcp.srv-ipv4-multi-target.resolver-tests-version-4.grpctestingexp. \
--type=SRV \
--ttl=2100 \
- "0 0 1234 ipv4-multi-target.resolver-tests-version-1.grpctestingexp."
+ "0 0 1234 ipv4-multi-target.resolver-tests-version-4.grpctestingexp."
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=ipv4-multi-target.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=ipv4-multi-target.resolver-tests-version-4.grpctestingexp. \
--type=A \
--ttl=2100 \
"1.2.3.5" "1.2.3.6" "1.2.3.7"
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=_grpclb._tcp.srv-ipv6-single-target.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=_grpclb._tcp.srv-ipv6-single-target.resolver-tests-version-4.grpctestingexp. \
--type=SRV \
--ttl=2100 \
- "0 0 1234 ipv6-single-target.resolver-tests-version-1.grpctestingexp."
+ "0 0 1234 ipv6-single-target.resolver-tests-version-4.grpctestingexp."
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=ipv6-single-target.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=ipv6-single-target.resolver-tests-version-4.grpctestingexp. \
--type=AAAA \
--ttl=2100 \
"2607:f8b0:400a:801::1001"
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=_grpclb._tcp.srv-ipv6-multi-target.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=_grpclb._tcp.srv-ipv6-multi-target.resolver-tests-version-4.grpctestingexp. \
--type=SRV \
--ttl=2100 \
- "0 0 1234 ipv6-multi-target.resolver-tests-version-1.grpctestingexp."
+ "0 0 1234 ipv6-multi-target.resolver-tests-version-4.grpctestingexp."
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=ipv6-multi-target.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=ipv6-multi-target.resolver-tests-version-4.grpctestingexp. \
--type=AAAA \
--ttl=2100 \
"2607:f8b0:400a:801::1002" "2607:f8b0:400a:801::1003" "2607:f8b0:400a:801::1004"
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=_grpc_config.srv-ipv4-simple-service-config.resolver-tests-version-4.grpctestingexp. \
--type=TXT \
--ttl=2100 \
'"grpc_config=[{\"serviceConfig\":{\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"SimpleService\",\"waitForReady\":true}]}]}}]"'
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. \
- --type=A \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=_grpclb._tcp.srv-ipv4-simple-service-config.resolver-tests-version-4.grpctestingexp. \
+ --type=SRV \
--ttl=2100 \
- "1.2.3.4"
+ "0 0 1234 ipv4-simple-service-config.resolver-tests-version-4.grpctestingexp."
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=_grpclb._tcp.srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. \
- --type=SRV \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=ipv4-simple-service-config.resolver-tests-version-4.grpctestingexp. \
+ --type=A \
--ttl=2100 \
- "0 0 1234 ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp."
+ "1.2.3.4"
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=ipv4-no-srv-simple-service-config.resolver-tests-version-4.grpctestingexp. \
--type=A \
--ttl=2100 \
"1.2.3.4"
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=_grpc_config.ipv4-no-srv-simple-service-config.resolver-tests-version-4.grpctestingexp. \
--type=TXT \
--ttl=2100 \
'"grpc_config=[{\"serviceConfig\":{\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"NoSrvSimpleService\",\"waitForReady\":true}]}]}}]"'
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp. \
- --type=A \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=_grpc_config.ipv4-no-config-for-cpp.resolver-tests-version-4.grpctestingexp. \
+ --type=TXT \
--ttl=2100 \
- "1.2.3.4"
+ '"grpc_config=[{\"clientLanguage\":[\"python\"],\"serviceConfig\":{\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"PythonService\",\"waitForReady\":true}]}]}}]"'
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp. \
- --type=TXT \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=ipv4-no-config-for-cpp.resolver-tests-version-4.grpctestingexp. \
+ --type=A \
--ttl=2100 \
- '"grpc_config=[{\"clientLanguage\":[\"python\"],\"serviceConfig\":{\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"PythonService\",\"waitForReady\":true}]}]}}]"'
+ "1.2.3.4"
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=ipv4-cpp-config-has-zero-percentage.resolver-tests-version-4.grpctestingexp. \
--type=A \
--ttl=2100 \
"1.2.3.4"
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=_grpc_config.ipv4-cpp-config-has-zero-percentage.resolver-tests-version-4.grpctestingexp. \
--type=TXT \
--ttl=2100 \
'"grpc_config=[{\"percentage\":0,\"serviceConfig\":{\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"CppService\",\"waitForReady\":true}]}]}}]"'
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp. \
- --type=A \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=_grpc_config.ipv4-second-language-is-cpp.resolver-tests-version-4.grpctestingexp. \
+ --type=TXT \
--ttl=2100 \
- "1.2.3.4"
+ '"grpc_config=[{\"clientLanguage\":[\"go\"],\"serviceConfig\":{\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"GoService\",\"waitForReady\":true}]}]}},{\"clientLanguage\":[\"c++\"],\"serviceConfig\":{" "\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"CppService\",\"waitForReady\":true}]}]}}]"'
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp. \
- --type=TXT \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=ipv4-second-language-is-cpp.resolver-tests-version-4.grpctestingexp. \
+ --type=A \
--ttl=2100 \
- '"grpc_config=[{\"clientLanguage\":[\"go\"],\"serviceConfig\":{\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"GoService\",\"waitForReady\":true}]}]}},{\"clientLanguage\":[\"c++\"],\"serviceConfig\":{" "\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"CppService\",\"waitForReady\":true}]}]}}]"'
+ "1.2.3.4"
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=ipv4-config-with-percentages.resolver-tests-version-4.grpctestingexp. \
--type=A \
--ttl=2100 \
"1.2.3.4"
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=_grpc_config.ipv4-config-with-percentages.resolver-tests-version-4.grpctestingexp. \
--type=TXT \
--ttl=2100 \
'"grpc_config=[{\"percentage\":0,\"serviceConfig\":{\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"NeverPickedService\",\"waitForReady\":true}]}]}},{\"percentage\":100,\"serviceConfig\":{\"loadBalanc" "ingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"AlwaysPickedService\",\"waitForReady\":true}]}]}}]"'
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=_grpclb._tcp.srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=_grpclb._tcp.srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. \
--type=SRV \
--ttl=2100 \
- "0 0 1234 balancer-for-ipv4-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp."
+ "0 0 1234 balancer-for-ipv4-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp."
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=balancer-for-ipv4-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=balancer-for-ipv4-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. \
--type=A \
--ttl=2100 \
"1.2.3.4"
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. \
--type=A \
--ttl=2100 \
"1.2.3.4"
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=_grpclb._tcp.srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=_grpclb._tcp.srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. \
--type=SRV \
--ttl=2100 \
- "0 0 1234 balancer-for-ipv6-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp."
+ "0 0 1234 balancer-for-ipv6-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp."
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=balancer-for-ipv6-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=balancer-for-ipv6-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. \
--type=AAAA \
--ttl=2100 \
"2607:f8b0:400a:801::1002"
gcloud dns record-sets transaction add \
- -z=resolver-tests-version-1-grpctestingexp-zone-id \
- --name=srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. \
+ -z=resolver-tests-version-4-grpctestingexp-zone-id \
+ --name=srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. \
--type=AAAA \
--ttl=2100 \
"2607:f8b0:400a:801::1002"
-gcloud dns record-sets transaction describe -z=resolver-tests-version-1-grpctestingexp-zone-id
-gcloud dns record-sets transaction execute -z=resolver-tests-version-1-grpctestingexp-zone-id
-gcloud dns record-sets list -z=resolver-tests-version-1-grpctestingexp-zone-id
+gcloud dns record-sets transaction describe -z=resolver-tests-version-4-grpctestingexp-zone-id
+gcloud dns record-sets transaction execute -z=resolver-tests-version-4-grpctestingexp-zone-id
+gcloud dns record-sets list -z=resolver-tests-version-4-grpctestingexp-zone-id
diff --git a/test/cpp/naming/resolver_component_test.cc b/test/cpp/naming/resolver_component_test.cc
index 7d0371bea4..6f1f0c44b9 100644
--- a/test/cpp/naming/resolver_component_test.cc
+++ b/test/cpp/naming/resolver_component_test.cc
@@ -32,7 +32,6 @@
#include "test/cpp/util/subprocess.h"
#include "test/cpp/util/test_config.h"
-extern "C" {
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/client_channel/resolver.h"
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h"
@@ -47,10 +46,9 @@ extern "C" {
#include "src/core/lib/support/string.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
-}
-using std::vector;
using grpc::SubProcess;
+using std::vector;
using testing::UnorderedElementsAreArray;
// Hack copied from "test/cpp/end2end/server_crash_test_client.cc"!
@@ -85,12 +83,12 @@ class GrpcLBAddress final {
GrpcLBAddress(std::string address, bool is_balancer)
: is_balancer(is_balancer), address(address) {}
- bool operator==(const GrpcLBAddress &other) const {
+ bool operator==(const GrpcLBAddress& other) const {
return this->is_balancer == other.is_balancer &&
this->address == other.address;
}
- bool operator!=(const GrpcLBAddress &other) const {
+ bool operator!=(const GrpcLBAddress& other) const {
return !(*this == other);
}
@@ -141,35 +139,36 @@ gpr_timespec TestDeadline(void) {
struct ArgsStruct {
gpr_event ev;
gpr_atm done_atm;
- gpr_mu *mu;
- grpc_pollset *pollset;
- grpc_pollset_set *pollset_set;
- grpc_combiner *lock;
- grpc_channel_args *channel_args;
+ gpr_mu* mu;
+ grpc_pollset* pollset;
+ grpc_pollset_set* pollset_set;
+ grpc_combiner* lock;
+ grpc_channel_args* channel_args;
vector<GrpcLBAddress> expected_addrs;
std::string expected_service_config_string;
std::string expected_lb_policy;
};
-void ArgsInit(grpc_exec_ctx *exec_ctx, ArgsStruct *args) {
+void ArgsInit(grpc_exec_ctx* exec_ctx, ArgsStruct* args) {
gpr_event_init(&args->ev);
- args->pollset = (grpc_pollset *)gpr_zalloc(grpc_pollset_size());
+ args->pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
grpc_pollset_init(args->pollset, &args->mu);
args->pollset_set = grpc_pollset_set_create();
grpc_pollset_set_add_pollset(exec_ctx, args->pollset_set, args->pollset);
args->lock = grpc_combiner_create();
gpr_atm_rel_store(&args->done_atm, 0);
- args->channel_args = NULL;
+ args->channel_args = nullptr;
}
-void DoNothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}
+void DoNothing(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {}
-void ArgsFinish(grpc_exec_ctx *exec_ctx, ArgsStruct *args) {
+void ArgsFinish(grpc_exec_ctx* exec_ctx, ArgsStruct* args) {
GPR_ASSERT(gpr_event_wait(&args->ev, TestDeadline()));
grpc_pollset_set_del_pollset(exec_ctx, args->pollset_set, args->pollset);
grpc_pollset_set_destroy(exec_ctx, args->pollset_set);
grpc_closure DoNothing_cb;
- GRPC_CLOSURE_INIT(&DoNothing_cb, DoNothing, NULL, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&DoNothing_cb, DoNothing, nullptr,
+ grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(exec_ctx, args->pollset, &DoNothing_cb);
// exec_ctx needs to be flushed before calling grpc_pollset_destroy()
grpc_channel_args_destroy(exec_ctx, args->channel_args);
@@ -184,7 +183,7 @@ gpr_timespec NSecondDeadline(int seconds) {
gpr_time_from_seconds(seconds, GPR_TIMESPAN));
}
-void PollPollsetUntilRequestDone(ArgsStruct *args) {
+void PollPollsetUntilRequestDone(ArgsStruct* args) {
gpr_timespec deadline = NSecondDeadline(10);
while (true) {
bool done = gpr_atm_acq_load(&args->done_atm) != 0;
@@ -196,7 +195,7 @@ void PollPollsetUntilRequestDone(ArgsStruct *args) {
gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64 ".%09d", done,
time_left.tv_sec, time_left.tv_nsec);
GPR_ASSERT(gpr_time_cmp(time_left, gpr_time_0(GPR_TIMESPAN)) >= 0);
- grpc_pollset_worker *worker = NULL;
+ grpc_pollset_worker* worker = nullptr;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(args->mu);
GRPC_LOG_IF_ERROR("pollset_work",
@@ -206,53 +205,53 @@ void PollPollsetUntilRequestDone(ArgsStruct *args) {
gpr_mu_unlock(args->mu);
grpc_exec_ctx_finish(&exec_ctx);
}
- gpr_event_set(&args->ev, (void *)1);
+ gpr_event_set(&args->ev, (void*)1);
}
-void CheckServiceConfigResultLocked(grpc_channel_args *channel_args,
- ArgsStruct *args) {
- const grpc_arg *service_config_arg =
+void CheckServiceConfigResultLocked(grpc_channel_args* channel_args,
+ ArgsStruct* args) {
+ const grpc_arg* service_config_arg =
grpc_channel_args_find(channel_args, GRPC_ARG_SERVICE_CONFIG);
if (args->expected_service_config_string != "") {
- GPR_ASSERT(service_config_arg != NULL);
+ GPR_ASSERT(service_config_arg != nullptr);
GPR_ASSERT(service_config_arg->type == GRPC_ARG_STRING);
EXPECT_EQ(service_config_arg->value.string,
args->expected_service_config_string);
} else {
- GPR_ASSERT(service_config_arg == NULL);
+ GPR_ASSERT(service_config_arg == nullptr);
}
}
-void CheckLBPolicyResultLocked(grpc_channel_args *channel_args,
- ArgsStruct *args) {
- const grpc_arg *lb_policy_arg =
+void CheckLBPolicyResultLocked(grpc_channel_args* channel_args,
+ ArgsStruct* args) {
+ const grpc_arg* lb_policy_arg =
grpc_channel_args_find(channel_args, GRPC_ARG_LB_POLICY_NAME);
if (args->expected_lb_policy != "") {
- GPR_ASSERT(lb_policy_arg != NULL);
+ GPR_ASSERT(lb_policy_arg != nullptr);
GPR_ASSERT(lb_policy_arg->type == GRPC_ARG_STRING);
EXPECT_EQ(lb_policy_arg->value.string, args->expected_lb_policy);
} else {
- GPR_ASSERT(lb_policy_arg == NULL);
+ GPR_ASSERT(lb_policy_arg == nullptr);
}
}
-void CheckResolverResultLocked(grpc_exec_ctx *exec_ctx, void *argsp,
- grpc_error *err) {
- ArgsStruct *args = (ArgsStruct *)argsp;
- grpc_channel_args *channel_args = args->channel_args;
- const grpc_arg *channel_arg =
+void CheckResolverResultLocked(grpc_exec_ctx* exec_ctx, void* argsp,
+ grpc_error* err) {
+ ArgsStruct* args = (ArgsStruct*)argsp;
+ grpc_channel_args* channel_args = args->channel_args;
+ const grpc_arg* channel_arg =
grpc_channel_args_find(channel_args, GRPC_ARG_LB_ADDRESSES);
- GPR_ASSERT(channel_arg != NULL);
+ GPR_ASSERT(channel_arg != nullptr);
GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
- grpc_lb_addresses *addresses =
- (grpc_lb_addresses *)channel_arg->value.pointer.p;
+ grpc_lb_addresses* addresses =
+ (grpc_lb_addresses*)channel_arg->value.pointer.p;
gpr_log(GPR_INFO, "num addrs found: %" PRIdPTR ". expected %" PRIdPTR,
addresses->num_addresses, args->expected_addrs.size());
GPR_ASSERT(addresses->num_addresses == args->expected_addrs.size());
std::vector<GrpcLBAddress> found_lb_addrs;
for (size_t i = 0; i < addresses->num_addresses; i++) {
grpc_lb_address addr = addresses->addresses[i];
- char *str;
+ char* str;
grpc_sockaddr_to_string(&str, &addr.address, 1 /* normalize */);
gpr_log(GPR_INFO, "%s", str);
found_lb_addrs.emplace_back(
@@ -260,8 +259,9 @@ void CheckResolverResultLocked(grpc_exec_ctx *exec_ctx, void *argsp,
gpr_free(str);
}
if (args->expected_addrs.size() != found_lb_addrs.size()) {
- gpr_log(GPR_DEBUG, "found lb addrs size is: %" PRIdPTR
- ". expected addrs size is %" PRIdPTR,
+ gpr_log(GPR_DEBUG,
+ "found lb addrs size is: %" PRIdPTR
+ ". expected addrs size is %" PRIdPTR,
found_lb_addrs.size(), args->expected_addrs.size());
abort();
}
@@ -273,7 +273,7 @@ void CheckResolverResultLocked(grpc_exec_ctx *exec_ctx, void *argsp,
gpr_atm_rel_store(&args->done_atm, 1);
gpr_mu_lock(args->mu);
GRPC_LOG_IF_ERROR("pollset_kick",
- grpc_pollset_kick(exec_ctx, args->pollset, NULL));
+ grpc_pollset_kick(exec_ctx, args->pollset, nullptr));
gpr_mu_unlock(args->mu);
}
@@ -285,17 +285,17 @@ TEST(ResolverComponentTest, TestResolvesRelevantRecords) {
args.expected_service_config_string = FLAGS_expected_chosen_service_config;
args.expected_lb_policy = FLAGS_expected_lb_policy;
// maybe build the address with an authority
- char *whole_uri = NULL;
+ char* whole_uri = nullptr;
GPR_ASSERT(asprintf(&whole_uri, "dns://%s/%s",
FLAGS_local_dns_server_address.c_str(),
FLAGS_target_name.c_str()));
// create resolver and resolve
- grpc_resolver *resolver = grpc_resolver_create(&exec_ctx, whole_uri, NULL,
+ grpc_resolver* resolver = grpc_resolver_create(&exec_ctx, whole_uri, nullptr,
args.pollset_set, args.lock);
gpr_free(whole_uri);
grpc_closure on_resolver_result_changed;
GRPC_CLOSURE_INIT(&on_resolver_result_changed, CheckResolverResultLocked,
- (void *)&args, grpc_combiner_scheduler(args.lock));
+ (void*)&args, grpc_combiner_scheduler(args.lock));
grpc_resolver_next_locked(&exec_ctx, resolver, &args.channel_args,
&on_resolver_result_changed);
grpc_exec_ctx_flush(&exec_ctx);
@@ -307,7 +307,7 @@ TEST(ResolverComponentTest, TestResolvesRelevantRecords) {
} // namespace
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
grpc_init();
grpc_test_init(argc, argv);
::testing::InitGoogleTest(&argc, argv);
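
For reference, the result checks above all follow the same shape: look up an optional string channel arg with `grpc_channel_args_find` and compare it with the expected flag value. A minimal sketch of that lookup, assuming a hypothetical `GetStringChannelArg` helper (the `grpc_arg` fields and `grpc_channel_args_find` are the real core API the test already uses):

```
// Hypothetical helper mirroring CheckServiceConfigResultLocked /
// CheckLBPolicyResultLocked above; only the helper name is invented.
#include <grpc/grpc.h>

#include "src/core/lib/channel/channel_args.h"

// Returns the string value of channel arg `name`, or nullptr if the arg is
// absent or not a string arg.
const char* GetStringChannelArg(const grpc_channel_args* args,
                                const char* name) {
  const grpc_arg* arg = grpc_channel_args_find(args, name);
  if (arg == nullptr || arg->type != GRPC_ARG_STRING) return nullptr;
  return arg->value.string;
}
```

The test then compares the resulting string against FLAGS_expected_chosen_service_config or FLAGS_expected_lb_policy, treating an empty expectation as "the arg must be absent".
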
diff --git a/test/cpp/naming/resolver_component_tests_runner.sh b/test/cpp/naming/resolver_component_tests_runner.sh
index 407db5ed66..11a45d72ce 100755
--- a/test/cpp/naming/resolver_component_tests_runner.sh
+++ b/test/cpp/naming/resolver_component_tests_runner.sh
@@ -73,7 +73,7 @@ EXIT_CODE=0
# in the resolver.
$FLAGS_test_bin_path \
- --target_name='srv-ipv4-single-target.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='srv-ipv4-single-target.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.4:1234,True' \
--expected_chosen_service_config='' \
--expected_lb_policy='' \
@@ -81,7 +81,7 @@ $FLAGS_test_bin_path \
wait $! || EXIT_CODE=1
$FLAGS_test_bin_path \
- --target_name='srv-ipv4-multi-target.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='srv-ipv4-multi-target.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.5:1234,True;1.2.3.6:1234,True;1.2.3.7:1234,True' \
--expected_chosen_service_config='' \
--expected_lb_policy='' \
@@ -89,7 +89,7 @@ $FLAGS_test_bin_path \
wait $! || EXIT_CODE=1
$FLAGS_test_bin_path \
- --target_name='srv-ipv6-single-target.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='srv-ipv6-single-target.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='[2607:f8b0:400a:801::1001]:1234,True' \
--expected_chosen_service_config='' \
--expected_lb_policy='' \
@@ -97,7 +97,7 @@ $FLAGS_test_bin_path \
wait $! || EXIT_CODE=1
$FLAGS_test_bin_path \
- --target_name='srv-ipv6-multi-target.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='srv-ipv6-multi-target.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='[2607:f8b0:400a:801::1002]:1234,True;[2607:f8b0:400a:801::1003]:1234,True;[2607:f8b0:400a:801::1004]:1234,True' \
--expected_chosen_service_config='' \
--expected_lb_policy='' \
@@ -105,7 +105,7 @@ $FLAGS_test_bin_path \
wait $! || EXIT_CODE=1
$FLAGS_test_bin_path \
- --target_name='srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='srv-ipv4-simple-service-config.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.4:1234,True' \
--expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService","waitForReady":true}]}]}' \
--expected_lb_policy='round_robin' \
@@ -113,7 +113,7 @@ $FLAGS_test_bin_path \
wait $! || EXIT_CODE=1
$FLAGS_test_bin_path \
- --target_name='ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='ipv4-no-srv-simple-service-config.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.4:443,False' \
--expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"NoSrvSimpleService","waitForReady":true}]}]}' \
--expected_lb_policy='round_robin' \
@@ -121,7 +121,7 @@ $FLAGS_test_bin_path \
wait $! || EXIT_CODE=1
$FLAGS_test_bin_path \
- --target_name='ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='ipv4-no-config-for-cpp.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.4:443,False' \
--expected_chosen_service_config='' \
--expected_lb_policy='' \
@@ -129,7 +129,7 @@ $FLAGS_test_bin_path \
wait $! || EXIT_CODE=1
$FLAGS_test_bin_path \
- --target_name='ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='ipv4-cpp-config-has-zero-percentage.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.4:443,False' \
--expected_chosen_service_config='' \
--expected_lb_policy='' \
@@ -137,7 +137,7 @@ $FLAGS_test_bin_path \
wait $! || EXIT_CODE=1
$FLAGS_test_bin_path \
- --target_name='ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='ipv4-second-language-is-cpp.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.4:443,False' \
--expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"CppService","waitForReady":true}]}]}' \
--expected_lb_policy='round_robin' \
@@ -145,7 +145,7 @@ $FLAGS_test_bin_path \
wait $! || EXIT_CODE=1
$FLAGS_test_bin_path \
- --target_name='ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='ipv4-config-with-percentages.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.4:443,False' \
--expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"AlwaysPickedService","waitForReady":true}]}]}' \
--expected_lb_policy='round_robin' \
@@ -153,7 +153,7 @@ $FLAGS_test_bin_path \
wait $! || EXIT_CODE=1
$FLAGS_test_bin_path \
- --target_name='srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.4:1234,True;1.2.3.4:443,False' \
--expected_chosen_service_config='' \
--expected_lb_policy='' \
@@ -161,7 +161,7 @@ $FLAGS_test_bin_path \
wait $! || EXIT_CODE=1
$FLAGS_test_bin_path \
- --target_name='srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='[2607:f8b0:400a:801::1002]:1234,True;[2607:f8b0:400a:801::1002]:443,False' \
--expected_chosen_service_config='' \
--expected_lb_policy='' \
@@ -169,7 +169,7 @@ $FLAGS_test_bin_path \
wait $! || EXIT_CODE=1
$FLAGS_test_bin_path \
- --target_name='ipv4-config-causing-fallback-to-tcp.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='ipv4-config-causing-fallback-to-tcp.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.4:443,False' \
--expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwo","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooThree","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooFour","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooFive","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooSix","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooSeven","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooEight","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooNine","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTen","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooEleven","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]}]}' \
--expected_lb_policy='' \
diff --git a/test/cpp/naming/resolver_component_tests_runner_invoker.cc b/test/cpp/naming/resolver_component_tests_runner_invoker.cc
index b14391284d..0beb27de1b 100644
--- a/test/cpp/naming/resolver_component_tests_runner_invoker.cc
+++ b/test/cpp/naming/resolver_component_tests_runner_invoker.cc
@@ -32,10 +32,8 @@
#include "test/cpp/util/subprocess.h"
#include "test/cpp/util/test_config.h"
-extern "C" {
#include "src/core/lib/support/env.h"
#include "test/core/util/port.h"
-}
DEFINE_bool(
running_under_bazel, false,
@@ -62,16 +60,16 @@ static void register_sighandler() {
struct sigaction act;
memset(&act, 0, sizeof(act));
act.sa_handler = sighandler;
- sigaction(SIGINT, &act, NULL);
- sigaction(SIGTERM, &act, NULL);
+ sigaction(SIGINT, &act, nullptr);
+ sigaction(SIGTERM, &act, nullptr);
}
namespace {
const int kTestTimeoutSeconds = 60 * 2;
-void RunSigHandlingThread(SubProcess *test_driver, gpr_mu *test_driver_mu,
- gpr_cv *test_driver_cv, int *test_driver_done) {
+void RunSigHandlingThread(SubProcess* test_driver, gpr_mu* test_driver_mu,
+ gpr_cv* test_driver_cv, int* test_driver_done) {
gpr_timespec overall_deadline =
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_seconds(kTestTimeoutSeconds, GPR_TIMESPAN));
@@ -94,7 +92,7 @@ void RunSigHandlingThread(SubProcess *test_driver, gpr_mu *test_driver_mu,
test_driver->Interrupt();
return;
}
-}
+} // namespace
namespace grpc {
@@ -106,7 +104,7 @@ void InvokeResolverComponentTestsRunner(std::string test_runner_bin_path,
std::string records_config_path) {
int test_dns_server_port = grpc_pick_unused_port_or_die();
- SubProcess *test_driver = new SubProcess(
+ SubProcess* test_driver = new SubProcess(
{test_runner_bin_path, "--test_bin_path=" + test_bin_path,
"--dns_server_bin_path=" + dns_server_bin_path,
"--records_config_path=" + records_config_path,
@@ -153,7 +151,7 @@ void InvokeResolverComponentTestsRunner(std::string test_runner_bin_path,
} // namespace grpc
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
grpc::testing::InitTest(&argc, &argv, true);
grpc_init();
GPR_ASSERT(FLAGS_test_bin_name != "");
diff --git a/test/cpp/naming/resolver_gce_integration_tests_runner.sh b/test/cpp/naming/resolver_gce_integration_tests_runner.sh
index b20d18d9d1..091f9efbbd 100755
--- a/test/cpp/naming/resolver_gce_integration_tests_runner.sh
+++ b/test/cpp/naming/resolver_gce_integration_tests_runner.sh
@@ -34,191 +34,191 @@ echo "Sanity check DNS records are resolveable with dig:"
EXIT_CODE=0
ONE_FAILED=0
-dig SRV _grpclb._tcp.srv-ipv4-single-target.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig SRV _grpclb._tcp.srv-ipv4-single-target.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv4-single-target.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv4-single-target.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig A ipv4-single-target.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig A ipv4-single-target.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig A ipv4-single-target.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig A ipv4-single-target.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig SRV _grpclb._tcp.srv-ipv4-multi-target.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig SRV _grpclb._tcp.srv-ipv4-multi-target.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv4-multi-target.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv4-multi-target.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig A ipv4-multi-target.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig A ipv4-multi-target.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig A ipv4-multi-target.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig A ipv4-multi-target.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig SRV _grpclb._tcp.srv-ipv6-single-target.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig SRV _grpclb._tcp.srv-ipv6-single-target.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv6-single-target.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv6-single-target.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig AAAA ipv6-single-target.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig AAAA ipv6-single-target.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig AAAA ipv6-single-target.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig AAAA ipv6-single-target.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig SRV _grpclb._tcp.srv-ipv6-multi-target.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig SRV _grpclb._tcp.srv-ipv6-multi-target.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv6-multi-target.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv6-multi-target.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig AAAA ipv6-multi-target.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig AAAA ipv6-multi-target.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig AAAA ipv6-multi-target.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig AAAA ipv6-multi-target.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig TXT srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig TXT _grpc_config.srv-ipv4-simple-service-config.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig TXT srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig TXT _grpc_config.srv-ipv4-simple-service-config.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig A ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig SRV _grpclb._tcp.srv-ipv4-simple-service-config.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig A ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv4-simple-service-config.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig SRV _grpclb._tcp.srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig A ipv4-simple-service-config.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig A ipv4-simple-service-config.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig A ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig A ipv4-no-srv-simple-service-config.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig A ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig A ipv4-no-srv-simple-service-config.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig TXT ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig TXT _grpc_config.ipv4-no-srv-simple-service-config.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig TXT ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig TXT _grpc_config.ipv4-no-srv-simple-service-config.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig A ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig TXT _grpc_config.ipv4-no-config-for-cpp.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig A ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig TXT _grpc_config.ipv4-no-config-for-cpp.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig TXT ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig A ipv4-no-config-for-cpp.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig TXT ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig A ipv4-no-config-for-cpp.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig A ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig A ipv4-cpp-config-has-zero-percentage.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig A ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig A ipv4-cpp-config-has-zero-percentage.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig TXT ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig TXT _grpc_config.ipv4-cpp-config-has-zero-percentage.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig TXT ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig TXT _grpc_config.ipv4-cpp-config-has-zero-percentage.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig A ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig TXT _grpc_config.ipv4-second-language-is-cpp.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig A ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig TXT _grpc_config.ipv4-second-language-is-cpp.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig TXT ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig A ipv4-second-language-is-cpp.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig TXT ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig A ipv4-second-language-is-cpp.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig A ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig A ipv4-config-with-percentages.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig A ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig A ipv4-config-with-percentages.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig TXT ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig TXT _grpc_config.ipv4-config-with-percentages.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig TXT ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig TXT _grpc_config.ipv4-config-with-percentages.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig SRV _grpclb._tcp.srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig SRV _grpclb._tcp.srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig A balancer-for-ipv4-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig A balancer-for-ipv4-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig A balancer-for-ipv4-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig A balancer-for-ipv4-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig A srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig A srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig A srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig A srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig SRV _grpclb._tcp.srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig SRV _grpclb._tcp.srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig AAAA balancer-for-ipv6-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig AAAA balancer-for-ipv6-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig AAAA balancer-for-ipv6-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig AAAA balancer-for-ipv6-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
ONE_FAILED=0
-dig AAAA srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
+dig AAAA srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Sanity check: dig AAAA srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Sanity check: dig AAAA srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. FAILED"
exit 1
fi
@@ -226,133 +226,133 @@ echo "Sanity check PASSED. Run resolver tests:"
ONE_FAILED=0
bins/$CONFIG/resolver_component_test \
- --target_name='srv-ipv4-single-target.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='srv-ipv4-single-target.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.4:1234,True' \
--expected_chosen_service_config='' \
--expected_lb_policy='' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Test based on target record: srv-ipv4-single-target.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Test based on target record: srv-ipv4-single-target.resolver-tests-version-4.grpctestingexp. FAILED"
EXIT_CODE=1
fi
ONE_FAILED=0
bins/$CONFIG/resolver_component_test \
- --target_name='srv-ipv4-multi-target.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='srv-ipv4-multi-target.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.5:1234,True;1.2.3.6:1234,True;1.2.3.7:1234,True' \
--expected_chosen_service_config='' \
--expected_lb_policy='' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Test based on target record: srv-ipv4-multi-target.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Test based on target record: srv-ipv4-multi-target.resolver-tests-version-4.grpctestingexp. FAILED"
EXIT_CODE=1
fi
ONE_FAILED=0
bins/$CONFIG/resolver_component_test \
- --target_name='srv-ipv6-single-target.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='srv-ipv6-single-target.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='[2607:f8b0:400a:801::1001]:1234,True' \
--expected_chosen_service_config='' \
--expected_lb_policy='' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Test based on target record: srv-ipv6-single-target.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Test based on target record: srv-ipv6-single-target.resolver-tests-version-4.grpctestingexp. FAILED"
EXIT_CODE=1
fi
ONE_FAILED=0
bins/$CONFIG/resolver_component_test \
- --target_name='srv-ipv6-multi-target.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='srv-ipv6-multi-target.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='[2607:f8b0:400a:801::1002]:1234,True;[2607:f8b0:400a:801::1003]:1234,True;[2607:f8b0:400a:801::1004]:1234,True' \
--expected_chosen_service_config='' \
--expected_lb_policy='' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Test based on target record: srv-ipv6-multi-target.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Test based on target record: srv-ipv6-multi-target.resolver-tests-version-4.grpctestingexp. FAILED"
EXIT_CODE=1
fi
ONE_FAILED=0
bins/$CONFIG/resolver_component_test \
- --target_name='srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='srv-ipv4-simple-service-config.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.4:1234,True' \
--expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService","waitForReady":true}]}]}' \
--expected_lb_policy='round_robin' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Test based on target record: srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Test based on target record: srv-ipv4-simple-service-config.resolver-tests-version-4.grpctestingexp. FAILED"
EXIT_CODE=1
fi
ONE_FAILED=0
bins/$CONFIG/resolver_component_test \
- --target_name='ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='ipv4-no-srv-simple-service-config.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.4:443,False' \
--expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"NoSrvSimpleService","waitForReady":true}]}]}' \
--expected_lb_policy='round_robin' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Test based on target record: ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Test based on target record: ipv4-no-srv-simple-service-config.resolver-tests-version-4.grpctestingexp. FAILED"
EXIT_CODE=1
fi
ONE_FAILED=0
bins/$CONFIG/resolver_component_test \
- --target_name='ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='ipv4-no-config-for-cpp.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.4:443,False' \
--expected_chosen_service_config='' \
--expected_lb_policy='' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Test based on target record: ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Test based on target record: ipv4-no-config-for-cpp.resolver-tests-version-4.grpctestingexp. FAILED"
EXIT_CODE=1
fi
ONE_FAILED=0
bins/$CONFIG/resolver_component_test \
- --target_name='ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='ipv4-cpp-config-has-zero-percentage.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.4:443,False' \
--expected_chosen_service_config='' \
--expected_lb_policy='' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Test based on target record: ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Test based on target record: ipv4-cpp-config-has-zero-percentage.resolver-tests-version-4.grpctestingexp. FAILED"
EXIT_CODE=1
fi
ONE_FAILED=0
bins/$CONFIG/resolver_component_test \
- --target_name='ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='ipv4-second-language-is-cpp.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.4:443,False' \
--expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"CppService","waitForReady":true}]}]}' \
--expected_lb_policy='round_robin' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Test based on target record: ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Test based on target record: ipv4-second-language-is-cpp.resolver-tests-version-4.grpctestingexp. FAILED"
EXIT_CODE=1
fi
ONE_FAILED=0
bins/$CONFIG/resolver_component_test \
- --target_name='ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='ipv4-config-with-percentages.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.4:443,False' \
--expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"AlwaysPickedService","waitForReady":true}]}]}' \
--expected_lb_policy='round_robin' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Test based on target record: ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Test based on target record: ipv4-config-with-percentages.resolver-tests-version-4.grpctestingexp. FAILED"
EXIT_CODE=1
fi
ONE_FAILED=0
bins/$CONFIG/resolver_component_test \
- --target_name='srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='1.2.3.4:1234,True;1.2.3.4:443,False' \
--expected_chosen_service_config='' \
--expected_lb_policy='' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Test based on target record: srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Test based on target record: srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. FAILED"
EXIT_CODE=1
fi
ONE_FAILED=0
bins/$CONFIG/resolver_component_test \
- --target_name='srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp.' \
+ --target_name='srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp.' \
--expected_addrs='[2607:f8b0:400a:801::1002]:1234,True;[2607:f8b0:400a:801::1002]:443,False' \
--expected_chosen_service_config='' \
--expected_lb_policy='' || ONE_FAILED=1
if [[ "$ONE_FAILED" != 0 ]]; then
- echo "Test based on target record: srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. FAILED"
+ echo "Test based on target record: srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp. FAILED"
EXIT_CODE=1
fi
diff --git a/test/cpp/naming/resolver_test_record_groups.yaml b/test/cpp/naming/resolver_test_record_groups.yaml
index 2b3204335c..6c4f89d09b 100644
--- a/test/cpp/naming/resolver_test_record_groups.yaml
+++ b/test/cpp/naming/resolver_test_record_groups.yaml
@@ -1,4 +1,4 @@
-resolver_tests_common_zone_name: resolver-tests-version-1.grpctestingexp.
+resolver_tests_common_zone_name: resolver-tests-version-4.grpctestingexp.
resolver_component_tests:
- expected_addrs:
- {address: '1.2.3.4:1234', is_balancer: true}
@@ -58,7 +58,7 @@ resolver_component_tests:
- {TTL: '2100', data: 0 0 1234 ipv4-simple-service-config, type: SRV}
ipv4-simple-service-config:
- {TTL: '2100', data: 1.2.3.4, type: A}
- srv-ipv4-simple-service-config:
+ _grpc_config.srv-ipv4-simple-service-config:
- {TTL: '2100', data: 'grpc_config=[{"serviceConfig":{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService","waitForReady":true}]}]}}]',
type: TXT}
- expected_addrs:
@@ -69,6 +69,7 @@ resolver_component_tests:
records:
ipv4-no-srv-simple-service-config:
- {TTL: '2100', data: 1.2.3.4, type: A}
+ _grpc_config.ipv4-no-srv-simple-service-config:
- {TTL: '2100', data: 'grpc_config=[{"serviceConfig":{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"NoSrvSimpleService","waitForReady":true}]}]}}]',
type: TXT}
- expected_addrs:
@@ -79,6 +80,7 @@ resolver_component_tests:
records:
ipv4-no-config-for-cpp:
- {TTL: '2100', data: 1.2.3.4, type: A}
+ _grpc_config.ipv4-no-config-for-cpp:
- {TTL: '2100', data: 'grpc_config=[{"clientLanguage":["python"],"serviceConfig":{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"PythonService","waitForReady":true}]}]}}]',
type: TXT}
- expected_addrs:
@@ -89,6 +91,7 @@ resolver_component_tests:
records:
ipv4-cpp-config-has-zero-percentage:
- {TTL: '2100', data: 1.2.3.4, type: A}
+ _grpc_config.ipv4-cpp-config-has-zero-percentage:
- {TTL: '2100', data: 'grpc_config=[{"percentage":0,"serviceConfig":{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"CppService","waitForReady":true}]}]}}]',
type: TXT}
- expected_addrs:
@@ -99,6 +102,7 @@ resolver_component_tests:
records:
ipv4-second-language-is-cpp:
- {TTL: '2100', data: 1.2.3.4, type: A}
+ _grpc_config.ipv4-second-language-is-cpp:
- {TTL: '2100', data: 'grpc_config=[{"clientLanguage":["go"],"serviceConfig":{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"GoService","waitForReady":true}]}]}},{"clientLanguage":["c++"],"serviceConfig":{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"CppService","waitForReady":true}]}]}}]',
type: TXT}
- expected_addrs:
@@ -109,6 +113,7 @@ resolver_component_tests:
records:
ipv4-config-with-percentages:
- {TTL: '2100', data: 1.2.3.4, type: A}
+ _grpc_config.ipv4-config-with-percentages:
- {TTL: '2100', data: 'grpc_config=[{"percentage":0,"serviceConfig":{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"NeverPickedService","waitForReady":true}]}]}},{"percentage":100,"serviceConfig":{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"AlwaysPickedService","waitForReady":true}]}]}}]',
type: TXT}
- expected_addrs:
@@ -145,5 +150,6 @@ resolver_component_tests:
records:
ipv4-config-causing-fallback-to-tcp:
- {TTL: '2100', data: 1.2.3.4, type: A}
+ _grpc_config.ipv4-config-causing-fallback-to-tcp:
- {TTL: '2100', data: 'grpc_config=[{"serviceConfig":{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwo","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooThree","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooFour","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooFive","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooSix","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooSeven","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooEight","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooNine","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTen","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooEleven","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]}]}}]',
type: TXT}
diff --git a/test/cpp/performance/writes_per_rpc_test.cc b/test/cpp/performance/writes_per_rpc_test.cc
index f4f794cb88..23fff2ea8b 100644
--- a/test/cpp/performance/writes_per_rpc_test.cc
+++ b/test/cpp/performance/writes_per_rpc_test.cc
@@ -26,7 +26,6 @@
#include <grpc/support/log.h>
#include <gtest/gtest.h>
-extern "C" {
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/endpoint.h"
@@ -38,7 +37,7 @@ extern "C" {
#include "src/core/lib/surface/server.h"
#include "test/core/util/passthru_endpoint.h"
#include "test/core/util/port.h"
-}
+
#include "src/cpp/client/create_channel_internal.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/test_config.h"
@@ -101,8 +100,8 @@ class EndpointPairFixture {
}
grpc_server_setup_transport(&exec_ctx, server_->c_server(), transport,
- NULL, server_args);
- grpc_chttp2_transport_start_reading(&exec_ctx, transport, NULL);
+ nullptr, server_args);
+ grpc_chttp2_transport_start_reading(&exec_ctx, transport, nullptr);
}
/* create channel */
@@ -117,7 +116,7 @@ class EndpointPairFixture {
GPR_ASSERT(transport);
grpc_channel* channel = grpc_channel_create(
&exec_ctx, "target", &c_args, GRPC_CLIENT_DIRECT_CHANNEL, transport);
- grpc_chttp2_transport_start_reading(&exec_ctx, transport, NULL);
+ grpc_chttp2_transport_start_reading(&exec_ctx, transport, nullptr);
channel_ = CreateChannelInternal("", channel);
}
diff --git a/test/cpp/qps/BUILD b/test/cpp/qps/BUILD
index 3352269517..0d91d52f22 100644
--- a/test/cpp/qps/BUILD
+++ b/test/cpp/qps/BUILD
@@ -109,6 +109,18 @@ grpc_cc_library(
deps = ["//:gpr"],
)
+grpc_cc_test(
+ name = "inproc_sync_unary_ping_pong_test",
+ srcs = ["inproc_sync_unary_ping_pong_test.cc"],
+ deps = [
+ ":benchmark_config",
+ ":driver_impl",
+ "//:grpc++",
+ "//test/cpp/util:test_config",
+ "//test/cpp/util:test_util",
+ ],
+)
+
grpc_cc_library(
name = "interarrival",
hdrs = ["interarrival.h"],
diff --git a/test/cpp/qps/client.h b/test/cpp/qps/client.h
index abf755b393..82c6361abd 100644
--- a/test/cpp/qps/client.h
+++ b/test/cpp/qps/client.h
@@ -37,10 +37,14 @@
#include "src/cpp/util/core_stats.h"
#include "test/cpp/qps/histogram.h"
#include "test/cpp/qps/interarrival.h"
+#include "test/cpp/qps/qps_worker.h"
+#include "test/cpp/qps/server.h"
#include "test/cpp/qps/usage_timer.h"
#include "test/cpp/util/create_test_channel.h"
#include "test/cpp/util/test_credentials_provider.h"
+#define INPROC_NAME_PREFIX "qpsinproc:"
+
namespace grpc {
namespace testing {
@@ -226,8 +230,6 @@ class Client {
}
virtual void DestroyMultithreading() = 0;
- virtual void InitThreadFunc(size_t thread_idx) = 0;
- virtual bool ThreadFunc(HistogramEntry* histogram, size_t thread_idx) = 0;
void SetupLoadTest(const ClientConfig& config, size_t num_threads) {
// Set up the load distribution based on the number of threads
@@ -275,7 +277,6 @@ class Client {
: std::bind(&Client::NextIssueTime, this, thread_idx);
}
- private:
class Thread {
public:
Thread(Client* client, size_t idx)
@@ -295,6 +296,16 @@ class Client {
MergeStatusHistogram(statuses_, s);
}
+ void UpdateHistogram(HistogramEntry* entry) {
+ std::lock_guard<std::mutex> g(mu_);
+ if (entry->value_used()) {
+ histogram_.Add(entry->value());
+ }
+ if (entry->status_used()) {
+ statuses_[entry->status()]++;
+ }
+ }
+
private:
Thread(const Thread&);
Thread& operator=(const Thread&);
@@ -310,29 +321,8 @@ class Client {
wait_loop++;
}
- client_->InitThreadFunc(idx_);
-
- for (;;) {
- // run the loop body
- HistogramEntry entry;
- const bool thread_still_ok = client_->ThreadFunc(&entry, idx_);
- // lock, update histogram if needed and see if we're done
- std::lock_guard<std::mutex> g(mu_);
- if (entry.value_used()) {
- histogram_.Add(entry.value());
- }
- if (entry.status_used()) {
- statuses_[entry.status()]++;
- }
- if (!thread_still_ok) {
- gpr_log(GPR_ERROR, "Finishing client thread due to RPC error");
- }
- if (!thread_still_ok ||
- static_cast<bool>(gpr_atm_acq_load(&client_->thread_pool_done_))) {
- client_->CompleteThread();
- return;
- }
- }
+ client_->ThreadFunc(idx_, this);
+ client_->CompleteThread();
}
std::mutex mu_;
@@ -343,6 +333,12 @@ class Client {
std::thread impl_;
};
+ bool ThreadCompleted() {
+ return static_cast<bool>(gpr_atm_acq_load(&thread_pool_done_));
+ }
+
+ virtual void ThreadFunc(size_t thread_idx, Client::Thread* t) = 0;
+
std::vector<std::unique_ptr<Thread>> threads_;
std::unique_ptr<UsageTimer> timer_;
@@ -422,11 +418,21 @@ class ClientImpl : public Client {
type = config.security_params().cred_type();
}
- channel_ = CreateTestChannel(
- target, type, config.security_params().server_host_override(),
- !config.security_params().use_test_ca(),
- std::shared_ptr<CallCredentials>(), args);
- gpr_log(GPR_INFO, "Connecting to %s", target.c_str());
+ grpc::string inproc_pfx(INPROC_NAME_PREFIX);
+ if (target.find(inproc_pfx) != 0) {
+ channel_ = CreateTestChannel(
+ target, type, config.security_params().server_host_override(),
+ !config.security_params().use_test_ca(),
+ std::shared_ptr<CallCredentials>(), args);
+ gpr_log(GPR_INFO, "Connecting to %s", target.c_str());
+ is_inproc_ = false;
+ } else {
+ grpc::string tgt = target;
+ tgt.erase(0, inproc_pfx.length());
+ int srv_num = std::stoi(tgt);
+ channel_ = (*g_inproc_servers)[srv_num]->InProcessChannel(args);
+ is_inproc_ = true;
+ }
stub_ = create_stub(channel_);
}
Channel* get_channel() { return channel_.get(); }
@@ -434,9 +440,11 @@ class ClientImpl : public Client {
std::unique_ptr<std::thread> WaitForReady() {
return std::unique_ptr<std::thread>(new std::thread([this]() {
- GPR_ASSERT(channel_->WaitForConnected(
- gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_seconds(10, GPR_TIMESPAN))));
+ if (!is_inproc_) {
+ GPR_ASSERT(channel_->WaitForConnected(
+ gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_seconds(10, GPR_TIMESPAN))));
+ }
}));
}
@@ -455,6 +463,7 @@ class ClientImpl : public Client {
std::shared_ptr<Channel> channel_;
std::unique_ptr<StubType> stub_;
+ bool is_inproc_;
};
std::vector<ClientChannelInfo> channels_;
std::function<std::unique_ptr<StubType>(const std::shared_ptr<Channel>&)>
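Note: the ClientImpl changes above introduce INPROC_NAME_PREFIX ("qpsinproc:") so that a target of the form "qpsinproc:<N>" selects the N-th entry of g_inproc_servers instead of opening a network channel, and WaitForReady() skips WaitForConnected() for such channels. A minimal, self-contained sketch of that target parsing follows; StubServer and g_servers are hypothetical stand-ins, not the actual QPS types.

#include <string>
#include <vector>

// Hypothetical stand-ins for grpc::testing::Server and g_inproc_servers.
struct StubServer { int id; };
static std::vector<StubServer*> g_servers;

// Returns the server index encoded in "qpsinproc:<N>", or -1 for an
// ordinary host:port target that should go through CreateTestChannel().
static int InprocIndex(const std::string& target) {
  const std::string pfx = "qpsinproc:";  // INPROC_NAME_PREFIX
  if (target.compare(0, pfx.size(), pfx) != 0) return -1;
  return std::stoi(target.substr(pfx.size()));
}

int main() {
  g_servers.push_back(new StubServer{0});
  return InprocIndex("qpsinproc:0") == 0 ? 0 : 1;  // exits 0 on success
}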
diff --git a/test/cpp/qps/client_async.cc b/test/cpp/qps/client_async.cc
index 9ed4e0b355..07888214e7 100644
--- a/test/cpp/qps/client_async.cc
+++ b/test/cpp/qps/client_async.cc
@@ -149,9 +149,9 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
// Specify which protected members we are using since there is no
// member name resolution until the template types are fully resolved
public:
+ using Client::NextIssuer;
using Client::SetupLoadTest;
using Client::closed_loop_;
- using Client::NextIssuer;
using ClientImpl<StubType, RequestType>::cores_;
using ClientImpl<StubType, RequestType>::channels_;
using ClientImpl<StubType, RequestType>::request_;
@@ -236,32 +236,56 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
this->EndThreads(); // this needed for resolution
}
- void InitThreadFunc(size_t thread_idx) override final {}
- bool ThreadFunc(HistogramEntry* entry, size_t thread_idx) override final {
- void* got_tag;
- bool ok;
-
- if (cli_cqs_[cq_[thread_idx]]->Next(&got_tag, &ok)) {
- // Got a regular event, so process it
- ClientRpcContext* ctx = ClientRpcContext::detag(got_tag);
- // Proceed while holding a lock to make sure that
- // this thread isn't supposed to shut down
- std::lock_guard<std::mutex> l(shutdown_state_[thread_idx]->mutex);
- if (shutdown_state_[thread_idx]->shutdown) {
+ ClientRpcContext* ProcessTag(size_t thread_idx, void* tag) {
+ ClientRpcContext* ctx = ClientRpcContext::detag(tag);
+ if (shutdown_state_[thread_idx]->shutdown) {
+ ctx->TryCancel();
+ delete ctx;
+ bool ok;
+ while (cli_cqs_[cq_[thread_idx]]->Next(&tag, &ok)) {
+ ctx = ClientRpcContext::detag(tag);
ctx->TryCancel();
delete ctx;
- return true;
}
- if (!ctx->RunNextState(ok, entry)) {
- // The RPC and callback are done, so clone the ctx
- // and kickstart the new one
- ctx->StartNewClone(cli_cqs_[cq_[thread_idx]].get());
- delete ctx;
+ return nullptr;
+ }
+ return ctx;
+ }
+
+ void ThreadFunc(size_t thread_idx, Client::Thread* t) override final {
+ void* got_tag;
+ bool ok;
+
+ HistogramEntry entry;
+ HistogramEntry* entry_ptr = &entry;
+ if (!cli_cqs_[cq_[thread_idx]]->Next(&got_tag, &ok)) {
+ return;
+ }
+ std::mutex* shutdown_mu = &shutdown_state_[thread_idx]->mutex;
+ shutdown_mu->lock();
+ ClientRpcContext* ctx = ProcessTag(thread_idx, got_tag);
+ if (ctx == nullptr) {
+ shutdown_mu->unlock();
+ return;
+ }
+ while (cli_cqs_[cq_[thread_idx]]->DoThenAsyncNext(
+ [&, ctx, ok, entry_ptr, shutdown_mu]() {
+ if (!ctx->RunNextState(ok, entry_ptr)) {
+ // The RPC and callback are done, so clone the ctx
+ // and kickstart the new one
+ ctx->StartNewClone(cli_cqs_[cq_[thread_idx]].get());
+ delete ctx;
+ }
+ shutdown_mu->unlock();
+ },
+ &got_tag, &ok, gpr_inf_future(GPR_CLOCK_REALTIME))) {
+ t->UpdateHistogram(entry_ptr);
+ shutdown_mu->lock();
+ ctx = ProcessTag(thread_idx, got_tag);
+ if (ctx == nullptr) {
+ shutdown_mu->unlock();
+ return;
}
- return true;
- } else {
- // queue is shutting down, so we must be done
- return true;
}
}
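Note: the client ThreadFunc above is restructured around CompletionQueue::DoThenAsyncNext(): the work for the event that was just dequeued runs inside the callback, while the per-thread shutdown mutex is still held, and only then does the thread block for the next tag. The toy class below mimics that do-then-next contract with ordinary standard-library types purely to illustrate the control flow; it is not gRPC's completion queue and the names are hypothetical.

#include <functional>
#include <mutex>
#include <queue>

// Toy analogue of the do-then-next loop shape used above: finish handling
// the previous event inside `then`, then fetch the next tag under the lock.
class ToyCompletionQueue {
 public:
  void Push(int tag) {
    std::lock_guard<std::mutex> g(mu_);
    events_.push(tag);
  }
  // Returns false once drained (stands in for completion-queue shutdown).
  bool DoThenNext(const std::function<void()>& then, int* tag) {
    then();  // process the event obtained on the previous iteration first
    std::lock_guard<std::mutex> g(mu_);
    if (events_.empty()) return false;
    *tag = events_.front();
    events_.pop();
    return true;
  }

 private:
  std::mutex mu_;
  std::queue<int> events_;
};

A driving loop then obtains the first tag with a plain Next()-style call and repeats while (cq.DoThenNext([&] { Handle(tag); }, &tag)) {} until shutdown, which mirrors the while loop in ThreadFunc above.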
diff --git a/test/cpp/qps/client_sync.cc b/test/cpp/qps/client_sync.cc
index 94554a46b2..9f20b148eb 100644
--- a/test/cpp/qps/client_sync.cc
+++ b/test/cpp/qps/client_sync.cc
@@ -62,6 +62,25 @@ class SynchronousClient
virtual ~SynchronousClient(){};
+ virtual void InitThreadFuncImpl(size_t thread_idx) = 0;
+ virtual bool ThreadFuncImpl(HistogramEntry* entry, size_t thread_idx) = 0;
+
+ void ThreadFunc(size_t thread_idx, Thread* t) override {
+ InitThreadFuncImpl(thread_idx);
+ for (;;) {
+ // run the loop body
+ HistogramEntry entry;
+ const bool thread_still_ok = ThreadFuncImpl(&entry, thread_idx);
+ t->UpdateHistogram(&entry);
+ if (!thread_still_ok) {
+ gpr_log(GPR_ERROR, "Finishing client thread due to RPC error");
+ }
+ if (!thread_still_ok || ThreadCompleted()) {
+ return;
+ }
+ }
+ }
+
protected:
// WaitToIssue returns false if we realize that we need to break out
bool WaitToIssue(int thread_idx) {
@@ -103,9 +122,9 @@ class SynchronousUnaryClient final : public SynchronousClient {
}
~SynchronousUnaryClient() {}
- void InitThreadFunc(size_t thread_idx) override {}
+ void InitThreadFuncImpl(size_t thread_idx) override {}
- bool ThreadFunc(HistogramEntry* entry, size_t thread_idx) override {
+ bool ThreadFuncImpl(HistogramEntry* entry, size_t thread_idx) override {
if (!WaitToIssue(thread_idx)) {
return true;
}
@@ -192,13 +211,13 @@ class SynchronousStreamingPingPongClient final
}
}
- void InitThreadFunc(size_t thread_idx) override {
+ void InitThreadFuncImpl(size_t thread_idx) override {
auto* stub = channels_[thread_idx % channels_.size()].get_stub();
stream_[thread_idx] = stub->StreamingCall(&context_[thread_idx]);
messages_issued_[thread_idx] = 0;
}
- bool ThreadFunc(HistogramEntry* entry, size_t thread_idx) override {
+ bool ThreadFuncImpl(HistogramEntry* entry, size_t thread_idx) override {
if (!WaitToIssue(thread_idx)) {
return true;
}
@@ -246,14 +265,14 @@ class SynchronousStreamingFromClientClient final
}
}
- void InitThreadFunc(size_t thread_idx) override {
+ void InitThreadFuncImpl(size_t thread_idx) override {
auto* stub = channels_[thread_idx % channels_.size()].get_stub();
stream_[thread_idx] = stub->StreamingFromClient(&context_[thread_idx],
&responses_[thread_idx]);
last_issue_[thread_idx] = UsageTimer::Now();
}
- bool ThreadFunc(HistogramEntry* entry, size_t thread_idx) override {
+ bool ThreadFuncImpl(HistogramEntry* entry, size_t thread_idx) override {
// Figure out how to make histogram sensible if this is rate-paced
if (!WaitToIssue(thread_idx)) {
return true;
@@ -282,13 +301,13 @@ class SynchronousStreamingFromServerClient final
public:
SynchronousStreamingFromServerClient(const ClientConfig& config)
: SynchronousStreamingClient(config), last_recv_(num_threads_) {}
- void InitThreadFunc(size_t thread_idx) override {
+ void InitThreadFuncImpl(size_t thread_idx) override {
auto* stub = channels_[thread_idx % channels_.size()].get_stub();
stream_[thread_idx] =
stub->StreamingFromServer(&context_[thread_idx], request_);
last_recv_[thread_idx] = UsageTimer::Now();
}
- bool ThreadFunc(HistogramEntry* entry, size_t thread_idx) override {
+ bool ThreadFuncImpl(HistogramEntry* entry, size_t thread_idx) override {
GPR_TIMER_SCOPE("SynchronousStreamingFromServerClient::ThreadFunc", 0);
if (stream_[thread_idx]->Read(&responses_[thread_idx])) {
double now = UsageTimer::Now();
@@ -328,11 +347,11 @@ class SynchronousStreamingBothWaysClient final
}
}
- void InitThreadFunc(size_t thread_idx) override {
+ void InitThreadFuncImpl(size_t thread_idx) override {
auto* stub = channels_[thread_idx % channels_.size()].get_stub();
stream_[thread_idx] = stub->StreamingBothWays(&context_[thread_idx]);
}
- bool ThreadFunc(HistogramEntry* entry, size_t thread_idx) override {
+ bool ThreadFuncImpl(HistogramEntry* entry, size_t thread_idx) override {
// TODO (vjpai): Do this
return true;
}
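Note: the synchronous client above is reworked into a template-method shape: the base class now owns the issue loop, histogram update, and shutdown check, while derived clients only provide InitThreadFuncImpl()/ThreadFuncImpl(). A self-contained skeleton of that split is sketched below; LoopingClient and its members are hypothetical simplifications, not the real SynchronousClient.

#include <atomic>
#include <cstddef>

// Skeleton of the loop now owned by the base class: derived clients supply
// only the per-thread init and body hooks; the base class decides when to stop.
class LoopingClient {
 public:
  virtual ~LoopingClient() {}
  void Run(std::size_t thread_idx) {
    InitThreadFuncImpl(thread_idx);
    while (!done_.load(std::memory_order_acquire)) {
      if (!ThreadFuncImpl(thread_idx)) return;  // stop on RPC error
      // (the real code also folds a HistogramEntry into the Thread here)
    }
  }
  void Stop() { done_.store(true, std::memory_order_release); }

 protected:
  virtual void InitThreadFuncImpl(std::size_t thread_idx) = 0;
  virtual bool ThreadFuncImpl(std::size_t thread_idx) = 0;

 private:
  std::atomic<bool> done_{false};
};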
diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc
index 4458e389e7..22d039d4b7 100644
--- a/test/cpp/qps/driver.cc
+++ b/test/cpp/qps/driver.cc
@@ -36,16 +36,17 @@
#include "src/proto/grpc/testing/services.grpc.pb.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
+#include "test/cpp/qps/client.h"
#include "test/cpp/qps/driver.h"
#include "test/cpp/qps/histogram.h"
#include "test/cpp/qps/qps_worker.h"
#include "test/cpp/qps/stats.h"
#include "test/cpp/util/test_credentials_provider.h"
+using std::deque;
using std::list;
using std::thread;
using std::unique_ptr;
-using std::deque;
using std::vector;
namespace grpc {
@@ -63,11 +64,11 @@ static std::string get_host(const std::string& worker) {
}
static deque<string> get_workers(const string& env_name) {
+ deque<string> out;
char* env = gpr_getenv(env_name.c_str());
if (!env) {
env = gpr_strdup("");
}
- deque<string> out;
char* p = env;
if (strlen(env) != 0) {
for (;;) {
@@ -146,9 +147,8 @@ static void postprocess_scenario_result(ScenarioResult* result) {
result->mutable_summary()->set_server_cpu_usage(0);
} else {
auto server_cpu_usage =
- 100 -
- 100 * average(result->server_stats(), ServerIdleCpuTime) /
- average(result->server_stats(), ServerTotalCpuTime);
+ 100 - 100 * average(result->server_stats(), ServerIdleCpuTime) /
+ average(result->server_stats(), ServerTotalCpuTime);
result->mutable_summary()->set_server_cpu_usage(server_cpu_usage);
}
@@ -187,12 +187,17 @@ static void postprocess_scenario_result(ScenarioResult* result) {
client_queries_per_cpu_sec);
}
+std::vector<grpc::testing::Server*>* g_inproc_servers = nullptr;
+
std::unique_ptr<ScenarioResult> RunScenario(
const ClientConfig& initial_client_config, size_t num_clients,
const ServerConfig& initial_server_config, size_t num_servers,
int warmup_seconds, int benchmark_seconds, int spawn_local_worker_count,
const grpc::string& qps_server_target_override,
- const grpc::string& credential_type) {
+ const grpc::string& credential_type, bool run_inproc) {
+ if (run_inproc) {
+ g_inproc_servers = new std::vector<grpc::testing::Server*>;
+ }
// Log everything from the driver
gpr_set_log_verbosity(GPR_LOG_SEVERITY_DEBUG);
@@ -210,8 +215,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
ClientConfig result_client_config;
const ServerConfig result_server_config = initial_server_config;
- // Get client, server lists
- auto workers = get_workers("QPS_WORKERS");
+ // Get client, server lists; not needed for inproc tests
+ auto workers = (!run_inproc) ? get_workers("QPS_WORKERS") : deque<string>();
ClientConfig client_config = initial_client_config;
// Spawn some local workers if desired
@@ -227,9 +232,10 @@ std::unique_ptr<ScenarioResult> RunScenario(
called_init = true;
}
- int driver_port = grpc_pick_unused_port_or_die();
- local_workers.emplace_back(new QpsWorker(driver_port, 0, credential_type));
char addr[256];
+ // Use a driver port of -1 to indicate an in-process (inproc) worker
+ int driver_port = (!run_inproc) ? grpc_pick_unused_port_or_die() : -1;
+ local_workers.emplace_back(new QpsWorker(driver_port, 0, credential_type));
sprintf(addr, "localhost:%d", driver_port);
if (spawn_local_worker_count < 0) {
workers.push_front(addr);
@@ -265,9 +271,14 @@ std::unique_ptr<ScenarioResult> RunScenario(
for (size_t i = 0; i < num_servers; i++) {
gpr_log(GPR_INFO, "Starting server on %s (worker #%" PRIuPTR ")",
workers[i].c_str(), i);
- servers[i].stub = WorkerService::NewStub(CreateChannel(
- workers[i], GetCredentialsProvider()->GetChannelCredentials(
- credential_type, &channel_args)));
+ if (!run_inproc) {
+ servers[i].stub = WorkerService::NewStub(CreateChannel(
+ workers[i], GetCredentialsProvider()->GetChannelCredentials(
+ credential_type, &channel_args)));
+ } else {
+ servers[i].stub = WorkerService::NewStub(
+ local_workers[i]->InProcessChannel(channel_args));
+ }
ServerConfig server_config = initial_server_config;
if (server_config.core_limit() != 0) {
@@ -289,6 +300,10 @@ std::unique_ptr<ScenarioResult> RunScenario(
// overriding the qps server target only works if there is 1 server
GPR_ASSERT(num_servers == 1);
client_config.add_server_targets(qps_server_target_override);
+ } else if (run_inproc) {
+ std::string cli_target(INPROC_NAME_PREFIX);
+ cli_target += std::to_string(i);
+ client_config.add_server_targets(cli_target);
} else {
std::string host;
char* cli_target;
@@ -312,9 +327,14 @@ std::unique_ptr<ScenarioResult> RunScenario(
const auto& worker = workers[i + num_servers];
gpr_log(GPR_INFO, "Starting client on %s (worker #%" PRIuPTR ")",
worker.c_str(), i + num_servers);
- clients[i].stub = WorkerService::NewStub(
- CreateChannel(worker, GetCredentialsProvider()->GetChannelCredentials(
- credential_type, &channel_args)));
+ if (!run_inproc) {
+ clients[i].stub = WorkerService::NewStub(
+ CreateChannel(worker, GetCredentialsProvider()->GetChannelCredentials(
+ credential_type, &channel_args)));
+ } else {
+ clients[i].stub = WorkerService::NewStub(
+ local_workers[i + num_servers]->InProcessChannel(channel_args));
+ }
ClientConfig per_client_config = client_config;
if (initial_client_config.core_limit() != 0) {
@@ -495,6 +515,9 @@ std::unique_ptr<ScenarioResult> RunScenario(
}
}
+ if (g_inproc_servers != nullptr) {
+ delete g_inproc_servers;
+ }
postprocess_scenario_result(result.get());
return result;
}
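Note: when run_inproc is set, the driver above no longer consults QPS_WORKERS; every worker is spawned locally with a driver port of -1, and its WorkerService stub is connected through the worker's InProcessChannel() rather than CreateChannel(). A hedged sketch of that selection is below; worker_server, addr, and the insecure credentials are simplifications (the real driver goes through GetCredentialsProvider()), not the driver's actual variables.

#include <memory>
#include <string>

#include <grpc++/channel.h>
#include <grpc++/create_channel.h>
#include <grpc++/security/credentials.h>
#include <grpc++/server.h>
#include <grpc++/support/channel_arguments.h>

// Pick the channel for a worker stub: in-process when benchmarking the
// inproc transport, an ordinary network channel otherwise.
std::shared_ptr<grpc::Channel> WorkerChannel(bool run_inproc,
                                             grpc::Server* worker_server,
                                             const std::string& addr) {
  grpc::ChannelArguments args;
  return run_inproc
             ? worker_server->InProcessChannel(args)  // no sockets involved
             : grpc::CreateChannel(addr, grpc::InsecureChannelCredentials());
}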
diff --git a/test/cpp/qps/driver.h b/test/cpp/qps/driver.h
index 29f2776d79..fede4d8045 100644
--- a/test/cpp/qps/driver.h
+++ b/test/cpp/qps/driver.h
@@ -32,7 +32,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
const grpc::testing::ServerConfig& server_config, size_t num_servers,
int warmup_seconds, int benchmark_seconds, int spawn_local_worker_count,
const grpc::string& qps_server_target_override,
- const grpc::string& credential_type);
+ const grpc::string& credential_type, bool run_inproc);
bool RunQuit(const grpc::string& credential_type);
} // namespace testing
diff --git a/test/cpp/qps/gen_build_yaml.py b/test/cpp/qps/gen_build_yaml.py
index 65553f57f1..1ef8f65b0b 100755
--- a/test/cpp/qps/gen_build_yaml.py
+++ b/test/cpp/qps/gen_build_yaml.py
@@ -85,6 +85,24 @@ print yaml.dump({
if 'scalable' in scenario_json.get('CATEGORIES', [])
] + [
{
+ 'name': 'qps_json_driver',
+ 'shortname': 'qps_json_driver:inproc_%s' % scenario_json['name'],
+ 'args': ['--run_inproc', '--scenarios_json', _scenario_json_string(scenario_json, False)],
+ 'ci_platforms': ['linux'],
+ 'platforms': ['linux'],
+ 'flaky': False,
+ 'language': 'c++',
+ 'boringssl': True,
+ 'defaults': 'boringssl',
+ 'cpu_cost': guess_cpu(scenario_json, False),
+ 'exclude_configs': ['tsan', 'asan'],
+ 'timeout_seconds': 6*60,
+ 'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', [])
+ }
+ for scenario_json in scenario_config.CXXLanguage().scenarios()
+ if 'inproc' in scenario_json.get('CATEGORIES', [])
+ ] + [
+ {
'name': 'json_run_localhost',
'shortname': 'json_run_localhost:%s_low_thread_count' % scenario_json['name'],
'args': ['--scenarios_json', _scenario_json_string(scenario_json, True)],
diff --git a/test/cpp/qps/histogram.h b/test/cpp/qps/histogram.h
index 3d04ee57da..e31d5d78a8 100644
--- a/test/cpp/qps/histogram.h
+++ b/test/cpp/qps/histogram.h
@@ -70,7 +70,7 @@ class Histogram {
gpr_histogram* impl_;
};
-}
-}
+} // namespace testing
+} // namespace grpc
#endif /* TEST_QPS_HISTOGRAM_H */
diff --git a/test/cpp/qps/inproc_sync_unary_ping_pong_test.cc b/test/cpp/qps/inproc_sync_unary_ping_pong_test.cc
new file mode 100644
index 0000000000..f2e977d48b
--- /dev/null
+++ b/test/cpp/qps/inproc_sync_unary_ping_pong_test.cc
@@ -0,0 +1,66 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <set>
+
+#include <grpc/support/log.h>
+
+#include "test/cpp/qps/benchmark_config.h"
+#include "test/cpp/qps/driver.h"
+#include "test/cpp/qps/report.h"
+#include "test/cpp/qps/server.h"
+#include "test/cpp/util/test_config.h"
+#include "test/cpp/util/test_credentials_provider.h"
+
+namespace grpc {
+namespace testing {
+
+static const int WARMUP = 5;
+static const int BENCHMARK = 5;
+
+static void RunSynchronousUnaryPingPong() {
+ gpr_log(GPR_INFO, "Running Synchronous Unary Ping Pong");
+
+ ClientConfig client_config;
+ client_config.set_client_type(SYNC_CLIENT);
+ client_config.set_outstanding_rpcs_per_channel(1);
+ client_config.set_client_channels(1);
+ client_config.set_rpc_type(UNARY);
+ client_config.mutable_load_params()->mutable_closed_loop();
+
+ ServerConfig server_config;
+ server_config.set_server_type(SYNC_SERVER);
+
+ const auto result =
+ RunScenario(client_config, 1, server_config, 1, WARMUP, BENCHMARK, -2, "",
+ kInsecureCredentialsType, true);
+
+ GetReporter()->ReportQPS(*result);
+ GetReporter()->ReportLatency(*result);
+}
+
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::InitTest(&argc, &argv, true);
+
+ grpc::testing::RunSynchronousUnaryPingPong();
+
+ return 0;
+}
diff --git a/test/cpp/qps/interarrival.h b/test/cpp/qps/interarrival.h
index 1fa310c209..9c48066c9c 100644
--- a/test/cpp/qps/interarrival.h
+++ b/test/cpp/qps/interarrival.h
@@ -102,7 +102,7 @@ class InterarrivalTimer {
std::vector<time_table::const_iterator> thread_posns_;
time_table random_table_;
};
-}
-}
+} // namespace testing
+} // namespace grpc
#endif
diff --git a/test/cpp/qps/json_run_localhost.cc b/test/cpp/qps/json_run_localhost.cc
index 1d394b216f..db8b2a3943 100644
--- a/test/cpp/qps/json_run_localhost.cc
+++ b/test/cpp/qps/json_run_localhost.cc
@@ -46,7 +46,7 @@ std::string as_string(const T& val) {
static void sighandler(int sig) {
const int errno_saved = errno;
- if (g_driver != NULL) g_driver->Interrupt();
+ if (g_driver != nullptr) g_driver->Interrupt();
for (int i = 0; i < kNumWorkers; ++i) {
if (g_workers[i]) g_workers[i]->Interrupt();
}
@@ -58,8 +58,8 @@ static void register_sighandler() {
memset(&act, 0, sizeof(act));
act.sa_handler = sighandler;
- sigaction(SIGINT, &act, NULL);
- sigaction(SIGTERM, &act, NULL);
+ sigaction(SIGINT, &act, nullptr);
+ sigaction(SIGTERM, &act, nullptr);
}
static void LogStatus(int status, const char* label) {
@@ -117,8 +117,14 @@ int main(int argc, char** argv) {
}
}
- delete g_driver;
- g_driver = NULL;
- for (int i = 0; i < kNumWorkers; ++i) delete g_workers[i];
+ if (g_driver != nullptr) {
+ delete g_driver;
+ }
+ g_driver = nullptr;
+ for (int i = 0; i < kNumWorkers; ++i) {
+ if (g_workers[i] != nullptr) {
+ delete g_workers[i];
+ }
+ }
GPR_ASSERT(driver_join_status == 0);
}
diff --git a/test/cpp/qps/parse_json.cc b/test/cpp/qps/parse_json.cc
index 343e073f3f..a98ae394db 100644
--- a/test/cpp/qps/parse_json.cc
+++ b/test/cpp/qps/parse_json.cc
@@ -61,5 +61,5 @@ grpc::string SerializeJson(const GRPC_CUSTOM_MESSAGE& msg,
return json_string;
}
-} // testing
-} // grpc
+} // namespace testing
+} // namespace grpc
diff --git a/test/cpp/qps/parse_json.h b/test/cpp/qps/parse_json.h
index b320d26c1a..f2fffb52d4 100644
--- a/test/cpp/qps/parse_json.h
+++ b/test/cpp/qps/parse_json.h
@@ -31,7 +31,7 @@ void ParseJson(const grpc::string& json, const grpc::string& type,
grpc::string SerializeJson(const GRPC_CUSTOM_MESSAGE& msg,
const grpc::string& type);
-} // testing
-} // grpc
+} // namespace testing
+} // namespace grpc
#endif // TEST_QPS_PARSE_JSON_H
diff --git a/test/cpp/qps/qps_interarrival_test.cc b/test/cpp/qps/qps_interarrival_test.cc
index 87f09e8c5f..461bf624ce 100644
--- a/test/cpp/qps/qps_interarrival_test.cc
+++ b/test/cpp/qps/qps_interarrival_test.cc
@@ -25,13 +25,13 @@
#include "test/cpp/qps/interarrival.h"
#include "test/cpp/util/test_config.h"
-using grpc::testing::RandomDistInterface;
using grpc::testing::InterarrivalTimer;
+using grpc::testing::RandomDistInterface;
-static void RunTest(RandomDistInterface &&r, int threads, std::string title) {
+static void RunTest(RandomDistInterface&& r, int threads, std::string title) {
InterarrivalTimer timer;
timer.init(r, threads);
- gpr_histogram *h(gpr_histogram_create(0.01, 60e9));
+ gpr_histogram* h(gpr_histogram_create(0.01, 60e9));
for (int i = 0; i < 10000000; i++) {
for (int j = 0; j < threads; j++) {
@@ -50,7 +50,7 @@ static void RunTest(RandomDistInterface &&r, int threads, std::string title) {
using grpc::testing::ExpDist;
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
grpc::testing::InitTest(&argc, &argv, true);
RunTest(ExpDist(10.0), 5, std::string("Exponential(10)"));
diff --git a/test/cpp/qps/qps_json_driver.cc b/test/cpp/qps/qps_json_driver.cc
index cca59f64d8..b2449da69c 100644
--- a/test/cpp/qps/qps_json_driver.cc
+++ b/test/cpp/qps/qps_json_driver.cc
@@ -30,6 +30,7 @@
#include "test/cpp/qps/driver.h"
#include "test/cpp/qps/parse_json.h"
#include "test/cpp/qps/report.h"
+#include "test/cpp/qps/server.h"
#include "test/cpp/util/test_config.h"
#include "test/cpp/util/test_credentials_provider.h"
@@ -64,6 +65,7 @@ DEFINE_string(json_file_out, "", "File to write the JSON output to.");
DEFINE_string(credential_type, grpc::testing::kInsecureCredentialsType,
"Credential type for communication with workers");
+DEFINE_bool(run_inproc, false, "Perform an in-process transport test");
namespace grpc {
namespace testing {
@@ -75,8 +77,9 @@ static std::unique_ptr<ScenarioResult> RunAndReport(const Scenario& scenario,
RunScenario(scenario.client_config(), scenario.num_clients(),
scenario.server_config(), scenario.num_servers(),
scenario.warmup_seconds(), scenario.benchmark_seconds(),
- scenario.spawn_local_worker_count(),
- FLAGS_qps_server_target_override, FLAGS_credential_type);
+ !FLAGS_run_inproc ? scenario.spawn_local_worker_count() : -2,
+ FLAGS_qps_server_target_override, FLAGS_credential_type,
+ FLAGS_run_inproc);
// Amend the result with scenario config. Eventually we should adjust
// RunScenario contract so we don't need to touch the result here.
@@ -178,7 +181,7 @@ static bool QpsDriver() {
if (scfile) {
// Read the json data from disk
FILE* json_file = fopen(FLAGS_scenarios_file.c_str(), "r");
- GPR_ASSERT(json_file != NULL);
+ GPR_ASSERT(json_file != nullptr);
fseek(json_file, 0, SEEK_END);
long len = ftell(json_file);
char* data = new char[len];
diff --git a/test/cpp/qps/qps_openloop_test.cc b/test/cpp/qps/qps_openloop_test.cc
index 069b3fa076..df929b9811 100644
--- a/test/cpp/qps/qps_openloop_test.cc
+++ b/test/cpp/qps/qps_openloop_test.cc
@@ -24,6 +24,7 @@
#include "test/cpp/qps/benchmark_config.h"
#include "test/cpp/qps/driver.h"
#include "test/cpp/qps/report.h"
+#include "test/cpp/qps/server.h"
#include "test/cpp/util/test_config.h"
#include "test/cpp/util/test_credentials_provider.h"
@@ -49,8 +50,9 @@ static void RunQPS() {
server_config.set_server_type(ASYNC_SERVER);
server_config.set_async_server_threads(8);
- const auto result = RunScenario(client_config, 1, server_config, 1, WARMUP,
- BENCHMARK, -2, "", kInsecureCredentialsType);
+ const auto result =
+ RunScenario(client_config, 1, server_config, 1, WARMUP, BENCHMARK, -2, "",
+ kInsecureCredentialsType, false);
GetReporter()->ReportQPSPerCore(*result);
GetReporter()->ReportLatency(*result);
diff --git a/test/cpp/qps/qps_worker.cc b/test/cpp/qps/qps_worker.cc
index d20bc1b074..c288b03ec5 100644
--- a/test/cpp/qps/qps_worker.cc
+++ b/test/cpp/qps/qps_worker.cc
@@ -225,11 +225,14 @@ class WorkerServiceImpl final : public WorkerService::Service {
if (!args.has_setup()) {
return Status(StatusCode::INVALID_ARGUMENT, "Bad server creation args");
}
- if (server_port_ != 0) {
+ if (server_port_ > 0) {
args.mutable_setup()->set_port(server_port_);
}
gpr_log(GPR_INFO, "RunServerBody: about to create server");
auto server = CreateServer(args.setup());
+ if (g_inproc_servers != nullptr) {
+ g_inproc_servers->push_back(server.get());
+ }
if (!server) {
return Status(StatusCode::INVALID_ARGUMENT, "Couldn't create server");
}
@@ -269,17 +272,17 @@ QpsWorker::QpsWorker(int driver_port, int server_port,
impl_.reset(new WorkerServiceImpl(server_port, this));
gpr_atm_rel_store(&done_, static_cast<gpr_atm>(0));
- char* server_address = NULL;
- gpr_join_host_port(&server_address, "::", driver_port);
-
ServerBuilder builder;
- builder.AddListeningPort(
- server_address,
- GetCredentialsProvider()->GetServerCredentials(credential_type));
+ if (driver_port >= 0) {
+ char* server_address = nullptr;
+ gpr_join_host_port(&server_address, "::", driver_port);
+ builder.AddListeningPort(
+ server_address,
+ GetCredentialsProvider()->GetServerCredentials(credential_type));
+ gpr_free(server_address);
+ }
builder.RegisterService(impl_.get());
- gpr_free(server_address);
-
server_ = builder.BuildAndStart();
}
diff --git a/test/cpp/qps/qps_worker.h b/test/cpp/qps/qps_worker.h
index 360125fb17..a5167426d0 100644
--- a/test/cpp/qps/qps_worker.h
+++ b/test/cpp/qps/qps_worker.h
@@ -21,17 +21,21 @@
#include <memory>
+#include <grpc++/server.h>
+#include <grpc++/support/channel_arguments.h>
#include <grpc++/support/config.h>
#include <grpc/support/atm.h>
-namespace grpc {
+#include "test/cpp/qps/server.h"
-class Server;
+namespace grpc {
namespace testing {
class WorkerServiceImpl;
+extern std::vector<grpc::testing::Server*>* g_inproc_servers;
+
class QpsWorker {
public:
explicit QpsWorker(int driver_port, int server_port,
@@ -41,9 +45,13 @@ class QpsWorker {
bool Done() const;
void MarkDone();
+ std::shared_ptr<Channel> InProcessChannel(const ChannelArguments& args) {
+ return server_->InProcessChannel(args);
+ }
+
private:
std::unique_ptr<WorkerServiceImpl> impl_;
- std::unique_ptr<Server> server_;
+ std::unique_ptr<grpc::Server> server_;
gpr_atm done_;
};
diff --git a/test/cpp/qps/secure_sync_unary_ping_pong_test.cc b/test/cpp/qps/secure_sync_unary_ping_pong_test.cc
index 137b33ee25..bb415e9d63 100644
--- a/test/cpp/qps/secure_sync_unary_ping_pong_test.cc
+++ b/test/cpp/qps/secure_sync_unary_ping_pong_test.cc
@@ -23,6 +23,7 @@
#include "test/cpp/qps/benchmark_config.h"
#include "test/cpp/qps/driver.h"
#include "test/cpp/qps/report.h"
+#include "test/cpp/qps/server.h"
#include "test/cpp/util/test_config.h"
#include "test/cpp/util/test_credentials_provider.h"
@@ -52,8 +53,9 @@ static void RunSynchronousUnaryPingPong() {
client_config.mutable_security_params()->CopyFrom(security);
server_config.mutable_security_params()->CopyFrom(security);
- const auto result = RunScenario(client_config, 1, server_config, 1, WARMUP,
- BENCHMARK, -2, "", kInsecureCredentialsType);
+ const auto result =
+ RunScenario(client_config, 1, server_config, 1, WARMUP, BENCHMARK, -2, "",
+ kInsecureCredentialsType, false);
GetReporter()->ReportQPS(*result);
GetReporter()->ReportLatency(*result);
diff --git a/test/cpp/qps/server.h b/test/cpp/qps/server.h
index 16d101d5e6..9da33566dd 100644
--- a/test/cpp/qps/server.h
+++ b/test/cpp/qps/server.h
@@ -42,10 +42,9 @@ class Server {
explicit Server(const ServerConfig& config)
: timer_(new UsageTimer), last_reset_poll_count_(0) {
cores_ = gpr_cpu_num_cores();
- if (config.port()) {
+ if (config.port()) { // positive for a fixed port, negative for inproc
port_ = config.port();
-
- } else {
+ } else { // zero for dynamic port
port_ = grpc_pick_unused_port_or_die();
}
}
@@ -115,6 +114,9 @@ class Server {
return 0;
}
+ virtual std::shared_ptr<Channel> InProcessChannel(
+ const ChannelArguments& args) = 0;
+
protected:
static void ApplyConfigToBuilder(const ServerConfig& config,
ServerBuilder* builder) {
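Note: with the comments above, Server::port_ now encodes three cases: a positive configured port is used verbatim, zero means grpc_pick_unused_port_or_die(), and a negative value marks an inproc-only server. Both server implementations below only register a listening address when the resolved port is non-negative. A small sketch of that guard follows, with insecure credentials standing in for Server::CreateServerCredentials().

#include <string>

#include <grpc++/security/server_credentials.h>
#include <grpc++/server_builder.h>

// Add a listening port only for networked servers; a negative port number
// marks an inproc-only server that is reached via InProcessChannel().
void MaybeAddListeningPort(grpc::ServerBuilder* builder, int port_num) {
  if (port_num < 0) return;
  // "[::]:<port>", as gpr_join_host_port("::", port) would produce.
  const std::string addr = "[::]:" + std::to_string(port_num);
  builder->AddListeningPort(addr, grpc::InsecureServerCredentials());
}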
diff --git a/test/cpp/qps/server_async.cc b/test/cpp/qps/server_async.cc
index 4a82f98199..72ae772147 100644
--- a/test/cpp/qps/server_async.cc
+++ b/test/cpp/qps/server_async.cc
@@ -48,40 +48,43 @@ template <class RequestType, class ResponseType, class ServiceType,
class AsyncQpsServerTest final : public grpc::testing::Server {
public:
AsyncQpsServerTest(
- const ServerConfig &config,
- std::function<void(ServerBuilder *, ServiceType *)> register_service,
- std::function<void(ServiceType *, ServerContextType *, RequestType *,
- ServerAsyncResponseWriter<ResponseType> *,
- CompletionQueue *, ServerCompletionQueue *, void *)>
+ const ServerConfig& config,
+ std::function<void(ServerBuilder*, ServiceType*)> register_service,
+ std::function<void(ServiceType*, ServerContextType*, RequestType*,
+ ServerAsyncResponseWriter<ResponseType>*,
+ CompletionQueue*, ServerCompletionQueue*, void*)>
request_unary_function,
- std::function<void(ServiceType *, ServerContextType *,
- ServerAsyncReaderWriter<ResponseType, RequestType> *,
- CompletionQueue *, ServerCompletionQueue *, void *)>
+ std::function<void(ServiceType*, ServerContextType*,
+ ServerAsyncReaderWriter<ResponseType, RequestType>*,
+ CompletionQueue*, ServerCompletionQueue*, void*)>
request_streaming_function,
- std::function<void(ServiceType *, ServerContextType *,
- ServerAsyncReader<ResponseType, RequestType> *,
- CompletionQueue *, ServerCompletionQueue *, void *)>
+ std::function<void(ServiceType*, ServerContextType*,
+ ServerAsyncReader<ResponseType, RequestType>*,
+ CompletionQueue*, ServerCompletionQueue*, void*)>
request_streaming_from_client_function,
- std::function<void(ServiceType *, ServerContextType *, RequestType *,
- ServerAsyncWriter<ResponseType> *, CompletionQueue *,
- ServerCompletionQueue *, void *)>
+ std::function<void(ServiceType*, ServerContextType*, RequestType*,
+ ServerAsyncWriter<ResponseType>*, CompletionQueue*,
+ ServerCompletionQueue*, void*)>
request_streaming_from_server_function,
- std::function<void(ServiceType *, ServerContextType *,
- ServerAsyncReaderWriter<ResponseType, RequestType> *,
- CompletionQueue *, ServerCompletionQueue *, void *)>
+ std::function<void(ServiceType*, ServerContextType*,
+ ServerAsyncReaderWriter<ResponseType, RequestType>*,
+ CompletionQueue*, ServerCompletionQueue*, void*)>
request_streaming_both_ways_function,
- std::function<grpc::Status(const PayloadConfig &, const RequestType *,
- ResponseType *)>
+ std::function<grpc::Status(const PayloadConfig&, RequestType*,
+ ResponseType*)>
process_rpc)
: Server(config) {
- char *server_address = NULL;
-
- gpr_join_host_port(&server_address, "::", port());
-
ServerBuilder builder;
- builder.AddListeningPort(server_address,
- Server::CreateServerCredentials(config));
- gpr_free(server_address);
+
+ auto port_num = port();
+ // Negative port number means inproc server, so no listen port needed
+ if (port_num >= 0) {
+ char* server_address = nullptr;
+ gpr_join_host_port(&server_address, "::", port_num);
+ builder.AddListeningPort(server_address,
+ Server::CreateServerCredentials(config));
+ gpr_free(server_address);
+ }
register_service(&builder, &async_service_);
@@ -168,7 +171,7 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
}
for (auto cq = srv_cqs_.begin(); cq != srv_cqs_.end(); ++cq) {
bool ok;
- void *got_tag;
+ void* got_tag;
while ((*cq)->Next(&got_tag, &ok))
;
}
@@ -183,6 +186,11 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
return count;
}
+ std::shared_ptr<Channel> InProcessChannel(
+ const ChannelArguments& args) override {
+ return server_->InProcessChannel(args);
+ }
+
private:
void ShutdownThreadFunc() {
// TODO (vpai): Remove this deadline and allow Shutdown to finish properly
@@ -193,24 +201,32 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
void ThreadFunc(int thread_idx) {
// Wait until work is available or we are shutting down
bool ok;
- void *got_tag;
- while (srv_cqs_[cq_[thread_idx]]->Next(&got_tag, &ok)) {
- ServerRpcContext *ctx = detag(got_tag);
+ void* got_tag;
+ if (!srv_cqs_[cq_[thread_idx]]->Next(&got_tag, &ok)) {
+ return;
+ }
+ ServerRpcContext* ctx;
+ std::mutex* mu_ptr = &shutdown_state_[thread_idx]->mutex;
+ do {
+ ctx = detag(got_tag);
// The tag is a pointer to an RPC context to invoke
// Proceed while holding a lock to make sure that
// this thread isn't supposed to shut down
- std::lock_guard<std::mutex> l(shutdown_state_[thread_idx]->mutex);
+ mu_ptr->lock();
if (shutdown_state_[thread_idx]->shutdown) {
+ mu_ptr->unlock();
return;
}
- std::lock_guard<ServerRpcContext> l2(*ctx);
- const bool still_going = ctx->RunNextState(ok);
- // if this RPC context is done, refresh it
- if (!still_going) {
- ctx->Reset();
- }
- }
- return;
+ } while (srv_cqs_[cq_[thread_idx]]->DoThenAsyncNext(
+ [&, ctx, ok, mu_ptr]() {
+ ctx->lock();
+ if (!ctx->RunNextState(ok)) {
+ ctx->Reset();
+ }
+ ctx->unlock();
+ mu_ptr->unlock();
+ },
+ &got_tag, &ok, gpr_inf_future(GPR_CLOCK_REALTIME)));
}
class ServerRpcContext {
@@ -224,22 +240,21 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
private:
std::mutex mu_;
};
- static void *tag(ServerRpcContext *func) {
- return reinterpret_cast<void *>(func);
+ static void* tag(ServerRpcContext* func) {
+ return reinterpret_cast<void*>(func);
}
- static ServerRpcContext *detag(void *tag) {
- return reinterpret_cast<ServerRpcContext *>(tag);
+ static ServerRpcContext* detag(void* tag) {
+ return reinterpret_cast<ServerRpcContext*>(tag);
}
class ServerRpcContextUnaryImpl final : public ServerRpcContext {
public:
ServerRpcContextUnaryImpl(
- std::function<void(ServerContextType *, RequestType *,
- grpc::ServerAsyncResponseWriter<ResponseType> *,
- void *)>
+ std::function<void(ServerContextType*, RequestType*,
+ grpc::ServerAsyncResponseWriter<ResponseType>*,
+ void*)>
request_method,
- std::function<grpc::Status(const RequestType *, ResponseType *)>
- invoke_method)
+ std::function<grpc::Status(RequestType*, ResponseType*)> invoke_method)
: srv_ctx_(new ServerContextType),
next_state_(&ServerRpcContextUnaryImpl::invoker),
request_method_(request_method),
@@ -281,11 +296,10 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
RequestType req_;
ResponseType response_;
bool (ServerRpcContextUnaryImpl::*next_state_)(bool);
- std::function<void(ServerContextType *, RequestType *,
- grpc::ServerAsyncResponseWriter<ResponseType> *, void *)>
+ std::function<void(ServerContextType*, RequestType*,
+ grpc::ServerAsyncResponseWriter<ResponseType>*, void*)>
request_method_;
- std::function<grpc::Status(const RequestType *, ResponseType *)>
- invoke_method_;
+ std::function<grpc::Status(RequestType*, ResponseType*)> invoke_method_;
grpc::ServerAsyncResponseWriter<ResponseType> response_writer_;
};
@@ -293,11 +307,10 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
public:
ServerRpcContextStreamingImpl(
std::function<void(
- ServerContextType *,
- grpc::ServerAsyncReaderWriter<ResponseType, RequestType> *, void *)>
+ ServerContextType*,
+ grpc::ServerAsyncReaderWriter<ResponseType, RequestType>*, void*)>
request_method,
- std::function<grpc::Status(const RequestType *, ResponseType *)>
- invoke_method)
+ std::function<grpc::Status(RequestType*, ResponseType*)> invoke_method)
: srv_ctx_(new ServerContextType),
next_state_(&ServerRpcContextStreamingImpl::request_done),
request_method_(request_method),
@@ -361,11 +374,10 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
ResponseType response_;
bool (ServerRpcContextStreamingImpl::*next_state_)(bool);
std::function<void(
- ServerContextType *,
- grpc::ServerAsyncReaderWriter<ResponseType, RequestType> *, void *)>
+ ServerContextType*,
+ grpc::ServerAsyncReaderWriter<ResponseType, RequestType>*, void*)>
request_method_;
- std::function<grpc::Status(const RequestType *, ResponseType *)>
- invoke_method_;
+ std::function<grpc::Status(RequestType*, ResponseType*)> invoke_method_;
grpc::ServerAsyncReaderWriter<ResponseType, RequestType> stream_;
};
@@ -373,12 +385,11 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
: public ServerRpcContext {
public:
ServerRpcContextStreamingFromClientImpl(
- std::function<void(ServerContextType *,
- grpc::ServerAsyncReader<ResponseType, RequestType> *,
- void *)>
+ std::function<void(ServerContextType*,
+ grpc::ServerAsyncReader<ResponseType, RequestType>*,
+ void*)>
request_method,
- std::function<grpc::Status(const RequestType *, ResponseType *)>
- invoke_method)
+ std::function<grpc::Status(RequestType*, ResponseType*)> invoke_method)
: srv_ctx_(new ServerContextType),
next_state_(&ServerRpcContextStreamingFromClientImpl::request_done),
request_method_(request_method),
@@ -431,12 +442,11 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
RequestType req_;
ResponseType response_;
bool (ServerRpcContextStreamingFromClientImpl::*next_state_)(bool);
- std::function<void(ServerContextType *,
- grpc::ServerAsyncReader<ResponseType, RequestType> *,
- void *)>
+ std::function<void(ServerContextType*,
+ grpc::ServerAsyncReader<ResponseType, RequestType>*,
+ void*)>
request_method_;
- std::function<grpc::Status(const RequestType *, ResponseType *)>
- invoke_method_;
+ std::function<grpc::Status(RequestType*, ResponseType*)> invoke_method_;
grpc::ServerAsyncReader<ResponseType, RequestType> stream_;
};
@@ -444,11 +454,10 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
: public ServerRpcContext {
public:
ServerRpcContextStreamingFromServerImpl(
- std::function<void(ServerContextType *, RequestType *,
- grpc::ServerAsyncWriter<ResponseType> *, void *)>
+ std::function<void(ServerContextType*, RequestType*,
+ grpc::ServerAsyncWriter<ResponseType>*, void*)>
request_method,
- std::function<grpc::Status(const RequestType *, ResponseType *)>
- invoke_method)
+ std::function<grpc::Status(RequestType*, ResponseType*)> invoke_method)
: srv_ctx_(new ServerContextType),
next_state_(&ServerRpcContextStreamingFromServerImpl::request_done),
request_method_(request_method),
@@ -501,11 +510,10 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
RequestType req_;
ResponseType response_;
bool (ServerRpcContextStreamingFromServerImpl::*next_state_)(bool);
- std::function<void(ServerContextType *, RequestType *,
- grpc::ServerAsyncWriter<ResponseType> *, void *)>
+ std::function<void(ServerContextType*, RequestType*,
+ grpc::ServerAsyncWriter<ResponseType>*, void*)>
request_method_;
- std::function<grpc::Status(const RequestType *, ResponseType *)>
- invoke_method_;
+ std::function<grpc::Status(RequestType*, ResponseType*)> invoke_method_;
grpc::ServerAsyncWriter<ResponseType> stream_;
};
@@ -525,30 +533,34 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
std::vector<std::unique_ptr<PerThreadShutdownState>> shutdown_state_;
};
-static void RegisterBenchmarkService(ServerBuilder *builder,
- BenchmarkService::AsyncService *service) {
+static void RegisterBenchmarkService(ServerBuilder* builder,
+ BenchmarkService::AsyncService* service) {
builder->RegisterService(service);
}
-static void RegisterGenericService(ServerBuilder *builder,
- grpc::AsyncGenericService *service) {
+static void RegisterGenericService(ServerBuilder* builder,
+ grpc::AsyncGenericService* service) {
builder->RegisterAsyncGenericService(service);
}
-static Status ProcessSimpleRPC(const PayloadConfig &,
- const SimpleRequest *request,
- SimpleResponse *response) {
+static Status ProcessSimpleRPC(const PayloadConfig&, SimpleRequest* request,
+ SimpleResponse* response) {
if (request->response_size() > 0) {
if (!Server::SetPayload(request->response_type(), request->response_size(),
response->mutable_payload())) {
return Status(grpc::StatusCode::INTERNAL, "Error creating payload.");
}
}
+ // We are done using the request. Clear it to reduce working memory.
+ // This has been shown to reduce cache misses for large message sizes.
+ request->Clear();
return Status::OK;
}
-static Status ProcessGenericRPC(const PayloadConfig &payload_config,
- const ByteBuffer *request,
- ByteBuffer *response) {
+static Status ProcessGenericRPC(const PayloadConfig& payload_config,
+ ByteBuffer* request, ByteBuffer* response) {
+ // We are done using the request. Clear it to reduce working memory.
+ // This has been shown to reduce cache misses for large message sizes.
+ request->Clear();
int resp_size = payload_config.bytebuf_params().resp_size();
std::unique_ptr<char[]> buf(new char[resp_size]);
Slice slice(buf.get(), resp_size);
@@ -556,7 +568,7 @@ static Status ProcessGenericRPC(const PayloadConfig &payload_config,
return Status::OK;
}
-std::unique_ptr<Server> CreateAsyncServer(const ServerConfig &config) {
+std::unique_ptr<Server> CreateAsyncServer(const ServerConfig& config) {
return std::unique_ptr<Server>(
new AsyncQpsServerTest<SimpleRequest, SimpleResponse,
BenchmarkService::AsyncService,
@@ -569,7 +581,7 @@ std::unique_ptr<Server> CreateAsyncServer(const ServerConfig &config) {
&BenchmarkService::AsyncService::RequestStreamingBothWays,
ProcessSimpleRPC));
}
-std::unique_ptr<Server> CreateAsyncGenericServer(const ServerConfig &config) {
+std::unique_ptr<Server> CreateAsyncGenericServer(const ServerConfig& config) {
return std::unique_ptr<Server>(
new AsyncQpsServerTest<ByteBuffer, ByteBuffer, grpc::AsyncGenericService,
grpc::GenericServerContext>(
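The request->Clear() calls added in ProcessSimpleRPC and ProcessGenericRPC release the (potentially large) request payload as soon as its fields have been read, shrinking the per-RPC working set. A condensed sketch of the same pattern, assuming the SimpleRequest/SimpleResponse benchmark protos and a hypothetical handler name:

static grpc::Status HandleSimple(grpc::testing::SimpleRequest* request,
                                 grpc::testing::SimpleResponse* response) {
  // Read what the reply needs before clearing the request.
  const int resp_size = request->response_size();
  request->Clear();  // drop the request payload before building the response
  if (resp_size > 0) {
    response->mutable_payload()->set_body(std::string(resp_size, '\0'));
  }
  return grpc::Status::OK;
}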
diff --git a/test/cpp/qps/server_sync.cc b/test/cpp/qps/server_sync.cc
index 9954e2c0bf..ea89a30e2e 100644
--- a/test/cpp/qps/server_sync.cc
+++ b/test/cpp/qps/server_sync.cc
@@ -156,12 +156,15 @@ class SynchronousServer final : public grpc::testing::Server {
explicit SynchronousServer(const ServerConfig& config) : Server(config) {
ServerBuilder builder;
- char* server_address = NULL;
-
- gpr_join_host_port(&server_address, "::", port());
- builder.AddListeningPort(server_address,
- Server::CreateServerCredentials(config));
- gpr_free(server_address);
+ auto port_num = port();
+ // Negative port number means inproc server, so no listen port needed
+ if (port_num >= 0) {
+ char* server_address = nullptr;
+ gpr_join_host_port(&server_address, "::", port_num);
+ builder.AddListeningPort(server_address,
+ Server::CreateServerCredentials(config));
+ gpr_free(server_address);
+ }
ApplyConfigToBuilder(config, &builder);
@@ -170,6 +173,11 @@ class SynchronousServer final : public grpc::testing::Server {
impl_ = builder.BuildAndStart();
}
+ std::shared_ptr<Channel> InProcessChannel(
+ const ChannelArguments& args) override {
+ return impl_->InProcessChannel(args);
+ }
+
private:
BenchmarkServiceImpl service_;
std::unique_ptr<grpc::Server> impl_;
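With a negative port the synchronous server now skips AddListeningPort entirely and is reached through the new InProcessChannel override, which wraps grpc::Server::InProcessChannel. A hedged usage sketch, assuming a server wrapper built in in-process mode and the generated BenchmarkService stub:

static void CallInProcess(grpc::testing::Server* server) {
  grpc::ChannelArguments args;
  std::shared_ptr<grpc::Channel> channel = server->InProcessChannel(args);
  auto stub = grpc::testing::BenchmarkService::NewStub(channel);
  grpc::ClientContext ctx;
  grpc::testing::SimpleRequest request;
  grpc::testing::SimpleResponse response;
  grpc::Status status = stub->UnaryCall(&ctx, request, &response);  // no TCP hop
}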
diff --git a/test/cpp/qps/worker.cc b/test/cpp/qps/worker.cc
index 27010b7315..38287464d9 100644
--- a/test/cpp/qps/worker.cc
+++ b/test/cpp/qps/worker.cc
@@ -20,6 +20,7 @@
#include <chrono>
#include <thread>
+#include <vector>
#include <gflags/gflags.h>
#include <grpc/grpc.h>
@@ -41,6 +42,8 @@ static void sigint_handler(int x) { got_sigint = true; }
namespace grpc {
namespace testing {
+std::vector<grpc::testing::Server*>* g_inproc_servers = nullptr;
+
static void RunServer() {
QpsWorker worker(FLAGS_driver_port, FLAGS_server_port, FLAGS_credential_type);
diff --git a/test/cpp/test/server_context_test_spouse_test.cc b/test/cpp/test/server_context_test_spouse_test.cc
index c1ddb0019b..d1dc9d7cac 100644
--- a/test/cpp/test/server_context_test_spouse_test.cc
+++ b/test/cpp/test/server_context_test_spouse_test.cc
@@ -87,7 +87,7 @@ TEST(ServerContextTestSpouseTest, TrailingMetadata) {
ASSERT_EQ(metadata, spouse.GetTrailingMetadata());
}
-} // namespace
+} // namespace testing
} // namespace grpc
int main(int argc, char** argv) {
diff --git a/test/cpp/thread_manager/thread_manager_test.cc b/test/cpp/thread_manager/thread_manager_test.cc
index 85e58f466e..8282d46694 100644
--- a/test/cpp/thread_manager/thread_manager_test.cc
+++ b/test/cpp/thread_manager/thread_manager_test.cc
@@ -37,8 +37,8 @@ class ThreadManagerTest final : public grpc::ThreadManager {
num_poll_for_work_(0),
num_work_found_(0) {}
- grpc::ThreadManager::WorkStatus PollForWork(void **tag, bool *ok) override;
- void DoWork(void *tag, bool ok) override;
+ grpc::ThreadManager::WorkStatus PollForWork(void** tag, bool* ok) override;
+ void DoWork(void* tag, bool ok) override;
void PerformTest();
private:
@@ -65,8 +65,8 @@ void ThreadManagerTest::SleepForMs(int duration_ms) {
gpr_sleep_until(sleep_time);
}
-grpc::ThreadManager::WorkStatus ThreadManagerTest::PollForWork(void **tag,
- bool *ok) {
+grpc::ThreadManager::WorkStatus ThreadManagerTest::PollForWork(void** tag,
+ bool* ok) {
int call_num = gpr_atm_no_barrier_fetch_add(&num_poll_for_work_, 1);
if (call_num >= kMaxNumPollForWork) {
@@ -89,7 +89,7 @@ grpc::ThreadManager::WorkStatus ThreadManagerTest::PollForWork(void **tag,
}
}
-void ThreadManagerTest::DoWork(void *tag, bool ok) {
+void ThreadManagerTest::DoWork(void* tag, bool ok) {
gpr_atm_no_barrier_fetch_add(&num_do_work_, 1);
SleepForMs(kDoWorkDurationMsec); // Simulate doing work by sleeping
}
@@ -110,8 +110,8 @@ void ThreadManagerTest::PerformTest() {
}
} // namespace grpc
-int main(int argc, char **argv) {
- std::srand(std::time(NULL));
+int main(int argc, char** argv) {
+ std::srand(std::time(nullptr));
grpc::testing::InitTest(&argc, &argv, true);
grpc::ThreadManagerTest test_rpc_manager;
diff --git a/test/cpp/util/create_test_channel.cc b/test/cpp/util/create_test_channel.cc
index 34b6d60d01..4d047473b9 100644
--- a/test/cpp/util/create_test_channel.cc
+++ b/test/cpp/util/create_test_channel.cc
@@ -74,7 +74,7 @@ std::shared_ptr<Channel> CreateTestChannel(
ChannelArguments channel_args(args);
std::shared_ptr<ChannelCredentials> channel_creds;
if (cred_type.empty()) {
- return CreateChannel(server, InsecureChannelCredentials());
+ return CreateCustomChannel(server, InsecureChannelCredentials(), args);
} else if (cred_type == testing::kTlsCredentialsType) { // cred_type == "ssl"
if (use_prod_roots) {
gpr_once_init(&g_once_init_add_prod_ssl_provider, &AddProdSslType);
@@ -101,7 +101,7 @@ std::shared_ptr<Channel> CreateTestChannel(
cred_type, &channel_args);
GPR_ASSERT(channel_creds != nullptr);
- return CreateChannel(server, channel_creds);
+ return CreateCustomChannel(server, channel_creds, args);
}
}
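Switching from CreateChannel to CreateCustomChannel means the ChannelArguments passed into CreateTestChannel are actually applied instead of being dropped. A minimal sketch (target and argument values are arbitrary):

static std::shared_ptr<grpc::Channel> MakeChannelWithArgs() {
  grpc::ChannelArguments args;
  args.SetInt(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH, 16 * 1024 * 1024);
  // CreateCustomChannel forwards args; plain CreateChannel would ignore them.
  return grpc::CreateCustomChannel("localhost:50051",
                                   grpc::InsecureChannelCredentials(), args);
}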
diff --git a/test/cpp/util/error_details_test.cc b/test/cpp/util/error_details_test.cc
index 69a6876a3f..16a00fb201 100644
--- a/test/cpp/util/error_details_test.cc
+++ b/test/cpp/util/error_details_test.cc
@@ -82,7 +82,7 @@ TEST(SetTest, NullInput) {
TEST(SetTest, OutOfScopeErrorCode) {
google::rpc::Status expected;
- expected.set_code(20); // Out of scope (DATA_LOSS is 15).
+ expected.set_code(17); // Out of scope (UNAUTHENTICATED is 16).
expected.set_message("I am an error message");
testing::EchoRequest expected_details;
expected_details.set_message(grpc::string(100, '\0'));
@@ -96,6 +96,24 @@ TEST(SetTest, OutOfScopeErrorCode) {
EXPECT_EQ(expected.SerializeAsString(), to.error_details());
}
+TEST(SetTest, ValidScopeErrorCode) {
+ for (int c = StatusCode::OK; c <= StatusCode::UNAUTHENTICATED; c++) {
+ google::rpc::Status expected;
+ expected.set_code(c);
+ expected.set_message("I am an error message");
+ testing::EchoRequest expected_details;
+ expected_details.set_message(grpc::string(100, '\0'));
+ expected.add_details()->PackFrom(expected_details);
+
+ Status to;
+ Status s = SetErrorDetails(expected, &to);
+ EXPECT_TRUE(s.ok());
+ EXPECT_EQ(c, to.error_code());
+ EXPECT_EQ(expected.message(), to.error_message());
+ EXPECT_EQ(expected.SerializeAsString(), to.error_details());
+ }
+}
+
} // namespace
} // namespace grpc
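The new ValidScopeErrorCode test walks every code from OK through UNAUTHENTICATED and checks that SetErrorDetails preserves the code, the message, and the serialized google::rpc::Status. A minimal usage sketch (message text is arbitrary):

static void DemoSetErrorDetails() {
  google::rpc::Status rich;
  rich.set_code(grpc::StatusCode::NOT_FOUND);
  rich.set_message("resource missing");
  grpc::Status to;
  grpc::Status s = grpc::SetErrorDetails(rich, &to);
  // On success, to.error_code() is NOT_FOUND, to.error_message() is
  // "resource missing", and to.error_details() holds rich.SerializeAsString().
}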
diff --git a/test/cpp/util/grpc_tool.cc b/test/cpp/util/grpc_tool.cc
index cd6084ac6d..a6d08cd83c 100644
--- a/test/cpp/util/grpc_tool.cc
+++ b/test/cpp/util/grpc_tool.cc
@@ -230,7 +230,7 @@ const Command* FindCommand(const grpc::string& name) {
return &ops[i];
}
}
- return NULL;
+ return nullptr;
}
} // namespace
@@ -245,13 +245,13 @@ int GrpcToolMainLib(int argc, const char** argv, const CliCredentials& cred,
argv += 2;
const Command* cmd = FindCommand(command);
- if (cmd != NULL) {
+ if (cmd != nullptr) {
GrpcTool grpc_tool;
if (argc < cmd->min_args || argc > cmd->max_args) {
// Force the command to print its usage message
fprintf(stderr, "\nWrong number of arguments for %s\n", command.c_str());
grpc_tool.SetPrintCommandMode(1);
- return cmd->function(&grpc_tool, -1, NULL, cred, callback);
+ return cmd->function(&grpc_tool, -1, nullptr, cred, callback);
}
const bool ok = cmd->function(&grpc_tool, argc, argv, cred, callback);
return ok ? 0 : 1;
@@ -281,11 +281,11 @@ bool GrpcTool::Help(int argc, const char** argv, const CliCredentials& cred,
Usage("");
} else {
const Command* cmd = FindCommand(argv[0]);
- if (cmd == NULL) {
+ if (cmd == nullptr) {
Usage("Unknown command '" + grpc::string(argv[0]) + "'");
}
SetPrintCommandMode(0);
- cmd->function(this, -1, NULL, cred, callback);
+ cmd->function(this, -1, nullptr, cred, callback);
}
return true;
}
diff --git a/test/cpp/util/grpc_tool.h b/test/cpp/util/grpc_tool.h
index 076ce530d9..a10422f882 100644
--- a/test/cpp/util/grpc_tool.h
+++ b/test/cpp/util/grpc_tool.h
@@ -28,9 +28,9 @@
namespace grpc {
namespace testing {
-typedef std::function<bool(const grpc::string &)> GrpcToolOutputCallback;
+typedef std::function<bool(const grpc::string&)> GrpcToolOutputCallback;
-int GrpcToolMainLib(int argc, const char **argv, const CliCredentials &cred,
+int GrpcToolMainLib(int argc, const char** argv, const CliCredentials& cred,
GrpcToolOutputCallback callback);
} // namespace testing
diff --git a/test/cpp/util/grpc_tool_test.cc b/test/cpp/util/grpc_tool_test.cc
index d0b3d7b81b..1c07b2a8ef 100644
--- a/test/cpp/util/grpc_tool_test.cc
+++ b/test/cpp/util/grpc_tool_test.cc
@@ -395,7 +395,7 @@ TEST_F(GrpcToolTest, CallCommand) {
std::bind(PrintStream, &output_stream,
std::placeholders::_1)));
// Expected output: "message: \"Hello\""
- EXPECT_TRUE(NULL !=
+ EXPECT_TRUE(nullptr !=
strstr(output_stream.str().c_str(), "message: \"Hello\""));
ShutdownServer();
}
@@ -421,9 +421,9 @@ TEST_F(GrpcToolTest, CallCommandBatch) {
// Expected output: "message: "Hello0"\nmessage: "Hello1"\nmessage:
// "Hello2"\n"
- EXPECT_TRUE(NULL != strstr(output_stream.str().c_str(),
- "message: \"Hello0\"\nmessage: "
- "\"Hello1\"\nmessage: \"Hello2\"\n"));
+ EXPECT_TRUE(nullptr != strstr(output_stream.str().c_str(),
+ "message: \"Hello0\"\nmessage: "
+ "\"Hello1\"\nmessage: \"Hello2\"\n"));
std::cin.rdbuf(orig);
ShutdownServer();
}
@@ -448,8 +448,8 @@ TEST_F(GrpcToolTest, CallCommandBatchWithBadRequest) {
FLAGS_batch = false;
// Expected output: "message: "Hello0"\nmessage: "Hello2"\n"
- EXPECT_TRUE(NULL != strstr(output_stream.str().c_str(),
- "message: \"Hello0\"\nmessage: \"Hello2\"\n"));
+ EXPECT_TRUE(nullptr != strstr(output_stream.str().c_str(),
+ "message: \"Hello0\"\nmessage: \"Hello2\"\n"));
std::cin.rdbuf(orig);
ShutdownServer();
}
@@ -473,8 +473,8 @@ TEST_F(GrpcToolTest, CallCommandRequestStream) {
std::placeholders::_1)));
// Expected output: "message: \"Hello0Hello1Hello2\""
- EXPECT_TRUE(NULL != strstr(output_stream.str().c_str(),
- "message: \"Hello0Hello1Hello2\""));
+ EXPECT_TRUE(nullptr != strstr(output_stream.str().c_str(),
+ "message: \"Hello0Hello1Hello2\""));
std::cin.rdbuf(orig);
ShutdownServer();
}
@@ -498,7 +498,7 @@ TEST_F(GrpcToolTest, CallCommandRequestStreamWithBadRequest) {
std::placeholders::_1)));
// Expected output: "message: \"Hello0Hello2\""
- EXPECT_TRUE(NULL !=
+ EXPECT_TRUE(nullptr !=
strstr(output_stream.str().c_str(), "message: \"Hello0Hello2\""));
std::cin.rdbuf(orig);
ShutdownServer();
@@ -521,8 +521,8 @@ TEST_F(GrpcToolTest, CallCommandResponseStream) {
for (int i = 0; i < kServerDefaultResponseStreamsToSend; i++) {
grpc::string expected_response_text =
"message: \"Hello" + grpc::to_string(i) + "\"\n";
- EXPECT_TRUE(NULL != strstr(output_stream.str().c_str(),
- expected_response_text.c_str()));
+ EXPECT_TRUE(nullptr != strstr(output_stream.str().c_str(),
+ expected_response_text.c_str()));
}
ShutdownServer();
@@ -547,9 +547,9 @@ TEST_F(GrpcToolTest, CallCommandBidiStream) {
// Expected output: "message: \"Hello0\"\nmessage: \"Hello1\"\nmessage:
// \"Hello2\"\n\n"
- EXPECT_TRUE(NULL != strstr(output_stream.str().c_str(),
- "message: \"Hello0\"\nmessage: "
- "\"Hello1\"\nmessage: \"Hello2\"\n"));
+ EXPECT_TRUE(nullptr != strstr(output_stream.str().c_str(),
+ "message: \"Hello0\"\nmessage: "
+ "\"Hello1\"\nmessage: \"Hello2\"\n"));
std::cin.rdbuf(orig);
ShutdownServer();
}
@@ -573,8 +573,8 @@ TEST_F(GrpcToolTest, CallCommandBidiStreamWithBadRequest) {
// Expected output: "message: \"Hello0\"\nmessage: \"Hello1\"\nmessage:
// \"Hello2\"\n\n"
- EXPECT_TRUE(NULL != strstr(output_stream.str().c_str(),
- "message: \"Hello0\"\nmessage: \"Hello2\"\n"));
+ EXPECT_TRUE(nullptr != strstr(output_stream.str().c_str(),
+ "message: \"Hello0\"\nmessage: \"Hello2\"\n"));
std::cin.rdbuf(orig);
ShutdownServer();
diff --git a/test/cpp/util/proto_reflection_descriptor_database.cc b/test/cpp/util/proto_reflection_descriptor_database.cc
index 184828c7b6..0f77934672 100644
--- a/test/cpp/util/proto_reflection_descriptor_database.cc
+++ b/test/cpp/util/proto_reflection_descriptor_database.cc
@@ -22,11 +22,11 @@
#include <grpc/support/log.h>
+using grpc::reflection::v1alpha::ErrorResponse;
+using grpc::reflection::v1alpha::ListServiceResponse;
using grpc::reflection::v1alpha::ServerReflection;
using grpc::reflection::v1alpha::ServerReflectionRequest;
using grpc::reflection::v1alpha::ServerReflectionResponse;
-using grpc::reflection::v1alpha::ListServiceResponse;
-using grpc::reflection::v1alpha::ErrorResponse;
namespace grpc {
diff --git a/test/cpp/util/service_describer.h b/test/cpp/util/service_describer.h
index b7a8c9207f..b7ab7578b4 100644
--- a/test/cpp/util/service_describer.h
+++ b/test/cpp/util/service_describer.h
@@ -36,7 +36,7 @@ grpc::string SummarizeService(const grpc::protobuf::ServiceDescriptor* service);
grpc::string SummarizeMethod(const grpc::protobuf::MethodDescriptor* method);
-} // namespase testing
+} // namespace testing
} // namespace grpc
#endif // GRPC_TEST_CPP_UTIL_SERVICE_DESCRIBER_H
diff --git a/test/cpp/util/test_config_cc.cc b/test/cpp/util/test_config_cc.cc
index f7e034253e..e4b6886335 100644
--- a/test/cpp/util/test_config_cc.cc
+++ b/test/cpp/util/test_config_cc.cc
@@ -16,8 +16,8 @@
*
*/
-#include "test/cpp/util/test_config.h"
#include <gflags/gflags.h>
+#include "test/cpp/util/test_config.h"
// In some distros, gflags is in the namespace google, and in some others,
// in gflags. This hack enables us to find both.