author     Craig Tiller <ctiller@google.com>  2017-09-21 15:12:43 -0700
committer  Craig Tiller <ctiller@google.com>  2017-09-21 15:12:43 -0700
commit     6bfb8589ad1f814a2d167db1cf0084e4b935ea45 (patch)
tree       9afee83b00f5269e3cb14fb5534e8ddbf2c9fdc6
parent     f7225eb5f67d766726a17a1623c1975dcdd57c26 (diff)
parent     a1f7f513a78c9223354ff92a57a723a08d2d82d2 (diff)
Merge github.com:grpc/grpc into flowctl+millis
-rw-r--r--   test/cpp/end2end/grpclb_end2end_test.cc         124
-rwxr-xr-x   tools/gce/linux_performance_worker_init.sh        5
-rwxr-xr-x   tools/run_tests/performance/run_worker_php.sh    28
-rw-r--r--   tools/run_tests/performance/scenario_config.py   28
4 files changed, 159 insertions, 26 deletions
diff --git a/test/cpp/end2end/grpclb_end2end_test.cc b/test/cpp/end2end/grpclb_end2end_test.cc
index 570a3d1067..77ed155292 100644
--- a/test/cpp/end2end/grpclb_end2end_test.cc
+++ b/test/cpp/end2end/grpclb_end2end_test.cc
@@ -398,11 +398,40 @@ class GrpclbEnd2endTest : public ::testing::Test {
return true;
}
- void WaitForAllBackends() {
+ void SendRpcAndCount(int* num_total, int* num_ok, int* num_failure,
+ int* num_drops) {
+ const Status status = SendRpc();
+ if (status.ok()) {
+ ++*num_ok;
+ } else {
+ if (status.error_message() == "Call dropped by load balancing policy") {
+ ++*num_drops;
+ } else {
+ ++*num_failure;
+ }
+ }
+ ++*num_total;
+ }
+
+ std::tuple<int, int, int> WaitForAllBackends(
+ int num_requests_multiple_of = 1) {
+ int num_ok = 0;
+ int num_failure = 0;
+ int num_drops = 0;
+ int num_total = 0;
while (!SeenAllBackends()) {
- CheckRpcSendOk();
+ SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_drops);
+ }
+ while (num_total % num_requests_multiple_of != 0) {
+ SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_drops);
}
ResetBackendCounters();
+ gpr_log(GPR_INFO,
+ "Performed %d warm up requests (a multiple of %d) against the "
+ "backends. %d succeeded, %d failed, %d dropped.",
+ num_total, num_requests_multiple_of, num_ok, num_failure,
+ num_drops);
+ return std::make_tuple(num_ok, num_failure, num_drops);
}
void WaitForBackend(size_t backend_idx) {
@@ -556,10 +585,8 @@ TEST_F(SingleBalancerTest, Vanilla) {
0);
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
-
// We need to wait for all backends to come online.
WaitForAllBackends();
-
// Send kNumRpcsPerAddress RPCs per server.
CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
@@ -863,13 +890,22 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
TEST_F(SingleBalancerTest, Drop) {
const size_t kNumRpcsPerAddress = 100;
+ const int num_of_drop_by_rate_limiting_addresses = 1;
+ const int num_of_drop_by_load_balancing_addresses = 2;
+ const int num_of_drop_addresses = num_of_drop_by_rate_limiting_addresses +
+ num_of_drop_by_load_balancing_addresses;
+ const int num_total_addresses = num_backends_ + num_of_drop_addresses;
ScheduleResponseForBalancer(
0, BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(), {{"rate_limiting", 1}, {"load_balancing", 2}}),
+ GetBackendPorts(),
+ {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
+ {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
0);
+ // Wait until all backends are ready.
+ WaitForAllBackends();
// Send kNumRpcsPerAddress RPCs for each server and drop address.
size_t num_drops = 0;
- for (size_t i = 0; i < kNumRpcsPerAddress * (num_backends_ + 3); ++i) {
+ for (size_t i = 0; i < kNumRpcsPerAddress * num_total_addresses; ++i) {
EchoResponse response;
const Status status = SendRpc(&response);
if (!status.ok() &&
@@ -881,7 +917,7 @@ TEST_F(SingleBalancerTest, Drop) {
EXPECT_EQ(response.message(), kMessage_);
}
}
- EXPECT_EQ(kNumRpcsPerAddress * 3, num_drops);
+ EXPECT_EQ(kNumRpcsPerAddress * num_of_drop_addresses, num_drops);
// Each backend should have gotten 100 requests.
for (size_t i = 0; i < backends_.size(); ++i) {
@@ -896,9 +932,12 @@ TEST_F(SingleBalancerTest, Drop) {
TEST_F(SingleBalancerTest, DropAllFirst) {
// All registered addresses are marked as "drop".
+ const int num_of_drop_by_rate_limiting_addresses = 1;
+ const int num_of_drop_by_load_balancing_addresses = 1;
ScheduleResponseForBalancer(
0, BalancerServiceImpl::BuildResponseForBackends(
- {}, {{"rate_limiting", 1}, {"load_balancing", 1}}),
+ {}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
+ {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
0);
const Status status = SendRpc();
EXPECT_FALSE(status.ok());
@@ -909,9 +948,12 @@ TEST_F(SingleBalancerTest, DropAll) {
ScheduleResponseForBalancer(
0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
0);
+ const int num_of_drop_by_rate_limiting_addresses = 1;
+ const int num_of_drop_by_load_balancing_addresses = 1;
ScheduleResponseForBalancer(
0, BalancerServiceImpl::BuildResponseForBackends(
- {}, {{"rate_limiting", 1}, {"load_balancing", 1}}),
+ {}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
+ {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
1000);
// First call succeeds.
@@ -936,6 +978,11 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) {
ScheduleResponseForBalancer(
0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
0);
+ // Wait until all backends are ready.
+ int num_ok = 0;
+ int num_failure = 0;
+ int num_drops = 0;
+ std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends();
// Send kNumRpcsPerAddress RPCs per server.
CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
// Each backend should have gotten 100 requests.
@@ -950,24 +997,39 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) {
EXPECT_EQ(1U, balancer_servers_[0].service_->response_count());
const ClientStats client_stats = WaitForLoadReports();
- EXPECT_EQ(kNumRpcsPerAddress * num_backends_, client_stats.num_calls_started);
- EXPECT_EQ(kNumRpcsPerAddress * num_backends_,
+ EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok,
+ client_stats.num_calls_started);
+ EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok,
client_stats.num_calls_finished);
EXPECT_EQ(0U, client_stats.num_calls_finished_with_client_failed_to_send);
- EXPECT_EQ(kNumRpcsPerAddress * num_backends_,
+ EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + (num_ok + num_drops),
client_stats.num_calls_finished_known_received);
EXPECT_THAT(client_stats.drop_token_counts, ::testing::ElementsAre());
}
TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) {
const size_t kNumRpcsPerAddress = 3;
+ const int num_of_drop_by_rate_limiting_addresses = 2;
+ const int num_of_drop_by_load_balancing_addresses = 1;
+ const int num_of_drop_addresses = num_of_drop_by_rate_limiting_addresses +
+ num_of_drop_by_load_balancing_addresses;
+ const int num_total_addresses = num_backends_ + num_of_drop_addresses;
ScheduleResponseForBalancer(
0, BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(), {{"rate_limiting", 2}, {"load_balancing", 1}}),
+ GetBackendPorts(),
+ {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
+ {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
0);
-
+ // Wait until all backends are ready.
+ int num_warmup_ok = 0;
+ int num_warmup_failure = 0;
+ int num_warmup_drops = 0;
+ std::tie(num_warmup_ok, num_warmup_failure, num_warmup_drops) =
+ WaitForAllBackends(num_total_addresses /* num_requests_multiple_of */);
+ const int num_total_warmup_requests =
+ num_warmup_ok + num_warmup_failure + num_warmup_drops;
size_t num_drops = 0;
- for (size_t i = 0; i < kNumRpcsPerAddress * (num_backends_ + 3); ++i) {
+ for (size_t i = 0; i < kNumRpcsPerAddress * num_total_addresses; ++i) {
EchoResponse response;
const Status status = SendRpc(&response);
if (!status.ok() &&
@@ -979,8 +1041,7 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) {
EXPECT_EQ(response.message(), kMessage_);
}
}
- EXPECT_EQ(kNumRpcsPerAddress * 3, num_drops);
-
+ EXPECT_EQ(kNumRpcsPerAddress * num_of_drop_addresses, num_drops);
// Each backend should have gotten 100 requests.
for (size_t i = 0; i < backends_.size(); ++i) {
EXPECT_EQ(kNumRpcsPerAddress,
@@ -993,17 +1054,28 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) {
EXPECT_EQ(1U, balancer_servers_[0].service_->response_count());
const ClientStats client_stats = WaitForLoadReports();
- EXPECT_EQ(kNumRpcsPerAddress * (num_backends_ + 3),
- client_stats.num_calls_started);
- EXPECT_EQ(kNumRpcsPerAddress * (num_backends_ + 3),
- client_stats.num_calls_finished);
+ EXPECT_EQ(
+ kNumRpcsPerAddress * num_total_addresses + num_total_warmup_requests,
+ client_stats.num_calls_started);
+ EXPECT_EQ(
+ kNumRpcsPerAddress * num_total_addresses + num_total_warmup_requests,
+ client_stats.num_calls_finished);
EXPECT_EQ(0U, client_stats.num_calls_finished_with_client_failed_to_send);
- EXPECT_EQ(kNumRpcsPerAddress * num_backends_,
+ EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_warmup_ok,
client_stats.num_calls_finished_known_received);
- EXPECT_THAT(client_stats.drop_token_counts,
- ::testing::ElementsAre(
- ::testing::Pair("load_balancing", kNumRpcsPerAddress),
- ::testing::Pair("rate_limiting", kNumRpcsPerAddress * 2)));
+  // The number of warmup requests is a multiple of the number of addresses.
+ // Therefore, all addresses in the scheduled balancer response are hit the
+ // same number of times.
+ const int num_times_drop_addresses_hit =
+ num_warmup_drops / num_of_drop_addresses;
+ EXPECT_THAT(
+ client_stats.drop_token_counts,
+ ::testing::ElementsAre(
+ ::testing::Pair("load_balancing",
+ (kNumRpcsPerAddress + num_times_drop_addresses_hit)),
+ ::testing::Pair(
+ "rate_limiting",
+ (kNumRpcsPerAddress + num_times_drop_addresses_hit) * 2)));
}
} // namespace
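
The client-load-reporting Drop test above depends on the warm-up total being a multiple of the total number of addresses, so that every drop entry is hit the same number of times and num_warmup_drops can simply be divided by num_of_drop_addresses. Below is a minimal sketch of that accounting with assumed numbers (4 backends, all backends seen after 9 warm-up RPCs) and an assumed even spread of requests over addresses; it is written in Python rather than C++ purely for brevity and is not part of the commit.

    # Illustrative sketch of the warm-up accounting behind
    # WaitForAllBackends(num_requests_multiple_of) and the drop_token_counts check.
    NUM_BACKENDS = 4                                     # assumed for illustration
    DROPS = {'rate_limiting': 2, 'load_balancing': 1}    # from the Drop test above
    K_NUM_RPCS_PER_ADDRESS = 3

    num_drop_addresses = sum(DROPS.values())                  # 3
    num_total_addresses = NUM_BACKENDS + num_drop_addresses   # 7

    # WaitForAllBackends sends until every backend has been seen, then keeps
    # sending until the warm-up total is a multiple of num_total_addresses.
    num_warmup_total = 9                 # assumed point at which all backends were seen
    while num_warmup_total % num_total_addresses != 0:
        num_warmup_total += 1            # rounds 9 up to 14

    # With an even spread, every address (backend or drop entry) is hit equally.
    num_warmup_drops = (num_warmup_total // num_total_addresses) * num_drop_addresses  # 6
    num_times_drop_addresses_hit = num_warmup_drops // num_drop_addresses              # 2

    # After the main loop of kNumRpcsPerAddress * num_total_addresses RPCs, each
    # drop token is reported (kNumRpcsPerAddress + warm-up hits) times per address.
    expected_drop_token_counts = {
        token: (K_NUM_RPCS_PER_ADDRESS + num_times_drop_addresses_hit) * count
        for token, count in DROPS.items()
    }
    print(expected_drop_token_counts)   # {'rate_limiting': 10, 'load_balancing': 5}

These are the same (kNumRpcsPerAddress + num_times_drop_addresses_hit) terms that appear in the EXPECT_THAT on drop_token_counts above.
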
diff --git a/tools/gce/linux_performance_worker_init.sh b/tools/gce/linux_performance_worker_init.sh
index 8f0a0f65c3..88d8de7402 100755
--- a/tools/gce/linux_performance_worker_init.sh
+++ b/tools/gce/linux_performance_worker_init.sh
@@ -128,6 +128,11 @@ ruby -v
# Install bundler (prerequisite for gRPC Ruby)
gem install bundler
+# PHP dependencies
+sudo apt-get install -y php php-dev phpunit php-pear unzip zlib1g-dev
+curl -sS https://getcomposer.org/installer | php
+sudo mv composer.phar /usr/local/bin/composer
+
# Java dependencies - nothing as we already have Java JDK 8
# Go dependencies
diff --git a/tools/run_tests/performance/run_worker_php.sh b/tools/run_tests/performance/run_worker_php.sh
new file mode 100755
index 0000000000..5d0c4fa4fd
--- /dev/null
+++ b/tools/run_tests/performance/run_worker_php.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source ~/.rvm/scripts/rvm
+set -ex
+
+repo=$(dirname $0)/../../..
+
+# First set up all dependencies needed for the PHP QPS test
+cd $repo
+cd src/php/tests/qps
+composer install
+# The proxy worker for PHP is implemented in Ruby
+cd ../../../..
+ruby src/ruby/qps/proxy-worker.rb $@
+
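
To make the wiring explicit: the script above installs the PHP QPS client's Composer dependencies and then starts a Ruby proxy worker, while the PhpLanguage entry added to scenario_config.py below points the performance driver at this script. The following is a hedged Python sketch of how a driver-style harness could assemble the worker command; the --driver_port flag, the base port, and the start_worker helper are illustrative assumptions, not taken from the harness code.

    # Hedged sketch only: turning the PhpLanguage config (added below in
    # scenario_config.py) into an invocation of run_worker_php.sh.
    import subprocess

    class PhpLanguage:                   # trimmed copy of the class added below
        def worker_cmdline(self):
            return ['tools/run_tests/performance/run_worker_php.sh']
        def worker_port_offset(self):
            return 800                   # keeps PHP workers off other languages' ports

    def start_worker(language, base_port=10000):
        port = base_port + language.worker_port_offset()
        # Assumed flag: QPS workers are typically handed a --driver_port to listen on.
        cmdline = language.worker_cmdline() + ['--driver_port=%d' % port]
        return subprocess.Popen(cmdline)   # run_worker_php.sh passes its arguments
                                           # through to src/ruby/qps/proxy-worker.rb

    # worker = start_worker(PhpLanguage())   # illustrative usage
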
diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py
index a7560400dd..5efc9f5648 100644
--- a/tools/run_tests/performance/scenario_config.py
+++ b/tools/run_tests/performance/scenario_config.py
@@ -800,6 +800,33 @@ class RubyLanguage:
return 'ruby'
+class PhpLanguage:
+
+ def __init__(self):
+ pass
+ self.safename = str(self)
+
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_php.sh']
+
+ def worker_port_offset(self):
+ return 800
+
+ def scenarios(self):
+ yield _ping_pong_scenario(
+ 'php_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
+ client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
+ server_language='c++', async_server_threads=1)
+
+ yield _ping_pong_scenario(
+ 'php_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
+ client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
+ server_language='c++', async_server_threads=1)
+
+ def __str__(self):
+ return 'php'
+
+
class JavaLanguage:
def __init__(self):
@@ -997,6 +1024,7 @@ LANGUAGES = {
'node' : NodeLanguage(),
'node_express': NodeExpressLanguage(),
'ruby' : RubyLanguage(),
+ 'php' : PhpLanguage(),
'java' : JavaLanguage(),
'python' : PythonLanguage(),
'go' : GoLanguage(),