author     murgatroid99 <mlumish@google.com>  2017-01-12 12:39:25 -0800
committer  murgatroid99 <mlumish@google.com>  2017-01-12 12:39:25 -0800
commit     433d3f1d42d527c646f347d9098ac486a65ad83d (patch)
tree       1865eff38ece6b0b4907e630cd0bc2eb24d41919 /tools/run_tests
parent     c34cac28912595814fb35225957eed0ddfd5d610 (diff)
parent     03b22ecbe6c329299d2838088a8a9fc48f6b341b (diff)
Merge branch 'master' into node_electron_build
Diffstat (limited to 'tools/run_tests')
-rw-r--r--  tools/run_tests/README.md                                  |   4
-rw-r--r--  tools/run_tests/generated/sources_and_headers.json         |  55
-rwxr-xr-x  tools/run_tests/performance/bq_upload_result.py            |   8
-rw-r--r--  tools/run_tests/performance/scenario_result_schema.json    |  22
-rwxr-xr-x  tools/run_tests/run_interop_tests.py                       | 140
-rwxr-xr-x  tools/run_tests/run_performance_tests.py                   |  20
-rwxr-xr-x  tools/run_tests/sanity/check_submodules.sh                 |   9
-rwxr-xr-x  tools/run_tests/sanity/core_banned_functions.py            |  60
-rw-r--r--  tools/run_tests/sanity/sanity_tests.yaml                   |   2
9 files changed, 284 insertions, 36 deletions
diff --git a/tools/run_tests/README.md b/tools/run_tests/README.md
index dd727f4309..e709ddd2c0 100644
--- a/tools/run_tests/README.md
+++ b/tools/run_tests/README.md
@@ -22,6 +22,10 @@ The script is also capable of running interop tests for grpc-java and grpc-go, u
######Example
`tools/run_tests/run_interop_tests.py -l csharp -s c++ --use_docker` (run interop tests with C# client and C++ server)
+Note: if you see an error like `no space left on device` when running the
+interop tests using Docker, make sure that Docker is building the image files in
+a location with sufficient disk space.
+
#Performance benchmarks (run_performance_tests.py)
Runs predefined benchmark scenarios for given languages. Besides the simple configuration of running all the scenarios locally,
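A quick way to verify the disk-space concern from the README note above is to check the free space under Docker's data directory. A minimal sketch, assuming the default `/var/lib/docker` data root and a Unix host (check `docker info` for the actual location):

```python
# Report free space where Docker stores its images; the path is an assumed
# default, not taken from this diff.
import os

DOCKER_DATA_ROOT = '/var/lib/docker'  # assumed default; `docker info` shows the real path

def free_gb(path):
    """Free disk space at `path`, in gigabytes (Unix only: uses os.statvfs)."""
    st = os.statvfs(path)
    return st.f_bavail * st.f_frsize / float(1024 ** 3)

if __name__ == '__main__':
    print('Free space under %s: %.1f GB' % (DOCKER_DATA_ROOT, free_gb(DOCKER_DATA_ROOT)))
```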
diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json
index 6ae269cc20..278915e0f0 100644
--- a/tools/run_tests/generated/sources_and_headers.json
+++ b/tools/run_tests/generated/sources_and_headers.json
@@ -2774,6 +2774,23 @@
},
{
"deps": [
+ "grpc",
+ "grpc++",
+ "grpc++_test_config",
+ "grpc++_test_util",
+ "grpc_test_util",
+ "http2_client_main"
+ ],
+ "headers": [],
+ "is_filegroup": false,
+ "language": "c++",
+ "name": "http2_client",
+ "src": [],
+ "third_party": false,
+ "type": "target"
+ },
+ {
+ "deps": [
"gpr",
"gpr_test_util",
"grpc",
@@ -5390,6 +5407,33 @@
},
{
"deps": [
+ "grpc",
+ "grpc++",
+ "grpc++_test_config",
+ "grpc++_test_util",
+ "grpc_test_util"
+ ],
+ "headers": [
+ "src/proto/grpc/testing/empty.grpc.pb.h",
+ "src/proto/grpc/testing/empty.pb.h",
+ "src/proto/grpc/testing/messages.grpc.pb.h",
+ "src/proto/grpc/testing/messages.pb.h",
+ "src/proto/grpc/testing/test.grpc.pb.h",
+ "src/proto/grpc/testing/test.pb.h",
+ "test/cpp/interop/http2_client.h"
+ ],
+ "is_filegroup": false,
+ "language": "c++",
+ "name": "http2_client_main",
+ "src": [
+ "test/cpp/interop/http2_client.cc",
+ "test/cpp/interop/http2_client.h"
+ ],
+ "third_party": false,
+ "type": "lib"
+ },
+ {
+ "deps": [
"gpr",
"grpc",
"grpc++",
@@ -5447,6 +5491,7 @@
"gpr",
"grpc",
"grpc++",
+ "grpc++_test_util",
"grpc_test_util"
],
"headers": [
@@ -6681,6 +6726,8 @@
"src/core/lib/channel/context.h",
"src/core/lib/channel/deadline_filter.h",
"src/core/lib/channel/handshaker.h",
+ "src/core/lib/channel/handshaker_factory.h",
+ "src/core/lib/channel/handshaker_registry.h",
"src/core/lib/channel/http_client_filter.h",
"src/core/lib/channel/http_server_filter.h",
"src/core/lib/channel/message_size_filter.h",
@@ -6747,6 +6794,7 @@
"src/core/lib/json/json_reader.h",
"src/core/lib/json/json_writer.h",
"src/core/lib/slice/percent_encoding.h",
+ "src/core/lib/slice/slice_internal.h",
"src/core/lib/slice/slice_string_helpers.h",
"src/core/lib/surface/api_trace.h",
"src/core/lib/surface/call.h",
@@ -6799,6 +6847,10 @@
"src/core/lib/channel/deadline_filter.h",
"src/core/lib/channel/handshaker.c",
"src/core/lib/channel/handshaker.h",
+ "src/core/lib/channel/handshaker_factory.c",
+ "src/core/lib/channel/handshaker_factory.h",
+ "src/core/lib/channel/handshaker_registry.c",
+ "src/core/lib/channel/handshaker_registry.h",
"src/core/lib/channel/http_client_filter.c",
"src/core/lib/channel/http_client_filter.h",
"src/core/lib/channel/http_server_filter.c",
@@ -6940,6 +6992,7 @@
"src/core/lib/slice/percent_encoding.h",
"src/core/lib/slice/slice.c",
"src/core/lib/slice/slice_buffer.c",
+ "src/core/lib/slice/slice_internal.h",
"src/core/lib/slice/slice_string_helpers.c",
"src/core/lib/slice/slice_string_helpers.h",
"src/core/lib/surface/alarm.c",
@@ -7068,6 +7121,7 @@
"include/grpc/impl/codegen/byte_buffer_reader.h",
"include/grpc/impl/codegen/compression_types.h",
"include/grpc/impl/codegen/connectivity_state.h",
+ "include/grpc/impl/codegen/exec_ctx_fwd.h",
"include/grpc/impl/codegen/grpc_types.h",
"include/grpc/impl/codegen/propagation_bits.h",
"include/grpc/impl/codegen/status.h"
@@ -7079,6 +7133,7 @@
"include/grpc/impl/codegen/byte_buffer_reader.h",
"include/grpc/impl/codegen/compression_types.h",
"include/grpc/impl/codegen/connectivity_state.h",
+ "include/grpc/impl/codegen/exec_ctx_fwd.h",
"include/grpc/impl/codegen/grpc_types.h",
"include/grpc/impl/codegen/propagation_bits.h",
"include/grpc/impl/codegen/status.h"
diff --git a/tools/run_tests/performance/bq_upload_result.py b/tools/run_tests/performance/bq_upload_result.py
index ddcf053ae5..89d2a9b320 100755
--- a/tools/run_tests/performance/bq_upload_result.py
+++ b/tools/run_tests/performance/bq_upload_result.py
@@ -115,9 +115,11 @@ def _flatten_result_inplace(scenario_result):
scenario_result['scenario']['clientConfig'] = json.dumps(scenario_result['scenario']['clientConfig'])
scenario_result['scenario']['serverConfig'] = json.dumps(scenario_result['scenario']['serverConfig'])
scenario_result['latencies'] = json.dumps(scenario_result['latencies'])
+ scenario_result['serverCpuStats'] = []
for stats in scenario_result['serverStats']:
- stats.pop('totalCpuTime', None)
- stats.pop('idleCpuTime', None)
+ scenario_result['serverCpuStats'].append(dict())
+ scenario_result['serverCpuStats'][-1]['totalCpuTime'] = stats.pop('totalCpuTime', None)
+ scenario_result['serverCpuStats'][-1]['idleCpuTime'] = stats.pop('idleCpuTime', None)
for stats in scenario_result['clientStats']:
stats['latencies'] = json.dumps(stats['latencies'])
stats.pop('requestResults', None)
@@ -125,7 +127,7 @@ def _flatten_result_inplace(scenario_result):
scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess'])
scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess'])
scenario_result['requestResults'] = json.dumps(scenario_result.get('requestResults', []))
- scenario_result['summary'].pop('serverCpuUsage', None)
+ scenario_result['serverCpuUsage'] = scenario_result['summary'].pop('serverCpuUsage', None)
scenario_result['summary'].pop('successfulRequestsPerSecond', None)
scenario_result['summary'].pop('failedRequestsPerSecond', None)
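To make the intent of the flattening above concrete, here is a minimal sketch of the before/after shape of `scenario_result`; the field values are made up for illustration:

```python
# Sketch of the new flattening: CPU counters move out of serverStats into a
# repeated serverCpuStats record, and serverCpuUsage becomes a top-level field.
scenario_result = {
    'serverStats': [
        {'timeElapsed': 10.0, 'totalCpuTime': 400, 'idleCpuTime': 300},  # illustrative values
    ],
    'summary': {'qps': 1234.5, 'serverCpuUsage': 25.0},
}

scenario_result['serverCpuStats'] = []
for stats in scenario_result['serverStats']:
    scenario_result['serverCpuStats'].append({
        'totalCpuTime': stats.pop('totalCpuTime', None),
        'idleCpuTime': stats.pop('idleCpuTime', None),
    })
scenario_result['serverCpuUsage'] = scenario_result['summary'].pop('serverCpuUsage', None)

# serverStats entries now keep only their remaining fields, while the CPU
# numbers land in serverCpuStats and serverCpuUsage, matching the BigQuery
# schema change below.
```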
diff --git a/tools/run_tests/performance/scenario_result_schema.json b/tools/run_tests/performance/scenario_result_schema.json
index 3285f212d7..8ec41c377c 100644
--- a/tools/run_tests/performance/scenario_result_schema.json
+++ b/tools/run_tests/performance/scenario_result_schema.json
@@ -213,5 +213,27 @@
"name": "requestResults",
"type": "STRING",
"mode": "NULLABLE"
+ },
+ {
+ "name": "serverCpuStats",
+ "type": "RECORD",
+ "mode": "REPEATED",
+ "fields": [
+ {
+ "name": "totalCpuTime",
+ "type": "INTEGER",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "idleCpuTime",
+ "type": "INTEGER",
+ "mode": "NULLABLE"
+ }
+ ]
+ },
+ {
+ "name": "serverCpuUsage",
+ "type": "FLOAT",
+ "mode": "NULLABLE"
}
]
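The schema above stores the raw CPU counters alongside a precomputed `serverCpuUsage` column. A plausible relationship between the two, assumed here rather than taken from this diff, is the non-idle fraction of total CPU time:

```python
# Assumed derivation (not stated in the schema itself): utilization is the
# share of total CPU time that was not idle.
def server_cpu_usage_percent(total_cpu_time, idle_cpu_time):
    """Hypothetical helper: percentage of server CPU time spent doing work."""
    if not total_cpu_time:
        return None
    return 100.0 * (total_cpu_time - idle_cpu_time) / total_cpu_time

print(server_cpu_usage_percent(400, 300))  # -> 25.0
```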
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index c14f18af81..56efac50ca 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -169,6 +169,9 @@ class JavaLanguage:
def client_cmd(self, args):
return ['./run-test-client.sh'] + args
+ def client_cmd_http2interop(self, args):
+ return ['./run-http2-client.sh'] + args
+
def cloud_to_prod_env(self):
return {}
@@ -179,10 +182,10 @@ class JavaLanguage:
return {}
def unimplemented_test_cases(self):
- return _SKIP_ADVANCED + _SKIP_COMPRESSION
+ return _SKIP_COMPRESSION
def unimplemented_test_cases_server(self):
- return _SKIP_ADVANCED + _SKIP_COMPRESSION
+ return _SKIP_COMPRESSION
def __str__(self):
return 'java'
@@ -217,6 +220,33 @@ class GoLanguage:
def __str__(self):
return 'go'
+class Http2Server:
+ """Represents the HTTP/2 Interop Test server
+
+ This pretends to be a language in order to be built and run, but really it
+ isn't.
+ """
+ def __init__(self):
+ self.server_cwd = None
+ self.safename = str(self)
+
+ def server_cmd(self, args):
+ return ['python test/http2_test/http2_test_server.py']
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def global_env(self):
+ return {}
+
+ def unimplemented_test_cases(self):
+ return _TEST_CASES
+
+ def unimplemented_test_cases_server(self):
+ return _TEST_CASES
+
+ def __str__(self):
+ return 'http2'
class Http2Client:
"""Represents the HTTP/2 Interop Test
@@ -350,10 +380,10 @@ class RubyLanguage:
return {}
def unimplemented_test_cases(self):
- return _SKIP_ADVANCED + _SKIP_SERVER_COMPRESSION
+ return _SKIP_SERVER_COMPRESSION
def unimplemented_test_cases_server(self):
- return _SKIP_ADVANCED + _SKIP_COMPRESSION
+ return _SKIP_COMPRESSION
def __str__(self):
return 'ruby'
@@ -375,6 +405,11 @@ class PythonLanguage:
'--args="{}"'.format(' '.join(args))
]
+ def client_cmd_http2interop(self, args):
+ return [ 'py27/bin/python',
+ 'src/python/grpcio_tests/tests/http2/_negative_http2_client.py',
+ ] + args
+
def cloud_to_prod_env(self):
return {}
@@ -429,7 +464,10 @@ _TEST_CASES = ['large_unary', 'empty_unary', 'ping_pong',
_AUTH_TEST_CASES = ['compute_engine_creds', 'jwt_token_creds',
'oauth2_auth_token', 'per_rpc_creds']
-_HTTP2_TEST_CASES = ["tls", "framing"]
+_HTTP2_TEST_CASES = ['tls', 'framing']
+
+_HTTP2_BADSERVER_TEST_CASES = ['rst_after_header', 'rst_after_data', 'rst_during_data',
+ 'goaway', 'ping', 'max_streams']
DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'
@@ -550,13 +588,25 @@ def cloud_to_prod_jobspec(language, test_case, server_host_name,
def cloud_to_cloud_jobspec(language, test_case, server_name, server_host,
server_port, docker_image=None):
"""Creates jobspec for cloud-to-cloud interop test"""
- cmdline = bash_cmdline(language.client_cmd([
+ interop_only_options = [
'--server_host_override=foo.test.google.fr',
'--use_tls=true',
'--use_test_ca=true',
+ ]
+ common_options = [
'--test_case=%s' % test_case,
'--server_host=%s' % server_host,
- '--server_port=%s' % server_port]))
+ ]
+ if test_case in _HTTP2_BADSERVER_TEST_CASES:
+ # We are running the http2_badserver_interop test. Adjust command line accordingly.
+ offset = sorted(_HTTP2_BADSERVER_TEST_CASES).index(test_case)
+ client_options = common_options + ['--server_port=%s' %
+ (int(server_port)+offset)]
+ cmdline = bash_cmdline(language.client_cmd_http2interop(client_options))
+ else:
+ client_options = interop_only_options + common_options + ['--server_port=%s' % server_port]
+ cmdline = bash_cmdline(language.client_cmd(client_options))
+
cwd = language.client_cwd
environ = language.global_env()
if docker_image:
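The offset logic above gives each bad-server test case its own port, determined by the test case's position in the sorted list. A small sketch of the resulting mapping, with an illustrative base port:

```python
# How bad-server test cases map to client ports under the offset scheme above
# (the base port value is illustrative).
_HTTP2_BADSERVER_TEST_CASES = ['rst_after_header', 'rst_after_data',
                               'rst_during_data', 'goaway', 'ping',
                               'max_streams']

server_port = 8080  # illustrative base port
for test_case in _HTTP2_BADSERVER_TEST_CASES:
    offset = sorted(_HTTP2_BADSERVER_TEST_CASES).index(test_case)
    print('%-16s -> --server_port=%d' % (test_case, server_port + offset))

# Sorted order decides the offset, so 'goaway' (first alphabetically) always
# talks to the base port regardless of the list's declaration order.
```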
@@ -590,13 +640,30 @@ def server_jobspec(language, docker_image):
cmdline = bash_cmdline(
language.server_cmd(['--port=%s' % _DEFAULT_SERVER_PORT]))
environ = language.global_env()
+ if language.safename == 'http2':
+ # we are running the http2 interop server. Open next N ports beginning
+ # with the server port. These ports are used for http2 interop test
+ # (one test case per port). We also attach the docker container running
+ # the server to local network, so we don't have to mess with port mapping
+ port_args = [
+ '-p', str(_DEFAULT_SERVER_PORT+0),
+ '-p', str(_DEFAULT_SERVER_PORT+1),
+ '-p', str(_DEFAULT_SERVER_PORT+2),
+ '-p', str(_DEFAULT_SERVER_PORT+3),
+ '-p', str(_DEFAULT_SERVER_PORT+4),
+ '-p', str(_DEFAULT_SERVER_PORT+5),
+ '-p', str(_DEFAULT_SERVER_PORT+6),
+ '--net=host',
+ ]
+ else:
+ port_args = ['-p', str(_DEFAULT_SERVER_PORT)]
+
docker_cmdline = docker_run_cmdline(cmdline,
image=docker_image,
cwd=language.server_cwd,
environ=environ,
- docker_args=['-p', str(_DEFAULT_SERVER_PORT),
- '--name', container_name])
-
+ docker_args=port_args +
+ ['--name', container_name])
server_job = jobset.JobSpec(
cmdline=docker_cmdline,
environ=environ,
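For reference, a minimal sketch of the docker port arguments built by the `http2` branch above; the value of `_DEFAULT_SERVER_PORT` is assumed for illustration:

```python
# Equivalent construction of the port args for the http2 bad-server case.
_DEFAULT_SERVER_PORT = 8080  # assumed value for illustration

port_args = []
for offset in range(7):  # seven consecutive ports, as listed in the branch above
    port_args += ['-p', str(_DEFAULT_SERVER_PORT + offset)]
port_args.append('--net=host')

print(' '.join(port_args))
# -p 8080 -p 8081 -p 8082 -p 8083 -p 8084 -p 8085 -p 8086 --net=host
```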
@@ -730,7 +797,12 @@ argp.add_argument('--http2_interop',
default=False,
action='store_const',
const=True,
- help='Enable HTTP/2 interop tests')
+ help='Enable HTTP/2 client edge case testing. (Bad client, good server)')
+argp.add_argument('--http2_badserver_interop',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Enable HTTP/2 server edge case testing. (Good client, bad server)')
args = argp.parse_args()
@@ -757,6 +829,7 @@ languages = set(_LANGUAGES[l]
for x in args.language))
http2Interop = Http2Client() if args.http2_interop else None
+http2InteropServer = Http2Server() if args.http2_badserver_interop else None
docker_images={}
if args.use_docker:
@@ -766,6 +839,9 @@ if args.use_docker:
if args.http2_interop:
languages_to_build.add(http2Interop)
+ if args.http2_badserver_interop:
+ languages_to_build.add(http2InteropServer)
+
build_jobs = []
for l in languages_to_build:
job = build_interop_image_jobspec(l)
@@ -797,6 +873,14 @@ try:
server_jobs[lang] = job
server_addresses[lang] = ('localhost', job.mapped_port(_DEFAULT_SERVER_PORT))
+ if args.http2_badserver_interop:
+ # launch a HTTP2 server emulator that creates edge cases
+ lang = str(http2InteropServer)
+ spec = server_jobspec(http2InteropServer, docker_images.get(lang))
+ job = dockerjob.DockerJob(spec)
+ server_jobs[lang] = job
+ server_addresses[lang] = ('localhost', _DEFAULT_SERVER_PORT)
+
jobs = []
if args.cloud_to_prod:
for server_host_name in args.prod_servers:
@@ -840,17 +924,18 @@ try:
skip_server = [] # test cases unimplemented by server
if server_language:
skip_server = server_language.unimplemented_test_cases_server()
- for language in languages:
- for test_case in _TEST_CASES:
- if not test_case in language.unimplemented_test_cases():
- if not test_case in skip_server:
- test_job = cloud_to_cloud_jobspec(language,
- test_case,
- server_name,
- server_host,
- server_port,
- docker_image=docker_images.get(str(language)))
- jobs.append(test_job)
+ if not args.http2_badserver_interop:
+ for language in languages:
+ for test_case in _TEST_CASES:
+ if not test_case in language.unimplemented_test_cases():
+ if not test_case in skip_server:
+ test_job = cloud_to_cloud_jobspec(language,
+ test_case,
+ server_name,
+ server_host,
+ server_port,
+ docker_image=docker_images.get(str(language)))
+ jobs.append(test_job)
if args.http2_interop:
for test_case in _HTTP2_TEST_CASES:
@@ -865,6 +950,17 @@ try:
docker_image=docker_images.get(str(http2Interop)))
jobs.append(test_job)
+ if args.http2_badserver_interop:
+ for language in languages:
+ for test_case in _HTTP2_BADSERVER_TEST_CASES:
+ test_job = cloud_to_cloud_jobspec(language,
+ test_case,
+ server_name,
+ server_host,
+ server_port,
+ docker_image=docker_images.get(str(language)))
+ jobs.append(test_job)
+
if not jobs:
print('No jobs to run.')
for image in docker_images.itervalues():
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index b7b742d7af..d6eed3f5bd 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -113,7 +113,7 @@ def create_qpsworker_job(language, shortname=None, port=10000, remote_host=None,
def create_scenario_jobspec(scenario_json, workers, remote_host=None,
- bq_result_table=None):
+ bq_result_table=None, server_cpu_load=0):
"""Runs one scenario using QPS driver."""
# setting QPS_WORKERS env variable here makes sure it works with SSH too.
cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
@@ -121,7 +121,9 @@ def create_scenario_jobspec(scenario_json, workers, remote_host=None,
cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
cmd += 'tools/run_tests/performance/run_qps_driver.sh '
cmd += '--scenarios_json=%s ' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
- cmd += '--scenario_result_file=scenario_result.json'
+ cmd += '--scenario_result_file=scenario_result.json '
+ if server_cpu_load != 0:
+ cmd += '--search_param=offered_load --initial_search_value=1000 --targeted_cpu_load=%d --stride=500 --error_tolerance=0.01' % server_cpu_load
if remote_host:
user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
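A sketch of the driver command that `create_scenario_jobspec` produces once `--server_cpu_load` is set; the worker addresses, scenario name, and load value are illustrative:

```python
# Reconstruction of the command string built above, for a hypothetical run.
import json
import pipes

workers = ['10.0.0.1:10000', '10.0.0.2:10010']          # illustrative
scenario_json = {'name': 'example_scenario'}             # illustrative
server_cpu_load = 70                                     # illustrative

cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
cmd += 'tools/run_tests/performance/run_qps_driver.sh '
cmd += '--scenarios_json=%s ' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
cmd += '--scenario_result_file=scenario_result.json '
if server_cpu_load != 0:
    cmd += ('--search_param=offered_load --initial_search_value=1000 '
            '--targeted_cpu_load=%d --stride=500 --error_tolerance=0.01'
            % server_cpu_load)
print(cmd)
```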
@@ -129,7 +131,7 @@ def create_scenario_jobspec(scenario_json, workers, remote_host=None,
return jobset.JobSpec(
cmdline=[cmd],
shortname='qps_json_driver.%s' % scenario_json['name'],
- timeout_seconds=3*60,
+ timeout_seconds=12*60,
shell=True,
verbose_success=True)
@@ -318,7 +320,7 @@ Scenario = collections.namedtuple('Scenario', 'jobspec workers name')
def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
category='all', bq_result_table=None,
- netperf=False, netperf_hosts=[]):
+ netperf=False, netperf_hosts=[], server_cpu_load=0):
"""Create jobspecs for scenarios to run."""
all_workers = [worker
for workers in workers_by_lang.values()
@@ -379,7 +381,8 @@ def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
create_scenario_jobspec(scenario_json,
[w.host_and_port for w in workers],
remote_host=remote_host,
- bq_result_table=bq_result_table),
+ bq_result_table=bq_result_table,
+ server_cpu_load=server_cpu_load),
workers,
scenario_json['name'])
scenarios.append(scenario)
@@ -461,6 +464,9 @@ argp.add_argument('--netperf',
action='store_const',
const=True,
help='Run netperf benchmark as one of the scenarios.')
+argp.add_argument('--server_cpu_load',
+ default=0, type=int,
+ help='Select a targeted server cpu load to run. 0 means ignore this flag')
argp.add_argument('-x', '--xml_report', default='report.xml', type=str,
help='Name of XML report file to generate.')
argp.add_argument('--perf_args',
@@ -490,7 +496,6 @@ argp.add_argument('--skip_generate_flamegraphs',
'May be useful if "perf_args" arguments do not make sense for '
'generating flamegraphs (e.g., "--perf_args=stat ...")'))
-
args = argp.parse_args()
languages = set(scenario_config.LANGUAGES[l]
@@ -540,7 +545,8 @@ scenarios = create_scenarios(languages,
category=args.category,
bq_result_table=args.bq_result_table,
netperf=args.netperf,
- netperf_hosts=args.remote_worker_host)
+ netperf_hosts=args.remote_worker_host,
+ server_cpu_load=args.server_cpu_load)
if not scenarios:
raise Exception('No scenarios to run')
diff --git a/tools/run_tests/sanity/check_submodules.sh b/tools/run_tests/sanity/check_submodules.sh
index be12f968d2..61e8185854 100755
--- a/tools/run_tests/sanity/check_submodules.sh
+++ b/tools/run_tests/sanity/check_submodules.sh
@@ -41,13 +41,14 @@ want_submodules=`mktemp /tmp/submXXXXXX`
git submodule | awk '{ print $1 }' | sort > $submodules
cat << EOF | awk '{ print $1 }' | sort > $want_submodules
- c880e42ba1c8032d4cdde2aba0541d8a9d9fa2e9 third_party/boringssl (version_for_cocoapods_2.0-100-gc880e42)
- 05b155ff59114735ec8cd089f669c4c3d8f59029 third_party/gflags (v2.1.0-45-g05b155f)
44c25c892a6229b20db7cd9dc05584ea865896de third_party/benchmark (v0.1.0-343-g44c25c8)
+ c880e42ba1c8032d4cdde2aba0541d8a9d9fa2e9 third_party/boringssl (c880e42)
+ 886e7d75368e3f4fab3f4d0d3584e4abfc557755 third_party/boringssl-with-bazel (version_for_cocoapods_7.0-857-g886e7d7)
+ 05b155ff59114735ec8cd089f669c4c3d8f59029 third_party/gflags (v2.1.0-45-g05b155f)
c99458533a9b4c743ed51537e25989ea55944908 third_party/googletest (release-1.7.0)
- a428e42072765993ff674fda72863c9f1aa2d268 third_party/protobuf (v3.1.0)
+ a428e42072765993ff674fda72863c9f1aa2d268 third_party/protobuf (v3.1.0-alpha-1)
+ bcad91771b7f0bff28a1cac1981d7ef2b9bcef3c third_party/thrift (bcad917)
50893291621658f355bc5b4d450a8d06a563053d third_party/zlib (v1.2.8)
- bcad91771b7f0bff28a1cac1981d7ef2b9bcef3c third_party/thrift
EOF
diff -u $submodules $want_submodules
diff --git a/tools/run_tests/sanity/core_banned_functions.py b/tools/run_tests/sanity/core_banned_functions.py
new file mode 100755
index 0000000000..afac10bf80
--- /dev/null
+++ b/tools/run_tests/sanity/core_banned_functions.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python2.7
+
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import sys
+
+os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
+
+# map of banned function signature to whitelist
+BANNED_EXCEPT = {
+ 'grpc_resource_quota_ref(': ['src/core/lib/iomgr/resource_quota.c'],
+ 'grpc_resource_quota_unref(': ['src/core/lib/iomgr/resource_quota.c'],
+ 'grpc_slice_buffer_destroy(': ['src/core/lib/slice/slice_buffer.c'],
+ 'grpc_slice_buffer_reset_and_unref(': ['src/core/lib/slice/slice_buffer.c'],
+ 'grpc_slice_ref(': ['src/core/lib/slice/slice.c'],
+ 'grpc_slice_unref(': ['src/core/lib/slice/slice.c'],
+}
+
+errors = 0
+for root, dirs, files in os.walk('src/core'):
+ for filename in files:
+ path = os.path.join(root, filename)
+ if os.path.splitext(path)[1] != '.c': continue
+ with open(path) as f:
+ text = f.read()
+ for banned, exceptions in BANNED_EXCEPT.items():
+ if path in exceptions: continue
+ if banned in text:
+ print 'Illegal use of "%s" in %s' % (banned, path)
+ errors += 1
+
+assert errors == 0
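The scan above reduces to a simple per-file check. A minimal sketch of the same logic as a standalone helper, along with an example of what a violation report looks like (the path is hypothetical):

```python
# Example report line printed by the sanity check for a hypothetical file:
#   Illegal use of "grpc_slice_unref(" in src/core/lib/foo/bar.c

def check_file(path, banned_except):
    """Return the banned calls found in one .c file, honoring the whitelist."""
    with open(path) as f:
        text = f.read()
    return [banned for banned, exceptions in banned_except.items()
            if path not in exceptions and banned in text]
```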
diff --git a/tools/run_tests/sanity/sanity_tests.yaml b/tools/run_tests/sanity/sanity_tests.yaml
index 32e62dd529..37819166e3 100644
--- a/tools/run_tests/sanity/sanity_tests.yaml
+++ b/tools/run_tests/sanity/sanity_tests.yaml
@@ -3,6 +3,7 @@
- script: tools/run_tests/sanity/check_sources_and_headers.py
- script: tools/run_tests/sanity/check_submodules.sh
- script: tools/run_tests/sanity/check_test_filtering.py
+- script: tools/run_tests/sanity/core_banned_functions.py
- script: tools/buildgen/generate_projects.sh -j 3
cpu_cost: 3
- script: tools/distrib/check_copyright.py
@@ -12,3 +13,4 @@
- script: tools/distrib/check_nanopb_output.sh
- script: tools/distrib/check_include_guards.py
- script: tools/distrib/python/check_grpcio_tools.py
+