Diffstat (limited to 'tools')
37 files changed, 957 insertions, 358 deletions
diff --git a/tools/cmake/gRPCConfig.cmake.in b/tools/cmake/gRPCConfig.cmake.in
deleted file mode 100644
index 1a0fa6a462..0000000000
--- a/tools/cmake/gRPCConfig.cmake.in
+++ /dev/null
@@ -1,8 +0,0 @@
-# Depend packages
-@_gRPC_FIND_ZLIB@
-@_gRPC_FIND_PROTOBUF@
-@_gRPC_FIND_SSL@
-@_gRPC_FIND_CARES@
-
-# Targets
-include(${CMAKE_CURRENT_LIST_DIR}/gRPCTargets.cmake)
diff --git a/tools/cmake/gRPCConfigVersion.cmake.in b/tools/cmake/gRPCConfigVersion.cmake.in
deleted file mode 100644
index f3c19fd403..0000000000
--- a/tools/cmake/gRPCConfigVersion.cmake.in
+++ /dev/null
@@ -1,11 +0,0 @@
-set(PACKAGE_VERSION "@PACKAGE_VERSION@")
-
-# Check whether the requested PACKAGE_FIND_VERSION is compatible
-if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}")
- set(PACKAGE_VERSION_COMPATIBLE FALSE)
-else()
- set(PACKAGE_VERSION_COMPATIBLE TRUE)
- if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}")
- set(PACKAGE_VERSION_EXACT TRUE)
- endif()
-endif()
diff --git a/tools/codegen/core/gen_hpack_tables.c b/tools/codegen/core/gen_hpack_tables.c
index 858ae20c2d..73dfa9fbd6 100644
--- a/tools/codegen/core/gen_hpack_tables.c
+++ b/tools/codegen/core/gen_hpack_tables.c
@@ -189,7 +189,10 @@ static unsigned state_index(unsigned bitofs, symset syms, unsigned *isnew) {
     return i;
   }
   GPR_ASSERT(nhuffstates != MAXHUFFSTATES);
-  i = nhuffstates++;
+
+  i = nhuffstates;
+  nhuffstates++;
+
   huffstates[i].bitofs = bitofs;
   huffstates[i].syms = syms;
   huffstates[i].next = nibblelut_empty();
diff --git a/tools/codegen/core/gen_static_metadata.py b/tools/codegen/core/gen_static_metadata.py
index e56c627721..6ee8a7cace 100755
--- a/tools/codegen/core/gen_static_metadata.py
+++ b/tools/codegen/core/gen_static_metadata.py
@@ -132,29 +132,33 @@ CONFIG = [
     ('www-authenticate', ''),
 ]
 
+# Entries marked with is_default=True are ignored when counting
+# non-default initial metadata that prevents the chttp2 server from
+# sending a Trailers-Only response.
 METADATA_BATCH_CALLOUTS = [
-    ':path',
-    ':method',
-    ':status',
-    ':authority',
-    ':scheme',
-    'te',
-    'grpc-message',
-    'grpc-status',
-    'grpc-payload-bin',
-    'grpc-encoding',
-    'grpc-accept-encoding',
-    'grpc-server-stats-bin',
-    'grpc-tags-bin',
-    'grpc-trace-bin',
-    'content-type',
-    'content-encoding',
-    'accept-encoding',
-    'grpc-internal-encoding-request',
-    'grpc-internal-stream-encoding-request',
-    'user-agent',
-    'host',
-    'lb-token',
+    # (name, is_default)
+    (':path', True),
+    (':method', True),
+    (':status', True),
+    (':authority', True),
+    (':scheme', True),
+    ('te', True),
+    ('grpc-message', True),
+    ('grpc-status', True),
+    ('grpc-payload-bin', True),
+    ('grpc-encoding', True),
+    ('grpc-accept-encoding', True),
+    ('grpc-server-stats-bin', True),
+    ('grpc-tags-bin', True),
+    ('grpc-trace-bin', True),
+    ('content-type', True),
+    ('content-encoding', True),
+    ('accept-encoding', True),
+    ('grpc-internal-encoding-request', True),
+    ('grpc-internal-stream-encoding-request', True),
+    ('user-agent', True),
+    ('host', True),
+    ('lb-token', True),
 ]
 
 COMPRESSION_ALGORITHMS = [
@@ -235,7 +239,7 @@ all_elems = list()
 static_userdata = {}
 # put metadata batch callouts first, to make the check of whether a static
 # metadata string is a callout trivial
-for elem in METADATA_BATCH_CALLOUTS:
+for elem, _ in METADATA_BATCH_CALLOUTS:
   if elem not in all_strs:
     all_strs.append(elem)
 for elem in CONFIG:
@@ -372,7 +376,7 @@ def slice_def(i):
 
 # validate configuration
-for elem in METADATA_BATCH_CALLOUTS:
+for elem, _ in METADATA_BATCH_CALLOUTS:
   assert elem in all_strs
 
 print >> H, '#define GRPC_STATIC_MDSTR_COUNT %d' % len(all_strs)
@@ -540,7 +544,7 @@ for a, b in all_elems:
 print >> C, '};'
 
 print >> H, 'typedef enum {'
-for elem in METADATA_BATCH_CALLOUTS:
+for elem, _ in METADATA_BATCH_CALLOUTS:
   print >> H, '  %s,' % mangle(elem, 'batch').upper()
 print >> H, '  GRPC_BATCH_CALLOUTS_COUNT'
 print >> H, '} grpc_metadata_batch_callouts_index;'
@@ -548,7 +552,7 @@ print >> H
 print >> H, 'typedef union {'
 print >> H, '  struct grpc_linked_mdelem *array[GRPC_BATCH_CALLOUTS_COUNT];'
 print >> H, '  struct {'
-for elem in METADATA_BATCH_CALLOUTS:
+for elem, _ in METADATA_BATCH_CALLOUTS:
   print >> H, '  struct grpc_linked_mdelem *%s;' % mangle(elem, '').lower()
 print >> H, '  } named;'
 print >> H, '} grpc_metadata_batch_callouts;'
@@ -556,6 +560,14 @@ print >> H
 print >> H, '#define GRPC_BATCH_INDEX_OF(slice) \\'
 print >> H, '  (GRPC_IS_STATIC_METADATA_STRING((slice)) ? (grpc_metadata_batch_callouts_index)GPR_CLAMP(GRPC_STATIC_METADATA_INDEX((slice)), 0, GRPC_BATCH_CALLOUTS_COUNT) : GRPC_BATCH_CALLOUTS_COUNT)'
 print >> H
+print >> H, ('extern bool grpc_static_callout_is_default['
+             'GRPC_BATCH_CALLOUTS_COUNT];')
+print >> H
+print >> C, 'bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT] = {'
+for elem, is_default in METADATA_BATCH_CALLOUTS:
+  print >> C, '  %s, // %s' % (str(is_default).lower(), elem)
+print >> C, '};'
+print >> C
 print >> H, 'extern const uint8_t grpc_static_accept_encoding_metadata[%d];' % (
     1 << len(COMPRESSION_ALGORITHMS))
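The callout list now carries an explicit is_default flag per entry, and the generator emits a parallel grpc_static_callout_is_default table from it. A minimal standalone sketch of that emission step, with a hypothetical two-entry list standing in for the real one:

    # Sketch of the is_default table emission above (hypothetical input list).
    CALLOUTS = [(':path', True), ('lb-token', True)]
    lines = ['bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT] = {']
    for name, is_default in CALLOUTS:
        # str(True).lower() yields 'true', i.e. a C99 bool literal
        lines.append('  %s, // %s' % (str(is_default).lower(), name))
    lines.append('};')
    print('\n'.join(lines))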
diff --git a/tools/codegen/core/gen_stats_data.py b/tools/codegen/core/gen_stats_data.py
new file mode 100755
index 0000000000..8e4ef594af
--- /dev/null
+++ b/tools/codegen/core/gen_stats_data.py
@@ -0,0 +1,315 @@
+#!/usr/bin/env python2.7
+
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import ctypes
+import math
+import sys
+import yaml
+import json
+
+with open('src/core/lib/debug/stats_data.yaml') as f:
+  attrs = yaml.load(f.read())
+
+REQUIRED_FIELDS = ['name', 'doc']
+
+def make_type(name, fields):
+  return (collections.namedtuple(name, ' '.join(list(set(REQUIRED_FIELDS + fields)))), [])
+
+def c_str(s, encoding='ascii'):
+  if isinstance(s, unicode):
+    s = s.encode(encoding)
+  result = ''
+  for c in s:
+    if not (32 <= ord(c) < 127) or c in ('\\', '"'):
+      result += '\\%03o' % ord(c)
+    else:
+      result += c
+  return '"' + result + '"'
+
+types = (
+  make_type('Counter', []),
+  make_type('Histogram', ['max', 'buckets']),
+)
+
+inst_map = dict((t[0].__name__, t[1]) for t in types)
+
+stats = []
+
+for attr in attrs:
+  found = False
+  for t, lst in types:
+    t_name = t.__name__.lower()
+    if t_name in attr:
+      name = attr[t_name]
+      del attr[t_name]
+      lst.append(t(name=name, **attr))
+      found = True
+      break
+  assert found, "Bad decl: %s" % attr
+
+def dbl2u64(d):
+  return ctypes.c_ulonglong.from_buffer(ctypes.c_double(d)).value
+
+def shift_works_until(mapped_bounds, shift_bits):
+  for i, ab in enumerate(zip(mapped_bounds, mapped_bounds[1:])):
+    a, b = ab
+    if (a >> shift_bits) == (b >> shift_bits):
+      return i
+  return len(mapped_bounds)
+
+def find_ideal_shift(mapped_bounds, max_size):
+  best = None
+  for shift_bits in reversed(range(0,64)):
+    n = shift_works_until(mapped_bounds, shift_bits)
+    if n == 0: continue
+    table_size = mapped_bounds[n-1] >> shift_bits
+    if table_size > max_size: continue
+    if table_size > 65535: continue
+    if best is None:
+      best = (shift_bits, n, table_size)
+    elif best[1] < n:
+      best = (shift_bits, n, table_size)
+  print best
+  return best
+
+def gen_map_table(mapped_bounds, shift_data):
+  tbl = []
+  cur = 0
+  print mapped_bounds
+  mapped_bounds = [x >> shift_data[0] for x in mapped_bounds]
+  print mapped_bounds
+  for i in range(0, mapped_bounds[shift_data[1]-1]):
+    while i > mapped_bounds[cur]:
+      cur += 1
+    tbl.append(cur)
+  return tbl
+
+static_tables = []
+
+def decl_static_table(values, type):
+  global static_tables
+  v = (type, values)
+  for i, vp in enumerate(static_tables):
+    if v == vp: return i
+  print "ADD TABLE: %s %r" % (type, values)
+  r = len(static_tables)
+  static_tables.append(v)
+  return r
+
+def type_for_uint_table(table):
+  mv = max(table)
+  if mv < 2**8:
+    return 'uint8_t'
+  elif mv < 2**16:
+    return 'uint16_t'
+  elif mv < 2**32:
+    return 'uint32_t'
+  else:
+    return 'uint64_t'
+
+def gen_bucket_code(histogram):
+  bounds = [0, 1]
+  done_trivial = False
+  done_unmapped = False
+  first_nontrivial = None
+  first_unmapped = None
+  while len(bounds) < histogram.buckets + 1:
+    if len(bounds) == histogram.buckets:
+      nextb = int(histogram.max)
+    else:
+      mul = math.pow(float(histogram.max) / bounds[-1],
+                     1.0 / (histogram.buckets + 1 - len(bounds)))
+      nextb = int(math.ceil(bounds[-1] * mul))
+    if nextb <= bounds[-1] + 1:
+      nextb = bounds[-1] + 1
+    elif not done_trivial:
+      done_trivial = True
+      first_nontrivial = len(bounds)
+    bounds.append(nextb)
+  bounds_idx = decl_static_table(bounds, 'int')
+  if done_trivial:
+    first_nontrivial_code = dbl2u64(first_nontrivial)
+    code_bounds = [dbl2u64(x) - first_nontrivial_code for x in bounds]
+    shift_data = find_ideal_shift(code_bounds[first_nontrivial:], 256 * histogram.buckets)
+  #print first_nontrivial, shift_data, bounds
+  #if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]]
+  code = 'value = GPR_CLAMP(value, 0, %d);\n' % histogram.max
+  map_table = gen_map_table(code_bounds[first_nontrivial:], shift_data)
+  if first_nontrivial is None:
+    code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, value);\n'
+             % histogram.name.upper())
+  else:
+    code += 'if (value < %d) {\n' % first_nontrivial
+    code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, value);\n'
+             % histogram.name.upper())
+    code += 'return;\n'
+    code += '}'
+    first_nontrivial_code = dbl2u64(first_nontrivial)
+    if shift_data is not None:
+      map_table_idx = decl_static_table(map_table, type_for_uint_table(map_table))
+      code += 'union { double dbl; uint64_t uint; } _val, _bkt;\n'
+      code += '_val.dbl = value;\n'
+      code += 'if (_val.uint < %dull) {\n' % ((map_table[-1] << shift_data[0]) + first_nontrivial_code)
+      code += 'int bucket = '
+      code += 'grpc_stats_table_%d[((_val.uint - %dull) >> %d)] + %d;\n' % (map_table_idx, first_nontrivial_code, shift_data[0], first_nontrivial)
+      code += '_bkt.dbl = grpc_stats_table_%d[bucket];\n' % bounds_idx
+      code += 'bucket -= (_val.uint < _bkt.uint);\n'
+      code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, bucket);\n' % histogram.name.upper()
+      code += 'return;\n'
+      code += '}\n'
+    code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, ' % histogram.name.upper()
+    code += 'grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_%d, %d));\n' % (bounds_idx, histogram.buckets)
+  return (code, bounds_idx)
+
+# utility: print a big comment block into a set of files
+def put_banner(files, banner):
+  for f in files:
+    print >>f, '/*'
+    for line in banner:
+      print >>f, ' * %s' % line
+    print >>f, ' */'
+    print >>f
+
+with open('src/core/lib/debug/stats_data.h', 'w') as H:
+  # copy-paste copyright notice from this file
+  with open(sys.argv[0]) as my_source:
+    copyright = []
+    for line in my_source:
+      if line[0] != '#': break
+    for line in my_source:
+      if line[0] == '#':
+        copyright.append(line)
+        break
+    for line in my_source:
+      if line[0] != '#':
+        break
+      copyright.append(line)
+    put_banner([H], [line[2:].rstrip() for line in copyright])
+
+  put_banner([H], ["Automatically generated by tools/codegen/core/gen_stats_data.py"])
+
+  print >>H, "#ifndef GRPC_CORE_LIB_DEBUG_STATS_DATA_H"
+  print >>H, "#define GRPC_CORE_LIB_DEBUG_STATS_DATA_H"
+  print >>H
+  print >>H, "#include <inttypes.h>"
+  print >>H, "#include \"src/core/lib/iomgr/exec_ctx.h\""
+  print >>H
+
+  for typename, instances in sorted(inst_map.items()):
+    print >>H, "typedef enum {"
+    for inst in instances:
+      print >>H, "  GRPC_STATS_%s_%s," % (typename.upper(), inst.name.upper())
+    print >>H, "  GRPC_STATS_%s_COUNT" % (typename.upper())
+    print >>H, "} grpc_stats_%ss;" % (typename.lower())
+    print >>H, "extern const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT];" % (
+        typename.lower(), typename.upper())
+    print >>H, "extern const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT];" % (
+        typename.lower(), typename.upper())
+
+  histo_start = []
+  histo_buckets = []
+  histo_bucket_boundaries = []
+
+  print >>H, "typedef enum {"
+  first_slot = 0
+  for histogram in inst_map['Histogram']:
+    histo_start.append(first_slot)
+    histo_buckets.append(histogram.buckets)
+    print >>H, "  GRPC_STATS_HISTOGRAM_%s_FIRST_SLOT = %d," % (histogram.name.upper(), first_slot)
+    print >>H, "  GRPC_STATS_HISTOGRAM_%s_BUCKETS = %d," % (histogram.name.upper(), histogram.buckets)
+    first_slot += histogram.buckets
+  print >>H, "  GRPC_STATS_HISTOGRAM_BUCKETS = %d" % first_slot
+  print >>H, "} grpc_stats_histogram_constants;"
+
+  for ctr in inst_map['Counter']:
+    print >>H, ("#define GRPC_STATS_INC_%s(exec_ctx) " +
+                "GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_%s)") % (
+        ctr.name.upper(), ctr.name.upper())
+  for histogram in inst_map['Histogram']:
+    print >>H, "#define GRPC_STATS_INC_%s(exec_ctx, value) grpc_stats_inc_%s((exec_ctx), (int)(value))" % (
+        histogram.name.upper(), histogram.name.lower())
+    print >>H, "void grpc_stats_inc_%s(grpc_exec_ctx *exec_ctx, int x);" % histogram.name.lower()
+
+  for i, tbl in enumerate(static_tables):
+    print >>H, "extern const %s grpc_stats_table_%d[%d];" % (tbl[0], i, len(tbl[1]))
+
+  print >>H, "extern const int grpc_stats_histo_buckets[%d];" % len(inst_map['Histogram'])
+  print >>H, "extern const int grpc_stats_histo_start[%d];" % len(inst_map['Histogram'])
+  print >>H, "extern const int *const grpc_stats_histo_bucket_boundaries[%d];" % len(inst_map['Histogram'])
+  print >>H, "extern void (*const grpc_stats_inc_histogram[%d])(grpc_exec_ctx *exec_ctx, int x);" % len(inst_map['Histogram'])
+
+  print >>H
+  print >>H, "#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */"
+
+with open('src/core/lib/debug/stats_data.c', 'w') as C:
+  # copy-paste copyright notice from this file
+  with open(sys.argv[0]) as my_source:
+    copyright = []
+    for line in my_source:
+      if line[0] != '#': break
+    for line in my_source:
+      if line[0] == '#':
+        copyright.append(line)
+        break
+    for line in my_source:
+      if line[0] != '#':
+        break
+      copyright.append(line)
+    put_banner([C], [line[2:].rstrip() for line in copyright])
+
+  put_banner([C], ["Automatically generated by tools/codegen/core/gen_stats_data.py"])
+
+  print >>C, "#include \"src/core/lib/debug/stats_data.h\""
+  print >>C, "#include \"src/core/lib/debug/stats.h\""
+  print >>C, "#include \"src/core/lib/iomgr/exec_ctx.h\""
+  print >>C, "#include <grpc/support/useful.h>"
+
+  histo_code = []
+  for histogram in inst_map['Histogram']:
+    code, bounds_idx = gen_bucket_code(histogram)
+    histo_bucket_boundaries.append(bounds_idx)
+    histo_code.append(code)
+
+  for typename, instances in sorted(inst_map.items()):
+    print >>C, "const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT] = {" % (
+        typename.lower(), typename.upper())
+    for inst in instances:
+      print >>C, "  %s," % c_str(inst.name)
+    print >>C, "};"
+    print >>C, "const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT] = {" % (
+        typename.lower(), typename.upper())
+    for inst in instances:
+      print >>C, "  %s," % c_str(inst.doc)
+    print >>C, "};"
+
+  for i, tbl in enumerate(static_tables):
+    print >>C, "const %s grpc_stats_table_%d[%d] = {%s};" % (
+        tbl[0], i, len(tbl[1]), ','.join('%s' % x for x in tbl[1]))
+
+  for histogram, code in zip(inst_map['Histogram'], histo_code):
+    print >>C, ("void grpc_stats_inc_%s(grpc_exec_ctx *exec_ctx, int value) {%s}") % (
+        histogram.name.lower(), code)
+
+  print >>C, "const int grpc_stats_histo_buckets[%d] = {%s};" % (
+      len(inst_map['Histogram']), ','.join('%s' % x for x in histo_buckets))
+  print >>C, "const int grpc_stats_histo_start[%d] = {%s};" % (
+      len(inst_map['Histogram']), ','.join('%s' % x for x in histo_start))
+  print >>C, "const int *const grpc_stats_histo_bucket_boundaries[%d] = {%s};" % (
+      len(inst_map['Histogram']), ','.join('grpc_stats_table_%d' % x for x in histo_bucket_boundaries))
+  print >>C, "void (*const grpc_stats_inc_histogram[%d])(grpc_exec_ctx *exec_ctx, int x) = {%s};" % (
+      len(inst_map['Histogram']), ','.join('grpc_stats_inc_%s' % histogram.name.lower() for histogram in inst_map['Histogram']))
diff --git a/tools/distrib/docker_for_windows.rb b/tools/distrib/docker_for_windows.rb
index b5cd57f1f0..010e2067b8 100755
--- a/tools/distrib/docker_for_windows.rb
+++ b/tools/distrib/docker_for_windows.rb
@@ -23,11 +23,21 @@ def docker_for_windows_image()
   dockerfile = File.join(grpc_root, 'third_party', 'rake-compiler-dock', 'Dockerfile')
   dockerpath = File.dirname(dockerfile)
   version = Digest::SHA1.file(dockerfile).hexdigest
-  image_name = 'grpc/rake-compiler-dock:' + version
-  cmd = "docker build -t #{image_name} --file #{dockerfile} #{dockerpath}"
-  puts cmd
-  system cmd
-  raise "Failed to build the docker image." unless $? == 0
+  image_name = 'rake-compiler-dock_' + version
+  # if "DOCKERHUB_ORGANIZATION" env is set, we try to pull the pre-built
+  # rake-compiler-dock image from dockerhub rather than building from scratch.
+  if ENV.has_key?('DOCKERHUB_ORGANIZATION')
+    image_name = ENV['DOCKERHUB_ORGANIZATION'] + '/' + image_name
+    cmd = "docker pull #{image_name}"
+    puts cmd
+    system cmd
+    raise "Failed to pull the docker image." unless $? == 0
+  else
+    cmd = "docker build -t #{image_name} --file #{dockerfile} #{dockerpath}"
+    puts cmd
+    system cmd
+    raise "Failed to build the docker image." unless $? == 0
+  end
   image_name
 end
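The pull-or-build fallback keyed off DOCKERHUB_ORGANIZATION is a generally useful pattern. A rough Python equivalent of the Ruby logic above (illustrative only; the function name and argument handling here are invented):

    import os, subprocess

    def docker_for_windows_image(dockerfile_sha, dockerpath='.'):
        image = 'rake-compiler-dock_' + dockerfile_sha
        org = os.environ.get('DOCKERHUB_ORGANIZATION')
        if org:
            # Pull the pre-built image from dockerhub instead of building.
            image = org + '/' + image
            subprocess.check_call(['docker', 'pull', image])
        else:
            subprocess.check_call(['docker', 'build', '-t', image, dockerpath])
        return image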
diff --git a/tools/dockerfile/push_testing_images.sh b/tools/dockerfile/push_testing_images.sh
index c88ba434f5..e9151303f7 100755
--- a/tools/dockerfile/push_testing_images.sh
+++ b/tools/dockerfile/push_testing_images.sh
@@ -29,7 +29,7 @@ cd -
 
 DOCKERHUB_ORGANIZATION=grpctesting
 
-for DOCKERFILE_DIR in tools/dockerfile/test/* tools/dockerfile/grpc_artifact_* tools/dockerfile/interoptest/*
+for DOCKERFILE_DIR in tools/dockerfile/test/* tools/dockerfile/grpc_artifact_* tools/dockerfile/interoptest/* third_party/rake-compiler-dock
 do
   # Generate image name based on Dockerfile checksum. That works well as long
   # as we can count on dockerfiles being written in a way that changing the logical
diff --git a/tools/doxygen/Doxyfile.c++ b/tools/doxygen/Doxyfile.c++
index b83e581e06..62f113907d 100644
--- a/tools/doxygen/Doxyfile.c++
+++ b/tools/doxygen/Doxyfile.c++
@@ -893,6 +893,7 @@ include/grpc/impl/codegen/propagation_bits.h \
 include/grpc/impl/codegen/slice.h \
 include/grpc/impl/codegen/status.h \
 include/grpc/impl/codegen/sync.h \
+include/grpc/impl/codegen/sync_custom.h \
 include/grpc/impl/codegen/sync_generic.h \
 include/grpc/impl/codegen/sync_posix.h \
 include/grpc/impl/codegen/sync_windows.h \
@@ -916,6 +917,7 @@ include/grpc/support/port_platform.h \
 include/grpc/support/string_util.h \
 include/grpc/support/subprocess.h \
 include/grpc/support/sync.h \
+include/grpc/support/sync_custom.h \
 include/grpc/support/sync_generic.h \
 include/grpc/support/sync_posix.h \
 include/grpc/support/sync_windows.h \
diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal
index 67dfb067aa..49919415bd 100644
--- a/tools/doxygen/Doxyfile.c++.internal
+++ b/tools/doxygen/Doxyfile.c++.internal
@@ -894,6 +894,7 @@ include/grpc/impl/codegen/propagation_bits.h \
 include/grpc/impl/codegen/slice.h \
 include/grpc/impl/codegen/status.h \
 include/grpc/impl/codegen/sync.h \
+include/grpc/impl/codegen/sync_custom.h \
 include/grpc/impl/codegen/sync_generic.h \
 include/grpc/impl/codegen/sync_posix.h \
 include/grpc/impl/codegen/sync_windows.h \
@@ -917,6 +918,7 @@ include/grpc/support/port_platform.h \
 include/grpc/support/string_util.h \
 include/grpc/support/subprocess.h \
 include/grpc/support/sync.h \
+include/grpc/support/sync_custom.h \
 include/grpc/support/sync_generic.h \
 include/grpc/support/sync_posix.h \
 include/grpc/support/sync_windows.h \
@@ -940,10 +942,13 @@ src/core/lib/channel/handshaker_registry.h \
 src/core/lib/compression/algorithm_metadata.h \
 src/core/lib/compression/message_compress.h \
 src/core/lib/compression/stream_compression.h \
+src/core/lib/debug/stats.h \
+src/core/lib/debug/stats_data.h \
 src/core/lib/debug/trace.h \
 src/core/lib/http/format_request.h \
 src/core/lib/http/httpcli.h \
 src/core/lib/http/parser.h \
+src/core/lib/iomgr/call_combiner.h \
 src/core/lib/iomgr/closure.h \
 src/core/lib/iomgr/combiner.h \
 src/core/lib/iomgr/endpoint.h \
@@ -951,8 +956,6 @@ src/core/lib/iomgr/endpoint_pair.h \
 src/core/lib/iomgr/error.h \
 src/core/lib/iomgr/error_internal.h \
 src/core/lib/iomgr/ev_epoll1_linux.h \
-src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h \
-src/core/lib/iomgr/ev_epoll_thread_pool_linux.h \
 src/core/lib/iomgr/ev_epollex_linux.h \
 src/core/lib/iomgr/ev_epollsig_linux.h \
 src/core/lib/iomgr/ev_poll_posix.h \
@@ -1031,7 +1034,6 @@ src/core/lib/support/spinlock.h \
 src/core/lib/support/stack_lockfree.h \
 src/core/lib/support/string.h \
 src/core/lib/support/string_windows.h \
-src/core/lib/support/thd_internal.h \
 src/core/lib/support/time_precise.h \
 src/core/lib/support/tmpfile.h \
 src/core/lib/surface/alarm_internal.h \
diff --git a/tools/doxygen/Doxyfile.core b/tools/doxygen/Doxyfile.core
index c5ae421d40..632735342b 100644
--- a/tools/doxygen/Doxyfile.core
+++ b/tools/doxygen/Doxyfile.core
@@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC Core"
 # could be handy for archiving the generated documentation or if some version
 # control system is used.
-PROJECT_NUMBER = 4.0.0-dev
+PROJECT_NUMBER = 5.0.0-dev
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
@@ -827,6 +827,8 @@ include/grpc/impl/codegen/slice.h \
 include/grpc/impl/codegen/status.h \
 include/grpc/impl/codegen/sync.h \
 include/grpc/impl/codegen/sync.h \
+include/grpc/impl/codegen/sync_custom.h \
+include/grpc/impl/codegen/sync_custom.h \
 include/grpc/impl/codegen/sync_generic.h \
 include/grpc/impl/codegen/sync_generic.h \
 include/grpc/impl/codegen/sync_posix.h \
@@ -853,6 +855,7 @@ include/grpc/support/port_platform.h \
 include/grpc/support/string_util.h \
 include/grpc/support/subprocess.h \
 include/grpc/support/sync.h \
+include/grpc/support/sync_custom.h \
 include/grpc/support/sync_generic.h \
 include/grpc/support/sync_posix.h \
 include/grpc/support/sync_windows.h \
diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal
index 4901fc9218..e352cb78f1 100644
--- a/tools/doxygen/Doxyfile.core.internal
+++ b/tools/doxygen/Doxyfile.core.internal
@@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC Core"
 # could be handy for archiving the generated documentation or if some version
 # control system is used.
-PROJECT_NUMBER = 4.0.0-dev
+PROJECT_NUMBER = 5.0.0-dev
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
@@ -827,6 +827,8 @@ include/grpc/impl/codegen/slice.h \
 include/grpc/impl/codegen/status.h \
 include/grpc/impl/codegen/sync.h \
 include/grpc/impl/codegen/sync.h \
+include/grpc/impl/codegen/sync_custom.h \
+include/grpc/impl/codegen/sync_custom.h \
 include/grpc/impl/codegen/sync_generic.h \
 include/grpc/impl/codegen/sync_generic.h \
 include/grpc/impl/codegen/sync_posix.h \
@@ -853,6 +855,7 @@ include/grpc/support/port_platform.h \
 include/grpc/support/string_util.h \
 include/grpc/support/subprocess.h \
 include/grpc/support/sync.h \
+include/grpc/support/sync_custom.h \
 include/grpc/support/sync_generic.h \
 include/grpc/support/sync_posix.h \
 include/grpc/support/sync_windows.h \
@@ -976,10 +979,10 @@ src/core/ext/filters/http/message_compress/message_compress_filter.c \
 src/core/ext/filters/http/message_compress/message_compress_filter.h \
 src/core/ext/filters/http/server/http_server_filter.c \
 src/core/ext/filters/http/server/http_server_filter.h \
-src/core/ext/filters/load_reporting/load_reporting.c \
-src/core/ext/filters/load_reporting/load_reporting.h \
-src/core/ext/filters/load_reporting/load_reporting_filter.c \
-src/core/ext/filters/load_reporting/load_reporting_filter.h \
+src/core/ext/filters/load_reporting/server_load_reporting_filter.c \
+src/core/ext/filters/load_reporting/server_load_reporting_filter.h \
+src/core/ext/filters/load_reporting/server_load_reporting_plugin.c \
+src/core/ext/filters/load_reporting/server_load_reporting_plugin.h \
 src/core/ext/filters/max_age/max_age_filter.c \
 src/core/ext/filters/max_age/max_age_filter.h \
 src/core/ext/filters/message_size/message_size_filter.c \
@@ -1074,6 +1077,10 @@ src/core/lib/compression/message_compress.c \
 src/core/lib/compression/message_compress.h \
 src/core/lib/compression/stream_compression.c \
 src/core/lib/compression/stream_compression.h \
+src/core/lib/debug/stats.c \
+src/core/lib/debug/stats.h \
+src/core/lib/debug/stats_data.c \
+src/core/lib/debug/stats_data.h \
 src/core/lib/debug/trace.c \
 src/core/lib/debug/trace.h \
 src/core/lib/http/format_request.c \
@@ -1084,6 +1091,8 @@ src/core/lib/http/httpcli_security_connector.c \
 src/core/lib/http/parser.c \
 src/core/lib/http/parser.h \
 src/core/lib/iomgr/README.md \
+src/core/lib/iomgr/call_combiner.c \
+src/core/lib/iomgr/call_combiner.h \
 src/core/lib/iomgr/closure.c \
 src/core/lib/iomgr/closure.h \
 src/core/lib/iomgr/combiner.c \
@@ -1099,10 +1108,6 @@ src/core/lib/iomgr/error.h \
 src/core/lib/iomgr/error_internal.h \
 src/core/lib/iomgr/ev_epoll1_linux.c \
 src/core/lib/iomgr/ev_epoll1_linux.h \
-src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c \
-src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h \
-src/core/lib/iomgr/ev_epoll_thread_pool_linux.c \
-src/core/lib/iomgr/ev_epoll_thread_pool_linux.h \
 src/core/lib/iomgr/ev_epollex_linux.c \
 src/core/lib/iomgr/ev_epollex_linux.h \
 src/core/lib/iomgr/ev_epollsig_linux.c \
@@ -1330,7 +1335,6 @@ src/core/lib/support/sync.c \
 src/core/lib/support/sync_posix.c \
 src/core/lib/support/sync_windows.c \
 src/core/lib/support/thd.c \
-src/core/lib/support/thd_internal.h \
 src/core/lib/support/thd_posix.c \
 src/core/lib/support/thd_windows.c \
 src/core/lib/support/time.c \
@@ -1424,9 +1428,13 @@ src/core/tsi/transport_security_adapter.h \
 src/core/tsi/transport_security_grpc.c \
 src/core/tsi/transport_security_grpc.h \
 src/core/tsi/transport_security_interface.h \
+third_party/nanopb/pb.h \
 third_party/nanopb/pb_common.c \
+third_party/nanopb/pb_common.h \
 third_party/nanopb/pb_decode.c \
-third_party/nanopb/pb_encode.c
+third_party/nanopb/pb_decode.h \
+third_party/nanopb/pb_encode.c \
+third_party/nanopb/pb_encode.h
 
 # This tag can be used to specify the character encoding of the source files
 # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
diff --git a/tools/flakes/detect_flakes.py b/tools/flakes/detect_flakes.py
index 2aff4c0872..c5c7f61771 100644
--- a/tools/flakes/detect_flakes.py
+++ b/tools/flakes/detect_flakes.py
@@ -33,14 +33,17 @@ sys.path.append(gcp_utils_dir)
 import big_query_utils
 
 def print_table(table):
-  for i, (k, v) in enumerate(table.items()):
+  kokoro_base_url = 'https://kokoro.corp.google.com/job/'
+  for k, v in table.items():
     job_name = v[0]
     build_id = v[1]
     ts = int(float(v[2]))
     # TODO(dgq): timezone handling is wrong. We need to determine the timezone
    # of the computer running this script.
     human_ts = datetime.datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S PDT')
-    print("{}. Test: {}, Timestamp: {}, id: {}@{}\n".format(i, k, human_ts, job_name, build_id))
+    job_path = '{}/{}'.format('/job/'.join(job_name.split('/')), build_id)
+    full_kokoro_url = kokoro_base_url + job_path
+    print("Test: {}, Timestamp: {}, url: {}\n".format(k, human_ts, full_kokoro_url))
 
 
 def get_flaky_tests(days_lower_bound, days_upper_bound, limit=None):
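The rewritten print_table assembles a clickable Kokoro URL by splicing '/job/' between the path segments of the BigQuery job_name and appending the build id. With a hypothetical job name and build id:

    kokoro_base_url = 'https://kokoro.corp.google.com/job/'
    job_name = 'grpc/core/pull_request'  # hypothetical
    build_id = '1234'                    # hypothetical
    job_path = '{}/{}'.format('/job/'.join(job_name.split('/')), build_id)
    print(kokoro_base_url + job_path)
    # https://kokoro.corp.google.com/job/grpc/job/core/job/pull_request/1234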
diff --git a/tools/gce/create_linux_performance_worker.sh b/tools/gce/create_linux_performance_worker.sh
index 9b85f7b437..7f53732c05 100755
--- a/tools/gce/create_linux_performance_worker.sh
+++ b/tools/gce/create_linux_performance_worker.sh
@@ -34,7 +34,7 @@ gcloud compute instances create $INSTANCE_NAME \
     --zone "$ZONE" \
     --machine-type $MACHINE_TYPE \
     --image-project ubuntu-os-cloud \
-    --image-family ubuntu-1610 \
+    --image-family ubuntu-1704 \
     --boot-disk-size 300 \
     --scopes https://www.googleapis.com/auth/bigquery
diff --git a/tools/internal_ci/helper_scripts/prepare_build_macos_rc b/tools/internal_ci/helper_scripts/prepare_build_macos_rc
index bd250957f4..f7fbec93ff 100644
--- a/tools/internal_ci/helper_scripts/prepare_build_macos_rc
+++ b/tools/internal_ci/helper_scripts/prepare_build_macos_rc
@@ -28,8 +28,7 @@ ulimit -n 10000
 ulimit -a
 
 # Add GCP credentials for BQ access
-# pip does not install google-api-python-client properly, so use easy_install
-sudo easy_install --upgrade google-api-python-client
+pip install google-api-python-client --user python
 export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/GrpcTesting-d0eeee2db331.json
 
 # required to build protobuf
@@ -53,8 +52,8 @@ pod repo update  # needed by python
 
 # python
 brew install coreutils  # we need grealpath
-sudo pip install virtualenv
-sudo pip install -U six tox setuptools
+pip install virtualenv --user python
+pip install -U six tox setuptools --user python
 export PYTHONPATH=/Library/Python/3.4/site-packages  # python 3.4
diff --git a/tools/internal_ci/linux/grpc_interop_tocloud.cfg b/tools/internal_ci/linux/grpc_interop_tocloud.cfg
index c613f668d9..2803616007 100644
--- a/tools/internal_ci/linux/grpc_interop_tocloud.cfg
+++ b/tools/internal_ci/linux/grpc_interop_tocloud.cfg
@@ -17,7 +17,7 @@
 # Location of the continuous shell script in repository.
 build_file: "grpc/tools/internal_ci/linux/grpc_interop_tocloud.sh"
 # grpc_interop tests can take 6+ hours to complete.
-timeout_mins: 480
+timeout_mins: 60
 action {
   define_artifacts {
     regex: "**/sponge_log.xml"
diff --git a/tools/internal_ci/linux/grpc_sanity_webhook_test.cfg b/tools/internal_ci/linux/grpc_sanity_webhook_test.cfg
new file mode 100644
index 0000000000..24e7984f3a
--- /dev/null
+++ b/tools/internal_ci/linux/grpc_sanity_webhook_test.cfg
@@ -0,0 +1,30 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Config file for the internal CI (in protobuf text format)
+
+# Location of the continuous shell script in repository.
+build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
+timeout_mins: 20
+action {
+  define_artifacts {
+    regex: "**/*sponge_log.xml"
+    regex: "github/grpc/reports/**"
+  }
+}
+
+env_vars {
+  key: "RUN_TESTS_FLAGS"
+  value: "-f basictests linux sanity opt --inner_jobs 16 -j 1 --internal_ci"
+}
diff --git a/tools/internal_ci/macos/grpc_basictests_dbg.cfg b/tools/internal_ci/macos/grpc_basictests_dbg.cfg
index 53bda1ff0a..f058f0c7e4 100644
--- a/tools/internal_ci/macos/grpc_basictests_dbg.cfg
+++ b/tools/internal_ci/macos/grpc_basictests_dbg.cfg
@@ -27,5 +27,5 @@ action {
 
 env_vars {
   key: "RUN_TESTS_FLAGS"
-  value: "-f basictests macos dbg --internal_ci -j 1 --inner_jobs 4 --bq_result_table aggregate_results"
+  value: "-f basictests macos dbg --internal_ci -j 1 --inner_jobs 4"
 }
diff --git a/tools/internal_ci/macos/grpc_basictests_opt.cfg b/tools/internal_ci/macos/grpc_basictests_opt.cfg
index d359eb601a..5048baaf48 100644
--- a/tools/internal_ci/macos/grpc_basictests_opt.cfg
+++ b/tools/internal_ci/macos/grpc_basictests_opt.cfg
@@ -27,5 +27,5 @@ action {
 
 env_vars {
   key: "RUN_TESTS_FLAGS"
-  value: "-f basictests macos opt --internal_ci -j 1 --inner_jobs 4 --bq_result_table aggregate_results"
+  value: "-f basictests macos opt --internal_ci -j 1 --inner_jobs 4"
 }
diff --git a/tools/internal_ci/windows/grpc_basictests.cfg b/tools/internal_ci/windows/grpc_basictests.cfg
index 396d29ef09..8e644e4c5e 100644
--- a/tools/internal_ci/windows/grpc_basictests.cfg
+++ b/tools/internal_ci/windows/grpc_basictests.cfg
@@ -16,7 +16,7 @@
 # Location of the continuous shell script in repository.
 build_file: "grpc/tools/internal_ci/windows/grpc_run_tests_matrix.bat"
-timeout_mins: 360
+timeout_mins: 240
 action {
   define_artifacts {
     regex: "**/*sponge_log.xml"
diff --git a/tools/internal_ci/windows/grpc_build_artifacts.bat b/tools/internal_ci/windows/grpc_build_artifacts.bat
index 29c876dd35..17d9571d43 100644
--- a/tools/internal_ci/windows/grpc_build_artifacts.bat
+++ b/tools/internal_ci/windows/grpc_build_artifacts.bat
@@ -26,7 +26,7 @@ cd /d %~dp0\..\..\..
 
 call tools/internal_ci/helper_scripts/prepare_build_windows.bat
 
-python tools/run_tests/task_runner.py -f artifact windows || goto :error
+python tools/run_tests/task_runner.py -f artifact windows -j 4 || goto :error
 goto :EOF
 
 :error
diff --git a/tools/internal_ci/windows/grpc_portability.cfg b/tools/internal_ci/windows/grpc_portability.cfg
index cd04d77f5d..94e71753ef 100644
--- a/tools/internal_ci/windows/grpc_portability.cfg
+++ b/tools/internal_ci/windows/grpc_portability.cfg
@@ -16,7 +16,7 @@
 # Location of the continuous shell script in repository.
 build_file: "grpc/tools/internal_ci/windows/grpc_run_tests_matrix.bat"
-timeout_mins: 360
+timeout_mins: 240
 action {
   define_artifacts {
     regex: "**/*sponge_log.xml"
diff --git a/tools/internal_ci/windows/grpc_portability_build_only.cfg b/tools/internal_ci/windows/grpc_portability_build_only.cfg
index b2b58ece2d..3bc27f1f24 100644
--- a/tools/internal_ci/windows/grpc_portability_build_only.cfg
+++ b/tools/internal_ci/windows/grpc_portability_build_only.cfg
@@ -16,7 +16,7 @@
 # Location of the continuous shell script in repository.
 build_file: "grpc/tools/internal_ci/windows/grpc_run_tests_matrix.bat"
-timeout_mins: 360
+timeout_mins: 240
 action {
   define_artifacts {
     regex: "**/*sponge_log.xml"
diff --git a/tools/internal_ci/windows/pull_request/grpc_basictests.cfg b/tools/internal_ci/windows/pull_request/grpc_basictests.cfg
index 678ebeb2af..91777cd7cb 100644
--- a/tools/internal_ci/windows/pull_request/grpc_basictests.cfg
+++ b/tools/internal_ci/windows/pull_request/grpc_basictests.cfg
@@ -16,7 +16,7 @@
 # Location of the continuous shell script in repository.
 build_file: "grpc/tools/internal_ci/windows/grpc_run_tests_matrix.bat"
-timeout_mins: 360
+timeout_mins: 240
 action {
   define_artifacts {
     regex: "**/*sponge_log.xml"
diff --git a/tools/internal_ci/windows/pull_request/grpc_portability.cfg b/tools/internal_ci/windows/pull_request/grpc_portability.cfg
index c395cb4a94..2bda487629 100644
--- a/tools/internal_ci/windows/pull_request/grpc_portability.cfg
+++ b/tools/internal_ci/windows/pull_request/grpc_portability.cfg
@@ -16,7 +16,7 @@
 # Location of the continuous shell script in repository.
 build_file: "grpc/tools/internal_ci/windows/grpc_run_tests_matrix.bat"
-timeout_mins: 360
+timeout_mins: 240
 action {
   define_artifacts {
     regex: "**/*sponge_log.xml"
diff --git a/tools/jenkins/build_artifacts.sh b/tools/jenkins/build_artifacts.sh
index 166c5104cc..ed2c86adbd 100755
--- a/tools/jenkins/build_artifacts.sh
+++ b/tools/jenkins/build_artifacts.sh
@@ -24,8 +24,4 @@ set -ex -o igncr || set -ex
 curr_platform="$platform"
 unset platform  # variable named 'platform' breaks the windows build
 
-if [ "$curr_platform" == "linux" ] && [ "$language" == "ruby" ] ; then
-  ./tools/run_tests/artifacts/build_artifact_ruby.sh
-else
-  python tools/run_tests/task_runner.py -f artifact $language $curr_platform $architecture
-fi
+python tools/run_tests/task_runner.py -f artifact $language $curr_platform $architecture
diff --git a/tools/jenkins/run_performance_profile_daily.sh b/tools/jenkins/run_performance_profile_daily.sh
index 26ee87d240..04a2464aee 100755
--- a/tools/jenkins/run_performance_profile_daily.sh
+++ b/tools/jenkins/run_performance_profile_daily.sh
@@ -27,4 +27,6 @@ else
   PYTHON=python2.7
 fi
 
-$PYTHON tools/run_tests/run_microbenchmark.py --collect summary perf latency
+BENCHMARKS_TO_RUN="bm_fullstack_unary_ping_pong bm_fullstack_streaming_ping_pong bm_fullstack_streaming_pump bm_closure bm_cq bm_call_create bm_error bm_chttp2_hpack bm_chttp2_transport bm_pollset bm_metadata"
+
+$PYTHON tools/run_tests/run_microbenchmark.py --collect summary perf latency -b $BENCHMARKS_TO_RUN
diff --git a/tools/run_tests/artifacts/artifact_targets.py b/tools/run_tests/artifacts/artifact_targets.py
index bd186263ca..12263282ae 100644
--- a/tools/run_tests/artifacts/artifact_targets.py
+++ b/tools/run_tests/artifacts/artifact_targets.py
@@ -150,6 +150,7 @@ class PythonArtifact:
                               self.py_version,
                               '32' if self.arch == 'x86' else '64'],
                              environ=environ,
+                             timeout_seconds=45*60,
                              use_workspace=True)
     else:
       environ['PYTHON'] = self.py_version
@@ -257,6 +258,7 @@ class NodeExtArtifact:
         ['tools\\run_tests\\artifacts\\build_artifact_node.bat',
          self.gyp_arch],
         use_workspace=True,
+        timeout_seconds=45*60,
         cpu_cost=cpu_cost)
     else:
       if self.platform == 'linux':
diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json
index ec7d53665a..2f6e34bfb3 100644
--- a/tools/run_tests/generated/sources_and_headers.json
+++ b/tools/run_tests/generated/sources_and_headers.json
@@ -541,6 +541,23 @@
       "gpr",
"gpr_test_util", "grpc", + "transport_security_test_lib" + ], + "headers": [], + "is_filegroup": false, + "language": "c", + "name": "fake_transport_security_test", + "src": [ + "test/core/tsi/fake_transport_security_test.c" + ], + "third_party": false, + "type": "target" + }, + { + "deps": [ + "gpr", + "gpr_test_util", + "grpc", "grpc_test_util" ], "headers": [], @@ -1027,6 +1044,23 @@ "headers": [], "is_filegroup": false, "language": "c", + "name": "grpc_channel_stack_builder_test", + "src": [ + "test/core/channel/channel_stack_builder_test.c" + ], + "third_party": false, + "type": "target" + }, + { + "deps": [ + "gpr", + "gpr_test_util", + "grpc", + "grpc_test_util" + ], + "headers": [], + "is_filegroup": false, + "language": "c", "name": "grpc_channel_stack_test", "src": [ "test/core/channel/channel_stack_test.c" @@ -2168,6 +2202,23 @@ "gpr", "gpr_test_util", "grpc", + "transport_security_test_lib" + ], + "headers": [], + "is_filegroup": false, + "language": "c", + "name": "ssl_transport_security_test", + "src": [ + "test/core/tsi/ssl_transport_security_test.c" + ], + "third_party": false, + "type": "target" + }, + { + "deps": [ + "gpr", + "gpr_test_util", + "grpc", "grpc_test_util" ], "headers": [], @@ -2739,12 +2790,15 @@ "grpc_test_util_unsecure", "grpc_unsecure" ], - "headers": [], + "headers": [ + "test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h" + ], "is_filegroup": false, "language": "c++", "name": "bm_fullstack_streaming_ping_pong", "src": [ - "test/cpp/microbenchmarks/bm_fullstack_streaming_ping_pong.cc" + "test/cpp/microbenchmarks/bm_fullstack_streaming_ping_pong.cc", + "test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h" ], "third_party": false, "type": "target" @@ -2760,12 +2814,15 @@ "grpc_test_util_unsecure", "grpc_unsecure" ], - "headers": [], + "headers": [ + "test/cpp/microbenchmarks/fullstack_streaming_pump.h" + ], "is_filegroup": false, "language": "c++", "name": "bm_fullstack_streaming_pump", "src": [ - "test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc" + "test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc", + "test/cpp/microbenchmarks/fullstack_streaming_pump.h" ], "third_party": false, "type": "target" @@ -2775,6 +2832,7 @@ "benchmark", "gpr", "gpr_test_util", + "grpc++_test_config", "grpc++_test_util_unsecure", "grpc++_unsecure", "grpc_benchmark", @@ -2802,12 +2860,15 @@ "grpc_test_util_unsecure", "grpc_unsecure" ], - "headers": [], + "headers": [ + "test/cpp/microbenchmarks/fullstack_unary_ping_pong.h" + ], "is_filegroup": false, "language": "c++", "name": "bm_fullstack_unary_ping_pong", "src": [ - "test/cpp/microbenchmarks/bm_fullstack_unary_ping_pong.cc" + "test/cpp/microbenchmarks/bm_fullstack_unary_ping_pong.cc", + "test/cpp/microbenchmarks/fullstack_unary_ping_pong.h" ], "third_party": false, "type": "target" @@ -2968,7 +3029,8 @@ "gpr", "grpc", "grpc++", - "grpc++_codegen_base" + "grpc++_codegen_base", + "grpc++_core_stats" ], "headers": [ "src/proto/grpc/testing/control.grpc.pb.h", @@ -3001,7 +3063,8 @@ "gpr", "grpc", "grpc++_codegen_base", - "grpc++_codegen_base_src" + "grpc++_codegen_base_src", + "grpc++_core_stats" ], "headers": [ "src/proto/grpc/testing/control.grpc.pb.h", @@ -3701,6 +3764,7 @@ "gpr_test_util", "grpc", "grpc++", + "grpc++_core_stats", "grpc++_test_config", "grpc++_test_util", "grpc_test_util", @@ -3722,6 +3786,7 @@ "gpr_test_util", "grpc", "grpc++", + "grpc++_core_stats", "grpc++_test_config", "grpc++_test_util", "grpc_test_util", @@ -3743,6 +3808,7 @@ "gpr_test_util", "grpc", "grpc++", + 
"grpc++_core_stats", "grpc++_test_config", "grpc++_test_util", "grpc_test_util", @@ -3850,6 +3916,7 @@ "gpr_test_util", "grpc", "grpc++", + "grpc++_core_stats", "grpc++_test_config", "grpc++_test_util", "grpc_test_util", @@ -4017,6 +4084,24 @@ "gpr", "gpr_test_util", "grpc", + "grpc++_test_util", + "grpc_test_util" + ], + "headers": [], + "is_filegroup": false, + "language": "c++", + "name": "stats_test", + "src": [ + "test/core/debug/stats_test.cc" + ], + "third_party": false, + "type": "target" + }, + { + "deps": [ + "gpr", + "gpr_test_util", + "grpc", "grpc++", "grpc_test_util" ], @@ -5814,7 +5899,6 @@ "grpc_lb_policy_grpclb_secure", "grpc_lb_policy_pick_first", "grpc_lb_policy_round_robin", - "grpc_load_reporting", "grpc_max_age_filter", "grpc_message_size_filter", "grpc_resolver_dns_ares", @@ -5823,6 +5907,7 @@ "grpc_resolver_sockaddr", "grpc_secure", "grpc_server_backward_compatibility", + "grpc_server_load_reporting", "grpc_transport_chttp2_client_insecure", "grpc_transport_chttp2_client_secure", "grpc_transport_chttp2_server_insecure", @@ -5844,7 +5929,7 @@ "deps": [ "gpr", "grpc_base", - "grpc_load_reporting", + "grpc_server_load_reporting", "grpc_transport_chttp2_client_secure", "grpc_transport_cronet_client_secure" ], @@ -5921,7 +6006,6 @@ "grpc_lb_policy_grpclb", "grpc_lb_policy_pick_first", "grpc_lb_policy_round_robin", - "grpc_load_reporting", "grpc_max_age_filter", "grpc_message_size_filter", "grpc_resolver_dns_ares", @@ -5929,6 +6013,7 @@ "grpc_resolver_fake", "grpc_resolver_sockaddr", "grpc_server_backward_compatibility", + "grpc_server_load_reporting", "grpc_transport_chttp2_client_insecure", "grpc_transport_chttp2_server_insecure", "grpc_transport_inproc", @@ -6023,6 +6108,26 @@ }, { "deps": [ + "grpc++" + ], + "headers": [ + "src/cpp/util/core_stats.h", + "src/proto/grpc/core/stats.grpc.pb.h", + "src/proto/grpc/core/stats.pb.h", + "src/proto/grpc/core/stats_mock.grpc.pb.h" + ], + "is_filegroup": false, + "language": "c++", + "name": "grpc++_core_stats", + "src": [ + "src/cpp/util/core_stats.cc", + "src/cpp/util/core_stats.h" + ], + "third_party": false, + "type": "lib" + }, + { + "deps": [ "census", "gpr", "grpc", @@ -6517,6 +6622,7 @@ "deps": [ "grpc", "grpc++", + "grpc++_core_stats", "grpc++_test_util", "grpc_test_util" ], @@ -7652,6 +7758,7 @@ "include/grpc/support/string_util.h", "include/grpc/support/subprocess.h", "include/grpc/support/sync.h", + "include/grpc/support/sync_custom.h", "include/grpc/support/sync_generic.h", "include/grpc/support/sync_posix.h", "include/grpc/support/sync_windows.h", @@ -7677,7 +7784,6 @@ "src/core/lib/support/stack_lockfree.h", "src/core/lib/support/string.h", "src/core/lib/support/string_windows.h", - "src/core/lib/support/thd_internal.h", "src/core/lib/support/time_precise.h", "src/core/lib/support/tmpfile.h" ], @@ -7701,6 +7807,7 @@ "include/grpc/support/string_util.h", "include/grpc/support/subprocess.h", "include/grpc/support/sync.h", + "include/grpc/support/sync_custom.h", "include/grpc/support/sync_generic.h", "include/grpc/support/sync_posix.h", "include/grpc/support/sync_windows.h", @@ -7726,7 +7833,6 @@ "src/core/lib/support/stack_lockfree.h", "src/core/lib/support/string.h", "src/core/lib/support/string_windows.h", - "src/core/lib/support/thd_internal.h", "src/core/lib/support/time_precise.h", "src/core/lib/support/tmpfile.h" ], @@ -7744,6 +7850,7 @@ "include/grpc/impl/codegen/gpr_types.h", "include/grpc/impl/codegen/port_platform.h", "include/grpc/impl/codegen/sync.h", + "include/grpc/impl/codegen/sync_custom.h", 
"include/grpc/impl/codegen/sync_generic.h", "include/grpc/impl/codegen/sync_posix.h", "include/grpc/impl/codegen/sync_windows.h" @@ -7760,6 +7867,7 @@ "include/grpc/impl/codegen/gpr_types.h", "include/grpc/impl/codegen/port_platform.h", "include/grpc/impl/codegen/sync.h", + "include/grpc/impl/codegen/sync_custom.h", "include/grpc/impl/codegen/sync_generic.h", "include/grpc/impl/codegen/sync_posix.h", "include/grpc/impl/codegen/sync_windows.h" @@ -7819,9 +7927,12 @@ "src/core/lib/compression/compression.c", "src/core/lib/compression/message_compress.c", "src/core/lib/compression/stream_compression.c", + "src/core/lib/debug/stats.c", + "src/core/lib/debug/stats_data.c", "src/core/lib/http/format_request.c", "src/core/lib/http/httpcli.c", "src/core/lib/http/parser.c", + "src/core/lib/iomgr/call_combiner.c", "src/core/lib/iomgr/closure.c", "src/core/lib/iomgr/combiner.c", "src/core/lib/iomgr/endpoint.c", @@ -7830,8 +7941,6 @@ "src/core/lib/iomgr/endpoint_pair_windows.c", "src/core/lib/iomgr/error.c", "src/core/lib/iomgr/ev_epoll1_linux.c", - "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c", - "src/core/lib/iomgr/ev_epoll_thread_pool_linux.c", "src/core/lib/iomgr/ev_epollex_linux.c", "src/core/lib/iomgr/ev_epollsig_linux.c", "src/core/lib/iomgr/ev_poll_posix.c", @@ -7970,9 +8079,12 @@ "src/core/lib/compression/algorithm_metadata.h", "src/core/lib/compression/message_compress.h", "src/core/lib/compression/stream_compression.h", + "src/core/lib/debug/stats.h", + "src/core/lib/debug/stats_data.h", "src/core/lib/http/format_request.h", "src/core/lib/http/httpcli.h", "src/core/lib/http/parser.h", + "src/core/lib/iomgr/call_combiner.h", "src/core/lib/iomgr/closure.h", "src/core/lib/iomgr/combiner.h", "src/core/lib/iomgr/endpoint.h", @@ -7980,8 +8092,6 @@ "src/core/lib/iomgr/error.h", "src/core/lib/iomgr/error_internal.h", "src/core/lib/iomgr/ev_epoll1_linux.h", - "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h", - "src/core/lib/iomgr/ev_epoll_thread_pool_linux.h", "src/core/lib/iomgr/ev_epollex_linux.h", "src/core/lib/iomgr/ev_epollsig_linux.h", "src/core/lib/iomgr/ev_poll_posix.h", @@ -8100,9 +8210,12 @@ "src/core/lib/compression/algorithm_metadata.h", "src/core/lib/compression/message_compress.h", "src/core/lib/compression/stream_compression.h", + "src/core/lib/debug/stats.h", + "src/core/lib/debug/stats_data.h", "src/core/lib/http/format_request.h", "src/core/lib/http/httpcli.h", "src/core/lib/http/parser.h", + "src/core/lib/iomgr/call_combiner.h", "src/core/lib/iomgr/closure.h", "src/core/lib/iomgr/combiner.h", "src/core/lib/iomgr/endpoint.h", @@ -8110,8 +8223,6 @@ "src/core/lib/iomgr/error.h", "src/core/lib/iomgr/error_internal.h", "src/core/lib/iomgr/ev_epoll1_linux.h", - "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h", - "src/core/lib/iomgr/ev_epoll_thread_pool_linux.h", "src/core/lib/iomgr/ev_epollex_linux.h", "src/core/lib/iomgr/ev_epollsig_linux.h", "src/core/lib/iomgr/ev_poll_posix.h", @@ -8463,27 +8574,6 @@ "grpc_base" ], "headers": [ - "src/core/ext/filters/load_reporting/load_reporting.h", - "src/core/ext/filters/load_reporting/load_reporting_filter.h" - ], - "is_filegroup": true, - "language": "c", - "name": "grpc_load_reporting", - "src": [ - "src/core/ext/filters/load_reporting/load_reporting.c", - "src/core/ext/filters/load_reporting/load_reporting.h", - "src/core/ext/filters/load_reporting/load_reporting_filter.c", - "src/core/ext/filters/load_reporting/load_reporting_filter.h" - ], - "third_party": false, - "type": "filegroup" - }, - { - "deps": [ - "gpr", - 
"grpc_base" - ], - "headers": [ "src/core/ext/filters/max_age/max_age_filter.h" ], "is_filegroup": true, @@ -8691,6 +8781,27 @@ { "deps": [ "gpr", + "grpc_base" + ], + "headers": [ + "src/core/ext/filters/load_reporting/server_load_reporting_filter.h", + "src/core/ext/filters/load_reporting/server_load_reporting_plugin.h" + ], + "is_filegroup": true, + "language": "c", + "name": "grpc_server_load_reporting", + "src": [ + "src/core/ext/filters/load_reporting/server_load_reporting_filter.c", + "src/core/ext/filters/load_reporting/server_load_reporting_filter.h", + "src/core/ext/filters/load_reporting/server_load_reporting_plugin.c", + "src/core/ext/filters/load_reporting/server_load_reporting_plugin.h" + ], + "third_party": false, + "type": "filegroup" + }, + { + "deps": [ + "gpr", "gpr_test_util", "grpc_base", "grpc_client_channel", @@ -9099,6 +9210,23 @@ }, { "deps": [ + "grpc" + ], + "headers": [ + "test/core/tsi/transport_security_test_lib.h" + ], + "is_filegroup": true, + "language": "c", + "name": "transport_security_test_lib", + "src": [ + "test/core/tsi/transport_security_test_lib.c", + "test/core/tsi/transport_security_test_lib.h" + ], + "third_party": false, + "type": "filegroup" + }, + { + "deps": [ "gpr", "grpc_base", "grpc_trace", diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json index 118e5c6cee..1bb7a4aa24 100644 --- a/tools/run_tests/generated/tests.json +++ b/tools/run_tests/generated/tests.json @@ -652,6 +652,26 @@ ], "cpu_cost": 1.0, "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "gtest": false, + "language": "c", + "name": "fake_transport_security_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, + { + "args": [], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], "exclude_iomgrs": [ "uv" ], @@ -1225,6 +1245,28 @@ "flaky": false, "gtest": false, "language": "c", + "name": "grpc_channel_stack_builder_test", + "platforms": [ + "linux", + "mac", + "posix", + "windows" + ] + }, + { + "args": [], + "ci_platforms": [ + "linux", + "mac", + "posix", + "windows" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "gtest": false, + "language": "c", "name": "grpc_channel_stack_test", "platforms": [ "linux", @@ -2278,6 +2320,26 @@ "ci_platforms": [ "linux", "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "gtest": false, + "language": "c", + "name": "ssl_transport_security_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, + { + "args": [], + "ci_platforms": [ + "linux", + "mac", "posix", "windows" ], @@ -3943,6 +4005,28 @@ "exclude_configs": [], "exclude_iomgrs": [], "flaky": false, + "gtest": true, + "language": "c++", + "name": "stats_test", + "platforms": [ + "linux", + "mac", + "posix", + "windows" + ] + }, + { + "args": [], + "ci_platforms": [ + "linux", + "mac", + "posix", + "windows" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, "gtest": false, "language": "c++", "name": "status_test", diff --git a/tools/run_tests/helper_scripts/pre_build_cmake.bat b/tools/run_tests/helper_scripts/pre_build_cmake.bat index a770aa8118..d89fc5fec2 100644 --- a/tools/run_tests/helper_scripts/pre_build_cmake.bat +++ b/tools/run_tests/helper_scripts/pre_build_cmake.bat @@ -14,7 +14,8 @@ setlocal -set ARCHITECTURE=%1 +set GENERATOR=%1 +set ARCHITECTURE=%2 cd /d %~dp0\..\..\.. 
@@ -27,7 +28,7 @@ cd build
 
 @rem If yasm is not on the path, use hardcoded path instead.
 yasm --version || set USE_HARDCODED_YASM_PATH_MAYBE=-DCMAKE_ASM_NASM_COMPILER="C:/Program Files (x86)/yasm/yasm.exe"
 
-cmake -G "Visual Studio 14 2015" -A %ARCHITECTURE% -DgRPC_BUILD_TESTS=ON %USE_HARDCODED_YASM_PATH_MAYBE% ../.. || goto :error
+cmake -G %GENERATOR% -A %ARCHITECTURE% -DgRPC_BUILD_TESTS=ON %USE_HARDCODED_YASM_PATH_MAYBE% ../.. || goto :error
 
 endlocal
diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py
index 8868f40f49..cbcc4d0ecc 100644
--- a/tools/run_tests/performance/scenario_config.py
+++ b/tools/run_tests/performance/scenario_config.py
@@ -154,7 +154,9 @@ def _ping_pong_scenario(name, rpc_type,
     scenario['client_config']['payload_config'] = _payload_type(use_generic_payload, req_size, resp_size)
 
-  optimization_target = 'blend'
+  # Optimization target of 'throughput' does not work well with epoll1 polling
+  # engine. Use the default value of 'blend'
+  optimization_target = 'throughput'
 
   if unconstrained_client:
     outstanding_calls = outstanding if outstanding is not None else OUTSTANDING_REQUESTS[unconstrained_client]
@@ -169,7 +171,6 @@ def _ping_pong_scenario(name, rpc_type,
     scenario['client_config']['outstanding_rpcs_per_channel'] = deep
     scenario['client_config']['client_channels'] = wide
     scenario['client_config']['async_client_threads'] = 0
-    optimization_target = 'throughput'
   else:
     scenario['client_config']['outstanding_rpcs_per_channel'] = 1
     scenario['client_config']['client_channels'] = 1
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 312abd59c4..c136af58cb 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -83,12 +83,14 @@ def collect_latency(bm_name, args):
         jobset.JobSpec(['bins/basicprof/%s' % bm_name,
                         '--benchmark_filter=^%s$' % line,
                         '--benchmark_min_time=0.05'],
-                       environ={'LATENCY_TRACE': '%s.trace' % fnize(line)}))
+                       environ={'LATENCY_TRACE': '%s.trace' % fnize(line)},
+                       shortname='profile-%s' % fnize(line)))
     profile_analysis.append(
         jobset.JobSpec([sys.executable,
                         'tools/profiling/latency_profile/profile_analyzer.py',
                         '--source', '%s.trace' % fnize(line), '--fmt', 'simple',
-                        '--out', 'reports/%s.txt' % fnize(line)], timeout_seconds=None))
+                        '--out', 'reports/%s.txt' % fnize(line)], timeout_seconds=20*60,
+                       shortname='analyze-%s' % fnize(line)))
     cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
     # periodically flush out the list of jobs: profile_analysis jobs at least
     # consume upwards of five gigabytes of ram in some cases, and so analysing
@@ -126,14 +128,16 @@ def collect_perf(bm_name, args):
                         '-g', '-F', '997',
                         'bins/mutrace/%s' % bm_name,
                         '--benchmark_filter=^%s$' % line,
-                        '--benchmark_min_time=10']))
+                        '--benchmark_min_time=10'],
+                       shortname='perf-%s' % fnize(line)))
     profile_analysis.append(
        jobset.JobSpec(['tools/run_tests/performance/process_local_perf_flamegraphs.sh'],
                       environ = {
                           'PERF_BASE_NAME': fnize(line),
                           'OUTPUT_DIR': 'reports',
                           'OUTPUT_FILENAME': fnize(line),
-                      }))
+                      },
+                      shortname='flame-%s' % fnize(line)))
     cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
     cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
     # periodically flush out the list of jobs: temporary space required for this
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index b9277c919b..3bfd736c51 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -281,18 +281,18 @@ def create_qpsworkers(languages, worker_hosts, perf_cmd=None):
           for worker_idx, worker in enumerate(workers)]
 
 
-def perf_report_processor_job(worker_host, perf_base_name, output_filename):
+def perf_report_processor_job(worker_host, perf_base_name, output_filename, flame_graph_reports):
   print('Creating perf report collection job for %s' % worker_host)
   cmd = ''
   if worker_host != 'localhost':
     user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host)
     cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s tools/run_tests/performance/process_remote_perf_flamegraphs.sh" \
-        % (user_at_host, output_filename, args.flame_graph_reports, perf_base_name)
+        % (user_at_host, output_filename, flame_graph_reports, perf_base_name)
   else:
     cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s tools/run_tests/performance/process_local_perf_flamegraphs.sh" \
-        % (output_filename, args.flame_graph_reports, perf_base_name)
+        % (output_filename, flame_graph_reports, perf_base_name)
 
   return jobset.JobSpec(cmdline=cmd,
                         timeout_seconds=3*60,
@@ -332,7 +332,7 @@ def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
   for language in languages:
     for scenario_json in language.scenarios():
-      if re.search(args.regex, scenario_json['name']):
+      if re.search(regex, scenario_json['name']):
         categories = scenario_json.get('CATEGORIES', ['scalable', 'smoketest'])
         if category in categories or category == 'all':
           workers = workers_by_lang[str(language)][:]
@@ -376,7 +376,7 @@ def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
   return scenarios
 
 
-def finish_qps_workers(jobs):
+def finish_qps_workers(jobs, qpsworker_jobs):
   """Waits for given jobs to finish and eventually kills them."""
   retries = 0
   num_killed = 0
@@ -399,10 +399,10 @@ profile_output_files = []
 # Collect perf text reports and flamegraphs if perf_cmd was used
 # Note the base names of perf text reports are used when creating and processing
 # perf data. The scenario name uniquifies the output name in the final
 # perf reports directory.
 # Also, the perf profiles need to be fetched and processed after each scenario
 # in order to avoid clobbering the output files.
-def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name):
+def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name, flame_graph_reports):
   perf_report_jobs = []
   global profile_output_files
   for host_and_port in hosts_and_base_names:
@@ -411,181 +411,184 @@ def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name):
     # from the base filename, create .svg output filename
     host = host_and_port.split(':')[0]
     profile_output_files.append('%s.svg' % output_filename)
-    perf_report_jobs.append(perf_report_processor_job(host, perf_base_name, output_filename))
+    perf_report_jobs.append(perf_report_processor_job(host, perf_base_name, output_filename, flame_graph_reports))
   jobset.message('START', 'Collecting perf reports from qps workers', do_newline=True)
   failures, _ = jobset.run(perf_report_jobs, newline_on_success=True, maxjobs=1)
   jobset.message('END', 'Collecting perf reports from qps workers', do_newline=True)
   return failures
+def main():
+  argp = argparse.ArgumentParser(description='Run performance tests.')
+  argp.add_argument('-l', '--language',
+                    choices=['all'] + sorted(scenario_config.LANGUAGES.keys()),
+                    nargs='+',
+                    required=True,
+                    help='Languages to benchmark.')
+  argp.add_argument('--remote_driver_host',
+                    default=None,
+                    help='Run QPS driver on given host. By default, QPS driver is run locally.')
+  argp.add_argument('--remote_worker_host',
+                    nargs='+',
+                    default=[],
+                    help='Worker hosts where to start QPS workers.')
+  argp.add_argument('--dry_run',
+                    default=False,
+                    action='store_const',
+                    const=True,
+                    help='Just list scenarios to be run, but don\'t run them.')
+  argp.add_argument('-r', '--regex', default='.*', type=str,
+                    help='Regex to select scenarios to run.')
+  argp.add_argument('--bq_result_table', default=None, type=str,
+                    help='Bigquery "dataset.table" to upload results to.')
+  argp.add_argument('--category',
+                    choices=['smoketest','all','scalable','sweep'],
+                    default='all',
+                    help='Select a category of tests to run.')
+  argp.add_argument('--netperf',
+                    default=False,
+                    action='store_const',
+                    const=True,
+                    help='Run netperf benchmark as one of the scenarios.')
+  argp.add_argument('--server_cpu_load',
+                    default=0, type=int,
+                    help='Select a targeted server cpu load to run. 0 means ignore this flag')
+  argp.add_argument('-x', '--xml_report', default='report.xml', type=str,
+                    help='Name of XML report file to generate.')
+  argp.add_argument('--perf_args',
+                    help=('Example usage: "--perf_args=record -F 99 -g". '
+                          'Wrap QPS workers in a perf command '
+                          'with the arguments to perf specified here. '
+                          '".svg" flame graph profiles will be '
+                          'created for each QPS worker on each scenario. '
+                          'Files will output to "<repo_root>/<args.flame_graph_reports>" '
+                          'directory. Output files from running the worker '
+                          'under perf are saved in the repo root where it is run. '
+                          'Note that the perf "-g" flag is necessary for '
+                          'flame graph generation to work (assuming the binary '
+                          'being profiled uses frame pointers, check out '
+                          '"--call-graph dwarf" option using libunwind otherwise.) '
+                          'Also note that the entire "--perf_args=<arg(s)>" must '
+                          'be wrapped in quotes as in the example usage. '
+                          'If "--perf_args" is unspecified, "perf" will '
+                          'not be used at all. '
+                          'See http://www.brendangregg.com/perf.html '
+                          'for more general perf examples.'))
+  argp.add_argument('--skip_generate_flamegraphs',
+                    default=False,
+                    action='store_const',
+                    const=True,
+                    help=('Turn flame graph generation off. '
+                          'May be useful if "perf_args" arguments do not make sense for '
+                          'generating flamegraphs (e.g., "--perf_args=stat ...")'))
+  argp.add_argument('-f', '--flame_graph_reports', default='perf_reports', type=str,
+                    help='Name of directory to output flame graph profiles to, if any are created.')
+
+  args = argp.parse_args()
+
+  languages = set(scenario_config.LANGUAGES[l]
+                  for l in itertools.chain.from_iterable(
+                      six.iterkeys(scenario_config.LANGUAGES) if x == 'all'
+                      else [x] for x in args.language))
+
+  # Put together the set of remote hosts where we will run and build
+  remote_hosts = set()
+  if args.remote_worker_host:
+    for host in args.remote_worker_host:
+      remote_hosts.add(host)
+  if args.remote_driver_host:
+    remote_hosts.add(args.remote_driver_host)
+
+  if not args.dry_run:
+    if remote_hosts:
+      archive_repo(languages=[str(l) for l in languages])
+      prepare_remote_hosts(remote_hosts, prepare_local=True)
+    else:
+      prepare_remote_hosts([], prepare_local=True)
+
+  build_local = False
+  if not args.remote_driver_host:
+    build_local = True
+  if not args.dry_run:
+    build_on_remote_hosts(remote_hosts, languages=[str(l) for l in languages], build_local=build_local)
+
+  perf_cmd = None
+  if args.perf_args:
+    print('Running workers under perf profiler')
+    # Expect /usr/bin/perf to be installed here, as is usual
+    perf_cmd = ['/usr/bin/perf']
+    perf_cmd.extend(re.split('\s+', args.perf_args))
+
+  qpsworker_jobs = create_qpsworkers(languages, args.remote_worker_host, perf_cmd=perf_cmd)
+
+  # get list of worker addresses for each language.
+  workers_by_lang = dict([(str(language), []) for language in languages])
+  for job in qpsworker_jobs:
+    workers_by_lang[str(job.language)].append(job)
+
+  scenarios = create_scenarios(languages,
+                               workers_by_lang=workers_by_lang,
+                               remote_host=args.remote_driver_host,
+                               regex=args.regex,
+                               category=args.category,
+                               bq_result_table=args.bq_result_table,
+                               netperf=args.netperf,
+                               netperf_hosts=args.remote_worker_host,
+                               server_cpu_load=args.server_cpu_load)
+
+  if not scenarios:
+    raise Exception('No scenarios to run')
+
+  total_scenario_failures = 0
+  qps_workers_killed = 0
+  merged_resultset = {}
+  perf_report_failures = 0
+
+  for scenario in scenarios:
+    if args.dry_run:
+      print(scenario.name)
+    else:
+      scenario_failures = 0
+      try:
+        for worker in scenario.workers:
+          worker.start()
+        jobs = [scenario.jobspec]
+        if scenario.workers:
+          jobs.append(create_quit_jobspec(scenario.workers, remote_host=args.remote_driver_host))
+        scenario_failures, resultset = jobset.run(jobs, newline_on_success=True, maxjobs=1, clear_alarms=False)
+        total_scenario_failures += scenario_failures
+        merged_resultset = dict(itertools.chain(six.iteritems(merged_resultset),
+                                                six.iteritems(resultset)))
+      finally:
+        # Consider qps workers that need to be killed as failures
+        qps_workers_killed += finish_qps_workers(scenario.workers, qpsworker_jobs)
+
+      if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
+        workers_and_base_names = {}
+        for worker in scenario.workers:
+          if not worker.perf_file_base_name:
+            raise Exception('using perf but perf report filename is unspecified')
+          workers_and_base_names[worker.host_and_port] = worker.perf_file_base_name
+        perf_report_failures += run_collect_perf_profile_jobs(workers_and_base_names, scenario.name, args.flame_graph_reports)
+
+  # Still write the index.html even if some scenarios failed.
+  # 'profile_output_files' will only have names for scenarios that passed
+  if perf_cmd and not args.skip_generate_flamegraphs:
+    # write the index file to the output dir, with all profiles from all scenarios/workers
+    report_utils.render_perf_profiling_results('%s/index.html' % args.flame_graph_reports, profile_output_files)
+
+  report_utils.render_junit_xml_report(merged_resultset, args.xml_report,
+                                       suite_name='benchmarks')
+
+  if total_scenario_failures > 0 or qps_workers_killed > 0:
+    print('%s scenarios failed and %s qps worker jobs killed' % (total_scenario_failures, qps_workers_killed))
+    sys.exit(1)
-argp = argparse.ArgumentParser(description='Run performance tests.')
-argp.add_argument('-l', '--language',
-                  choices=['all'] + sorted(scenario_config.LANGUAGES.keys()),
-                  nargs='+',
-                  required=True,
-                  help='Languages to benchmark.')
-argp.add_argument('--remote_driver_host',
-                  default=None,
-                  help='Run QPS driver on given host. By default, QPS driver is run locally.')
-argp.add_argument('--remote_worker_host',
-                  nargs='+',
-                  default=[],
-                  help='Worker hosts where to start QPS workers.')
-argp.add_argument('--dry_run',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Just list scenarios to be run, but don\'t run them.')
-argp.add_argument('-r', '--regex', default='.*', type=str,
-                  help='Regex to select scenarios to run.')
-argp.add_argument('--bq_result_table', default=None, type=str,
-                  help='Bigquery "dataset.table" to upload results to.')
-argp.add_argument('--category',
-                  choices=['smoketest','all','scalable','sweep'],
-                  default='all',
-                  help='Select a category of tests to run.')
-argp.add_argument('--netperf',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Run netperf benchmark as one of the scenarios.')
-argp.add_argument('--server_cpu_load',
-                  default=0, type=int,
-                  help='Select a targeted server cpu load to run. 0 means ignore this flag')
-argp.add_argument('-x', '--xml_report', default='report.xml', type=str,
-                  help='Name of XML report file to generate.')
-argp.add_argument('--perf_args',
-                  help=('Example usage: "--perf_args=record -F 99 -g". '
-                        'Wrap QPS workers in a perf command '
-                        'with the arguments to perf specified here. '
-                        '".svg" flame graph profiles will be '
-                        'created for each QPS worker on each scenario. '
-                        'Files will output to "<repo_root>/<args.flame_graph_reports>" '
-                        'directory. Output files from running the worker '
-                        'under perf are saved in the repo root where it is run. '
-                        'Note that the perf "-g" flag is necessary for '
-                        'flame graph generation to work (assuming the binary '
-                        'being profiled uses frame pointers, check out '
-                        '"--call-graph dwarf" option using libunwind otherwise.) '
-                        'Also note that the entire "--perf_args=<arg(s)>" must '
-                        'be wrapped in quotes as in the example usage. '
-                        'If "--perf_args" is unspecified, "perf" will '
-                        'not be used at all. '
-                        'See http://www.brendangregg.com/perf.html '
-                        'for more general perf examples.'))
-argp.add_argument('--skip_generate_flamegraphs',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help=('Turn flame graph generation off. '
-                        'May be useful if "perf_args" arguments do not make sense for '
-                        'generating flamegraphs (e.g., "--perf_args=stat ...")'))
-argp.add_argument('-f', '--flame_graph_reports', default='perf_reports', type=str,
-                  help='Name of directory to output flame graph profiles to, if any are created.')
-
-args = argp.parse_args()
-
-languages = set(scenario_config.LANGUAGES[l]
-                for l in itertools.chain.from_iterable(
-                    six.iterkeys(scenario_config.LANGUAGES) if x == 'all'
-                    else [x] for x in args.language))
-
-# Put together the set of remote hosts where we will run and build
-remote_hosts = set()
-if args.remote_worker_host:
-  for host in args.remote_worker_host:
-    remote_hosts.add(host)
-if args.remote_driver_host:
-  remote_hosts.add(args.remote_driver_host)
-
-if not args.dry_run:
-  if remote_hosts:
-    archive_repo(languages=[str(l) for l in languages])
-    prepare_remote_hosts(remote_hosts, prepare_local=True)
-  else:
-    prepare_remote_hosts([], prepare_local=True)
-
-build_local = False
-if not args.remote_driver_host:
-  build_local = True
-if not args.dry_run:
-  build_on_remote_hosts(remote_hosts, languages=[str(l) for l in languages], build_local=build_local)
-
-perf_cmd = None
-if args.perf_args:
-  print('Running workers under perf profiler')
-  # Expect /usr/bin/perf to be installed here, as is usual
-  perf_cmd = ['/usr/bin/perf']
-  perf_cmd.extend(re.split('\s+', args.perf_args))
-
-qpsworker_jobs = create_qpsworkers(languages, args.remote_worker_host, perf_cmd=perf_cmd)
-
-# get list of worker addresses for each language.
-workers_by_lang = dict([(str(language), []) for language in languages])
-for job in qpsworker_jobs:
-  workers_by_lang[str(job.language)].append(job)
-
-scenarios = create_scenarios(languages,
-                             workers_by_lang=workers_by_lang,
-                             remote_host=args.remote_driver_host,
-                             regex=args.regex,
-                             category=args.category,
-                             bq_result_table=args.bq_result_table,
-                             netperf=args.netperf,
-                             netperf_hosts=args.remote_worker_host,
-                             server_cpu_load=args.server_cpu_load)
-
-if not scenarios:
-  raise Exception('No scenarios to run')
-
-total_scenario_failures = 0
-qps_workers_killed = 0
-merged_resultset = {}
-perf_report_failures = 0
-
-for scenario in scenarios:
-  if args.dry_run:
-    print(scenario.name)
-  else:
-    scenario_failures = 0
-    try:
-      for worker in scenario.workers:
-        worker.start()
-      jobs = [scenario.jobspec]
-      if scenario.workers:
-        jobs.append(create_quit_jobspec(scenario.workers, remote_host=args.remote_driver_host))
-      scenario_failures, resultset = jobset.run(jobs, newline_on_success=True, maxjobs=1)
-      total_scenario_failures += scenario_failures
-      merged_resultset = dict(itertools.chain(six.iteritems(merged_resultset),
-                                              six.iteritems(resultset)))
-    finally:
-      # Consider qps workers that need to be killed as failures
-      qps_workers_killed += finish_qps_workers(scenario.workers)
-
-    if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
-      workers_and_base_names = {}
-      for worker in scenario.workers:
-        if not worker.perf_file_base_name:
-          raise Exception('using perf but perf report filename is unspecified')
-        workers_and_base_names[worker.host_and_port] = worker.perf_file_base_name
-      perf_report_failures += run_collect_perf_profile_jobs(workers_and_base_names, scenario.name)
-
-# Still write the index.html even if some scenarios failed.
-# 'profile_output_files' will only have names for scenarios that passed
-if perf_cmd and not args.skip_generate_flamegraphs:
-  # write the index file to the output dir, with all profiles from all scenarios/workers
-  report_utils.render_perf_profiling_results('%s/index.html' % args.flame_graph_reports, profile_output_files)
-
-report_utils.render_junit_xml_report(merged_resultset, args.xml_report,
-                                     suite_name='benchmarks')
-
-if total_scenario_failures > 0 or qps_workers_killed > 0:
-  print('%s scenarios failed and %s qps worker jobs killed' % (total_scenario_failures, qps_workers_killed))
-  sys.exit(1)
-
-if perf_report_failures > 0:
-  print('%s perf profile collection jobs failed' % perf_report_failures)
-  sys.exit(1)
+  if perf_report_failures > 0:
+    print('%s perf profile collection jobs failed' % perf_report_failures)
+    sys.exit(1)
+
+if __name__ == "__main__":
+  main()
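The run_performance_tests.py diff above is largely a mechanical refactor: the module-level script body moves into main(), and helpers now take as parameters what they previously read from the global args (regex, flame_graph_reports, qpsworker_jobs). A minimal sketch of the same pattern, with hypothetical names rather than the real helpers:

import argparse

def render_reports(flame_graph_reports):
  # Parameter instead of the former module-level 'args.flame_graph_reports'.
  print('writing index to %s/index.html' % flame_graph_reports)

def main():
  argp = argparse.ArgumentParser(description='Run performance tests.')
  argp.add_argument('-f', '--flame_graph_reports', default='perf_reports')
  args = argp.parse_args()
  render_reports(args.flame_graph_reports)

if __name__ == '__main__':
  main()

Besides being easier to test, this keeps the module importable without side effects, since argument parsing no longer runs at import time.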
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index d874b2a320..b66c5f7f71 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -69,17 +69,22 @@ _POLLING_STRATEGIES = {
 }
-def get_flaky_tests(limit=None):
+BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
+
+
+def get_bqtest_data(limit=None):
   import big_query_utils
   bq = big_query_utils.create_big_query()
   query = """
SELECT
  filtered_test_name,
+  SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
+  MAX(cpu_measured) as cpu
FROM (
  SELECT
    REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
-    result
+    result, cpu_measured
  FROM
    [grpc-testing:jenkins_test_results.aggregate_results]
  WHERE
@@ -89,15 +94,15 @@ SELECT
GROUP BY
  filtered_test_name
HAVING
-  SUM(result != 'PASSED' AND result != 'SKIPPED') > 0"""
+  flaky OR cpu > 0"""
   if limit:
     query += " limit {}".format(limit)
   query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
   page = bq.jobs().getQueryResults(
       pageToken=None, **query_job['jobReference']).execute(num_retries=3)
-  flake_names = [row['f'][0]['v'] for row in page['rows']]
-  return flake_names
+  test_data = [BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true', float(row['f'][2]['v'])) for row in page['rows']]
+  return test_data
 def platform_string():
@@ -141,6 +146,9 @@ class Config(object):
     if not flaky and shortname and shortname in flaky_tests:
       print('Setting %s to flaky' % shortname)
       flaky = True
+    if shortname in shortname_to_cpu:
+      print('Update CPU cost for %s: %f -> %f' % (shortname, cpu_cost, shortname_to_cpu[shortname]))
+      cpu_cost = shortname_to_cpu[shortname]
     return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
                           shortname=shortname,
                           environ=actual_environ,
@@ -235,8 +243,10 @@ class CLanguage(object):
     self.config = config
     self.args = args
     if self.platform == 'windows':
-      _check_compiler(self.args.compiler, ['default', 'cmake'])
+      _check_compiler(self.args.compiler, ['default', 'cmake', 'cmake_vs2015',
+                                           'cmake_vs2017'])
       _check_arch(self.args.arch, ['default', 'x64', 'x86'])
+      self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
       self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
       self._use_cmake = True
       self._make_options = []
@@ -363,11 +373,13 @@ class CLanguage(object):
             'check_epollexclusive']
   def make_options(self):
-    return self._make_options;
+    return self._make_options
   def pre_build_steps(self):
     if self.platform == 'windows':
-      return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat', self._cmake_arch_option]]
+      return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
+               self._cmake_generator_option,
+               self._cmake_arch_option]]
     elif self._use_cmake:
       return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
     else:
@@ -1205,7 +1217,7 @@ argp.add_argument('--compiler',
                             'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
                             'electron1.3', 'electron1.6',
                             'coreclr',
-                            'cmake'],
+                            'cmake', 'cmake_vs2015', 'cmake_vs2017'],
                   default='default',
                   help='Selects compiler to use. Allowed values depend on the platform and language.')
argp.add_argument('--iomgr_platform',
@@ -1250,9 +1262,12 @@ argp.add_argument('--disable_auto_set_flakes', default=False, const=True, action
args = argp.parse_args()
flaky_tests = set()
+shortname_to_cpu = {}
if not args.disable_auto_set_flakes:
  try:
-    flaky_tests = set(get_flaky_tests())
+    for test in get_bqtest_data():
+      if test.flaky: flaky_tests.add(test.name)
+      if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
  except:
    print("Unexpected error getting flaky tests:", sys.exc_info()[0])
@@ -1364,27 +1379,11 @@ _check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
  if platform_string() == 'windows':
-    if makefile.startswith('cmake/build/'):
-      return [jobset.JobSpec(['cmake', '--build', '.',
-                              '--target', '%s' % target,
-                              '--config', _MSBUILD_CONFIG[cfg]],
-                             cwd=os.path.dirname(makefile),
-                             timeout_seconds=None) for target in targets]
-    extra_args = []
-    # better do parallel compilation
-    # empirically /m:2 gives the best performance/price and should prevent
-    # overloading the windows workers.
-    extra_args.extend(['/m:2'])
-    # disable PDB generation: it's broken, and we don't need it during CI
-    extra_args.extend(['/p:Jenkins=true'])
-    return [
-      jobset.JobSpec([_windows_build_bat(args.compiler),
-                      'vsprojects\\%s.sln' % target,
-                      '/p:Configuration=%s' % _MSBUILD_CONFIG[cfg]] +
-                     extra_args +
-                     language_make_options,
-                     shell=True, timeout_seconds=None)
-      for target in targets]
+    return [jobset.JobSpec(['cmake', '--build', '.',
+                            '--target', '%s' % target,
+                            '--config', _MSBUILD_CONFIG[cfg]],
+                           cwd=os.path.dirname(makefile),
+                           timeout_seconds=None) for target in targets]
  else:
    if targets and makefile.startswith('cmake/build/'):
      # With cmake, we've passed all the build configuration in the pre-build step already
@@ -1528,7 +1527,7 @@ def _build_and_run(
  # When running on travis, we want our test runs to be as similar as possible
  # for reproducibility purposes.
  if args.travis and args.max_time <= 0:
-    massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
+    massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
  else:
    # whereas otherwise, we want to shuffle things up to give all tests a
    # chance to run.
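To make the get_bqtest_data() change above concrete: BigQuery's getQueryResults REST response encodes each row as {'f': [{'v': ...}, ...]}, one entry per selected column, which is exactly what the list comprehension unpacks. A small self-contained illustration with a fabricated row (the test name and numbers are made up):

import collections

BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')

# One fabricated row in BigQuery's wire format: name, flaky, cpu columns.
rows = [{'f': [{'v': 'h2_census_test'}, {'v': 'true'}, {'v': '2.5'}]}]

test_data = [BigQueryTestData(row['f'][0]['v'],
                              row['f'][1]['v'] == 'true',
                              float(row['f'][2]['v'])) for row in rows]

# Mirrors how run_tests.py splits the result into the flaky set and the
# per-test CPU cost override table.
flaky_tests = set(t.name for t in test_data if t.flaky)
shortname_to_cpu = dict((t.name, t.cpu) for t in test_data if t.cpu > 0)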
diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py
index 7d26b284da..00680b02d3 100755
--- a/tools/run_tests/run_tests_matrix.py
+++ b/tools/run_tests/run_tests_matrix.py
@@ -209,16 +209,15 @@ def _create_portability_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS)
                                   extra_args=extra_args,
                                   inner_jobs=inner_jobs)
-  # portability C on Windows
-  for arch in ['x86', 'x64']:
-    test_jobs += _generate_jobs(languages=['c'],
-                                configs=['dbg'],
-                                platforms=['windows'],
-                                arch=arch,
-                                compiler='default',
-                                labels=['portability', 'corelang'],
-                                extra_args=extra_args,
-                                inner_jobs=inner_jobs)
+  # portability C on Windows 64-bit (x86 is the default)
+  test_jobs += _generate_jobs(languages=['c'],
+                              configs=['dbg'],
+                              platforms=['windows'],
+                              arch='x64',
+                              compiler='default',
+                              labels=['portability', 'corelang'],
+                              extra_args=extra_args,
+                              inner_jobs=inner_jobs)
   # portability C++ on Windows
   # TODO(jtattermusch): some of the tests are failing, so we force --build_only
@@ -231,6 +230,17 @@ def _create_portability_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS)
                               extra_args=extra_args + ['--build_only'],
                               inner_jobs=inner_jobs)
+  # portability C and C++ on Windows using VS2017 (build only)
+  # TODO(jtattermusch): some of the tests are failing, so we force --build_only
+  test_jobs += _generate_jobs(languages=['c', 'c++'],
+                              configs=['dbg'],
+                              platforms=['windows'],
+                              arch='x64',
+                              compiler='cmake_vs2017',
+                              labels=['portability', 'corelang'],
+                              extra_args=extra_args + ['--build_only'],
+                              inner_jobs=inner_jobs)
+
   # C and C++ with the c-ares DNS resolver on Linux
   test_jobs += _generate_jobs(languages=['c', 'c++'],
                               configs=['dbg'], platforms=['linux'],
diff --git a/tools/run_tests/sanity/check_submodules.sh b/tools/run_tests/sanity/check_submodules.sh
index b0a0c3a8eb..7c934b1ba7 100755
--- a/tools/run_tests/sanity/check_submodules.sh
+++ b/tools/run_tests/sanity/check_submodules.sh
@@ -33,7 +33,7 @@ cat << EOF | awk '{ print $1 }' | sort > $want_submodules
  ec44c6c1675c25b9827aacd08c02433cccde7780 third_party/googletest (release-1.8.0)
  80a37e0782d2d702d52234b62dd4b9ec74fd2c95 third_party/protobuf (v3.4.0)
  cacf7f1d4e3d44d871b605da3b647f07d718623f third_party/zlib (v1.2.11)
- 7691f773af79bf75a62d1863fd0f13ebf9dc51b1 third_party/cares/cares (1.12.0)
+ 3be1924221e1326df520f8498d704a5c4c8d0cce third_party/cares/cares (cares-1_13_0)
 EOF
 diff -u $submodules $want_submodules
diff --git a/tools/ubsan_suppressions.txt b/tools/ubsan_suppressions.txt
index 2dcfeea9af..6ccc306cf0 100644
--- a/tools/ubsan_suppressions.txt
+++ b/tools/ubsan_suppressions.txt
@@ -5,6 +5,7 @@ nonnull-attribute:rsa_blinding_get
 nonnull-attribute:ssl_copy_key_material
 alignment:CRYPTO_cbc128_encrypt
 alignment:CRYPTO_gcm128_encrypt
+alignment:poly1305_block_copy
 nonnull-attribute:google::protobuf::*
 alignment:google::protobuf::*
 nonnull-attribute:_tr_stored_block
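A closing note on the Windows portability changes in this commit: they hinge on a one-line mapping in run_tests.py from the --compiler flag to the CMake generator name that pre_build_cmake.bat receives as %GENERATOR%. A minimal sketch of that selection, simplified from the CLanguage constructor above:

def cmake_generator(compiler):
  # 'cmake_vs2017' is the only compiler value that selects the VS2017
  # generator; 'default', 'cmake' and 'cmake_vs2015' keep VS2015.
  if compiler == 'cmake_vs2017':
    return 'Visual Studio 15 2017'
  return 'Visual Studio 14 2015'

assert cmake_generator('cmake_vs2017') == 'Visual Studio 15 2017'
assert cmake_generator('cmake_vs2015') == 'Visual Studio 14 2015'

Keeping the generator choice in Python and passing it to the batch script avoids hardcoding a Visual Studio version on the CMake command line, which is what the pre_build_cmake.bat hunk at the top of this section removes.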