Diffstat (limited to 'tools/gce_setup')
-rw-r--r--  tools/gce_setup/README.md                                48
-rwxr-xr-x  tools/gce_setup/compute_extras.sh                       255
-rwxr-xr-x  tools/gce_setup/grpc_docker.sh                          503
-rwxr-xr-x  tools/gce_setup/new_grpc_docker_builder.sh              153
-rwxr-xr-x  tools/gce_setup/new_grpc_docker_builder_on_startup.sh    70
-rwxr-xr-x  tools/gce_setup/shared_startup_funcs.sh                 432
6 files changed, 1461 insertions, 0 deletions
diff --git a/tools/gce_setup/README.md b/tools/gce_setup/README.md
new file mode 100644
index 0000000000..253e94daa9
--- /dev/null
+++ b/tools/gce_setup/README.md
@@ -0,0 +1,48 @@
+GCE images for GRPC
+===================
+
+This directory contains a number of shell files used for setting up GCE images
+and instances for developing and testing gRPC.
+
+
+
+Goal
+----
+
+- provide a script to create a GCE image that has everything needed to try
+out gRPC on GCE.
+- provide another script that creates a new GCE instance from the latest image
+
+- additional scripts may be added in the future
+
+
+Usage
+------
+
+# Minimal usage (see the scripts themselves for options)
+
+$ create_grpc_dev_image.sh # creates a grpc GCE image
+$ ...
+$ new_grpc_dev_instance.sh # creates an instance using the latest grpc GCE image
+
+
+Requirements
+------------
+
+Install [Google Cloud SDK](https://developers.google.com/cloud/sdk/)
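+
+For example, after installing the SDK, something like the following prepares
+gcloud for use with these scripts (the project id below is a placeholder):
+
+$ gcloud auth login
+$ gcloud config set project <your-cloud-project-id>
+$ gcloud config set compute/zone asia-east1-a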
+
+Contents
+--------
+
+Library scripts that contain bash functions used in the other scripts:
+- shared_setup_funcs.sh # funcs used in create_grpc_dev_image and new_grpc_dev_instance
+- gcutil_extras.sh # wrappers for common tasks that use gcutil
+- build_grpc_dist.sh # funcs building the GRPC library and tests into a debian dist
+
+GCE [startup scripts](https://developers.google.com/compute/docs/howtos/startupscript)
+- *_on_startup.sh
+
+Main scripts (as of 2014/09/04)
+- create_grpc_dev_instance.sh
+- new_grpc_dev_instance.sh
+
diff --git a/tools/gce_setup/compute_extras.sh b/tools/gce_setup/compute_extras.sh
new file mode 100755
index 0000000000..e0def1a743
--- /dev/null
+++ b/tools/gce_setup/compute_extras.sh
@@ -0,0 +1,255 @@
+#!/bin/bash
+
+# Shared bash funcs that combine common gcloud compute actions into single commands
+
+# remove_instance removes a named instance
+#
+# remove_instance <project> <instance_name> [<zone>="us-central1-b"]
+remove_instance() {
+ local project=$1
+ [[ -n $project ]] || {
+ echo "$FUNCNAME: missing arg: project" 1>&2
+ return 1
+ }
+ local an_instance=$2
+ [[ -n $an_instance ]] || {
+ echo "$FUNCNAME: missing arg: an_instance" 1>&2
+ return 1
+ }
+ local zone=$3
+ [[ -n $zone ]] || zone="us-central1-b"
+
+ gcloud --project $project --quiet \
+ compute instances delete $an_instance --zone=$zone
+}
+
+# has_instance checks if a project contains a named instance
+#
+# has_instance <project> <instance_name>
+has_instance() {
+ local project=$1
+ [[ -n $project ]] || {
+ echo "$FUNCNAME: missing arg: project" 1>&2
+ return 1
+ }
+ local checked_instance=$2
+ [[ -n $checked_instance ]] || {
+ echo "$FUNCNAME: missing arg: checked_instance" 1>&2
+ return 1
+ }
+
+ instances=$(gcloud --project $project compute instances list \
+ | sed -e 's/ \+/ /g' | cut -d' ' -f 1)
+ for i in $instances
+ do
+ if [[ $i == $checked_instance ]]
+ then
+ return 0
+ fi
+ done
+
+ return 1
+}
+
+# find_network_ip finds the ip address of an instance if it is present in the project.
+#
+# find_network_ip <project> <instance_name>
+find_network_ip() {
+ local project=$1
+ [[ -n $project ]] || {
+ echo "$FUNCNAME: missing arg: project" 1>&2
+ return 1
+ }
+ local checked_instance=$2
+ [[ -n $checked_instance ]] || {
+ echo "$FUNCNAME: missing arg: checked_instance" 1>&2
+ return 1
+ }
+
+ has_instance $project $checked_instance || return 1
+ gcloud --project $project compute instances list \
+ | grep -e "$checked_instance\s" | sed -e 's/ \+/ /g' | cut -d' ' -f 4
+}
+
+# delete_disks deletes a bunch of disks matching a pattern
+#
+# delete_disks <project> <disk_pattern>
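+#
+# e.g., to delete all disks whose names contain 'grpc-docker' in a project
+# (both values below are hypothetical):
+#
+#   delete_disks my-project grpc-docker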
+delete_disks() {
+ local project=$1
+ [[ -n $project ]] || {
+ echo "$FUNCNAME: missing arg: project" 1>&2
+ return 1
+ }
+ local disk_pattern=$2
+ [[ -n $disk_pattern ]] || {
+ echo "$FUNCNAME: missing arg: disk_pattern" 1>&2
+ return 1
+ }
+
+ trash_disks=$(gcloud --project=$project compute disks list \
+ | sed -e 's/ \+/ /g' | cut -d' ' -f 1 | grep $disk_pattern)
+ [[ -n $trash_disks ]] && gcloud --project $project \
+ --quiet compute disks delete $trash_disks
+}
+
+# has_firewall checks if a project contains a named firewall
+#
+# has_firewall <project> <checked_firewall>
+has_firewall() {
+ local project=$1
+ [[ -n $project ]] || {
+ echo "$FUNCNAME: missing arg: project" 1>&2
+ return 1
+ }
+ local checked_firewall=$2
+ [[ -n $checked_firewall ]] || {
+ echo "$FUNCNAME: missing arg: checked_firewall" 1>&2
+ return 1
+ }
+
+ instances=$(gcloud --project $project compute firewall-rules list \
+ | sed -e 's/ \+/ /g' | cut -d' ' -f 1)
+ for i in $instances
+ do
+ if [[ $i == $checked_firewall ]]
+ then
+ return 0
+ fi
+ done
+
+ return 1
+}
+
+# remove_firewall removes a named firewall from a project.
+#
+# remove_firewall <project> <checked_firewall>
+remove_firewall() {
+ local project=$1
+ [[ -n $project ]] || {
+ echo "$FUNCNAME: missing arg: project" 1>&2
+ return 1
+ }
+ local a_firewall=$2
+ [[ -n $a_firewall ]] || {
+ echo "$FUNCNAME: missing arg: a_firewall" 1>&2
+ return 1
+ }
+
+ gcloud --project $project --quiet compute firewall-rules delete $a_firewall
+}
+
+# has_network checks if a project contains a named network
+#
+# has_network <project> <checked_network>
+has_network() {
+ local project=$1
+ [[ -n $project ]] || {
+ echo "$FUNCNAME: missing arg: project" 1>&2
+ return 1
+ }
+ local checked_network=$2
+ [[ -n $checked_network ]] || {
+ echo "$FUNCNAME: missing arg: checked_network" 1>&2
+ return 1
+ }
+
+ instances=$(gcloud --project $project compute networks list \
+ | sed -e 's/ \+/ /g' | cut -d' ' -f 1)
+ for i in $instances
+ do
+ if [[ $i == $checked_network ]]
+ then
+ return 0
+ fi
+ done
+
+ return 1
+}
+
+# maybe_setup_dev_network adds a network with the given name, with firewall
+# rules useful for development
+#
+# - All machines can be accessed internally and externally over SSH (port 22)
+# - All machines can access one another over the internal network
+# - All machines can be accessed externally via ports 80, 443, 8080 and 8443
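+#
+# For example, to create such a network (both names below are hypothetical):
+#
+#   maybe_setup_dev_network grpc-dev-net my-project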
+maybe_setup_dev_network() {
+ local name=$1
+ [[ -n $name ]] || {
+ echo "$FUNCNAME: missing arg: network name" 1>&2
+ return 1
+ }
+
+ local project=$2
+ [[ -n $project ]] || {
+ echo "$FUNCNAME: missing arg: project" 1>&2
+ return 1
+ }
+
+ has_network $project $name || {
+ echo "creating network '$name'" 1>&2
+ gcloud compute --project $project networks create $name || return 1
+ }
+
+  # allow instances on the network to be reached over SSH (port 22)
+ has_firewall $project "$name-ssh" || {
+ echo "adding firewall '$name-ssh'" 1>&2
+ gcloud compute --project $project firewall-rules create "$name-ssh" \
+ --network $name \
+ --allow tcp:22 || return 1;
+ }
+
+ # allow instances on the network to connect to each other internally
+ has_firewall $project "$name-internal" || {
+ echo "adding firewall '$name-internal'" 1>&2
+ gcloud compute --project $project firewall-rules create "$name-internal" \
+ --network $name \
+ --source-ranges 10.0.0.0/16 --allow tcp udp icmp || return 1;
+ }
+
+ # allow instances on the network to be connected to from external ips on
+ # specific ports
+ has_firewall $project "$name-external" || {
+ echo "adding firewall '$name-external'" 1>&2
+ gcloud compute --project $project firewall-rules create "$name-external" \
+ --network $name \
+ --allow tcp:80 tcp:8080 tcp:443 tcp:8443 || return 1;
+ }
+}
+
+# maybe_remove_dev_network removes a network set up by maybe_setup_dev_network
+maybe_remove_dev_network() {
+ local name=$1
+ [[ -n $name ]] || {
+ echo "$FUNCNAME: missing arg: network name" 1>&2
+ return 1
+ }
+
+ local project=$2
+ [[ -n $project ]] || {
+ echo "$FUNCNAME: missing arg: project" 1>&2
+ return 1
+ }
+
+ has_network $project $name || {
+ echo "network $name is not present"
+ return 0
+ }
+ for i in $(gcloud compute firewall-rules list \
+ | grep "$name-" | cut -d' ' -f 1)
+ do
+ gcloud compute --quiet firewall-rules delete $i || return 1;
+ done
+ gcloud compute --quiet networks delete $name
+}
+
+# find_named_ip finds the external ip address for a given name.
+#
+# find_named_ip <named-ip-address>
+find_named_ip() {
+ local name=$1
+ [[ -n $name ]] || { echo "$FUNCNAME: missing arg: name" 1>&2; return 1; }
+ [[ $name == 'none' ]] && return 0;
+
+ gcloud compute addresses list | sed -e 's/ \+/ /g' \
+ | grep $name | cut -d' ' -f 3
+}
diff --git a/tools/gce_setup/grpc_docker.sh b/tools/gce_setup/grpc_docker.sh
new file mode 100755
index 0000000000..094b97bf3c
--- /dev/null
+++ b/tools/gce_setup/grpc_docker.sh
@@ -0,0 +1,503 @@
+#!/bin/bash
+#
+# Contains funcs that help maintain GRPC's Docker images.
+#
+# Most funcs rely on a special-purpose GCE instance to build the docker
+# images and store them in a GCS-backed docker repository.
+#
+# The GCE instance
+# - should be based on the container-optimized GCE instance
+# [https://cloud.google.com/compute/docs/containers].
+# - should be running google/docker-registry image
+# [https://registry.hub.docker.com/u/google/docker-registry/], so that images
+# can be saved to GCS
+# - should have the GCE support scripts from this directory installed on it.
+#
+# The expected workflow is
+# - start a grpc docker GCE instance
+# * on startup, some of the docker images will be regenerated automatically
+# - use grpc_update_image to update images via that instance
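+#
+# For example, once the instance is up, an image could be refreshed with a
+# call like the following; the bucket, dockerfile dir and instance name below
+# are hypothetical:
+#
+#   grpc_update_image gs://my-bucket/admin/dockerfile grpc/ruby \
+#     path/to/dockerfile/grpc_ruby grpc-docker-builder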
+
+# Pushes a dockerfile dir to cloud storage.
+#
+# docker_dir is expected to be the parent directory of a number of directories,
+# each of which contains a Dockerfile.
+#
+# grpc_push_dockerfiles path/to/docker_parent_dir gs://bucket/path/to/gcs/parent
+grpc_push_dockerfiles() {
+ local docker_dir=$1
+ [[ -n $docker_dir ]] || {
+ echo "$FUNCNAME: missing arg: docker_dir" 1>&2
+ return 1
+ }
+
+ local gs_root_uri=$2
+ [[ -n $gs_root_uri ]] || {
+ echo "$FUNCNAME: missing arg: gs_root_uri" 1>&2
+ return 1
+ }
+
+ find $docker_dir -name '*~' -o -name '#*#' -exec rm -fv {} \; || {
+ echo "$FUNCNAME: failed: cleanup of tmp files in $docker_dir" 1>&2
+ return 1
+ }
+ gsutil cp -R $docker_dir $gs_root_uri || {
+ echo "$FUNCNAME: failed: cp $docker_dir -> $gs_root_uri" 1>&2
+ return 1
+ }
+}
+
+# Adds the user to docker group on a GCE instance, and restarts the docker
+# daemon
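+#
+# call-seq:
+#   grpc_add_docker_user <host> [project] [zone]
+#
+# e.g., (instance name below is hypothetical)
+#   grpc_add_docker_user grpc-docker-builder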
+grpc_add_docker_user() {
+ local host=$1
+ [[ -n $host ]] || {
+ echo "$FUNCNAME: missing arg: host" 1>&2
+ return 1
+ }
+
+ local project=$2
+ local project_opt=''
+ [[ -n $project ]] && project_opt=" --project $project"
+
+ local zone=$3
+ local zone_opt=''
+ [[ -n $zone ]] && zone_opt=" --zone $zone"
+
+
+ local func_lib="/var/local/startup_scripts/shared_startup_funcs.sh"
+ local ssh_cmd="source $func_lib && grpc_docker_add_docker_group"
+ gcloud compute $project_opt ssh $zone_opt $host --command "$ssh_cmd"
+}
+
+# Updates a docker image specified in a local dockerfile via the docker
+# container GCE instance.
+#
+# the docker container GCE instance
+# - should have been setup using ./new_grpc_docker_instance
+# - so will have /var/local/startup_scripts/shared_startup_funcs.sh, a copy of
+# ./shared_startup_funcs.sh
+#
+# grpc_update_image gs://bucket/path/to/dockerfile_parent \
+#   image_label path/to/docker_dir docker_gce_instance [project] [zone]
+grpc_update_image() {
+ local gs_root_uri=$1
+ [[ -n $gs_root_uri ]] || {
+ echo "$FUNCNAME: missing arg: gs_root_uri" 1>&2
+ return 1
+ }
+
+ local image_label=$2
+ [[ -n $image_label ]] || {
+ echo "$FUNCNAME: missing arg: host" 1>&2
+ return 1
+ }
+
+ local docker_dir=$3
+ [[ -n $docker_dir ]] || {
+ echo "$FUNCNAME: missing arg: docker_dir" 1>&2
+ return 1
+ }
+ [[ -d $docker_dir ]] || {
+ echo "could find directory $docker_dir" 1>&2
+ return 1
+ }
+ local docker_parent_dir=$(dirname $docker_dir)
+ local gce_docker_dir="/var/local/dockerfile/$(basename $docker_dir)"
+
+ local host=$4
+ [[ -n $host ]] || {
+ echo "$FUNCNAME: missing arg: host" 1>&2
+ return 1
+ }
+
+ local project=$5
+ local project_opt=''
+ [[ -n $project ]] && project_opt=" --project $project"
+
+ local zone=$6
+ local zone_opt=''
+ [[ -n $zone ]] && zone_opt=" --zone $zone"
+
+ local func_lib="/var/local/startup_scripts/shared_startup_funcs.sh"
+ local ssh_cmd="source $func_lib"
+ local ssh_cmd+=" && grpc_dockerfile_refresh $image_label $gce_docker_dir"
+
+ grpc_push_dockerfiles $docker_parent_dir $gs_root_uri || return 1
+ gcloud compute $project_opt ssh $zone_opt $host --command "$ssh_cmd"
+}
+
+# gce_has_instance checks if a project contains a named instance
+#
+# gce_has_instance <project> <instance_name>
+gce_has_instance() {
+ local project=$1
+ [[ -n $project ]] || { echo "$FUNCNAME: missing arg: project" 1>&2; return 1; }
+ local checked_instance=$2
+ [[ -n $checked_instance ]] || {
+ echo "$FUNCNAME: missing arg: checked_instance" 1>&2
+ return 1
+ }
+
+ instances=$(gcloud --project $project compute instances list \
+ | sed -e 's/ \+/ /g' | cut -d' ' -f 1)
+ for i in $instances
+ do
+ if [[ $i == $checked_instance ]]
+ then
+ return 0
+ fi
+ done
+
+ echo "instance '$checked_instance' not found in compute project $project" 1>&2
+ return 1
+}
+
+# gce_find_internal_ip finds the ip address of an instance if it is present in
+# the project.
+#
+# gce_find_internal_ip <project> <instance_name>
+gce_find_internal_ip() {
+ local project=$1
+ [[ -n $project ]] || { echo "$FUNCNAME: missing arg: project" 1>&2; return 1; }
+ local checked_instance=$2
+ [[ -n $checked_instance ]] || {
+ echo "$FUNCNAME: missing arg: checked_instance" 1>&2
+ return 1
+ }
+
+ gce_has_instance $project $checked_instance || return 1
+ gcloud --project $project compute instances list \
+ | grep -e "$checked_instance\s" \
+ | sed -e 's/ \+/ /g' | cut -d' ' -f 4
+}
+
+# sets the vars grpc_zone and grpc_project
+#
+# to be used in funcs that want to set the zone and project, and potentially
+# have them overridden by flags
+#
+# grpc_zone
+# - is set to the gcloud config value for compute/zone if that's present
+# - it defaults to asia-east1-a
+# - it can be overridden by passing -z <other value>
+#
+# grpc_project
+# - is set to the gcloud config value for project if that's present
+# - it defaults to stoked-keyword-656 (the grpc cloud testing project)
+# - it can be overridden by passing -p <other value>
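+#
+# Typical usage from another func; my_cmd_args below is a hypothetical func
+# that validates the remaining positional args:
+#
+#   local grpc_zone grpc_project dry_run
+#   grpc_set_project_and_zone -f my_cmd_args "$@" || return 1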
+grpc_set_project_and_zone() {
+ dry_run=0
+ grpc_zone=$(gcloud config list compute/zone --format text \
+ | sed -e 's/ \+/ /g' | cut -d' ' -f 2)
+ # pick a known zone as a default
+ [[ $grpc_zone == 'None' ]] && grpc_zone='asia-east1-a'
+
+ grpc_project=$(gcloud config list project --format text \
+ | sed -e 's/ \+/ /g' | cut -d' ' -f 2)
+  # pick a known project as a default
+ [[ $grpc_project == 'None' ]] && grpc_project='stoked-keyword-656'
+
+  # see if -p or -z is used to override the project or zone
+ local OPTIND
+ local OPTARG
+ local arg_func
+ while getopts :p:z:f:n name
+ do
+ case $name in
+ f) declare -F $OPTARG >> /dev/null && {
+ arg_func=$OPTARG;
+ } || {
+ echo "-f: arg_func value: $OPTARG is not defined"
+ return 2
+ }
+ ;;
+ n) dry_run=1 ;;
+ p) grpc_project=$OPTARG ;;
+ z) grpc_zone=$OPTARG ;;
+      :) [[ $OPTARG == 'f' ]] && {
+           echo "-f: missing arg_func value" 1>&2
+ return 2
+ } || {
+ # ignore -p or -z without args, just use the defaults
+ continue
+ }
+ ;;
+ \?) echo "-$OPTARG: unknown flag; it's ignored" 1>&2; continue ;;
+ esac
+ done
+ shift $((OPTIND-1))
+ [[ -n $arg_func ]] && $arg_func "$@"
+}
+
+# construct the flags to be passed to the binary running the test client
+#
+# call-seq:
+# flags=$(grpc_interop_test_flags <server_ip> <server_port> <test_case>)
+#   [[ -n $flags ]] || return 1
+grpc_interop_test_flags() {
+ [[ -n $1 ]] && { # server_ip
+ local server_ip=$1
+ shift
+ } || {
+ echo "$FUNCNAME: missing arg: server_ip" 1>&2
+ return 1
+ }
+ [[ -n $1 ]] && { # port
+ local port=$1
+ shift
+ } || {
+ echo "$FUNCNAME: missing arg: port" 1>&2
+ return 1
+ }
+ [[ -n $1 ]] && { # test_case
+ local test_case=$1
+ shift
+ } || {
+ echo "$FUNCNAME: missing arg: test_case" 1>&2
+ return 1
+ }
+ echo "--server_host=$server_ip --server_port=$port --test_case=$test_case"
+}
+
+# checks the positional args and assigns them to variables visible in the caller
+#
+# these are the positional args passed to grpc_interop_test after option flags
+# are removed
+#
+# five args are expected, in order
+# - test_case
+# - host <the gce docker instance on which to run the test>
+# - client to run
+# - server_host <the gce docker instance on which the test server is running>
+# - server type
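+#
+# e.g., (instance names below are hypothetical)
+#   grpc_interop_test_args large_unary grpc-docker-testclients ruby \
+#     grpc-docker-server cxx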
+grpc_interop_test_args() {
+ [[ -n $1 ]] && { # test_case
+ test_case=$1
+ shift
+ } || {
+ echo "$FUNCNAME: missing arg: test_case" 1>&2
+ return 1
+ }
+
+ [[ -n $1 ]] && { # host
+ host=$1
+ shift
+ } || {
+ echo "$FUNCNAME: missing arg: host" 1>&2
+ return 1
+ }
+
+ [[ -n $1 ]] && { # client_type
+ case $1 in
+ cxx|go|java|nodejs|php|python|ruby)
+ grpc_gen_test_cmd="grpc_interop_gen_$1_cmd"
+ declare -F $grpc_gen_test_cmd >> /dev/null || {
+ echo "-f: test_func for $1 => $grpc_gen_test_cmd is not defined" 1>&2
+ return 2
+ }
+ shift
+ ;;
+ *)
+ echo "bad client_type: $1" 1>&2
+ return 1
+ ;;
+ esac
+ } || {
+ echo "$FUNCNAME: missing arg: client_type" 1>&2
+ return 1
+ }
+
+ [[ -n $1 ]] && { # grpc_server
+ grpc_server=$1
+ shift
+ } || {
+ echo "$FUNCNAME: missing arg: grpc_server" 1>&2
+ return 1
+ }
+
+ [[ -n $1 ]] && { # server_type
+ case $1 in
+ cxx) grpc_port=8010 ;;
+ go) grpc_port=8020 ;;
+ java) grpc_port=8030 ;;
+ nodejs) grpc_port=8040 ;;
+ python) grpc_port=8050 ;;
+ ruby) grpc_port=8060 ;;
+ *) echo "bad server_type: $1" 1>&2; return 1 ;;
+ esac
+ shift
+ } || {
+ echo "$FUNCNAME: missing arg: server_type" 1>&2
+ return 1
+ }
+}
+
+grpc_launch_server_args() {
+ [[ -n $1 ]] && { # host
+ host=$1
+ shift
+ } || {
+ echo "$FUNCNAME: missing arg: host" 1>&2
+ return 1
+ }
+
+ [[ -n $1 ]] && { # server_type
+ case $1 in
+ cxx) grpc_port=8010 ;;
+ go) grpc_port=8020 ;;
+ java) grpc_port=8030 ;;
+ nodejs) grpc_port=8040 ;;
+ python) grpc_port=8050 ;;
+ ruby) grpc_port=8060 ;;
+ *) echo "bad server_type: $1" 1>&2; return 1 ;;
+ esac
+ docker_label="grpc/$1"
+ docker_name="grpc_interop_$1"
+ shift
+ } || {
+ echo "$FUNCNAME: missing arg: server_type" 1>&2
+ return 1
+ }
+}
+
+# Launches a server on a docker instance.
+#
+# call-seq;
+# grpc_launch_server <server_name> <server_type>
+#
+# Runs the server_type on a GCE instance running docker with server_name
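+#
+# e.g., to launch the java interop server on a docker GCE instance
+# (instance name below is hypothetical):
+#
+#   grpc_launch_server grpc-docker-server java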
+grpc_launch_server() {
+ # declare vars local so that they don't pollute the shell environment
+  # where this func is used.
+ local grpc_zone grpc_project dry_run # set by grpc_set_project_and_zone
+ # set by grpc_launch_server_args
+ local docker_label docker_name host grpc_port
+
+ # set the project zone and check that all necessary args are provided
+ grpc_set_project_and_zone -f grpc_launch_server_args "$@" || return 1
+ gce_has_instance $grpc_project $host || return 1;
+
+ cmd="sudo docker run -d --name $docker_name"
+ cmd+=" -p $grpc_port:$grpc_port $docker_label"
+ local project_opt="--project $grpc_project"
+ local zone_opt="--zone $grpc_zone"
+ local ssh_cmd="bash -l -c \"$cmd\""
+ echo "will run:"
+ echo " $ssh_cmd"
+ echo "on $host"
+ [[ $dry_run == 1 ]] && return 0 # don't run the command on a dry run
+ gcloud compute $project_opt ssh $zone_opt $host --command "$cmd"
+}
+
+# Runs a test command on a docker instance.
+#
+# call-seq:
+# grpc_interop_test <test_name> <host> <client_type> \
+# <server_host> <server_type>
+#
+# N.B: server_name defaults to 'grpc-docker-server'
+#
+# requirements:
+# host is a GCE instance running docker with access to the gRPC docker images
+# server_name is a GCE docker instance running the gRPC server in docker
+# test_name is one of the named gRPC tests [http://go/grpc_interop_tests]
+#  client_type is one of [cxx,go,java,nodejs,php,python,ruby]
+#  server_type is one of [cxx,go,java,nodejs,python,ruby]
+#
+# it assumes:
+#  that each gRPC implementation has a docker image named grpc/<imp>, e.g., grpc/java
+# a test is run using $ docker run 'path/to/interop_test_bin --flags'
+# the required images are available on <host>
+#
+# server_name [default:grpc-docker-server] is an instance that runs the
+# <server_type> server on the standard test port for the <server_type>
+#
+# each server_type runs its tests on a standard test port as follows:
+# cxx: 8010
+# go: 8020
+# java: 8030
+# nodejs: 8040
+# python: 8050
+# ruby: 8060
+#
+# each client_type should have an associated bash func:
+# grpc_interop_gen_<client_type>_cmd
+#  the func provides the dockerized command for running client_type's test.
+# If no such func is available, tests for that client type cannot be run.
+#
+# the flags for running a test are the same:
+#
+# --server_host=<svr_addr> --server_port=<svr_port> --test_case=<...>
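+#
+# e.g., to run the large_unary test with a ruby client against a cxx server
+# (instance names below are hypothetical):
+#
+#   grpc_interop_test large_unary grpc-docker-testclients ruby \
+#     grpc-docker-server cxx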
+grpc_interop_test() {
+ # declare vars local so that they don't pollute the shell environment
+  # where this func is used.
+
+ local grpc_zone grpc_project dry_run # set by grpc_set_project_and_zone
+ # grpc_interop_test_args
+ local test_case host grpc_gen_test_cmd grpc_server grpc_port
+
+ # set the project zone and check that all necessary args are provided
+ grpc_set_project_and_zone -f grpc_interop_test_args "$@" || return 1
+ gce_has_instance $grpc_project $host || return 1;
+
+ local addr=$(gce_find_internal_ip $grpc_project $grpc_server)
+ [[ -n $addr ]] || return 1
+ local flags=$(grpc_interop_test_flags $addr $grpc_port $test_case)
+ [[ -n $flags ]] || return 1
+ cmd=$($grpc_gen_test_cmd $flags)
+ [[ -n $cmd ]] || return 1
+
+ local project_opt="--project $grpc_project"
+ local zone_opt="--zone $grpc_zone"
+ local ssh_cmd="bash -l -c \"$cmd\""
+ echo "will run:"
+ echo " $ssh_cmd"
+ echo "on $host"
+ [[ $dry_run == 1 ]] && return 0 # don't run the command on a dry run
+ gcloud compute $project_opt ssh $zone_opt $host --command "$cmd"
+}
+
+# constructs the full dockerized ruby interop test cmd.
+#
+# call-seq:
+# flags= .... # generic flags to include the command
+# cmd=$($grpc_gen_test_cmd $flags)
+grpc_interop_gen_ruby_cmd() {
+ local cmd_prefix="sudo docker run grpc/ruby bin/bash -l -c"
+ local test_script="/var/local/git/grpc/src/ruby/bin/interop/interop_client.rb"
+ local the_cmd="$cmd_prefix 'ruby $test_script $@'"
+ echo $the_cmd
+}
+
+# constructs the full dockerized java interop test cmd.
+#
+# call-seq:
+# flags= .... # generic flags to include the command
+# cmd=$($grpc_gen_test_cmd $flags)
+grpc_interop_gen_java_cmd() {
+ local cmd_prefix="sudo docker run grpc/java";
+ local test_script="/var/local/git/grpc-java/run-test-client.sh";
+ local test_script+=" --transport=NETTY_TLS --grpc_version=2"
+ local the_cmd="$cmd_prefix $test_script $@";
+ echo $the_cmd
+}
+
+# constructs the full dockerized php interop test cmd.
+#
+# TODO(mlumish): update this to use the script once that's on git-on-borg
+#
+# call-seq:
+# flags= .... # generic flags to include the command
+# cmd=$($grpc_gen_test_cmd $flags)
+grpc_interop_gen_php_cmd() {
+ local cmd_prefix="sudo docker run grpc/php bin/bash -l -c";
+ local test_script="cd /var/local/git/grpc/src/php/tests/interop";
+ local test_script+=" && php -d extension_dir=../../ext/grpc/modules/";
+ local test_script+=" -d extension=grpc.so interop_client.php";
+ local the_cmd="$cmd_prefix '$test_script $@ 1>&2'";
+ echo $the_cmd
+}
+
+
+# TODO(grpc-team): add grpc_interop_gen_xxx_cmd for python|cxx|nodejs|go
diff --git a/tools/gce_setup/new_grpc_docker_builder.sh b/tools/gce_setup/new_grpc_docker_builder.sh
new file mode 100755
index 0000000000..9a3988f343
--- /dev/null
+++ b/tools/gce_setup/new_grpc_docker_builder.sh
@@ -0,0 +1,153 @@
+#!/bin/bash
+
+# Triggers the build of a GCE 'grpc-docker' instance.
+#
+# Usage:
+# /path/to/new_grpc_docker_builder.sh \
+# [--project <cloud-project-id> | -p<cloud-project-id>] \
+# [--instance <instance-to-create> | -i<instance-to-create>] \
+# [--address <named_cloud_static_ip> | -a<named_cloud_static_ip>]
+#
+# To run a new docker builder instance.
+# $ /path/to/new_grpc_docker_builder.sh -pmy-project -imy-instance -amy-ip
+#
+# See main() for the full list of flags
+
+function this_dir() {
+ SCRIPT_PATH="${BASH_SOURCE[0]}";
+  if [ -h "${SCRIPT_PATH}" ]; then
+    while [ -h "${SCRIPT_PATH}" ]; do SCRIPT_PATH=`readlink "${SCRIPT_PATH}"`; done
+ fi
+ pushd . > /dev/null
+ cd `dirname ${SCRIPT_PATH}` > /dev/null
+ SCRIPT_PATH=`pwd`;
+ popd > /dev/null
+ echo $SCRIPT_PATH
+}
+
+source $(this_dir)/compute_extras.sh
+source $(this_dir)/grpc_docker.sh
+
+cp_startup_script() {
+ local script_dir=$1
+ [[ -n $script_dir ]] || { echo "missing arg: script_dir" 1>&2; return 1; }
+
+ local gs_script_root=$2
+ [[ -n $gs_script_root ]] || { echo "missing arg: gs_script_root" 1>&2; return 1; }
+
+ local script_path=$3
+ [[ -n $script_path ]] || { echo "missing arg: script_name" 1>&2; return 1; }
+
+ local startup_script=$script_dir/$script_path
+ local gs_startup_uri=$gs_script_root/$script_path
+ gsutil cp $startup_script $gs_startup_uri
+}
+
+# add_instance adds a generic instance that runs
+# new_grpc_docker_builder_on_startup.sh on startup
+add_instance() {
+ local project=$1
+ [[ -n $project ]] || { echo "missing arg: project" 1>&2; return 1; }
+ local gs_admin_root=$2
+ [[ -n $gs_admin_root ]] || { echo "missing arg: gs_admin_root" 1>&2; return 1; }
+ local instance=$3
+ [[ -n $instance ]] || { echo "missing arg: instance" 1>&2; return 1; }
+ local zone=$4
+ [[ -n $zone ]] || { echo "missing arg: zone" 1>&2; return 1; }
+ local address=$5
+ [[ -n $address ]] || { echo "missing arg: address" 1>&2; return 1; }
+
+ local script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+ local gs_script_root="$gs_admin_root/startup"
+
+ local on_startup=new_grpc_docker_builder_on_startup.sh
+ local gs_on_startup=$gs_script_root/$on_startup
+ cp_startup_script $script_dir $gs_script_root $on_startup || {
+ echo "Could not save script to $gs_on_startup" 1>&2
+ return 1
+ }
+ startup_md="startup-script-url=$gs_on_startup"
+
+ local shared_startup=shared_startup_funcs.sh
+ local gs_shared_startup=$gs_script_root/$shared_startup
+ cp_startup_script $script_dir $gs_script_root $shared_startup || {
+ echo "Could not save script to $gs_shared_startup" 1>&2
+ return 1
+ }
+ startup_md+=" shared_startup_script_url=$gs_shared_startup"
+
+ local docker_dir=$(this_dir)/../dockerfile
+ grpc_push_dockerfiles $docker_dir $gs_admin_root || return 1;
+ startup_md+=" gs_dockerfile_root=$gs_admin_root/dockerfile"
+ startup_md+=" gs_docker_reg=$gs_admin_root/docker_images"
+
+ local address_flag=""
+ local the_address=$(find_named_ip $address)
+ [[ -n $the_address ]] && address_flag="--address $the_address"
+ local the_image='container-vm-v20140925'
+ local scopes='compute-rw storage-full'
+ scopes+=' https://www.googleapis.com/auth/gerritcodereview'
+ gcloud --project $project compute instances create $instance \
+ $address_flag \
+ --image $the_image \
+ --image-project google-containers \
+ --metadata $startup_md \
+ --machine-type='n1-standard-1' \
+ --scopes $scopes \
+ --tags grpc testing \
+ --zone $zone \
+ --boot-disk-size 500GB
+}
+
+main() {
+ local INSTANCE_NAME="grpc-docker-builder"
+ local PROJECT="stoked-keyword-656"
+ local GS_ADMIN_ROOT="gs://tmp-grpc-dev/admin"
+ local ZONE='asia-east1-a'
+ local ADDRESS_NAME='grpc-php-dev-static-1' # use 'none' if no static ip is needed
+
+ # Parse the options
+ opts=`getopt -o a::p::g::i::z:: --long address_name::,project::,gs_admin_root::,instance_name::,zone:: -n $0 -- "$@"`
+ eval set -- "$opts"
+ while true ; do
+ case "$1" in
+ -p|--project)
+ case "$2" in
+ "") shift 2 ;;
+ *) PROJECT=$2; shift 2 ;;
+ esac ;;
+ -a|--address_name)
+ case $2 in
+ "") shift 2 ;;
+ *) ADDRESS_NAME=$2; shift 2 ;;
+ esac ;;
+ -g|--gs_admin_root)
+ case "$2" in
+ "") shift 2 ;;
+ *) GS_ADMIN_ROOT=$2; shift 2 ;;
+ esac ;;
+ -i|--instance_name)
+ case "$2" in
+ "") shift 2 ;;
+ *) INSTANCE_NAME=$2; shift 2 ;;
+ esac ;;
+ -z|--zone)
+ case "$2" in
+ "") shift 2 ;;
+ *) ZONE=$2; shift 2 ;;
+ esac ;;
+ --) shift ; break ;;
+ *) echo "Internal error!" ; exit 1 ;;
+ esac
+ done
+
+ # verify that the instance does not currently exist
+ has_instance $PROJECT $INSTANCE_NAME && remove_instance $PROJECT $INSTANCE_NAME $ZONE
+ has_instance $PROJECT $INSTANCE_NAME && { echo "$INSTANCE_NAME already exists" 1>&2; return 1; }
+
+  # N.B. the args would need quoting if any of the values contained spaces
+ add_instance $PROJECT $GS_ADMIN_ROOT $INSTANCE_NAME $ZONE $ADDRESS_NAME
+}
+
+set -x
+main "$@"
diff --git a/tools/gce_setup/new_grpc_docker_builder_on_startup.sh b/tools/gce_setup/new_grpc_docker_builder_on_startup.sh
new file mode 100755
index 0000000000..87e8aac1e7
--- /dev/null
+++ b/tools/gce_setup/new_grpc_docker_builder_on_startup.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+# Startup script that initializes a grpc-dev GCE machine.
+#
+# A grpc-docker GCE machine is based on the container-optimized GCE image.
+#
+# On startup, it copies the grpc dockerfiles to a local directory, and updates
+# its address so that the docker containers within it have git-on-borg access.
+
+# _load_metadata curls a metadata url
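+#
+# e.g., _load_metadata "attributes/shared_startup_script_url"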
+_load_metadata() {
+ local metadata_root=http://metadata/computeMetadata/v1
+ local uri=$1
+ [[ -n $uri ]] || { echo "missing arg: uri" >&2; return 1; }
+
+ if [[ $uri =~ ^'attributes/' ]]
+ then
+ for a in $(curl -H "X-Google-Metadata-Request: True" $metadata_root/instance/attributes/)
+ do
+ [[ $uri =~ "/$a"$ ]] && { curl $metadata_root/instance/$uri -H "X-Google-Metadata-Request: True"; return; }
+ done
+ fi
+
+ # if the uri is a full request uri
+ [[ $uri =~ ^$metadata_root ]] && { curl $uri -H "X-Google-Metadata-Request: True"; return; }
+}
+
+_source_gs_script() {
+ local script_attr=$1
+ [[ -n $script_attr ]] || { echo "missing arg: script_attr" >&2; return 1; }
+
+ local gs_uri=$(_load_metadata "attributes/$script_attr")
+ [[ -n $gs_uri ]] || { echo "missing metadata: $script_attr" >&2; return 1; }
+
+ local out_dir='/var/local/startup_scripts'
+ local script_path=$out_dir/$(basename $gs_uri)
+ mkdir -p $out_dir
+ gsutil cp $gs_uri $script_path || {
+ echo "could not cp $gs_uri -> $script_path"
+ return 1
+ }
+ chmod a+rwx $out_dir $script_path
+ source $script_path
+}
+
+main() {
+ local script_attr='shared_startup_script_url'
+ _source_gs_script $script_attr || {
+ echo "halting, script 'attributes/$script_attr' could not be sourced"
+ return 1
+ }
+ grpc_dockerfile_pull
+ chmod -R a+rw /var/local/dockerfile
+
+ # Install git and emacs
+ apt-get update && apt-get install -y git emacs || return 1
+
+ # Enable access to git repos on git-on-borg
+ local git_root='/var/local/git'
+ install_gob_daemon $git_root/gerrit-gcompute-tools || return 1
+
+ # Startup the docker registry
+ grpc_docker_launch_registry && grpc_docker_pull_known
+
+ # Add a sentinel file to indicate that startup has completed.
+ local sentinel_file=/var/log/GRPC_DOCKER_IS_UP
+ touch $sentinel_file
+}
+
+set -x
+main "$@"
diff --git a/tools/gce_setup/shared_startup_funcs.sh b/tools/gce_setup/shared_startup_funcs.sh
new file mode 100755
index 0000000000..8bd62e213e
--- /dev/null
+++ b/tools/gce_setup/shared_startup_funcs.sh
@@ -0,0 +1,432 @@
+#!/bin/bash
+# Contains common funcs shared by instance startup scripts.
+#
+# The funcs assume that the code is being run on a GCE instance during instance
+# startup.
+
+function die() {
+ local msg="$0 failed"
+ if [[ -n $1 ]]
+ then
+ msg=$1
+ fi
+ echo $msg
+ exit 1
+}
+
+# umount_by_disk_id umounts a disk given its disk_id.
+umount_by_disk_id() {
+ local disk_id=$1
+ [[ -n $disk_id ]] || { echo "missing arg: disk_id" >&2; return 1; }
+
+ # Unmount the disk first
+  sudo umount /dev/disk/by-id/google-$disk_id || { echo "Could not unmount /dev/disk/by-id/google-$disk_id" >&2; return 1; }
+}
+
+# check_metadata confirms that the result of curling a metadata url does not
+# contain 'Error 404'
+check_metadata() {
+ local curl_output=$1
+ [[ -n $curl_output ]] || { echo "missing arg: curl_output" >&2; return 1; }
+
+ if [[ $curl_output =~ "Error 404" ]]
+ then
+ return 1
+ fi
+
+ return 0
+}
+
+# name_this_instance determines the current instance name.
+name_this_instance() {
+ local the_full_host_name
+ the_full_host_name=$(load_metadata "http://metadata/computeMetadata/v1/instance/hostname")
+ check_metadata $the_full_host_name || return 1
+ local the_instance
+ the_instance=$(echo $the_full_host_name | cut -d . -f 1 -) || {
+ echo "could not get the instance name from $the_full_host_name" >&2
+ return 1
+ }
+
+ echo $the_instance
+}
+
+# delete_this_instance deletes this GCE instance. (it will shutdown as a result
+# of running this cmd)
+delete_this_instance() {
+ local the_full_zone
+ the_full_zone=$(load_metadata "http://metadata/computeMetadata/v1/instance/zone")
+ check_metadata $the_full_zone || return 1
+ local the_zone
+ the_zone=$(echo $the_full_zone | cut -d / -f 4 -) || { echo "could not get zone from $the_full_zone" >&2; return 1; }
+
+ local the_full_host_name
+ the_full_host_name=$(load_metadata "http://metadata/computeMetadata/v1/instance/hostname")
+ check_metadata $the_full_host_name || return 1
+ local the_instance
+  the_instance=$(echo $the_full_host_name | cut -d . -f 1 -) || { echo "could not get the instance name from $the_full_host_name" >&2; return 1; }
+
+ echo "using gcloud compute instances delete to remove: ${the_instance}"
+ gcloud compute --quiet instances delete --delete-disks boot --zone $the_zone $the_instance
+}
+
+# save_image_info updates the 'images' release info file on GCS.
+save_image_info() {
+ local image_id=$1
+ [[ -n $image_id ]] || { echo "missing arg: image_id" >&2; return 1; }
+
+ local repo_gs_uri=$2
+ [[ -n $repo_gs_uri ]] || { echo "missing arg: repo_gs_uri" >&2; return 1; }
+
+ local sentinel="/tmp/$image_id.txt"
+ echo $image_id > $sentinel || { echo "could not create /tmp/$image_id.txt" >&2; return 1; }
+
+ local gs_sentinel="$repo_gs_uri/images/info/LATEST"
+ gsutil cp $sentinel $gs_sentinel || { echo "failed to update $gs_sentinel" >&2; return 1; }
+}
+
+# creates an image, getting the name and cloud storage uri from the supplied
+# instance metadata.
+create_image() {
+ local image_id
+ image_id=$(load_metadata "attributes/image_id")
+ [[ -n $image_id ]] || { echo "missing metadata: image_id" >&2; return 1; }
+
+ local repo_gs_uri
+ repo_gs_uri=$(load_metadata "attributes/repo_gs_uri")
+ [[ -n $repo_gs_uri ]] || { echo "missing metadata: repo_gs_uri" >&2; return 1; }
+
+ local the_project
+ the_project=$(load_metadata "http://metadata/computeMetadata/v1/project/project-id")
+ check_metadata $the_project || return 1
+
+ sudo gcimagebundle -d /dev/sda -o /tmp/ --log_file=/tmp/$image_id.log || { echo "image creation failed" >&2; return 1; }
+ image_path=$(ls /tmp/*.tar.gz)
+ image_gs_uri="$repo_gs_uri/images/$image_id.tar.gz"
+
+ # copy the image to cloud storage
+  gsutil cp $image_path $image_gs_uri || { echo "failed to save image to $image_gs_uri" >&2; return 1; }
+ gcloud compute --project=$the_project images create \
+ $image_id --source-uri $image_gs_uri || { echo "failed to register $image_gs_uri as $image_id" >&2; return 1; }
+
+ save_image_info $image_id $repo_gs_uri
+}
+
+# load_metadata curls a metadata url
+load_metadata() {
+ local metadata_root=http://metadata/computeMetadata/v1
+ local uri=$1
+ [[ -n $uri ]] || { echo "missing arg: uri" >&2; return 1; }
+
+ if [[ $uri =~ ^'attributes/' ]]
+ then
+ for a in $(curl -H "X-Google-Metadata-Request: True" $metadata_root/instance/attributes/)
+ do
+ [[ $uri =~ "/$a"$ ]] && { curl $metadata_root/instance/$uri -H "X-Google-Metadata-Request: True"; return; }
+ done
+ fi
+
+ # if the uri is a full request uri
+ [[ $uri =~ ^$metadata_root ]] && { curl $uri -H "X-Google-Metadata-Request: True"; return; }
+}
+
+install_python_module() {
+ local mod=$1
+ [[ -z $mod ]] && { echo "missing arg: mod" >&2; return 1; }
+
+ echo '------------------------------------'
+  echo "Installing: $mod"
+ echo '------------------------------------'
+ echo
+ install_with_apt_get gcc python-dev python-setuptools
+ sudo apt-get install -y gcc python-dev python-setuptools
+ sudo easy_install -U pip
+ sudo pip uninstall -y $mod
+ sudo pip install -U $mod
+}
+
+install_with_apt_get() {
+ local pkgs=$@
+ echo '---------------------------'
+  echo "Installing: $pkgs"
+ echo '---------------------------'
+ echo
+ sudo apt-get install -y $pkgs
+}
+
+# pulls code from a git repo @HEAD to a local directory, removing the current version if present.
+setup_git_dir() {
+ local git_http_repo=$1
+ [[ -n $git_http_repo ]] || { echo "missing arg: git_http_repo" >&2; return 1; }
+
+ local git_dir=$2
+ [[ -n $git_dir ]] || { echo "missing arg: git_dir" >&2; return 1; }
+
+ if [[ -e $git_dir ]]
+ then
+ rm -fR $git_dir || { echo "could not remove existing repo at $git_dir" >&2; return 1; }
+ fi
+
+ local git_user
+ git_user=$(load_metadata "http://metadata/computeMetadata/v1/instance/service-accounts/default/email")
+ check_metadata $git_user || return 1
+ urlsafe_git_user=$(echo $git_user | sed -e s/@/%40/g) || return 1
+
+ local access_token=$(load_metadata "http://metadata/computeMetadata/v1/instance/service-accounts/default/token?alt=text")
+ check_metadata $access_token || return 1
+ local git_pwd=$(echo $access_token | cut -d' ' -f 2) || return 1
+
+ git clone https://$urlsafe_git_user:$git_pwd@$git_http_repo $git_dir
+}
+
+# network_copy copies a file to another gce instance.
+network_copy() {
+ local the_node=$1
+ [[ -n $the_node ]] || { echo "missing arg: the_node" >&2; return 1; }
+
+ local src=$2
+ [[ -n $src ]] || { echo "missing arg: src" >&2; return 1; }
+
+ local dst=$3
+ [[ -n $dst ]] || { echo "missing arg: dst" >&2; return 1; }
+
+  gcloud compute copy-files --zone=us-central1-b $src $the_node:$dst
+}
+
+# gcs_copy copies a file to a location beneath a root gcs object path.
+gcs_copy() {
+ local gce_root=$1
+ [[ -n $gce_root ]] || { echo "missing arg: gce_root" >&2; return 1; }
+
+ local src=$2
+ [[ -n $src ]] || { echo "missing arg: src" >&2; return 1; }
+
+ local dst=$3
+ [[ -n $dst ]] || { echo "missing arg: dst" >&2; return 1; }
+
+ gsutil cp $src $gce_root/$dst
+}
+
+# find_named_ip finds the external ip address for a given name.
+find_named_ip() {
+ local name=$1
+ [[ -n $name ]] || { echo "missing arg: name" >&2; return 1; }
+
+ gcloud compute addresses list | sed -e 's/ \+/ /g' | grep $name | cut -d' ' -f 3
+}
+
+# update_address_to updates this instance's ip address to the reserved ip address with a given name
+update_address_to() {
+ local name=$1
+ [[ -n $name ]] || { echo "missing arg: name" >&2; return 1; }
+
+ named_ip=$(find_named_ip $name)
+ [[ -n $named_ip ]] || { echo "did not find an address corresponding to $name" >&2; return 1; }
+
+ local the_full_zone
+ the_full_zone=$(load_metadata "http://metadata/computeMetadata/v1/instance/zone")
+ check_metadata $the_full_zone || return 1
+ local the_zone
+ the_zone=$(echo $the_full_zone | cut -d / -f 4 -) || {
+ echo "could not get zone from $the_full_zone" >&2
+ return 1
+ }
+
+ local the_full_host_name
+ the_full_host_name=$(load_metadata "http://metadata/computeMetadata/v1/instance/hostname")
+ check_metadata $the_full_host_name || return 1
+ local the_instance
+ the_instance=$(echo $the_full_host_name | cut -d . -f 1 -) || {
+ echo "could not determine the instance from $the_full_host_name" >&2
+ return 1
+ }
+
+ gcloud compute instances delete-access-config --zone $the_zone $the_instance || {
+ echo "could not delete the access config for $the_instance" >&2
+ return 1
+ }
+ gcloud compute instances add-access-config --zone $the_zone $the_instance --address $named_ip || {
+ echo "could not update the access config for $the_instance to $named_ip" >&2
+ return 1
+ }
+}
+
+# Allows instances to checkout repos on git-on-borg.
+#
+install_gob_daemon() {
+ local gob_dir=$1
+ [[ -n $gob_dir ]] || { echo "missing args: gob_dir" >&2; return 1; }
+
+ local gob_repo=$2
+ [[ -n $gob_repo ]] || gob_repo='https://gerrit.googlesource.com/gcompute-tools/'
+
+ if [[ -e $gob_dir ]]
+ then
+    rm -frv $gob_dir || {
+ echo "could not remove existing git repo at $gob_dir" >&2
+ return 1
+ }
+ fi
+
+ git clone $gob_repo $gob_dir || { echo "failed to pull gerrit cookie repo" >&2; return 1; }
+ local startup_script=/etc/profile.d/gob_cookie_daemon.sh
+
+ cat <<EOF >> $startup_script
+#!/bin/bash
+
+$gob_dir/git-cookie-authdaemon
+
+EOF
+
+ chmod 755 $startup_script
+ $startup_script
+}
+
+# grpc_docker_add_docker_group
+#
+# Adds a docker group, restarts docker, relaunches the docker registry
+grpc_docker_add_docker_group() {
+ [[ -f /var/log/GRPC_DOCKER_IS_UP ]] || {
+ echo "missing file /var/log/GRPC_DOCKER_IS_UP; either wrong machine or still starting up" >&2;
+ return 1
+ }
+ sudo groupadd docker
+
+ local user=$(id -un)
+ [[ -n ${user} ]] || { echo 'could not determine the user' >&2; return 1; }
+ sudo gpasswd -a ${user} docker
+ sudo service docker restart || return 1;
+ grpc_docker_launch_registry
+}
+
+# grpc_dockerfile_pull <local_docker_parent_dir>
+#
+# requires: attributes/gs_dockerfile_root is set to cloud storage directory
+# containing the dockerfile directory
+grpc_dockerfile_pull() {
+ local dockerfile_parent=$1
+ [[ -n $dockerfile_parent ]] || dockerfile_parent='/var/local'
+
+ local gs_dockerfile_root=$(load_metadata "attributes/gs_dockerfile_root")
+ [[ -n $gs_dockerfile_root ]] || { echo "missing metadata: gs_dockerfile_root" >&2; return 1; }
+
+ mkdir -p $dockerfile_parent
+ gsutil cp -R $gs_dockerfile_root $dockerfile_parent || {
+ echo "Did not copy docker files from $gs_dockerfile_root -> $dockerfile_parent"
+ return 1
+ }
+}
+
+# grpc_docker_launch_registry
+#
+# requires: attributes/gs_docker_reg is set to the cloud storage directory to
+# use to store docker images
+grpc_docker_launch_registry() {
+ local gs_docker_reg=$(load_metadata "attributes/gs_docker_reg")
+ [[ -n $gs_docker_reg ]] || { echo "missing metadata: gs_docker_reg" >&2; return 1; }
+
+ local gs_bucket=$(echo $gs_docker_reg | sed -r 's|gs://([^/]*?).*|\1|g')
+ [[ -n $gs_bucket ]] || {
+ echo "could not determine cloud storage bucket from $gs_bucket" >&2;
+ return 1
+ }
+
+ local storage_path_env=''
+ local image_path=$(echo $gs_docker_reg | sed -r 's|gs://[^/]*(.*)|\1|g' | sed -e 's:/$::g')
+ [[ -n $image_path ]] && {
+ storage_path_env="-e STORAGE_PATH=$image_path"
+ }
+
+ sudo docker run -d -e GCS_BUCKET=$gs_bucket $storage_path_env -p 5000:5000 google/docker-registry
+ # wait a couple of minutes max, for the registry to come up
+ local is_up=0
+ for i in {1..24}
+ do
+ local secs=`expr $i \* 5`
+ echo "is docker registry up? waited for $secs secs ..."
+ wget -q localhost:5000 && {
+ echo 'docker registry is up!'
+ is_up=1
+ break
+ }
+ sleep 5
+ done
+
+ [[ $is_up == 0 ]] && {
+ echo "docker registry not available after 120 seconds"; return 1;
+ } || return 0
+}
+
+# grpc_docker_pull_known
+#
+# This pulls a set of known docker images from a private docker registry to
+# the local image cache. It re-tags the images so that FROM lines in
+# dockerfiles built on the docker instance can find the images.
+#
+# optional: address of a grpc docker registry, the default is 0.0.0.0:5000
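+#
+# e.g.,
+#   grpc_docker_pull_known                  # pull from the default 0.0.0.0:5000
+#   grpc_docker_pull_known myhost:5000      # hypothetical alternate registry address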
+grpc_docker_pull_known() {
+ local addr=$1
+ [[ -n $addr ]] || addr="0.0.0.0:5000"
+ local known="base cxx php_base php ruby_base ruby java_base java"
+ echo "... pulling docker images for '$known'"
+ for i in $known
+ do
+ sudo docker pull ${addr}/grpc/$i \
+ && sudo docker tag ${addr}/grpc/$i grpc/$i || {
+ # log and continue
+ echo "docker op error: could not pull ${addr}/grpc/$i"
+ }
+ done
+}
+
+# grpc_dockerfile_build_install
+#
+# requires: $1 is the label to apply to the docker image
+# requires: $2 is a local directory containing a Dockerfile
+# requires: there is a docker registry running on port 5000, e.g., grpc_docker_launch_registry was run
+#
+# grpc_dockerfile_install "grpc/image" /var/local/dockerfile/grpc_image
+grpc_dockerfile_install() {
+ local image_label=$1
+ [[ -n $image_label ]] || { echo "missing arg: image_label" >&2; return 1; }
+ local docker_img_url=0.0.0.0:5000/$image_label
+
+ local dockerfile_dir=$2
+ [[ -n $dockerfile_dir ]] || { echo "missing arg: dockerfile_dir" >&2; return 1; }
+
+ local cache_opt='--no-cache'
+ local cache=$3
+ [[ $cache == "cache=yes" ]] && { cache_opt=''; }
+ [[ $cache == "cache=1" ]] && { cache_opt=''; }
+ [[ $cache == "cache=true" ]] && { cache_opt=''; }
+
+ [[ -d $dockerfile_dir ]] || { echo "not a valid dir: $dockerfile_dir"; return 1; }
+
+ # TODO(temiola): maybe make cache/no-cache a func option?
+ sudo docker build $cache_opt -t $image_label $dockerfile_dir || {
+ echo "docker op error: build of $image_label <- $dockerfile_dir"
+ return 1
+ }
+ sudo docker tag $image_label $docker_img_url || {
+ echo "docker op error: tag of $docker_img_url"
+ return 1
+ }
+ sudo docker push $docker_img_url || {
+ echo "docker op error: push of $docker_img_url"
+ return 1
+ }
+}
+
+# grpc_dockerfile_refresh
+#
+# requires: $1 is the label to apply to the docker image
+# requires: $2 is a local directory containing a Dockerfile
+# requires: there is a docker registry running on port 5000, e.g., grpc_docker_launch_registry was run
+#
+# invokes grpc_dockerfile_pull to refresh the dockerfiles from cloud storage, then grpc_dockerfile_install
+#
+# grpc_dockerfile_refresh "grpc/mylabel" /var/local/dockerfile/dir_containing_my_dockerfile
+grpc_dockerfile_refresh() {
+ grpc_dockerfile_pull || return 1
+ grpc_dockerfile_install "$@"
+}