path: root/vendor
author    Frédéric Guillot <fred@miniflux.net>    2017-11-22 22:22:33 -0800
committer Frédéric Guillot <fred@miniflux.net>    2017-11-22 22:22:33 -0800
commit    cc6d272eb7719bcca02c7f36a1badbeecb153759 (patch)
tree      2fd6e92dcaa19faccfc25ff67499abcc6dabbaaf /vendor
parent    9877051f12621aa71daad520caa2847c47c746f8 (diff)
Add OAuth2 authentication
Diffstat (limited to 'vendor')
-rw-r--r--vendor/github.com/golang/protobuf/.gitignore16
-rw-r--r--vendor/github.com/golang/protobuf/.travis.yml18
-rw-r--r--vendor/github.com/golang/protobuf/AUTHORS3
-rw-r--r--vendor/github.com/golang/protobuf/CONTRIBUTORS3
-rw-r--r--vendor/github.com/golang/protobuf/LICENSE31
-rw-r--r--vendor/github.com/golang/protobuf/Make.protobuf40
-rw-r--r--vendor/github.com/golang/protobuf/Makefile55
-rw-r--r--vendor/github.com/golang/protobuf/README.md244
-rw-r--r--vendor/github.com/golang/protobuf/_conformance/Makefile33
-rw-r--r--vendor/github.com/golang/protobuf/_conformance/conformance.go161
-rw-r--r--vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go1885
-rw-r--r--vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto285
-rw-r--r--vendor/github.com/golang/protobuf/descriptor/descriptor.go93
-rw-r--r--vendor/github.com/golang/protobuf/descriptor/descriptor_test.go32
-rw-r--r--vendor/github.com/golang/protobuf/jsonpb/jsonpb.go1082
-rw-r--r--vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go897
-rw-r--r--vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile33
-rw-r--r--vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go266
-rw-r--r--vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto69
-rw-r--r--vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go852
-rw-r--r--vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto147
-rw-r--r--vendor/github.com/golang/protobuf/proto/Makefile43
-rw-r--r--vendor/github.com/golang/protobuf/proto/all_test.go2278
-rw-r--r--vendor/github.com/golang/protobuf/proto/any_test.go300
-rw-r--r--vendor/github.com/golang/protobuf/proto/clone.go229
-rw-r--r--vendor/github.com/golang/protobuf/proto/clone_test.go300
-rw-r--r--vendor/github.com/golang/protobuf/proto/decode.go970
-rw-r--r--vendor/github.com/golang/protobuf/proto/decode_test.go258
-rw-r--r--vendor/github.com/golang/protobuf/proto/encode.go1362
-rw-r--r--vendor/github.com/golang/protobuf/proto/encode_test.go85
-rw-r--r--vendor/github.com/golang/protobuf/proto/equal.go300
-rw-r--r--vendor/github.com/golang/protobuf/proto/equal_test.go224
-rw-r--r--vendor/github.com/golang/protobuf/proto/extensions.go587
-rw-r--r--vendor/github.com/golang/protobuf/proto/extensions_test.go536
-rw-r--r--vendor/github.com/golang/protobuf/proto/lib.go897
-rw-r--r--vendor/github.com/golang/protobuf/proto/map_test.go46
-rw-r--r--vendor/github.com/golang/protobuf/proto/message_set.go311
-rw-r--r--vendor/github.com/golang/protobuf/proto/message_set_test.go66
-rw-r--r--vendor/github.com/golang/protobuf/proto/pointer_reflect.go484
-rw-r--r--vendor/github.com/golang/protobuf/proto/pointer_unsafe.go270
-rw-r--r--vendor/github.com/golang/protobuf/proto/properties.go872
-rw-r--r--vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go347
-rw-r--r--vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto87
-rw-r--r--vendor/github.com/golang/protobuf/proto/proto3_test.go135
-rw-r--r--vendor/github.com/golang/protobuf/proto/size2_test.go63
-rw-r--r--vendor/github.com/golang/protobuf/proto/size_test.go164
-rw-r--r--vendor/github.com/golang/protobuf/proto/testdata/Makefile50
-rw-r--r--vendor/github.com/golang/protobuf/proto/testdata/golden_test.go86
-rw-r--r--vendor/github.com/golang/protobuf/proto/testdata/test.pb.go4147
-rw-r--r--vendor/github.com/golang/protobuf/proto/testdata/test.proto548
-rw-r--r--vendor/github.com/golang/protobuf/proto/text.go854
-rw-r--r--vendor/github.com/golang/protobuf/proto/text_parser.go895
-rw-r--r--vendor/github.com/golang/protobuf/proto/text_parser_test.go673
-rw-r--r--vendor/github.com/golang/protobuf/proto/text_test.go474
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/Makefile33
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile37
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go2215
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto849
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/doc.go51
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile40
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go2866
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go114
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go463
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go34
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/main.go98
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile45
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go293
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden83
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto167
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile73
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto46
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto38
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go210
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto100
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto59
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden113
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto70
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto43
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto38
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go46
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto44
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto46
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto43
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go870
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden870
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto156
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto53
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/any.go139
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/any/any.pb.go178
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/any/any.proto149
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/any_test.go113
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/doc.go35
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/duration.go102
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go144
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/duration/duration.proto117
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/duration_test.go121
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go66
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/empty/empty.proto52
-rwxr-xr-xvendor/github.com/golang/protobuf/ptypes/regen.sh43
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go380
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/struct/struct.proto96
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/timestamp.go134
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go160
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto133
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/timestamp_test.go153
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go260
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto118
-rw-r--r--vendor/golang.org/x/net/http2/server.go11
-rw-r--r--vendor/golang.org/x/oauth2/.travis.yml13
-rw-r--r--vendor/golang.org/x/oauth2/AUTHORS3
-rw-r--r--vendor/golang.org/x/oauth2/CONTRIBUTING.md31
-rw-r--r--vendor/golang.org/x/oauth2/CONTRIBUTORS3
-rw-r--r--vendor/golang.org/x/oauth2/LICENSE27
-rw-r--r--vendor/golang.org/x/oauth2/README.md77
-rw-r--r--vendor/golang.org/x/oauth2/amazon/amazon.go16
-rw-r--r--vendor/golang.org/x/oauth2/bitbucket/bitbucket.go16
-rw-r--r--vendor/golang.org/x/oauth2/client_appengine.go25
-rw-r--r--vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go104
-rw-r--r--vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go97
-rw-r--r--vendor/golang.org/x/oauth2/example_test.go89
-rw-r--r--vendor/golang.org/x/oauth2/facebook/facebook.go16
-rw-r--r--vendor/golang.org/x/oauth2/fitbit/fitbit.go16
-rw-r--r--vendor/golang.org/x/oauth2/foursquare/foursquare.go16
-rw-r--r--vendor/golang.org/x/oauth2/github/github.go16
-rw-r--r--vendor/golang.org/x/oauth2/google/appengine.go89
-rw-r--r--vendor/golang.org/x/oauth2/google/appengine_hook.go14
-rw-r--r--vendor/golang.org/x/oauth2/google/appengineflex_hook.go11
-rw-r--r--vendor/golang.org/x/oauth2/google/default.go137
-rw-r--r--vendor/golang.org/x/oauth2/google/example_test.go150
-rw-r--r--vendor/golang.org/x/oauth2/google/google.go202
-rw-r--r--vendor/golang.org/x/oauth2/google/google_test.go116
-rw-r--r--vendor/golang.org/x/oauth2/google/jwt.go74
-rw-r--r--vendor/golang.org/x/oauth2/google/jwt_test.go91
-rw-r--r--vendor/golang.org/x/oauth2/google/sdk.go172
-rw-r--r--vendor/golang.org/x/oauth2/google/sdk_test.go46
-rw-r--r--vendor/golang.org/x/oauth2/google/testdata/gcloud/credentials122
-rw-r--r--vendor/golang.org/x/oauth2/google/testdata/gcloud/properties2
-rw-r--r--vendor/golang.org/x/oauth2/heroku/heroku.go16
-rw-r--r--vendor/golang.org/x/oauth2/hipchat/hipchat.go60
-rw-r--r--vendor/golang.org/x/oauth2/internal/doc.go6
-rw-r--r--vendor/golang.org/x/oauth2/internal/oauth2.go75
-rw-r--r--vendor/golang.org/x/oauth2/internal/oauth2_test.go61
-rw-r--r--vendor/golang.org/x/oauth2/internal/token.go251
-rw-r--r--vendor/golang.org/x/oauth2/internal/token_test.go104
-rw-r--r--vendor/golang.org/x/oauth2/internal/transport.go68
-rw-r--r--vendor/golang.org/x/oauth2/internal/transport_test.go38
-rw-r--r--vendor/golang.org/x/oauth2/jws/jws.go182
-rw-r--r--vendor/golang.org/x/oauth2/jws/jws_test.go46
-rw-r--r--vendor/golang.org/x/oauth2/jwt/example_test.go33
-rw-r--r--vendor/golang.org/x/oauth2/jwt/jwt.go159
-rw-r--r--vendor/golang.org/x/oauth2/jwt/jwt_test.go190
-rw-r--r--vendor/golang.org/x/oauth2/linkedin/linkedin.go16
-rw-r--r--vendor/golang.org/x/oauth2/mediamath/mediamath.go22
-rw-r--r--vendor/golang.org/x/oauth2/microsoft/microsoft.go16
-rw-r--r--vendor/golang.org/x/oauth2/oauth2.go344
-rw-r--r--vendor/golang.org/x/oauth2/oauth2_test.go490
-rw-r--r--vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go16
-rw-r--r--vendor/golang.org/x/oauth2/paypal/paypal.go22
-rw-r--r--vendor/golang.org/x/oauth2/slack/slack.go16
-rw-r--r--vendor/golang.org/x/oauth2/token.go158
-rw-r--r--vendor/golang.org/x/oauth2/token_test.go72
-rw-r--r--vendor/golang.org/x/oauth2/transport.go132
-rw-r--r--vendor/golang.org/x/oauth2/transport_test.go108
-rw-r--r--vendor/golang.org/x/oauth2/uber/uber.go16
-rw-r--r--vendor/golang.org/x/oauth2/vk/vk.go16
-rw-r--r--vendor/golang.org/x/oauth2/yandex/yandex.go16
-rw-r--r--vendor/golang.org/x/sys/unix/dev_darwin_test.go2
-rw-r--r--vendor/golang.org/x/sys/unix/dev_dragonfly_test.go2
-rw-r--r--vendor/golang.org/x/sys/unix/dev_linux_test.go2
-rw-r--r--vendor/golang.org/x/sys/unix/dev_netbsd_test.go2
-rw-r--r--vendor/golang.org/x/sys/unix/dev_openbsd_test.go2
-rw-r--r--vendor/golang.org/x/sys/unix/dev_solaris_test.go2
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_darwin_arm.go4
-rw-r--r--vendor/golang.org/x/sys/windows/types_windows.go23
-rw-r--r--vendor/google.golang.org/appengine/.travis.yml18
-rw-r--r--vendor/google.golang.org/appengine/LICENSE202
-rw-r--r--vendor/google.golang.org/appengine/README.md73
-rw-r--r--vendor/google.golang.org/appengine/aetest/doc.go42
-rw-r--r--vendor/google.golang.org/appengine/aetest/instance.go51
-rw-r--r--vendor/google.golang.org/appengine/aetest/instance_classic.go21
-rw-r--r--vendor/google.golang.org/appengine/aetest/instance_test.go116
-rw-r--r--vendor/google.golang.org/appengine/aetest/instance_vm.go276
-rw-r--r--vendor/google.golang.org/appengine/aetest/user.go36
-rw-r--r--vendor/google.golang.org/appengine/appengine.go112
-rw-r--r--vendor/google.golang.org/appengine/appengine_test.go49
-rw-r--r--vendor/google.golang.org/appengine/appengine_vm.go20
-rw-r--r--vendor/google.golang.org/appengine/blobstore/blobstore.go276
-rw-r--r--vendor/google.golang.org/appengine/blobstore/blobstore_test.go183
-rw-r--r--vendor/google.golang.org/appengine/blobstore/read.go160
-rw-r--r--vendor/google.golang.org/appengine/capability/capability.go52
-rw-r--r--vendor/google.golang.org/appengine/channel/channel.go83
-rw-r--r--vendor/google.golang.org/appengine/channel/channel_test.go21
-rw-r--r--vendor/google.golang.org/appengine/cloudsql/cloudsql.go62
-rw-r--r--vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go17
-rw-r--r--vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go16
-rw-r--r--vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go342
-rw-r--r--vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go268
-rw-r--r--vendor/google.golang.org/appengine/cmd/aefix/ae.go185
-rw-r--r--vendor/google.golang.org/appengine/cmd/aefix/ae_test.go144
-rw-r--r--vendor/google.golang.org/appengine/cmd/aefix/fix.go848
-rw-r--r--vendor/google.golang.org/appengine/cmd/aefix/main.go258
-rw-r--r--vendor/google.golang.org/appengine/cmd/aefix/main_test.go129
-rw-r--r--vendor/google.golang.org/appengine/cmd/aefix/typecheck.go673
-rw-r--r--vendor/google.golang.org/appengine/datastore/datastore.go406
-rw-r--r--vendor/google.golang.org/appengine/datastore/datastore_test.go1567
-rw-r--r--vendor/google.golang.org/appengine/datastore/doc.go351
-rw-r--r--vendor/google.golang.org/appengine/datastore/key.go309
-rw-r--r--vendor/google.golang.org/appengine/datastore/key_test.go204
-rw-r--r--vendor/google.golang.org/appengine/datastore/load.go334
-rw-r--r--vendor/google.golang.org/appengine/datastore/metadata.go78
-rw-r--r--vendor/google.golang.org/appengine/datastore/prop.go296
-rw-r--r--vendor/google.golang.org/appengine/datastore/prop_test.go604
-rw-r--r--vendor/google.golang.org/appengine/datastore/query.go724
-rw-r--r--vendor/google.golang.org/appengine/datastore/query_test.go583
-rw-r--r--vendor/google.golang.org/appengine/datastore/save.go300
-rw-r--r--vendor/google.golang.org/appengine/datastore/time_test.go65
-rw-r--r--vendor/google.golang.org/appengine/datastore/transaction.go87
-rw-r--r--vendor/google.golang.org/appengine/delay/delay.go278
-rw-r--r--vendor/google.golang.org/appengine/delay/delay_test.go375
-rw-r--r--vendor/google.golang.org/appengine/demos/guestbook/app.yaml14
-rw-r--r--vendor/google.golang.org/appengine/demos/guestbook/favicon.ico bin 0 -> 1150 bytes
-rw-r--r--vendor/google.golang.org/appengine/demos/guestbook/guestbook.go109
-rw-r--r--vendor/google.golang.org/appengine/demos/guestbook/index.yaml7
-rw-r--r--vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html26
-rw-r--r--vendor/google.golang.org/appengine/demos/helloworld/app.yaml10
-rw-r--r--vendor/google.golang.org/appengine/demos/helloworld/favicon.ico bin 0 -> 1150 bytes
-rw-r--r--vendor/google.golang.org/appengine/demos/helloworld/helloworld.go50
-rw-r--r--vendor/google.golang.org/appengine/errors.go46
-rw-r--r--vendor/google.golang.org/appengine/file/file.go28
-rw-r--r--vendor/google.golang.org/appengine/identity.go142
-rw-r--r--vendor/google.golang.org/appengine/image/image.go67
-rw-r--r--vendor/google.golang.org/appengine/internal/aetesting/fake.go81
-rw-r--r--vendor/google.golang.org/appengine/internal/api.go646
-rw-r--r--vendor/google.golang.org/appengine/internal/api_classic.go159
-rw-r--r--vendor/google.golang.org/appengine/internal/api_common.go86
-rw-r--r--vendor/google.golang.org/appengine/internal/api_race_test.go9
-rw-r--r--vendor/google.golang.org/appengine/internal/api_test.go467
-rw-r--r--vendor/google.golang.org/appengine/internal/app_id.go28
-rw-r--r--vendor/google.golang.org/appengine/internal/app_id_test.go34
-rw-r--r--vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go296
-rw-r--r--vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto64
-rw-r--r--vendor/google.golang.org/appengine/internal/base/api_base.pb.go133
-rw-r--r--vendor/google.golang.org/appengine/internal/base/api_base.proto33
-rw-r--r--vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go347
-rw-r--r--vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto71
-rw-r--r--vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go125
-rw-r--r--vendor/google.golang.org/appengine/internal/capability/capability_service.proto28
-rw-r--r--vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go154
-rw-r--r--vendor/google.golang.org/appengine/internal/channel/channel_service.proto30
-rw-r--r--vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go2778
-rwxr-xr-xvendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto541
-rw-r--r--vendor/google.golang.org/appengine/internal/identity.go14
-rw-r--r--vendor/google.golang.org/appengine/internal/identity_classic.go27
-rw-r--r--vendor/google.golang.org/appengine/internal/identity_vm.go97
-rw-r--r--vendor/google.golang.org/appengine/internal/image/images_service.pb.go845
-rw-r--r--vendor/google.golang.org/appengine/internal/image/images_service.proto162
-rw-r--r--vendor/google.golang.org/appengine/internal/internal.go110
-rw-r--r--vendor/google.golang.org/appengine/internal/internal_vm_test.go60
-rw-r--r--vendor/google.golang.org/appengine/internal/log/log_service.pb.go899
-rw-r--r--vendor/google.golang.org/appengine/internal/log/log_service.proto150
-rw-r--r--vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go229
-rw-r--r--vendor/google.golang.org/appengine/internal/mail/mail_service.proto45
-rw-r--r--vendor/google.golang.org/appengine/internal/main.go15
-rw-r--r--vendor/google.golang.org/appengine/internal/main_vm.go44
-rw-r--r--vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go938
-rw-r--r--vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto165
-rw-r--r--vendor/google.golang.org/appengine/internal/metadata.go61
-rw-r--r--vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go375
-rw-r--r--vendor/google.golang.org/appengine/internal/modules/modules_service.proto80
-rw-r--r--vendor/google.golang.org/appengine/internal/net.go56
-rw-r--r--vendor/google.golang.org/appengine/internal/net_test.go58
-rwxr-xr-xvendor/google.golang.org/appengine/internal/regen.sh40
-rw-r--r--vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go231
-rw-r--r--vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto44
-rw-r--r--vendor/google.golang.org/appengine/internal/search/search.pb.go2127
-rw-r--r--vendor/google.golang.org/appengine/internal/search/search.proto388
-rw-r--r--vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go1858
-rw-r--r--vendor/google.golang.org/appengine/internal/socket/socket_service.proto460
-rw-r--r--vendor/google.golang.org/appengine/internal/system/system_service.pb.go198
-rw-r--r--vendor/google.golang.org/appengine/internal/system/system_service.proto49
-rw-r--r--vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go1888
-rw-r--r--vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto342
-rw-r--r--vendor/google.golang.org/appengine/internal/transaction.go107
-rw-r--r--vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go355
-rw-r--r--vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto64
-rw-r--r--vendor/google.golang.org/appengine/internal/user/user_service.pb.go289
-rw-r--r--vendor/google.golang.org/appengine/internal/user/user_service.proto58
-rw-r--r--vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go427
-rw-r--r--vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto83
-rw-r--r--vendor/google.golang.org/appengine/log/api.go40
-rw-r--r--vendor/google.golang.org/appengine/log/log.go323
-rw-r--r--vendor/google.golang.org/appengine/log/log_test.go112
-rw-r--r--vendor/google.golang.org/appengine/mail/mail.go123
-rw-r--r--vendor/google.golang.org/appengine/mail/mail_test.go65
-rw-r--r--vendor/google.golang.org/appengine/memcache/memcache.go526
-rw-r--r--vendor/google.golang.org/appengine/memcache/memcache_test.go263
-rw-r--r--vendor/google.golang.org/appengine/module/module.go113
-rw-r--r--vendor/google.golang.org/appengine/module/module_test.go124
-rw-r--r--vendor/google.golang.org/appengine/namespace.go25
-rw-r--r--vendor/google.golang.org/appengine/namespace_test.go39
-rw-r--r--vendor/google.golang.org/appengine/remote_api/client.go174
-rw-r--r--vendor/google.golang.org/appengine/remote_api/client_test.go24
-rw-r--r--vendor/google.golang.org/appengine/remote_api/remote_api.go152
-rw-r--r--vendor/google.golang.org/appengine/runtime/runtime.go148
-rw-r--r--vendor/google.golang.org/appengine/runtime/runtime_test.go101
-rw-r--r--vendor/google.golang.org/appengine/search/doc.go209
-rw-r--r--vendor/google.golang.org/appengine/search/field.go82
-rw-r--r--vendor/google.golang.org/appengine/search/search.go1121
-rw-r--r--vendor/google.golang.org/appengine/search/search_test.go1000
-rw-r--r--vendor/google.golang.org/appengine/search/struct.go251
-rw-r--r--vendor/google.golang.org/appengine/search/struct_test.go213
-rw-r--r--vendor/google.golang.org/appengine/socket/doc.go10
-rw-r--r--vendor/google.golang.org/appengine/socket/socket_classic.go290
-rw-r--r--vendor/google.golang.org/appengine/socket/socket_vm.go64
-rw-r--r--vendor/google.golang.org/appengine/taskqueue/taskqueue.go496
-rw-r--r--vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go116
-rw-r--r--vendor/google.golang.org/appengine/timeout.go20
-rw-r--r--vendor/google.golang.org/appengine/urlfetch/urlfetch.go210
-rw-r--r--vendor/google.golang.org/appengine/user/oauth.go52
-rw-r--r--vendor/google.golang.org/appengine/user/user.go84
-rw-r--r--vendor/google.golang.org/appengine/user/user_classic.go35
-rw-r--r--vendor/google.golang.org/appengine/user/user_test.go99
-rw-r--r--vendor/google.golang.org/appengine/user/user_vm.go38
-rw-r--r--vendor/google.golang.org/appengine/xmpp/xmpp.go253
-rw-r--r--vendor/google.golang.org/appengine/xmpp/xmpp_test.go173
325 files changed, 81277 insertions, 7 deletions
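The diffstat above shows golang.org/x/oauth2 being vendored, together with its transitive dependencies (github.com/golang/protobuf and google.golang.org/appengine), to support the new OAuth2 authentication. For context, the package's standard authorization-code flow looks roughly like the sketch below; the client credentials, redirect URL, scopes, and the choice of the Google endpoint are placeholders for illustration, not Miniflux's actual handler.

```go
// Minimal sketch of the authorization-code flow with golang.org/x/oauth2.
// All identifiers below marked "placeholder" are illustrative assumptions.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "client-id",     // placeholder
		ClientSecret: "client-secret", // placeholder
		RedirectURL:  "https://example.com/oauth2/callback",
		Scopes:       []string{"openid", "email"},
		Endpoint:     google.Endpoint, // any provider sub-package supplies an Endpoint
	}

	// Step 1: send the user to the provider's consent page.
	url := conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline)
	fmt.Println("Visit:", url)

	// Step 2: exchange the authorization code returned on the callback for a token.
	var code string
	if _, err := fmt.Scan(&code); err != nil {
		log.Fatal(err)
	}
	tok, err := conf.Exchange(context.Background(), code)
	if err != nil {
		log.Fatal(err)
	}

	// Step 3: use the token-authenticated HTTP client against the provider's API.
	client := conf.Client(context.Background(), tok)
	_ = client
}
```

The provider sub-packages vendored here (google, github, facebook, and so on) only export ready-made `Endpoint` values; the flow itself is identical regardless of provider.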
diff --git a/vendor/github.com/golang/protobuf/.gitignore b/vendor/github.com/golang/protobuf/.gitignore
new file mode 100644
index 0000000..8f5b596
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/.gitignore
@@ -0,0 +1,16 @@
+.DS_Store
+*.[568ao]
+*.ao
+*.so
+*.pyc
+._*
+.nfs.*
+[568a].out
+*~
+*.orig
+core
+_obj
+_test
+_testmain.go
+protoc-gen-go/testdata/multi/*.pb.go
+_conformance/_conformance
diff --git a/vendor/github.com/golang/protobuf/.travis.yml b/vendor/github.com/golang/protobuf/.travis.yml
new file mode 100644
index 0000000..93c6780
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/.travis.yml
@@ -0,0 +1,18 @@
+sudo: false
+language: go
+go:
+- 1.6.x
+- 1.7.x
+- 1.8.x
+- 1.9.x
+
+install:
+ - go get -v -d -t github.com/golang/protobuf/...
+ - curl -L https://github.com/google/protobuf/releases/download/v3.3.0/protoc-3.3.0-linux-x86_64.zip -o /tmp/protoc.zip
+ - unzip /tmp/protoc.zip -d $HOME/protoc
+
+env:
+ - PATH=$HOME/protoc/bin:$PATH
+
+script:
+ - make all test
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..1b1b192
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,31 @@
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/Make.protobuf b/vendor/github.com/golang/protobuf/Make.protobuf
new file mode 100644
index 0000000..15071de
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/Make.protobuf
@@ -0,0 +1,40 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Includable Makefile to add a rule for generating .pb.go files from .proto files
+# (Google protocol buffer descriptions).
+# Typical use if myproto.proto is a file in package mypackage in this directory:
+#
+# include $(GOROOT)/src/pkg/github.com/golang/protobuf/Make.protobuf
+
+%.pb.go: %.proto
+ protoc --go_out=. $<
+
diff --git a/vendor/github.com/golang/protobuf/Makefile b/vendor/github.com/golang/protobuf/Makefile
new file mode 100644
index 0000000..a1421d8
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/Makefile
@@ -0,0 +1,55 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+all: install
+
+install:
+ go install ./proto ./jsonpb ./ptypes
+ go install ./protoc-gen-go
+
+test:
+ go test ./proto ./jsonpb ./ptypes
+ make -C protoc-gen-go/testdata test
+
+clean:
+ go clean ./...
+
+nuke:
+ go clean -i ./...
+
+regenerate:
+ make -C protoc-gen-go/descriptor regenerate
+ make -C protoc-gen-go/plugin regenerate
+ make -C protoc-gen-go/testdata regenerate
+ make -C proto/testdata regenerate
+ make -C jsonpb/jsonpb_test_proto regenerate
+ make -C _conformance regenerate
diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md
new file mode 100644
index 0000000..9c4c815
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/README.md
@@ -0,0 +1,244 @@
+# Go support for Protocol Buffers
+
+[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf)
+[![GoDoc](https://godoc.org/github.com/golang/protobuf?status.svg)](https://godoc.org/github.com/golang/protobuf)
+
+Google's data interchange format.
+Copyright 2010 The Go Authors.
+https://github.com/golang/protobuf
+
+This package and the code it generates requires at least Go 1.4.
+
+This software implements Go bindings for protocol buffers. For
+information about protocol buffers themselves, see
+ https://developers.google.com/protocol-buffers/
+
+## Installation ##
+
+To use this software, you must:
+- Install the standard C++ implementation of protocol buffers from
+ https://developers.google.com/protocol-buffers/
+- Of course, install the Go compiler and tools from
+ https://golang.org/
+ See
+ https://golang.org/doc/install
+ for details or, if you are using gccgo, follow the instructions at
+ https://golang.org/doc/install/gccgo
+- Grab the code from the repository and install the proto package.
+ The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`.
+ The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
+ defaulting to $GOPATH/bin. It must be in your $PATH for the protocol
+ compiler, protoc, to find it.
+
+This software has two parts: a 'protocol compiler plugin' that
+generates Go source files that, once compiled, can access and manage
+protocol buffers; and a library that implements run-time support for
+encoding (marshaling), decoding (unmarshaling), and accessing protocol
+buffers.
+
+There is support for gRPC in Go using protocol buffers.
+See the note at the bottom of this file for details.
+
+There are no insertion points in the plugin.
+
+
+## Using protocol buffers with Go ##
+
+Once the software is installed, there are two steps to using it.
+First you must compile the protocol buffer definitions and then import
+them, with the support library, into your program.
+
+To compile the protocol buffer definition, run protoc with the --go_out
+parameter set to the directory you want to output the Go code to.
+
+ protoc --go_out=. *.proto
+
+The generated files will be suffixed .pb.go. See the Test code below
+for an example using such a file.
+
+
+The package comment for the proto library contains text describing
+the interface provided in Go for protocol buffers. Here is an edited
+version.
+
+==========
+
+The proto package converts data structures to and from the
+wire format of protocol buffers. It works in concert with the
+Go source code generated for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ Helpers for getting values are superseded by the
+ GetFoo methods and their use is deprecated.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed with the enum's type name. Enum types have
+ a String method, and a Enum method to assist in message construction.
+ - Nested groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Enum types do not get an Enum method.
+
+Consider file test.proto, containing
+
+```proto
+ syntax = "proto2";
+ package example;
+
+ enum FOO { X = 17; };
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ }
+```
+
+To create and play with a Test object from the example package,
+
+```go
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ "path/to/example"
+ )
+
+ func main() {
+ test := &example.Test {
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &example.Test_OptionalGroup {
+ RequiredField: proto.String("good bye"),
+ },
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &example.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // etc.
+ }
+```
+
+## Parameters ##
+
+To pass extra parameters to the plugin, use a comma-separated
+parameter list separated from the output directory by a colon:
+
+
+ protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto
+
+
+- `import_prefix=xxx` - a prefix that is added onto the beginning of
+ all imports. Useful for things like generating protos in a
+ subdirectory, or regenerating vendored protobufs in-place.
+- `import_path=foo/bar` - used as the package if no input files
+ declare `go_package`. If it contains slashes, everything up to the
+ rightmost slash is ignored.
+- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to
+ load. The only plugin in this repo is `grpc`.
+- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is
+ associated with Go package quux/shme. This is subject to the
+ import_prefix parameter.
+
+## gRPC Support ##
+
+If a proto file specifies RPC services, protoc-gen-go can be instructed to
+generate code compatible with gRPC (http://www.grpc.io/). To do this, pass
+the `plugins` parameter to protoc-gen-go; the usual way is to insert it into
+the --go_out argument to protoc:
+
+ protoc --go_out=plugins=grpc:. *.proto
+
+## Compatibility ##
+
+The library and the generated code are expected to be stable over time.
+However, we reserve the right to make breaking changes without notice for the
+following reasons:
+
+- Security. A security issue in the specification or implementation may come to
+ light whose resolution requires breaking compatibility. We reserve the right
+ to address such security issues.
+- Unspecified behavior. There are some aspects of the Protocol Buffers
+ specification that are undefined. Programs that depend on such unspecified
+ behavior may break in future releases.
+- Specification errors or changes. If it becomes necessary to address an
+ inconsistency, incompleteness, or change in the Protocol Buffers
+ specification, resolving the issue could affect the meaning or legality of
+ existing programs. We reserve the right to address such issues, including
+ updating the implementations.
+- Bugs. If the library has a bug that violates the specification, a program
+ that depends on the buggy behavior may break if the bug is fixed. We reserve
+ the right to fix such bugs.
+- Adding methods or fields to generated structs. These may conflict with field
+ names that already exist in a schema, causing applications to break. When the
+ code generator encounters a field in the schema that would collide with a
+ generated field or method name, the code generator will append an underscore
+ to the generated field or method name.
+- Adding, removing, or changing methods or fields in generated structs that
+ start with `XXX`. These parts of the generated code are exported out of
+ necessity, but should not be considered part of the public API.
+- Adding, removing, or changing unexported symbols in generated code.
+
+Any breaking changes outside of these will be announced 6 months in advance to
+protobuf@googlegroups.com.
+
+You should, whenever possible, use generated code created by the `protoc-gen-go`
+tool built at the same commit as the `proto` package. The `proto` package
+declares package-level constants in the form `ProtoPackageIsVersionX`.
+Application code and generated code may depend on one of these constants to
+ensure that compilation will fail if the available version of the proto library
+is too old. Whenever we make a change to the generated code that requires newer
+library support, in the same commit we will increment the version number of the
+generated code and declare a new package-level constant whose name incorporates
+the latest version number. Removing a compatibility constant is considered a
+breaking change and would be subject to the announcement policy stated above.
+
+The `protoc-gen-go/generator` package exposes a plugin interface,
+which is used by the gRPC code generation. This interface is not
+supported and is subject to incompatible changes without notice.
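The README's property summary above notes that generated getters are safe on a nil receiver and that per-field defaults are exposed as constants named `Default_StructName_FieldName`. A small sketch against the README's own hypothetical `example` package (generated from the test.proto shown there) illustrates both points; the import path is the same placeholder the README uses.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"path/to/example" // placeholder import path, as in the README example above
)

func main() {
	// Getters work even if the receiver is a nil message and return the default.
	var nilTest *example.Test
	fmt.Println(nilTest.GetType()) // 77, the default declared in test.proto

	// Non-repeated proto2 fields are pointers; proto.String and proto.Int32
	// are the helper functions mentioned in the summary.
	t := &example.Test{
		Label: proto.String("hello"),
		Type:  proto.Int32(17),
	}
	fmt.Println(t.GetLabel(), t.GetType())

	// Per the documented naming convention, the default is also a constant.
	fmt.Println(example.Default_Test_Type) // 77
}
```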
diff --git a/vendor/github.com/golang/protobuf/_conformance/Makefile b/vendor/github.com/golang/protobuf/_conformance/Makefile
new file mode 100644
index 0000000..89800e2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/_conformance/Makefile
@@ -0,0 +1,33 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2016 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+regenerate:
+ protoc --go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,Mgoogle/protobuf/struct.proto=github.com/golang/protobuf/ptypes/struct,Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers,Mgoogle/protobuf/field_mask.proto=google.golang.org/genproto/protobuf:. conformance_proto/conformance.proto
diff --git a/vendor/github.com/golang/protobuf/_conformance/conformance.go b/vendor/github.com/golang/protobuf/_conformance/conformance.go
new file mode 100644
index 0000000..c54212c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/_conformance/conformance.go
@@ -0,0 +1,161 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// conformance implements the conformance test subprocess protocol as
+// documented in conformance.proto.
+package main
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "os"
+
+ pb "github.com/golang/protobuf/_conformance/conformance_proto"
+ "github.com/golang/protobuf/jsonpb"
+ "github.com/golang/protobuf/proto"
+)
+
+func main() {
+ var sizeBuf [4]byte
+ inbuf := make([]byte, 0, 4096)
+ outbuf := proto.NewBuffer(nil)
+ for {
+ if _, err := io.ReadFull(os.Stdin, sizeBuf[:]); err == io.EOF {
+ break
+ } else if err != nil {
+ fmt.Fprintln(os.Stderr, "go conformance: read request:", err)
+ os.Exit(1)
+ }
+ size := binary.LittleEndian.Uint32(sizeBuf[:])
+ if int(size) > cap(inbuf) {
+ inbuf = make([]byte, size)
+ }
+ inbuf = inbuf[:size]
+ if _, err := io.ReadFull(os.Stdin, inbuf); err != nil {
+ fmt.Fprintln(os.Stderr, "go conformance: read request:", err)
+ os.Exit(1)
+ }
+
+ req := new(pb.ConformanceRequest)
+ if err := proto.Unmarshal(inbuf, req); err != nil {
+ fmt.Fprintln(os.Stderr, "go conformance: parse request:", err)
+ os.Exit(1)
+ }
+ res := handle(req)
+
+ if err := outbuf.Marshal(res); err != nil {
+ fmt.Fprintln(os.Stderr, "go conformance: marshal response:", err)
+ os.Exit(1)
+ }
+ binary.LittleEndian.PutUint32(sizeBuf[:], uint32(len(outbuf.Bytes())))
+ if _, err := os.Stdout.Write(sizeBuf[:]); err != nil {
+ fmt.Fprintln(os.Stderr, "go conformance: write response:", err)
+ os.Exit(1)
+ }
+ if _, err := os.Stdout.Write(outbuf.Bytes()); err != nil {
+ fmt.Fprintln(os.Stderr, "go conformance: write response:", err)
+ os.Exit(1)
+ }
+ outbuf.Reset()
+ }
+}
+
+var jsonMarshaler = jsonpb.Marshaler{
+ OrigName: true,
+}
+
+func handle(req *pb.ConformanceRequest) *pb.ConformanceResponse {
+ var err error
+ var msg pb.TestAllTypes
+ switch p := req.Payload.(type) {
+ case *pb.ConformanceRequest_ProtobufPayload:
+ err = proto.Unmarshal(p.ProtobufPayload, &msg)
+ case *pb.ConformanceRequest_JsonPayload:
+ err = jsonpb.UnmarshalString(p.JsonPayload, &msg)
+ if err != nil && err.Error() == "unmarshaling Any not supported yet" {
+ return &pb.ConformanceResponse{
+ Result: &pb.ConformanceResponse_Skipped{
+ Skipped: err.Error(),
+ },
+ }
+ }
+ default:
+ return &pb.ConformanceResponse{
+ Result: &pb.ConformanceResponse_RuntimeError{
+ RuntimeError: "unknown request payload type",
+ },
+ }
+ }
+ if err != nil {
+ return &pb.ConformanceResponse{
+ Result: &pb.ConformanceResponse_ParseError{
+ ParseError: err.Error(),
+ },
+ }
+ }
+ switch req.RequestedOutputFormat {
+ case pb.WireFormat_PROTOBUF:
+ p, err := proto.Marshal(&msg)
+ if err != nil {
+ return &pb.ConformanceResponse{
+ Result: &pb.ConformanceResponse_SerializeError{
+ SerializeError: err.Error(),
+ },
+ }
+ }
+ return &pb.ConformanceResponse{
+ Result: &pb.ConformanceResponse_ProtobufPayload{
+ ProtobufPayload: p,
+ },
+ }
+ case pb.WireFormat_JSON:
+ p, err := jsonMarshaler.MarshalToString(&msg)
+ if err != nil {
+ return &pb.ConformanceResponse{
+ Result: &pb.ConformanceResponse_SerializeError{
+ SerializeError: err.Error(),
+ },
+ }
+ }
+ return &pb.ConformanceResponse{
+ Result: &pb.ConformanceResponse_JsonPayload{
+ JsonPayload: p,
+ },
+ }
+ default:
+ return &pb.ConformanceResponse{
+ Result: &pb.ConformanceResponse_RuntimeError{
+ RuntimeError: "unknown output format",
+ },
+ }
+ }
+}
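conformance.go above reads length-prefixed ConformanceRequest messages from stdin and writes length-prefixed ConformanceResponse messages to stdout: each frame is a 4-byte little-endian size followed by the serialized proto. A minimal sketch of the other side of that protocol (a driver feeding one request to the testee) is shown below; the binary path and the choice of payload are illustrative assumptions, not part of the vendored code.

```go
package main

import (
	"encoding/binary"
	"io"
	"log"
	"os/exec"

	pb "github.com/golang/protobuf/_conformance/conformance_proto"
	"github.com/golang/protobuf/proto"
)

func main() {
	// Start the testee binary; the path is an assumption for illustration.
	cmd := exec.Command("./_conformance")
	stdin, err := cmd.StdinPipe()
	if err != nil {
		log.Fatal(err)
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		log.Fatal(err)
	}
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}

	// One request: an empty TestAllTypes as the protobuf payload, JSON output requested.
	payload, err := proto.Marshal(&pb.TestAllTypes{})
	if err != nil {
		log.Fatal(err)
	}
	req := &pb.ConformanceRequest{
		Payload:               &pb.ConformanceRequest_ProtobufPayload{ProtobufPayload: payload},
		RequestedOutputFormat: pb.WireFormat_JSON,
	}
	raw, err := proto.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}

	// Frame it the way main() above expects: a 4-byte little-endian length, then the bytes.
	var size [4]byte
	binary.LittleEndian.PutUint32(size[:], uint32(len(raw)))
	stdin.Write(size[:])
	stdin.Write(raw)

	// The response comes back with the same framing.
	if _, err := io.ReadFull(stdout, size[:]); err != nil {
		log.Fatal(err)
	}
	buf := make([]byte, binary.LittleEndian.Uint32(size[:]))
	if _, err := io.ReadFull(stdout, buf); err != nil {
		log.Fatal(err)
	}
	res := new(pb.ConformanceResponse)
	if err := proto.Unmarshal(buf, res); err != nil {
		log.Fatal(err)
	}
	log.Printf("result: %v", res.Result)
}
```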
diff --git a/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go b/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go
new file mode 100644
index 0000000..ec354ea
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go
@@ -0,0 +1,1885 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: conformance_proto/conformance.proto
+
+/*
+Package conformance is a generated protocol buffer package.
+
+It is generated from these files:
+ conformance_proto/conformance.proto
+
+It has these top-level messages:
+ ConformanceRequest
+ ConformanceResponse
+ TestAllTypes
+ ForeignMessage
+*/
+package conformance
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/golang/protobuf/ptypes/any"
+import google_protobuf1 "github.com/golang/protobuf/ptypes/duration"
+import google_protobuf2 "google.golang.org/genproto/protobuf"
+import google_protobuf3 "github.com/golang/protobuf/ptypes/struct"
+import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp"
+import google_protobuf5 "github.com/golang/protobuf/ptypes/wrappers"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type WireFormat int32
+
+const (
+ WireFormat_UNSPECIFIED WireFormat = 0
+ WireFormat_PROTOBUF WireFormat = 1
+ WireFormat_JSON WireFormat = 2
+)
+
+var WireFormat_name = map[int32]string{
+ 0: "UNSPECIFIED",
+ 1: "PROTOBUF",
+ 2: "JSON",
+}
+var WireFormat_value = map[string]int32{
+ "UNSPECIFIED": 0,
+ "PROTOBUF": 1,
+ "JSON": 2,
+}
+
+func (x WireFormat) String() string {
+ return proto.EnumName(WireFormat_name, int32(x))
+}
+func (WireFormat) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+type ForeignEnum int32
+
+const (
+ ForeignEnum_FOREIGN_FOO ForeignEnum = 0
+ ForeignEnum_FOREIGN_BAR ForeignEnum = 1
+ ForeignEnum_FOREIGN_BAZ ForeignEnum = 2
+)
+
+var ForeignEnum_name = map[int32]string{
+ 0: "FOREIGN_FOO",
+ 1: "FOREIGN_BAR",
+ 2: "FOREIGN_BAZ",
+}
+var ForeignEnum_value = map[string]int32{
+ "FOREIGN_FOO": 0,
+ "FOREIGN_BAR": 1,
+ "FOREIGN_BAZ": 2,
+}
+
+func (x ForeignEnum) String() string {
+ return proto.EnumName(ForeignEnum_name, int32(x))
+}
+func (ForeignEnum) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+type TestAllTypes_NestedEnum int32
+
+const (
+ TestAllTypes_FOO TestAllTypes_NestedEnum = 0
+ TestAllTypes_BAR TestAllTypes_NestedEnum = 1
+ TestAllTypes_BAZ TestAllTypes_NestedEnum = 2
+ TestAllTypes_NEG TestAllTypes_NestedEnum = -1
+)
+
+var TestAllTypes_NestedEnum_name = map[int32]string{
+ 0: "FOO",
+ 1: "BAR",
+ 2: "BAZ",
+ -1: "NEG",
+}
+var TestAllTypes_NestedEnum_value = map[string]int32{
+ "FOO": 0,
+ "BAR": 1,
+ "BAZ": 2,
+ "NEG": -1,
+}
+
+func (x TestAllTypes_NestedEnum) String() string {
+ return proto.EnumName(TestAllTypes_NestedEnum_name, int32(x))
+}
+func (TestAllTypes_NestedEnum) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
+
+// Represents a single test case's input. The testee should:
+//
+// 1. parse this proto (which should always succeed)
+// 2. parse the protobuf or JSON payload in "payload" (which may fail)
+// 3. if the parse succeeded, serialize the message in the requested format.
+type ConformanceRequest struct {
+ // The payload (whether protobuf of JSON) is always for a TestAllTypes proto
+ // (see below).
+ //
+ // Types that are valid to be assigned to Payload:
+ // *ConformanceRequest_ProtobufPayload
+ // *ConformanceRequest_JsonPayload
+ Payload isConformanceRequest_Payload `protobuf_oneof:"payload"`
+ // Which format should the testee serialize its message to?
+ RequestedOutputFormat WireFormat `protobuf:"varint,3,opt,name=requested_output_format,json=requestedOutputFormat,enum=conformance.WireFormat" json:"requested_output_format,omitempty"`
+}
+
+func (m *ConformanceRequest) Reset() { *m = ConformanceRequest{} }
+func (m *ConformanceRequest) String() string { return proto.CompactTextString(m) }
+func (*ConformanceRequest) ProtoMessage() {}
+func (*ConformanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+type isConformanceRequest_Payload interface {
+ isConformanceRequest_Payload()
+}
+
+type ConformanceRequest_ProtobufPayload struct {
+ ProtobufPayload []byte `protobuf:"bytes,1,opt,name=protobuf_payload,json=protobufPayload,proto3,oneof"`
+}
+type ConformanceRequest_JsonPayload struct {
+ JsonPayload string `protobuf:"bytes,2,opt,name=json_payload,json=jsonPayload,oneof"`
+}
+
+func (*ConformanceRequest_ProtobufPayload) isConformanceRequest_Payload() {}
+func (*ConformanceRequest_JsonPayload) isConformanceRequest_Payload() {}
+
+func (m *ConformanceRequest) GetPayload() isConformanceRequest_Payload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *ConformanceRequest) GetProtobufPayload() []byte {
+ if x, ok := m.GetPayload().(*ConformanceRequest_ProtobufPayload); ok {
+ return x.ProtobufPayload
+ }
+ return nil
+}
+
+func (m *ConformanceRequest) GetJsonPayload() string {
+ if x, ok := m.GetPayload().(*ConformanceRequest_JsonPayload); ok {
+ return x.JsonPayload
+ }
+ return ""
+}
+
+func (m *ConformanceRequest) GetRequestedOutputFormat() WireFormat {
+ if m != nil {
+ return m.RequestedOutputFormat
+ }
+ return WireFormat_UNSPECIFIED
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*ConformanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _ConformanceRequest_OneofMarshaler, _ConformanceRequest_OneofUnmarshaler, _ConformanceRequest_OneofSizer, []interface{}{
+ (*ConformanceRequest_ProtobufPayload)(nil),
+ (*ConformanceRequest_JsonPayload)(nil),
+ }
+}
+
+func _ConformanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*ConformanceRequest)
+ // payload
+ switch x := m.Payload.(type) {
+ case *ConformanceRequest_ProtobufPayload:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.ProtobufPayload)
+ case *ConformanceRequest_JsonPayload:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.JsonPayload)
+ case nil:
+ default:
+ return fmt.Errorf("ConformanceRequest.Payload has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _ConformanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*ConformanceRequest)
+ switch tag {
+ case 1: // payload.protobuf_payload
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.Payload = &ConformanceRequest_ProtobufPayload{x}
+ return true, err
+ case 2: // payload.json_payload
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Payload = &ConformanceRequest_JsonPayload{x}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _ConformanceRequest_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*ConformanceRequest)
+ // payload
+ switch x := m.Payload.(type) {
+ case *ConformanceRequest_ProtobufPayload:
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.ProtobufPayload)))
+ n += len(x.ProtobufPayload)
+ case *ConformanceRequest_JsonPayload:
+ n += proto.SizeVarint(2<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.JsonPayload)))
+ n += len(x.JsonPayload)
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
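
For orientation (a sketch, not from the vendored file): the key each oneof marshaler writes is the standard protobuf tag, (field_number << 3) | wire_type, and the sizer above measures the same key.

// Illustrative arithmetic, same-package assumption. For json_payload the key
// is (2<<3)|proto.WireBytes = 18 = 0x12, a single varint byte, so the sizer's
// proto.SizeVarint(2<<3|proto.WireBytes) call above contributes 1 byte before
// the length prefix and the string bytes themselves.
const jsonPayloadKey = 2<<3 | proto.WireBytes // == 18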
+
+// Represents a single test case's output.
+type ConformanceResponse struct {
+ // Types that are valid to be assigned to Result:
+ // *ConformanceResponse_ParseError
+ // *ConformanceResponse_SerializeError
+ // *ConformanceResponse_RuntimeError
+ // *ConformanceResponse_ProtobufPayload
+ // *ConformanceResponse_JsonPayload
+ // *ConformanceResponse_Skipped
+ Result isConformanceResponse_Result `protobuf_oneof:"result"`
+}
+
+func (m *ConformanceResponse) Reset() { *m = ConformanceResponse{} }
+func (m *ConformanceResponse) String() string { return proto.CompactTextString(m) }
+func (*ConformanceResponse) ProtoMessage() {}
+func (*ConformanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+type isConformanceResponse_Result interface {
+ isConformanceResponse_Result()
+}
+
+type ConformanceResponse_ParseError struct {
+ ParseError string `protobuf:"bytes,1,opt,name=parse_error,json=parseError,oneof"`
+}
+type ConformanceResponse_SerializeError struct {
+ SerializeError string `protobuf:"bytes,6,opt,name=serialize_error,json=serializeError,oneof"`
+}
+type ConformanceResponse_RuntimeError struct {
+ RuntimeError string `protobuf:"bytes,2,opt,name=runtime_error,json=runtimeError,oneof"`
+}
+type ConformanceResponse_ProtobufPayload struct {
+ ProtobufPayload []byte `protobuf:"bytes,3,opt,name=protobuf_payload,json=protobufPayload,proto3,oneof"`
+}
+type ConformanceResponse_JsonPayload struct {
+ JsonPayload string `protobuf:"bytes,4,opt,name=json_payload,json=jsonPayload,oneof"`
+}
+type ConformanceResponse_Skipped struct {
+ Skipped string `protobuf:"bytes,5,opt,name=skipped,oneof"`
+}
+
+func (*ConformanceResponse_ParseError) isConformanceResponse_Result() {}
+func (*ConformanceResponse_SerializeError) isConformanceResponse_Result() {}
+func (*ConformanceResponse_RuntimeError) isConformanceResponse_Result() {}
+func (*ConformanceResponse_ProtobufPayload) isConformanceResponse_Result() {}
+func (*ConformanceResponse_JsonPayload) isConformanceResponse_Result() {}
+func (*ConformanceResponse_Skipped) isConformanceResponse_Result() {}
+
+func (m *ConformanceResponse) GetResult() isConformanceResponse_Result {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *ConformanceResponse) GetParseError() string {
+ if x, ok := m.GetResult().(*ConformanceResponse_ParseError); ok {
+ return x.ParseError
+ }
+ return ""
+}
+
+func (m *ConformanceResponse) GetSerializeError() string {
+ if x, ok := m.GetResult().(*ConformanceResponse_SerializeError); ok {
+ return x.SerializeError
+ }
+ return ""
+}
+
+func (m *ConformanceResponse) GetRuntimeError() string {
+ if x, ok := m.GetResult().(*ConformanceResponse_RuntimeError); ok {
+ return x.RuntimeError
+ }
+ return ""
+}
+
+func (m *ConformanceResponse) GetProtobufPayload() []byte {
+ if x, ok := m.GetResult().(*ConformanceResponse_ProtobufPayload); ok {
+ return x.ProtobufPayload
+ }
+ return nil
+}
+
+func (m *ConformanceResponse) GetJsonPayload() string {
+ if x, ok := m.GetResult().(*ConformanceResponse_JsonPayload); ok {
+ return x.JsonPayload
+ }
+ return ""
+}
+
+func (m *ConformanceResponse) GetSkipped() string {
+ if x, ok := m.GetResult().(*ConformanceResponse_Skipped); ok {
+ return x.Skipped
+ }
+ return ""
+}
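
A sketch (illustrative, assumed to sit in the same conformance package) of how a testee might choose which Result variant to set for a single test case:

func buildResponse(serialized []byte, parseErr error) *ConformanceResponse {
	if parseErr != nil {
		// Report a parse failure through the parse_error variant.
		return &ConformanceResponse{
			Result: &ConformanceResponse_ParseError{ParseError: parseErr.Error()},
		}
	}
	// Otherwise return the re-serialized payload.
	return &ConformanceResponse{
		Result: &ConformanceResponse_ProtobufPayload{ProtobufPayload: serialized},
	}
}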
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*ConformanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _ConformanceResponse_OneofMarshaler, _ConformanceResponse_OneofUnmarshaler, _ConformanceResponse_OneofSizer, []interface{}{
+ (*ConformanceResponse_ParseError)(nil),
+ (*ConformanceResponse_SerializeError)(nil),
+ (*ConformanceResponse_RuntimeError)(nil),
+ (*ConformanceResponse_ProtobufPayload)(nil),
+ (*ConformanceResponse_JsonPayload)(nil),
+ (*ConformanceResponse_Skipped)(nil),
+ }
+}
+
+func _ConformanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*ConformanceResponse)
+ // result
+ switch x := m.Result.(type) {
+ case *ConformanceResponse_ParseError:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.ParseError)
+ case *ConformanceResponse_SerializeError:
+ b.EncodeVarint(6<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.SerializeError)
+ case *ConformanceResponse_RuntimeError:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.RuntimeError)
+ case *ConformanceResponse_ProtobufPayload:
+ b.EncodeVarint(3<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.ProtobufPayload)
+ case *ConformanceResponse_JsonPayload:
+ b.EncodeVarint(4<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.JsonPayload)
+ case *ConformanceResponse_Skipped:
+ b.EncodeVarint(5<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.Skipped)
+ case nil:
+ default:
+ return fmt.Errorf("ConformanceResponse.Result has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _ConformanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*ConformanceResponse)
+ switch tag {
+ case 1: // result.parse_error
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Result = &ConformanceResponse_ParseError{x}
+ return true, err
+ case 6: // result.serialize_error
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Result = &ConformanceResponse_SerializeError{x}
+ return true, err
+ case 2: // result.runtime_error
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Result = &ConformanceResponse_RuntimeError{x}
+ return true, err
+ case 3: // result.protobuf_payload
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.Result = &ConformanceResponse_ProtobufPayload{x}
+ return true, err
+ case 4: // result.json_payload
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Result = &ConformanceResponse_JsonPayload{x}
+ return true, err
+ case 5: // result.skipped
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Result = &ConformanceResponse_Skipped{x}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _ConformanceResponse_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*ConformanceResponse)
+ // result
+ switch x := m.Result.(type) {
+ case *ConformanceResponse_ParseError:
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.ParseError)))
+ n += len(x.ParseError)
+ case *ConformanceResponse_SerializeError:
+ n += proto.SizeVarint(6<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.SerializeError)))
+ n += len(x.SerializeError)
+ case *ConformanceResponse_RuntimeError:
+ n += proto.SizeVarint(2<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.RuntimeError)))
+ n += len(x.RuntimeError)
+ case *ConformanceResponse_ProtobufPayload:
+ n += proto.SizeVarint(3<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.ProtobufPayload)))
+ n += len(x.ProtobufPayload)
+ case *ConformanceResponse_JsonPayload:
+ n += proto.SizeVarint(4<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.JsonPayload)))
+ n += len(x.JsonPayload)
+ case *ConformanceResponse_Skipped:
+ n += proto.SizeVarint(5<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.Skipped)))
+ n += len(x.Skipped)
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// This proto includes every type of field in both singular and repeated
+// forms.
+type TestAllTypes struct {
+ // Singular
+ OptionalInt32 int32 `protobuf:"varint,1,opt,name=optional_int32,json=optionalInt32" json:"optional_int32,omitempty"`
+ OptionalInt64 int64 `protobuf:"varint,2,opt,name=optional_int64,json=optionalInt64" json:"optional_int64,omitempty"`
+ OptionalUint32 uint32 `protobuf:"varint,3,opt,name=optional_uint32,json=optionalUint32" json:"optional_uint32,omitempty"`
+ OptionalUint64 uint64 `protobuf:"varint,4,opt,name=optional_uint64,json=optionalUint64" json:"optional_uint64,omitempty"`
+ OptionalSint32 int32 `protobuf:"zigzag32,5,opt,name=optional_sint32,json=optionalSint32" json:"optional_sint32,omitempty"`
+ OptionalSint64 int64 `protobuf:"zigzag64,6,opt,name=optional_sint64,json=optionalSint64" json:"optional_sint64,omitempty"`
+ OptionalFixed32 uint32 `protobuf:"fixed32,7,opt,name=optional_fixed32,json=optionalFixed32" json:"optional_fixed32,omitempty"`
+ OptionalFixed64 uint64 `protobuf:"fixed64,8,opt,name=optional_fixed64,json=optionalFixed64" json:"optional_fixed64,omitempty"`
+ OptionalSfixed32 int32 `protobuf:"fixed32,9,opt,name=optional_sfixed32,json=optionalSfixed32" json:"optional_sfixed32,omitempty"`
+ OptionalSfixed64 int64 `protobuf:"fixed64,10,opt,name=optional_sfixed64,json=optionalSfixed64" json:"optional_sfixed64,omitempty"`
+ OptionalFloat float32 `protobuf:"fixed32,11,opt,name=optional_float,json=optionalFloat" json:"optional_float,omitempty"`
+ OptionalDouble float64 `protobuf:"fixed64,12,opt,name=optional_double,json=optionalDouble" json:"optional_double,omitempty"`
+ OptionalBool bool `protobuf:"varint,13,opt,name=optional_bool,json=optionalBool" json:"optional_bool,omitempty"`
+ OptionalString string `protobuf:"bytes,14,opt,name=optional_string,json=optionalString" json:"optional_string,omitempty"`
+ OptionalBytes []byte `protobuf:"bytes,15,opt,name=optional_bytes,json=optionalBytes,proto3" json:"optional_bytes,omitempty"`
+ OptionalNestedMessage *TestAllTypes_NestedMessage `protobuf:"bytes,18,opt,name=optional_nested_message,json=optionalNestedMessage" json:"optional_nested_message,omitempty"`
+ OptionalForeignMessage *ForeignMessage `protobuf:"bytes,19,opt,name=optional_foreign_message,json=optionalForeignMessage" json:"optional_foreign_message,omitempty"`
+ OptionalNestedEnum TestAllTypes_NestedEnum `protobuf:"varint,21,opt,name=optional_nested_enum,json=optionalNestedEnum,enum=conformance.TestAllTypes_NestedEnum" json:"optional_nested_enum,omitempty"`
+ OptionalForeignEnum ForeignEnum `protobuf:"varint,22,opt,name=optional_foreign_enum,json=optionalForeignEnum,enum=conformance.ForeignEnum" json:"optional_foreign_enum,omitempty"`
+ OptionalStringPiece string `protobuf:"bytes,24,opt,name=optional_string_piece,json=optionalStringPiece" json:"optional_string_piece,omitempty"`
+ OptionalCord string `protobuf:"bytes,25,opt,name=optional_cord,json=optionalCord" json:"optional_cord,omitempty"`
+ RecursiveMessage *TestAllTypes `protobuf:"bytes,27,opt,name=recursive_message,json=recursiveMessage" json:"recursive_message,omitempty"`
+ // Repeated
+ RepeatedInt32 []int32 `protobuf:"varint,31,rep,packed,name=repeated_int32,json=repeatedInt32" json:"repeated_int32,omitempty"`
+ RepeatedInt64 []int64 `protobuf:"varint,32,rep,packed,name=repeated_int64,json=repeatedInt64" json:"repeated_int64,omitempty"`
+ RepeatedUint32 []uint32 `protobuf:"varint,33,rep,packed,name=repeated_uint32,json=repeatedUint32" json:"repeated_uint32,omitempty"`
+ RepeatedUint64 []uint64 `protobuf:"varint,34,rep,packed,name=repeated_uint64,json=repeatedUint64" json:"repeated_uint64,omitempty"`
+ RepeatedSint32 []int32 `protobuf:"zigzag32,35,rep,packed,name=repeated_sint32,json=repeatedSint32" json:"repeated_sint32,omitempty"`
+ RepeatedSint64 []int64 `protobuf:"zigzag64,36,rep,packed,name=repeated_sint64,json=repeatedSint64" json:"repeated_sint64,omitempty"`
+ RepeatedFixed32 []uint32 `protobuf:"fixed32,37,rep,packed,name=repeated_fixed32,json=repeatedFixed32" json:"repeated_fixed32,omitempty"`
+ RepeatedFixed64 []uint64 `protobuf:"fixed64,38,rep,packed,name=repeated_fixed64,json=repeatedFixed64" json:"repeated_fixed64,omitempty"`
+ RepeatedSfixed32 []int32 `protobuf:"fixed32,39,rep,packed,name=repeated_sfixed32,json=repeatedSfixed32" json:"repeated_sfixed32,omitempty"`
+ RepeatedSfixed64 []int64 `protobuf:"fixed64,40,rep,packed,name=repeated_sfixed64,json=repeatedSfixed64" json:"repeated_sfixed64,omitempty"`
+ RepeatedFloat []float32 `protobuf:"fixed32,41,rep,packed,name=repeated_float,json=repeatedFloat" json:"repeated_float,omitempty"`
+ RepeatedDouble []float64 `protobuf:"fixed64,42,rep,packed,name=repeated_double,json=repeatedDouble" json:"repeated_double,omitempty"`
+ RepeatedBool []bool `protobuf:"varint,43,rep,packed,name=repeated_bool,json=repeatedBool" json:"repeated_bool,omitempty"`
+ RepeatedString []string `protobuf:"bytes,44,rep,name=repeated_string,json=repeatedString" json:"repeated_string,omitempty"`
+ RepeatedBytes [][]byte `protobuf:"bytes,45,rep,name=repeated_bytes,json=repeatedBytes,proto3" json:"repeated_bytes,omitempty"`
+ RepeatedNestedMessage []*TestAllTypes_NestedMessage `protobuf:"bytes,48,rep,name=repeated_nested_message,json=repeatedNestedMessage" json:"repeated_nested_message,omitempty"`
+ RepeatedForeignMessage []*ForeignMessage `protobuf:"bytes,49,rep,name=repeated_foreign_message,json=repeatedForeignMessage" json:"repeated_foreign_message,omitempty"`
+ RepeatedNestedEnum []TestAllTypes_NestedEnum `protobuf:"varint,51,rep,packed,name=repeated_nested_enum,json=repeatedNestedEnum,enum=conformance.TestAllTypes_NestedEnum" json:"repeated_nested_enum,omitempty"`
+ RepeatedForeignEnum []ForeignEnum `protobuf:"varint,52,rep,packed,name=repeated_foreign_enum,json=repeatedForeignEnum,enum=conformance.ForeignEnum" json:"repeated_foreign_enum,omitempty"`
+ RepeatedStringPiece []string `protobuf:"bytes,54,rep,name=repeated_string_piece,json=repeatedStringPiece" json:"repeated_string_piece,omitempty"`
+ RepeatedCord []string `protobuf:"bytes,55,rep,name=repeated_cord,json=repeatedCord" json:"repeated_cord,omitempty"`
+ // Map
+ MapInt32Int32 map[int32]int32 `protobuf:"bytes,56,rep,name=map_int32_int32,json=mapInt32Int32" json:"map_int32_int32,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
+ MapInt64Int64 map[int64]int64 `protobuf:"bytes,57,rep,name=map_int64_int64,json=mapInt64Int64" json:"map_int64_int64,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
+ MapUint32Uint32 map[uint32]uint32 `protobuf:"bytes,58,rep,name=map_uint32_uint32,json=mapUint32Uint32" json:"map_uint32_uint32,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
+ MapUint64Uint64 map[uint64]uint64 `protobuf:"bytes,59,rep,name=map_uint64_uint64,json=mapUint64Uint64" json:"map_uint64_uint64,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
+ MapSint32Sint32 map[int32]int32 `protobuf:"bytes,60,rep,name=map_sint32_sint32,json=mapSint32Sint32" json:"map_sint32_sint32,omitempty" protobuf_key:"zigzag32,1,opt,name=key" protobuf_val:"zigzag32,2,opt,name=value"`
+ MapSint64Sint64 map[int64]int64 `protobuf:"bytes,61,rep,name=map_sint64_sint64,json=mapSint64Sint64" json:"map_sint64_sint64,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"zigzag64,2,opt,name=value"`
+ MapFixed32Fixed32 map[uint32]uint32 `protobuf:"bytes,62,rep,name=map_fixed32_fixed32,json=mapFixed32Fixed32" json:"map_fixed32_fixed32,omitempty" protobuf_key:"fixed32,1,opt,name=key" protobuf_val:"fixed32,2,opt,name=value"`
+ MapFixed64Fixed64 map[uint64]uint64 `protobuf:"bytes,63,rep,name=map_fixed64_fixed64,json=mapFixed64Fixed64" json:"map_fixed64_fixed64,omitempty" protobuf_key:"fixed64,1,opt,name=key" protobuf_val:"fixed64,2,opt,name=value"`
+ MapSfixed32Sfixed32 map[int32]int32 `protobuf:"bytes,64,rep,name=map_sfixed32_sfixed32,json=mapSfixed32Sfixed32" json:"map_sfixed32_sfixed32,omitempty" protobuf_key:"fixed32,1,opt,name=key" protobuf_val:"fixed32,2,opt,name=value"`
+ MapSfixed64Sfixed64 map[int64]int64 `protobuf:"bytes,65,rep,name=map_sfixed64_sfixed64,json=mapSfixed64Sfixed64" json:"map_sfixed64_sfixed64,omitempty" protobuf_key:"fixed64,1,opt,name=key" protobuf_val:"fixed64,2,opt,name=value"`
+ MapInt32Float map[int32]float32 `protobuf:"bytes,66,rep,name=map_int32_float,json=mapInt32Float" json:"map_int32_float,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"fixed32,2,opt,name=value"`
+ MapInt32Double map[int32]float64 `protobuf:"bytes,67,rep,name=map_int32_double,json=mapInt32Double" json:"map_int32_double,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"fixed64,2,opt,name=value"`
+ MapBoolBool map[bool]bool `protobuf:"bytes,68,rep,name=map_bool_bool,json=mapBoolBool" json:"map_bool_bool,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
+ MapStringString map[string]string `protobuf:"bytes,69,rep,name=map_string_string,json=mapStringString" json:"map_string_string,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ MapStringBytes map[string][]byte `protobuf:"bytes,70,rep,name=map_string_bytes,json=mapStringBytes" json:"map_string_bytes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ MapStringNestedMessage map[string]*TestAllTypes_NestedMessage `protobuf:"bytes,71,rep,name=map_string_nested_message,json=mapStringNestedMessage" json:"map_string_nested_message,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ MapStringForeignMessage map[string]*ForeignMessage `protobuf:"bytes,72,rep,name=map_string_foreign_message,json=mapStringForeignMessage" json:"map_string_foreign_message,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ MapStringNestedEnum map[string]TestAllTypes_NestedEnum `protobuf:"bytes,73,rep,name=map_string_nested_enum,json=mapStringNestedEnum" json:"map_string_nested_enum,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=conformance.TestAllTypes_NestedEnum"`
+ MapStringForeignEnum map[string]ForeignEnum `protobuf:"bytes,74,rep,name=map_string_foreign_enum,json=mapStringForeignEnum" json:"map_string_foreign_enum,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=conformance.ForeignEnum"`
+ // Types that are valid to be assigned to OneofField:
+ // *TestAllTypes_OneofUint32
+ // *TestAllTypes_OneofNestedMessage
+ // *TestAllTypes_OneofString
+ // *TestAllTypes_OneofBytes
+ // *TestAllTypes_OneofBool
+ // *TestAllTypes_OneofUint64
+ // *TestAllTypes_OneofFloat
+ // *TestAllTypes_OneofDouble
+ // *TestAllTypes_OneofEnum
+ OneofField isTestAllTypes_OneofField `protobuf_oneof:"oneof_field"`
+ // Well-known types
+ OptionalBoolWrapper *google_protobuf5.BoolValue `protobuf:"bytes,201,opt,name=optional_bool_wrapper,json=optionalBoolWrapper" json:"optional_bool_wrapper,omitempty"`
+ OptionalInt32Wrapper *google_protobuf5.Int32Value `protobuf:"bytes,202,opt,name=optional_int32_wrapper,json=optionalInt32Wrapper" json:"optional_int32_wrapper,omitempty"`
+ OptionalInt64Wrapper *google_protobuf5.Int64Value `protobuf:"bytes,203,opt,name=optional_int64_wrapper,json=optionalInt64Wrapper" json:"optional_int64_wrapper,omitempty"`
+ OptionalUint32Wrapper *google_protobuf5.UInt32Value `protobuf:"bytes,204,opt,name=optional_uint32_wrapper,json=optionalUint32Wrapper" json:"optional_uint32_wrapper,omitempty"`
+ OptionalUint64Wrapper *google_protobuf5.UInt64Value `protobuf:"bytes,205,opt,name=optional_uint64_wrapper,json=optionalUint64Wrapper" json:"optional_uint64_wrapper,omitempty"`
+ OptionalFloatWrapper *google_protobuf5.FloatValue `protobuf:"bytes,206,opt,name=optional_float_wrapper,json=optionalFloatWrapper" json:"optional_float_wrapper,omitempty"`
+ OptionalDoubleWrapper *google_protobuf5.DoubleValue `protobuf:"bytes,207,opt,name=optional_double_wrapper,json=optionalDoubleWrapper" json:"optional_double_wrapper,omitempty"`
+ OptionalStringWrapper *google_protobuf5.StringValue `protobuf:"bytes,208,opt,name=optional_string_wrapper,json=optionalStringWrapper" json:"optional_string_wrapper,omitempty"`
+ OptionalBytesWrapper *google_protobuf5.BytesValue `protobuf:"bytes,209,opt,name=optional_bytes_wrapper,json=optionalBytesWrapper" json:"optional_bytes_wrapper,omitempty"`
+ RepeatedBoolWrapper []*google_protobuf5.BoolValue `protobuf:"bytes,211,rep,name=repeated_bool_wrapper,json=repeatedBoolWrapper" json:"repeated_bool_wrapper,omitempty"`
+ RepeatedInt32Wrapper []*google_protobuf5.Int32Value `protobuf:"bytes,212,rep,name=repeated_int32_wrapper,json=repeatedInt32Wrapper" json:"repeated_int32_wrapper,omitempty"`
+ RepeatedInt64Wrapper []*google_protobuf5.Int64Value `protobuf:"bytes,213,rep,name=repeated_int64_wrapper,json=repeatedInt64Wrapper" json:"repeated_int64_wrapper,omitempty"`
+ RepeatedUint32Wrapper []*google_protobuf5.UInt32Value `protobuf:"bytes,214,rep,name=repeated_uint32_wrapper,json=repeatedUint32Wrapper" json:"repeated_uint32_wrapper,omitempty"`
+ RepeatedUint64Wrapper []*google_protobuf5.UInt64Value `protobuf:"bytes,215,rep,name=repeated_uint64_wrapper,json=repeatedUint64Wrapper" json:"repeated_uint64_wrapper,omitempty"`
+ RepeatedFloatWrapper []*google_protobuf5.FloatValue `protobuf:"bytes,216,rep,name=repeated_float_wrapper,json=repeatedFloatWrapper" json:"repeated_float_wrapper,omitempty"`
+ RepeatedDoubleWrapper []*google_protobuf5.DoubleValue `protobuf:"bytes,217,rep,name=repeated_double_wrapper,json=repeatedDoubleWrapper" json:"repeated_double_wrapper,omitempty"`
+ RepeatedStringWrapper []*google_protobuf5.StringValue `protobuf:"bytes,218,rep,name=repeated_string_wrapper,json=repeatedStringWrapper" json:"repeated_string_wrapper,omitempty"`
+ RepeatedBytesWrapper []*google_protobuf5.BytesValue `protobuf:"bytes,219,rep,name=repeated_bytes_wrapper,json=repeatedBytesWrapper" json:"repeated_bytes_wrapper,omitempty"`
+ OptionalDuration *google_protobuf1.Duration `protobuf:"bytes,301,opt,name=optional_duration,json=optionalDuration" json:"optional_duration,omitempty"`
+ OptionalTimestamp *google_protobuf4.Timestamp `protobuf:"bytes,302,opt,name=optional_timestamp,json=optionalTimestamp" json:"optional_timestamp,omitempty"`
+ OptionalFieldMask *google_protobuf2.FieldMask `protobuf:"bytes,303,opt,name=optional_field_mask,json=optionalFieldMask" json:"optional_field_mask,omitempty"`
+ OptionalStruct *google_protobuf3.Struct `protobuf:"bytes,304,opt,name=optional_struct,json=optionalStruct" json:"optional_struct,omitempty"`
+ OptionalAny *google_protobuf.Any `protobuf:"bytes,305,opt,name=optional_any,json=optionalAny" json:"optional_any,omitempty"`
+ OptionalValue *google_protobuf3.Value `protobuf:"bytes,306,opt,name=optional_value,json=optionalValue" json:"optional_value,omitempty"`
+ RepeatedDuration []*google_protobuf1.Duration `protobuf:"bytes,311,rep,name=repeated_duration,json=repeatedDuration" json:"repeated_duration,omitempty"`
+ RepeatedTimestamp []*google_protobuf4.Timestamp `protobuf:"bytes,312,rep,name=repeated_timestamp,json=repeatedTimestamp" json:"repeated_timestamp,omitempty"`
+ RepeatedFieldmask []*google_protobuf2.FieldMask `protobuf:"bytes,313,rep,name=repeated_fieldmask,json=repeatedFieldmask" json:"repeated_fieldmask,omitempty"`
+ RepeatedStruct []*google_protobuf3.Struct `protobuf:"bytes,324,rep,name=repeated_struct,json=repeatedStruct" json:"repeated_struct,omitempty"`
+ RepeatedAny []*google_protobuf.Any `protobuf:"bytes,315,rep,name=repeated_any,json=repeatedAny" json:"repeated_any,omitempty"`
+ RepeatedValue []*google_protobuf3.Value `protobuf:"bytes,316,rep,name=repeated_value,json=repeatedValue" json:"repeated_value,omitempty"`
+ // Test field-name-to-JSON-name convention.
+ // (protobuf says names can be any valid C/C++ identifier.)
+ Fieldname1 int32 `protobuf:"varint,401,opt,name=fieldname1" json:"fieldname1,omitempty"`
+ FieldName2 int32 `protobuf:"varint,402,opt,name=field_name2,json=fieldName2" json:"field_name2,omitempty"`
+ XFieldName3 int32 `protobuf:"varint,403,opt,name=_field_name3,json=FieldName3" json:"_field_name3,omitempty"`
+ Field_Name4_ int32 `protobuf:"varint,404,opt,name=field__name4_,json=fieldName4" json:"field__name4_,omitempty"`
+ Field0Name5 int32 `protobuf:"varint,405,opt,name=field0name5" json:"field0name5,omitempty"`
+ Field_0Name6 int32 `protobuf:"varint,406,opt,name=field_0_name6,json=field0Name6" json:"field_0_name6,omitempty"`
+ FieldName7 int32 `protobuf:"varint,407,opt,name=fieldName7" json:"fieldName7,omitempty"`
+ FieldName8 int32 `protobuf:"varint,408,opt,name=FieldName8" json:"FieldName8,omitempty"`
+ Field_Name9 int32 `protobuf:"varint,409,opt,name=field_Name9,json=fieldName9" json:"field_Name9,omitempty"`
+ Field_Name10 int32 `protobuf:"varint,410,opt,name=Field_Name10,json=FieldName10" json:"Field_Name10,omitempty"`
+ FIELD_NAME11 int32 `protobuf:"varint,411,opt,name=FIELD_NAME11,json=FIELDNAME11" json:"FIELD_NAME11,omitempty"`
+ FIELDName12 int32 `protobuf:"varint,412,opt,name=FIELD_name12,json=FIELDName12" json:"FIELD_name12,omitempty"`
+ XFieldName13 int32 `protobuf:"varint,413,opt,name=__field_name13,json=FieldName13" json:"__field_name13,omitempty"`
+ X_FieldName14 int32 `protobuf:"varint,414,opt,name=__Field_name14,json=FieldName14" json:"__Field_name14,omitempty"`
+ Field_Name15 int32 `protobuf:"varint,415,opt,name=field__name15,json=fieldName15" json:"field__name15,omitempty"`
+ Field__Name16 int32 `protobuf:"varint,416,opt,name=field__Name16,json=fieldName16" json:"field__Name16,omitempty"`
+ FieldName17__ int32 `protobuf:"varint,417,opt,name=field_name17__,json=fieldName17" json:"field_name17__,omitempty"`
+ FieldName18__ int32 `protobuf:"varint,418,opt,name=Field_name18__,json=FieldName18" json:"Field_name18__,omitempty"`
+}
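
A short construction sketch (illustrative, same-package assumption) covering the field shapes declared above that need no extra imports: singular scalars, repeated fields, and maps.

func exampleTestAllTypes() *TestAllTypes {
	return &TestAllTypes{
		// Singular scalar fields are plain Go values.
		OptionalInt32:  42,
		OptionalString: "hello",
		// Repeated fields are slices.
		RepeatedInt32: []int32{1, 2, 3},
		// Map fields are ordinary Go maps, keyed and valued per the
		// protobuf_key and protobuf_val tags above.
		MapStringString: map[string]string{"greeting": "hello"},
	}
}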
+
+func (m *TestAllTypes) Reset() { *m = TestAllTypes{} }
+func (m *TestAllTypes) String() string { return proto.CompactTextString(m) }
+func (*TestAllTypes) ProtoMessage() {}
+func (*TestAllTypes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+type isTestAllTypes_OneofField interface {
+ isTestAllTypes_OneofField()
+}
+
+type TestAllTypes_OneofUint32 struct {
+ OneofUint32 uint32 `protobuf:"varint,111,opt,name=oneof_uint32,json=oneofUint32,oneof"`
+}
+type TestAllTypes_OneofNestedMessage struct {
+ OneofNestedMessage *TestAllTypes_NestedMessage `protobuf:"bytes,112,opt,name=oneof_nested_message,json=oneofNestedMessage,oneof"`
+}
+type TestAllTypes_OneofString struct {
+ OneofString string `protobuf:"bytes,113,opt,name=oneof_string,json=oneofString,oneof"`
+}
+type TestAllTypes_OneofBytes struct {
+ OneofBytes []byte `protobuf:"bytes,114,opt,name=oneof_bytes,json=oneofBytes,proto3,oneof"`
+}
+type TestAllTypes_OneofBool struct {
+ OneofBool bool `protobuf:"varint,115,opt,name=oneof_bool,json=oneofBool,oneof"`
+}
+type TestAllTypes_OneofUint64 struct {
+ OneofUint64 uint64 `protobuf:"varint,116,opt,name=oneof_uint64,json=oneofUint64,oneof"`
+}
+type TestAllTypes_OneofFloat struct {
+ OneofFloat float32 `protobuf:"fixed32,117,opt,name=oneof_float,json=oneofFloat,oneof"`
+}
+type TestAllTypes_OneofDouble struct {
+ OneofDouble float64 `protobuf:"fixed64,118,opt,name=oneof_double,json=oneofDouble,oneof"`
+}
+type TestAllTypes_OneofEnum struct {
+ OneofEnum TestAllTypes_NestedEnum `protobuf:"varint,119,opt,name=oneof_enum,json=oneofEnum,enum=conformance.TestAllTypes_NestedEnum,oneof"`
+}
+
+func (*TestAllTypes_OneofUint32) isTestAllTypes_OneofField() {}
+func (*TestAllTypes_OneofNestedMessage) isTestAllTypes_OneofField() {}
+func (*TestAllTypes_OneofString) isTestAllTypes_OneofField() {}
+func (*TestAllTypes_OneofBytes) isTestAllTypes_OneofField() {}
+func (*TestAllTypes_OneofBool) isTestAllTypes_OneofField() {}
+func (*TestAllTypes_OneofUint64) isTestAllTypes_OneofField() {}
+func (*TestAllTypes_OneofFloat) isTestAllTypes_OneofField() {}
+func (*TestAllTypes_OneofDouble) isTestAllTypes_OneofField() {}
+func (*TestAllTypes_OneofEnum) isTestAllTypes_OneofField() {}
+
+func (m *TestAllTypes) GetOneofField() isTestAllTypes_OneofField {
+ if m != nil {
+ return m.OneofField
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalInt32() int32 {
+ if m != nil {
+ return m.OptionalInt32
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetOptionalInt64() int64 {
+ if m != nil {
+ return m.OptionalInt64
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetOptionalUint32() uint32 {
+ if m != nil {
+ return m.OptionalUint32
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetOptionalUint64() uint64 {
+ if m != nil {
+ return m.OptionalUint64
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetOptionalSint32() int32 {
+ if m != nil {
+ return m.OptionalSint32
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetOptionalSint64() int64 {
+ if m != nil {
+ return m.OptionalSint64
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetOptionalFixed32() uint32 {
+ if m != nil {
+ return m.OptionalFixed32
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetOptionalFixed64() uint64 {
+ if m != nil {
+ return m.OptionalFixed64
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetOptionalSfixed32() int32 {
+ if m != nil {
+ return m.OptionalSfixed32
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetOptionalSfixed64() int64 {
+ if m != nil {
+ return m.OptionalSfixed64
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetOptionalFloat() float32 {
+ if m != nil {
+ return m.OptionalFloat
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetOptionalDouble() float64 {
+ if m != nil {
+ return m.OptionalDouble
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetOptionalBool() bool {
+ if m != nil {
+ return m.OptionalBool
+ }
+ return false
+}
+
+func (m *TestAllTypes) GetOptionalString() string {
+ if m != nil {
+ return m.OptionalString
+ }
+ return ""
+}
+
+func (m *TestAllTypes) GetOptionalBytes() []byte {
+ if m != nil {
+ return m.OptionalBytes
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalNestedMessage() *TestAllTypes_NestedMessage {
+ if m != nil {
+ return m.OptionalNestedMessage
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalForeignMessage() *ForeignMessage {
+ if m != nil {
+ return m.OptionalForeignMessage
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalNestedEnum() TestAllTypes_NestedEnum {
+ if m != nil {
+ return m.OptionalNestedEnum
+ }
+ return TestAllTypes_FOO
+}
+
+func (m *TestAllTypes) GetOptionalForeignEnum() ForeignEnum {
+ if m != nil {
+ return m.OptionalForeignEnum
+ }
+ return ForeignEnum_FOREIGN_FOO
+}
+
+func (m *TestAllTypes) GetOptionalStringPiece() string {
+ if m != nil {
+ return m.OptionalStringPiece
+ }
+ return ""
+}
+
+func (m *TestAllTypes) GetOptionalCord() string {
+ if m != nil {
+ return m.OptionalCord
+ }
+ return ""
+}
+
+func (m *TestAllTypes) GetRecursiveMessage() *TestAllTypes {
+ if m != nil {
+ return m.RecursiveMessage
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedInt32() []int32 {
+ if m != nil {
+ return m.RepeatedInt32
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedInt64() []int64 {
+ if m != nil {
+ return m.RepeatedInt64
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedUint32() []uint32 {
+ if m != nil {
+ return m.RepeatedUint32
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedUint64() []uint64 {
+ if m != nil {
+ return m.RepeatedUint64
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedSint32() []int32 {
+ if m != nil {
+ return m.RepeatedSint32
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedSint64() []int64 {
+ if m != nil {
+ return m.RepeatedSint64
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedFixed32() []uint32 {
+ if m != nil {
+ return m.RepeatedFixed32
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedFixed64() []uint64 {
+ if m != nil {
+ return m.RepeatedFixed64
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedSfixed32() []int32 {
+ if m != nil {
+ return m.RepeatedSfixed32
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedSfixed64() []int64 {
+ if m != nil {
+ return m.RepeatedSfixed64
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedFloat() []float32 {
+ if m != nil {
+ return m.RepeatedFloat
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedDouble() []float64 {
+ if m != nil {
+ return m.RepeatedDouble
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedBool() []bool {
+ if m != nil {
+ return m.RepeatedBool
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedString() []string {
+ if m != nil {
+ return m.RepeatedString
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedBytes() [][]byte {
+ if m != nil {
+ return m.RepeatedBytes
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedNestedMessage() []*TestAllTypes_NestedMessage {
+ if m != nil {
+ return m.RepeatedNestedMessage
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedForeignMessage() []*ForeignMessage {
+ if m != nil {
+ return m.RepeatedForeignMessage
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedNestedEnum() []TestAllTypes_NestedEnum {
+ if m != nil {
+ return m.RepeatedNestedEnum
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedForeignEnum() []ForeignEnum {
+ if m != nil {
+ return m.RepeatedForeignEnum
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedStringPiece() []string {
+ if m != nil {
+ return m.RepeatedStringPiece
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedCord() []string {
+ if m != nil {
+ return m.RepeatedCord
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapInt32Int32() map[int32]int32 {
+ if m != nil {
+ return m.MapInt32Int32
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapInt64Int64() map[int64]int64 {
+ if m != nil {
+ return m.MapInt64Int64
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapUint32Uint32() map[uint32]uint32 {
+ if m != nil {
+ return m.MapUint32Uint32
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapUint64Uint64() map[uint64]uint64 {
+ if m != nil {
+ return m.MapUint64Uint64
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapSint32Sint32() map[int32]int32 {
+ if m != nil {
+ return m.MapSint32Sint32
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapSint64Sint64() map[int64]int64 {
+ if m != nil {
+ return m.MapSint64Sint64
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapFixed32Fixed32() map[uint32]uint32 {
+ if m != nil {
+ return m.MapFixed32Fixed32
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapFixed64Fixed64() map[uint64]uint64 {
+ if m != nil {
+ return m.MapFixed64Fixed64
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapSfixed32Sfixed32() map[int32]int32 {
+ if m != nil {
+ return m.MapSfixed32Sfixed32
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapSfixed64Sfixed64() map[int64]int64 {
+ if m != nil {
+ return m.MapSfixed64Sfixed64
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapInt32Float() map[int32]float32 {
+ if m != nil {
+ return m.MapInt32Float
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapInt32Double() map[int32]float64 {
+ if m != nil {
+ return m.MapInt32Double
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapBoolBool() map[bool]bool {
+ if m != nil {
+ return m.MapBoolBool
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapStringString() map[string]string {
+ if m != nil {
+ return m.MapStringString
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapStringBytes() map[string][]byte {
+ if m != nil {
+ return m.MapStringBytes
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapStringNestedMessage() map[string]*TestAllTypes_NestedMessage {
+ if m != nil {
+ return m.MapStringNestedMessage
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapStringForeignMessage() map[string]*ForeignMessage {
+ if m != nil {
+ return m.MapStringForeignMessage
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapStringNestedEnum() map[string]TestAllTypes_NestedEnum {
+ if m != nil {
+ return m.MapStringNestedEnum
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetMapStringForeignEnum() map[string]ForeignEnum {
+ if m != nil {
+ return m.MapStringForeignEnum
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOneofUint32() uint32 {
+ if x, ok := m.GetOneofField().(*TestAllTypes_OneofUint32); ok {
+ return x.OneofUint32
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetOneofNestedMessage() *TestAllTypes_NestedMessage {
+ if x, ok := m.GetOneofField().(*TestAllTypes_OneofNestedMessage); ok {
+ return x.OneofNestedMessage
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOneofString() string {
+ if x, ok := m.GetOneofField().(*TestAllTypes_OneofString); ok {
+ return x.OneofString
+ }
+ return ""
+}
+
+func (m *TestAllTypes) GetOneofBytes() []byte {
+ if x, ok := m.GetOneofField().(*TestAllTypes_OneofBytes); ok {
+ return x.OneofBytes
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOneofBool() bool {
+ if x, ok := m.GetOneofField().(*TestAllTypes_OneofBool); ok {
+ return x.OneofBool
+ }
+ return false
+}
+
+func (m *TestAllTypes) GetOneofUint64() uint64 {
+ if x, ok := m.GetOneofField().(*TestAllTypes_OneofUint64); ok {
+ return x.OneofUint64
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetOneofFloat() float32 {
+ if x, ok := m.GetOneofField().(*TestAllTypes_OneofFloat); ok {
+ return x.OneofFloat
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetOneofDouble() float64 {
+ if x, ok := m.GetOneofField().(*TestAllTypes_OneofDouble); ok {
+ return x.OneofDouble
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetOneofEnum() TestAllTypes_NestedEnum {
+ if x, ok := m.GetOneofField().(*TestAllTypes_OneofEnum); ok {
+ return x.OneofEnum
+ }
+ return TestAllTypes_FOO
+}
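
An illustrative sketch (same-package assumption, not part of the vendored file): setting the oneof_field member and dispatching on whichever variant is populated.

func exampleOneofField() {
	m := &TestAllTypes{
		OneofField: &TestAllTypes_OneofUint32{OneofUint32: 7},
	}
	switch v := m.GetOneofField().(type) {
	case *TestAllTypes_OneofUint32:
		_ = v.OneofUint32 // 7
	case *TestAllTypes_OneofString:
		_ = v.OneofString // not reached in this example
	case nil:
		// no variant set
	}
}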
+
+func (m *TestAllTypes) GetOptionalBoolWrapper() *google_protobuf5.BoolValue {
+ if m != nil {
+ return m.OptionalBoolWrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalInt32Wrapper() *google_protobuf5.Int32Value {
+ if m != nil {
+ return m.OptionalInt32Wrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalInt64Wrapper() *google_protobuf5.Int64Value {
+ if m != nil {
+ return m.OptionalInt64Wrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalUint32Wrapper() *google_protobuf5.UInt32Value {
+ if m != nil {
+ return m.OptionalUint32Wrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalUint64Wrapper() *google_protobuf5.UInt64Value {
+ if m != nil {
+ return m.OptionalUint64Wrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalFloatWrapper() *google_protobuf5.FloatValue {
+ if m != nil {
+ return m.OptionalFloatWrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalDoubleWrapper() *google_protobuf5.DoubleValue {
+ if m != nil {
+ return m.OptionalDoubleWrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalStringWrapper() *google_protobuf5.StringValue {
+ if m != nil {
+ return m.OptionalStringWrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalBytesWrapper() *google_protobuf5.BytesValue {
+ if m != nil {
+ return m.OptionalBytesWrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedBoolWrapper() []*google_protobuf5.BoolValue {
+ if m != nil {
+ return m.RepeatedBoolWrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedInt32Wrapper() []*google_protobuf5.Int32Value {
+ if m != nil {
+ return m.RepeatedInt32Wrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedInt64Wrapper() []*google_protobuf5.Int64Value {
+ if m != nil {
+ return m.RepeatedInt64Wrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedUint32Wrapper() []*google_protobuf5.UInt32Value {
+ if m != nil {
+ return m.RepeatedUint32Wrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedUint64Wrapper() []*google_protobuf5.UInt64Value {
+ if m != nil {
+ return m.RepeatedUint64Wrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedFloatWrapper() []*google_protobuf5.FloatValue {
+ if m != nil {
+ return m.RepeatedFloatWrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedDoubleWrapper() []*google_protobuf5.DoubleValue {
+ if m != nil {
+ return m.RepeatedDoubleWrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedStringWrapper() []*google_protobuf5.StringValue {
+ if m != nil {
+ return m.RepeatedStringWrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedBytesWrapper() []*google_protobuf5.BytesValue {
+ if m != nil {
+ return m.RepeatedBytesWrapper
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalDuration() *google_protobuf1.Duration {
+ if m != nil {
+ return m.OptionalDuration
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalTimestamp() *google_protobuf4.Timestamp {
+ if m != nil {
+ return m.OptionalTimestamp
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalFieldMask() *google_protobuf2.FieldMask {
+ if m != nil {
+ return m.OptionalFieldMask
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalStruct() *google_protobuf3.Struct {
+ if m != nil {
+ return m.OptionalStruct
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalAny() *google_protobuf.Any {
+ if m != nil {
+ return m.OptionalAny
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetOptionalValue() *google_protobuf3.Value {
+ if m != nil {
+ return m.OptionalValue
+ }
+ return nil
+}
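
The well-known-type fields above take pointer messages rather than plain Go values. A hedged sketch, written as a separate file in the same package and assuming the github.com/golang/protobuf/ptypes helpers (which are assumed to return the same *Duration and *Timestamp types aliased as google_protobuf1 and google_protobuf4 in this file) are vendored alongside this code:

import (
	"time"

	"github.com/golang/protobuf/ptypes"
)

func exampleWellKnown() (*TestAllTypes, error) {
	// Assumption: ptypes.TimestampProto / ptypes.DurationProto convert native
	// Go values to the well-known-type messages used by the fields above.
	ts, err := ptypes.TimestampProto(time.Unix(0, 0))
	if err != nil {
		return nil, err
	}
	return &TestAllTypes{
		OptionalDuration:  ptypes.DurationProto(5 * time.Second),
		OptionalTimestamp: ts,
	}, nil
}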
+
+func (m *TestAllTypes) GetRepeatedDuration() []*google_protobuf1.Duration {
+ if m != nil {
+ return m.RepeatedDuration
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedTimestamp() []*google_protobuf4.Timestamp {
+ if m != nil {
+ return m.RepeatedTimestamp
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedFieldmask() []*google_protobuf2.FieldMask {
+ if m != nil {
+ return m.RepeatedFieldmask
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedStruct() []*google_protobuf3.Struct {
+ if m != nil {
+ return m.RepeatedStruct
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedAny() []*google_protobuf.Any {
+ if m != nil {
+ return m.RepeatedAny
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetRepeatedValue() []*google_protobuf3.Value {
+ if m != nil {
+ return m.RepeatedValue
+ }
+ return nil
+}
+
+func (m *TestAllTypes) GetFieldname1() int32 {
+ if m != nil {
+ return m.Fieldname1
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetFieldName2() int32 {
+ if m != nil {
+ return m.FieldName2
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetXFieldName3() int32 {
+ if m != nil {
+ return m.XFieldName3
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetField_Name4_() int32 {
+ if m != nil {
+ return m.Field_Name4_
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetField0Name5() int32 {
+ if m != nil {
+ return m.Field0Name5
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetField_0Name6() int32 {
+ if m != nil {
+ return m.Field_0Name6
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetFieldName7() int32 {
+ if m != nil {
+ return m.FieldName7
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetFieldName8() int32 {
+ if m != nil {
+ return m.FieldName8
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetField_Name9() int32 {
+ if m != nil {
+ return m.Field_Name9
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetField_Name10() int32 {
+ if m != nil {
+ return m.Field_Name10
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetFIELD_NAME11() int32 {
+ if m != nil {
+ return m.FIELD_NAME11
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetFIELDName12() int32 {
+ if m != nil {
+ return m.FIELDName12
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetXFieldName13() int32 {
+ if m != nil {
+ return m.XFieldName13
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetX_FieldName14() int32 {
+ if m != nil {
+ return m.X_FieldName14
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetField_Name15() int32 {
+ if m != nil {
+ return m.Field_Name15
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetField__Name16() int32 {
+ if m != nil {
+ return m.Field__Name16
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetFieldName17__() int32 {
+ if m != nil {
+ return m.FieldName17__
+ }
+ return 0
+}
+
+func (m *TestAllTypes) GetFieldName18__() int32 {
+ if m != nil {
+ return m.FieldName18__
+ }
+ return 0
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*TestAllTypes) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _TestAllTypes_OneofMarshaler, _TestAllTypes_OneofUnmarshaler, _TestAllTypes_OneofSizer, []interface{}{
+ (*TestAllTypes_OneofUint32)(nil),
+ (*TestAllTypes_OneofNestedMessage)(nil),
+ (*TestAllTypes_OneofString)(nil),
+ (*TestAllTypes_OneofBytes)(nil),
+ (*TestAllTypes_OneofBool)(nil),
+ (*TestAllTypes_OneofUint64)(nil),
+ (*TestAllTypes_OneofFloat)(nil),
+ (*TestAllTypes_OneofDouble)(nil),
+ (*TestAllTypes_OneofEnum)(nil),
+ }
+}
+
+func _TestAllTypes_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*TestAllTypes)
+ // oneof_field
+ switch x := m.OneofField.(type) {
+ case *TestAllTypes_OneofUint32:
+ b.EncodeVarint(111<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.OneofUint32))
+ case *TestAllTypes_OneofNestedMessage:
+ b.EncodeVarint(112<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.OneofNestedMessage); err != nil {
+ return err
+ }
+ case *TestAllTypes_OneofString:
+ b.EncodeVarint(113<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.OneofString)
+ case *TestAllTypes_OneofBytes:
+ b.EncodeVarint(114<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.OneofBytes)
+ case *TestAllTypes_OneofBool:
+ t := uint64(0)
+ if x.OneofBool {
+ t = 1
+ }
+ b.EncodeVarint(115<<3 | proto.WireVarint)
+ b.EncodeVarint(t)
+ case *TestAllTypes_OneofUint64:
+ b.EncodeVarint(116<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.OneofUint64))
+ case *TestAllTypes_OneofFloat:
+ b.EncodeVarint(117<<3 | proto.WireFixed32)
+ b.EncodeFixed32(uint64(math.Float32bits(x.OneofFloat)))
+ case *TestAllTypes_OneofDouble:
+ b.EncodeVarint(118<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.OneofDouble))
+ case *TestAllTypes_OneofEnum:
+ b.EncodeVarint(119<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.OneofEnum))
+ case nil:
+ default:
+ return fmt.Errorf("TestAllTypes.OneofField has unexpected type %T", x)
+ }
+ return nil
+}
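
A side note as a sketch (illustrative, same-package assumption; math is already imported by this file): the float and double cases above serialize the raw IEEE-754 bit pattern as fixed32/fixed64, which is why the sizer below charges a flat 4 or 8 bytes for them.

func exampleFloatBits() {
	// 1.5 as an IEEE-754 float32 is 0x3FC00000; those 4 bytes follow the
	// (117<<3)|proto.WireFixed32 key on the wire.
	_ = math.Float32bits(1.5) // 0x3fc00000
}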
+
+func _TestAllTypes_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*TestAllTypes)
+ switch tag {
+ case 111: // oneof_field.oneof_uint32
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.OneofField = &TestAllTypes_OneofUint32{uint32(x)}
+ return true, err
+ case 112: // oneof_field.oneof_nested_message
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(TestAllTypes_NestedMessage)
+ err := b.DecodeMessage(msg)
+ m.OneofField = &TestAllTypes_OneofNestedMessage{msg}
+ return true, err
+ case 113: // oneof_field.oneof_string
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.OneofField = &TestAllTypes_OneofString{x}
+ return true, err
+ case 114: // oneof_field.oneof_bytes
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.OneofField = &TestAllTypes_OneofBytes{x}
+ return true, err
+ case 115: // oneof_field.oneof_bool
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.OneofField = &TestAllTypes_OneofBool{x != 0}
+ return true, err
+ case 116: // oneof_field.oneof_uint64
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.OneofField = &TestAllTypes_OneofUint64{x}
+ return true, err
+ case 117: // oneof_field.oneof_float
+ if wire != proto.WireFixed32 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed32()
+ m.OneofField = &TestAllTypes_OneofFloat{math.Float32frombits(uint32(x))}
+ return true, err
+ case 118: // oneof_field.oneof_double
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.OneofField = &TestAllTypes_OneofDouble{math.Float64frombits(x)}
+ return true, err
+ case 119: // oneof_field.oneof_enum
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.OneofField = &TestAllTypes_OneofEnum{TestAllTypes_NestedEnum(x)}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _TestAllTypes_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*TestAllTypes)
+ // oneof_field
+ switch x := m.OneofField.(type) {
+ case *TestAllTypes_OneofUint32:
+ n += proto.SizeVarint(111<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.OneofUint32))
+ case *TestAllTypes_OneofNestedMessage:
+ s := proto.Size(x.OneofNestedMessage)
+ n += proto.SizeVarint(112<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *TestAllTypes_OneofString:
+ n += proto.SizeVarint(113<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.OneofString)))
+ n += len(x.OneofString)
+ case *TestAllTypes_OneofBytes:
+ n += proto.SizeVarint(114<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.OneofBytes)))
+ n += len(x.OneofBytes)
+ case *TestAllTypes_OneofBool:
+ n += proto.SizeVarint(115<<3 | proto.WireVarint)
+ n += 1
+ case *TestAllTypes_OneofUint64:
+ n += proto.SizeVarint(116<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.OneofUint64))
+ case *TestAllTypes_OneofFloat:
+ n += proto.SizeVarint(117<<3 | proto.WireFixed32)
+ n += 4
+ case *TestAllTypes_OneofDouble:
+ n += proto.SizeVarint(118<<3 | proto.WireFixed64)
+ n += 8
+ case *TestAllTypes_OneofEnum:
+ n += proto.SizeVarint(119<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.OneofEnum))
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
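
A worked size for the string case above (illustrative, same-package assumption):

func exampleOneofSize() int {
	m := &TestAllTypes{OneofField: &TestAllTypes_OneofString{OneofString: "hello"}}
	// key varint (113<<3|WireBytes = 906, 2 bytes) + length varint (1 byte)
	// + 5 payload bytes = 8.
	return _TestAllTypes_OneofSizer(m) // 8
}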
+
+type TestAllTypes_NestedMessage struct {
+ A int32 `protobuf:"varint,1,opt,name=a" json:"a,omitempty"`
+ Corecursive *TestAllTypes `protobuf:"bytes,2,opt,name=corecursive" json:"corecursive,omitempty"`
+}
+
+func (m *TestAllTypes_NestedMessage) Reset() { *m = TestAllTypes_NestedMessage{} }
+func (m *TestAllTypes_NestedMessage) String() string { return proto.CompactTextString(m) }
+func (*TestAllTypes_NestedMessage) ProtoMessage() {}
+func (*TestAllTypes_NestedMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
+
+func (m *TestAllTypes_NestedMessage) GetA() int32 {
+ if m != nil {
+ return m.A
+ }
+ return 0
+}
+
+func (m *TestAllTypes_NestedMessage) GetCorecursive() *TestAllTypes {
+ if m != nil {
+ return m.Corecursive
+ }
+ return nil
+}
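
The Corecursive field makes NestedMessage mutually recursive with TestAllTypes; a quick sketch (illustrative, same-package assumption):

func exampleNested() *TestAllTypes_NestedMessage {
	return &TestAllTypes_NestedMessage{
		A: 1,
		// Points back at a full TestAllTypes, so arbitrarily deep trees are possible.
		Corecursive: &TestAllTypes{OptionalInt32: 2},
	}
}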
+
+type ForeignMessage struct {
+ C int32 `protobuf:"varint,1,opt,name=c" json:"c,omitempty"`
+}
+
+func (m *ForeignMessage) Reset() { *m = ForeignMessage{} }
+func (m *ForeignMessage) String() string { return proto.CompactTextString(m) }
+func (*ForeignMessage) ProtoMessage() {}
+func (*ForeignMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *ForeignMessage) GetC() int32 {
+ if m != nil {
+ return m.C
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*ConformanceRequest)(nil), "conformance.ConformanceRequest")
+ proto.RegisterType((*ConformanceResponse)(nil), "conformance.ConformanceResponse")
+ proto.RegisterType((*TestAllTypes)(nil), "conformance.TestAllTypes")
+ proto.RegisterType((*TestAllTypes_NestedMessage)(nil), "conformance.TestAllTypes.NestedMessage")
+ proto.RegisterType((*ForeignMessage)(nil), "conformance.ForeignMessage")
+ proto.RegisterEnum("conformance.WireFormat", WireFormat_name, WireFormat_value)
+ proto.RegisterEnum("conformance.ForeignEnum", ForeignEnum_name, ForeignEnum_value)
+ proto.RegisterEnum("conformance.TestAllTypes_NestedEnum", TestAllTypes_NestedEnum_name, TestAllTypes_NestedEnum_value)
+}
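
The RegisterType/RegisterEnum calls above publish these types under their fully-qualified protobuf names, which the proto package can resolve at runtime; a same-package sketch (illustrative only):

func exampleRegistry() {
	t := proto.MessageType("conformance.TestAllTypes")   // reflect.Type of *TestAllTypes
	vals := proto.EnumValueMap("conformance.WireFormat") // enum value name -> number
	_, _ = t, vals
}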
+
+func init() { proto.RegisterFile("conformance_proto/conformance.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 2737 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x5a, 0xd9, 0x72, 0xdb, 0xc8,
+ 0xd5, 0x16, 0x08, 0x59, 0x4b, 0x93, 0x92, 0xa8, 0xd6, 0xd6, 0x96, 0x5d, 0x63, 0x58, 0xb2, 0x7f,
+ 0xd3, 0xf6, 0x8c, 0xac, 0x05, 0x86, 0x65, 0xcf, 0x3f, 0x8e, 0x45, 0x9b, 0xb4, 0xe4, 0x8c, 0x25,
+ 0x17, 0x64, 0x8d, 0xab, 0x9c, 0x0b, 0x06, 0xa6, 0x20, 0x15, 0xc7, 0x24, 0xc1, 0x01, 0x48, 0x4f,
+ 0x94, 0xcb, 0xbc, 0x41, 0xf6, 0x7d, 0xbd, 0xcf, 0x7a, 0x93, 0xa4, 0x92, 0xab, 0x54, 0x6e, 0xb2,
+ 0x27, 0x95, 0x3d, 0x79, 0x85, 0xbc, 0x43, 0x52, 0xbd, 0xa2, 0xbb, 0x01, 0x50, 0xf4, 0x54, 0x0d,
+ 0x25, 0x1e, 0x7c, 0xfd, 0x9d, 0xd3, 0xe7, 0x1c, 0x7c, 0x2d, 0x1c, 0x18, 0x2c, 0xd7, 0x83, 0xf6,
+ 0x51, 0x10, 0xb6, 0xbc, 0x76, 0xdd, 0xaf, 0x75, 0xc2, 0xa0, 0x1b, 0xdc, 0x90, 0x2c, 0x2b, 0xc4,
+ 0x02, 0xf3, 0x92, 0x69, 0xf1, 0xec, 0x71, 0x10, 0x1c, 0x37, 0xfd, 0x1b, 0xe4, 0xd2, 0x8b, 0xde,
+ 0xd1, 0x0d, 0xaf, 0x7d, 0x42, 0x71, 0x8b, 0x6f, 0xe8, 0x97, 0x0e, 0x7b, 0xa1, 0xd7, 0x6d, 0x04,
+ 0x6d, 0x76, 0xdd, 0xd2, 0xaf, 0x1f, 0x35, 0xfc, 0xe6, 0x61, 0xad, 0xe5, 0x45, 0x2f, 0x19, 0xe2,
+ 0xbc, 0x8e, 0x88, 0xba, 0x61, 0xaf, 0xde, 0x65, 0x57, 0x2f, 0xe8, 0x57, 0xbb, 0x8d, 0x96, 0x1f,
+ 0x75, 0xbd, 0x56, 0x27, 0x2b, 0x80, 0x0f, 0x43, 0xaf, 0xd3, 0xf1, 0xc3, 0x88, 0x5e, 0x5f, 0xfa,
+ 0x85, 0x01, 0xe0, 0xfd, 0x78, 0x2f, 0xae, 0xff, 0x41, 0xcf, 0x8f, 0xba, 0xf0, 0x3a, 0x28, 0xf2,
+ 0x15, 0xb5, 0x8e, 0x77, 0xd2, 0x0c, 0xbc, 0x43, 0x64, 0x58, 0x46, 0xa9, 0xb0, 0x3d, 0xe4, 0x4e,
+ 0xf1, 0x2b, 0x4f, 0xe8, 0x05, 0xb8, 0x0c, 0x0a, 0xef, 0x47, 0x41, 0x5b, 0x00, 0x73, 0x96, 0x51,
+ 0x1a, 0xdf, 0x1e, 0x72, 0xf3, 0xd8, 0xca, 0x41, 0x7b, 0x60, 0x21, 0xa4, 0xe4, 0xfe, 0x61, 0x2d,
+ 0xe8, 0x75, 0x3b, 0xbd, 0x6e, 0x8d, 0x78, 0xed, 0x22, 0xd3, 0x32, 0x4a, 0x93, 0xeb, 0x0b, 0x2b,
+ 0x72, 0x9a, 0x9f, 0x35, 0x42, 0xbf, 0x4a, 0x2e, 0xbb, 0x73, 0x62, 0xdd, 0x1e, 0x59, 0x46, 0xcd,
+ 0xe5, 0x71, 0x30, 0xca, 0x1c, 0x2e, 0x7d, 0x2a, 0x07, 0x66, 0x94, 0x4d, 0x44, 0x9d, 0xa0, 0x1d,
+ 0xf9, 0xf0, 0x22, 0xc8, 0x77, 0xbc, 0x30, 0xf2, 0x6b, 0x7e, 0x18, 0x06, 0x21, 0xd9, 0x00, 0x8e,
+ 0x0b, 0x10, 0x63, 0x05, 0xdb, 0xe0, 0x55, 0x30, 0x15, 0xf9, 0x61, 0xc3, 0x6b, 0x36, 0x3e, 0xc9,
+ 0x61, 0x23, 0x0c, 0x36, 0x29, 0x2e, 0x50, 0xe8, 0x65, 0x30, 0x11, 0xf6, 0xda, 0x38, 0xc1, 0x0c,
+ 0xc8, 0xf7, 0x59, 0x60, 0x66, 0x0a, 0x4b, 0x4b, 0x9d, 0x39, 0x68, 0xea, 0x86, 0xd3, 0x52, 0xb7,
+ 0x08, 0x46, 0xa3, 0x97, 0x8d, 0x4e, 0xc7, 0x3f, 0x44, 0x67, 0xd8, 0x75, 0x6e, 0x28, 0x8f, 0x81,
+ 0x91, 0xd0, 0x8f, 0x7a, 0xcd, 0xee, 0xd2, 0x7f, 0xaa, 0xa0, 0xf0, 0xd4, 0x8f, 0xba, 0x5b, 0xcd,
+ 0xe6, 0xd3, 0x93, 0x8e, 0x1f, 0xc1, 0xcb, 0x60, 0x32, 0xe8, 0xe0, 0x5e, 0xf3, 0x9a, 0xb5, 0x46,
+ 0xbb, 0xbb, 0xb1, 0x4e, 0x12, 0x70, 0xc6, 0x9d, 0xe0, 0xd6, 0x1d, 0x6c, 0xd4, 0x61, 0x8e, 0x4d,
+ 0xf6, 0x65, 0x2a, 0x30, 0xc7, 0x86, 0x57, 0xc0, 0x94, 0x80, 0xf5, 0x28, 0x1d, 0xde, 0xd5, 0x84,
+ 0x2b, 0x56, 0x1f, 0x10, 0x6b, 0x02, 0xe8, 0xd8, 0x64, 0x57, 0xc3, 0x2a, 0x50, 0x63, 0x8c, 0x28,
+ 0x23, 0xde, 0xde, 0x74, 0x0c, 0xdc, 0x4f, 0x32, 0x46, 0x94, 0x11, 0xd7, 0x08, 0xaa, 0x40, 0xc7,
+ 0x86, 0x57, 0x41, 0x51, 0x00, 0x8f, 0x1a, 0x9f, 0xf0, 0x0f, 0x37, 0xd6, 0xd1, 0xa8, 0x65, 0x94,
+ 0x46, 0x5d, 0x41, 0x50, 0xa5, 0xe6, 0x24, 0xd4, 0xb1, 0xd1, 0x98, 0x65, 0x94, 0x46, 0x34, 0xa8,
+ 0x63, 0xc3, 0xeb, 0x60, 0x3a, 0x76, 0xcf, 0x69, 0xc7, 0x2d, 0xa3, 0x34, 0xe5, 0x0a, 0x8e, 0x7d,
+ 0x66, 0x4f, 0x01, 0x3b, 0x36, 0x02, 0x96, 0x51, 0x2a, 0xea, 0x60, 0xc7, 0x56, 0x52, 0x7f, 0xd4,
+ 0x0c, 0xbc, 0x2e, 0xca, 0x5b, 0x46, 0x29, 0x17, 0xa7, 0xbe, 0x8a, 0x8d, 0xca, 0xfe, 0x0f, 0x83,
+ 0xde, 0x8b, 0xa6, 0x8f, 0x0a, 0x96, 0x51, 0x32, 0xe2, 0xfd, 0x3f, 0x20, 0x56, 0xb8, 0x0c, 0xc4,
+ 0xca, 0xda, 0x8b, 0x20, 0x68, 0xa2, 0x09, 0xcb, 0x28, 0x8d, 0xb9, 0x05, 0x6e, 0x2c, 0x07, 0x41,
+ 0x53, 0xcd, 0x66, 0x37, 0x6c, 0xb4, 0x8f, 0xd1, 0x24, 0xee, 0x2a, 0x29, 0x9b, 0xc4, 0xaa, 0x44,
+ 0xf7, 0xe2, 0xa4, 0xeb, 0x47, 0x68, 0x0a, 0xb7, 0x71, 0x1c, 0x5d, 0x19, 0x1b, 0x61, 0x0d, 0x2c,
+ 0x08, 0x58, 0x9b, 0xde, 0xde, 0x2d, 0x3f, 0x8a, 0xbc, 0x63, 0x1f, 0x41, 0xcb, 0x28, 0xe5, 0xd7,
+ 0xaf, 0x28, 0x37, 0xb6, 0xdc, 0xa2, 0x2b, 0xbb, 0x04, 0xff, 0x98, 0xc2, 0xdd, 0x39, 0xce, 0xa3,
+ 0x98, 0xe1, 0x01, 0x40, 0x71, 0x96, 0x82, 0xd0, 0x6f, 0x1c, 0xb7, 0x85, 0x87, 0x19, 0xe2, 0xe1,
+ 0x9c, 0xe2, 0xa1, 0x4a, 0x31, 0x9c, 0x75, 0x5e, 0x24, 0x53, 0xb1, 0xc3, 0xf7, 0xc0, 0xac, 0x1e,
+ 0xb7, 0xdf, 0xee, 0xb5, 0xd0, 0x1c, 0x51, 0xa3, 0x4b, 0xa7, 0x05, 0x5d, 0x69, 0xf7, 0x5a, 0x2e,
+ 0x54, 0x23, 0xc6, 0x36, 0xf8, 0x2e, 0x98, 0x4b, 0x84, 0x4b, 0x88, 0xe7, 0x09, 0x31, 0x4a, 0x8b,
+ 0x95, 0x90, 0xcd, 0x68, 0x81, 0x12, 0x36, 0x47, 0x62, 0xa3, 0xd5, 0xaa, 0x75, 0x1a, 0x7e, 0xdd,
+ 0x47, 0x08, 0xd7, 0xac, 0x9c, 0x1b, 0xcb, 0xc5, 0xeb, 0x68, 0xdd, 0x9e, 0xe0, 0xcb, 0xf0, 0x8a,
+ 0xd4, 0x0a, 0xf5, 0x20, 0x3c, 0x44, 0x67, 0x19, 0xde, 0x88, 0xdb, 0xe1, 0x7e, 0x10, 0x1e, 0xc2,
+ 0x2a, 0x98, 0x0e, 0xfd, 0x7a, 0x2f, 0x8c, 0x1a, 0xaf, 0x7c, 0x91, 0xd6, 0x73, 0x24, 0xad, 0x67,
+ 0x33, 0x73, 0xe0, 0x16, 0xc5, 0x1a, 0x9e, 0xce, 0xcb, 0x60, 0x32, 0xf4, 0x3b, 0xbe, 0x87, 0xf3,
+ 0x48, 0x6f, 0xe6, 0x0b, 0x96, 0x89, 0xd5, 0x86, 0x5b, 0x85, 0xda, 0xc8, 0x30, 0xc7, 0x46, 0x96,
+ 0x65, 0x62, 0xb5, 0x91, 0x60, 0x54, 0x1b, 0x04, 0x8c, 0xa9, 0xcd, 0x45, 0xcb, 0xc4, 0x6a, 0xc3,
+ 0xcd, 0xb1, 0xda, 0x28, 0x40, 0xc7, 0x46, 0x4b, 0x96, 0x89, 0xd5, 0x46, 0x06, 0x6a, 0x8c, 0x4c,
+ 0x6d, 0x96, 0x2d, 0x13, 0xab, 0x0d, 0x37, 0xef, 0x27, 0x19, 0x99, 0xda, 0x5c, 0xb2, 0x4c, 0xac,
+ 0x36, 0x32, 0x90, 0xaa, 0x8d, 0x00, 0x72, 0x59, 0xb8, 0x6c, 0x99, 0x58, 0x6d, 0xb8, 0x5d, 0x52,
+ 0x1b, 0x15, 0xea, 0xd8, 0xe8, 0xff, 0x2c, 0x13, 0xab, 0x8d, 0x02, 0xa5, 0x6a, 0x13, 0xbb, 0xe7,
+ 0xb4, 0x57, 0x2c, 0x13, 0xab, 0x8d, 0x08, 0x40, 0x52, 0x1b, 0x0d, 0xec, 0xd8, 0xa8, 0x64, 0x99,
+ 0x58, 0x6d, 0x54, 0x30, 0x55, 0x9b, 0x38, 0x08, 0xa2, 0x36, 0x57, 0x2d, 0x13, 0xab, 0x8d, 0x08,
+ 0x81, 0xab, 0x8d, 0x80, 0x31, 0xb5, 0xb9, 0x66, 0x99, 0x58, 0x6d, 0xb8, 0x39, 0x56, 0x1b, 0x01,
+ 0x24, 0x6a, 0x73, 0xdd, 0x32, 0xb1, 0xda, 0x70, 0x23, 0x57, 0x9b, 0x38, 0x42, 0xaa, 0x36, 0x6f,
+ 0x5a, 0x26, 0x56, 0x1b, 0x11, 0x9f, 0x50, 0x9b, 0x98, 0x8d, 0xa8, 0xcd, 0x5b, 0x96, 0x89, 0xd5,
+ 0x46, 0xd0, 0x71, 0xb5, 0x11, 0x30, 0x4d, 0x6d, 0x56, 0x2d, 0xf3, 0xb5, 0xd4, 0x86, 0xf3, 0x24,
+ 0xd4, 0x26, 0xce, 0x92, 0xa6, 0x36, 0x6b, 0xc4, 0x43, 0x7f, 0xb5, 0x11, 0xc9, 0x4c, 0xa8, 0x8d,
+ 0x1e, 0x37, 0x11, 0x85, 0x0d, 0xcb, 0x1c, 0x5c, 0x6d, 0xd4, 0x88, 0xb9, 0xda, 0x24, 0xc2, 0x25,
+ 0xc4, 0x36, 0x21, 0xee, 0xa3, 0x36, 0x5a, 0xa0, 0x5c, 0x6d, 0xb4, 0x6a, 0x31, 0xb5, 0x71, 0x70,
+ 0xcd, 0xa8, 0xda, 0xa8, 0x75, 0x13, 0x6a, 0x23, 0xd6, 0x11, 0xb5, 0xb9, 0xc5, 0xf0, 0x46, 0xdc,
+ 0x0e, 0x44, 0x6d, 0x9e, 0x82, 0xa9, 0x96, 0xd7, 0xa1, 0x02, 0xc1, 0x64, 0x62, 0x93, 0x24, 0xf5,
+ 0xcd, 0xec, 0x0c, 0x3c, 0xf6, 0x3a, 0x44, 0x3b, 0xc8, 0x47, 0xa5, 0xdd, 0x0d, 0x4f, 0xdc, 0x89,
+ 0x96, 0x6c, 0x93, 0x58, 0x1d, 0x9b, 0xa9, 0xca, 0xed, 0xc1, 0x58, 0x1d, 0x9b, 0x7c, 0x28, 0xac,
+ 0xcc, 0x06, 0x9f, 0x83, 0x69, 0xcc, 0x4a, 0xe5, 0x87, 0xab, 0xd0, 0x1d, 0xc2, 0xbb, 0xd2, 0x97,
+ 0x97, 0x4a, 0x13, 0xfd, 0xa4, 0xcc, 0x38, 0x3c, 0xd9, 0x2a, 0x73, 0x3b, 0x36, 0x17, 0xae, 0xb7,
+ 0x07, 0xe4, 0x76, 0x6c, 0xfa, 0xa9, 0x72, 0x73, 0x2b, 0xe7, 0xa6, 0x22, 0xc7, 0xb5, 0xee, 0xff,
+ 0x07, 0xe0, 0xa6, 0x02, 0xb8, 0xaf, 0xc5, 0x2d, 0x5b, 0x65, 0x6e, 0xc7, 0xe6, 0xf2, 0xf8, 0xce,
+ 0x80, 0xdc, 0x8e, 0xbd, 0xaf, 0xc5, 0x2d, 0x5b, 0xe1, 0xc7, 0xc1, 0x0c, 0xe6, 0x66, 0xda, 0x26,
+ 0x24, 0xf5, 0x2e, 0x61, 0x5f, 0xed, 0xcb, 0xce, 0x74, 0x96, 0xfd, 0xa0, 0xfc, 0x38, 0x50, 0xd5,
+ 0xae, 0x78, 0x70, 0x6c, 0xa1, 0xc4, 0x1f, 0x19, 0xd4, 0x83, 0x63, 0xb3, 0x1f, 0x9a, 0x07, 0x61,
+ 0x87, 0x47, 0x60, 0x8e, 0xe4, 0x87, 0x6f, 0x42, 0x28, 0xf8, 0x3d, 0xe2, 0x63, 0xbd, 0x7f, 0x8e,
+ 0x18, 0x98, 0xff, 0xa4, 0x5e, 0x70, 0xc8, 0xfa, 0x15, 0xd5, 0x0f, 0xae, 0x04, 0xdf, 0xcb, 0xd6,
+ 0xc0, 0x7e, 0x1c, 0x9b, 0xff, 0xd4, 0xfd, 0xc4, 0x57, 0xd4, 0xfb, 0x95, 0x1e, 0x1a, 0xe5, 0x41,
+ 0xef, 0x57, 0x72, 0x9c, 0x68, 0xf7, 0x2b, 0x3d, 0x62, 0x9e, 0x81, 0x62, 0xcc, 0xca, 0xce, 0x98,
+ 0xfb, 0x84, 0xf6, 0xad, 0xd3, 0x69, 0xe9, 0xe9, 0x43, 0x79, 0x27, 0x5b, 0x8a, 0x11, 0xee, 0x02,
+ 0xec, 0x89, 0x9c, 0x46, 0xf4, 0x48, 0x7a, 0x40, 0x58, 0xaf, 0xf5, 0x65, 0xc5, 0xe7, 0x14, 0xfe,
+ 0x9f, 0x52, 0xe6, 0x5b, 0xb1, 0x45, 0xb4, 0x3b, 0x95, 0x42, 0x76, 0x7e, 0x55, 0x06, 0x69, 0x77,
+ 0x02, 0xa5, 0x9f, 0x52, 0xbb, 0x4b, 0x56, 0x9e, 0x04, 0xc6, 0x4d, 0x8f, 0xbc, 0xea, 0x00, 0x49,
+ 0xa0, 0xcb, 0xc9, 0x69, 0x18, 0x27, 0x41, 0x32, 0xc2, 0x0e, 0x38, 0x2b, 0x11, 0x6b, 0x87, 0xe4,
+ 0x43, 0xe2, 0xe1, 0xe6, 0x00, 0x1e, 0x94, 0x63, 0x91, 0x7a, 0x9a, 0x6f, 0xa5, 0x5e, 0x84, 0x11,
+ 0x58, 0x94, 0x3c, 0xea, 0xa7, 0xe6, 0x36, 0x71, 0xe9, 0x0c, 0xe0, 0x52, 0x3d, 0x33, 0xa9, 0xcf,
+ 0x85, 0x56, 0xfa, 0x55, 0x78, 0x0c, 0xe6, 0x93, 0xdb, 0x24, 0x47, 0xdf, 0xce, 0x20, 0xf7, 0x80,
+ 0xb4, 0x0d, 0x7c, 0xf4, 0x49, 0xf7, 0x80, 0x76, 0x05, 0xbe, 0x0f, 0x16, 0x52, 0x76, 0x47, 0x3c,
+ 0x3d, 0x22, 0x9e, 0x36, 0x06, 0xdf, 0x5a, 0xec, 0x6a, 0xb6, 0x95, 0x72, 0x09, 0x2e, 0x83, 0x42,
+ 0xd0, 0xf6, 0x83, 0x23, 0x7e, 0xdc, 0x04, 0xf8, 0x11, 0x7b, 0x7b, 0xc8, 0xcd, 0x13, 0x2b, 0x3b,
+ 0x3c, 0x3e, 0x06, 0x66, 0x29, 0x48, 0xab, 0x6d, 0xe7, 0xb5, 0x1e, 0xb7, 0xb6, 0x87, 0x5c, 0x48,
+ 0x68, 0xd4, 0x5a, 0x8a, 0x08, 0x58, 0xb7, 0x7f, 0xc0, 0x27, 0x12, 0xc4, 0xca, 0x7a, 0xf7, 0x22,
+ 0xa0, 0x5f, 0x59, 0xdb, 0x86, 0x6c, 0xbc, 0x01, 0x88, 0x91, 0x76, 0xe1, 0x05, 0x00, 0x18, 0x04,
+ 0xdf, 0x87, 0x11, 0x7e, 0x10, 0xdd, 0x1e, 0x72, 0xc7, 0x29, 0x02, 0xdf, 0x5b, 0xca, 0x56, 0x1d,
+ 0x1b, 0x75, 0x2d, 0xa3, 0x34, 0xac, 0x6c, 0xd5, 0xb1, 0x63, 0x47, 0x54, 0x7b, 0x7a, 0xf8, 0xf1,
+ 0x58, 0x38, 0xa2, 0x62, 0x22, 0x78, 0x98, 0x90, 0xbc, 0xc2, 0x8f, 0xc6, 0x82, 0x87, 0x09, 0x43,
+ 0x85, 0x47, 0x43, 0xca, 0xf6, 0xe1, 0xe0, 0x8f, 0x78, 0x22, 0x66, 0x52, 0x9e, 0x3d, 0xe9, 0x69,
+ 0x8c, 0x88, 0x0c, 0x9b, 0xa6, 0xa1, 0x5f, 0x19, 0x24, 0xf7, 0x8b, 0x2b, 0x74, 0xdc, 0xb6, 0xc2,
+ 0xe7, 0x3c, 0x2b, 0x78, 0xab, 0xef, 0x79, 0xcd, 0x9e, 0x1f, 0x3f, 0xa6, 0x61, 0xd3, 0x33, 0xba,
+ 0x0e, 0xba, 0x60, 0x5e, 0x9d, 0xd1, 0x08, 0xc6, 0x5f, 0x1b, 0xec, 0xd1, 0x56, 0x67, 0x24, 0x7a,
+ 0x47, 0x29, 0x67, 0x95, 0x49, 0x4e, 0x06, 0xa7, 0x63, 0x0b, 0xce, 0xdf, 0xf4, 0xe1, 0x74, 0xec,
+ 0x24, 0xa7, 0x63, 0x73, 0xce, 0x03, 0xe9, 0x21, 0xbf, 0xa7, 0x06, 0xfa, 0x5b, 0x4a, 0x7a, 0x3e,
+ 0x41, 0x7a, 0x20, 0x45, 0x3a, 0xa7, 0x0e, 0x89, 0xb2, 0x68, 0xa5, 0x58, 0x7f, 0xd7, 0x8f, 0x96,
+ 0x07, 0x3b, 0xa7, 0x8e, 0x94, 0xd2, 0x32, 0x40, 0x1a, 0x47, 0xb0, 0xfe, 0x3e, 0x2b, 0x03, 0xa4,
+ 0x97, 0xb4, 0x0c, 0x10, 0x5b, 0x5a, 0xa8, 0xb4, 0xd3, 0x04, 0xe9, 0x1f, 0xb2, 0x42, 0xa5, 0xcd,
+ 0xa7, 0x85, 0x4a, 0x8d, 0x69, 0xb4, 0x4c, 0x61, 0x38, 0xed, 0x1f, 0xb3, 0x68, 0xe9, 0x4d, 0xa8,
+ 0xd1, 0x52, 0x63, 0x5a, 0x06, 0xc8, 0x3d, 0x2a, 0x58, 0xff, 0x94, 0x95, 0x01, 0x72, 0xdb, 0x6a,
+ 0x19, 0x20, 0x36, 0xce, 0xb9, 0x27, 0x3d, 0x1c, 0x28, 0xcd, 0xff, 0x67, 0x83, 0xc8, 0x60, 0xdf,
+ 0xe6, 0x97, 0x1f, 0x0a, 0xa5, 0x20, 0xd5, 0x91, 0x81, 0x60, 0xfc, 0x8b, 0xc1, 0x9e, 0xb4, 0xfa,
+ 0x35, 0xbf, 0x32, 0x58, 0xc8, 0xe0, 0x94, 0x1a, 0xea, 0xaf, 0x7d, 0x38, 0x45, 0xf3, 0x2b, 0x53,
+ 0x08, 0xa9, 0x46, 0xda, 0x30, 0x42, 0x90, 0xfe, 0x8d, 0x92, 0x9e, 0xd2, 0xfc, 0xea, 0xcc, 0x22,
+ 0x8b, 0x56, 0x8a, 0xf5, 0xef, 0xfd, 0x68, 0x45, 0xf3, 0xab, 0x13, 0x8e, 0xb4, 0x0c, 0xa8, 0xcd,
+ 0xff, 0x8f, 0xac, 0x0c, 0xc8, 0xcd, 0xaf, 0x0c, 0x03, 0xd2, 0x42, 0xd5, 0x9a, 0xff, 0x9f, 0x59,
+ 0xa1, 0x2a, 0xcd, 0xaf, 0x8e, 0x0e, 0xd2, 0x68, 0xb5, 0xe6, 0xff, 0x57, 0x16, 0xad, 0xd2, 0xfc,
+ 0xea, 0xb3, 0x68, 0x5a, 0x06, 0xd4, 0xe6, 0xff, 0x77, 0x56, 0x06, 0xe4, 0xe6, 0x57, 0x06, 0x0e,
+ 0x9c, 0xf3, 0xa1, 0x34, 0xd7, 0xe5, 0xef, 0x70, 0xd0, 0x77, 0x73, 0x6c, 0x4e, 0x96, 0xd8, 0x3b,
+ 0x43, 0xc4, 0x33, 0x5f, 0x6e, 0x81, 0x8f, 0x80, 0x18, 0x1a, 0xd6, 0xc4, 0xcb, 0x1a, 0xf4, 0xbd,
+ 0x5c, 0xc6, 0xf9, 0xf1, 0x94, 0x43, 0x5c, 0xe1, 0x5f, 0x98, 0xe0, 0x47, 0xc1, 0x8c, 0x34, 0xc4,
+ 0xe6, 0x2f, 0x8e, 0xd0, 0xf7, 0xb3, 0xc8, 0xaa, 0x18, 0xf3, 0xd8, 0x8b, 0x5e, 0xc6, 0x64, 0xc2,
+ 0x04, 0xb7, 0xd4, 0xb9, 0x70, 0xaf, 0xde, 0x45, 0x3f, 0xa0, 0x44, 0x0b, 0x69, 0x45, 0xe8, 0xd5,
+ 0xbb, 0xca, 0xc4, 0xb8, 0x57, 0xef, 0xc2, 0x4d, 0x20, 0x66, 0x8b, 0x35, 0xaf, 0x7d, 0x82, 0x7e,
+ 0x48, 0xd7, 0xcf, 0x26, 0xd6, 0x6f, 0xb5, 0x4f, 0xdc, 0x3c, 0x87, 0x6e, 0xb5, 0x4f, 0xe0, 0x5d,
+ 0x69, 0xd6, 0xfc, 0x0a, 0x97, 0x01, 0xfd, 0x88, 0xae, 0x9d, 0x4f, 0xac, 0xa5, 0x55, 0x12, 0xd3,
+ 0x4d, 0xf2, 0x15, 0x97, 0x27, 0x6e, 0x50, 0x5e, 0x9e, 0x1f, 0xe7, 0x48, 0xb5, 0xfb, 0x95, 0x47,
+ 0xf4, 0xa5, 0x54, 0x1e, 0x41, 0x14, 0x97, 0xe7, 0x27, 0xb9, 0x0c, 0x85, 0x93, 0xca, 0xc3, 0x97,
+ 0xc5, 0xe5, 0x91, 0xb9, 0x48, 0x79, 0x48, 0x75, 0x7e, 0x9a, 0xc5, 0x25, 0x55, 0x27, 0x1e, 0x0a,
+ 0xb2, 0x55, 0xb8, 0x3a, 0xf2, 0xad, 0x82, 0xab, 0xf3, 0x4b, 0x4a, 0x94, 0x5d, 0x1d, 0xe9, 0xee,
+ 0x60, 0xd5, 0x11, 0x14, 0xb8, 0x3a, 0x3f, 0xa3, 0xeb, 0x33, 0xaa, 0xc3, 0xa1, 0xac, 0x3a, 0x62,
+ 0x25, 0xad, 0xce, 0xcf, 0xe9, 0xda, 0xcc, 0xea, 0x70, 0x38, 0xad, 0xce, 0x05, 0x00, 0xc8, 0xfe,
+ 0xdb, 0x5e, 0xcb, 0x5f, 0x43, 0x9f, 0x36, 0xc9, 0x6b, 0x28, 0xc9, 0x04, 0x2d, 0x90, 0xa7, 0xfd,
+ 0x8b, 0xbf, 0xae, 0xa3, 0xcf, 0xc8, 0x88, 0x5d, 0x6c, 0x82, 0x17, 0x41, 0xa1, 0x16, 0x43, 0x36,
+ 0xd0, 0x67, 0x19, 0xa4, 0xca, 0x21, 0x1b, 0x70, 0x09, 0x4c, 0x50, 0x04, 0x81, 0xd8, 0x35, 0xf4,
+ 0x39, 0x9d, 0x86, 0xfc, 0x3d, 0x49, 0xbe, 0xad, 0x62, 0xc8, 0x4d, 0xf4, 0x79, 0x8a, 0x90, 0x6d,
+ 0x70, 0x99, 0xd3, 0xac, 0x12, 0x1e, 0x07, 0x7d, 0x41, 0x01, 0x61, 0x1e, 0x47, 0xec, 0x08, 0x7f,
+ 0xbb, 0x85, 0xbe, 0xa8, 0x3b, 0xba, 0x85, 0x01, 0x22, 0xb4, 0x4d, 0xf4, 0x25, 0x3d, 0xda, 0xcd,
+ 0x78, 0xcb, 0xf8, 0xeb, 0x6d, 0xf4, 0x65, 0x9d, 0xe2, 0x36, 0x5c, 0x02, 0x85, 0xaa, 0x40, 0xac,
+ 0xad, 0xa2, 0xaf, 0xb0, 0x38, 0x04, 0xc9, 0xda, 0x2a, 0xc1, 0xec, 0x54, 0xde, 0x7d, 0x50, 0xdb,
+ 0xdd, 0x7a, 0x5c, 0x59, 0x5b, 0x43, 0x5f, 0xe5, 0x18, 0x6c, 0xa4, 0xb6, 0x18, 0x43, 0x72, 0xbd,
+ 0x8e, 0xbe, 0xa6, 0x60, 0x88, 0x0d, 0x5e, 0x02, 0x93, 0x35, 0x29, 0xbf, 0x6b, 0x1b, 0xe8, 0xeb,
+ 0x09, 0x6f, 0x1b, 0x14, 0x55, 0x8d, 0x51, 0x36, 0xfa, 0x46, 0x02, 0x65, 0xc7, 0x09, 0xa4, 0xa0,
+ 0x9b, 0xe8, 0x9b, 0x72, 0x02, 0x09, 0x48, 0xca, 0x32, 0xdd, 0x9d, 0x83, 0xbe, 0x95, 0x00, 0x39,
+ 0xd8, 0x9f, 0x14, 0xd3, 0xad, 0x5a, 0x0d, 0x7d, 0x3b, 0x81, 0xba, 0x85, 0x51, 0x52, 0x4c, 0x9b,
+ 0xb5, 0x1a, 0xfa, 0x4e, 0x22, 0xaa, 0xcd, 0xc5, 0xe7, 0x60, 0x42, 0x7d, 0xd0, 0x29, 0x00, 0xc3,
+ 0x63, 0x6f, 0x44, 0x0d, 0x0f, 0xbe, 0x0d, 0xf2, 0xf5, 0x40, 0xbc, 0xd4, 0x40, 0xb9, 0xd3, 0x5e,
+ 0x80, 0xc8, 0xe8, 0xc5, 0x7b, 0x00, 0x26, 0x87, 0x94, 0xb0, 0x08, 0xcc, 0x97, 0xfe, 0x09, 0x73,
+ 0x81, 0x7f, 0x85, 0xb3, 0xe0, 0x0c, 0xbd, 0x7d, 0x72, 0xc4, 0x46, 0xbf, 0xdc, 0xc9, 0x6d, 0x1a,
+ 0x31, 0x83, 0x3c, 0x90, 0x94, 0x19, 0xcc, 0x14, 0x06, 0x53, 0x66, 0x28, 0x83, 0xd9, 0xb4, 0xd1,
+ 0xa3, 0xcc, 0x31, 0x91, 0xc2, 0x31, 0x91, 0xce, 0xa1, 0x8c, 0x18, 0x65, 0x8e, 0xe1, 0x14, 0x8e,
+ 0xe1, 0x24, 0x47, 0x62, 0x94, 0x28, 0x73, 0x4c, 0xa7, 0x70, 0x4c, 0xa7, 0x73, 0x28, 0x23, 0x43,
+ 0x99, 0x03, 0xa6, 0x70, 0x40, 0x99, 0xe3, 0x01, 0x98, 0x4f, 0x1f, 0x0c, 0xca, 0x2c, 0xa3, 0x29,
+ 0x2c, 0xa3, 0x19, 0x2c, 0xea, 0xf0, 0x4f, 0x66, 0x19, 0x49, 0x61, 0x19, 0x91, 0x59, 0xaa, 0x00,
+ 0x65, 0x8d, 0xf7, 0x64, 0x9e, 0xa9, 0x14, 0x9e, 0xa9, 0x2c, 0x1e, 0x6d, 0x7c, 0x27, 0xf3, 0x14,
+ 0x53, 0x78, 0x8a, 0xa9, 0xdd, 0x26, 0x0f, 0xe9, 0x4e, 0xeb, 0xd7, 0x9c, 0xcc, 0xb0, 0x05, 0x66,
+ 0x52, 0xe6, 0x71, 0xa7, 0x51, 0x18, 0x32, 0xc5, 0x5d, 0x50, 0xd4, 0x87, 0x6f, 0xf2, 0xfa, 0xb1,
+ 0x94, 0xf5, 0x63, 0x29, 0x4d, 0xa2, 0x0f, 0xda, 0x64, 0x8e, 0xf1, 0x14, 0x8e, 0xf1, 0xe4, 0x36,
+ 0xf4, 0x89, 0xda, 0x69, 0x14, 0x05, 0x99, 0x22, 0x04, 0xe7, 0xfa, 0x8c, 0xcc, 0x52, 0xa8, 0xde,
+ 0x91, 0xa9, 0x5e, 0xe3, 0x7d, 0x95, 0xe4, 0xf3, 0x18, 0x9c, 0xef, 0x37, 0x33, 0x4b, 0x71, 0xba,
+ 0xa6, 0x3a, 0xed, 0xfb, 0x0a, 0x4b, 0x72, 0xd4, 0xa4, 0x0d, 0x97, 0x36, 0x2b, 0x4b, 0x71, 0x72,
+ 0x47, 0x76, 0x32, 0xe8, 0x4b, 0x2d, 0xc9, 0x9b, 0x07, 0xce, 0x66, 0xce, 0xcb, 0x52, 0xdc, 0xad,
+ 0xa8, 0xee, 0xb2, 0x5f, 0x75, 0xc5, 0x2e, 0x96, 0x6e, 0x03, 0x20, 0x4d, 0xf6, 0x46, 0x81, 0x59,
+ 0xdd, 0xdb, 0x2b, 0x0e, 0xe1, 0x5f, 0xca, 0x5b, 0x6e, 0xd1, 0xa0, 0xbf, 0x3c, 0x2f, 0xe6, 0xb0,
+ 0xbb, 0xdd, 0xca, 0xc3, 0xe2, 0x7f, 0xf9, 0x7f, 0x46, 0x79, 0x42, 0x8c, 0xa2, 0xf0, 0xa9, 0xb2,
+ 0xf4, 0x06, 0x98, 0xd4, 0x06, 0x92, 0x05, 0x60, 0xd4, 0xf9, 0x81, 0x52, 0xbf, 0x76, 0x13, 0x80,
+ 0xf8, 0xdf, 0x30, 0xc1, 0x29, 0x90, 0x3f, 0xd8, 0xdd, 0x7f, 0x52, 0xb9, 0xbf, 0x53, 0xdd, 0xa9,
+ 0x3c, 0x28, 0x0e, 0xc1, 0x02, 0x18, 0x7b, 0xe2, 0xee, 0x3d, 0xdd, 0x2b, 0x1f, 0x54, 0x8b, 0x06,
+ 0x1c, 0x03, 0xc3, 0x8f, 0xf6, 0xf7, 0x76, 0x8b, 0xb9, 0x6b, 0xf7, 0x40, 0x5e, 0x9e, 0x07, 0x4e,
+ 0x81, 0x7c, 0x75, 0xcf, 0xad, 0xec, 0x3c, 0xdc, 0xad, 0xd1, 0x48, 0x25, 0x03, 0x8d, 0x58, 0x31,
+ 0x3c, 0x2f, 0xe6, 0xca, 0x17, 0xc1, 0x85, 0x7a, 0xd0, 0x4a, 0xfc, 0x61, 0x26, 0x25, 0xe7, 0xc5,
+ 0x08, 0xb1, 0x6e, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x33, 0xc2, 0x0c, 0xb6, 0xeb, 0x26, 0x00,
+ 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto b/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto
new file mode 100644
index 0000000..95a8fd1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto
@@ -0,0 +1,285 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+package conformance;
+option java_package = "com.google.protobuf.conformance";
+
+import "google/protobuf/any.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
+
+// This defines the conformance testing protocol. This protocol exists between
+// the conformance test suite itself and the code being tested. For each test,
+// the suite will send a ConformanceRequest message and expect a
+// ConformanceResponse message.
+//
+// You can run the tests in either of two ways:
+//
+// 1. in-process (using the interface in conformance_test.h).
+//
+// 2. as a sub-process communicating over a pipe. Information about how to
+// do this is in conformance_test_runner.cc.
+//
+// Pros/cons of the two approaches:
+//
+// - running as a sub-process is much simpler for languages other than C/C++.
+//
+// - running as a sub-process may be more tricky in unusual environments like
+// iOS apps, where fork/stdin/stdout are not available.
+
+enum WireFormat {
+ UNSPECIFIED = 0;
+ PROTOBUF = 1;
+ JSON = 2;
+}
+
+// Represents a single test case's input. The testee should:
+//
+// 1. parse this proto (which should always succeed)
+// 2. parse the protobuf or JSON payload in "payload" (which may fail)
+// 3. if the parse succeeded, serialize the message in the requested format.
+message ConformanceRequest {
+ // The payload (whether protobuf or JSON) is always for a TestAllTypes proto
+ // (see below).
+ oneof payload {
+ bytes protobuf_payload = 1;
+ string json_payload = 2;
+ }
+
+ // Which format should the testee serialize its message to?
+ WireFormat requested_output_format = 3;
+}
+
+// Represents a single test case's output.
+message ConformanceResponse {
+ oneof result {
+ // This string should be set to indicate parsing failed. The string can
+ // provide more information about the parse error if it is available.
+ //
+ // Setting this string does not necessarily mean the testee failed the
+ // test. Some of the test cases are intentionally invalid input.
+ string parse_error = 1;
+
+ // If the input was successfully parsed but errors occurred when
+ // serializing it to the requested output format, set the error message in
+ // this field.
+ string serialize_error = 6;
+
+ // This should be set if some other error occurred. This will always
+ // indicate that the test failed. The string can provide more information
+ // about the failure.
+ string runtime_error = 2;
+
+ // If the input was successfully parsed and the requested output was
+ // protobuf, serialize it to protobuf and set it in this field.
+ bytes protobuf_payload = 3;
+
+ // If the input was successfully parsed and the requested output was JSON,
+ // serialize to JSON and set it in this field.
+ string json_payload = 4;
+
+ // For when the testee skipped the test, likely because a certain feature
+ // wasn't supported, like JSON input/output.
+ string skipped = 5;
+ }
+}
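
To make the request/response flow above concrete, here is a minimal sketch of how a testee might implement steps 1-3 for a protobuf payload. The import alias conformancepb and the handleRequest helper are assumptions for illustration only; the generated type and constant names (ConformanceRequest_ProtobufPayload, WireFormat_PROTOBUF, and so on) follow the usual protoc-gen-go naming conventions.

package conformancetest

import (
	conformancepb "github.com/golang/protobuf/_conformance/conformance_proto"
	"github.com/golang/protobuf/proto"
)

// handleRequest parses the payload of one ConformanceRequest and serializes it
// back in the requested wire format, reporting failures through the result oneof.
func handleRequest(req *conformancepb.ConformanceRequest) *conformancepb.ConformanceResponse {
	msg := &conformancepb.TestAllTypes{}

	switch p := req.Payload.(type) {
	case *conformancepb.ConformanceRequest_ProtobufPayload:
		if err := proto.Unmarshal(p.ProtobufPayload, msg); err != nil {
			return &conformancepb.ConformanceResponse{
				Result: &conformancepb.ConformanceResponse_ParseError{ParseError: err.Error()},
			}
		}
	default:
		// JSON payloads are skipped in this sketch.
		return &conformancepb.ConformanceResponse{
			Result: &conformancepb.ConformanceResponse_Skipped{Skipped: "only protobuf input handled"},
		}
	}

	if req.RequestedOutputFormat != conformancepb.WireFormat_PROTOBUF {
		return &conformancepb.ConformanceResponse{
			Result: &conformancepb.ConformanceResponse_Skipped{Skipped: "only protobuf output handled"},
		}
	}

	b, err := proto.Marshal(msg)
	if err != nil {
		return &conformancepb.ConformanceResponse{
			Result: &conformancepb.ConformanceResponse_SerializeError{SerializeError: err.Error()},
		}
	}
	return &conformancepb.ConformanceResponse{
		Result: &conformancepb.ConformanceResponse_ProtobufPayload{ProtobufPayload: b},
	}
}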
+
+// This proto includes every type of field in both singular and repeated
+// forms.
+message TestAllTypes {
+ message NestedMessage {
+ int32 a = 1;
+ TestAllTypes corecursive = 2;
+ }
+
+ enum NestedEnum {
+ FOO = 0;
+ BAR = 1;
+ BAZ = 2;
+ NEG = -1; // Intentionally negative.
+ }
+
+ // Singular
+ int32 optional_int32 = 1;
+ int64 optional_int64 = 2;
+ uint32 optional_uint32 = 3;
+ uint64 optional_uint64 = 4;
+ sint32 optional_sint32 = 5;
+ sint64 optional_sint64 = 6;
+ fixed32 optional_fixed32 = 7;
+ fixed64 optional_fixed64 = 8;
+ sfixed32 optional_sfixed32 = 9;
+ sfixed64 optional_sfixed64 = 10;
+ float optional_float = 11;
+ double optional_double = 12;
+ bool optional_bool = 13;
+ string optional_string = 14;
+ bytes optional_bytes = 15;
+
+ NestedMessage optional_nested_message = 18;
+ ForeignMessage optional_foreign_message = 19;
+
+ NestedEnum optional_nested_enum = 21;
+ ForeignEnum optional_foreign_enum = 22;
+
+ string optional_string_piece = 24 [ctype=STRING_PIECE];
+ string optional_cord = 25 [ctype=CORD];
+
+ TestAllTypes recursive_message = 27;
+
+ // Repeated
+ repeated int32 repeated_int32 = 31;
+ repeated int64 repeated_int64 = 32;
+ repeated uint32 repeated_uint32 = 33;
+ repeated uint64 repeated_uint64 = 34;
+ repeated sint32 repeated_sint32 = 35;
+ repeated sint64 repeated_sint64 = 36;
+ repeated fixed32 repeated_fixed32 = 37;
+ repeated fixed64 repeated_fixed64 = 38;
+ repeated sfixed32 repeated_sfixed32 = 39;
+ repeated sfixed64 repeated_sfixed64 = 40;
+ repeated float repeated_float = 41;
+ repeated double repeated_double = 42;
+ repeated bool repeated_bool = 43;
+ repeated string repeated_string = 44;
+ repeated bytes repeated_bytes = 45;
+
+ repeated NestedMessage repeated_nested_message = 48;
+ repeated ForeignMessage repeated_foreign_message = 49;
+
+ repeated NestedEnum repeated_nested_enum = 51;
+ repeated ForeignEnum repeated_foreign_enum = 52;
+
+ repeated string repeated_string_piece = 54 [ctype=STRING_PIECE];
+ repeated string repeated_cord = 55 [ctype=CORD];
+
+ // Map
+ map < int32, int32> map_int32_int32 = 56;
+ map < int64, int64> map_int64_int64 = 57;
+ map < uint32, uint32> map_uint32_uint32 = 58;
+ map < uint64, uint64> map_uint64_uint64 = 59;
+ map < sint32, sint32> map_sint32_sint32 = 60;
+ map < sint64, sint64> map_sint64_sint64 = 61;
+ map < fixed32, fixed32> map_fixed32_fixed32 = 62;
+ map < fixed64, fixed64> map_fixed64_fixed64 = 63;
+ map <sfixed32, sfixed32> map_sfixed32_sfixed32 = 64;
+ map <sfixed64, sfixed64> map_sfixed64_sfixed64 = 65;
+ map < int32, float> map_int32_float = 66;
+ map < int32, double> map_int32_double = 67;
+ map < bool, bool> map_bool_bool = 68;
+ map < string, string> map_string_string = 69;
+ map < string, bytes> map_string_bytes = 70;
+ map < string, NestedMessage> map_string_nested_message = 71;
+ map < string, ForeignMessage> map_string_foreign_message = 72;
+ map < string, NestedEnum> map_string_nested_enum = 73;
+ map < string, ForeignEnum> map_string_foreign_enum = 74;
+
+ oneof oneof_field {
+ uint32 oneof_uint32 = 111;
+ NestedMessage oneof_nested_message = 112;
+ string oneof_string = 113;
+ bytes oneof_bytes = 114;
+ bool oneof_bool = 115;
+ uint64 oneof_uint64 = 116;
+ float oneof_float = 117;
+ double oneof_double = 118;
+ NestedEnum oneof_enum = 119;
+ }
+
+ // Well-known types
+ google.protobuf.BoolValue optional_bool_wrapper = 201;
+ google.protobuf.Int32Value optional_int32_wrapper = 202;
+ google.protobuf.Int64Value optional_int64_wrapper = 203;
+ google.protobuf.UInt32Value optional_uint32_wrapper = 204;
+ google.protobuf.UInt64Value optional_uint64_wrapper = 205;
+ google.protobuf.FloatValue optional_float_wrapper = 206;
+ google.protobuf.DoubleValue optional_double_wrapper = 207;
+ google.protobuf.StringValue optional_string_wrapper = 208;
+ google.protobuf.BytesValue optional_bytes_wrapper = 209;
+
+ repeated google.protobuf.BoolValue repeated_bool_wrapper = 211;
+ repeated google.protobuf.Int32Value repeated_int32_wrapper = 212;
+ repeated google.protobuf.Int64Value repeated_int64_wrapper = 213;
+ repeated google.protobuf.UInt32Value repeated_uint32_wrapper = 214;
+ repeated google.protobuf.UInt64Value repeated_uint64_wrapper = 215;
+ repeated google.protobuf.FloatValue repeated_float_wrapper = 216;
+ repeated google.protobuf.DoubleValue repeated_double_wrapper = 217;
+ repeated google.protobuf.StringValue repeated_string_wrapper = 218;
+ repeated google.protobuf.BytesValue repeated_bytes_wrapper = 219;
+
+ google.protobuf.Duration optional_duration = 301;
+ google.protobuf.Timestamp optional_timestamp = 302;
+ google.protobuf.FieldMask optional_field_mask = 303;
+ google.protobuf.Struct optional_struct = 304;
+ google.protobuf.Any optional_any = 305;
+ google.protobuf.Value optional_value = 306;
+
+ repeated google.protobuf.Duration repeated_duration = 311;
+ repeated google.protobuf.Timestamp repeated_timestamp = 312;
+ repeated google.protobuf.FieldMask repeated_fieldmask = 313;
+ repeated google.protobuf.Struct repeated_struct = 324;
+ repeated google.protobuf.Any repeated_any = 315;
+ repeated google.protobuf.Value repeated_value = 316;
+
+ // Test field-name-to-JSON-name convention.
+ // (protobuf says names can be any valid C/C++ identifier.)
+ int32 fieldname1 = 401;
+ int32 field_name2 = 402;
+ int32 _field_name3 = 403;
+ int32 field__name4_ = 404;
+ int32 field0name5 = 405;
+ int32 field_0_name6 = 406;
+ int32 fieldName7 = 407;
+ int32 FieldName8 = 408;
+ int32 field_Name9 = 409;
+ int32 Field_Name10 = 410;
+ int32 FIELD_NAME11 = 411;
+ int32 FIELD_name12 = 412;
+ int32 __field_name13 = 413;
+ int32 __Field_name14 = 414;
+ int32 field__name15 = 415;
+ int32 field__Name16 = 416;
+ int32 field_name17__ = 417;
+ int32 Field_name18__ = 418;
+}
+
+message ForeignMessage {
+ int32 c = 1;
+}
+
+enum ForeignEnum {
+ FOREIGN_FOO = 0;
+ FOREIGN_BAR = 1;
+ FOREIGN_BAZ = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/vendor/github.com/golang/protobuf/descriptor/descriptor.go
new file mode 100644
index 0000000..ac7e51b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/descriptor/descriptor.go
@@ -0,0 +1,93 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package descriptor provides functions for obtaining protocol buffer
+// descriptors for generated Go types.
+//
+// These functions cannot go in package proto because they depend on the
+// generated protobuf descriptor messages, which themselves depend on proto.
+package descriptor
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+
+ "github.com/golang/protobuf/proto"
+ protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
+func extractFile(gz []byte) (*protobuf.FileDescriptorProto, error) {
+ r, err := gzip.NewReader(bytes.NewReader(gz))
+ if err != nil {
+ return nil, fmt.Errorf("failed to open gzip reader: %v", err)
+ }
+ defer r.Close()
+
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, fmt.Errorf("failed to uncompress descriptor: %v", err)
+ }
+
+ fd := new(protobuf.FileDescriptorProto)
+ if err := proto.Unmarshal(b, fd); err != nil {
+ return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err)
+ }
+
+ return fd, nil
+}
+
+// Message is a proto.Message with a method to return its descriptor.
+//
+// Message types generated by the protocol compiler always satisfy
+// the Message interface.
+type Message interface {
+ proto.Message
+ Descriptor() ([]byte, []int)
+}
+
+// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it
+// describing the given message.
+func ForMessage(msg Message) (fd *protobuf.FileDescriptorProto, md *protobuf.DescriptorProto) {
+ gz, path := msg.Descriptor()
+ fd, err := extractFile(gz)
+ if err != nil {
+ panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err))
+ }
+
+ md = fd.MessageType[path[0]]
+ for _, i := range path[1:] {
+ md = md.NestedType[i]
+ }
+ return fd, md
+}
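
As a usage note, ForMessage also resolves nested message types by walking the descriptor path, as the loop above shows. Below is a small sketch that mirrors the nil-receiver trick used in the test that follows; DescriptorProto_ExtensionRange is assumed to be the generated Go name of descriptor.proto's nested ExtensionRange message.

package main

import (
	"fmt"

	"github.com/golang/protobuf/descriptor"
	protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// A typed nil is enough because the generated Descriptor() method never
	// dereferences its receiver.
	fd, md := descriptor.ForMessage((*protobuf.DescriptorProto_ExtensionRange)(nil))
	fmt.Println(fd.GetPackage(), md.GetName()) // expected: google.protobuf ExtensionRange
}
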
diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go b/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go
new file mode 100644
index 0000000..27b0729
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go
@@ -0,0 +1,32 @@
+package descriptor_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/golang/protobuf/descriptor"
+ tpb "github.com/golang/protobuf/proto/testdata"
+ protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+func TestMessage(t *testing.T) {
+ var msg *protobuf.DescriptorProto
+ fd, md := descriptor.ForMessage(msg)
+ if pkg, want := fd.GetPackage(), "google.protobuf"; pkg != want {
+ t.Errorf("descriptor.ForMessage(%T).GetPackage() = %q; want %q", msg, pkg, want)
+ }
+ if name, want := md.GetName(), "DescriptorProto"; name != want {
+ t.Fatalf("descriptor.ForMessage(%T).GetName() = %q; want %q", msg, name, want)
+ }
+}
+
+func Example_Options() {
+ var msg *tpb.MyMessageSet
+ _, md := descriptor.ForMessage(msg)
+ if md.GetOptions().GetMessageSetWireFormat() {
+ fmt.Printf("%v uses option message_set_wire_format.\n", md.GetName())
+ }
+
+ // Output:
+ // MyMessageSet uses option message_set_wire_format.
+}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
new file mode 100644
index 0000000..dfdfc5b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
@@ -0,0 +1,1082 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON.
+It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json.
+
+This package produces a different output than the standard "encoding/json" package,
+which does not operate correctly on protocol buffers.
+*/
+package jsonpb
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ stpb "github.com/golang/protobuf/ptypes/struct"
+)
+
+// Marshaler is a configurable object for converting between
+// protocol buffer objects and a JSON representation for them.
+type Marshaler struct {
+ // Whether to render enum values as integers, as opposed to string values.
+ EnumsAsInts bool
+
+ // Whether to render fields with zero values.
+ EmitDefaults bool
+
+ // A string to indent each level by. The presence of this field will
+ // also cause a space to appear between the field separator and
+ // value, and for newlines to appear between fields and array
+ // elements.
+ Indent string
+
+ // Whether to use the original (.proto) name for fields.
+ OrigName bool
+
+ // A custom URL resolver to use when marshaling Any messages to JSON.
+ // If unset, the default resolution strategy is to extract the
+ // fully-qualified type name from the type URL and pass that to
+ // proto.MessageType(string).
+ AnyResolver AnyResolver
+}
+
+// AnyResolver takes a type URL, present in an Any message, and resolves it into
+// an instance of the associated message.
+type AnyResolver interface {
+ Resolve(typeUrl string) (proto.Message, error)
+}
+
+func defaultResolveAny(typeUrl string) (proto.Message, error) {
+ // Only the part of typeUrl after the last slash is relevant.
+ mname := typeUrl
+ if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+ mname = mname[slash+1:]
+ }
+ mt := proto.MessageType(mname)
+ if mt == nil {
+ return nil, fmt.Errorf("unknown message type %q", mname)
+ }
+ return reflect.New(mt.Elem()).Interface().(proto.Message), nil
+}
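
For callers that need a different resolution strategy, a custom AnyResolver can be supplied on the Marshaler (and on the Unmarshaler defined later in this file). The sketch below strips an application-specific prefix before falling back to the same registry lookup as the default; the resolver name and the type.example.com prefix are invented for illustration.

package main

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/proto"
)

// exampleResolver accepts type URLs of the form "type.example.com/<full.Name>"
// and resolves them through the global proto registry, much like the default.
type exampleResolver struct{}

func (exampleResolver) Resolve(typeUrl string) (proto.Message, error) {
	name := strings.TrimPrefix(typeUrl, "type.example.com/")
	mt := proto.MessageType(name)
	if mt == nil {
		return nil, fmt.Errorf("unknown message type %q", name)
	}
	return reflect.New(mt.Elem()).Interface().(proto.Message), nil
}

func main() {
	m := &jsonpb.Marshaler{AnyResolver: exampleResolver{}}
	u := &jsonpb.Unmarshaler{AnyResolver: exampleResolver{}}
	_, _ = m, u // both sides consult the resolver whenever an Any is encountered
}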
+
+// JSONPBMarshaler is implemented by protobuf messages that customize the
+// way they are marshaled to JSON. Messages that implement this should
+// also implement JSONPBUnmarshaler so that the custom format can be
+// parsed.
+type JSONPBMarshaler interface {
+ MarshalJSONPB(*Marshaler) ([]byte, error)
+}
+
+// JSONPBUnmarshaler is implemented by protobuf messages that customize
+// the way they are unmarshaled from JSON. Messages that implement this
+// should also implement JSONPBMarshaler so that the custom format can be
+// produced.
+type JSONPBUnmarshaler interface {
+ UnmarshalJSONPB(*Unmarshaler, []byte) error
+}
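
The two interfaces are typically implemented together on one message type. Here is a sketch using a hand-written stand-in; the Color type, its fields, and the compact "#rrggbb" encoding are all invented for illustration, and a real message would come from protoc-gen-go instead.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/golang/protobuf/jsonpb"
)

// Color is a hand-written stand-in for a generated message; it implements
// proto.Message just well enough for jsonpb to accept it.
type Color struct {
	Red, Green, Blue uint32
}

func (c *Color) Reset()         { *c = Color{} }
func (c *Color) String() string { return fmt.Sprintf("#%02x%02x%02x", c.Red, c.Green, c.Blue) }
func (*Color) ProtoMessage()    {}

// MarshalJSONPB renders the color as a compact "#rrggbb" JSON string.
func (c *Color) MarshalJSONPB(*jsonpb.Marshaler) ([]byte, error) {
	return json.Marshal(c.String())
}

// UnmarshalJSONPB parses the same compact form back into the struct.
func (c *Color) UnmarshalJSONPB(_ *jsonpb.Unmarshaler, b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	_, err := fmt.Sscanf(s, "#%2x%2x%2x", &c.Red, &c.Green, &c.Blue)
	return err
}

func main() {
	js, err := (&jsonpb.Marshaler{}).MarshalToString(&Color{Red: 255, Green: 128, Blue: 0})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(js) // "#ff8000"
}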
+
+// Marshal marshals a protocol buffer into JSON.
+func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error {
+ writer := &errWriter{writer: out}
+ return m.marshalObject(writer, pb, "", "")
+}
+
+// MarshalToString converts a protocol buffer object to JSON string.
+func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) {
+ var buf bytes.Buffer
+ if err := m.Marshal(&buf, pb); err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
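
A brief usage sketch of the options above. It marshals the well-known Struct type from the ptypes/struct package vendored alongside, so no hypothetical generated message is required; the field names and values are arbitrary.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/jsonpb"
	stpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	msg := &stpb.Struct{
		Fields: map[string]*stpb.Value{
			"name": {Kind: &stpb.Value_StringValue{StringValue: "miniflux"}},
			"size": {Kind: &stpb.Value_NumberValue{NumberValue: 42}},
		},
	}

	m := &jsonpb.Marshaler{Indent: "  "}
	js, err := m.MarshalToString(msg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(js) // prints an indented JSON object with "name" and "size" fields
}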
+
+type int32Slice []int32
+
+var nonFinite = map[string]float64{
+ `"NaN"`: math.NaN(),
+ `"Infinity"`: math.Inf(1),
+ `"-Infinity"`: math.Inf(-1),
+}
+
+// For sorting extensions ids to ensure stable output.
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+type wkt interface {
+ XXX_WellKnownType() string
+}
+
+// marshalObject writes a struct to the Writer.
+func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error {
+ if jsm, ok := v.(JSONPBMarshaler); ok {
+ b, err := jsm.MarshalJSONPB(m)
+ if err != nil {
+ return err
+ }
+ if typeURL != "" {
+ // we are marshaling this object to an Any type
+ var js map[string]*json.RawMessage
+ if err = json.Unmarshal(b, &js); err != nil {
+ return fmt.Errorf("type %T produced invalid JSON: %v", v, err)
+ }
+ turl, err := json.Marshal(typeURL)
+ if err != nil {
+ return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
+ }
+ js["@type"] = (*json.RawMessage)(&turl)
+ if b, err = json.Marshal(js); err != nil {
+ return err
+ }
+ }
+
+ out.write(string(b))
+ return out.err
+ }
+
+ s := reflect.ValueOf(v).Elem()
+
+ // Handle well-known types.
+ if wkt, ok := v.(wkt); ok {
+ switch wkt.XXX_WellKnownType() {
+ case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
+ "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
+ // "Wrappers use the same representation in JSON
+ // as the wrapped primitive type, ..."
+ sprop := proto.GetProperties(s.Type())
+ return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent)
+ case "Any":
+ // Any is a bit more involved.
+ return m.marshalAny(out, v, indent)
+ case "Duration":
+ // "Generated output always contains 3, 6, or 9 fractional digits,
+ // depending on required precision."
+ s, ns := s.Field(0).Int(), s.Field(1).Int()
+ x := fmt.Sprintf("%d.%09d", s, ns)
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, "000")
+ out.write(`"`)
+ out.write(x)
+ out.write(`s"`)
+ return out.err
+ case "Struct", "ListValue":
+ // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice.
+ // TODO: pass the correct Properties if needed.
+ return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent)
+ case "Timestamp":
+ // "RFC 3339, where generated output will always be Z-normalized
+ // and uses 3, 6 or 9 fractional digits."
+ s, ns := s.Field(0).Int(), s.Field(1).Int()
+ t := time.Unix(s, ns).UTC()
+ // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
+ x := t.Format("2006-01-02T15:04:05.000000000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, "000")
+ out.write(`"`)
+ out.write(x)
+ out.write(`Z"`)
+ return out.err
+ case "Value":
+ // Value has a single oneof.
+ kind := s.Field(0)
+ if kind.IsNil() {
+ // "absence of any variant indicates an error"
+ return errors.New("nil Value")
+ }
+ // oneof -> *T -> T -> T.F
+ x := kind.Elem().Elem().Field(0)
+ // TODO: pass the correct Properties if needed.
+ return m.marshalValue(out, &proto.Properties{}, x, indent)
+ }
+ }
+
+ out.write("{")
+ if m.Indent != "" {
+ out.write("\n")
+ }
+
+ firstField := true
+
+ if typeURL != "" {
+ if err := m.marshalTypeURL(out, indent, typeURL); err != nil {
+ return err
+ }
+ firstField = false
+ }
+
+ for i := 0; i < s.NumField(); i++ {
+ value := s.Field(i)
+ valueField := s.Type().Field(i)
+ if strings.HasPrefix(valueField.Name, "XXX_") {
+ continue
+ }
+
+ // IsNil will panic on most value kinds.
+ switch value.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface:
+ if value.IsNil() {
+ continue
+ }
+ }
+
+ if !m.EmitDefaults {
+ switch value.Kind() {
+ case reflect.Bool:
+ if !value.Bool() {
+ continue
+ }
+ case reflect.Int32, reflect.Int64:
+ if value.Int() == 0 {
+ continue
+ }
+ case reflect.Uint32, reflect.Uint64:
+ if value.Uint() == 0 {
+ continue
+ }
+ case reflect.Float32, reflect.Float64:
+ if value.Float() == 0 {
+ continue
+ }
+ case reflect.String:
+ if value.Len() == 0 {
+ continue
+ }
+ case reflect.Map, reflect.Ptr, reflect.Slice:
+ if value.IsNil() {
+ continue
+ }
+ }
+ }
+
+ // Oneof fields need special handling.
+ if valueField.Tag.Get("protobuf_oneof") != "" {
+ // value is an interface containing &T{real_value}.
+ sv := value.Elem().Elem() // interface -> *T -> T
+ value = sv.Field(0)
+ valueField = sv.Type().Field(0)
+ }
+ prop := jsonProperties(valueField, m.OrigName)
+ if !firstField {
+ m.writeSep(out)
+ }
+ if err := m.marshalField(out, prop, value, indent); err != nil {
+ return err
+ }
+ firstField = false
+ }
+
+ // Handle proto2 extensions.
+ if ep, ok := v.(proto.Message); ok {
+ extensions := proto.RegisteredExtensions(v)
+ // Sort extensions for stable output.
+ ids := make([]int32, 0, len(extensions))
+ for id, desc := range extensions {
+ if !proto.HasExtension(ep, desc) {
+ continue
+ }
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ for _, id := range ids {
+ desc := extensions[id]
+ if desc == nil {
+ // unknown extension
+ continue
+ }
+ ext, extErr := proto.GetExtension(ep, desc)
+ if extErr != nil {
+ return extErr
+ }
+ value := reflect.ValueOf(ext)
+ var prop proto.Properties
+ prop.Parse(desc.Tag)
+ prop.JSONName = fmt.Sprintf("[%s]", desc.Name)
+ if !firstField {
+ m.writeSep(out)
+ }
+ if err := m.marshalField(out, &prop, value, indent); err != nil {
+ return err
+ }
+ firstField = false
+ }
+
+ }
+
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ }
+ out.write("}")
+ return out.err
+}
+
+func (m *Marshaler) writeSep(out *errWriter) {
+ if m.Indent != "" {
+ out.write(",\n")
+ } else {
+ out.write(",")
+ }
+}
+
+func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error {
+ // "If the Any contains a value that has a special JSON mapping,
+ // it will be converted as follows: {"@type": xxx, "value": yyy}.
+ // Otherwise, the value will be converted into a JSON object,
+ // and the "@type" field will be inserted to indicate the actual data type."
+ v := reflect.ValueOf(any).Elem()
+ turl := v.Field(0).String()
+ val := v.Field(1).Bytes()
+
+ var msg proto.Message
+ var err error
+ if m.AnyResolver != nil {
+ msg, err = m.AnyResolver.Resolve(turl)
+ } else {
+ msg, err = defaultResolveAny(turl)
+ }
+ if err != nil {
+ return err
+ }
+
+ if err := proto.Unmarshal(val, msg); err != nil {
+ return err
+ }
+
+ if _, ok := msg.(wkt); ok {
+ out.write("{")
+ if m.Indent != "" {
+ out.write("\n")
+ }
+ if err := m.marshalTypeURL(out, indent, turl); err != nil {
+ return err
+ }
+ m.writeSep(out)
+ if m.Indent != "" {
+ out.write(indent)
+ out.write(m.Indent)
+ out.write(`"value": `)
+ } else {
+ out.write(`"value":`)
+ }
+ if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil {
+ return err
+ }
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ }
+ out.write("}")
+ return out.err
+ }
+
+ return m.marshalObject(out, msg, indent, turl)
+}
+
+func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error {
+ if m.Indent != "" {
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write(`"@type":`)
+ if m.Indent != "" {
+ out.write(" ")
+ }
+ b, err := json.Marshal(typeURL)
+ if err != nil {
+ return err
+ }
+ out.write(string(b))
+ return out.err
+}
+
+// marshalField writes field description and value to the Writer.
+func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+ if m.Indent != "" {
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write(`"`)
+ out.write(prop.JSONName)
+ out.write(`":`)
+ if m.Indent != "" {
+ out.write(" ")
+ }
+ if err := m.marshalValue(out, prop, v, indent); err != nil {
+ return err
+ }
+ return nil
+}
+
+// marshalValue writes the value to the Writer.
+func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+ var err error
+ v = reflect.Indirect(v)
+
+ // Handle nil pointer
+ if v.Kind() == reflect.Invalid {
+ out.write("null")
+ return out.err
+ }
+
+ // Handle repeated elements.
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+ out.write("[")
+ comma := ""
+ for i := 0; i < v.Len(); i++ {
+ sliceVal := v.Index(i)
+ out.write(comma)
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ out.write(m.Indent)
+ out.write(m.Indent)
+ }
+ if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil {
+ return err
+ }
+ comma = ","
+ }
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write("]")
+ return out.err
+ }
+
+ // Handle well-known types.
+ // Most are handled up in marshalObject (because 99% are messages).
+ if wkt, ok := v.Interface().(wkt); ok {
+ switch wkt.XXX_WellKnownType() {
+ case "NullValue":
+ out.write("null")
+ return out.err
+ }
+ }
+
+ // Handle enumerations.
+ if !m.EnumsAsInts && prop.Enum != "" {
+ // Unknown enum values are stringified by the proto library as their
+ // value. Such values should _not_ be quoted or they will be interpreted
+ // as an enum string instead of their value.
+ enumStr := v.Interface().(fmt.Stringer).String()
+ var valStr string
+ if v.Kind() == reflect.Ptr {
+ valStr = strconv.Itoa(int(v.Elem().Int()))
+ } else {
+ valStr = strconv.Itoa(int(v.Int()))
+ }
+ isKnownEnum := enumStr != valStr
+ if isKnownEnum {
+ out.write(`"`)
+ }
+ out.write(enumStr)
+ if isKnownEnum {
+ out.write(`"`)
+ }
+ return out.err
+ }
+
+ // Handle nested messages.
+ if v.Kind() == reflect.Struct {
+ return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "")
+ }
+
+ // Handle maps.
+ // Since Go randomizes map iteration, we sort keys for stable output.
+ if v.Kind() == reflect.Map {
+ out.write(`{`)
+ keys := v.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for i, k := range keys {
+ if i > 0 {
+ out.write(`,`)
+ }
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ out.write(m.Indent)
+ out.write(m.Indent)
+ }
+
+ b, err := json.Marshal(k.Interface())
+ if err != nil {
+ return err
+ }
+ s := string(b)
+
+ // If the JSON is not a string value, encode it again to make it one.
+ if !strings.HasPrefix(s, `"`) {
+ b, err := json.Marshal(s)
+ if err != nil {
+ return err
+ }
+ s = string(b)
+ }
+
+ out.write(s)
+ out.write(`:`)
+ if m.Indent != "" {
+ out.write(` `)
+ }
+
+ if err := m.marshalValue(out, prop, v.MapIndex(k), indent+m.Indent); err != nil {
+ return err
+ }
+ }
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write(`}`)
+ return out.err
+ }
+
+ // Handle non-finite floats, e.g. NaN, Infinity and -Infinity.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ f := v.Float()
+ var sval string
+ switch {
+ case math.IsInf(f, 1):
+ sval = `"Infinity"`
+ case math.IsInf(f, -1):
+ sval = `"-Infinity"`
+ case math.IsNaN(f):
+ sval = `"NaN"`
+ }
+ if sval != "" {
+ out.write(sval)
+ return out.err
+ }
+ }
+
+ // Default handling defers to the encoding/json library.
+ b, err := json.Marshal(v.Interface())
+ if err != nil {
+ return err
+ }
+ needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64)
+ if needToQuote {
+ out.write(`"`)
+ }
+ out.write(string(b))
+ if needToQuote {
+ out.write(`"`)
+ }
+ return out.err
+}
+
+// Unmarshaler is a configurable object for converting from a JSON
+// representation to a protocol buffer object.
+type Unmarshaler struct {
+ // Whether to allow messages to contain unknown fields, as opposed to
+ // failing to unmarshal.
+ AllowUnknownFields bool
+
+ // A custom URL resolver to use when unmarshaling Any messages from JSON.
+ // If unset, the default resolution strategy is to extract the
+ // fully-qualified type name from the type URL and pass that to
+ // proto.MessageType(string).
+ AnyResolver AnyResolver
+}
+
+// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
+// This function is lenient and will decode any permutation of options used by
+// the related Marshaler.
+func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
+ inputValue := json.RawMessage{}
+ if err := dec.Decode(&inputValue); err != nil {
+ return err
+ }
+ return u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil)
+}
+
+// Unmarshal unmarshals a JSON object stream into a protocol
+// buffer. This function is lenient and will decode any permutation of
+// options used by the related Marshaler.
+func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error {
+ dec := json.NewDecoder(r)
+ return u.UnmarshalNext(dec, pb)
+}
+
+// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
+// This function is lenient and will decode any permutation of options used by
+// the related Marshaler.
+func UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
+ return new(Unmarshaler).UnmarshalNext(dec, pb)
+}
+
+// Unmarshal unmarshals a JSON object stream into a protocol
+// buffer. This function is lenient and will decode any permutation of
+// options used by the related Marshaler.
+func Unmarshal(r io.Reader, pb proto.Message) error {
+ return new(Unmarshaler).Unmarshal(r, pb)
+}
+
+// UnmarshalString will populate the fields of a protocol buffer based
+// on a JSON string. This function is lenient and will decode any permutation
+// of options used by the related Marshaler.
+func UnmarshalString(str string, pb proto.Message) error {
+ return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb)
+}
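
A matching usage sketch for the unmarshaling direction, again using the well-known Struct type so the example stays self-contained; the JSON document and the AllowUnknownFields setting are arbitrary choices for illustration.

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/golang/protobuf/jsonpb"
	stpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	in := `{"title": "Hello", "read": false, "tags": ["a", "b"]}`

	var s stpb.Struct
	u := &jsonpb.Unmarshaler{AllowUnknownFields: true}
	if err := u.Unmarshal(strings.NewReader(in), &s); err != nil {
		log.Fatal(err)
	}
	fmt.Println(s.Fields["title"].GetStringValue()) // Hello
}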
+
+// unmarshalValue converts/copies a value into the target.
+// prop may be nil.
+func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error {
+ targetType := target.Type()
+
+ // Allocate memory for pointer fields.
+ if targetType.Kind() == reflect.Ptr {
+ // If input value is "null" and target is a pointer type, then the field should be treated as not set
+ // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue.
+ _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler)
+ if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler {
+ return nil
+ }
+ target.Set(reflect.New(targetType.Elem()))
+
+ return u.unmarshalValue(target.Elem(), inputValue, prop)
+ }
+
+ if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok {
+ return jsu.UnmarshalJSONPB(u, []byte(inputValue))
+ }
+
+ // Handle well-known types that are not pointers.
+ if w, ok := target.Addr().Interface().(wkt); ok {
+ switch w.XXX_WellKnownType() {
+ case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
+ "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
+ return u.unmarshalValue(target.Field(0), inputValue, prop)
+ case "Any":
+ // Use the json.RawMessage pointer type instead of the value type to support
+ // Go versions before 1.8: Go 1.8 changed RawMessage.MarshalJSON from a
+ // pointer receiver to a value receiver, see
+ // https://github.com/golang/go/issues/14493
+ var jsonFields map[string]*json.RawMessage
+ if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
+ return err
+ }
+
+ val, ok := jsonFields["@type"]
+ if !ok || val == nil {
+ return errors.New("Any JSON doesn't have '@type'")
+ }
+
+ var turl string
+ if err := json.Unmarshal([]byte(*val), &turl); err != nil {
+ return fmt.Errorf("can't unmarshal Any's '@type': %q", *val)
+ }
+ target.Field(0).SetString(turl)
+
+ var m proto.Message
+ var err error
+ if u.AnyResolver != nil {
+ m, err = u.AnyResolver.Resolve(turl)
+ } else {
+ m, err = defaultResolveAny(turl)
+ }
+ if err != nil {
+ return err
+ }
+
+ if _, ok := m.(wkt); ok {
+ val, ok := jsonFields["value"]
+ if !ok {
+ return errors.New("Any JSON doesn't have 'value'")
+ }
+
+ if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil {
+ return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
+ }
+ } else {
+ delete(jsonFields, "@type")
+ nestedProto, err := json.Marshal(jsonFields)
+ if err != nil {
+ return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
+ }
+
+ if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil {
+ return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
+ }
+ }
+
+ b, err := proto.Marshal(m)
+ if err != nil {
+ return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err)
+ }
+ target.Field(1).SetBytes(b)
+
+ return nil
+ case "Duration":
+ unq, err := strconv.Unquote(string(inputValue))
+ if err != nil {
+ return err
+ }
+
+ d, err := time.ParseDuration(unq)
+ if err != nil {
+ return fmt.Errorf("bad Duration: %v", err)
+ }
+
+ ns := d.Nanoseconds()
+ s := ns / 1e9
+ ns %= 1e9
+ target.Field(0).SetInt(s)
+ target.Field(1).SetInt(ns)
+ return nil
+ case "Timestamp":
+ unq, err := strconv.Unquote(string(inputValue))
+ if err != nil {
+ return err
+ }
+
+ t, err := time.Parse(time.RFC3339Nano, unq)
+ if err != nil {
+ return fmt.Errorf("bad Timestamp: %v", err)
+ }
+
+ target.Field(0).SetInt(t.Unix())
+ target.Field(1).SetInt(int64(t.Nanosecond()))
+ return nil
+ case "Struct":
+ var m map[string]json.RawMessage
+ if err := json.Unmarshal(inputValue, &m); err != nil {
+ return fmt.Errorf("bad StructValue: %v", err)
+ }
+
+ target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{}))
+ for k, jv := range m {
+ pv := &stpb.Value{}
+ if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil {
+ return fmt.Errorf("bad value in StructValue for key %q: %v", k, err)
+ }
+ target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv))
+ }
+ return nil
+ case "ListValue":
+ var s []json.RawMessage
+ if err := json.Unmarshal(inputValue, &s); err != nil {
+ return fmt.Errorf("bad ListValue: %v", err)
+ }
+
+ target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s))))
+ for i, sv := range s {
+ if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil {
+ return err
+ }
+ }
+ return nil
+ case "Value":
+ ivStr := string(inputValue)
+ if ivStr == "null" {
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{}))
+ } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil {
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v}))
+ } else if v, err := strconv.Unquote(ivStr); err == nil {
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v}))
+ } else if v, err := strconv.ParseBool(ivStr); err == nil {
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v}))
+ } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil {
+ lv := &stpb.ListValue{}
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv}))
+ return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop)
+ } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil {
+ sv := &stpb.Struct{}
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv}))
+ return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop)
+ } else {
+ return fmt.Errorf("unrecognized type for Value %q", ivStr)
+ }
+ return nil
+ }
+ }
+
+ // Handle enums, which have an underlying type of int32,
+ // and may appear as strings.
+ // The case of an enum appearing as a number is handled
+ // at the bottom of this function.
+ if inputValue[0] == '"' && prop != nil && prop.Enum != "" {
+ vmap := proto.EnumValueMap(prop.Enum)
+ // Don't need to do unquoting; valid enum names
+ // are from a limited character set.
+ s := inputValue[1 : len(inputValue)-1]
+ n, ok := vmap[string(s)]
+ if !ok {
+ return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum)
+ }
+ if target.Kind() == reflect.Ptr { // proto2
+ target.Set(reflect.New(targetType.Elem()))
+ target = target.Elem()
+ }
+ target.SetInt(int64(n))
+ return nil
+ }
+
+ // Handle nested messages.
+ if targetType.Kind() == reflect.Struct {
+ var jsonFields map[string]json.RawMessage
+ if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
+ return err
+ }
+
+ consumeField := func(prop *proto.Properties) (json.RawMessage, bool) {
+ // Be liberal in what names we accept; both orig_name and camelName are okay.
+ fieldNames := acceptedJSONFieldNames(prop)
+
+ vOrig, okOrig := jsonFields[fieldNames.orig]
+ vCamel, okCamel := jsonFields[fieldNames.camel]
+ if !okOrig && !okCamel {
+ return nil, false
+ }
+ // If, for some reason, both are present in the data, favour the camelName.
+ var raw json.RawMessage
+ if okOrig {
+ raw = vOrig
+ delete(jsonFields, fieldNames.orig)
+ }
+ if okCamel {
+ raw = vCamel
+ delete(jsonFields, fieldNames.camel)
+ }
+ return raw, true
+ }
+
+ sprops := proto.GetProperties(targetType)
+ for i := 0; i < target.NumField(); i++ {
+ ft := target.Type().Field(i)
+ if strings.HasPrefix(ft.Name, "XXX_") {
+ continue
+ }
+
+ valueForField, ok := consumeField(sprops.Prop[i])
+ if !ok {
+ continue
+ }
+
+ if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil {
+ return err
+ }
+ }
+ // Check for any oneof fields.
+ if len(jsonFields) > 0 {
+ for _, oop := range sprops.OneofTypes {
+ raw, ok := consumeField(oop.Prop)
+ if !ok {
+ continue
+ }
+ nv := reflect.New(oop.Type.Elem())
+ target.Field(oop.Field).Set(nv)
+ if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil {
+ return err
+ }
+ }
+ }
+ // Handle proto2 extensions.
+ if len(jsonFields) > 0 {
+ if ep, ok := target.Addr().Interface().(proto.Message); ok {
+ for _, ext := range proto.RegisteredExtensions(ep) {
+ name := fmt.Sprintf("[%s]", ext.Name)
+ raw, ok := jsonFields[name]
+ if !ok {
+ continue
+ }
+ delete(jsonFields, name)
+ nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem())
+ if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil {
+ return err
+ }
+ if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ if !u.AllowUnknownFields && len(jsonFields) > 0 {
+ // Pick any field to be the scapegoat.
+ var f string
+ for fname := range jsonFields {
+ f = fname
+ break
+ }
+ return fmt.Errorf("unknown field %q in %v", f, targetType)
+ }
+ return nil
+ }
+
+ // Handle arrays (which aren't encoded bytes)
+ if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 {
+ var slc []json.RawMessage
+ if err := json.Unmarshal(inputValue, &slc); err != nil {
+ return err
+ }
+ if slc != nil {
+ l := len(slc)
+ target.Set(reflect.MakeSlice(targetType, l, l))
+ for i := 0; i < l; i++ {
+ if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+ }
+
+ // Handle maps (whose keys are always strings)
+ if targetType.Kind() == reflect.Map {
+ var mp map[string]json.RawMessage
+ if err := json.Unmarshal(inputValue, &mp); err != nil {
+ return err
+ }
+ if mp != nil {
+ target.Set(reflect.MakeMap(targetType))
+ var keyprop, valprop *proto.Properties
+ if prop != nil {
+ // These could still be nil if the protobuf metadata is broken somehow.
+ // TODO: This won't work because the fields are unexported.
+ // We should probably just reparse them.
+ //keyprop, valprop = prop.mkeyprop, prop.mvalprop
+ }
+ for ks, raw := range mp {
+ // Unmarshal map key. The core json library already decoded the key into a
+ // string, so we handle that specially. Other types were quoted post-serialization.
+ var k reflect.Value
+ if targetType.Key().Kind() == reflect.String {
+ k = reflect.ValueOf(ks)
+ } else {
+ k = reflect.New(targetType.Key()).Elem()
+ if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil {
+ return err
+ }
+ }
+
+ // Unmarshal map value.
+ v := reflect.New(targetType.Elem()).Elem()
+ if err := u.unmarshalValue(v, raw, valprop); err != nil {
+ return err
+ }
+ target.SetMapIndex(k, v)
+ }
+ }
+ return nil
+ }
+
+ // 64-bit integers can be encoded as strings. In this case we drop
+ // the quotes and proceed as normal.
+ isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64
+ if isNum && strings.HasPrefix(string(inputValue), `"`) {
+ inputValue = inputValue[1 : len(inputValue)-1]
+ }
+
+ // Non-finite numbers can be encoded as strings.
+ isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
+ if isFloat {
+ if num, ok := nonFinite[string(inputValue)]; ok {
+ target.SetFloat(num)
+ return nil
+ }
+ }
+
+ // Use the encoding/json package to parse other value types.
+ return json.Unmarshal(inputValue, target.Addr().Interface())
+}
+
+// jsonProperties returns parsed proto.Properties for the field and corrects the JSONName attribute.
+func jsonProperties(f reflect.StructField, origName bool) *proto.Properties {
+ var prop proto.Properties
+ prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)
+ if origName || prop.JSONName == "" {
+ prop.JSONName = prop.OrigName
+ }
+ return &prop
+}
+
+type fieldNames struct {
+ orig, camel string
+}
+
+func acceptedJSONFieldNames(prop *proto.Properties) fieldNames {
+ opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName}
+ if prop.JSONName != "" {
+ opts.camel = prop.JSONName
+ }
+ return opts
+}
+
+// Writer wrapper inspired by https://blog.golang.org/errors-are-values
+type errWriter struct {
+ writer io.Writer
+ err error
+}
+
+func (w *errWriter) write(str string) {
+ if w.err != nil {
+ return
+ }
+ _, w.err = w.writer.Write([]byte(str))
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+//
+// Numeric keys are sorted in numeric order per
+// https://developers.google.com/protocol-buffers/docs/proto#maps.
+type mapKeys []reflect.Value
+
+func (s mapKeys) Len() int { return len(s) }
+func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s mapKeys) Less(i, j int) bool {
+ if k := s[i].Kind(); k == s[j].Kind() {
+ switch k {
+ case reflect.Int32, reflect.Int64:
+ return s[i].Int() < s[j].Int()
+ case reflect.Uint32, reflect.Uint64:
+ return s[i].Uint() < s[j].Uint()
+ }
+ }
+ return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
+}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go
new file mode 100644
index 0000000..4fdbde1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go
@@ -0,0 +1,897 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package jsonpb
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "math"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto"
+ proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+ "github.com/golang/protobuf/ptypes"
+ anypb "github.com/golang/protobuf/ptypes/any"
+ durpb "github.com/golang/protobuf/ptypes/duration"
+ stpb "github.com/golang/protobuf/ptypes/struct"
+ tspb "github.com/golang/protobuf/ptypes/timestamp"
+ wpb "github.com/golang/protobuf/ptypes/wrappers"
+)
+
+var (
+ marshaler = Marshaler{}
+
+ marshalerAllOptions = Marshaler{
+ Indent: " ",
+ }
+
+ simpleObject = &pb.Simple{
+ OInt32: proto.Int32(-32),
+ OInt64: proto.Int64(-6400000000),
+ OUint32: proto.Uint32(32),
+ OUint64: proto.Uint64(6400000000),
+ OSint32: proto.Int32(-13),
+ OSint64: proto.Int64(-2600000000),
+ OFloat: proto.Float32(3.14),
+ ODouble: proto.Float64(6.02214179e23),
+ OBool: proto.Bool(true),
+ OString: proto.String("hello \"there\""),
+ OBytes: []byte("beep boop"),
+ }
+
+ simpleObjectJSON = `{` +
+ `"oBool":true,` +
+ `"oInt32":-32,` +
+ `"oInt64":"-6400000000",` +
+ `"oUint32":32,` +
+ `"oUint64":"6400000000",` +
+ `"oSint32":-13,` +
+ `"oSint64":"-2600000000",` +
+ `"oFloat":3.14,` +
+ `"oDouble":6.02214179e+23,` +
+ `"oString":"hello \"there\"",` +
+ `"oBytes":"YmVlcCBib29w"` +
+ `}`
+
+ simpleObjectPrettyJSON = `{
+ "oBool": true,
+ "oInt32": -32,
+ "oInt64": "-6400000000",
+ "oUint32": 32,
+ "oUint64": "6400000000",
+ "oSint32": -13,
+ "oSint64": "-2600000000",
+ "oFloat": 3.14,
+ "oDouble": 6.02214179e+23,
+ "oString": "hello \"there\"",
+ "oBytes": "YmVlcCBib29w"
+}`
+
+ repeatsObject = &pb.Repeats{
+ RBool: []bool{true, false, true},
+ RInt32: []int32{-3, -4, -5},
+ RInt64: []int64{-123456789, -987654321},
+ RUint32: []uint32{1, 2, 3},
+ RUint64: []uint64{6789012345, 3456789012},
+ RSint32: []int32{-1, -2, -3},
+ RSint64: []int64{-6789012345, -3456789012},
+ RFloat: []float32{3.14, 6.28},
+ RDouble: []float64{299792458 * 1e20, 6.62606957e-34},
+ RString: []string{"happy", "days"},
+ RBytes: [][]byte{[]byte("skittles"), []byte("m&m's")},
+ }
+
+ repeatsObjectJSON = `{` +
+ `"rBool":[true,false,true],` +
+ `"rInt32":[-3,-4,-5],` +
+ `"rInt64":["-123456789","-987654321"],` +
+ `"rUint32":[1,2,3],` +
+ `"rUint64":["6789012345","3456789012"],` +
+ `"rSint32":[-1,-2,-3],` +
+ `"rSint64":["-6789012345","-3456789012"],` +
+ `"rFloat":[3.14,6.28],` +
+ `"rDouble":[2.99792458e+28,6.62606957e-34],` +
+ `"rString":["happy","days"],` +
+ `"rBytes":["c2tpdHRsZXM=","bSZtJ3M="]` +
+ `}`
+
+ repeatsObjectPrettyJSON = `{
+ "rBool": [
+ true,
+ false,
+ true
+ ],
+ "rInt32": [
+ -3,
+ -4,
+ -5
+ ],
+ "rInt64": [
+ "-123456789",
+ "-987654321"
+ ],
+ "rUint32": [
+ 1,
+ 2,
+ 3
+ ],
+ "rUint64": [
+ "6789012345",
+ "3456789012"
+ ],
+ "rSint32": [
+ -1,
+ -2,
+ -3
+ ],
+ "rSint64": [
+ "-6789012345",
+ "-3456789012"
+ ],
+ "rFloat": [
+ 3.14,
+ 6.28
+ ],
+ "rDouble": [
+ 2.99792458e+28,
+ 6.62606957e-34
+ ],
+ "rString": [
+ "happy",
+ "days"
+ ],
+ "rBytes": [
+ "c2tpdHRsZXM=",
+ "bSZtJ3M="
+ ]
+}`
+
+ innerSimple = &pb.Simple{OInt32: proto.Int32(-32)}
+ innerSimple2 = &pb.Simple{OInt64: proto.Int64(25)}
+ innerRepeats = &pb.Repeats{RString: []string{"roses", "red"}}
+ innerRepeats2 = &pb.Repeats{RString: []string{"violets", "blue"}}
+ complexObject = &pb.Widget{
+ Color: pb.Widget_GREEN.Enum(),
+ RColor: []pb.Widget_Color{pb.Widget_RED, pb.Widget_GREEN, pb.Widget_BLUE},
+ Simple: innerSimple,
+ RSimple: []*pb.Simple{innerSimple, innerSimple2},
+ Repeats: innerRepeats,
+ RRepeats: []*pb.Repeats{innerRepeats, innerRepeats2},
+ }
+
+ complexObjectJSON = `{"color":"GREEN",` +
+ `"rColor":["RED","GREEN","BLUE"],` +
+ `"simple":{"oInt32":-32},` +
+ `"rSimple":[{"oInt32":-32},{"oInt64":"25"}],` +
+ `"repeats":{"rString":["roses","red"]},` +
+ `"rRepeats":[{"rString":["roses","red"]},{"rString":["violets","blue"]}]` +
+ `}`
+
+ complexObjectPrettyJSON = `{
+ "color": "GREEN",
+ "rColor": [
+ "RED",
+ "GREEN",
+ "BLUE"
+ ],
+ "simple": {
+ "oInt32": -32
+ },
+ "rSimple": [
+ {
+ "oInt32": -32
+ },
+ {
+ "oInt64": "25"
+ }
+ ],
+ "repeats": {
+ "rString": [
+ "roses",
+ "red"
+ ]
+ },
+ "rRepeats": [
+ {
+ "rString": [
+ "roses",
+ "red"
+ ]
+ },
+ {
+ "rString": [
+ "violets",
+ "blue"
+ ]
+ }
+ ]
+}`
+
+ colorPrettyJSON = `{
+ "color": 2
+}`
+
+ colorListPrettyJSON = `{
+ "color": 1000,
+ "rColor": [
+ "RED"
+ ]
+}`
+
+ nummyPrettyJSON = `{
+ "nummy": {
+ "1": 2,
+ "3": 4
+ }
+}`
+
+ objjyPrettyJSON = `{
+ "objjy": {
+ "1": {
+ "dub": 1
+ }
+ }
+}`
+ realNumber = &pb.Real{Value: proto.Float64(3.14159265359)}
+ realNumberName = "Pi"
+ complexNumber = &pb.Complex{Imaginary: proto.Float64(0.5772156649)}
+ realNumberJSON = `{` +
+ `"value":3.14159265359,` +
+ `"[jsonpb.Complex.real_extension]":{"imaginary":0.5772156649},` +
+ `"[jsonpb.name]":"Pi"` +
+ `}`
+
+ anySimple = &pb.KnownTypes{
+ An: &anypb.Any{
+ TypeUrl: "something.example.com/jsonpb.Simple",
+ Value: []byte{
+ // &pb.Simple{OBool:true}
+ 1 << 3, 1,
+ },
+ },
+ }
+ anySimpleJSON = `{"an":{"@type":"something.example.com/jsonpb.Simple","oBool":true}}`
+ anySimplePrettyJSON = `{
+ "an": {
+ "@type": "something.example.com/jsonpb.Simple",
+ "oBool": true
+ }
+}`
+
+ anyWellKnown = &pb.KnownTypes{
+ An: &anypb.Any{
+ TypeUrl: "type.googleapis.com/google.protobuf.Duration",
+ Value: []byte{
+ // &durpb.Duration{Seconds: 1, Nanos: 212000000 }
+ 1 << 3, 1, // seconds
+ 2 << 3, 0x80, 0xba, 0x8b, 0x65, // nanos
+ },
+ },
+ }
+ anyWellKnownJSON = `{"an":{"@type":"type.googleapis.com/google.protobuf.Duration","value":"1.212s"}}`
+ anyWellKnownPrettyJSON = `{
+ "an": {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+}`
+
+ nonFinites = &pb.NonFinites{
+ FNan: proto.Float32(float32(math.NaN())),
+ FPinf: proto.Float32(float32(math.Inf(1))),
+ FNinf: proto.Float32(float32(math.Inf(-1))),
+ DNan: proto.Float64(float64(math.NaN())),
+ DPinf: proto.Float64(float64(math.Inf(1))),
+ DNinf: proto.Float64(float64(math.Inf(-1))),
+ }
+ nonFinitesJSON = `{` +
+ `"fNan":"NaN",` +
+ `"fPinf":"Infinity",` +
+ `"fNinf":"-Infinity",` +
+ `"dNan":"NaN",` +
+ `"dPinf":"Infinity",` +
+ `"dNinf":"-Infinity"` +
+ `}`
+)
+
+func init() {
+ if err := proto.SetExtension(realNumber, pb.E_Name, &realNumberName); err != nil {
+ panic(err)
+ }
+ if err := proto.SetExtension(realNumber, pb.E_Complex_RealExtension, complexNumber); err != nil {
+ panic(err)
+ }
+}
+
+var marshalingTests = []struct {
+ desc string
+ marshaler Marshaler
+ pb proto.Message
+ json string
+}{
+ {"simple flat object", marshaler, simpleObject, simpleObjectJSON},
+ {"simple pretty object", marshalerAllOptions, simpleObject, simpleObjectPrettyJSON},
+ {"non-finite floats fields object", marshaler, nonFinites, nonFinitesJSON},
+ {"repeated fields flat object", marshaler, repeatsObject, repeatsObjectJSON},
+ {"repeated fields pretty object", marshalerAllOptions, repeatsObject, repeatsObjectPrettyJSON},
+ {"nested message/enum flat object", marshaler, complexObject, complexObjectJSON},
+ {"nested message/enum pretty object", marshalerAllOptions, complexObject, complexObjectPrettyJSON},
+ {"enum-string flat object", Marshaler{},
+ &pb.Widget{Color: pb.Widget_BLUE.Enum()}, `{"color":"BLUE"}`},
+ {"enum-value pretty object", Marshaler{EnumsAsInts: true, Indent: " "},
+ &pb.Widget{Color: pb.Widget_BLUE.Enum()}, colorPrettyJSON},
+ {"unknown enum value object", marshalerAllOptions,
+ &pb.Widget{Color: pb.Widget_Color(1000).Enum(), RColor: []pb.Widget_Color{pb.Widget_RED}}, colorListPrettyJSON},
+ {"repeated proto3 enum", Marshaler{},
+ &proto3pb.Message{RFunny: []proto3pb.Message_Humour{
+ proto3pb.Message_PUNS,
+ proto3pb.Message_SLAPSTICK,
+ }},
+ `{"rFunny":["PUNS","SLAPSTICK"]}`},
+ {"repeated proto3 enum as int", Marshaler{EnumsAsInts: true},
+ &proto3pb.Message{RFunny: []proto3pb.Message_Humour{
+ proto3pb.Message_PUNS,
+ proto3pb.Message_SLAPSTICK,
+ }},
+ `{"rFunny":[1,2]}`},
+ {"empty value", marshaler, &pb.Simple3{}, `{}`},
+ {"empty value emitted", Marshaler{EmitDefaults: true}, &pb.Simple3{}, `{"dub":0}`},
+ {"empty repeated emitted", Marshaler{EmitDefaults: true}, &pb.SimpleSlice3{}, `{"slices":[]}`},
+ {"empty map emitted", Marshaler{EmitDefaults: true}, &pb.SimpleMap3{}, `{"stringy":{}}`},
+ {"nested struct null", Marshaler{EmitDefaults: true}, &pb.SimpleNull3{}, `{"simple":null}`},
+ {"map<int64, int32>", marshaler, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}, `{"nummy":{"1":2,"3":4}}`},
+ {"map<int64, int32>", marshalerAllOptions, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}, nummyPrettyJSON},
+ {"map<string, string>", marshaler,
+ &pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}},
+ `{"strry":{"\"one\"":"two","three":"four"}}`},
+ {"map<int32, Object>", marshaler,
+ &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: {Dub: 1}}}, `{"objjy":{"1":{"dub":1}}}`},
+ {"map<int32, Object>", marshalerAllOptions,
+ &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: {Dub: 1}}}, objjyPrettyJSON},
+ {"map<int64, string>", marshaler, &pb.Mappy{Buggy: map[int64]string{1234: "yup"}},
+ `{"buggy":{"1234":"yup"}}`},
+ {"map<bool, bool>", marshaler, &pb.Mappy{Booly: map[bool]bool{false: true}}, `{"booly":{"false":true}}`},
+ // TODO: This is broken.
+ //{"map<string, enum>", marshaler, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":"ROMAN"}`},
+ {"map<string, enum as int>", Marshaler{EnumsAsInts: true}, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":2}}`},
+ {"map<int32, bool>", marshaler, &pb.Mappy{S32Booly: map[int32]bool{1: true, 3: false, 10: true, 12: false}}, `{"s32booly":{"1":true,"3":false,"10":true,"12":false}}`},
+ {"map<int64, bool>", marshaler, &pb.Mappy{S64Booly: map[int64]bool{1: true, 3: false, 10: true, 12: false}}, `{"s64booly":{"1":true,"3":false,"10":true,"12":false}}`},
+ {"map<uint32, bool>", marshaler, &pb.Mappy{U32Booly: map[uint32]bool{1: true, 3: false, 10: true, 12: false}}, `{"u32booly":{"1":true,"3":false,"10":true,"12":false}}`},
+ {"map<uint64, bool>", marshaler, &pb.Mappy{U64Booly: map[uint64]bool{1: true, 3: false, 10: true, 12: false}}, `{"u64booly":{"1":true,"3":false,"10":true,"12":false}}`},
+ {"proto2 map<int64, string>", marshaler, &pb.Maps{MInt64Str: map[int64]string{213: "cat"}},
+ `{"mInt64Str":{"213":"cat"}}`},
+ {"proto2 map<bool, Object>", marshaler,
+ &pb.Maps{MBoolSimple: map[bool]*pb.Simple{true: {OInt32: proto.Int32(1)}}},
+ `{"mBoolSimple":{"true":{"oInt32":1}}}`},
+ {"oneof, not set", marshaler, &pb.MsgWithOneof{}, `{}`},
+ {"oneof, set", marshaler, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Title{"Grand Poobah"}}, `{"title":"Grand Poobah"}`},
+ {"force orig_name", Marshaler{OrigName: true}, &pb.Simple{OInt32: proto.Int32(4)},
+ `{"o_int32":4}`},
+ {"proto2 extension", marshaler, realNumber, realNumberJSON},
+ {"Any with message", marshaler, anySimple, anySimpleJSON},
+ {"Any with message and indent", marshalerAllOptions, anySimple, anySimplePrettyJSON},
+ {"Any with WKT", marshaler, anyWellKnown, anyWellKnownJSON},
+ {"Any with WKT and indent", marshalerAllOptions, anyWellKnown, anyWellKnownPrettyJSON},
+ {"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}, `{"dur":"3.000s"}`},
+ {"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 100000000, Nanos: 1}}, `{"dur":"100000000.000000001s"}`},
+ {"Struct", marshaler, &pb.KnownTypes{St: &stpb.Struct{
+ Fields: map[string]*stpb.Value{
+ "one": {Kind: &stpb.Value_StringValue{"loneliest number"}},
+ "two": {Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}},
+ },
+ }}, `{"st":{"one":"loneliest number","two":null}}`},
+ {"empty ListValue", marshaler, &pb.KnownTypes{Lv: &stpb.ListValue{}}, `{"lv":[]}`},
+ {"basic ListValue", marshaler, &pb.KnownTypes{Lv: &stpb.ListValue{Values: []*stpb.Value{
+ {Kind: &stpb.Value_StringValue{"x"}},
+ {Kind: &stpb.Value_NullValue{}},
+ {Kind: &stpb.Value_NumberValue{3}},
+ {Kind: &stpb.Value_BoolValue{true}},
+ }}}, `{"lv":["x",null,3,true]}`},
+ {"Timestamp", marshaler, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}, `{"ts":"2014-05-13T16:53:20.021Z"}`},
+ {"number Value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NumberValue{1}}}, `{"val":1}`},
+ {"null Value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}}}, `{"val":null}`},
+ {"string number value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_StringValue{"9223372036854775807"}}}, `{"val":"9223372036854775807"}`},
+ {"list of lists Value", marshaler, &pb.KnownTypes{Val: &stpb.Value{
+ Kind: &stpb.Value_ListValue{&stpb.ListValue{
+ Values: []*stpb.Value{
+ {Kind: &stpb.Value_StringValue{"x"}},
+ {Kind: &stpb.Value_ListValue{&stpb.ListValue{
+ Values: []*stpb.Value{
+ {Kind: &stpb.Value_ListValue{&stpb.ListValue{
+ Values: []*stpb.Value{{Kind: &stpb.Value_StringValue{"y"}}},
+ }}},
+ {Kind: &stpb.Value_StringValue{"z"}},
+ },
+ }}},
+ },
+ }},
+ }}, `{"val":["x",[["y"],"z"]]}`},
+
+ {"DoubleValue", marshaler, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}, `{"dbl":1.2}`},
+ {"FloatValue", marshaler, &pb.KnownTypes{Flt: &wpb.FloatValue{Value: 1.2}}, `{"flt":1.2}`},
+ {"Int64Value", marshaler, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}, `{"i64":"-3"}`},
+ {"UInt64Value", marshaler, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}, `{"u64":"3"}`},
+ {"Int32Value", marshaler, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}, `{"i32":-4}`},
+ {"UInt32Value", marshaler, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}, `{"u32":4}`},
+ {"BoolValue", marshaler, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}, `{"bool":true}`},
+ {"StringValue", marshaler, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}, `{"str":"plush"}`},
+ {"BytesValue", marshaler, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}, `{"bytes":"d293"}`},
+}
+
+func TestMarshaling(t *testing.T) {
+ for _, tt := range marshalingTests {
+ json, err := tt.marshaler.MarshalToString(tt.pb)
+ if err != nil {
+ t.Errorf("%s: marshaling error: %v", tt.desc, err)
+ } else if tt.json != json {
+ t.Errorf("%s: got [%v] want [%v]", tt.desc, json, tt.json)
+ }
+ }
+}
+
+func TestMarshalJSONPBMarshaler(t *testing.T) {
+ rawJson := `{ "foo": "bar", "baz": [0, 1, 2, 3] }`
+ msg := dynamicMessage{rawJson: rawJson}
+ str, err := new(Marshaler).MarshalToString(&msg)
+ if err != nil {
+ t.Errorf("an unexpected error occurred when marshalling JSONPBMarshaler: %v", err)
+ }
+ if str != rawJson {
+ t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", str, rawJson)
+ }
+}
+
+func TestMarshalAnyJSONPBMarshaler(t *testing.T) {
+ msg := dynamicMessage{rawJson: `{ "foo": "bar", "baz": [0, 1, 2, 3] }`}
+ a, err := ptypes.MarshalAny(&msg)
+ if err != nil {
+ t.Errorf("an unexpected error occurred when marshalling to Any: %v", err)
+ }
+ str, err := new(Marshaler).MarshalToString(a)
+ if err != nil {
+ t.Errorf("an unexpected error occurred when marshalling Any to JSON: %v", err)
+ }
+ // After custom marshaling, the message has already been round-tripped through JSON decoding/encoding,
+ // so the keys are sorted, whitespace is compacted, and the "@type" key has been added.
+ expected := `{"@type":"type.googleapis.com/` + dynamicMessageName + `","baz":[0,1,2,3],"foo":"bar"}`
+ if str != expected {
+ t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", str, expected)
+ }
+}
+
+var unmarshalingTests = []struct {
+ desc string
+ unmarshaler Unmarshaler
+ json string
+ pb proto.Message
+}{
+ {"simple flat object", Unmarshaler{}, simpleObjectJSON, simpleObject},
+ {"simple pretty object", Unmarshaler{}, simpleObjectPrettyJSON, simpleObject},
+ {"repeated fields flat object", Unmarshaler{}, repeatsObjectJSON, repeatsObject},
+ {"repeated fields pretty object", Unmarshaler{}, repeatsObjectPrettyJSON, repeatsObject},
+ {"nested message/enum flat object", Unmarshaler{}, complexObjectJSON, complexObject},
+ {"nested message/enum pretty object", Unmarshaler{}, complexObjectPrettyJSON, complexObject},
+ {"enum-string object", Unmarshaler{}, `{"color":"BLUE"}`, &pb.Widget{Color: pb.Widget_BLUE.Enum()}},
+ {"enum-value object", Unmarshaler{}, "{\n \"color\": 2\n}", &pb.Widget{Color: pb.Widget_BLUE.Enum()}},
+ {"unknown field with allowed option", Unmarshaler{AllowUnknownFields: true}, `{"unknown": "foo"}`, new(pb.Simple)},
+ {"proto3 enum string", Unmarshaler{}, `{"hilarity":"PUNS"}`, &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
+ {"proto3 enum value", Unmarshaler{}, `{"hilarity":1}`, &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
+ {"unknown enum value object",
+ Unmarshaler{},
+ "{\n \"color\": 1000,\n \"r_color\": [\n \"RED\"\n ]\n}",
+ &pb.Widget{Color: pb.Widget_Color(1000).Enum(), RColor: []pb.Widget_Color{pb.Widget_RED}}},
+ {"repeated proto3 enum", Unmarshaler{}, `{"rFunny":["PUNS","SLAPSTICK"]}`,
+ &proto3pb.Message{RFunny: []proto3pb.Message_Humour{
+ proto3pb.Message_PUNS,
+ proto3pb.Message_SLAPSTICK,
+ }}},
+ {"repeated proto3 enum as int", Unmarshaler{}, `{"rFunny":[1,2]}`,
+ &proto3pb.Message{RFunny: []proto3pb.Message_Humour{
+ proto3pb.Message_PUNS,
+ proto3pb.Message_SLAPSTICK,
+ }}},
+ {"repeated proto3 enum as mix of strings and ints", Unmarshaler{}, `{"rFunny":["PUNS",2]}`,
+ &proto3pb.Message{RFunny: []proto3pb.Message_Humour{
+ proto3pb.Message_PUNS,
+ proto3pb.Message_SLAPSTICK,
+ }}},
+ {"unquoted int64 object", Unmarshaler{}, `{"oInt64":-314}`, &pb.Simple{OInt64: proto.Int64(-314)}},
+ {"unquoted uint64 object", Unmarshaler{}, `{"oUint64":123}`, &pb.Simple{OUint64: proto.Uint64(123)}},
+ {"NaN", Unmarshaler{}, `{"oDouble":"NaN"}`, &pb.Simple{ODouble: proto.Float64(math.NaN())}},
+ {"Inf", Unmarshaler{}, `{"oFloat":"Infinity"}`, &pb.Simple{OFloat: proto.Float32(float32(math.Inf(1)))}},
+ {"-Inf", Unmarshaler{}, `{"oDouble":"-Infinity"}`, &pb.Simple{ODouble: proto.Float64(math.Inf(-1))}},
+ {"map<int64, int32>", Unmarshaler{}, `{"nummy":{"1":2,"3":4}}`, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}},
+ {"map<string, string>", Unmarshaler{}, `{"strry":{"\"one\"":"two","three":"four"}}`, &pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}}},
+ {"map<int32, Object>", Unmarshaler{}, `{"objjy":{"1":{"dub":1}}}`, &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: {Dub: 1}}}},
+ {"proto2 extension", Unmarshaler{}, realNumberJSON, realNumber},
+ {"Any with message", Unmarshaler{}, anySimpleJSON, anySimple},
+ {"Any with message and indent", Unmarshaler{}, anySimplePrettyJSON, anySimple},
+ {"Any with WKT", Unmarshaler{}, anyWellKnownJSON, anyWellKnown},
+ {"Any with WKT and indent", Unmarshaler{}, anyWellKnownPrettyJSON, anyWellKnown},
+ // TODO: This is broken.
+ //{"map<string, enum>", Unmarshaler{}, `{"enumy":{"XIV":"ROMAN"}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}},
+ {"map<string, enum as int>", Unmarshaler{}, `{"enumy":{"XIV":2}}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}},
+ {"oneof", Unmarshaler{}, `{"salary":31000}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Salary{31000}}},
+ {"oneof spec name", Unmarshaler{}, `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}},
+ {"oneof orig_name", Unmarshaler{}, `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}},
+ {"oneof spec name2", Unmarshaler{}, `{"homeAddress":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_HomeAddress{"Australia"}}},
+ {"oneof orig_name2", Unmarshaler{}, `{"home_address":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_HomeAddress{"Australia"}}},
+ {"orig_name input", Unmarshaler{}, `{"o_bool":true}`, &pb.Simple{OBool: proto.Bool(true)}},
+ {"camelName input", Unmarshaler{}, `{"oBool":true}`, &pb.Simple{OBool: proto.Bool(true)}},
+
+ {"Duration", Unmarshaler{}, `{"dur":"3.000s"}`, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}},
+ {"null Duration", Unmarshaler{}, `{"dur":null}`, &pb.KnownTypes{Dur: nil}},
+ {"Timestamp", Unmarshaler{}, `{"ts":"2014-05-13T16:53:20.021Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}},
+ {"PreEpochTimestamp", Unmarshaler{}, `{"ts":"1969-12-31T23:59:58.999999995Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: -2, Nanos: 999999995}}},
+ {"ZeroTimeTimestamp", Unmarshaler{}, `{"ts":"0001-01-01T00:00:00Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: -62135596800, Nanos: 0}}},
+ {"null Timestamp", Unmarshaler{}, `{"ts":null}`, &pb.KnownTypes{Ts: nil}},
+ {"null Struct", Unmarshaler{}, `{"st": null}`, &pb.KnownTypes{St: nil}},
+ {"empty Struct", Unmarshaler{}, `{"st": {}}`, &pb.KnownTypes{St: &stpb.Struct{}}},
+ {"basic Struct", Unmarshaler{}, `{"st": {"a": "x", "b": null, "c": 3, "d": true}}`, &pb.KnownTypes{St: &stpb.Struct{Fields: map[string]*stpb.Value{
+ "a": {Kind: &stpb.Value_StringValue{"x"}},
+ "b": {Kind: &stpb.Value_NullValue{}},
+ "c": {Kind: &stpb.Value_NumberValue{3}},
+ "d": {Kind: &stpb.Value_BoolValue{true}},
+ }}}},
+ {"nested Struct", Unmarshaler{}, `{"st": {"a": {"b": 1, "c": [{"d": true}, "f"]}}}`, &pb.KnownTypes{St: &stpb.Struct{Fields: map[string]*stpb.Value{
+ "a": {Kind: &stpb.Value_StructValue{&stpb.Struct{Fields: map[string]*stpb.Value{
+ "b": {Kind: &stpb.Value_NumberValue{1}},
+ "c": {Kind: &stpb.Value_ListValue{&stpb.ListValue{Values: []*stpb.Value{
+ {Kind: &stpb.Value_StructValue{&stpb.Struct{Fields: map[string]*stpb.Value{"d": {Kind: &stpb.Value_BoolValue{true}}}}}},
+ {Kind: &stpb.Value_StringValue{"f"}},
+ }}}},
+ }}}},
+ }}}},
+ {"null ListValue", Unmarshaler{}, `{"lv": null}`, &pb.KnownTypes{Lv: nil}},
+ {"empty ListValue", Unmarshaler{}, `{"lv": []}`, &pb.KnownTypes{Lv: &stpb.ListValue{}}},
+ {"basic ListValue", Unmarshaler{}, `{"lv": ["x", null, 3, true]}`, &pb.KnownTypes{Lv: &stpb.ListValue{Values: []*stpb.Value{
+ {Kind: &stpb.Value_StringValue{"x"}},
+ {Kind: &stpb.Value_NullValue{}},
+ {Kind: &stpb.Value_NumberValue{3}},
+ {Kind: &stpb.Value_BoolValue{true}},
+ }}}},
+ {"number Value", Unmarshaler{}, `{"val":1}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NumberValue{1}}}},
+ {"null Value", Unmarshaler{}, `{"val":null}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}}}},
+ {"bool Value", Unmarshaler{}, `{"val":true}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_BoolValue{true}}}},
+ {"string Value", Unmarshaler{}, `{"val":"x"}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_StringValue{"x"}}}},
+ {"string number value", Unmarshaler{}, `{"val":"9223372036854775807"}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_StringValue{"9223372036854775807"}}}},
+ {"list of lists Value", Unmarshaler{}, `{"val":["x", [["y"], "z"]]}`, &pb.KnownTypes{Val: &stpb.Value{
+ Kind: &stpb.Value_ListValue{&stpb.ListValue{
+ Values: []*stpb.Value{
+ {Kind: &stpb.Value_StringValue{"x"}},
+ {Kind: &stpb.Value_ListValue{&stpb.ListValue{
+ Values: []*stpb.Value{
+ {Kind: &stpb.Value_ListValue{&stpb.ListValue{
+ Values: []*stpb.Value{{Kind: &stpb.Value_StringValue{"y"}}},
+ }}},
+ {Kind: &stpb.Value_StringValue{"z"}},
+ },
+ }}},
+ },
+ }}}}},
+
+ {"DoubleValue", Unmarshaler{}, `{"dbl":1.2}`, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}},
+ {"FloatValue", Unmarshaler{}, `{"flt":1.2}`, &pb.KnownTypes{Flt: &wpb.FloatValue{Value: 1.2}}},
+ {"Int64Value", Unmarshaler{}, `{"i64":"-3"}`, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}},
+ {"UInt64Value", Unmarshaler{}, `{"u64":"3"}`, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}},
+ {"Int32Value", Unmarshaler{}, `{"i32":-4}`, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}},
+ {"UInt32Value", Unmarshaler{}, `{"u32":4}`, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}},
+ {"BoolValue", Unmarshaler{}, `{"bool":true}`, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}},
+ {"StringValue", Unmarshaler{}, `{"str":"plush"}`, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}},
+ {"BytesValue", Unmarshaler{}, `{"bytes":"d293"}`, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}},
+
+ // Ensure that `null` as a value ends up with a nil pointer instead of a [type]Value struct.
+ {"null DoubleValue", Unmarshaler{}, `{"dbl":null}`, &pb.KnownTypes{Dbl: nil}},
+ {"null FloatValue", Unmarshaler{}, `{"flt":null}`, &pb.KnownTypes{Flt: nil}},
+ {"null Int64Value", Unmarshaler{}, `{"i64":null}`, &pb.KnownTypes{I64: nil}},
+ {"null UInt64Value", Unmarshaler{}, `{"u64":null}`, &pb.KnownTypes{U64: nil}},
+ {"null Int32Value", Unmarshaler{}, `{"i32":null}`, &pb.KnownTypes{I32: nil}},
+ {"null UInt32Value", Unmarshaler{}, `{"u32":null}`, &pb.KnownTypes{U32: nil}},
+ {"null BoolValue", Unmarshaler{}, `{"bool":null}`, &pb.KnownTypes{Bool: nil}},
+ {"null StringValue", Unmarshaler{}, `{"str":null}`, &pb.KnownTypes{Str: nil}},
+ {"null BytesValue", Unmarshaler{}, `{"bytes":null}`, &pb.KnownTypes{Bytes: nil}},
+}
+
+func TestUnmarshaling(t *testing.T) {
+ for _, tt := range unmarshalingTests {
+ // Make a new instance of the type of our expected object.
+ p := reflect.New(reflect.TypeOf(tt.pb).Elem()).Interface().(proto.Message)
+
+ err := tt.unmarshaler.Unmarshal(strings.NewReader(tt.json), p)
+ if err != nil {
+ t.Errorf("%s: %v", tt.desc, err)
+ continue
+ }
+
+ // For easier diffs, compare text strings of the protos.
+ exp := proto.MarshalTextString(tt.pb)
+ act := proto.MarshalTextString(p)
+ if string(exp) != string(act) {
+ t.Errorf("%s: got [%s] want [%s]", tt.desc, act, exp)
+ }
+ }
+}
+
+func TestUnmarshalNullArray(t *testing.T) {
+ var repeats pb.Repeats
+ if err := UnmarshalString(`{"rBool":null}`, &repeats); err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(repeats, pb.Repeats{}) {
+ t.Errorf("got non-nil fields in [%#v]", repeats)
+ }
+}
+
+func TestUnmarshalNullObject(t *testing.T) {
+ var maps pb.Maps
+ if err := UnmarshalString(`{"mInt64Str":null}`, &maps); err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(maps, pb.Maps{}) {
+ t.Errorf("got non-nil fields in [%#v]", maps)
+ }
+}
+
+func TestUnmarshalNext(t *testing.T) {
+ // We only need to check against a few, not all of them.
+ tests := unmarshalingTests[:5]
+
+ // Create a buffer with many concatenated JSON objects.
+ var b bytes.Buffer
+ for _, tt := range tests {
+ b.WriteString(tt.json)
+ }
+
+ dec := json.NewDecoder(&b)
+ for _, tt := range tests {
+ // Make a new instance of the type of our expected object.
+ p := reflect.New(reflect.TypeOf(tt.pb).Elem()).Interface().(proto.Message)
+
+ err := tt.unmarshaler.UnmarshalNext(dec, p)
+ if err != nil {
+ t.Errorf("%s: %v", tt.desc, err)
+ continue
+ }
+
+ // For easier diffs, compare text strings of the protos.
+ exp := proto.MarshalTextString(tt.pb)
+ act := proto.MarshalTextString(p)
+ if string(exp) != string(act) {
+ t.Errorf("%s: got [%s] want [%s]", tt.desc, act, exp)
+ }
+ }
+
+ p := &pb.Simple{}
+ err := new(Unmarshaler).UnmarshalNext(dec, p)
+ if err != io.EOF {
+ t.Errorf("eof: got %v, expected io.EOF", err)
+ }
+}
+
+var unmarshalingShouldError = []struct {
+ desc string
+ in string
+ pb proto.Message
+}{
+ {"a value", "666", new(pb.Simple)},
+ {"gibberish", "{adskja123;l23=-=", new(pb.Simple)},
+ {"unknown field", `{"unknown": "foo"}`, new(pb.Simple)},
+ {"unknown enum name", `{"hilarity":"DAVE"}`, new(proto3pb.Message)},
+}
+
+func TestUnmarshalingBadInput(t *testing.T) {
+ for _, tt := range unmarshalingShouldError {
+ err := UnmarshalString(tt.in, tt.pb)
+ if err == nil {
+ t.Errorf("an error was expected when parsing %q instead of an object", tt.desc)
+ }
+ }
+}
+
+type funcResolver func(turl string) (proto.Message, error)
+
+func (fn funcResolver) Resolve(turl string) (proto.Message, error) {
+ return fn(turl)
+}
+
+func TestAnyWithCustomResolver(t *testing.T) {
+ var resolvedTypeUrls []string
+ resolver := funcResolver(func(turl string) (proto.Message, error) {
+ resolvedTypeUrls = append(resolvedTypeUrls, turl)
+ return new(pb.Simple), nil
+ })
+ msg := &pb.Simple{
+ OBytes: []byte{1, 2, 3, 4},
+ OBool: proto.Bool(true),
+ OString: proto.String("foobar"),
+ OInt64: proto.Int64(1020304),
+ }
+ msgBytes, err := proto.Marshal(msg)
+ if err != nil {
+ t.Errorf("an unexpected error occurred when marshaling message: %v", err)
+ }
+ // make an Any with a type URL that won't resolve without a custom resolver
+ any := &anypb.Any{
+ TypeUrl: "https://foobar.com/some.random.MessageKind",
+ Value: msgBytes,
+ }
+
+ m := Marshaler{AnyResolver: resolver}
+ js, err := m.MarshalToString(any)
+ if err != nil {
+ t.Errorf("an unexpected error occurred when marshaling any to JSON: %v", err)
+ }
+ if len(resolvedTypeUrls) != 1 {
+ t.Errorf("custom resolver was not invoked during marshaling")
+ } else if resolvedTypeUrls[0] != "https://foobar.com/some.random.MessageKind" {
+ t.Errorf("custom resolver was invoked with wrong URL: got %q, wanted %q", resolvedTypeUrls[0], "https://foobar.com/some.random.MessageKind")
+ }
+ wanted := `{"@type":"https://foobar.com/some.random.MessageKind","oBool":true,"oInt64":"1020304","oString":"foobar","oBytes":"AQIDBA=="}`
+ if js != wanted {
+ t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", js, wanted)
+ }
+
+ u := Unmarshaler{AnyResolver: resolver}
+ roundTrip := &anypb.Any{}
+ err = u.Unmarshal(bytes.NewReader([]byte(js)), roundTrip)
+ if err != nil {
+ t.Errorf("an unexpected error occurred when unmarshaling any from JSON: %v", err)
+ }
+ if len(resolvedTypeUrls) != 2 {
+ t.Errorf("custom resolver was not invoked during marshaling")
+ } else if resolvedTypeUrls[1] != "https://foobar.com/some.random.MessageKind" {
+ t.Errorf("custom resolver was invoked with wrong URL: got %q, wanted %q", resolvedTypeUrls[1], "https://foobar.com/some.random.MessageKind")
+ }
+ if !proto.Equal(any, roundTrip) {
+ t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", roundTrip, any)
+ }
+}
+
+func TestUnmarshalJSONPBUnmarshaler(t *testing.T) {
+ rawJson := `{ "foo": "bar", "baz": [0, 1, 2, 3] }`
+ var msg dynamicMessage
+ if err := Unmarshal(strings.NewReader(rawJson), &msg); err != nil {
+ t.Errorf("an unexpected error occurred when parsing into JSONPBUnmarshaler: %v", err)
+ }
+ if msg.rawJson != rawJson {
+ t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", msg.rawJson, rawJson)
+ }
+}
+
+func TestUnmarshalNullWithJSONPBUnmarshaler(t *testing.T) {
+ rawJson := `{"stringField":null}`
+ var ptrFieldMsg ptrFieldMessage
+ if err := Unmarshal(strings.NewReader(rawJson), &ptrFieldMsg); err != nil {
+ t.Errorf("unmarshal error: %v", err)
+ }
+
+ want := ptrFieldMessage{StringField: &stringField{IsSet: true, StringValue: "null"}}
+ if !proto.Equal(&ptrFieldMsg, &want) {
+ t.Errorf("unmarshal result StringField: got %v, want %v", ptrFieldMsg, want)
+ }
+}
+
+func TestUnmarshalAnyJSONPBUnmarshaler(t *testing.T) {
+ rawJson := `{ "@type": "blah.com/` + dynamicMessageName + `", "foo": "bar", "baz": [0, 1, 2, 3] }`
+ var got anypb.Any
+ if err := Unmarshal(strings.NewReader(rawJson), &got); err != nil {
+ t.Errorf("an unexpected error occurred when parsing into JSONPBUnmarshaler: %v", err)
+ }
+
+ dm := &dynamicMessage{rawJson: `{"baz":[0,1,2,3],"foo":"bar"}`}
+ var want anypb.Any
+ if b, err := proto.Marshal(dm); err != nil {
+ t.Errorf("an unexpected error occurred when marshaling message: %v", err)
+ } else {
+ want.TypeUrl = "blah.com/" + dynamicMessageName
+ want.Value = b
+ }
+
+ if !proto.Equal(&got, &want) {
+ t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", got, want)
+ }
+}
+
+const (
+ dynamicMessageName = "google.protobuf.jsonpb.testing.dynamicMessage"
+)
+
+func init() {
+ // We register the custom type below so that we can use it inside Any messages.
+ proto.RegisterType((*dynamicMessage)(nil), dynamicMessageName)
+}
+
+type ptrFieldMessage struct {
+ StringField *stringField `protobuf:"bytes,1,opt,name=stringField"`
+}
+
+func (m *ptrFieldMessage) Reset() {
+}
+
+func (m *ptrFieldMessage) String() string {
+ return m.StringField.StringValue
+}
+
+func (m *ptrFieldMessage) ProtoMessage() {
+}
+
+type stringField struct {
+ IsSet bool `protobuf:"varint,1,opt,name=isSet"`
+ StringValue string `protobuf:"bytes,2,opt,name=stringValue"`
+}
+
+func (s *stringField) Reset() {
+}
+
+func (s *stringField) String() string {
+ return s.StringValue
+}
+
+func (s *stringField) ProtoMessage() {
+}
+
+func (s *stringField) UnmarshalJSONPB(jum *Unmarshaler, js []byte) error {
+ s.IsSet = true
+ s.StringValue = string(js)
+ return nil
+}
+
+// dynamicMessage implements the proto.Message interface but is not a normal generated message type.
+// It provides implementations of JSONPBMarshaler and JSONPBUnmarshaler for JSON support.
+type dynamicMessage struct {
+ rawJson string `protobuf:"bytes,1,opt,name=rawJson"`
+}
+
+func (m *dynamicMessage) Reset() {
+ m.rawJson = "{}"
+}
+
+func (m *dynamicMessage) String() string {
+ return m.rawJson
+}
+
+func (m *dynamicMessage) ProtoMessage() {
+}
+
+func (m *dynamicMessage) MarshalJSONPB(jm *Marshaler) ([]byte, error) {
+ return []byte(m.rawJson), nil
+}
+
+func (m *dynamicMessage) UnmarshalJSONPB(jum *Unmarshaler, js []byte) error {
+ m.rawJson = string(js)
+ return nil
+}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile
new file mode 100644
index 0000000..eeda8ae
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile
@@ -0,0 +1,33 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2015 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+regenerate:
+ protoc --go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,Mgoogle/protobuf/struct.proto=github.com/golang/protobuf/ptypes/struct,Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers:. *.proto
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go
new file mode 100644
index 0000000..ebb180e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go
@@ -0,0 +1,266 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: more_test_objects.proto
+
+/*
+Package jsonpb is a generated protocol buffer package.
+
+It is generated from these files:
+ more_test_objects.proto
+ test_objects.proto
+
+It has these top-level messages:
+ Simple3
+ SimpleSlice3
+ SimpleMap3
+ SimpleNull3
+ Mappy
+ Simple
+ NonFinites
+ Repeats
+ Widget
+ Maps
+ MsgWithOneof
+ Real
+ Complex
+ KnownTypes
+*/
+package jsonpb
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Numeral int32
+
+const (
+ Numeral_UNKNOWN Numeral = 0
+ Numeral_ARABIC Numeral = 1
+ Numeral_ROMAN Numeral = 2
+)
+
+var Numeral_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "ARABIC",
+ 2: "ROMAN",
+}
+var Numeral_value = map[string]int32{
+ "UNKNOWN": 0,
+ "ARABIC": 1,
+ "ROMAN": 2,
+}
+
+func (x Numeral) String() string {
+ return proto.EnumName(Numeral_name, int32(x))
+}
+func (Numeral) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+type Simple3 struct {
+ Dub float64 `protobuf:"fixed64,1,opt,name=dub" json:"dub,omitempty"`
+}
+
+func (m *Simple3) Reset() { *m = Simple3{} }
+func (m *Simple3) String() string { return proto.CompactTextString(m) }
+func (*Simple3) ProtoMessage() {}
+func (*Simple3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *Simple3) GetDub() float64 {
+ if m != nil {
+ return m.Dub
+ }
+ return 0
+}
+
+type SimpleSlice3 struct {
+ Slices []string `protobuf:"bytes,1,rep,name=slices" json:"slices,omitempty"`
+}
+
+func (m *SimpleSlice3) Reset() { *m = SimpleSlice3{} }
+func (m *SimpleSlice3) String() string { return proto.CompactTextString(m) }
+func (*SimpleSlice3) ProtoMessage() {}
+func (*SimpleSlice3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *SimpleSlice3) GetSlices() []string {
+ if m != nil {
+ return m.Slices
+ }
+ return nil
+}
+
+type SimpleMap3 struct {
+ Stringy map[string]string `protobuf:"bytes,1,rep,name=stringy" json:"stringy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *SimpleMap3) Reset() { *m = SimpleMap3{} }
+func (m *SimpleMap3) String() string { return proto.CompactTextString(m) }
+func (*SimpleMap3) ProtoMessage() {}
+func (*SimpleMap3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *SimpleMap3) GetStringy() map[string]string {
+ if m != nil {
+ return m.Stringy
+ }
+ return nil
+}
+
+type SimpleNull3 struct {
+ Simple *Simple3 `protobuf:"bytes,1,opt,name=simple" json:"simple,omitempty"`
+}
+
+func (m *SimpleNull3) Reset() { *m = SimpleNull3{} }
+func (m *SimpleNull3) String() string { return proto.CompactTextString(m) }
+func (*SimpleNull3) ProtoMessage() {}
+func (*SimpleNull3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *SimpleNull3) GetSimple() *Simple3 {
+ if m != nil {
+ return m.Simple
+ }
+ return nil
+}
+
+type Mappy struct {
+ Nummy map[int64]int32 `protobuf:"bytes,1,rep,name=nummy" json:"nummy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
+ Strry map[string]string `protobuf:"bytes,2,rep,name=strry" json:"strry,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Objjy map[int32]*Simple3 `protobuf:"bytes,3,rep,name=objjy" json:"objjy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Buggy map[int64]string `protobuf:"bytes,4,rep,name=buggy" json:"buggy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Booly map[bool]bool `protobuf:"bytes,5,rep,name=booly" json:"booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
+ Enumy map[string]Numeral `protobuf:"bytes,6,rep,name=enumy" json:"enumy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=jsonpb.Numeral"`
+ S32Booly map[int32]bool `protobuf:"bytes,7,rep,name=s32booly" json:"s32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
+ S64Booly map[int64]bool `protobuf:"bytes,8,rep,name=s64booly" json:"s64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
+ U32Booly map[uint32]bool `protobuf:"bytes,9,rep,name=u32booly" json:"u32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
+ U64Booly map[uint64]bool `protobuf:"bytes,10,rep,name=u64booly" json:"u64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
+}
+
+func (m *Mappy) Reset() { *m = Mappy{} }
+func (m *Mappy) String() string { return proto.CompactTextString(m) }
+func (*Mappy) ProtoMessage() {}
+func (*Mappy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *Mappy) GetNummy() map[int64]int32 {
+ if m != nil {
+ return m.Nummy
+ }
+ return nil
+}
+
+func (m *Mappy) GetStrry() map[string]string {
+ if m != nil {
+ return m.Strry
+ }
+ return nil
+}
+
+func (m *Mappy) GetObjjy() map[int32]*Simple3 {
+ if m != nil {
+ return m.Objjy
+ }
+ return nil
+}
+
+func (m *Mappy) GetBuggy() map[int64]string {
+ if m != nil {
+ return m.Buggy
+ }
+ return nil
+}
+
+func (m *Mappy) GetBooly() map[bool]bool {
+ if m != nil {
+ return m.Booly
+ }
+ return nil
+}
+
+func (m *Mappy) GetEnumy() map[string]Numeral {
+ if m != nil {
+ return m.Enumy
+ }
+ return nil
+}
+
+func (m *Mappy) GetS32Booly() map[int32]bool {
+ if m != nil {
+ return m.S32Booly
+ }
+ return nil
+}
+
+func (m *Mappy) GetS64Booly() map[int64]bool {
+ if m != nil {
+ return m.S64Booly
+ }
+ return nil
+}
+
+func (m *Mappy) GetU32Booly() map[uint32]bool {
+ if m != nil {
+ return m.U32Booly
+ }
+ return nil
+}
+
+func (m *Mappy) GetU64Booly() map[uint64]bool {
+ if m != nil {
+ return m.U64Booly
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Simple3)(nil), "jsonpb.Simple3")
+ proto.RegisterType((*SimpleSlice3)(nil), "jsonpb.SimpleSlice3")
+ proto.RegisterType((*SimpleMap3)(nil), "jsonpb.SimpleMap3")
+ proto.RegisterType((*SimpleNull3)(nil), "jsonpb.SimpleNull3")
+ proto.RegisterType((*Mappy)(nil), "jsonpb.Mappy")
+ proto.RegisterEnum("jsonpb.Numeral", Numeral_name, Numeral_value)
+}
+
+func init() { proto.RegisterFile("more_test_objects.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 526 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xdd, 0x6b, 0xdb, 0x3c,
+ 0x14, 0x87, 0x5f, 0x27, 0xf5, 0xd7, 0x49, 0xfb, 0x2e, 0x88, 0xb1, 0x99, 0xf4, 0x62, 0xc5, 0xb0,
+ 0xad, 0x0c, 0xe6, 0x8b, 0x78, 0x74, 0x5d, 0x77, 0x95, 0x8e, 0x5e, 0x94, 0x11, 0x07, 0x1c, 0xc2,
+ 0x2e, 0x4b, 0xdc, 0x99, 0x90, 0xcc, 0x5f, 0xd8, 0xd6, 0xc0, 0xd7, 0xfb, 0xbb, 0x07, 0xe3, 0x48,
+ 0x72, 0x2d, 0x07, 0x85, 0x6c, 0x77, 0x52, 0x7e, 0xcf, 0xe3, 0x73, 0x24, 0x1d, 0x02, 0x2f, 0xd3,
+ 0xbc, 0x8c, 0x1f, 0xea, 0xb8, 0xaa, 0x1f, 0xf2, 0x68, 0x17, 0x3f, 0xd6, 0x95, 0x57, 0x94, 0x79,
+ 0x9d, 0x13, 0x63, 0x57, 0xe5, 0x59, 0x11, 0xb9, 0xe7, 0x60, 0x2e, 0xb7, 0x69, 0x91, 0xc4, 0x3e,
+ 0x19, 0xc3, 0xf0, 0x3b, 0x8d, 0x1c, 0xed, 0x42, 0xbb, 0xd4, 0x42, 0x5c, 0xba, 0x6f, 0xe0, 0x94,
+ 0x87, 0xcb, 0x64, 0xfb, 0x18, 0xfb, 0xe4, 0x05, 0x18, 0x15, 0xae, 0x2a, 0x47, 0xbb, 0x18, 0x5e,
+ 0xda, 0xa1, 0xd8, 0xb9, 0xbf, 0x34, 0x00, 0x0e, 0xce, 0xd7, 0x85, 0x4f, 0x3e, 0x81, 0x59, 0xd5,
+ 0xe5, 0x36, 0xdb, 0x34, 0x8c, 0x1b, 0x4d, 0x5f, 0x79, 0xbc, 0x9a, 0xd7, 0x41, 0xde, 0x92, 0x13,
+ 0x77, 0x59, 0x5d, 0x36, 0x61, 0xcb, 0x4f, 0x6e, 0xe0, 0x54, 0x0e, 0xb0, 0xa7, 0x1f, 0x71, 0xc3,
+ 0x7a, 0xb2, 0x43, 0x5c, 0x92, 0xe7, 0xa0, 0xff, 0x5c, 0x27, 0x34, 0x76, 0x06, 0xec, 0x37, 0xbe,
+ 0xb9, 0x19, 0x5c, 0x6b, 0xee, 0x15, 0x8c, 0xf8, 0xf7, 0x03, 0x9a, 0x24, 0x3e, 0x79, 0x0b, 0x46,
+ 0xc5, 0xb6, 0xcc, 0x1e, 0x4d, 0x9f, 0xf5, 0x9b, 0xf0, 0x43, 0x11, 0xbb, 0xbf, 0x2d, 0xd0, 0xe7,
+ 0xeb, 0xa2, 0x68, 0x88, 0x07, 0x7a, 0x46, 0xd3, 0xb4, 0x6d, 0xdb, 0x69, 0x0d, 0x96, 0x7a, 0x01,
+ 0x46, 0xbc, 0x5f, 0x8e, 0x21, 0x5f, 0xd5, 0x65, 0xd9, 0x38, 0x03, 0x15, 0xbf, 0xc4, 0x48, 0xf0,
+ 0x0c, 0x43, 0x3e, 0x8f, 0x76, 0xbb, 0xc6, 0x19, 0xaa, 0xf8, 0x05, 0x46, 0x82, 0x67, 0x18, 0xf2,
+ 0x11, 0xdd, 0x6c, 0x1a, 0xe7, 0x44, 0xc5, 0xdf, 0x62, 0x24, 0x78, 0x86, 0x31, 0x3e, 0xcf, 0x93,
+ 0xc6, 0xd1, 0x95, 0x3c, 0x46, 0x2d, 0x8f, 0x6b, 0xe4, 0xe3, 0x8c, 0xa6, 0x8d, 0x63, 0xa8, 0xf8,
+ 0x3b, 0x8c, 0x04, 0xcf, 0x30, 0xf2, 0x11, 0xac, 0xca, 0x9f, 0xf2, 0x12, 0x26, 0x53, 0xce, 0xf7,
+ 0x8e, 0x2c, 0x52, 0x6e, 0x3d, 0xc1, 0x4c, 0xbc, 0xfa, 0xc0, 0x45, 0x4b, 0x29, 0x8a, 0xb4, 0x15,
+ 0xc5, 0x16, 0x45, 0xda, 0x56, 0xb4, 0x55, 0xe2, 0xaa, 0x5f, 0x91, 0x4a, 0x15, 0x69, 0x5b, 0x11,
+ 0x94, 0x62, 0xbf, 0x62, 0x0b, 0x4f, 0xae, 0x01, 0xba, 0x87, 0x96, 0xe7, 0x6f, 0xa8, 0x98, 0x3f,
+ 0x5d, 0x9a, 0x3f, 0x34, 0xbb, 0x27, 0xff, 0x97, 0xc9, 0x9d, 0xdc, 0x03, 0x74, 0x8f, 0x2f, 0x9b,
+ 0x3a, 0x37, 0x5f, 0xcb, 0xa6, 0x62, 0x92, 0xfb, 0x4d, 0x74, 0x73, 0x71, 0xac, 0x7d, 0x7b, 0xdf,
+ 0x7c, 0xba, 0x10, 0xd9, 0xb4, 0x14, 0xa6, 0xb5, 0xd7, 0x7e, 0x37, 0x2b, 0x8a, 0x83, 0xf7, 0xda,
+ 0xff, 0xbf, 0x6b, 0x3f, 0xa0, 0x69, 0x5c, 0xae, 0x13, 0xf9, 0x53, 0x9f, 0xe1, 0xac, 0x37, 0x43,
+ 0x8a, 0xcb, 0x38, 0xdc, 0x07, 0xca, 0xf2, 0xab, 0x1e, 0x3b, 0xfe, 0xbe, 0xbc, 0x3a, 0x54, 0xf9,
+ 0xec, 0x6f, 0xe4, 0x43, 0x95, 0x4f, 0x8e, 0xc8, 0xef, 0xde, 0x83, 0x29, 0x6e, 0x82, 0x8c, 0xc0,
+ 0x5c, 0x05, 0x5f, 0x83, 0xc5, 0xb7, 0x60, 0xfc, 0x1f, 0x01, 0x30, 0x66, 0xe1, 0xec, 0xf6, 0xfe,
+ 0xcb, 0x58, 0x23, 0x36, 0xe8, 0xe1, 0x62, 0x3e, 0x0b, 0xc6, 0x83, 0xc8, 0x60, 0x7f, 0xe0, 0xfe,
+ 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x34, 0xaf, 0xdb, 0x05, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto
new file mode 100644
index 0000000..d254fa5
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto
@@ -0,0 +1,69 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package jsonpb;
+
+message Simple3 {
+ double dub = 1;
+}
+
+message SimpleSlice3 {
+ repeated string slices = 1;
+}
+
+message SimpleMap3 {
+ map<string,string> stringy = 1;
+}
+
+message SimpleNull3 {
+ Simple3 simple = 1;
+}
+
+enum Numeral {
+ UNKNOWN = 0;
+ ARABIC = 1;
+ ROMAN = 2;
+}
+
+message Mappy {
+ map<int64, int32> nummy = 1;
+ map<string, string> strry = 2;
+ map<int32, Simple3> objjy = 3;
+ map<int64, string> buggy = 4;
+ map<bool, bool> booly = 5;
+ map<string, Numeral> enumy = 6;
+ map<int32, bool> s32booly = 7;
+ map<int64, bool> s64booly = 8;
+ map<uint32, bool> u32booly = 9;
+ map<uint64, bool> u64booly = 10;
+}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go
new file mode 100644
index 0000000..d413d74
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go
@@ -0,0 +1,852 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: test_objects.proto
+
+package jsonpb
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/golang/protobuf/ptypes/any"
+import google_protobuf1 "github.com/golang/protobuf/ptypes/duration"
+import google_protobuf2 "github.com/golang/protobuf/ptypes/struct"
+import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp"
+import google_protobuf4 "github.com/golang/protobuf/ptypes/wrappers"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type Widget_Color int32
+
+const (
+ Widget_RED Widget_Color = 0
+ Widget_GREEN Widget_Color = 1
+ Widget_BLUE Widget_Color = 2
+)
+
+var Widget_Color_name = map[int32]string{
+ 0: "RED",
+ 1: "GREEN",
+ 2: "BLUE",
+}
+var Widget_Color_value = map[string]int32{
+ "RED": 0,
+ "GREEN": 1,
+ "BLUE": 2,
+}
+
+func (x Widget_Color) Enum() *Widget_Color {
+ p := new(Widget_Color)
+ *p = x
+ return p
+}
+func (x Widget_Color) String() string {
+ return proto.EnumName(Widget_Color_name, int32(x))
+}
+func (x *Widget_Color) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Widget_Color_value, data, "Widget_Color")
+ if err != nil {
+ return err
+ }
+ *x = Widget_Color(value)
+ return nil
+}
+func (Widget_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{3, 0} }
+
+// Test message for holding primitive types.
+type Simple struct {
+ OBool *bool `protobuf:"varint,1,opt,name=o_bool,json=oBool" json:"o_bool,omitempty"`
+ OInt32 *int32 `protobuf:"varint,2,opt,name=o_int32,json=oInt32" json:"o_int32,omitempty"`
+ OInt64 *int64 `protobuf:"varint,3,opt,name=o_int64,json=oInt64" json:"o_int64,omitempty"`
+ OUint32 *uint32 `protobuf:"varint,4,opt,name=o_uint32,json=oUint32" json:"o_uint32,omitempty"`
+ OUint64 *uint64 `protobuf:"varint,5,opt,name=o_uint64,json=oUint64" json:"o_uint64,omitempty"`
+ OSint32 *int32 `protobuf:"zigzag32,6,opt,name=o_sint32,json=oSint32" json:"o_sint32,omitempty"`
+ OSint64 *int64 `protobuf:"zigzag64,7,opt,name=o_sint64,json=oSint64" json:"o_sint64,omitempty"`
+ OFloat *float32 `protobuf:"fixed32,8,opt,name=o_float,json=oFloat" json:"o_float,omitempty"`
+ ODouble *float64 `protobuf:"fixed64,9,opt,name=o_double,json=oDouble" json:"o_double,omitempty"`
+ OString *string `protobuf:"bytes,10,opt,name=o_string,json=oString" json:"o_string,omitempty"`
+ OBytes []byte `protobuf:"bytes,11,opt,name=o_bytes,json=oBytes" json:"o_bytes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Simple) Reset() { *m = Simple{} }
+func (m *Simple) String() string { return proto.CompactTextString(m) }
+func (*Simple) ProtoMessage() {}
+func (*Simple) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
+
+func (m *Simple) GetOBool() bool {
+ if m != nil && m.OBool != nil {
+ return *m.OBool
+ }
+ return false
+}
+
+func (m *Simple) GetOInt32() int32 {
+ if m != nil && m.OInt32 != nil {
+ return *m.OInt32
+ }
+ return 0
+}
+
+func (m *Simple) GetOInt64() int64 {
+ if m != nil && m.OInt64 != nil {
+ return *m.OInt64
+ }
+ return 0
+}
+
+func (m *Simple) GetOUint32() uint32 {
+ if m != nil && m.OUint32 != nil {
+ return *m.OUint32
+ }
+ return 0
+}
+
+func (m *Simple) GetOUint64() uint64 {
+ if m != nil && m.OUint64 != nil {
+ return *m.OUint64
+ }
+ return 0
+}
+
+func (m *Simple) GetOSint32() int32 {
+ if m != nil && m.OSint32 != nil {
+ return *m.OSint32
+ }
+ return 0
+}
+
+func (m *Simple) GetOSint64() int64 {
+ if m != nil && m.OSint64 != nil {
+ return *m.OSint64
+ }
+ return 0
+}
+
+func (m *Simple) GetOFloat() float32 {
+ if m != nil && m.OFloat != nil {
+ return *m.OFloat
+ }
+ return 0
+}
+
+func (m *Simple) GetODouble() float64 {
+ if m != nil && m.ODouble != nil {
+ return *m.ODouble
+ }
+ return 0
+}
+
+func (m *Simple) GetOString() string {
+ if m != nil && m.OString != nil {
+ return *m.OString
+ }
+ return ""
+}
+
+func (m *Simple) GetOBytes() []byte {
+ if m != nil {
+ return m.OBytes
+ }
+ return nil
+}
+
+// Test message for holding special non-finite primitives.
+type NonFinites struct {
+ FNan *float32 `protobuf:"fixed32,1,opt,name=f_nan,json=fNan" json:"f_nan,omitempty"`
+ FPinf *float32 `protobuf:"fixed32,2,opt,name=f_pinf,json=fPinf" json:"f_pinf,omitempty"`
+ FNinf *float32 `protobuf:"fixed32,3,opt,name=f_ninf,json=fNinf" json:"f_ninf,omitempty"`
+ DNan *float64 `protobuf:"fixed64,4,opt,name=d_nan,json=dNan" json:"d_nan,omitempty"`
+ DPinf *float64 `protobuf:"fixed64,5,opt,name=d_pinf,json=dPinf" json:"d_pinf,omitempty"`
+ DNinf *float64 `protobuf:"fixed64,6,opt,name=d_ninf,json=dNinf" json:"d_ninf,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NonFinites) Reset() { *m = NonFinites{} }
+func (m *NonFinites) String() string { return proto.CompactTextString(m) }
+func (*NonFinites) ProtoMessage() {}
+func (*NonFinites) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
+
+func (m *NonFinites) GetFNan() float32 {
+ if m != nil && m.FNan != nil {
+ return *m.FNan
+ }
+ return 0
+}
+
+func (m *NonFinites) GetFPinf() float32 {
+ if m != nil && m.FPinf != nil {
+ return *m.FPinf
+ }
+ return 0
+}
+
+func (m *NonFinites) GetFNinf() float32 {
+ if m != nil && m.FNinf != nil {
+ return *m.FNinf
+ }
+ return 0
+}
+
+func (m *NonFinites) GetDNan() float64 {
+ if m != nil && m.DNan != nil {
+ return *m.DNan
+ }
+ return 0
+}
+
+func (m *NonFinites) GetDPinf() float64 {
+ if m != nil && m.DPinf != nil {
+ return *m.DPinf
+ }
+ return 0
+}
+
+func (m *NonFinites) GetDNinf() float64 {
+ if m != nil && m.DNinf != nil {
+ return *m.DNinf
+ }
+ return 0
+}
+
+// Test message for holding repeated primitives.
+type Repeats struct {
+ RBool []bool `protobuf:"varint,1,rep,name=r_bool,json=rBool" json:"r_bool,omitempty"`
+ RInt32 []int32 `protobuf:"varint,2,rep,name=r_int32,json=rInt32" json:"r_int32,omitempty"`
+ RInt64 []int64 `protobuf:"varint,3,rep,name=r_int64,json=rInt64" json:"r_int64,omitempty"`
+ RUint32 []uint32 `protobuf:"varint,4,rep,name=r_uint32,json=rUint32" json:"r_uint32,omitempty"`
+ RUint64 []uint64 `protobuf:"varint,5,rep,name=r_uint64,json=rUint64" json:"r_uint64,omitempty"`
+ RSint32 []int32 `protobuf:"zigzag32,6,rep,name=r_sint32,json=rSint32" json:"r_sint32,omitempty"`
+ RSint64 []int64 `protobuf:"zigzag64,7,rep,name=r_sint64,json=rSint64" json:"r_sint64,omitempty"`
+ RFloat []float32 `protobuf:"fixed32,8,rep,name=r_float,json=rFloat" json:"r_float,omitempty"`
+ RDouble []float64 `protobuf:"fixed64,9,rep,name=r_double,json=rDouble" json:"r_double,omitempty"`
+ RString []string `protobuf:"bytes,10,rep,name=r_string,json=rString" json:"r_string,omitempty"`
+ RBytes [][]byte `protobuf:"bytes,11,rep,name=r_bytes,json=rBytes" json:"r_bytes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Repeats) Reset() { *m = Repeats{} }
+func (m *Repeats) String() string { return proto.CompactTextString(m) }
+func (*Repeats) ProtoMessage() {}
+func (*Repeats) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
+
+func (m *Repeats) GetRBool() []bool {
+ if m != nil {
+ return m.RBool
+ }
+ return nil
+}
+
+func (m *Repeats) GetRInt32() []int32 {
+ if m != nil {
+ return m.RInt32
+ }
+ return nil
+}
+
+func (m *Repeats) GetRInt64() []int64 {
+ if m != nil {
+ return m.RInt64
+ }
+ return nil
+}
+
+func (m *Repeats) GetRUint32() []uint32 {
+ if m != nil {
+ return m.RUint32
+ }
+ return nil
+}
+
+func (m *Repeats) GetRUint64() []uint64 {
+ if m != nil {
+ return m.RUint64
+ }
+ return nil
+}
+
+func (m *Repeats) GetRSint32() []int32 {
+ if m != nil {
+ return m.RSint32
+ }
+ return nil
+}
+
+func (m *Repeats) GetRSint64() []int64 {
+ if m != nil {
+ return m.RSint64
+ }
+ return nil
+}
+
+func (m *Repeats) GetRFloat() []float32 {
+ if m != nil {
+ return m.RFloat
+ }
+ return nil
+}
+
+func (m *Repeats) GetRDouble() []float64 {
+ if m != nil {
+ return m.RDouble
+ }
+ return nil
+}
+
+func (m *Repeats) GetRString() []string {
+ if m != nil {
+ return m.RString
+ }
+ return nil
+}
+
+func (m *Repeats) GetRBytes() [][]byte {
+ if m != nil {
+ return m.RBytes
+ }
+ return nil
+}
+
+// Test message for holding enums and nested messages.
+type Widget struct {
+ Color *Widget_Color `protobuf:"varint,1,opt,name=color,enum=jsonpb.Widget_Color" json:"color,omitempty"`
+ RColor []Widget_Color `protobuf:"varint,2,rep,name=r_color,json=rColor,enum=jsonpb.Widget_Color" json:"r_color,omitempty"`
+ Simple *Simple `protobuf:"bytes,10,opt,name=simple" json:"simple,omitempty"`
+ RSimple []*Simple `protobuf:"bytes,11,rep,name=r_simple,json=rSimple" json:"r_simple,omitempty"`
+ Repeats *Repeats `protobuf:"bytes,20,opt,name=repeats" json:"repeats,omitempty"`
+ RRepeats []*Repeats `protobuf:"bytes,21,rep,name=r_repeats,json=rRepeats" json:"r_repeats,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Widget) Reset() { *m = Widget{} }
+func (m *Widget) String() string { return proto.CompactTextString(m) }
+func (*Widget) ProtoMessage() {}
+func (*Widget) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} }
+
+func (m *Widget) GetColor() Widget_Color {
+ if m != nil && m.Color != nil {
+ return *m.Color
+ }
+ return Widget_RED
+}
+
+func (m *Widget) GetRColor() []Widget_Color {
+ if m != nil {
+ return m.RColor
+ }
+ return nil
+}
+
+func (m *Widget) GetSimple() *Simple {
+ if m != nil {
+ return m.Simple
+ }
+ return nil
+}
+
+func (m *Widget) GetRSimple() []*Simple {
+ if m != nil {
+ return m.RSimple
+ }
+ return nil
+}
+
+func (m *Widget) GetRepeats() *Repeats {
+ if m != nil {
+ return m.Repeats
+ }
+ return nil
+}
+
+func (m *Widget) GetRRepeats() []*Repeats {
+ if m != nil {
+ return m.RRepeats
+ }
+ return nil
+}
+
+type Maps struct {
+ MInt64Str map[int64]string `protobuf:"bytes,1,rep,name=m_int64_str,json=mInt64Str" json:"m_int64_str,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ MBoolSimple map[bool]*Simple `protobuf:"bytes,2,rep,name=m_bool_simple,json=mBoolSimple" json:"m_bool_simple,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Maps) Reset() { *m = Maps{} }
+func (m *Maps) String() string { return proto.CompactTextString(m) }
+func (*Maps) ProtoMessage() {}
+func (*Maps) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} }
+
+func (m *Maps) GetMInt64Str() map[int64]string {
+ if m != nil {
+ return m.MInt64Str
+ }
+ return nil
+}
+
+func (m *Maps) GetMBoolSimple() map[bool]*Simple {
+ if m != nil {
+ return m.MBoolSimple
+ }
+ return nil
+}
+
+type MsgWithOneof struct {
+ // Types that are valid to be assigned to Union:
+ // *MsgWithOneof_Title
+ // *MsgWithOneof_Salary
+ // *MsgWithOneof_Country
+ // *MsgWithOneof_HomeAddress
+ Union isMsgWithOneof_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MsgWithOneof) Reset() { *m = MsgWithOneof{} }
+func (m *MsgWithOneof) String() string { return proto.CompactTextString(m) }
+func (*MsgWithOneof) ProtoMessage() {}
+func (*MsgWithOneof) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} }
+
+type isMsgWithOneof_Union interface {
+ isMsgWithOneof_Union()
+}
+
+type MsgWithOneof_Title struct {
+ Title string `protobuf:"bytes,1,opt,name=title,oneof"`
+}
+type MsgWithOneof_Salary struct {
+ Salary int64 `protobuf:"varint,2,opt,name=salary,oneof"`
+}
+type MsgWithOneof_Country struct {
+ Country string `protobuf:"bytes,3,opt,name=Country,oneof"`
+}
+type MsgWithOneof_HomeAddress struct {
+ HomeAddress string `protobuf:"bytes,4,opt,name=home_address,json=homeAddress,oneof"`
+}
+
+func (*MsgWithOneof_Title) isMsgWithOneof_Union() {}
+func (*MsgWithOneof_Salary) isMsgWithOneof_Union() {}
+func (*MsgWithOneof_Country) isMsgWithOneof_Union() {}
+func (*MsgWithOneof_HomeAddress) isMsgWithOneof_Union() {}
+
+func (m *MsgWithOneof) GetUnion() isMsgWithOneof_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+}
+
+func (m *MsgWithOneof) GetTitle() string {
+ if x, ok := m.GetUnion().(*MsgWithOneof_Title); ok {
+ return x.Title
+ }
+ return ""
+}
+
+func (m *MsgWithOneof) GetSalary() int64 {
+ if x, ok := m.GetUnion().(*MsgWithOneof_Salary); ok {
+ return x.Salary
+ }
+ return 0
+}
+
+func (m *MsgWithOneof) GetCountry() string {
+ if x, ok := m.GetUnion().(*MsgWithOneof_Country); ok {
+ return x.Country
+ }
+ return ""
+}
+
+func (m *MsgWithOneof) GetHomeAddress() string {
+ if x, ok := m.GetUnion().(*MsgWithOneof_HomeAddress); ok {
+ return x.HomeAddress
+ }
+ return ""
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*MsgWithOneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _MsgWithOneof_OneofMarshaler, _MsgWithOneof_OneofUnmarshaler, _MsgWithOneof_OneofSizer, []interface{}{
+ (*MsgWithOneof_Title)(nil),
+ (*MsgWithOneof_Salary)(nil),
+ (*MsgWithOneof_Country)(nil),
+ (*MsgWithOneof_HomeAddress)(nil),
+ }
+}
+
+func _MsgWithOneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*MsgWithOneof)
+ // union
+ switch x := m.Union.(type) {
+ case *MsgWithOneof_Title:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.Title)
+ case *MsgWithOneof_Salary:
+ b.EncodeVarint(2<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Salary))
+ case *MsgWithOneof_Country:
+ b.EncodeVarint(3<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.Country)
+ case *MsgWithOneof_HomeAddress:
+ b.EncodeVarint(4<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.HomeAddress)
+ case nil:
+ default:
+ return fmt.Errorf("MsgWithOneof.Union has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _MsgWithOneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*MsgWithOneof)
+ switch tag {
+ case 1: // union.title
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Union = &MsgWithOneof_Title{x}
+ return true, err
+ case 2: // union.salary
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &MsgWithOneof_Salary{int64(x)}
+ return true, err
+ case 3: // union.Country
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Union = &MsgWithOneof_Country{x}
+ return true, err
+ case 4: // union.home_address
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Union = &MsgWithOneof_HomeAddress{x}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _MsgWithOneof_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*MsgWithOneof)
+ // union
+ switch x := m.Union.(type) {
+ case *MsgWithOneof_Title:
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.Title)))
+ n += len(x.Title)
+ case *MsgWithOneof_Salary:
+ n += proto.SizeVarint(2<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Salary))
+ case *MsgWithOneof_Country:
+ n += proto.SizeVarint(3<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.Country)))
+ n += len(x.Country)
+ case *MsgWithOneof_HomeAddress:
+ n += proto.SizeVarint(4<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.HomeAddress)))
+ n += len(x.HomeAddress)
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+type Real struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Real) Reset() { *m = Real{} }
+func (m *Real) String() string { return proto.CompactTextString(m) }
+func (*Real) ProtoMessage() {}
+func (*Real) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} }
+
+var extRange_Real = []proto.ExtensionRange{
+ {100, 536870911},
+}
+
+func (*Real) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_Real
+}
+
+func (m *Real) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Complex struct {
+ Imaginary *float64 `protobuf:"fixed64,1,opt,name=imaginary" json:"imaginary,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Complex) Reset() { *m = Complex{} }
+func (m *Complex) String() string { return proto.CompactTextString(m) }
+func (*Complex) ProtoMessage() {}
+func (*Complex) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} }
+
+var extRange_Complex = []proto.ExtensionRange{
+ {100, 536870911},
+}
+
+func (*Complex) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_Complex
+}
+
+func (m *Complex) GetImaginary() float64 {
+ if m != nil && m.Imaginary != nil {
+ return *m.Imaginary
+ }
+ return 0
+}
+
+var E_Complex_RealExtension = &proto.ExtensionDesc{
+ ExtendedType: (*Real)(nil),
+ ExtensionType: (*Complex)(nil),
+ Field: 123,
+ Name: "jsonpb.Complex.real_extension",
+ Tag: "bytes,123,opt,name=real_extension,json=realExtension",
+ Filename: "test_objects.proto",
+}
+
+type KnownTypes struct {
+ An *google_protobuf.Any `protobuf:"bytes,14,opt,name=an" json:"an,omitempty"`
+ Dur *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=dur" json:"dur,omitempty"`
+ St *google_protobuf2.Struct `protobuf:"bytes,12,opt,name=st" json:"st,omitempty"`
+ Ts *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=ts" json:"ts,omitempty"`
+ Lv *google_protobuf2.ListValue `protobuf:"bytes,15,opt,name=lv" json:"lv,omitempty"`
+ Val *google_protobuf2.Value `protobuf:"bytes,16,opt,name=val" json:"val,omitempty"`
+ Dbl *google_protobuf4.DoubleValue `protobuf:"bytes,3,opt,name=dbl" json:"dbl,omitempty"`
+ Flt *google_protobuf4.FloatValue `protobuf:"bytes,4,opt,name=flt" json:"flt,omitempty"`
+ I64 *google_protobuf4.Int64Value `protobuf:"bytes,5,opt,name=i64" json:"i64,omitempty"`
+ U64 *google_protobuf4.UInt64Value `protobuf:"bytes,6,opt,name=u64" json:"u64,omitempty"`
+ I32 *google_protobuf4.Int32Value `protobuf:"bytes,7,opt,name=i32" json:"i32,omitempty"`
+ U32 *google_protobuf4.UInt32Value `protobuf:"bytes,8,opt,name=u32" json:"u32,omitempty"`
+ Bool *google_protobuf4.BoolValue `protobuf:"bytes,9,opt,name=bool" json:"bool,omitempty"`
+ Str *google_protobuf4.StringValue `protobuf:"bytes,10,opt,name=str" json:"str,omitempty"`
+ Bytes *google_protobuf4.BytesValue `protobuf:"bytes,11,opt,name=bytes" json:"bytes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *KnownTypes) Reset() { *m = KnownTypes{} }
+func (m *KnownTypes) String() string { return proto.CompactTextString(m) }
+func (*KnownTypes) ProtoMessage() {}
+func (*KnownTypes) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} }
+
+func (m *KnownTypes) GetAn() *google_protobuf.Any {
+ if m != nil {
+ return m.An
+ }
+ return nil
+}
+
+func (m *KnownTypes) GetDur() *google_protobuf1.Duration {
+ if m != nil {
+ return m.Dur
+ }
+ return nil
+}
+
+func (m *KnownTypes) GetSt() *google_protobuf2.Struct {
+ if m != nil {
+ return m.St
+ }
+ return nil
+}
+
+func (m *KnownTypes) GetTs() *google_protobuf3.Timestamp {
+ if m != nil {
+ return m.Ts
+ }
+ return nil
+}
+
+func (m *KnownTypes) GetLv() *google_protobuf2.ListValue {
+ if m != nil {
+ return m.Lv
+ }
+ return nil
+}
+
+func (m *KnownTypes) GetVal() *google_protobuf2.Value {
+ if m != nil {
+ return m.Val
+ }
+ return nil
+}
+
+func (m *KnownTypes) GetDbl() *google_protobuf4.DoubleValue {
+ if m != nil {
+ return m.Dbl
+ }
+ return nil
+}
+
+func (m *KnownTypes) GetFlt() *google_protobuf4.FloatValue {
+ if m != nil {
+ return m.Flt
+ }
+ return nil
+}
+
+func (m *KnownTypes) GetI64() *google_protobuf4.Int64Value {
+ if m != nil {
+ return m.I64
+ }
+ return nil
+}
+
+func (m *KnownTypes) GetU64() *google_protobuf4.UInt64Value {
+ if m != nil {
+ return m.U64
+ }
+ return nil
+}
+
+func (m *KnownTypes) GetI32() *google_protobuf4.Int32Value {
+ if m != nil {
+ return m.I32
+ }
+ return nil
+}
+
+func (m *KnownTypes) GetU32() *google_protobuf4.UInt32Value {
+ if m != nil {
+ return m.U32
+ }
+ return nil
+}
+
+func (m *KnownTypes) GetBool() *google_protobuf4.BoolValue {
+ if m != nil {
+ return m.Bool
+ }
+ return nil
+}
+
+func (m *KnownTypes) GetStr() *google_protobuf4.StringValue {
+ if m != nil {
+ return m.Str
+ }
+ return nil
+}
+
+func (m *KnownTypes) GetBytes() *google_protobuf4.BytesValue {
+ if m != nil {
+ return m.Bytes
+ }
+ return nil
+}
+
+var E_Name = &proto.ExtensionDesc{
+ ExtendedType: (*Real)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 124,
+ Name: "jsonpb.name",
+ Tag: "bytes,124,opt,name=name",
+ Filename: "test_objects.proto",
+}
+
+func init() {
+ proto.RegisterType((*Simple)(nil), "jsonpb.Simple")
+ proto.RegisterType((*NonFinites)(nil), "jsonpb.NonFinites")
+ proto.RegisterType((*Repeats)(nil), "jsonpb.Repeats")
+ proto.RegisterType((*Widget)(nil), "jsonpb.Widget")
+ proto.RegisterType((*Maps)(nil), "jsonpb.Maps")
+ proto.RegisterType((*MsgWithOneof)(nil), "jsonpb.MsgWithOneof")
+ proto.RegisterType((*Real)(nil), "jsonpb.Real")
+ proto.RegisterType((*Complex)(nil), "jsonpb.Complex")
+ proto.RegisterType((*KnownTypes)(nil), "jsonpb.KnownTypes")
+ proto.RegisterEnum("jsonpb.Widget_Color", Widget_Color_name, Widget_Color_value)
+ proto.RegisterExtension(E_Complex_RealExtension)
+ proto.RegisterExtension(E_Name)
+}
+
+func init() { proto.RegisterFile("test_objects.proto", fileDescriptor1) }
+
+var fileDescriptor1 = []byte{
+ // 1160 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x95, 0x41, 0x73, 0xdb, 0x44,
+ 0x14, 0xc7, 0x23, 0xc9, 0x92, 0xed, 0x75, 0x92, 0x9a, 0x6d, 0xda, 0x2a, 0x26, 0x80, 0xc6, 0x94,
+ 0x22, 0x0a, 0x75, 0x07, 0xc7, 0xe3, 0x61, 0x0a, 0x97, 0xa4, 0x71, 0x29, 0x43, 0x13, 0x98, 0x4d,
+ 0x43, 0x8f, 0x1e, 0x39, 0x5a, 0xbb, 0x2a, 0xf2, 0xae, 0x67, 0x77, 0x95, 0xd4, 0x03, 0x87, 0x9c,
+ 0x39, 0x32, 0x7c, 0x05, 0xf8, 0x08, 0x1c, 0xf8, 0x74, 0xcc, 0xdb, 0x95, 0xac, 0xc4, 0x8e, 0x4f,
+ 0xf1, 0x7b, 0xef, 0xff, 0xfe, 0x59, 0xed, 0x6f, 0x77, 0x1f, 0xc2, 0x8a, 0x4a, 0x35, 0xe4, 0xa3,
+ 0x77, 0xf4, 0x5c, 0xc9, 0xce, 0x4c, 0x70, 0xc5, 0xb1, 0xf7, 0x4e, 0x72, 0x36, 0x1b, 0xb5, 0x76,
+ 0x27, 0x9c, 0x4f, 0x52, 0xfa, 0x54, 0x67, 0x47, 0xd9, 0xf8, 0x69, 0xc4, 0xe6, 0x46, 0xd2, 0xfa,
+ 0x78, 0xb9, 0x14, 0x67, 0x22, 0x52, 0x09, 0x67, 0x79, 0x7d, 0x6f, 0xb9, 0x2e, 0x95, 0xc8, 0xce,
+ 0x55, 0x5e, 0xfd, 0x64, 0xb9, 0xaa, 0x92, 0x29, 0x95, 0x2a, 0x9a, 0xce, 0xd6, 0xd9, 0x5f, 0x8a,
+ 0x68, 0x36, 0xa3, 0x22, 0x5f, 0x61, 0xfb, 0x6f, 0x1b, 0x79, 0xa7, 0xc9, 0x74, 0x96, 0x52, 0x7c,
+ 0x0f, 0x79, 0x7c, 0x38, 0xe2, 0x3c, 0xf5, 0xad, 0xc0, 0x0a, 0x6b, 0xc4, 0xe5, 0x87, 0x9c, 0xa7,
+ 0xf8, 0x01, 0xaa, 0xf2, 0x61, 0xc2, 0xd4, 0x7e, 0xd7, 0xb7, 0x03, 0x2b, 0x74, 0x89, 0xc7, 0x7f,
+ 0x80, 0x68, 0x51, 0xe8, 0xf7, 0x7c, 0x27, 0xb0, 0x42, 0xc7, 0x14, 0xfa, 0x3d, 0xbc, 0x8b, 0x6a,
+ 0x7c, 0x98, 0x99, 0x96, 0x4a, 0x60, 0x85, 0x5b, 0xa4, 0xca, 0xcf, 0x74, 0x58, 0x96, 0xfa, 0x3d,
+ 0xdf, 0x0d, 0xac, 0xb0, 0x92, 0x97, 0x8a, 0x2e, 0x69, 0xba, 0xbc, 0xc0, 0x0a, 0x3f, 0x20, 0x55,
+ 0x7e, 0x7a, 0xad, 0x4b, 0x9a, 0xae, 0x6a, 0x60, 0x85, 0x38, 0x2f, 0xf5, 0x7b, 0x66, 0x11, 0xe3,
+ 0x94, 0x47, 0xca, 0xaf, 0x05, 0x56, 0x68, 0x13, 0x8f, 0xbf, 0x80, 0xc8, 0xf4, 0xc4, 0x3c, 0x1b,
+ 0xa5, 0xd4, 0xaf, 0x07, 0x56, 0x68, 0x91, 0x2a, 0x3f, 0xd2, 0x61, 0x6e, 0xa7, 0x44, 0xc2, 0x26,
+ 0x3e, 0x0a, 0xac, 0xb0, 0x0e, 0x76, 0x3a, 0x34, 0x76, 0xa3, 0xb9, 0xa2, 0xd2, 0x6f, 0x04, 0x56,
+ 0xb8, 0x49, 0x3c, 0x7e, 0x08, 0x51, 0xfb, 0x4f, 0x0b, 0xa1, 0x13, 0xce, 0x5e, 0x24, 0x2c, 0x51,
+ 0x54, 0xe2, 0xbb, 0xc8, 0x1d, 0x0f, 0x59, 0xc4, 0xf4, 0x56, 0xd9, 0xa4, 0x32, 0x3e, 0x89, 0x18,
+ 0x6c, 0xe0, 0x78, 0x38, 0x4b, 0xd8, 0x58, 0x6f, 0x94, 0x4d, 0xdc, 0xf1, 0xcf, 0x09, 0x1b, 0x9b,
+ 0x34, 0x83, 0xb4, 0x93, 0xa7, 0x4f, 0x20, 0x7d, 0x17, 0xb9, 0xb1, 0xb6, 0xa8, 0xe8, 0xd5, 0x55,
+ 0xe2, 0xdc, 0x22, 0x36, 0x16, 0xae, 0xce, 0xba, 0x71, 0x61, 0x11, 0x1b, 0x0b, 0x2f, 0x4f, 0x83,
+ 0x45, 0xfb, 0x1f, 0x1b, 0x55, 0x09, 0x9d, 0xd1, 0x48, 0x49, 0x90, 0x88, 0x82, 0x9e, 0x03, 0xf4,
+ 0x44, 0x41, 0x4f, 0x2c, 0xe8, 0x39, 0x40, 0x4f, 0x2c, 0xe8, 0x89, 0x05, 0x3d, 0x07, 0xe8, 0x89,
+ 0x05, 0x3d, 0x51, 0xd2, 0x73, 0x80, 0x9e, 0x28, 0xe9, 0x89, 0x92, 0x9e, 0x03, 0xf4, 0x44, 0x49,
+ 0x4f, 0x94, 0xf4, 0x1c, 0xa0, 0x27, 0x4e, 0xaf, 0x75, 0x2d, 0xe8, 0x39, 0x40, 0x4f, 0x94, 0xf4,
+ 0xc4, 0x82, 0x9e, 0x03, 0xf4, 0xc4, 0x82, 0x9e, 0x28, 0xe9, 0x39, 0x40, 0x4f, 0x94, 0xf4, 0x44,
+ 0x49, 0xcf, 0x01, 0x7a, 0xa2, 0xa4, 0x27, 0x16, 0xf4, 0x1c, 0xa0, 0x27, 0x0c, 0xbd, 0x7f, 0x6d,
+ 0xe4, 0xbd, 0x49, 0xe2, 0x09, 0x55, 0xf8, 0x31, 0x72, 0xcf, 0x79, 0xca, 0x85, 0x26, 0xb7, 0xdd,
+ 0xdd, 0xe9, 0x98, 0x2b, 0xda, 0x31, 0xe5, 0xce, 0x73, 0xa8, 0x11, 0x23, 0xc1, 0x4f, 0xc0, 0xcf,
+ 0xa8, 0x61, 0xf3, 0xd6, 0xa9, 0x3d, 0xa1, 0xff, 0xe2, 0x47, 0xc8, 0x93, 0xfa, 0x2a, 0xe9, 0x53,
+ 0xd5, 0xe8, 0x6e, 0x17, 0x6a, 0x73, 0xc1, 0x48, 0x5e, 0xc5, 0x5f, 0x98, 0x0d, 0xd1, 0x4a, 0x58,
+ 0xe7, 0xaa, 0x12, 0x36, 0x28, 0x97, 0x56, 0x85, 0x01, 0xec, 0xef, 0x68, 0xcf, 0x3b, 0x85, 0x32,
+ 0xe7, 0x4e, 0x8a, 0x3a, 0xfe, 0x0a, 0xd5, 0xc5, 0xb0, 0x10, 0xdf, 0xd3, 0xb6, 0x2b, 0xe2, 0x9a,
+ 0xc8, 0x7f, 0xb5, 0x3f, 0x43, 0xae, 0x59, 0x74, 0x15, 0x39, 0x64, 0x70, 0xd4, 0xdc, 0xc0, 0x75,
+ 0xe4, 0x7e, 0x4f, 0x06, 0x83, 0x93, 0xa6, 0x85, 0x6b, 0xa8, 0x72, 0xf8, 0xea, 0x6c, 0xd0, 0xb4,
+ 0xdb, 0x7f, 0xd9, 0xa8, 0x72, 0x1c, 0xcd, 0x24, 0xfe, 0x16, 0x35, 0xa6, 0xe6, 0xb8, 0xc0, 0xde,
+ 0xeb, 0x33, 0xd6, 0xe8, 0x7e, 0x58, 0xf8, 0x83, 0xa4, 0x73, 0xac, 0xcf, 0xcf, 0xa9, 0x12, 0x03,
+ 0xa6, 0xc4, 0x9c, 0xd4, 0xa7, 0x45, 0x8c, 0x0f, 0xd0, 0xd6, 0x54, 0x9f, 0xcd, 0xe2, 0xab, 0x6d,
+ 0xdd, 0xfe, 0xd1, 0xcd, 0x76, 0x38, 0xaf, 0xe6, 0xb3, 0x8d, 0x41, 0x63, 0x5a, 0x66, 0x5a, 0xdf,
+ 0xa1, 0xed, 0x9b, 0xfe, 0xb8, 0x89, 0x9c, 0x5f, 0xe9, 0x5c, 0x63, 0x74, 0x08, 0xfc, 0xc4, 0x3b,
+ 0xc8, 0xbd, 0x88, 0xd2, 0x8c, 0xea, 0xeb, 0x57, 0x27, 0x26, 0x78, 0x66, 0x7f, 0x63, 0xb5, 0x4e,
+ 0x50, 0x73, 0xd9, 0xfe, 0x7a, 0x7f, 0xcd, 0xf4, 0x3f, 0xbc, 0xde, 0xbf, 0x0a, 0xa5, 0xf4, 0x6b,
+ 0xff, 0x61, 0xa1, 0xcd, 0x63, 0x39, 0x79, 0x93, 0xa8, 0xb7, 0x3f, 0x31, 0xca, 0xc7, 0xf8, 0x3e,
+ 0x72, 0x55, 0xa2, 0x52, 0xaa, 0xed, 0xea, 0x2f, 0x37, 0x88, 0x09, 0xb1, 0x8f, 0x3c, 0x19, 0xa5,
+ 0x91, 0x98, 0x6b, 0x4f, 0xe7, 0xe5, 0x06, 0xc9, 0x63, 0xdc, 0x42, 0xd5, 0xe7, 0x3c, 0x83, 0x95,
+ 0xe8, 0x67, 0x01, 0x7a, 0x8a, 0x04, 0xfe, 0x14, 0x6d, 0xbe, 0xe5, 0x53, 0x3a, 0x8c, 0xe2, 0x58,
+ 0x50, 0x29, 0xf5, 0x0b, 0x01, 0x82, 0x06, 0x64, 0x0f, 0x4c, 0xf2, 0xb0, 0x8a, 0xdc, 0x8c, 0x25,
+ 0x9c, 0xb5, 0x1f, 0xa1, 0x0a, 0xa1, 0x51, 0x5a, 0x7e, 0xbe, 0x65, 0xde, 0x08, 0x1d, 0x3c, 0xae,
+ 0xd5, 0xe2, 0xe6, 0xd5, 0xd5, 0xd5, 0x95, 0xdd, 0xbe, 0x84, 0xff, 0x08, 0x5f, 0xf2, 0x1e, 0xef,
+ 0xa1, 0x7a, 0x32, 0x8d, 0x26, 0x09, 0x83, 0x95, 0x19, 0x79, 0x99, 0x28, 0x5b, 0xba, 0x47, 0x68,
+ 0x5b, 0xd0, 0x28, 0x1d, 0xd2, 0xf7, 0x8a, 0x32, 0x99, 0x70, 0x86, 0x37, 0xcb, 0x23, 0x15, 0xa5,
+ 0xfe, 0x6f, 0x37, 0xcf, 0x64, 0x6e, 0x4f, 0xb6, 0xa0, 0x69, 0x50, 0xf4, 0xb4, 0xff, 0x73, 0x11,
+ 0xfa, 0x91, 0xf1, 0x4b, 0xf6, 0x7a, 0x3e, 0xa3, 0x12, 0x3f, 0x44, 0x76, 0xc4, 0xfc, 0x6d, 0xdd,
+ 0xba, 0xd3, 0x31, 0xf3, 0xa9, 0x53, 0xcc, 0xa7, 0xce, 0x01, 0x9b, 0x13, 0x3b, 0x62, 0xf8, 0x4b,
+ 0xe4, 0xc4, 0x99, 0xb9, 0xa5, 0x8d, 0xee, 0xee, 0x8a, 0xec, 0x28, 0x9f, 0x92, 0x04, 0x54, 0xf8,
+ 0x73, 0x64, 0x4b, 0xe5, 0x6f, 0x6a, 0xed, 0x83, 0x15, 0xed, 0xa9, 0x9e, 0x98, 0xc4, 0x96, 0x70,
+ 0xfb, 0x6d, 0x25, 0x73, 0xbe, 0xad, 0x15, 0xe1, 0xeb, 0x62, 0x78, 0x12, 0x5b, 0x49, 0xd0, 0xa6,
+ 0x17, 0xfe, 0x9d, 0x35, 0xda, 0x57, 0x89, 0x54, 0xbf, 0xc0, 0x0e, 0x13, 0x3b, 0xbd, 0xc0, 0x21,
+ 0x72, 0x2e, 0xa2, 0xd4, 0x6f, 0x6a, 0xf1, 0xfd, 0x15, 0xb1, 0x11, 0x82, 0x04, 0x77, 0x90, 0x13,
+ 0x8f, 0x52, 0xcd, 0xbc, 0xd1, 0xdd, 0x5b, 0xfd, 0x2e, 0xfd, 0xc8, 0xe5, 0xfa, 0x78, 0x94, 0xe2,
+ 0x27, 0xc8, 0x19, 0xa7, 0x4a, 0x1f, 0x01, 0xb8, 0x70, 0xcb, 0x7a, 0xfd, 0x5c, 0xe6, 0xf2, 0x71,
+ 0xaa, 0x40, 0x9e, 0xe4, 0xb3, 0xf5, 0x36, 0xb9, 0xbe, 0x42, 0xb9, 0x3c, 0xe9, 0xf7, 0x60, 0x35,
+ 0x59, 0xbf, 0xa7, 0xa7, 0xca, 0x6d, 0xab, 0x39, 0xbb, 0xae, 0xcf, 0xfa, 0x3d, 0x6d, 0xbf, 0xdf,
+ 0xd5, 0x43, 0x78, 0x8d, 0xfd, 0x7e, 0xb7, 0xb0, 0xdf, 0xef, 0x6a, 0xfb, 0xfd, 0xae, 0x9e, 0xcc,
+ 0xeb, 0xec, 0x17, 0xfa, 0x4c, 0xeb, 0x2b, 0x7a, 0x84, 0xd5, 0xd7, 0x6c, 0x3a, 0xdc, 0x61, 0x23,
+ 0xd7, 0x3a, 0xf0, 0x87, 0xd7, 0x08, 0xad, 0xf1, 0x37, 0x63, 0x21, 0xf7, 0x97, 0x4a, 0xe0, 0xaf,
+ 0x91, 0x5b, 0x0e, 0xf7, 0xdb, 0x3e, 0x40, 0x8f, 0x0b, 0xd3, 0x60, 0x94, 0xcf, 0x02, 0x54, 0x61,
+ 0xd1, 0x94, 0x2e, 0x1d, 0xfc, 0xdf, 0xf5, 0x0b, 0xa3, 0x2b, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff,
+ 0xd5, 0x39, 0x32, 0x09, 0xf9, 0x09, 0x00, 0x00,
+}
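For reference, the generated types above are the fixtures that the jsonpb tests marshal to and from JSON. A minimal round-trip sketch, assuming the generated package is imported under the alias pb (its package name is jsonpb, so an alias is needed to avoid clashing with the jsonpb marshaling package):

package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto" // generated package above; alias is an assumption
	"github.com/golang/protobuf/proto"
)

func main() {
	// Build a Simple message with the pointer helpers from the proto package.
	msg := &pb.Simple{
		OBool:   proto.Bool(true),
		OInt32:  proto.Int32(42),
		OString: proto.String("hello"),
	}

	// Marshal to JSON, keeping the original proto field names (o_bool, o_int32, ...).
	m := &jsonpb.Marshaler{OrigName: true}
	js, err := m.MarshalToString(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(js) // {"o_bool":true,"o_int32":42,"o_string":"hello"}

	// Unmarshal back into a fresh message.
	var out pb.Simple
	if err := jsonpb.UnmarshalString(js, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetOString()) // hello
}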
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto
new file mode 100644
index 0000000..0d2fc1f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto
@@ -0,0 +1,147 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+
+import "google/protobuf/any.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
+
+package jsonpb;
+
+// Test message for holding primitive types.
+message Simple {
+ optional bool o_bool = 1;
+ optional int32 o_int32 = 2;
+ optional int64 o_int64 = 3;
+ optional uint32 o_uint32 = 4;
+ optional uint64 o_uint64 = 5;
+ optional sint32 o_sint32 = 6;
+ optional sint64 o_sint64 = 7;
+ optional float o_float = 8;
+ optional double o_double = 9;
+ optional string o_string = 10;
+ optional bytes o_bytes = 11;
+}
+
+// Test message for holding special non-finite primitives.
+message NonFinites {
+ optional float f_nan = 1;
+ optional float f_pinf = 2;
+ optional float f_ninf = 3;
+ optional double d_nan = 4;
+ optional double d_pinf = 5;
+ optional double d_ninf = 6;
+}
+
+// Test message for holding repeated primitives.
+message Repeats {
+ repeated bool r_bool = 1;
+ repeated int32 r_int32 = 2;
+ repeated int64 r_int64 = 3;
+ repeated uint32 r_uint32 = 4;
+ repeated uint64 r_uint64 = 5;
+ repeated sint32 r_sint32 = 6;
+ repeated sint64 r_sint64 = 7;
+ repeated float r_float = 8;
+ repeated double r_double = 9;
+ repeated string r_string = 10;
+ repeated bytes r_bytes = 11;
+}
+
+// Test message for holding enums and nested messages.
+message Widget {
+ enum Color {
+ RED = 0;
+ GREEN = 1;
+ BLUE = 2;
+ };
+ optional Color color = 1;
+ repeated Color r_color = 2;
+
+ optional Simple simple = 10;
+ repeated Simple r_simple = 11;
+
+ optional Repeats repeats = 20;
+ repeated Repeats r_repeats = 21;
+}
+
+message Maps {
+ map<int64, string> m_int64_str = 1;
+ map<bool, Simple> m_bool_simple = 2;
+}
+
+message MsgWithOneof {
+ oneof union {
+ string title = 1;
+ int64 salary = 2;
+ string Country = 3;
+ string home_address = 4;
+ }
+}
+
+message Real {
+ optional double value = 1;
+ extensions 100 to max;
+}
+
+extend Real {
+ optional string name = 124;
+}
+
+message Complex {
+ extend Real {
+ optional Complex real_extension = 123;
+ }
+ optional double imaginary = 1;
+ extensions 100 to max;
+}
+
+message KnownTypes {
+ optional google.protobuf.Any an = 14;
+ optional google.protobuf.Duration dur = 1;
+ optional google.protobuf.Struct st = 12;
+ optional google.protobuf.Timestamp ts = 2;
+ optional google.protobuf.ListValue lv = 15;
+ optional google.protobuf.Value val = 16;
+
+ optional google.protobuf.DoubleValue dbl = 3;
+ optional google.protobuf.FloatValue flt = 4;
+ optional google.protobuf.Int64Value i64 = 5;
+ optional google.protobuf.UInt64Value u64 = 6;
+ optional google.protobuf.Int32Value i32 = 7;
+ optional google.protobuf.UInt32Value u32 = 8;
+ optional google.protobuf.BoolValue bool = 9;
+ optional google.protobuf.StringValue str = 10;
+ optional google.protobuf.BytesValue bytes = 11;
+}
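The schema above also covers the cases where the JSON mapping is not one-to-one: the oneof (only the member that is set appears in the output), int64 fields (rendered as JSON strings), and the well-known types, which jsonpb serializes in their special forms (Duration as "3s", Timestamp as an RFC 3339 string, wrapper types as their bare scalar). A short sketch of both, assuming the same pb alias as before and that the ptypes helper package is importable alongside the vendored well-known types:

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/jsonpb"
	pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto" // generated from the schema above; alias is an assumption
	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	// oneof: only the populated member is emitted; the int64 value becomes a JSON string.
	one := &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Salary{Salary: 31000}}
	s, _ := (&jsonpb.Marshaler{}).MarshalToString(one)
	fmt.Println(s) // {"salary":"31000"}

	// Well-known types use their special JSON representations.
	ts, _ := ptypes.TimestampProto(time.Date(2017, 11, 22, 0, 0, 0, 0, time.UTC))
	kt := &pb.KnownTypes{
		Dur: ptypes.DurationProto(3 * time.Second),
		Ts:  ts,
		Str: &wrappers.StringValue{Value: "hi"},
	}
	s, _ = (&jsonpb.Marshaler{}).MarshalToString(kt)
	fmt.Println(s) // e.g. {"dur":"3s","ts":"2017-11-22T00:00:00Z","str":"hi"}
}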
diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile
new file mode 100644
index 0000000..e2e0651
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+
+generate-test-pbs:
+ make install
+ make -C testdata
+ protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
+ make
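The test file that follows starts with the package's low-level Buffer API before moving on to full message round trips. A minimal sketch of the varint encode/decode cycle that TestNumericPrimitives exercises, using only calls that appear in the tests below:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)

	// Append a few unsigned values to the buffer as varints.
	for _, v := range []uint64{1, 300, 1 << 20} {
		if err := b.EncodeVarint(v); err != nil {
			panic(err)
		}
	}
	fmt.Printf("encoded: %x\n", b.Bytes())

	// Read them back in the same order; the Buffer keeps its own read index.
	for i := 0; i < 3; i++ {
		v, err := b.DecodeVarint()
		if err != nil {
			panic(err)
		}
		fmt.Println(v) // 1, then 300, then 1048576
	}
}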
diff --git a/vendor/github.com/golang/protobuf/proto/all_test.go b/vendor/github.com/golang/protobuf/proto/all_test.go
new file mode 100644
index 0000000..41451a4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/all_test.go
@@ -0,0 +1,2278 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "math/rand"
+ "reflect"
+ "runtime/debug"
+ "strings"
+ "testing"
+ "time"
+
+ . "github.com/golang/protobuf/proto"
+ . "github.com/golang/protobuf/proto/testdata"
+)
+
+var globalO *Buffer
+
+func old() *Buffer {
+ if globalO == nil {
+ globalO = NewBuffer(nil)
+ }
+ globalO.Reset()
+ return globalO
+}
+
+func equalbytes(b1, b2 []byte, t *testing.T) {
+ if len(b1) != len(b2) {
+ t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2))
+ return
+ }
+ for i := 0; i < len(b1); i++ {
+ if b1[i] != b2[i] {
+ t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2)
+ }
+ }
+}
+
+func initGoTestField() *GoTestField {
+ f := new(GoTestField)
+ f.Label = String("label")
+ f.Type = String("type")
+ return f
+}
+
+// These are all structurally equivalent but the tag numbers differ.
+// (It's remarkable that required, optional, and repeated all have
+// 8 letters.)
+func initGoTest_RequiredGroup() *GoTest_RequiredGroup {
+ return &GoTest_RequiredGroup{
+ RequiredField: String("required"),
+ }
+}
+
+func initGoTest_OptionalGroup() *GoTest_OptionalGroup {
+ return &GoTest_OptionalGroup{
+ RequiredField: String("optional"),
+ }
+}
+
+func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup {
+ return &GoTest_RepeatedGroup{
+ RequiredField: String("repeated"),
+ }
+}
+
+func initGoTest(setdefaults bool) *GoTest {
+ pb := new(GoTest)
+ if setdefaults {
+ pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted)
+ pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted)
+ pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted)
+ pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted)
+ pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted)
+ pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted)
+ pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted)
+ pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted)
+ pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted)
+ pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted)
+ pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted
+ pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted)
+ pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted)
+ }
+
+ pb.Kind = GoTest_TIME.Enum()
+ pb.RequiredField = initGoTestField()
+ pb.F_BoolRequired = Bool(true)
+ pb.F_Int32Required = Int32(3)
+ pb.F_Int64Required = Int64(6)
+ pb.F_Fixed32Required = Uint32(32)
+ pb.F_Fixed64Required = Uint64(64)
+ pb.F_Uint32Required = Uint32(3232)
+ pb.F_Uint64Required = Uint64(6464)
+ pb.F_FloatRequired = Float32(3232)
+ pb.F_DoubleRequired = Float64(6464)
+ pb.F_StringRequired = String("string")
+ pb.F_BytesRequired = []byte("bytes")
+ pb.F_Sint32Required = Int32(-32)
+ pb.F_Sint64Required = Int64(-64)
+ pb.Requiredgroup = initGoTest_RequiredGroup()
+
+ return pb
+}
+
+func fail(msg string, b *bytes.Buffer, s string, t *testing.T) {
+ data := b.Bytes()
+ ld := len(data)
+ ls := len(s) / 2
+
+ fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls)
+
+ // find the interesting spot - n
+ n := ls
+ if ld < ls {
+ n = ld
+ }
+ j := 0
+ for i := 0; i < n; i++ {
+ bs := hex(s[j])*16 + hex(s[j+1])
+ j += 2
+ if data[i] == bs {
+ continue
+ }
+ n = i
+ break
+ }
+ l := n - 10
+ if l < 0 {
+ l = 0
+ }
+ h := n + 10
+
+ // find the interesting spot - n
+ fmt.Printf("is[%d]:", l)
+ for i := l; i < h; i++ {
+ if i >= ld {
+ fmt.Printf(" --")
+ continue
+ }
+ fmt.Printf(" %.2x", data[i])
+ }
+ fmt.Printf("\n")
+
+ fmt.Printf("sb[%d]:", l)
+ for i := l; i < h; i++ {
+ if i >= ls {
+ fmt.Printf(" --")
+ continue
+ }
+ bs := hex(s[j])*16 + hex(s[j+1])
+ j += 2
+ fmt.Printf(" %.2x", bs)
+ }
+ fmt.Printf("\n")
+
+ t.Fail()
+
+ // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes())
+ // Print the output in a partially-decoded format; can
+ // be helpful when updating the test. It produces the output
+ // that is pasted, with minor edits, into the argument to verify().
+ // data := b.Bytes()
+ // nesting := 0
+ // for b.Len() > 0 {
+ // start := len(data) - b.Len()
+ // var u uint64
+ // u, err := DecodeVarint(b)
+ // if err != nil {
+ // fmt.Printf("decode error on varint:", err)
+ // return
+ // }
+ // wire := u & 0x7
+ // tag := u >> 3
+ // switch wire {
+ // case WireVarint:
+ // v, err := DecodeVarint(b)
+ // if err != nil {
+ // fmt.Printf("decode error on varint:", err)
+ // return
+ // }
+ // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n",
+ // data[start:len(data)-b.Len()], tag, wire, v)
+ // case WireFixed32:
+ // v, err := DecodeFixed32(b)
+ // if err != nil {
+ // fmt.Printf("decode error on fixed32:", err)
+ // return
+ // }
+ // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n",
+ // data[start:len(data)-b.Len()], tag, wire, v)
+ // case WireFixed64:
+ // v, err := DecodeFixed64(b)
+ // if err != nil {
+ // fmt.Printf("decode error on fixed64:", err)
+ // return
+ // }
+ // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n",
+ // data[start:len(data)-b.Len()], tag, wire, v)
+ // case WireBytes:
+ // nb, err := DecodeVarint(b)
+ // if err != nil {
+ // fmt.Printf("decode error on bytes:", err)
+ // return
+ // }
+ // after_tag := len(data) - b.Len()
+ // str := make([]byte, nb)
+ // _, err = b.Read(str)
+ // if err != nil {
+ // fmt.Printf("decode error on bytes:", err)
+ // return
+ // }
+ // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n",
+ // data[start:after_tag], str, tag, wire)
+ // case WireStartGroup:
+ // nesting++
+ // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n",
+ // data[start:len(data)-b.Len()], tag, nesting)
+ // case WireEndGroup:
+ // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n",
+ // data[start:len(data)-b.Len()], tag, nesting)
+ // nesting--
+ // default:
+ // fmt.Printf("unrecognized wire type %d\n", wire)
+ // return
+ // }
+ // }
+}
+
+func hex(c uint8) uint8 {
+ if '0' <= c && c <= '9' {
+ return c - '0'
+ }
+ if 'a' <= c && c <= 'f' {
+ return 10 + c - 'a'
+ }
+ if 'A' <= c && c <= 'F' {
+ return 10 + c - 'A'
+ }
+ return 0
+}
+
+func equal(b []byte, s string, t *testing.T) bool {
+ if 2*len(b) != len(s) {
+ // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t)
+ fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s))
+ return false
+ }
+ for i, j := 0, 0; i < len(b); i, j = i+1, j+2 {
+ x := hex(s[j])*16 + hex(s[j+1])
+ if b[i] != x {
+ // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t)
+ fmt.Printf("bad byte[%d]:%x %x", i, b[i], x)
+ return false
+ }
+ }
+ return true
+}
+
+func overify(t *testing.T, pb *GoTest, expected string) {
+ o := old()
+ err := o.Marshal(pb)
+ if err != nil {
+ fmt.Printf("overify marshal-1 err = %v", err)
+ o.DebugPrint("", o.Bytes())
+ t.Fatalf("expected = %s", expected)
+ }
+ if !equal(o.Bytes(), expected, t) {
+ o.DebugPrint("overify neq 1", o.Bytes())
+ t.Fatalf("expected = %s", expected)
+ }
+
+ // Now test Unmarshal by recreating the original buffer.
+ pbd := new(GoTest)
+ err = o.Unmarshal(pbd)
+ if err != nil {
+ t.Fatalf("overify unmarshal err = %v", err)
+ o.DebugPrint("", o.Bytes())
+ t.Fatalf("string = %s", expected)
+ }
+ o.Reset()
+ err = o.Marshal(pbd)
+ if err != nil {
+ t.Errorf("overify marshal-2 err = %v", err)
+ o.DebugPrint("", o.Bytes())
+ t.Fatalf("string = %s", expected)
+ }
+ if !equal(o.Bytes(), expected, t) {
+ o.DebugPrint("overify neq 2", o.Bytes())
+ t.Fatalf("string = %s", expected)
+ }
+}
+
+// Simple tests for numeric encode/decode primitives (varint, etc.)
+func TestNumericPrimitives(t *testing.T) {
+ for i := uint64(0); i < 1e6; i += 111 {
+ o := old()
+ if o.EncodeVarint(i) != nil {
+ t.Error("EncodeVarint")
+ break
+ }
+ x, e := o.DecodeVarint()
+ if e != nil {
+ t.Fatal("DecodeVarint")
+ }
+ if x != i {
+ t.Fatal("varint decode fail:", i, x)
+ }
+
+ o = old()
+ if o.EncodeFixed32(i) != nil {
+ t.Fatal("encFixed32")
+ }
+ x, e = o.DecodeFixed32()
+ if e != nil {
+ t.Fatal("decFixed32")
+ }
+ if x != i {
+ t.Fatal("fixed32 decode fail:", i, x)
+ }
+
+ o = old()
+ if o.EncodeFixed64(i*1234567) != nil {
+ t.Error("encFixed64")
+ break
+ }
+ x, e = o.DecodeFixed64()
+ if e != nil {
+ t.Error("decFixed64")
+ break
+ }
+ if x != i*1234567 {
+ t.Error("fixed64 decode fail:", i*1234567, x)
+ break
+ }
+
+ o = old()
+ i32 := int32(i - 12345)
+ if o.EncodeZigzag32(uint64(i32)) != nil {
+ t.Fatal("EncodeZigzag32")
+ }
+ x, e = o.DecodeZigzag32()
+ if e != nil {
+ t.Fatal("DecodeZigzag32")
+ }
+ if x != uint64(uint32(i32)) {
+ t.Fatal("zigzag32 decode fail:", i32, x)
+ }
+
+ o = old()
+ i64 := int64(i - 12345)
+ if o.EncodeZigzag64(uint64(i64)) != nil {
+ t.Fatal("EncodeZigzag64")
+ }
+ x, e = o.DecodeZigzag64()
+ if e != nil {
+ t.Fatal("DecodeZigzag64")
+ }
+ if x != uint64(i64) {
+ t.Fatal("zigzag64 decode fail:", i64, x)
+ }
+ }
+}
+
+// fakeMarshaler is a simple struct implementing the Marshaler and Message interfaces.
+type fakeMarshaler struct {
+ b []byte
+ err error
+}
+
+func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err }
+func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) }
+func (f *fakeMarshaler) ProtoMessage() {}
+func (f *fakeMarshaler) Reset() {}
+
+type msgWithFakeMarshaler struct {
+ M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"`
+}
+
+func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) }
+func (m *msgWithFakeMarshaler) ProtoMessage() {}
+func (m *msgWithFakeMarshaler) Reset() {}
+
+// Simple tests for proto messages that implement the Marshaler interface.
+func TestMarshalerEncoding(t *testing.T) {
+ tests := []struct {
+ name string
+ m Message
+ want []byte
+ errType reflect.Type
+ }{
+ {
+ name: "Marshaler that fails",
+ m: &fakeMarshaler{
+ err: errors.New("some marshal err"),
+ b: []byte{5, 6, 7},
+ },
+ // Since the Marshal method returned bytes, they should be written to the
+ // buffer. (For efficiency, we assume that Marshal implementations are
+ // always correct w.r.t. RequiredNotSetError and output.)
+ want: []byte{5, 6, 7},
+ errType: reflect.TypeOf(errors.New("some marshal err")),
+ },
+ {
+ name: "Marshaler that fails with RequiredNotSetError",
+ m: &msgWithFakeMarshaler{
+ M: &fakeMarshaler{
+ err: &RequiredNotSetError{},
+ b: []byte{5, 6, 7},
+ },
+ },
+ // Since there's an error that can be continued after,
+ // the buffer should be written.
+ want: []byte{
+ 10, 3, // for &msgWithFakeMarshaler
+ 5, 6, 7, // for &fakeMarshaler
+ },
+ errType: reflect.TypeOf(&RequiredNotSetError{}),
+ },
+ {
+ name: "Marshaler that succeeds",
+ m: &fakeMarshaler{
+ b: []byte{0, 1, 2, 3, 4, 127, 255},
+ },
+ want: []byte{0, 1, 2, 3, 4, 127, 255},
+ },
+ }
+ for _, test := range tests {
+ b := NewBuffer(nil)
+ err := b.Marshal(test.m)
+ if reflect.TypeOf(err) != test.errType {
+ t.Errorf("%s: got err %T(%v) wanted %T", test.name, err, err, test.errType)
+ }
+ if !reflect.DeepEqual(test.want, b.Bytes()) {
+ t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want)
+ }
+ if size := Size(test.m); size != len(b.Bytes()) {
+ t.Errorf("%s: Size(_) = %v, but marshaled to %v bytes", test.name, size, len(b.Bytes()))
+ }
+
+ m, mErr := Marshal(test.m)
+ if !bytes.Equal(b.Bytes(), m) {
+ t.Errorf("%s: Marshal returned %v, but (*Buffer).Marshal wrote %v", test.name, m, b.Bytes())
+ }
+ if !reflect.DeepEqual(err, mErr) {
+ t.Errorf("%s: Marshal err = %q, but (*Buffer).Marshal returned %q",
+ test.name, fmt.Sprint(mErr), fmt.Sprint(err))
+ }
+ }
+}
+
+// Simple tests for bytes
+func TestBytesPrimitives(t *testing.T) {
+ o := old()
+ bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'}
+ if o.EncodeRawBytes(bytes) != nil {
+ t.Error("EncodeRawBytes")
+ }
+ decb, e := o.DecodeRawBytes(false)
+ if e != nil {
+ t.Error("DecodeRawBytes")
+ }
+ equalbytes(bytes, decb, t)
+}
+
+// Simple tests for strings
+func TestStringPrimitives(t *testing.T) {
+ o := old()
+ s := "now is the time"
+ if o.EncodeStringBytes(s) != nil {
+ t.Error("enc_string")
+ }
+ decs, e := o.DecodeStringBytes()
+ if e != nil {
+ t.Error("dec_string")
+ }
+ if s != decs {
+ t.Error("string encode/decode fail:", s, decs)
+ }
+}
+
+// Do we catch the "required bit not set" case?
+func TestRequiredBit(t *testing.T) {
+ o := old()
+ pb := new(GoTest)
+ err := o.Marshal(pb)
+ if err == nil {
+ t.Error("did not catch missing required fields")
+ } else if strings.Index(err.Error(), "Kind") < 0 {
+ t.Error("wrong error type:", err)
+ }
+}
+
+// Check that all fields are nil.
+// Clearly silly, and a residue from a more interesting test with an earlier,
+// different initialization property, but it once caught a compiler bug so
+// it lives.
+func checkInitialized(pb *GoTest, t *testing.T) {
+ if pb.F_BoolDefaulted != nil {
+ t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted)
+ }
+ if pb.F_Int32Defaulted != nil {
+ t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted)
+ }
+ if pb.F_Int64Defaulted != nil {
+ t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted)
+ }
+ if pb.F_Fixed32Defaulted != nil {
+ t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted)
+ }
+ if pb.F_Fixed64Defaulted != nil {
+ t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted)
+ }
+ if pb.F_Uint32Defaulted != nil {
+ t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted)
+ }
+ if pb.F_Uint64Defaulted != nil {
+ t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted)
+ }
+ if pb.F_FloatDefaulted != nil {
+ t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted)
+ }
+ if pb.F_DoubleDefaulted != nil {
+ t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted)
+ }
+ if pb.F_StringDefaulted != nil {
+ t.Error("New or Reset did not set string:", *pb.F_StringDefaulted)
+ }
+ if pb.F_BytesDefaulted != nil {
+ t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted))
+ }
+ if pb.F_Sint32Defaulted != nil {
+ t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted)
+ }
+ if pb.F_Sint64Defaulted != nil {
+ t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted)
+ }
+}
+
+// Does Reset() reset?
+func TestReset(t *testing.T) {
+ pb := initGoTest(true)
+ // muck with some values
+ pb.F_BoolDefaulted = Bool(false)
+ pb.F_Int32Defaulted = Int32(237)
+ pb.F_Int64Defaulted = Int64(12346)
+ pb.F_Fixed32Defaulted = Uint32(32000)
+ pb.F_Fixed64Defaulted = Uint64(666)
+ pb.F_Uint32Defaulted = Uint32(323232)
+ pb.F_Uint64Defaulted = nil
+ pb.F_FloatDefaulted = nil
+ pb.F_DoubleDefaulted = Float64(0)
+ pb.F_StringDefaulted = String("gotcha")
+ pb.F_BytesDefaulted = []byte("asdfasdf")
+ pb.F_Sint32Defaulted = Int32(123)
+ pb.F_Sint64Defaulted = Int64(789)
+ pb.Reset()
+ checkInitialized(pb, t)
+}
+
+// All required fields set, no defaults provided.
+func TestEncodeDecode1(t *testing.T) {
+ pb := initGoTest(false)
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 0x20
+ "714000000000000000"+ // field 14, encoding 1, value 0x40
+ "78a019"+ // field 15, encoding 0, value 0xca0 = 3232
+ "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string"
+ "b304"+ // field 70, encoding 3, start group
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // field 70, encoding 4, end group
+ "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f") // field 103, encoding 0, 0x7f zigzag64
+}
+
+// All required fields set, defaults provided.
+func TestEncodeDecode2(t *testing.T) {
+ pb := initGoTest(true)
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "c00201"+ // field 40, encoding 0, value 1
+ "c80220"+ // field 41, encoding 0, value 32
+ "d00240"+ // field 42, encoding 0, value 64
+ "dd0240010000"+ // field 43, encoding 5, value 320
+ "e1028002000000000000"+ // field 44, encoding 1, value 640
+ "e8028019"+ // field 45, encoding 0, value 3200
+ "f0028032"+ // field 46, encoding 0, value 6400
+ "fd02e0659948"+ // field 47, encoding 5, value 314159.0
+ "81030000000050971041"+ // field 48, encoding 1, value 271828.0
+ "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
+ "90193f"+ // field 402, encoding 0, value 63
+ "98197f") // field 403, encoding 0, value 127
+
+}
+
+// All default fields set to their default value by hand
+func TestEncodeDecode3(t *testing.T) {
+ pb := initGoTest(false)
+ pb.F_BoolDefaulted = Bool(true)
+ pb.F_Int32Defaulted = Int32(32)
+ pb.F_Int64Defaulted = Int64(64)
+ pb.F_Fixed32Defaulted = Uint32(320)
+ pb.F_Fixed64Defaulted = Uint64(640)
+ pb.F_Uint32Defaulted = Uint32(3200)
+ pb.F_Uint64Defaulted = Uint64(6400)
+ pb.F_FloatDefaulted = Float32(314159)
+ pb.F_DoubleDefaulted = Float64(271828)
+ pb.F_StringDefaulted = String("hello, \"world!\"\n")
+ pb.F_BytesDefaulted = []byte("Bignose")
+ pb.F_Sint32Defaulted = Int32(-32)
+ pb.F_Sint64Defaulted = Int64(-64)
+
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "c00201"+ // field 40, encoding 0, value 1
+ "c80220"+ // field 41, encoding 0, value 32
+ "d00240"+ // field 42, encoding 0, value 64
+ "dd0240010000"+ // field 43, encoding 5, value 320
+ "e1028002000000000000"+ // field 44, encoding 1, value 640
+ "e8028019"+ // field 45, encoding 0, value 3200
+ "f0028032"+ // field 46, encoding 0, value 6400
+ "fd02e0659948"+ // field 47, encoding 5, value 314159.0
+ "81030000000050971041"+ // field 48, encoding 1, value 271828.0
+ "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
+ "90193f"+ // field 402, encoding 0, value 63
+ "98197f") // field 403, encoding 0, value 127
+
+}
+
+// All required fields set, defaults provided, all non-defaulted optional fields have values.
+func TestEncodeDecode4(t *testing.T) {
+ pb := initGoTest(true)
+ pb.Table = String("hello")
+ pb.Param = Int32(7)
+ pb.OptionalField = initGoTestField()
+ pb.F_BoolOptional = Bool(true)
+ pb.F_Int32Optional = Int32(32)
+ pb.F_Int64Optional = Int64(64)
+ pb.F_Fixed32Optional = Uint32(3232)
+ pb.F_Fixed64Optional = Uint64(6464)
+ pb.F_Uint32Optional = Uint32(323232)
+ pb.F_Uint64Optional = Uint64(646464)
+ pb.F_FloatOptional = Float32(32.)
+ pb.F_DoubleOptional = Float64(64.)
+ pb.F_StringOptional = String("hello")
+ pb.F_BytesOptional = []byte("Bignose")
+ pb.F_Sint32Optional = Int32(-32)
+ pb.F_Sint64Optional = Int64(-64)
+ pb.Optionalgroup = initGoTest_OptionalGroup()
+
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello"
+ "1807"+ // field 3, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "f00101"+ // field 30, encoding 0, value 1
+ "f80120"+ // field 31, encoding 0, value 32
+ "800240"+ // field 32, encoding 0, value 64
+ "8d02a00c0000"+ // field 33, encoding 5, value 3232
+ "91024019000000000000"+ // field 34, encoding 1, value 6464
+ "9802a0dd13"+ // field 35, encoding 0, value 323232
+ "a002c0ba27"+ // field 36, encoding 0, value 646464
+ "ad0200000042"+ // field 37, encoding 5, value 32.0
+ "b1020000000000005040"+ // field 38, encoding 1, value 64.0
+ "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello"
+ "c00201"+ // field 40, encoding 0, value 1
+ "c80220"+ // field 41, encoding 0, value 32
+ "d00240"+ // field 42, encoding 0, value 64
+ "dd0240010000"+ // field 43, encoding 5, value 320
+ "e1028002000000000000"+ // field 44, encoding 1, value 640
+ "e8028019"+ // field 45, encoding 0, value 3200
+ "f0028032"+ // field 46, encoding 0, value 6400
+ "fd02e0659948"+ // field 47, encoding 5, value 314159.0
+ "81030000000050971041"+ // field 48, encoding 1, value 271828.0
+ "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "d305"+ // start group field 90 level 1
+ "da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional"
+ "d405"+ // end group field 90 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose"
+ "f0123f"+ // field 302, encoding 0, value 63
+ "f8127f"+ // field 303, encoding 0, value 127
+ "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
+ "90193f"+ // field 402, encoding 0, value 63
+ "98197f") // field 403, encoding 0, value 127
+
+}
+
+// All required fields set, defaults provided, all repeated fields given two values.
+func TestEncodeDecode5(t *testing.T) {
+ pb := initGoTest(true)
+ pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()}
+ pb.F_BoolRepeated = []bool{false, true}
+ pb.F_Int32Repeated = []int32{32, 33}
+ pb.F_Int64Repeated = []int64{64, 65}
+ pb.F_Fixed32Repeated = []uint32{3232, 3333}
+ pb.F_Fixed64Repeated = []uint64{6464, 6565}
+ pb.F_Uint32Repeated = []uint32{323232, 333333}
+ pb.F_Uint64Repeated = []uint64{646464, 656565}
+ pb.F_FloatRepeated = []float32{32., 33.}
+ pb.F_DoubleRepeated = []float64{64., 65.}
+ pb.F_StringRepeated = []string{"hello", "sailor"}
+ pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")}
+ pb.F_Sint32Repeated = []int32{32, -32}
+ pb.F_Sint64Repeated = []int64{64, -64}
+ pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()}
+
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
+ "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "a00100"+ // field 20, encoding 0, value 0
+ "a00101"+ // field 20, encoding 0, value 1
+ "a80120"+ // field 21, encoding 0, value 32
+ "a80121"+ // field 21, encoding 0, value 33
+ "b00140"+ // field 22, encoding 0, value 64
+ "b00141"+ // field 22, encoding 0, value 65
+ "bd01a00c0000"+ // field 23, encoding 5, value 3232
+ "bd01050d0000"+ // field 23, encoding 5, value 3333
+ "c1014019000000000000"+ // field 24, encoding 1, value 6464
+ "c101a519000000000000"+ // field 24, encoding 1, value 6565
+ "c801a0dd13"+ // field 25, encoding 0, value 323232
+ "c80195ac14"+ // field 25, encoding 0, value 333333
+ "d001c0ba27"+ // field 26, encoding 0, value 646464
+ "d001b58928"+ // field 26, encoding 0, value 656565
+ "dd0100000042"+ // field 27, encoding 5, value 32.0
+ "dd0100000442"+ // field 27, encoding 5, value 33.0
+ "e1010000000000005040"+ // field 28, encoding 1, value 64.0
+ "e1010000000000405040"+ // field 28, encoding 1, value 65.0
+ "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello"
+ "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor"
+ "c00201"+ // field 40, encoding 0, value 1
+ "c80220"+ // field 41, encoding 0, value 32
+ "d00240"+ // field 42, encoding 0, value 64
+ "dd0240010000"+ // field 43, encoding 5, value 320
+ "e1028002000000000000"+ // field 44, encoding 1, value 640
+ "e8028019"+ // field 45, encoding 0, value 3200
+ "f0028032"+ // field 46, encoding 0, value 6400
+ "fd02e0659948"+ // field 47, encoding 5, value 314159.0
+ "81030000000050971041"+ // field 48, encoding 1, value 271828.0
+ "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "8305"+ // start group field 80 level 1
+ "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
+ "8405"+ // end group field 80 level 1
+ "8305"+ // start group field 80 level 1
+ "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
+ "8405"+ // end group field 80 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "ca0c03"+"626967"+ // field 201, encoding 2, string "big"
+ "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose"
+ "d00c40"+ // field 202, encoding 0, value 32
+ "d00c3f"+ // field 202, encoding 0, value -32
+ "d80c8001"+ // field 203, encoding 0, value 64
+ "d80c7f"+ // field 203, encoding 0, value -64
+ "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
+ "90193f"+ // field 402, encoding 0, value 63
+ "98197f") // field 403, encoding 0, value 127
+
+}
+
+// All required fields set, all packed repeated fields given two values.
+func TestEncodeDecode6(t *testing.T) {
+ pb := initGoTest(false)
+ pb.F_BoolRepeatedPacked = []bool{false, true}
+ pb.F_Int32RepeatedPacked = []int32{32, 33}
+ pb.F_Int64RepeatedPacked = []int64{64, 65}
+ pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333}
+ pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565}
+ pb.F_Uint32RepeatedPacked = []uint32{323232, 333333}
+ pb.F_Uint64RepeatedPacked = []uint64{646464, 656565}
+ pb.F_FloatRepeatedPacked = []float32{32., 33.}
+ pb.F_DoubleRepeatedPacked = []float64{64., 65.}
+ pb.F_Sint32RepeatedPacked = []int32{32, -32}
+ pb.F_Sint64RepeatedPacked = []int64{64, -64}
+
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1
+ "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33
+ "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65
+ "aa0308"+ // field 53, encoding 2, 8 bytes
+ "a00c0000050d0000"+ // value 3232, value 3333
+ "b20310"+ // field 54, encoding 2, 16 bytes
+ "4019000000000000a519000000000000"+ // value 6464, value 6565
+ "ba0306"+ // field 55, encoding 2, 6 bytes
+ "a0dd1395ac14"+ // value 323232, value 333333
+ "c20306"+ // field 56, encoding 2, 6 bytes
+ "c0ba27b58928"+ // value 646464, value 656565
+ "ca0308"+ // field 57, encoding 2, 8 bytes
+ "0000004200000442"+ // value 32.0, value 33.0
+ "d20310"+ // field 58, encoding 2, 16 bytes
+ "00000000000050400000000000405040"+ // value 64.0, value 65.0
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "b21f02"+ // field 502, encoding 2, 2 bytes
+ "403f"+ // value 32, value -32
+ "ba1f03"+ // field 503, encoding 2, 3 bytes
+ "80017f") // value 64, value -64
+}
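+
+// The sint32/sint64 values above are zigzag-encoded: a signed n maps to
+// (n << 1) ^ (n >> 31) (or >> 63 for 64 bits), so small negative numbers stay
+// small on the wire. That is why 32 and -32 appear as the single bytes 0x40
+// and 0x3f in packed field 502, roughly:
+//
+//    zigzag32 := func(n int32) uint32 { return uint32((n << 1) ^ (n >> 31)) }
+//    _, _ = zigzag32(32), zigzag32(-32) // 0x40, 0x3f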
+
+// Test that we can encode empty bytes fields.
+func TestEncodeDecodeBytes1(t *testing.T) {
+ pb := initGoTest(false)
+
+ // Create our bytes
+ pb.F_BytesRequired = []byte{}
+ pb.F_BytesRepeated = [][]byte{{}}
+ pb.F_BytesOptional = []byte{}
+
+ d, err := Marshal(pb)
+ if err != nil {
+ t.Error(err)
+ }
+
+ pbd := new(GoTest)
+ if err := Unmarshal(d, pbd); err != nil {
+ t.Error(err)
+ }
+
+ if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 {
+ t.Error("required empty bytes field is incorrect")
+ }
+ if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil {
+ t.Error("repeated empty bytes field is incorrect")
+ }
+ if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 {
+ t.Error("optional empty bytes field is incorrect")
+ }
+}
+
+// Test that we encode nil-valued fields of a repeated bytes field correctly.
+// Since entries in a repeated field cannot be nil, nil must mean empty value.
+func TestEncodeDecodeBytes2(t *testing.T) {
+ pb := initGoTest(false)
+
+ // Create our bytes
+ pb.F_BytesRepeated = [][]byte{nil}
+
+ d, err := Marshal(pb)
+ if err != nil {
+ t.Error(err)
+ }
+
+ pbd := new(GoTest)
+ if err := Unmarshal(d, pbd); err != nil {
+ t.Error(err)
+ }
+
+ if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil {
+ t.Error("Unexpected value for repeated bytes field")
+ }
+}
+
+// Test that unrecognized fields are skipped during decoding and preserved in XXX_unrecognized.
+func TestSkippingUnrecognizedFields(t *testing.T) {
+ o := old()
+ pb := initGoTestField()
+
+ // Marshal it normally.
+ o.Marshal(pb)
+
+ // Now create a GoSkipTest record.
+ skip := &GoSkipTest{
+ SkipInt32: Int32(32),
+ SkipFixed32: Uint32(3232),
+ SkipFixed64: Uint64(6464),
+ SkipString: String("skipper"),
+ Skipgroup: &GoSkipTest_SkipGroup{
+ GroupInt32: Int32(75),
+ GroupString: String("wxyz"),
+ },
+ }
+
+ // Marshal it into same buffer.
+ o.Marshal(skip)
+
+ pbd := new(GoTestField)
+ o.Unmarshal(pbd)
+
+ // The XXX_unrecognized field should be a marshaling of GoSkipTest.
+ skipd := new(GoSkipTest)
+
+ o.SetBuf(pbd.XXX_unrecognized)
+ o.Unmarshal(skipd)
+
+ if *skipd.SkipInt32 != *skip.SkipInt32 {
+ t.Error("skip int32", skipd.SkipInt32)
+ }
+ if *skipd.SkipFixed32 != *skip.SkipFixed32 {
+ t.Error("skip fixed32", skipd.SkipFixed32)
+ }
+ if *skipd.SkipFixed64 != *skip.SkipFixed64 {
+ t.Error("skip fixed64", skipd.SkipFixed64)
+ }
+ if *skipd.SkipString != *skip.SkipString {
+ t.Error("skip string", *skipd.SkipString)
+ }
+ if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 {
+ t.Error("skip group int32", skipd.Skipgroup.GroupInt32)
+ }
+ if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString {
+ t.Error("skip group string", *skipd.Skipgroup.GroupString)
+ }
+}
+
+// Check that unrecognized fields of a submessage are preserved.
+func TestSubmessageUnrecognizedFields(t *testing.T) {
+ nm := &NewMessage{
+ Nested: &NewMessage_Nested{
+ Name: String("Nigel"),
+ FoodGroup: String("carbs"),
+ },
+ }
+ b, err := Marshal(nm)
+ if err != nil {
+ t.Fatalf("Marshal of NewMessage: %v", err)
+ }
+
+ // Unmarshal into an OldMessage.
+ om := new(OldMessage)
+ if err := Unmarshal(b, om); err != nil {
+ t.Fatalf("Unmarshal to OldMessage: %v", err)
+ }
+ exp := &OldMessage{
+ Nested: &OldMessage_Nested{
+ Name: String("Nigel"),
+ // normal protocol buffer users should not do this
+ XXX_unrecognized: []byte("\x12\x05carbs"),
+ },
+ }
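+ // "\x12\x05carbs" is the wire encoding of the field that OldMessage_Nested
+ // does not know about: tag 0x12 = (2 << 3) | 2 (field 2, length-delimited),
+ // then the length 5 and the bytes "carbs" (presumably NewMessage_Nested's
+ // food_group field).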
+ if !Equal(om, exp) {
+ t.Errorf("om = %v, want %v", om, exp)
+ }
+
+ // Clone the OldMessage.
+ om = Clone(om).(*OldMessage)
+ if !Equal(om, exp) {
+ t.Errorf("Clone(om) = %v, want %v", om, exp)
+ }
+
+ // Marshal the OldMessage, then unmarshal it into an empty NewMessage.
+ if b, err = Marshal(om); err != nil {
+ t.Fatalf("Marshal of OldMessage: %v", err)
+ }
+ t.Logf("Marshal(%v) -> %q", om, b)
+ nm2 := new(NewMessage)
+ if err := Unmarshal(b, nm2); err != nil {
+ t.Fatalf("Unmarshal to NewMessage: %v", err)
+ }
+ if !Equal(nm, nm2) {
+ t.Errorf("NewMessage round-trip: %v => %v", nm, nm2)
+ }
+}
+
+// Check that an int32 field can be upgraded to an int64 field.
+func TestNegativeInt32(t *testing.T) {
+ om := &OldMessage{
+ Num: Int32(-1),
+ }
+ b, err := Marshal(om)
+ if err != nil {
+ t.Fatalf("Marshal of OldMessage: %v", err)
+ }
+
+ // Check the size. It should be 11 bytes;
+ // 1 for the field/wire type, and 10 for the negative number.
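+ // (A negative int32 is sign-extended to 64 bits before varint encoding,
+ // which is why it always occupies the full 10 varint bytes on the wire.)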
+ if len(b) != 11 {
+ t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b)
+ }
+
+ // Unmarshal into a NewMessage.
+ nm := new(NewMessage)
+ if err := Unmarshal(b, nm); err != nil {
+ t.Fatalf("Unmarshal to NewMessage: %v", err)
+ }
+ want := &NewMessage{
+ Num: Int64(-1),
+ }
+ if !Equal(nm, want) {
+ t.Errorf("nm = %v, want %v", nm, want)
+ }
+}
+
+// Check that we can grow an array (repeated field) to have many elements.
+// This test doesn't depend only on our encoding; for variety, it makes sure
+// we create, encode, and decode the correct contents explicitly. It's therefore
+// a bit messier.
+// This test also uses (and hence tests) the Marshal/Unmarshal functions
+// instead of the methods.
+func TestBigRepeated(t *testing.T) {
+ pb := initGoTest(true)
+
+ // Create the arrays
+ const N = 50 // Internally the library starts much smaller.
+ pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N)
+ pb.F_Sint64Repeated = make([]int64, N)
+ pb.F_Sint32Repeated = make([]int32, N)
+ pb.F_BytesRepeated = make([][]byte, N)
+ pb.F_StringRepeated = make([]string, N)
+ pb.F_DoubleRepeated = make([]float64, N)
+ pb.F_FloatRepeated = make([]float32, N)
+ pb.F_Uint64Repeated = make([]uint64, N)
+ pb.F_Uint32Repeated = make([]uint32, N)
+ pb.F_Fixed64Repeated = make([]uint64, N)
+ pb.F_Fixed32Repeated = make([]uint32, N)
+ pb.F_Int64Repeated = make([]int64, N)
+ pb.F_Int32Repeated = make([]int32, N)
+ pb.F_BoolRepeated = make([]bool, N)
+ pb.RepeatedField = make([]*GoTestField, N)
+
+ // Fill in the arrays with checkable values.
+ igtf := initGoTestField()
+ igtrg := initGoTest_RepeatedGroup()
+ for i := 0; i < N; i++ {
+ pb.Repeatedgroup[i] = igtrg
+ pb.F_Sint64Repeated[i] = int64(i)
+ pb.F_Sint32Repeated[i] = int32(i)
+ s := fmt.Sprint(i)
+ pb.F_BytesRepeated[i] = []byte(s)
+ pb.F_StringRepeated[i] = s
+ pb.F_DoubleRepeated[i] = float64(i)
+ pb.F_FloatRepeated[i] = float32(i)
+ pb.F_Uint64Repeated[i] = uint64(i)
+ pb.F_Uint32Repeated[i] = uint32(i)
+ pb.F_Fixed64Repeated[i] = uint64(i)
+ pb.F_Fixed32Repeated[i] = uint32(i)
+ pb.F_Int64Repeated[i] = int64(i)
+ pb.F_Int32Repeated[i] = int32(i)
+ pb.F_BoolRepeated[i] = i%2 == 0
+ pb.RepeatedField[i] = igtf
+ }
+
+ // Marshal.
+ buf, _ := Marshal(pb)
+
+ // Now test Unmarshal by recreating the original buffer.
+ pbd := new(GoTest)
+ Unmarshal(buf, pbd)
+
+ // Check the checkable values
+ for i := uint64(0); i < N; i++ {
+ if pbd.Repeatedgroup[i] == nil { // TODO: more checking?
+ t.Error("pbd.Repeatedgroup bad")
+ }
+ var x uint64
+ x = uint64(pbd.F_Sint64Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Sint64Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Sint32Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Sint32Repeated bad", x, i)
+ }
+ s := fmt.Sprint(i)
+ equalbytes(pbd.F_BytesRepeated[i], []byte(s), t)
+ if pbd.F_StringRepeated[i] != s {
+ t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i)
+ }
+ x = uint64(pbd.F_DoubleRepeated[i])
+ if x != i {
+ t.Error("pbd.F_DoubleRepeated bad", x, i)
+ }
+ x = uint64(pbd.F_FloatRepeated[i])
+ if x != i {
+ t.Error("pbd.F_FloatRepeated bad", x, i)
+ }
+ x = pbd.F_Uint64Repeated[i]
+ if x != i {
+ t.Error("pbd.F_Uint64Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Uint32Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Uint32Repeated bad", x, i)
+ }
+ x = pbd.F_Fixed64Repeated[i]
+ if x != i {
+ t.Error("pbd.F_Fixed64Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Fixed32Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Fixed32Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Int64Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Int64Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Int32Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Int32Repeated bad", x, i)
+ }
+ if pbd.F_BoolRepeated[i] != (i%2 == 0) {
+ t.Error("pbd.F_BoolRepeated bad", x, i)
+ }
+ if pbd.RepeatedField[i] == nil { // TODO: more checking?
+ t.Error("pbd.RepeatedField bad")
+ }
+ }
+}
+
+// Verify we give a useful message when decoding to the wrong structure type.
+func TestTypeMismatch(t *testing.T) {
+ pb1 := initGoTest(true)
+
+ // Marshal
+ o := old()
+ o.Marshal(pb1)
+
+ // Now Unmarshal it to the wrong type.
+ pb2 := initGoTestField()
+ err := o.Unmarshal(pb2)
+ if err == nil {
+ t.Error("expected error, got no error")
+ } else if !strings.Contains(err.Error(), "bad wiretype") {
+ t.Error("expected bad wiretype error, got", err)
+ }
+}
+
+func encodeDecode(t *testing.T, in, out Message, msg string) {
+ buf, err := Marshal(in)
+ if err != nil {
+ t.Fatalf("failed marshaling %v: %v", msg, err)
+ }
+ if err := Unmarshal(buf, out); err != nil {
+ t.Fatalf("failed unmarshaling %v: %v", msg, err)
+ }
+}
+
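+// A decoder must accept a packable repeated field in either packed or
+// non-packed form, whichever arrives on the wire; both directions are
+// exercised here.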
+func TestPackedNonPackedDecoderSwitching(t *testing.T) {
+ np, p := new(NonPackedTest), new(PackedTest)
+
+ // non-packed -> packed
+ np.A = []int32{0, 1, 1, 2, 3, 5}
+ encodeDecode(t, np, p, "non-packed -> packed")
+ if !reflect.DeepEqual(np.A, p.B) {
+ t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B)
+ }
+
+ // packed -> non-packed
+ np.Reset()
+ p.B = []int32{3, 1, 4, 1, 5, 9}
+ encodeDecode(t, p, np, "packed -> non-packed")
+ if !reflect.DeepEqual(p.B, np.A) {
+ t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A)
+ }
+}
+
+func TestProto1RepeatedGroup(t *testing.T) {
+ pb := &MessageList{
+ Message: []*MessageList_Message{
+ {
+ Name: String("blah"),
+ Count: Int32(7),
+ },
+ // NOTE: pb.Message[1] is a nil
+ nil,
+ },
+ }
+
+ o := old()
+ err := o.Marshal(pb)
+ if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") {
+ t.Fatalf("unexpected or no error when marshaling: %v", err)
+ }
+}
+
+// Test that enums work. Checks for a bug introduced by making enums
+// named types instead of int32: newInt32FromUint64 would crash with
+// a type mismatch in reflect.PtrTo.
+func TestEnum(t *testing.T) {
+ pb := new(GoEnum)
+ pb.Foo = FOO_FOO1.Enum()
+ o := old()
+ if err := o.Marshal(pb); err != nil {
+ t.Fatal("error encoding enum:", err)
+ }
+ pb1 := new(GoEnum)
+ if err := o.Unmarshal(pb1); err != nil {
+ t.Fatal("error decoding enum:", err)
+ }
+ if *pb1.Foo != FOO_FOO1 {
+ t.Error("expected 7 but got ", *pb1.Foo)
+ }
+}
+
+// Enum types have String methods. Check that enum fields can be printed.
+// We don't care what the value actually is, just as long as it doesn't crash.
+func TestPrintingNilEnumFields(t *testing.T) {
+ pb := new(GoEnum)
+ _ = fmt.Sprintf("%+v", pb)
+}
+
+// Verify that absent required fields cause Marshal/Unmarshal to return errors.
+func TestRequiredFieldEnforcement(t *testing.T) {
+ pb := new(GoTestField)
+ _, err := Marshal(pb)
+ if err == nil {
+ t.Error("marshal: expected error, got nil")
+ } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Label") {
+ t.Errorf("marshal: bad error type: %v", err)
+ }
+
+ // A slightly sneaky, yet valid, proto. It encodes the same required field twice,
+ // so simply counting the required fields is insufficient.
+ // field 1, encoding 2, value "hi"
+ buf := []byte("\x0A\x02hi\x0A\x02hi")
+ err = Unmarshal(buf, pb)
+ if err == nil {
+ t.Error("unmarshal: expected error, got nil")
+ } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "{Unknown}") {
+ t.Errorf("unmarshal: bad error type: %v", err)
+ }
+}
+
+// Verify that absent required fields in groups cause Marshal/Unmarshal to return errors.
+func TestRequiredFieldEnforcementGroups(t *testing.T) {
+ pb := &GoTestRequiredGroupField{Group: &GoTestRequiredGroupField_Group{}}
+ if _, err := Marshal(pb); err == nil {
+ t.Error("marshal: expected error, got nil")
+ } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") {
+ t.Errorf("marshal: bad error type: %v", err)
+ }
+
+ buf := []byte{11, 12}
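+ // 11 and 12 are (1 << 3) | 3 and (1 << 3) | 4, the start- and end-group tags
+ // for field 1, i.e. an empty group with its required field missing.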
+ if err := Unmarshal(buf, pb); err == nil {
+ t.Error("unmarshal: expected error, got nil")
+ } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.{Unknown}") {
+ t.Errorf("unmarshal: bad error type: %v", err)
+ }
+}
+
+func TestTypedNilMarshal(t *testing.T) {
+ // A typed nil should return ErrNil and not crash.
+ {
+ var m *GoEnum
+ if _, err := Marshal(m); err != ErrNil {
+ t.Errorf("Marshal(%#v): got %v, want ErrNil", m, err)
+ }
+ }
+
+ {
+ m := &Communique{Union: &Communique_Msg{nil}}
+ if _, err := Marshal(m); err == nil || err == ErrNil {
+ t.Errorf("Marshal(%#v): got %v, want errOneofHasNil", m, err)
+ }
+ }
+}
+
+// A type that implements the Marshaler interface, but is not nillable.
+type nonNillableInt uint64
+
+func (nni nonNillableInt) Marshal() ([]byte, error) {
+ return EncodeVarint(uint64(nni)), nil
+}
+
+type NNIMessage struct {
+ nni nonNillableInt
+}
+
+func (*NNIMessage) Reset() {}
+func (*NNIMessage) String() string { return "" }
+func (*NNIMessage) ProtoMessage() {}
+
+// A type that implements the Marshaler interface and is nillable.
+type nillableMessage struct {
+ x uint64
+}
+
+func (nm *nillableMessage) Marshal() ([]byte, error) {
+ return EncodeVarint(nm.x), nil
+}
+
+type NMMessage struct {
+ nm *nillableMessage
+}
+
+func (*NMMessage) Reset() {}
+func (*NMMessage) String() string { return "" }
+func (*NMMessage) ProtoMessage() {}
+
+// Verify a type that uses the Marshaler interface, but has a nil pointer.
+func TestNilMarshaler(t *testing.T) {
+ // Try a struct with a Marshaler field that is nil.
+ // It should be directly marshalable.
+ nmm := new(NMMessage)
+ if _, err := Marshal(nmm); err != nil {
+ t.Error("unexpected error marshaling nmm: ", err)
+ }
+
+ // Try a struct with a Marshaler field that is not nillable.
+ nnim := new(NNIMessage)
+ nnim.nni = 7
+ var _ Marshaler = nnim.nni // verify it is truly a Marshaler
+ if _, err := Marshal(nnim); err != nil {
+ t.Error("unexpected error marshaling nnim: ", err)
+ }
+}
+
+func TestAllSetDefaults(t *testing.T) {
+ // Exercise SetDefaults with all scalar field types.
+ m := &Defaults{
+ // NaN != NaN, so override that here.
+ F_Nan: Float32(1.7),
+ }
+ expected := &Defaults{
+ F_Bool: Bool(true),
+ F_Int32: Int32(32),
+ F_Int64: Int64(64),
+ F_Fixed32: Uint32(320),
+ F_Fixed64: Uint64(640),
+ F_Uint32: Uint32(3200),
+ F_Uint64: Uint64(6400),
+ F_Float: Float32(314159),
+ F_Double: Float64(271828),
+ F_String: String(`hello, "world!"` + "\n"),
+ F_Bytes: []byte("Bignose"),
+ F_Sint32: Int32(-32),
+ F_Sint64: Int64(-64),
+ F_Enum: Defaults_GREEN.Enum(),
+ F_Pinf: Float32(float32(math.Inf(1))),
+ F_Ninf: Float32(float32(math.Inf(-1))),
+ F_Nan: Float32(1.7),
+ StrZero: String(""),
+ }
+ SetDefaults(m)
+ if !Equal(m, expected) {
+ t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected)
+ }
+}
+
+func TestSetDefaultsWithSetField(t *testing.T) {
+ // Check that a set value is not overridden.
+ m := &Defaults{
+ F_Int32: Int32(12),
+ }
+ SetDefaults(m)
+ if v := m.GetF_Int32(); v != 12 {
+ t.Errorf("m.FInt32 = %v, want 12", v)
+ }
+}
+
+func TestSetDefaultsWithSubMessage(t *testing.T) {
+ m := &OtherMessage{
+ Key: Int64(123),
+ Inner: &InnerMessage{
+ Host: String("gopher"),
+ },
+ }
+ expected := &OtherMessage{
+ Key: Int64(123),
+ Inner: &InnerMessage{
+ Host: String("gopher"),
+ Port: Int32(4000),
+ },
+ }
+ SetDefaults(m)
+ if !Equal(m, expected) {
+ t.Errorf("\n got %v\nwant %v", m, expected)
+ }
+}
+
+func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) {
+ m := &MyMessage{
+ RepInner: []*InnerMessage{{}},
+ }
+ expected := &MyMessage{
+ RepInner: []*InnerMessage{{
+ Port: Int32(4000),
+ }},
+ }
+ SetDefaults(m)
+ if !Equal(m, expected) {
+ t.Errorf("\n got %v\nwant %v", m, expected)
+ }
+}
+
+func TestSetDefaultWithRepeatedNonMessage(t *testing.T) {
+ m := &MyMessage{
+ Pet: []string{"turtle", "wombat"},
+ }
+ expected := Clone(m)
+ SetDefaults(m)
+ if !Equal(m, expected) {
+ t.Errorf("\n got %v\nwant %v", m, expected)
+ }
+}
+
+func TestMaximumTagNumber(t *testing.T) {
+ m := &MaxTag{
+ LastField: String("natural goat essence"),
+ }
+ buf, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("proto.Marshal failed: %v", err)
+ }
+ m2 := new(MaxTag)
+ if err := Unmarshal(buf, m2); err != nil {
+ t.Fatalf("proto.Unmarshal failed: %v", err)
+ }
+ if got, want := m2.GetLastField(), *m.LastField; got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+func TestJSON(t *testing.T) {
+ m := &MyMessage{
+ Count: Int32(4),
+ Pet: []string{"bunny", "kitty"},
+ Inner: &InnerMessage{
+ Host: String("cauchy"),
+ },
+ Bikeshed: MyMessage_GREEN.Enum(),
+ }
+ const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}`
+
+ b, err := json.Marshal(m)
+ if err != nil {
+ t.Fatalf("json.Marshal failed: %v", err)
+ }
+ s := string(b)
+ if s != expected {
+ t.Errorf("got %s\nwant %s", s, expected)
+ }
+
+ received := new(MyMessage)
+ if err := json.Unmarshal(b, received); err != nil {
+ t.Fatalf("json.Unmarshal failed: %v", err)
+ }
+ if !Equal(received, m) {
+ t.Fatalf("got %s, want %s", received, m)
+ }
+
+ // Test unmarshalling of JSON with symbolic enum name.
+ const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}`
+ received.Reset()
+ if err := json.Unmarshal([]byte(old), received); err != nil {
+ t.Fatalf("json.Unmarshal failed: %v", err)
+ }
+ if !Equal(received, m) {
+ t.Fatalf("got %s, want %s", received, m)
+ }
+}
+
+func TestBadWireType(t *testing.T) {
+ b := []byte{7<<3 | 6} // field 7, wire type 6
+ pb := new(OtherMessage)
+ if err := Unmarshal(b, pb); err == nil {
+ t.Errorf("Unmarshal did not fail")
+ } else if !strings.Contains(err.Error(), "unknown wire type") {
+ t.Errorf("wrong error: %v", err)
+ }
+}
+
+func TestBytesWithInvalidLength(t *testing.T) {
+ // If a byte sequence has an invalid (negative) length, Unmarshal should not panic.
+ b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0}
+ Unmarshal(b, new(MyMessage))
+}
+
+func TestLengthOverflow(t *testing.T) {
+ // Overflowing a length should not panic.
+ b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01}
+ Unmarshal(b, new(MyMessage))
+}
+
+func TestVarintOverflow(t *testing.T) {
+ // Overflowing a 64-bit length should not be allowed.
+ b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01}
+ if err := Unmarshal(b, new(MyMessage)); err == nil {
+ t.Fatalf("Overflowed uint64 length without error")
+ }
+}
+
+func TestUnmarshalFuzz(t *testing.T) {
+ const N = 1000
+ seed := time.Now().UnixNano()
+ t.Logf("RNG seed is %d", seed)
+ rng := rand.New(rand.NewSource(seed))
+ buf := make([]byte, 20)
+ for i := 0; i < N; i++ {
+ for j := range buf {
+ buf[j] = byte(rng.Intn(256))
+ }
+ fuzzUnmarshal(t, buf)
+ }
+}
+
+func TestMergeMessages(t *testing.T) {
+ pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}}
+ data, err := Marshal(pb)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+
+ pb1 := new(MessageList)
+ if err := Unmarshal(data, pb1); err != nil {
+ t.Fatalf("first Unmarshal: %v", err)
+ }
+ if err := Unmarshal(data, pb1); err != nil {
+ t.Fatalf("second Unmarshal: %v", err)
+ }
+ if len(pb1.Message) != 1 {
+ t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message))
+ }
+
+ pb2 := new(MessageList)
+ if err := UnmarshalMerge(data, pb2); err != nil {
+ t.Fatalf("first UnmarshalMerge: %v", err)
+ }
+ if err := UnmarshalMerge(data, pb2); err != nil {
+ t.Fatalf("second UnmarshalMerge: %v", err)
+ }
+ if len(pb2.Message) != 2 {
+ t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message))
+ }
+}
+
+func TestExtensionMarshalOrder(t *testing.T) {
+ m := &MyMessage{Count: Int(123)}
+ if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil {
+ t.Fatalf("SetExtension: %v", err)
+ }
+ if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil {
+ t.Fatalf("SetExtension: %v", err)
+ }
+ if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil {
+ t.Fatalf("SetExtension: %v", err)
+ }
+
+ // Serialize m several times, and check we get the same bytes each time.
+ var orig []byte
+ for i := 0; i < 100; i++ {
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if i == 0 {
+ orig = b
+ continue
+ }
+ if !bytes.Equal(b, orig) {
+ t.Errorf("Bytes differ on attempt #%d", i)
+ }
+ }
+}
+
+// Use many extensions, because a small map might happen to iterate in the
+// same order on every run, which would hide ordering bugs.
+var exts = []*ExtensionDesc{
+ E_X201,
+ E_X202,
+ E_X203,
+ E_X204,
+ E_X205,
+ E_X206,
+ E_X207,
+ E_X208,
+ E_X209,
+ E_X210,
+ E_X211,
+ E_X212,
+ E_X213,
+ E_X214,
+ E_X215,
+ E_X216,
+ E_X217,
+ E_X218,
+ E_X219,
+ E_X220,
+ E_X221,
+ E_X222,
+ E_X223,
+ E_X224,
+ E_X225,
+ E_X226,
+ E_X227,
+ E_X228,
+ E_X229,
+ E_X230,
+ E_X231,
+ E_X232,
+ E_X233,
+ E_X234,
+ E_X235,
+ E_X236,
+ E_X237,
+ E_X238,
+ E_X239,
+ E_X240,
+ E_X241,
+ E_X242,
+ E_X243,
+ E_X244,
+ E_X245,
+ E_X246,
+ E_X247,
+ E_X248,
+ E_X249,
+ E_X250,
+}
+
+func TestMessageSetMarshalOrder(t *testing.T) {
+ m := &MyMessageSet{}
+ for _, x := range exts {
+ if err := SetExtension(m, x, &Empty{}); err != nil {
+ t.Fatalf("SetExtension: %v", err)
+ }
+ }
+
+ buf, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+
+ // Serialize m several times, and check we get the same bytes each time.
+ for i := 0; i < 10; i++ {
+ b1, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if !bytes.Equal(b1, buf) {
+ t.Errorf("Bytes differ on re-Marshal #%d", i)
+ }
+
+ m2 := &MyMessageSet{}
+ if err := Unmarshal(buf, m2); err != nil {
+ t.Errorf("Unmarshal: %v", err)
+ }
+ b2, err := Marshal(m2)
+ if err != nil {
+ t.Errorf("re-Marshal: %v", err)
+ }
+ if !bytes.Equal(b2, buf) {
+ t.Errorf("Bytes differ on round-trip #%d", i)
+ }
+ }
+}
+
+func TestUnmarshalMergesMessages(t *testing.T) {
+ // If a nested message occurs twice in the input,
+ // the fields should be merged when decoding.
+ a := &OtherMessage{
+ Key: Int64(123),
+ Inner: &InnerMessage{
+ Host: String("polhode"),
+ Port: Int32(1234),
+ },
+ }
+ aData, err := Marshal(a)
+ if err != nil {
+ t.Fatalf("Marshal(a): %v", err)
+ }
+ b := &OtherMessage{
+ Weight: Float32(1.2),
+ Inner: &InnerMessage{
+ Host: String("herpolhode"),
+ Connected: Bool(true),
+ },
+ }
+ bData, err := Marshal(b)
+ if err != nil {
+ t.Fatalf("Marshal(b): %v", err)
+ }
+ want := &OtherMessage{
+ Key: Int64(123),
+ Weight: Float32(1.2),
+ Inner: &InnerMessage{
+ Host: String("herpolhode"),
+ Port: Int32(1234),
+ Connected: Bool(true),
+ },
+ }
+ got := new(OtherMessage)
+ if err := Unmarshal(append(aData, bData...), got); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if !Equal(got, want) {
+ t.Errorf("\n got %v\nwant %v", got, want)
+ }
+}
+
+func TestEncodingSizes(t *testing.T) {
+ tests := []struct {
+ m Message
+ n int
+ }{
+ {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6},
+ {&Defaults{F_Int32: Int32(math.MinInt32)}, 11},
+ {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6},
+ {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6},
+ }
+ for _, test := range tests {
+ b, err := Marshal(test.m)
+ if err != nil {
+ t.Errorf("Marshal(%v): %v", test.m, err)
+ continue
+ }
+ if len(b) != test.n {
+ t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n)
+ }
+ }
+}
+
+func TestRequiredNotSetError(t *testing.T) {
+ pb := initGoTest(false)
+ pb.RequiredField.Label = nil
+ pb.F_Int32Required = nil
+ pb.F_Int64Required = nil
+
+ expected := "0807" + // field 1, encoding 0, value 7
+ "2206" + "120474797065" + // field 4, encoding 2 (GoTestField)
+ "5001" + // field 10, encoding 0, value 1
+ "6d20000000" + // field 13, encoding 5, value 0x20
+ "714000000000000000" + // field 14, encoding 1, value 0x40
+ "78a019" + // field 15, encoding 0, value 0xca0 = 3232
+ "8001c032" + // field 16, encoding 0, value 0x1940 = 6464
+ "8d0100004a45" + // field 17, encoding 5, value 3232.0
+ "9101000000000040b940" + // field 18, encoding 1, value 6464.0
+ "9a0106" + "737472696e67" + // field 19, encoding 2, string "string"
+ "b304" + // field 70, encoding 3, start group
+ "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required"
+ "b404" + // field 70, encoding 4, end group
+ "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes"
+ "b0063f" + // field 102, encoding 0, 0x3f zigzag32
+ "b8067f" // field 103, encoding 0, 0x7f zigzag64
+
+ o := old()
+ bytes, err := Marshal(pb)
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err)
+ o.DebugPrint("", bytes)
+ t.Fatalf("expected = %s", expected)
+ }
+ if strings.Index(err.Error(), "RequiredField.Label") < 0 {
+ t.Errorf("marshal-1 wrong err msg: %v", err)
+ }
+ if !equal(bytes, expected, t) {
+ o.DebugPrint("neq 1", bytes)
+ t.Fatalf("expected = %s", expected)
+ }
+
+ // Now test Unmarshal by recreating the original buffer.
+ pbd := new(GoTest)
+ err = Unmarshal(bytes, pbd)
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err)
+ o.DebugPrint("", bytes)
+ t.Fatalf("string = %s", expected)
+ }
+ if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 {
+ t.Errorf("unmarshal wrong err msg: %v", err)
+ }
+ bytes, err = Marshal(pbd)
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err)
+ o.DebugPrint("", bytes)
+ t.Fatalf("string = %s", expected)
+ }
+ if strings.Index(err.Error(), "RequiredField.Label") < 0 {
+ t.Errorf("marshal-2 wrong err msg: %v", err)
+ }
+ if !equal(bytes, expected, t) {
+ o.DebugPrint("neq 2", bytes)
+ t.Fatalf("string = %s", expected)
+ }
+}
+
+func fuzzUnmarshal(t *testing.T, data []byte) {
+ defer func() {
+ if e := recover(); e != nil {
+ t.Errorf("These bytes caused a panic: %+v", data)
+ t.Logf("Stack:\n%s", debug.Stack())
+ t.FailNow()
+ }
+ }()
+
+ pb := new(MyMessage)
+ Unmarshal(data, pb)
+}
+
+func TestMapFieldMarshal(t *testing.T) {
+ m := &MessageWithMap{
+ NameMapping: map[int32]string{
+ 1: "Rob",
+ 4: "Ian",
+ 8: "Dave",
+ },
+ }
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+
+ // b should be the concatenation of these three byte sequences in some order.
+ parts := []string{
+ "\n\a\b\x01\x12\x03Rob",
+ "\n\a\b\x04\x12\x03Ian",
+ "\n\b\b\x08\x12\x04Dave",
+ }
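+ // Each part is one map entry encoded as a nested message: 0x0A is field 1
+ // (name_mapping, length-delimited), then the entry length, then the key as
+ // field 1 (0x08) and the value as field 2 (0x12).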
+ ok := false
+ for i := range parts {
+ for j := range parts {
+ if j == i {
+ continue
+ }
+ for k := range parts {
+ if k == i || k == j {
+ continue
+ }
+ try := parts[i] + parts[j] + parts[k]
+ if bytes.Equal(b, []byte(try)) {
+ ok = true
+ break
+ }
+ }
+ }
+ }
+ if !ok {
+ t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2])
+ }
+ t.Logf("FYI b: %q", b)
+
+ (new(Buffer)).DebugPrint("Dump of b", b)
+}
+
+func TestMapFieldRoundTrips(t *testing.T) {
+ m := &MessageWithMap{
+ NameMapping: map[int32]string{
+ 1: "Rob",
+ 4: "Ian",
+ 8: "Dave",
+ },
+ MsgMapping: map[int64]*FloatingPoint{
+ 0x7001: &FloatingPoint{F: Float64(2.0)},
+ },
+ ByteMapping: map[bool][]byte{
+ false: []byte("that's not right!"),
+ true: []byte("aye, 'tis true!"),
+ },
+ }
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ t.Logf("FYI b: %q", b)
+ m2 := new(MessageWithMap)
+ if err := Unmarshal(b, m2); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ for _, pair := range [][2]interface{}{
+ {m.NameMapping, m2.NameMapping},
+ {m.MsgMapping, m2.MsgMapping},
+ {m.ByteMapping, m2.ByteMapping},
+ } {
+ if !reflect.DeepEqual(pair[0], pair[1]) {
+ t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1])
+ }
+ }
+}
+
+func TestMapFieldWithNil(t *testing.T) {
+ m1 := &MessageWithMap{
+ MsgMapping: map[int64]*FloatingPoint{
+ 1: nil,
+ },
+ }
+ b, err := Marshal(m1)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ m2 := new(MessageWithMap)
+ if err := Unmarshal(b, m2); err != nil {
+ t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b)
+ }
+ if v, ok := m2.MsgMapping[1]; !ok {
+ t.Error("msg_mapping[1] not present")
+ } else if v != nil {
+ t.Errorf("msg_mapping[1] not nil: %v", v)
+ }
+}
+
+func TestMapFieldWithNilBytes(t *testing.T) {
+ m1 := &MessageWithMap{
+ ByteMapping: map[bool][]byte{
+ false: []byte{},
+ true: nil,
+ },
+ }
+ n := Size(m1)
+ b, err := Marshal(m1)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if n != len(b) {
+ t.Errorf("Size(m1) = %d; want len(Marshal(m1)) = %d", n, len(b))
+ }
+ m2 := new(MessageWithMap)
+ if err := Unmarshal(b, m2); err != nil {
+ t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b)
+ }
+ if v, ok := m2.ByteMapping[false]; !ok {
+ t.Error("byte_mapping[false] not present")
+ } else if len(v) != 0 {
+ t.Errorf("byte_mapping[false] not empty: %#v", v)
+ }
+ if v, ok := m2.ByteMapping[true]; !ok {
+ t.Error("byte_mapping[true] not present")
+ } else if len(v) != 0 {
+ t.Errorf("byte_mapping[true] not empty: %#v", v)
+ }
+}
+
+func TestDecodeMapFieldMissingKey(t *testing.T) {
+ b := []byte{
+ 0x0A, 0x03, // message, tag 1 (name_mapping), of length 3 bytes
+ // no key
+ 0x12, 0x01, 0x6D, // string value of length 1 byte, value "m"
+ }
+ got := &MessageWithMap{}
+ err := Unmarshal(b, got)
+ if err != nil {
+ t.Fatalf("failed to marshal map with missing key: %v", err)
+ }
+ want := &MessageWithMap{NameMapping: map[int32]string{0: "m"}}
+ if !Equal(got, want) {
+ t.Errorf("Unmarshaled map with no key was not as expected. got: %v, want %v", got, want)
+ }
+}
+
+func TestDecodeMapFieldMissingValue(t *testing.T) {
+ b := []byte{
+ 0x0A, 0x02, // message, tag 1 (name_mapping), of length 2 bytes
+ 0x08, 0x01, // varint key, value 1
+ // no value
+ }
+ got := &MessageWithMap{}
+ err := Unmarshal(b, got)
+ if err != nil {
+ t.Fatalf("failed to marshal map with missing value: %v", err)
+ }
+ want := &MessageWithMap{NameMapping: map[int32]string{1: ""}}
+ if !Equal(got, want) {
+ t.Errorf("Unmarshaled map with no value was not as expected. got: %v, want %v", got, want)
+ }
+}
+
+func TestOneof(t *testing.T) {
+ m := &Communique{}
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal of empty message with oneof: %v", err)
+ }
+ if len(b) != 0 {
+ t.Errorf("Marshal of empty message yielded too many bytes: %v", b)
+ }
+
+ m = &Communique{
+ Union: &Communique_Name{"Barry"},
+ }
+
+ // Round-trip.
+ b, err = Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal of message with oneof: %v", err)
+ }
+ if len(b) != 7 { // name tag/wire (1) + name len (1) + name (5)
+ t.Errorf("Incorrect marshal of message with oneof: %v", b)
+ }
+ m.Reset()
+ if err := Unmarshal(b, m); err != nil {
+ t.Fatalf("Unmarshal of message with oneof: %v", err)
+ }
+ if x, ok := m.Union.(*Communique_Name); !ok || x.Name != "Barry" {
+ t.Errorf("After round trip, Union = %+v", m.Union)
+ }
+ if name := m.GetName(); name != "Barry" {
+ t.Errorf("After round trip, GetName = %q, want %q", name, "Barry")
+ }
+
+ // Let's try with a message in the oneof.
+ m.Union = &Communique_Msg{&Strings{StringField: String("deep deep string")}}
+ b, err = Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal of message with oneof set to message: %v", err)
+ }
+ if len(b) != 20 { // msg tag/wire (1) + msg len (1) + msg (1 + 1 + 16)
+ t.Errorf("Incorrect marshal of message with oneof set to message: %v", b)
+ }
+ m.Reset()
+ if err := Unmarshal(b, m); err != nil {
+ t.Fatalf("Unmarshal of message with oneof set to message: %v", err)
+ }
+ ss, ok := m.Union.(*Communique_Msg)
+ if !ok || ss.Msg.GetStringField() != "deep deep string" {
+ t.Errorf("After round trip with oneof set to message, Union = %+v", m.Union)
+ }
+}
+
+func TestInefficientPackedBool(t *testing.T) {
+ // https://github.com/golang/protobuf/issues/76
+ inp := []byte{
+ 0x12, 0x02, // 0x12 = 2<<3|2; 2 bytes
+ // Usually a bool should take a single byte,
+ // but it is permitted to be any varint.
+ 0xb9, 0x30,
+ }
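+ // (0xb9 0x30 is the two-byte varint 6201, which is non-zero and so still
+ // decodes as true.)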
+ if err := Unmarshal(inp, new(MoreRepeated)); err != nil {
+ t.Error(err)
+ }
+}
+
+// Benchmarks
+
+func testMsg() *GoTest {
+ pb := initGoTest(true)
+ const N = 1000 // Internally the library starts much smaller.
+ pb.F_Int32Repeated = make([]int32, N)
+ pb.F_DoubleRepeated = make([]float64, N)
+ for i := 0; i < N; i++ {
+ pb.F_Int32Repeated[i] = int32(i)
+ pb.F_DoubleRepeated[i] = float64(i)
+ }
+ return pb
+}
+
+func bytesMsg() *GoTest {
+ pb := initGoTest(true)
+ buf := make([]byte, 4000)
+ for i := range buf {
+ buf[i] = byte(i)
+ }
+ pb.F_BytesDefaulted = buf
+ return pb
+}
+
+func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) {
+ d, _ := marshal(pb)
+ b.SetBytes(int64(len(d)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ marshal(pb)
+ }
+}
+
+func benchmarkBufferMarshal(b *testing.B, pb Message) {
+ p := NewBuffer(nil)
+ benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) {
+ p.Reset()
+ err := p.Marshal(pb0)
+ return p.Bytes(), err
+ })
+}
+
+func benchmarkSize(b *testing.B, pb Message) {
+ benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) {
+ Size(pb0)
+ return nil, nil
+ })
+}
+
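+// newOf returns a new, empty message of the same concrete type as pb,
+// or pb itself if it is a typed nil.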
+func newOf(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+ return reflect.New(in.Type().Elem()).Interface().(Message)
+}
+
+func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) {
+ d, _ := Marshal(pb)
+ b.SetBytes(int64(len(d)))
+ pbd := newOf(pb)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ unmarshal(d, pbd)
+ }
+}
+
+func benchmarkBufferUnmarshal(b *testing.B, pb Message) {
+ p := NewBuffer(nil)
+ benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error {
+ p.SetBuf(d)
+ return p.Unmarshal(pb0)
+ })
+}
+
+// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes}
+
+func BenchmarkMarshal(b *testing.B) {
+ benchmarkMarshal(b, testMsg(), Marshal)
+}
+
+func BenchmarkBufferMarshal(b *testing.B) {
+ benchmarkBufferMarshal(b, testMsg())
+}
+
+func BenchmarkSize(b *testing.B) {
+ benchmarkSize(b, testMsg())
+}
+
+func BenchmarkUnmarshal(b *testing.B) {
+ benchmarkUnmarshal(b, testMsg(), Unmarshal)
+}
+
+func BenchmarkBufferUnmarshal(b *testing.B) {
+ benchmarkBufferUnmarshal(b, testMsg())
+}
+
+func BenchmarkMarshalBytes(b *testing.B) {
+ benchmarkMarshal(b, bytesMsg(), Marshal)
+}
+
+func BenchmarkBufferMarshalBytes(b *testing.B) {
+ benchmarkBufferMarshal(b, bytesMsg())
+}
+
+func BenchmarkSizeBytes(b *testing.B) {
+ benchmarkSize(b, bytesMsg())
+}
+
+func BenchmarkUnmarshalBytes(b *testing.B) {
+ benchmarkUnmarshal(b, bytesMsg(), Unmarshal)
+}
+
+func BenchmarkBufferUnmarshalBytes(b *testing.B) {
+ benchmarkBufferUnmarshal(b, bytesMsg())
+}
+
+func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) {
+ b.StopTimer()
+ pb := initGoTestField()
+ skip := &GoSkipTest{
+ SkipInt32: Int32(32),
+ SkipFixed32: Uint32(3232),
+ SkipFixed64: Uint64(6464),
+ SkipString: String("skipper"),
+ Skipgroup: &GoSkipTest_SkipGroup{
+ GroupInt32: Int32(75),
+ GroupString: String("wxyz"),
+ },
+ }
+
+ pbd := new(GoTestField)
+ p := NewBuffer(nil)
+ p.Marshal(pb)
+ p.Marshal(skip)
+ p2 := NewBuffer(nil)
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ p2.SetBuf(p.Bytes())
+ p2.Unmarshal(pbd)
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/any_test.go b/vendor/github.com/golang/protobuf/proto/any_test.go
new file mode 100644
index 0000000..1a3c22e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/any_test.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ pb "github.com/golang/protobuf/proto/proto3_proto"
+ testpb "github.com/golang/protobuf/proto/testdata"
+ anypb "github.com/golang/protobuf/ptypes/any"
+)
+
+var (
+ expandedMarshaler = proto.TextMarshaler{ExpandAny: true}
+ expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true}
+)
+
+// anyEqual reports whether two messages which may be google.protobuf.Any or may
+// contain google.protobuf.Any fields are equal. We can't use proto.Equal for
+// comparison, because semantically equivalent messages may be marshaled to
+// binary in different tag order. Instead, trust that TextMarshaler with the
+// ExpandAny option works and compare the text marshaling results.
+func anyEqual(got, want proto.Message) bool {
+ // if messages are proto.Equal, no need to marshal.
+ if proto.Equal(got, want) {
+ return true
+ }
+ g := expandedMarshaler.Text(got)
+ w := expandedMarshaler.Text(want)
+ return g == w
+}
+
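+// golden pairs a message with its expected expanded text form (t) and the
+// corresponding compact expanded form (c).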
+type golden struct {
+ m proto.Message
+ t, c string
+}
+
+var goldenMessages = makeGolden()
+
+func makeGolden() []golden {
+ nested := &pb.Nested{Bunny: "Monty"}
+ nb, err := proto.Marshal(nested)
+ if err != nil {
+ panic(err)
+ }
+ m1 := &pb.Message{
+ Name: "David",
+ ResultCount: 47,
+ Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb},
+ }
+ m2 := &pb.Message{
+ Name: "David",
+ ResultCount: 47,
+ Anything: &anypb.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb},
+ }
+ m3 := &pb.Message{
+ Name: "David",
+ ResultCount: 47,
+ Anything: &anypb.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb},
+ }
+ m4 := &pb.Message{
+ Name: "David",
+ ResultCount: 47,
+ Anything: &anypb.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb},
+ }
+ m5 := &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}
+
+ any1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")}
+ proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")})
+ proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar"))
+ any1b, err := proto.Marshal(any1)
+ if err != nil {
+ panic(err)
+ }
+ any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}}
+ proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")})
+ any2b, err := proto.Marshal(any2)
+ if err != nil {
+ panic(err)
+ }
+ m6 := &pb.Message{
+ Name: "David",
+ ResultCount: 47,
+ Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b},
+ ManyThings: []*anypb.Any{
+ &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b},
+ &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b},
+ },
+ }
+
+ const (
+ m1Golden = `
+name: "David"
+result_count: 47
+anything: <
+ [type.googleapis.com/proto3_proto.Nested]: <
+ bunny: "Monty"
+ >
+>
+`
+ m2Golden = `
+name: "David"
+result_count: 47
+anything: <
+ ["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: <
+ bunny: "Monty"
+ >
+>
+`
+ m3Golden = `
+name: "David"
+result_count: 47
+anything: <
+ ["type.googleapis.com/\"/proto3_proto.Nested"]: <
+ bunny: "Monty"
+ >
+>
+`
+ m4Golden = `
+name: "David"
+result_count: 47
+anything: <
+ [type.googleapis.com/a/path/proto3_proto.Nested]: <
+ bunny: "Monty"
+ >
+>
+`
+ m5Golden = `
+[type.googleapis.com/proto3_proto.Nested]: <
+ bunny: "Monty"
+>
+`
+ m6Golden = `
+name: "David"
+result_count: 47
+anything: <
+ [type.googleapis.com/testdata.MyMessage]: <
+ count: 47
+ name: "David"
+ [testdata.Ext.more]: <
+ data: "foo"
+ >
+ [testdata.Ext.text]: "bar"
+ >
+>
+many_things: <
+ [type.googleapis.com/testdata.MyMessage]: <
+ count: 42
+ bikeshed: GREEN
+ rep_bytes: "roboto"
+ [testdata.Ext.more]: <
+ data: "baz"
+ >
+ >
+>
+many_things: <
+ [type.googleapis.com/testdata.MyMessage]: <
+ count: 47
+ name: "David"
+ [testdata.Ext.more]: <
+ data: "foo"
+ >
+ [testdata.Ext.text]: "bar"
+ >
+>
+`
+ )
+ return []golden{
+ {m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "},
+ {m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "},
+ {m3, strings.TrimSpace(m3Golden) + "\n", strings.TrimSpace(compact(m3Golden)) + " "},
+ {m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "},
+ {m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "},
+ {m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "},
+ }
+}
+
+func TestMarshalGolden(t *testing.T) {
+ for _, tt := range goldenMessages {
+ if got, want := expandedMarshaler.Text(tt.m), tt.t; got != want {
+ t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want)
+ }
+ if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want {
+ t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want)
+ }
+ }
+}
+
+func TestUnmarshalGolden(t *testing.T) {
+ for _, tt := range goldenMessages {
+ want := tt.m
+ got := proto.Clone(tt.m)
+ got.Reset()
+ if err := proto.UnmarshalText(tt.t, got); err != nil {
+ t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err)
+ }
+ if !anyEqual(got, want) {
+ t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want)
+ }
+ got.Reset()
+ if err := proto.UnmarshalText(tt.c, got); err != nil {
+ t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err)
+ }
+ if !anyEqual(got, want) {
+ t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want)
+ }
+ }
+}
+
+func TestMarshalUnknownAny(t *testing.T) {
+ m := &pb.Message{
+ Anything: &anypb.Any{
+ TypeUrl: "foo",
+ Value: []byte("bar"),
+ },
+ }
+ want := `anything: <
+ type_url: "foo"
+ value: "bar"
+>
+`
+ got := expandedMarshaler.Text(m)
+ if got != want {
+ t.Errorf("got\n`%s`\nwant\n`%s`", got, want)
+ }
+}
+
+func TestAmbiguousAny(t *testing.T) {
+ pb := &anypb.Any{}
+ err := proto.UnmarshalText(`
+ type_url: "ttt/proto3_proto.Nested"
+ value: "\n\x05Monty"
+ `, pb)
+ t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err)
+ if err != nil {
+ t.Errorf("failed to parse ambiguous Any message: %v", err)
+ }
+}
+
+func TestUnmarshalOverwriteAny(t *testing.T) {
+ pb := &anypb.Any{}
+ err := proto.UnmarshalText(`
+ [type.googleapis.com/a/path/proto3_proto.Nested]: <
+ bunny: "Monty"
+ >
+ [type.googleapis.com/a/path/proto3_proto.Nested]: <
+ bunny: "Rabbit of Caerbannog"
+ >
+ `, pb)
+ want := `line 7: Any message unpacked multiple times, or "type_url" already set`
+ if err.Error() != want {
+ t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want)
+ }
+}
+
+func TestUnmarshalAnyMixAndMatch(t *testing.T) {
+ pb := &anypb.Any{}
+ err := proto.UnmarshalText(`
+ value: "\n\x05Monty"
+ [type.googleapis.com/a/path/proto3_proto.Nested]: <
+ bunny: "Rabbit of Caerbannog"
+ >
+ `, pb)
+ want := `line 5: Any message unpacked multiple times, or "value" already set`
+ if err.Error() != want {
+ t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want)
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000..e392575
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,229 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := extendable(in.Addr().Interface()); ok {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/clone_test.go b/vendor/github.com/golang/protobuf/proto/clone_test.go
new file mode 100644
index 0000000..f607ff4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone_test.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+ pb "github.com/golang/protobuf/proto/testdata"
+)
+
+var cloneTestMessage = &pb.MyMessage{
+ Count: proto.Int32(42),
+ Name: proto.String("Dave"),
+ Pet: []string{"bunny", "kitty", "horsey"},
+ Inner: &pb.InnerMessage{
+ Host: proto.String("niles"),
+ Port: proto.Int32(9099),
+ Connected: proto.Bool(true),
+ },
+ Others: []*pb.OtherMessage{
+ {
+ Value: []byte("some bytes"),
+ },
+ },
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: proto.Int32(6),
+ },
+ RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
+}
+
+func init() {
+ ext := &pb.Ext{
+ Data: proto.String("extension"),
+ }
+ if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil {
+ panic("SetExtension: " + err.Error())
+ }
+}
+
+func TestClone(t *testing.T) {
+ m := proto.Clone(cloneTestMessage).(*pb.MyMessage)
+ if !proto.Equal(m, cloneTestMessage) {
+ t.Errorf("Clone(%v) = %v", cloneTestMessage, m)
+ }
+
+ // Verify it was a deep copy.
+ *m.Inner.Port++
+ if proto.Equal(m, cloneTestMessage) {
+ t.Error("Mutating clone changed the original")
+ }
+ // Byte fields and repeated fields should be copied.
+ if &m.Pet[0] == &cloneTestMessage.Pet[0] {
+ t.Error("Pet: repeated field not copied")
+ }
+ if &m.Others[0] == &cloneTestMessage.Others[0] {
+ t.Error("Others: repeated field not copied")
+ }
+ if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] {
+ t.Error("Others[0].Value: bytes field not copied")
+ }
+ if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] {
+ t.Error("RepBytes: repeated field not copied")
+ }
+ if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] {
+ t.Error("RepBytes[0]: bytes field not copied")
+ }
+}
+
+func TestCloneNil(t *testing.T) {
+ var m *pb.MyMessage
+ if c := proto.Clone(m); !proto.Equal(m, c) {
+ t.Errorf("Clone(%v) = %v", m, c)
+ }
+}
+
+var mergeTests = []struct {
+ src, dst, want proto.Message
+}{
+ {
+ src: &pb.MyMessage{
+ Count: proto.Int32(42),
+ },
+ dst: &pb.MyMessage{
+ Name: proto.String("Dave"),
+ },
+ want: &pb.MyMessage{
+ Count: proto.Int32(42),
+ Name: proto.String("Dave"),
+ },
+ },
+ {
+ src: &pb.MyMessage{
+ Inner: &pb.InnerMessage{
+ Host: proto.String("hey"),
+ Connected: proto.Bool(true),
+ },
+ Pet: []string{"horsey"},
+ Others: []*pb.OtherMessage{
+ {
+ Value: []byte("some bytes"),
+ },
+ },
+ },
+ dst: &pb.MyMessage{
+ Inner: &pb.InnerMessage{
+ Host: proto.String("niles"),
+ Port: proto.Int32(9099),
+ },
+ Pet: []string{"bunny", "kitty"},
+ Others: []*pb.OtherMessage{
+ {
+ Key: proto.Int64(31415926535),
+ },
+ {
+ // Explicitly test a src=nil field
+ Inner: nil,
+ },
+ },
+ },
+ want: &pb.MyMessage{
+ Inner: &pb.InnerMessage{
+ Host: proto.String("hey"),
+ Connected: proto.Bool(true),
+ Port: proto.Int32(9099),
+ },
+ Pet: []string{"bunny", "kitty", "horsey"},
+ Others: []*pb.OtherMessage{
+ {
+ Key: proto.Int64(31415926535),
+ },
+ {},
+ {
+ Value: []byte("some bytes"),
+ },
+ },
+ },
+ },
+ {
+ src: &pb.MyMessage{
+ RepBytes: [][]byte{[]byte("wow")},
+ },
+ dst: &pb.MyMessage{
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: proto.Int32(6),
+ },
+ RepBytes: [][]byte{[]byte("sham")},
+ },
+ want: &pb.MyMessage{
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: proto.Int32(6),
+ },
+ RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
+ },
+ },
+ // Check that a scalar bytes field replaces rather than appends.
+ {
+ src: &pb.OtherMessage{Value: []byte("foo")},
+ dst: &pb.OtherMessage{Value: []byte("bar")},
+ want: &pb.OtherMessage{Value: []byte("foo")},
+ },
+ {
+ src: &pb.MessageWithMap{
+ NameMapping: map[int32]string{6: "Nigel"},
+ MsgMapping: map[int64]*pb.FloatingPoint{
+ 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
+ 0x4002: &pb.FloatingPoint{
+ F: proto.Float64(2.0),
+ },
+ },
+ ByteMapping: map[bool][]byte{true: []byte("wowsa")},
+ },
+ dst: &pb.MessageWithMap{
+ NameMapping: map[int32]string{
+ 6: "Bruce", // should be overwritten
+ 7: "Andrew",
+ },
+ MsgMapping: map[int64]*pb.FloatingPoint{
+ 0x4002: &pb.FloatingPoint{
+ F: proto.Float64(3.0),
+ Exact: proto.Bool(true),
+ }, // the entire message should be overwritten
+ },
+ },
+ want: &pb.MessageWithMap{
+ NameMapping: map[int32]string{
+ 6: "Nigel",
+ 7: "Andrew",
+ },
+ MsgMapping: map[int64]*pb.FloatingPoint{
+ 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
+ 0x4002: &pb.FloatingPoint{
+ F: proto.Float64(2.0),
+ },
+ },
+ ByteMapping: map[bool][]byte{true: []byte("wowsa")},
+ },
+ },
+ // proto3 shouldn't merge zero values,
+ // in the same way that proto2 shouldn't merge nils.
+ {
+ src: &proto3pb.Message{
+ Name: "Aaron",
+ Data: []byte(""), // zero value, but not nil
+ },
+ dst: &proto3pb.Message{
+ HeightInCm: 176,
+ Data: []byte("texas!"),
+ },
+ want: &proto3pb.Message{
+ Name: "Aaron",
+ HeightInCm: 176,
+ Data: []byte("texas!"),
+ },
+ },
+ // Oneof fields should merge by assignment.
+ {
+ src: &pb.Communique{
+ Union: &pb.Communique_Number{41},
+ },
+ dst: &pb.Communique{
+ Union: &pb.Communique_Name{"Bobby Tables"},
+ },
+ want: &pb.Communique{
+ Union: &pb.Communique_Number{41},
+ },
+ },
+ // Oneof nil is the same as not set.
+ {
+ src: &pb.Communique{},
+ dst: &pb.Communique{
+ Union: &pb.Communique_Name{"Bobby Tables"},
+ },
+ want: &pb.Communique{
+ Union: &pb.Communique_Name{"Bobby Tables"},
+ },
+ },
+ {
+ src: &proto3pb.Message{
+ Terrain: map[string]*proto3pb.Nested{
+ "kay_a": &proto3pb.Nested{Cute: true}, // replace
+ "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, // insert
+ },
+ },
+ dst: &proto3pb.Message{
+ Terrain: map[string]*proto3pb.Nested{
+ "kay_a": &proto3pb.Nested{Bunny: "lost"}, // replaced
+ "kay_c": &proto3pb.Nested{Bunny: "bunny"}, // keep
+ },
+ },
+ want: &proto3pb.Message{
+ Terrain: map[string]*proto3pb.Nested{
+ "kay_a": &proto3pb.Nested{Cute: true},
+ "kay_b": &proto3pb.Nested{Bunny: "rabbit"},
+ "kay_c": &proto3pb.Nested{Bunny: "bunny"},
+ },
+ },
+ },
+}
+
+func TestMerge(t *testing.T) {
+ for _, m := range mergeTests {
+ got := proto.Clone(m.dst)
+ proto.Merge(got, m.src)
+ if !proto.Equal(got, m.want) {
+ t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want)
+ }
+ }
+}
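+
+// Editor's note (not part of the upstream file): these tests can be run on
+// their own with something like
+//
+//	go test -run 'TestClone|TestCloneNil|TestMerge' github.com/golang/protobuf/proto
+//
+// assuming the generated testdata and proto3_proto packages are present.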
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000..aa20729
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,970 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough data.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
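+
+// Worked example (editor's note, not part of the upstream file): a varint
+// stores 7 bits per byte, least significant group first, with the high bit as
+// a continuation flag, so
+//
+//	DecodeVarint([]byte{0xAC, 0x02}) // returns 300, 2
+//
+// because 0xAC contributes 0x2C (44) and 0x02 contributes 2<<7 (256).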
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ i := p.index
+ buf := p.buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ p.index++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return p.decodeVarintSlow()
+ }
+
+ var b uint64
+ // we already checked the first byte
+ x = uint64(buf[i]) - 0x80
+ i++
+
+ b = uint64(buf[i])
+ i++
+ x += b << 7
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 7
+
+ b = uint64(buf[i])
+ i++
+ x += b << 14
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 14
+
+ b = uint64(buf[i])
+ i++
+ x += b << 21
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 21
+
+ b = uint64(buf[i])
+ i++
+ x += b << 28
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 28
+
+ b = uint64(buf[i])
+ i++
+ x += b << 35
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 35
+
+ b = uint64(buf[i])
+ i++
+ x += b << 42
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 42
+
+ b = uint64(buf[i])
+ i++
+ x += b << 49
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 49
+
+ b = uint64(buf[i])
+ i++
+ x += b << 56
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 56
+
+ b = uint64(buf[i])
+ i++
+ x += b << 63
+ if b&0x80 == 0 {
+ goto done
+ }
+ // x -= 0x80 << 63 // Always zero.
+
+ return 0, errOverflow
+
+done:
+ p.index = i
+ return x, nil
+}
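+
+// Editor's note (not part of the upstream file): the unrolled fast path above
+// adds each raw byte shifted into place and, whenever that byte's high bit
+// turns out to be set, subtracts the continuation bit (0x80 << shift) again.
+// That is equivalent to masking each byte with 0x7F up front, but keeps the
+// common exit (a byte with the high bit clear) down to an add and a branch.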
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
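+
+// Worked example (editor's note, not part of the upstream file): both fixed
+// decoders read little-endian bytes, so a Buffer positioned at
+// {0x2A, 0x00, 0x00, 0x00} yields 42 from DecodeFixed32, and the same bytes
+// followed by four more zero bytes yield 42 from DecodeFixed64.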
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
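+
+// Worked example (editor's note, not part of the upstream file): zigzag
+// decoding maps the unsigned varints 0, 1, 2, 3, 4, ... back to the signed
+// values 0, -1, 1, -2, 2, ..., so a decoded varint of 1 yields -1 and a
+// decoded varint of 4294967294 yields 2147483647.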
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+		// TODO: check whether more callers can use alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
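+
+// Worked example (editor's note, not part of the upstream file): both routines
+// above consume a length-delimited record, a varint byte count followed by
+// that many bytes. With the Buffer positioned at {0x03, 'f', 'o', 'o'},
+// DecodeRawBytes returns []byte("foo") and DecodeStringBytes returns "foo".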
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Add the skipped field to struct field
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
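+
+// Illustrative sketch (editor's note, not part of the upstream file): the
+// difference between the two entry points shows up with repeated fields.
+// Assuming data holds an encoded *pb.MyMessage from the testdata package:
+//
+//	m := &pb.MyMessage{Pet: []string{"bunny"}}
+//	proto.UnmarshalMerge(data, m) // appends any pets encoded in data
+//	proto.Unmarshal(data, m)      // resets m first; only the pets in data remain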
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ if required > 0 {
+ // Not enough information to determine the exact field.
+ // (See below.)
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ extmap := e.extensionsWrite()
+ ext := extmap[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ extmap[int32(tag)] = ext
+ }
+ continue
+ }
+ }
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
+
+// Individual type decoders
+// For each,
+// u is the decoded value,
+//	v is a pointer to the field (itself a pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+
+ y := *v
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+		// The tagcode for each of the key and value properties is always a single byte
+		// because they have tags 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() {
+ keyelem = reflect.Zero(p.mtype.Key())
+ }
+ if !valelem.IsValid() {
+ valelem = reflect.Zero(p.mtype.Elem())
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
diff --git a/vendor/github.com/golang/protobuf/proto/decode_test.go b/vendor/github.com/golang/protobuf/proto/decode_test.go
new file mode 100644
index 0000000..2c4c31d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode_test.go
@@ -0,0 +1,258 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build go1.7
+
+package proto_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ tpb "github.com/golang/protobuf/proto/proto3_proto"
+)
+
+var (
+ bytesBlackhole []byte
+ msgBlackhole = new(tpb.Message)
+)
+
+// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and
+// 2 bytes long).
+func BenchmarkVarint32ArraySmall(b *testing.B) {
+ for i := uint(1); i <= 10; i++ {
+ dist := genInt32Dist([7]int{0, 3, 1}, 1<<i)
+ raw, err := proto.Marshal(&tpb.Message{
+ ShortKey: dist,
+ })
+ if err != nil {
+ b.Error("wrong encode", err)
+ }
+ b.Run(fmt.Sprintf("Len%v", len(dist)), func(b *testing.B) {
+ scratchBuf := proto.NewBuffer(nil)
+ b.ResetTimer()
+ for k := 0; k < b.N; k++ {
+ scratchBuf.SetBuf(raw)
+ msgBlackhole.Reset()
+ if err := scratchBuf.Unmarshal(msgBlackhole); err != nil {
+ b.Error("wrong decode", err)
+ }
+ }
+ })
+ }
+}
+
+// BenchmarkVarint32ArrayLarge shows the performance on an array of large int32 fields (3 and
+// 4 bytes long, with a small number of 1, 2, 5 and 10 byte long versions).
+func BenchmarkVarint32ArrayLarge(b *testing.B) {
+ for i := uint(1); i <= 10; i++ {
+ dist := genInt32Dist([7]int{0, 1, 2, 4, 8, 1, 1}, 1<<i)
+ raw, err := proto.Marshal(&tpb.Message{
+ ShortKey: dist,
+ })
+ if err != nil {
+ b.Error("wrong encode", err)
+ }
+ b.Run(fmt.Sprintf("Len%v", len(dist)), func(b *testing.B) {
+ scratchBuf := proto.NewBuffer(nil)
+ b.ResetTimer()
+ for k := 0; k < b.N; k++ {
+ scratchBuf.SetBuf(raw)
+ msgBlackhole.Reset()
+ if err := scratchBuf.Unmarshal(msgBlackhole); err != nil {
+ b.Error("wrong decode", err)
+ }
+ }
+ })
+ }
+}
+
+// BenchmarkVarint64ArraySmall shows the performance on an array of small int64 fields (1 and
+// 2 bytes long).
+func BenchmarkVarint64ArraySmall(b *testing.B) {
+ for i := uint(1); i <= 10; i++ {
+ dist := genUint64Dist([11]int{0, 3, 1}, 1<<i)
+ raw, err := proto.Marshal(&tpb.Message{
+ Key: dist,
+ })
+ if err != nil {
+ b.Error("wrong encode", err)
+ }
+ b.Run(fmt.Sprintf("Len%v", len(dist)), func(b *testing.B) {
+ scratchBuf := proto.NewBuffer(nil)
+ b.ResetTimer()
+ for k := 0; k < b.N; k++ {
+ scratchBuf.SetBuf(raw)
+ msgBlackhole.Reset()
+ if err := scratchBuf.Unmarshal(msgBlackhole); err != nil {
+ b.Error("wrong decode", err)
+ }
+ }
+ })
+ }
+}
+
+// BenchmarkVarint64ArrayLarge shows the performance on an array of large int64 fields (6, 7,
+// and 8 bytes long with a small number of the other sizes).
+func BenchmarkVarint64ArrayLarge(b *testing.B) {
+ for i := uint(1); i <= 10; i++ {
+ dist := genUint64Dist([11]int{0, 1, 1, 2, 4, 8, 16, 32, 16, 1, 1}, 1<<i)
+ raw, err := proto.Marshal(&tpb.Message{
+ Key: dist,
+ })
+ if err != nil {
+ b.Error("wrong encode", err)
+ }
+ b.Run(fmt.Sprintf("Len%v", len(dist)), func(b *testing.B) {
+ scratchBuf := proto.NewBuffer(nil)
+ b.ResetTimer()
+ for k := 0; k < b.N; k++ {
+ scratchBuf.SetBuf(raw)
+ msgBlackhole.Reset()
+ if err := scratchBuf.Unmarshal(msgBlackhole); err != nil {
+ b.Error("wrong decode", err)
+ }
+ }
+ })
+ }
+}
+
+// BenchmarkVarint64ArrayMixed shows the performance of lots of small messages, each
+// containing a small number of large (3, 4, and 5 byte) repeated int64s.
+func BenchmarkVarint64ArrayMixed(b *testing.B) {
+ for i := uint(1); i <= 1<<5; i <<= 1 {
+ dist := genUint64Dist([11]int{0, 0, 0, 4, 6, 4, 0, 0, 0, 0, 0}, int(i))
+ // number of sub fields
+ for k := uint(1); k <= 1<<10; k <<= 2 {
+ msg := &tpb.Message{}
+ for m := uint(0); m < k; m++ {
+ msg.Children = append(msg.Children, &tpb.Message{
+ Key: dist,
+ })
+ }
+ raw, err := proto.Marshal(msg)
+ if err != nil {
+ b.Error("wrong encode", err)
+ }
+ b.Run(fmt.Sprintf("Fields%vLen%v", k, i), func(b *testing.B) {
+ scratchBuf := proto.NewBuffer(nil)
+ b.ResetTimer()
+ for k := 0; k < b.N; k++ {
+ scratchBuf.SetBuf(raw)
+ msgBlackhole.Reset()
+ if err := scratchBuf.Unmarshal(msgBlackhole); err != nil {
+ b.Error("wrong decode", err)
+ }
+ }
+ })
+ }
+ }
+}
+
+// genInt32Dist generates a slice of ints that will match the size distribution of dist.
+// A size of 6 corresponds to a max length varint32, which is 10 bytes. The distribution
+// is 1-indexed. (i.e. the value at index 1 is how many 1 byte ints to create).
+func genInt32Dist(dist [7]int, count int) (dest []int32) {
+ for i := 0; i < count; i++ {
+ for k := 0; k < len(dist); k++ {
+ var num int32
+ switch k {
+ case 1:
+ num = 1<<7 - 1
+ case 2:
+ num = 1<<14 - 1
+ case 3:
+ num = 1<<21 - 1
+ case 4:
+ num = 1<<28 - 1
+ case 5:
+ num = 1<<29 - 1
+ case 6:
+ num = -1
+ }
+ for m := 0; m < dist[k]; m++ {
+ dest = append(dest, num)
+ }
+ }
+ }
+ return
+}
+
+// genUint64Dist generates a slice of ints that will match the size distribution of dist.
+// The distribution is 1-indexed. (i.e. the value at index 1 is how many 1 byte ints to create).
+func genUint64Dist(dist [11]int, count int) (dest []uint64) {
+ for i := 0; i < count; i++ {
+ for k := 0; k < len(dist); k++ {
+ var num uint64
+ switch k {
+ case 1:
+ num = 1<<7 - 1
+ case 2:
+ num = 1<<14 - 1
+ case 3:
+ num = 1<<21 - 1
+ case 4:
+ num = 1<<28 - 1
+ case 5:
+ num = 1<<35 - 1
+ case 6:
+ num = 1<<42 - 1
+ case 7:
+ num = 1<<49 - 1
+ case 8:
+ num = 1<<56 - 1
+ case 9:
+ num = 1<<63 - 1
+ case 10:
+ num = 1<<64 - 1
+ }
+ for m := 0; m < dist[k]; m++ {
+ dest = append(dest, num)
+ }
+ }
+ }
+ return
+}
+
+// BenchmarkDecodeEmpty measures the overhead of doing the minimal possible decode.
+func BenchmarkDecodeEmpty(b *testing.B) {
+ raw, err := proto.Marshal(&tpb.Message{})
+ if err != nil {
+ b.Error("wrong encode", err)
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if err := proto.Unmarshal(raw, msgBlackhole); err != nil {
+ b.Error("wrong decode", err)
+ }
+ }
+}
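+
+// Editor's note (not part of the upstream file): the benchmarks above can be
+// run without the regular tests using something like
+//
+//	go test -run '^$' -bench 'Varint|DecodeEmpty' github.com/golang/protobuf/proto
+//
+// -run '^$' matches no test names, so only the benchmarks execute.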
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 0000000..8b84d1b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,1362 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // errOneofHasNil is the error returned if Marshal is called with
+ // a struct with a oneof field containing a nil element.
+ errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // ErrTooLarge is the error returned if Marshal is called with a
+ // message that encodes to >2GB.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// maxMarshalSize is the largest allowed size of an encoded protobuf,
+// since C++ and Java use signed int32s for the size.
+const maxMarshalSize = 1<<31 - 1
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
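+
+// Worked example (editor's note, not part of the upstream file): this is the
+// inverse of DecodeVarint in decode.go, e.g.
+//
+//	EncodeVarint(300) // returns []byte{0xAC, 0x02}
+//
+// 300 is 0b100101100; the low seven bits 0101100 go out first with the
+// continuation bit 0x80 set, followed by the remaining bits 10.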
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ return sizeVarint(x)
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63)))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint((x << 1) ^ uint64((int64(x) >> 63)))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
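+
+// Worked example (editor's note, not part of the upstream file): zigzag
+// encoding maps 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ... so that small
+// negative numbers stay small on the wire. EncodeZigzag64, given -1 as its
+// uint64 bit pattern, therefore emits the single varint byte 1, where a plain
+// varint of the same bit pattern would take ten bytes.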
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
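+
+// Illustrative sketch (editor's note, not part of the upstream file): a
+// typical round trip through this package, with msg and out assumed to be
+// non-nil *pb.MyMessage values from the testdata package used in the tests:
+//
+//	data, err := proto.Marshal(msg)
+//	if err != nil {
+//		log.Fatal("marshaling error: ", err)
+//	}
+//	if err := proto.Unmarshal(data, out); err != nil {
+//		log.Fatal("unmarshaling error: ", err)
+//	}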
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ p.buf = append(p.buf, data...)
+ return err
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ (stats).Encode++ // Parens are to work around a goimports bug.
+ }
+
+ if len(p.buf) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ (stats).Size++ // Parens are to work around a goimports bug.
+ }
+
+ return
+}
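+
+// Editor's note (not part of the upstream file): for messages that do not
+// implement Marshaler, Size walks the struct without building any output, and
+// its result is expected to match len(data) for data, _ := Marshal(pb) on the
+// same message.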
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v && !p.oneof {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// All protocol buffer fields are nillable, but reflect.Value.IsNil panics on non-nillable kinds, so check the kind first.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return state.err
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
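+
+// Worked example (illustrative): a repeated int32 field with field number 4
+// holding {1, 2, 3} encodes as
+//
+//    unpacked: 0x20 0x01  0x20 0x02  0x20 0x03    (tag + varint per element, 6 bytes)
+//    packed:   0x22 0x03  0x01 0x02 0x03          (one tag, byte count, then the varints, 5 bytes)
+//
+// 0x20 is field 4 with wire type 0 (varint) and 0x22 is field 4 with wire
+// type 2 (length-delimited); this is the difference between enc_slice_int32
+// above and enc_slice_packed_int32.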
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ exts := structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionsMap(*exts); err != nil {
+ return err
+ }
+
+ return o.enc_map_body(*exts)
+}
+
+func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
+ exts := structPointer_Extensions(base, p.field)
+
+ v, mu := exts.extensionsRead()
+ if v == nil {
+ return nil
+ }
+
+ mu.Lock()
+ defer mu.Unlock()
+ if err := encodeExtensionsMap(v); err != nil {
+ return err
+ }
+
+ return o.enc_map_body(v)
+}
+
+func (o *Buffer) enc_map_body(v map[int32]Extension) error {
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := structPointer_ExtMap(base, p.field)
+ return extensionsMapSize(*v)
+}
+
+func size_exts(p *Properties, base structPointer) int {
+ v := structPointer_Extensions(base, p.field)
+ return extensionsSize(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map<key_type, value_type> map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
+ return err
+ }
+ return nil
+ }
+
+ // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns addressable scratch values for the map's key and
+// value types, together with structPointers (keybase, valbase) suitable for
+// passing to an encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) //
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr()) //
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ if len(o.buf) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ }
+ }
+
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err == ErrNil {
+ return errOneofHasNil
+ } else if err != nil {
+ return err
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(o.buf)+len(v) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ // Factor in any oneof fields.
+ if prop.oneofSizer != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ n += prop.oneofSizer(m)
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is -x bytes smaller than the space we reserved
+ // Move msg -x bytes left (x is negative).
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
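+
+// Worked example (illustrative): suppose iLen = 10, so four bytes are
+// reserved and iMsg = 14. If enc appends 3 bytes, then lMsg = 3 and the
+// length varint needs lLen = 1 byte, so x = 1 - 4 = -3: the three message
+// bytes are copied 3 positions left (to offset 11) and the buffer is
+// truncated by 3. The buffer is then cut back to iLen, the varint 0x03 is
+// written there, and the buffer is re-extended to cover the moved message,
+// leaving ... 0x03, msg[0], msg[1], msg[2] ... starting at offset 10.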
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode_test.go b/vendor/github.com/golang/protobuf/proto/encode_test.go
new file mode 100644
index 0000000..a720947
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode_test.go
@@ -0,0 +1,85 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build go1.7
+
+package proto_test
+
+import (
+ "strconv"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ tpb "github.com/golang/protobuf/proto/proto3_proto"
+ "github.com/golang/protobuf/ptypes"
+)
+
+var (
+ blackhole []byte
+)
+
+// BenchmarkAny creates increasingly large arbitrary Any messages. The type is always the
+// same.
+func BenchmarkAny(b *testing.B) {
+ data := make([]byte, 1<<20)
+ quantum := 1 << 10
+ for i := uint(0); i <= 10; i++ {
+ b.Run(strconv.Itoa(quantum<<i), func(b *testing.B) {
+ for k := 0; k < b.N; k++ {
+ inner := &tpb.Message{
+ Data: data[:quantum<<i],
+ }
+ outer, err := ptypes.MarshalAny(inner)
+ if err != nil {
+ b.Error("wrong encode", err)
+ }
+ raw, err := proto.Marshal(&tpb.Message{
+ Anything: outer,
+ })
+ if err != nil {
+ b.Error("wrong encode", err)
+ }
+ blackhole = raw
+ }
+ })
+ }
+}
+
+// BenchmarkEmpty measures the overhead of doing the minimal possible encode.
+func BenchmarkEmpty(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ raw, err := proto.Marshal(&tpb.Message{})
+ if err != nil {
+ b.Error("wrong encode", err)
+ }
+ blackhole = raw
+ }
+}
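+
+// These benchmarks use only the standard Go tooling and can be run with,
+// for example:
+//
+//    go test -run=NONE -bench=. github.com/golang/protobuf/proto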
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000..2ed1cf5
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal. Note a "bytes" field,
+ although represented by []byte, is not a repeated field and the
+ rule for the scalar fields described above applies.
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Two map fields are equal iff their lengths are the same,
+ and they contain the same set of elements. Zero-length map
+ fields are equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
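+
+// Usage sketch (illustrative, using the generated testdata types exercised by
+// the tests below):
+//
+//    a := &pb.MyMessage{Count: Int32(1)}
+//    b := &pb.MyMessage{Count: Int32(1)}
+//    Equal(a, b)                              // true: same type, same set fields
+//    Equal(a, &pb.MyMessage{Count: Int32(2)}) // false: Count differs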
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_InternalExtensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ // Maps may have nil values in them, so check for nil.
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+ em1, _ := x1.extensionsRead()
+ em2, _ := x2.extensionsRead()
+ return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/equal_test.go b/vendor/github.com/golang/protobuf/proto/equal_test.go
new file mode 100644
index 0000000..a2febb3
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal_test.go
@@ -0,0 +1,224 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "testing"
+
+ . "github.com/golang/protobuf/proto"
+ proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+ pb "github.com/golang/protobuf/proto/testdata"
+)
+
+// Four identical base messages.
+// The init function adds extensions to some of them.
+var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)}
+var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)}
+var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)}
+var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)}
+
+// Two messages with non-message extensions.
+var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)}
+var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)}
+
+func init() {
+ ext1 := &pb.Ext{Data: String("Kirk")}
+ ext2 := &pb.Ext{Data: String("Picard")}
+
+ // messageWithExtension1a has ext1, but never marshals it.
+ if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil {
+ panic("SetExtension on 1a failed: " + err.Error())
+ }
+
+ // messageWithExtension1b is the unmarshaled form of messageWithExtension1a.
+ if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil {
+ panic("SetExtension on 1b failed: " + err.Error())
+ }
+ buf, err := Marshal(messageWithExtension1b)
+ if err != nil {
+ panic("Marshal of 1b failed: " + err.Error())
+ }
+ messageWithExtension1b.Reset()
+ if err := Unmarshal(buf, messageWithExtension1b); err != nil {
+ panic("Unmarshal of 1b failed: " + err.Error())
+ }
+
+ // messageWithExtension2 has ext2.
+ if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil {
+ panic("SetExtension on 2 failed: " + err.Error())
+ }
+
+ if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil {
+ panic("SetExtension on Int32-1 failed: " + err.Error())
+ }
+ if err := SetExtension(messageWithInt32Extension2, pb.E_Ext_Number, Int32(24)); err != nil {
+ panic("SetExtension on Int32-2 failed: " + err.Error())
+ }
+}
+
+var EqualTests = []struct {
+ desc string
+ a, b Message
+ exp bool
+}{
+ {"different types", &pb.GoEnum{}, &pb.GoTestField{}, false},
+ {"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true},
+ {"nil vs nil", nil, nil, true},
+ {"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true},
+ {"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false},
+ {"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false},
+
+ {"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false},
+ {"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false},
+ {"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false},
+ {"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true},
+
+ {"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false},
+ {"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false},
+ {"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false},
+ {"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true},
+ {"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true},
+ {"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true},
+ {"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true},
+
+ {
+ "nested, different",
+ &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}},
+ &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}},
+ false,
+ },
+ {
+ "nested, equal",
+ &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
+ &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
+ true,
+ },
+
+ {"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true},
+ {"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true},
+ {"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false},
+ {
+ "repeated bytes",
+ &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
+ &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
+ true,
+ },
+ // In proto3, []byte{} and []byte(nil) are equal.
+ {"proto3 bytes, empty vs nil", &proto3pb.Message{Data: []byte{}}, &proto3pb.Message{Data: nil}, true},
+
+ {"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false},
+ {"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true},
+ {"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false},
+
+ {"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true},
+ {"int32 extension vs. a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false},
+
+ {
+ "message with group",
+ &pb.MyMessage{
+ Count: Int32(1),
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: Int32(5),
+ },
+ },
+ &pb.MyMessage{
+ Count: Int32(1),
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: Int32(5),
+ },
+ },
+ true,
+ },
+
+ {
+ "map same",
+ &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
+ &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
+ true,
+ },
+ {
+ "map different entry",
+ &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
+ &pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}},
+ false,
+ },
+ {
+ "map different key only",
+ &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
+ &pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}},
+ false,
+ },
+ {
+ "map different value only",
+ &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
+ &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}},
+ false,
+ },
+ {
+ "zero-length maps same",
+ &pb.MessageWithMap{NameMapping: map[int32]string{}},
+ &pb.MessageWithMap{NameMapping: nil},
+ true,
+ },
+ {
+ "orders in map don't matter",
+ &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken", 2: "Rob"}},
+ &pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob", 1: "Ken"}},
+ true,
+ },
+ {
+ "oneof same",
+ &pb.Communique{Union: &pb.Communique_Number{41}},
+ &pb.Communique{Union: &pb.Communique_Number{41}},
+ true,
+ },
+ {
+ "oneof one nil",
+ &pb.Communique{Union: &pb.Communique_Number{41}},
+ &pb.Communique{},
+ false,
+ },
+ {
+ "oneof different",
+ &pb.Communique{Union: &pb.Communique_Number{41}},
+ &pb.Communique{Union: &pb.Communique_Name{"Bobby Tables"}},
+ false,
+ },
+}
+
+func TestEqual(t *testing.T) {
+ for _, tc := range EqualTests {
+ if res := Equal(tc.a, tc.b); res != tc.exp {
+ t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp)
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..eaad218
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,587 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ extensionsWrite() map[int32]Extension
+ extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+ extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+ return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+ return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock() {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, bool) {
+ if ep, ok := p.(extendableProto); ok {
+ return ep, ok
+ }
+ if ep, ok := p.(extendableProtoV1); ok {
+ return extensionAdapter{ep}, ok
+ }
+ return nil, false
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported extensionsRead and extensionsWrite methods, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+ // The struct must be indirect so that if a user inadvertently copies a
+ // generated message and its embedded XXX_InternalExtensions, they
+ // avoid the mayhem of a copied mutex.
+ //
+ // The mutex serializes all logically read-only operations to p.extensionMap.
+ // It is up to the client to ensure that write operations to p.extensionMap are
+ // mutually exclusive with other accesses.
+ p *struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+ if e.p == nil {
+ e.p = new(struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ })
+ e.p.extensionMap = make(map[int32]Extension)
+ }
+ return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use. It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+ if e.p == nil {
+ return nil, nil
+ }
+ return e.p.extensionMap, &e.p.mu
+}
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+ Filename string // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension,
+ // only desc and value are set. When the message is marshaled,
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+ epb, ok := extendable(base)
+ if !ok {
+ return
+ }
+ extmap := epb.extensionsWrite()
+ extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ var pbi interface{} = pb
+ // Check the extended type.
+ if ea, ok := pbi.(extensionAdapter); ok {
+ pbi = ea.extendableProtoV1
+ }
+ if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// encode encodes any unmarshaled (unencoded) extensions in e.
+func encodeExtensions(e *XXX_InternalExtensions) error {
+ m, mu := e.extensionsRead()
+ if m == nil {
+ return nil // fast path
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ return encodeExtensionsMap(m)
+}
+
+// encode encodes any unmarshaled (unencoded) extensions in e.
+func encodeExtensionsMap(m map[int32]Extension) error {
+ for k, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ m[k] = e
+ }
+ return nil
+}
+
+func extensionsSize(e *XXX_InternalExtensions) (n int) {
+ m, mu := e.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ return extensionsMapSize(m)
+}
+
+func extensionsMapSize(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ epb, ok := extendable(pb)
+ if !ok {
+ return false
+ }
+ extmap, mu := epb.extensionsRead()
+ if extmap == nil {
+ return false
+ }
+ mu.Lock()
+ _, ok = extmap[extension.Field]
+ mu.Unlock()
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return
+ }
+ // TODO: Check types, field numbers, etc.?
+ extmap := epb.extensionsWrite()
+ delete(extmap, extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present and has no default value it returns ErrMissingExtension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, errors.New("proto: not an extendable proto")
+ }
+
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return nil, err
+ }
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return defaultExtensionValue(extension)
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined, ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a pointer; we can return sf.value directly.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 directly into a non-int32 reflect.Value,
+ // set it as an int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+
+ props := extensionProperties(extension)
+
+ // t is a pointer to a struct, a pointer to a basic type, or a slice.
+ // Allocate a "field" to hold the pointer/slice itself and pass
+ // the address of that field to props.dec.
+ // This passes a zero field and a *t and lets props.dec
+ // interpret it as a *struct{ x t }.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ if o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, errors.New("proto: not an extendable proto")
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
+ }
+ registeredExtensions := RegisteredExtensions(pb)
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return nil, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ extensions := make([]*ExtensionDesc, 0, len(emap))
+ for extid, e := range emap {
+ desc := e.desc
+ if desc == nil {
+ desc = registeredExtensions[extid]
+ if desc == nil {
+ desc = &ExtensionDesc{Field: extid}
+ }
+ }
+
+ extensions = append(extensions, desc)
+ }
+ return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
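+//
+// The value must have the extension's declared Go type (ExtensionType) and,
+// being a pointer or slice, must be non-nil. A sketch, reusing the
+// illustrative names from GetExtension above:
+//
+//    if err := SetExtension(msg, E_Ext_More, &Ext{Data: String("hi")}); err != nil {
+//        // wrong value type, nil value, or msg is not extendable
+//    }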
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+ epb, ok := extendable(pb)
+ if !ok {
+ return errors.New("proto: not an extendable proto")
+ }
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ extmap := epb.extensionsWrite()
+ extmap[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return
+ }
+ m := epb.extensionsWrite()
+ for k := range m {
+ delete(m, k)
+ }
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
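+//
+// A generated .pb.go file typically declares an *ExtensionDesc and registers
+// it from an init function; a sketch with illustrative field values (mirroring
+// the hand-built descriptor used in this package's tests):
+//
+//    var E_Foo = &ExtensionDesc{
+//        ExtendedType:  (*MyMessage)(nil),
+//        ExtensionType: (*bool)(nil),
+//        Field:         123456789,
+//        Name:          "a.b",
+//        Tag:           "varint,123456789,opt",
+//    }
+//
+//    func init() {
+//        RegisterExtension(E_Foo)
+//    }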
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions_test.go b/vendor/github.com/golang/protobuf/proto/extensions_test.go
new file mode 100644
index 0000000..b6d9114
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions_test.go
@@ -0,0 +1,536 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sort"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ pb "github.com/golang/protobuf/proto/testdata"
+ "golang.org/x/sync/errgroup"
+)
+
+func TestGetExtensionsWithMissingExtensions(t *testing.T) {
+ msg := &pb.MyMessage{}
+ ext1 := &pb.Ext{}
+ if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
+ t.Fatalf("Could not set ext1: %s", err)
+ }
+ exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{
+ pb.E_Ext_More,
+ pb.E_Ext_Text,
+ })
+ if err != nil {
+ t.Fatalf("GetExtensions() failed: %s", err)
+ }
+ if exts[0] != ext1 {
+ t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0])
+ }
+ if exts[1] != nil {
+ t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1])
+ }
+}
+
+func TestExtensionDescsWithMissingExtensions(t *testing.T) {
+ msg := &pb.MyMessage{Count: proto.Int32(0)}
+ extdesc1 := pb.E_Ext_More
+ if descs, err := proto.ExtensionDescs(msg); len(descs) != 0 || err != nil {
+ t.Errorf("proto.ExtensionDescs: got %d descs, error %v; want 0, nil", len(descs), err)
+ }
+
+ ext1 := &pb.Ext{}
+ if err := proto.SetExtension(msg, extdesc1, ext1); err != nil {
+ t.Fatalf("Could not set ext1: %s", err)
+ }
+ extdesc2 := &proto.ExtensionDesc{
+ ExtendedType: (*pb.MyMessage)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 123456789,
+ Name: "a.b",
+ Tag: "varint,123456789,opt",
+ }
+ ext2 := proto.Bool(false)
+ if err := proto.SetExtension(msg, extdesc2, ext2); err != nil {
+ t.Fatalf("Could not set ext2: %s", err)
+ }
+
+ b, err := proto.Marshal(msg)
+ if err != nil {
+ t.Fatalf("Could not marshal msg: %v", err)
+ }
+ if err := proto.Unmarshal(b, msg); err != nil {
+ t.Fatalf("Could not unmarshal into msg: %v", err)
+ }
+
+ descs, err := proto.ExtensionDescs(msg)
+ if err != nil {
+ t.Fatalf("proto.ExtensionDescs: got error %v", err)
+ }
+ sortExtDescs(descs)
+ wantDescs := []*proto.ExtensionDesc{extdesc1, &proto.ExtensionDesc{Field: extdesc2.Field}}
+ if !reflect.DeepEqual(descs, wantDescs) {
+ t.Errorf("proto.ExtensionDescs(msg) sorted extension ids: got %+v, want %+v", descs, wantDescs)
+ }
+}
+
+type ExtensionDescSlice []*proto.ExtensionDesc
+
+func (s ExtensionDescSlice) Len() int { return len(s) }
+func (s ExtensionDescSlice) Less(i, j int) bool { return s[i].Field < s[j].Field }
+func (s ExtensionDescSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func sortExtDescs(s []*proto.ExtensionDesc) {
+ sort.Sort(ExtensionDescSlice(s))
+}
+
+func TestGetExtensionStability(t *testing.T) {
+ check := func(m *pb.MyMessage) bool {
+ ext1, err := proto.GetExtension(m, pb.E_Ext_More)
+ if err != nil {
+ t.Fatalf("GetExtension() failed: %s", err)
+ }
+ ext2, err := proto.GetExtension(m, pb.E_Ext_More)
+ if err != nil {
+ t.Fatalf("GetExtension() failed: %s", err)
+ }
+ return ext1 == ext2
+ }
+ msg := &pb.MyMessage{Count: proto.Int32(4)}
+ ext0 := &pb.Ext{}
+ if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil {
+ t.Fatalf("Could not set ext1: %s", ext0)
+ }
+ if !check(msg) {
+ t.Errorf("GetExtension() not stable before marshaling")
+ }
+ bb, err := proto.Marshal(msg)
+ if err != nil {
+ t.Fatalf("Marshal() failed: %s", err)
+ }
+ msg1 := &pb.MyMessage{}
+ err = proto.Unmarshal(bb, msg1)
+ if err != nil {
+ t.Fatalf("Unmarshal() failed: %s", err)
+ }
+ if !check(msg1) {
+ t.Errorf("GetExtension() not stable after unmarshaling")
+ }
+}
+
+func TestGetExtensionDefaults(t *testing.T) {
+ var setFloat64 float64 = 1
+ var setFloat32 float32 = 2
+ var setInt32 int32 = 3
+ var setInt64 int64 = 4
+ var setUint32 uint32 = 5
+ var setUint64 uint64 = 6
+ var setBool = true
+ var setBool2 = false
+ var setString = "Goodnight string"
+ var setBytes = []byte("Goodnight bytes")
+ var setEnum = pb.DefaultsMessage_TWO
+
+ type testcase struct {
+ ext *proto.ExtensionDesc // Extension we are testing.
+ want interface{} // Value the test sets and then expects to read back.
+ def interface{} // Expected value before setting and after ClearExtension(); nil means GetExtension should fail with ErrMissingExtension.
+ }
+ tests := []testcase{
+ {pb.E_NoDefaultDouble, setFloat64, nil},
+ {pb.E_NoDefaultFloat, setFloat32, nil},
+ {pb.E_NoDefaultInt32, setInt32, nil},
+ {pb.E_NoDefaultInt64, setInt64, nil},
+ {pb.E_NoDefaultUint32, setUint32, nil},
+ {pb.E_NoDefaultUint64, setUint64, nil},
+ {pb.E_NoDefaultSint32, setInt32, nil},
+ {pb.E_NoDefaultSint64, setInt64, nil},
+ {pb.E_NoDefaultFixed32, setUint32, nil},
+ {pb.E_NoDefaultFixed64, setUint64, nil},
+ {pb.E_NoDefaultSfixed32, setInt32, nil},
+ {pb.E_NoDefaultSfixed64, setInt64, nil},
+ {pb.E_NoDefaultBool, setBool, nil},
+ {pb.E_NoDefaultBool, setBool2, nil},
+ {pb.E_NoDefaultString, setString, nil},
+ {pb.E_NoDefaultBytes, setBytes, nil},
+ {pb.E_NoDefaultEnum, setEnum, nil},
+ {pb.E_DefaultDouble, setFloat64, float64(3.1415)},
+ {pb.E_DefaultFloat, setFloat32, float32(3.14)},
+ {pb.E_DefaultInt32, setInt32, int32(42)},
+ {pb.E_DefaultInt64, setInt64, int64(43)},
+ {pb.E_DefaultUint32, setUint32, uint32(44)},
+ {pb.E_DefaultUint64, setUint64, uint64(45)},
+ {pb.E_DefaultSint32, setInt32, int32(46)},
+ {pb.E_DefaultSint64, setInt64, int64(47)},
+ {pb.E_DefaultFixed32, setUint32, uint32(48)},
+ {pb.E_DefaultFixed64, setUint64, uint64(49)},
+ {pb.E_DefaultSfixed32, setInt32, int32(50)},
+ {pb.E_DefaultSfixed64, setInt64, int64(51)},
+ {pb.E_DefaultBool, setBool, true},
+ {pb.E_DefaultBool, setBool2, true},
+ {pb.E_DefaultString, setString, "Hello, string"},
+ {pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")},
+ {pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE},
+ }
+
+ checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error {
+ val, err := proto.GetExtension(msg, test.ext)
+ if err != nil {
+ if valWant != nil {
+ return fmt.Errorf("GetExtension(): %s", err)
+ }
+ if want := proto.ErrMissingExtension; err != want {
+ return fmt.Errorf("Unexpected error: got %v, want %v", err, want)
+ }
+ return nil
+ }
+
+ // All proto2 extension values are either a pointer to a value or a slice of values.
+ ty := reflect.TypeOf(val)
+ tyWant := reflect.TypeOf(test.ext.ExtensionType)
+ if got, want := ty, tyWant; got != want {
+ return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want)
+ }
+ tye := ty.Elem()
+ tyeWant := tyWant.Elem()
+ if got, want := tye, tyeWant; got != want {
+ return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want)
+ }
+
+ // Check the name of the type of the value.
+ // If it is an enum, it will be type int32 with the name of the enum.
+ if got, want := tye.Name(), tyeWant.Name(); got != want {
+ return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want)
+ }
+
+ // Check that value is what we expect.
+ // If we have a pointer in val, get the value it points to.
+ valExp := val
+ if ty.Kind() == reflect.Ptr {
+ valExp = reflect.ValueOf(val).Elem().Interface()
+ }
+ if got, want := valExp, valWant; !reflect.DeepEqual(got, want) {
+ return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want)
+ }
+
+ return nil
+ }
+
+ setTo := func(test testcase) interface{} {
+ setTo := reflect.ValueOf(test.want)
+ if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr {
+ setTo = reflect.New(typ).Elem()
+ setTo.Set(reflect.New(setTo.Type().Elem()))
+ setTo.Elem().Set(reflect.ValueOf(test.want))
+ }
+ return setTo.Interface()
+ }
+
+ for _, test := range tests {
+ msg := &pb.DefaultsMessage{}
+ name := test.ext.Name
+
+ // Check the initial value.
+ if err := checkVal(test, msg, test.def); err != nil {
+ t.Errorf("%s: %v", name, err)
+ }
+
+ // Set the per-type value and check it.
+ name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want)
+ if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil {
+ t.Errorf("%s: SetExtension(): %v", name, err)
+ continue
+ }
+ if err := checkVal(test, msg, test.want); err != nil {
+ t.Errorf("%s: %v", name, err)
+ continue
+ }
+
+ // Clear the extension and check the default value.
+ name += " (cleared)"
+ proto.ClearExtension(msg, test.ext)
+ if err := checkVal(test, msg, test.def); err != nil {
+ t.Errorf("%s: %v", name, err)
+ }
+ }
+}
+
+func TestExtensionsRoundTrip(t *testing.T) {
+ msg := &pb.MyMessage{}
+ ext1 := &pb.Ext{
+ Data: proto.String("hi"),
+ }
+ ext2 := &pb.Ext{
+ Data: proto.String("there"),
+ }
+ exists := proto.HasExtension(msg, pb.E_Ext_More)
+ if exists {
+ t.Error("Extension More present unexpectedly")
+ }
+ if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
+ t.Error(err)
+ }
+ if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil {
+ t.Error(err)
+ }
+ e, err := proto.GetExtension(msg, pb.E_Ext_More)
+ if err != nil {
+ t.Error(err)
+ }
+ x, ok := e.(*pb.Ext)
+ if !ok {
+ t.Errorf("e has type %T, expected testdata.Ext", e)
+ } else if *x.Data != "there" {
+ t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x)
+ }
+ proto.ClearExtension(msg, pb.E_Ext_More)
+ if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension {
+ t.Errorf("got %v, expected ErrMissingExtension", e)
+ }
+ if _, err := proto.GetExtension(msg, pb.E_X215); err == nil {
+ t.Error("expected bad extension error, got nil")
+ }
+ if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil {
+ t.Error("expected extension err")
+ }
+ if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil {
+ t.Error("expected some sort of type mismatch error, got nil")
+ }
+}
+
+func TestNilExtension(t *testing.T) {
+ msg := &pb.MyMessage{
+ Count: proto.Int32(1),
+ }
+ if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil {
+ t.Fatal(err)
+ }
+ if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil {
+ t.Error("expected SetExtension to fail due to a nil extension")
+ } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want {
+ t.Errorf("expected error %v, got %v", want, err)
+ }
+ // Note: if the behavior of Marshal is ever changed to ignore nil extensions, update
+ // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal.
+}
+
+func TestMarshalUnmarshalRepeatedExtension(t *testing.T) {
+ // Add a repeated extension to the result.
+ tests := []struct {
+ name string
+ ext []*pb.ComplexExtension
+ }{
+ {
+ "two fields",
+ []*pb.ComplexExtension{
+ {First: proto.Int32(7)},
+ {Second: proto.Int32(11)},
+ },
+ },
+ {
+ "repeated field",
+ []*pb.ComplexExtension{
+ {Third: []int32{1000}},
+ {Third: []int32{2000}},
+ },
+ },
+ {
+ "two fields and repeated field",
+ []*pb.ComplexExtension{
+ {Third: []int32{1000}},
+ {First: proto.Int32(9)},
+ {Second: proto.Int32(21)},
+ {Third: []int32{2000}},
+ },
+ },
+ }
+ for _, test := range tests {
+ // Marshal message with a repeated extension.
+ msg1 := new(pb.OtherMessage)
+ err := proto.SetExtension(msg1, pb.E_RComplex, test.ext)
+ if err != nil {
+ t.Fatalf("[%s] Error setting extension: %v", test.name, err)
+ }
+ b, err := proto.Marshal(msg1)
+ if err != nil {
+ t.Fatalf("[%s] Error marshaling message: %v", test.name, err)
+ }
+
+ // Unmarshal and read the merged proto.
+ msg2 := new(pb.OtherMessage)
+ err = proto.Unmarshal(b, msg2)
+ if err != nil {
+ t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err)
+ }
+ e, err := proto.GetExtension(msg2, pb.E_RComplex)
+ if err != nil {
+ t.Fatalf("[%s] Error getting extension: %v", test.name, err)
+ }
+ ext := e.([]*pb.ComplexExtension)
+ if ext == nil {
+ t.Fatalf("[%s] Invalid extension", test.name)
+ }
+ if !reflect.DeepEqual(ext, test.ext) {
+ t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, test.ext)
+ }
+ }
+}
+
+func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) {
+ // We may see multiple instances of the same extension in the wire
+ // format. For example, the proto compiler may encode custom options in
+ // this way. Here, we verify that we merge the extensions together.
+ tests := []struct {
+ name string
+ ext []*pb.ComplexExtension
+ }{
+ {
+ "two fields",
+ []*pb.ComplexExtension{
+ {First: proto.Int32(7)},
+ {Second: proto.Int32(11)},
+ },
+ },
+ {
+ "repeated field",
+ []*pb.ComplexExtension{
+ {Third: []int32{1000}},
+ {Third: []int32{2000}},
+ },
+ },
+ {
+ "two fields and repeated field",
+ []*pb.ComplexExtension{
+ {Third: []int32{1000}},
+ {First: proto.Int32(9)},
+ {Second: proto.Int32(21)},
+ {Third: []int32{2000}},
+ },
+ },
+ }
+ for _, test := range tests {
+ var buf bytes.Buffer
+ var want pb.ComplexExtension
+
+ // Generate a serialized representation of a repeated extension
+ // by concatenating bytes together.
+ for i, e := range test.ext {
+ // Merge to create the wanted proto.
+ proto.Merge(&want, e)
+
+ // serialize the message
+ msg := new(pb.OtherMessage)
+ err := proto.SetExtension(msg, pb.E_Complex, e)
+ if err != nil {
+ t.Fatalf("[%s] Error setting extension %d: %v", test.name, i, err)
+ }
+ b, err := proto.Marshal(msg)
+ if err != nil {
+ t.Fatalf("[%s] Error marshaling message %d: %v", test.name, i, err)
+ }
+ buf.Write(b)
+ }
+
+ // Unmarshal and read the merged proto.
+ msg2 := new(pb.OtherMessage)
+ err := proto.Unmarshal(buf.Bytes(), msg2)
+ if err != nil {
+ t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err)
+ }
+ e, err := proto.GetExtension(msg2, pb.E_Complex)
+ if err != nil {
+ t.Fatalf("[%s] Error getting extension: %v", test.name, err)
+ }
+ ext := e.(*pb.ComplexExtension)
+ if ext == nil {
+ t.Fatalf("[%s] Invalid extension", test.name)
+ }
+ if !reflect.DeepEqual(*ext, want) {
+ t.Errorf("[%s] Wrong value for ComplexExtension: got: %s want: %s\n", test.name, ext, want)
+ }
+ }
+}
+
+func TestClearAllExtensions(t *testing.T) {
+ // unregistered extension
+ desc := &proto.ExtensionDesc{
+ ExtendedType: (*pb.MyMessage)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 101010100,
+ Name: "emptyextension",
+ Tag: "varint,0,opt",
+ }
+ m := &pb.MyMessage{}
+ if proto.HasExtension(m, desc) {
+ t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m))
+ }
+ if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil {
+ t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err)
+ }
+ if !proto.HasExtension(m, desc) {
+ t.Errorf("proto.HasExtension(%s): got false, want true", proto.MarshalTextString(m))
+ }
+ proto.ClearAllExtensions(m)
+ if proto.HasExtension(m, desc) {
+ t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m))
+ }
+}
+
+func TestMarshalRace(t *testing.T) {
+ // unregistered extension
+ desc := &proto.ExtensionDesc{
+ ExtendedType: (*pb.MyMessage)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 101010100,
+ Name: "emptyextension",
+ Tag: "varint,0,opt",
+ }
+
+ m := &pb.MyMessage{Count: proto.Int32(4)}
+ if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil {
+ t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err)
+ }
+
+ var g errgroup.Group
+ for n := 3; n > 0; n-- {
+ g.Go(func() error {
+ _, err := proto.Marshal(m)
+ return err
+ })
+ }
+ if err := g.Wait(); err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000..1c22550
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,897 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from snake_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
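+//
+// A reuse sketch (Buffer.Marshal and Buffer.Unmarshal are the methods defined
+// in this package's encode.go and decode.go; msgs and send are placeholders):
+//
+//    var b Buffer
+//    for _, msg := range msgs {
+//        b.Reset()
+//        if err := b.Marshal(msg); err != nil {
+//            // handle the error
+//        }
+//        send(b.Bytes())
+//    }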
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // read point
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
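+//
+// For example, with the generated FOO_value map from the package comment
+// above, both forms decode to 17:
+//
+//    v, err := UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "example.FOO") // symbolic
+//    v, err = UnmarshalJSONEnum(FOO_value, []byte("17"), "example.FOO")   // numeric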
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
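+//
+// A sketch using the Test message from the package comment above, whose type
+// field declares [default=77]:
+//
+//    t := &Test{Label: String("hello")}
+//    SetDefaults(t)
+//    // *t.Type is now 77; Label is left as set, and Reps stays nil.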
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to a defaultMessage recording
+ // its scalar fields (with any proto-declared default values) and its nested message fields.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
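+//
+// A sketch of the intended call site (v is a reflect.Value holding a map;
+// not a verbatim excerpt from the encoding code):
+//
+//    keys := v.MapKeys()
+//    sort.Sort(mapKeys(keys))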
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const ProtoPackageIsVersion2 = true
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
diff --git a/vendor/github.com/golang/protobuf/proto/map_test.go b/vendor/github.com/golang/protobuf/proto/map_test.go
new file mode 100644
index 0000000..313e879
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/map_test.go
@@ -0,0 +1,46 @@
+package proto_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ ppb "github.com/golang/protobuf/proto/proto3_proto"
+)
+
+func marshalled() []byte {
+ m := &ppb.IntMaps{}
+ for i := 0; i < 1000; i++ {
+ m.Maps = append(m.Maps, &ppb.IntMap{
+ Rtt: map[int32]int32{1: 2},
+ })
+ }
+ b, err := proto.Marshal(m)
+ if err != nil {
+ panic(fmt.Sprintf("Can't marshal %+v: %v", m, err))
+ }
+ return b
+}
+
+func BenchmarkConcurrentMapUnmarshal(b *testing.B) {
+ in := marshalled()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ var out ppb.IntMaps
+ if err := proto.Unmarshal(in, &out); err != nil {
+ b.Errorf("Can't unmarshal ppb.IntMaps: %v", err)
+ }
+ }
+ })
+}
+
+func BenchmarkSequentialMapUnmarshal(b *testing.B) {
+ in := marshalled()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ var out ppb.IntMaps
+ if err := proto.Unmarshal(in, &out); err != nil {
+ b.Errorf("Can't unmarshal ppb.IntMaps: %v", err)
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000..fd982de
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,311 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
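+//
+// As an illustrative sketch, a single Item with type_id 100 and a 3-byte
+// embedded message m1 m2 m3 is encoded as:
+//
+//    0x0b                 start group, field 1 (Item)
+//    0x10 0x64            varint, field 2 (type_id) = 100
+//    0x1a 0x03 m1 m2 m3   length-delimited, field 3 (message), length 3
+//    0x0c                 end group, field 1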
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ if err := encodeExtensions(exts); err != nil {
+ return nil, err
+ }
+ m, _ = exts.extensionsRead()
+ case map[int32]Extension:
+ if err := encodeExtensionsMap(exts); err != nil {
+ return nil, err
+ }
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m = exts.extensionsWrite()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return errors.New("proto: not an extension map")
+ }
+
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m, _ = exts.extensionsRead()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ wrote := false
+ for _, id := range ids {
+ ext := m[id]
+
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+ // Emit the separator only after a previous entry has been written,
+ // so skipped unknown types do not leave a dangling comma.
+ if wrote {
+ b.WriteByte(',')
+ }
+ wrote = true
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set_test.go b/vendor/github.com/golang/protobuf/proto/message_set_test.go
new file mode 100644
index 0000000..353a3ea
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set_test.go
@@ -0,0 +1,66 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestUnmarshalMessageSetWithDuplicate(t *testing.T) {
+ // Check that a repeated message set entry will be concatenated.
+ in := &messageSet{
+ Item: []*_MessageSet_Item{
+ {TypeId: Int32(12345), Message: []byte("hoo")},
+ {TypeId: Int32(12345), Message: []byte("hah")},
+ },
+ }
+ b, err := Marshal(in)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ t.Logf("Marshaled bytes: %q", b)
+
+ var extensions XXX_InternalExtensions
+ if err := UnmarshalMessageSet(b, &extensions); err != nil {
+ t.Fatalf("UnmarshalMessageSet: %v", err)
+ }
+ ext, ok := extensions.p.extensionMap[12345]
+ if !ok {
+ t.Fatalf("Didn't retrieve extension 12345; map is %v", extensions.p.extensionMap)
+ }
+ // Skip wire type/field number and length varints.
+ got := skipVarint(skipVarint(ext.enc))
+ if want := []byte("hoohah"); !bytes.Equal(got, want) {
+ t.Errorf("Combined extension is %q, want %q", got, want)
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..fb512e2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,484 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+ return structPointer_ifield(p, f).(*XXX_InternalExtensions)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+ elem.SetInt(int64(x))
+ case reflect.Uint64:
+ elem.SetUint(x)
+ case reflect.Float64:
+ elem.SetFloat(math.Float64frombits(x))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..6b5567d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,270 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != ^field(0)
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+ return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *p is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *p to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *p.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
+
+// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..ec2289c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,872 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for deriving the per-field properties used to encode and decode protocol buffer structs.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// A oneofSizer does the sizing for all oneof fields in a message.
+type oneofSizer func(Message) int
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
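+//
+// A sketch of its use:
+//
+//	var tm tagMap
+//	tm.put(3, 0)        // small tags (< tagMapFastLimit) land in the fastTags slice
+//	tm.put(5000, 7)     // larger tags fall back to the slowTags map
+//	fi, ok := tm.get(3) // fi == 0, ok == true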
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ oneofSizer oneofSizer
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s += ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
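+// For example, parsing the tag shown in the comment below yields Wire "bytes",
+// Tag 49, Optional true, OrigName "foo", HasDefault true and Default "hello!":
+//
+//	p := new(Properties)
+//	p.Parse("bytes,49,opt,name=foo,def=hello!")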
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ }
+ }
+}
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ break
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.dec = (*Buffer).dec_slice_byte
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ } else {
+ p.enc = (*Buffer).enc_slice_byte
+ p.size = size_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+ break
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+ break
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ }
+
+ case reflect.Map:
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ // precalculate tag code
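+ // For example, Tag=49 with wire type "bytes" (WireBytes=2) gives
+ // x = 49<<3|2 = 394, which encodes as the varint bytes 0x8a 0x03,
+ // so tagcode becomes []byte{0x8a, 0x03}.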
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+ for i = 0; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isMarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isMarshaler")
+ }
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isUnmarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isUnmarshaler")
+ }
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
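+// Callers that hold a generated message value typically pass its element type,
+// e.g. (a sketch, where msg is any pointer-to-struct message):
+//
+//	sprop := proto.GetProperties(reflect.TypeOf(msg).Elem())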
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
+ reflect.PtrTo(t).Implements(extendableProtoV1Type)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_InternalExtensions" { // special case
+ p.enc = (*Buffer).enc_exts
+ p.dec = nil // not needed
+ p.size = size_exts
+ } else if f.Name == "XXX_extensions" { // special case
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ } else if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ oneof := f.Tag.Get("protobuf_oneof") // special case
+ if oneof != "" {
+ // Oneof fields don't use the traditional protobuf tag.
+ p.OrigName = oneof
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
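+// For example, the generated code for an enum such as proto3_proto.Message_Humour
+// registers it roughly as follows (a sketch of the generated call):
+//
+//	proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value)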
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypes = make(map[string]reflect.Type)
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
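+// Generated code typically performs the registration from an init function,
+// roughly like this (a sketch):
+//
+//	func init() {
+//		proto.RegisterType((*Message)(nil), "proto3_proto.Message")
+//	}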
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypes[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+ type xname interface {
+ XXX_MessageName() string
+ }
+ if m, ok := x.(xname); ok {
+ return m.XXX_MessageName()
+ }
+ return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
+
+// A registry of all linked proto files.
+var (
+ protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+ protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go
new file mode 100644
index 0000000..cc4d048
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go
@@ -0,0 +1,347 @@
+// Code generated by protoc-gen-go.
+// source: proto3_proto/proto3.proto
+// DO NOT EDIT!
+
+/*
+Package proto3_proto is a generated protocol buffer package.
+
+It is generated from these files:
+ proto3_proto/proto3.proto
+
+It has these top-level messages:
+ Message
+ Nested
+ MessageWithMap
+ IntMap
+ IntMaps
+*/
+package proto3_proto
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/golang/protobuf/ptypes/any"
+import testdata "github.com/golang/protobuf/proto/testdata"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Message_Humour int32
+
+const (
+ Message_UNKNOWN Message_Humour = 0
+ Message_PUNS Message_Humour = 1
+ Message_SLAPSTICK Message_Humour = 2
+ Message_BILL_BAILEY Message_Humour = 3
+)
+
+var Message_Humour_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "PUNS",
+ 2: "SLAPSTICK",
+ 3: "BILL_BAILEY",
+}
+var Message_Humour_value = map[string]int32{
+ "UNKNOWN": 0,
+ "PUNS": 1,
+ "SLAPSTICK": 2,
+ "BILL_BAILEY": 3,
+}
+
+func (x Message_Humour) String() string {
+ return proto.EnumName(Message_Humour_name, int32(x))
+}
+func (Message_Humour) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
+
+type Message struct {
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"`
+ HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm" json:"height_in_cm,omitempty"`
+ Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
+ ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount" json:"result_count,omitempty"`
+ TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman" json:"true_scotsman,omitempty"`
+ Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"`
+ Key []uint64 `protobuf:"varint,5,rep,packed,name=key" json:"key,omitempty"`
+ ShortKey []int32 `protobuf:"varint,19,rep,packed,name=short_key,json=shortKey" json:"short_key,omitempty"`
+ Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"`
+ RFunny []Message_Humour `protobuf:"varint,16,rep,packed,name=r_funny,json=rFunny,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"`
+ Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field" json:"proto2_field,omitempty"`
+ Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Anything *google_protobuf.Any `protobuf:"bytes,14,opt,name=anything" json:"anything,omitempty"`
+ ManyThings []*google_protobuf.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings" json:"many_things,omitempty"`
+ Submessage *Message `protobuf:"bytes,17,opt,name=submessage" json:"submessage,omitempty"`
+ Children []*Message `protobuf:"bytes,18,rep,name=children" json:"children,omitempty"`
+}
+
+func (m *Message) Reset() { *m = Message{} }
+func (m *Message) String() string { return proto.CompactTextString(m) }
+func (*Message) ProtoMessage() {}
+func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *Message) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Message) GetHilarity() Message_Humour {
+ if m != nil {
+ return m.Hilarity
+ }
+ return Message_UNKNOWN
+}
+
+func (m *Message) GetHeightInCm() uint32 {
+ if m != nil {
+ return m.HeightInCm
+ }
+ return 0
+}
+
+func (m *Message) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *Message) GetResultCount() int64 {
+ if m != nil {
+ return m.ResultCount
+ }
+ return 0
+}
+
+func (m *Message) GetTrueScotsman() bool {
+ if m != nil {
+ return m.TrueScotsman
+ }
+ return false
+}
+
+func (m *Message) GetScore() float32 {
+ if m != nil {
+ return m.Score
+ }
+ return 0
+}
+
+func (m *Message) GetKey() []uint64 {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *Message) GetShortKey() []int32 {
+ if m != nil {
+ return m.ShortKey
+ }
+ return nil
+}
+
+func (m *Message) GetNested() *Nested {
+ if m != nil {
+ return m.Nested
+ }
+ return nil
+}
+
+func (m *Message) GetRFunny() []Message_Humour {
+ if m != nil {
+ return m.RFunny
+ }
+ return nil
+}
+
+func (m *Message) GetTerrain() map[string]*Nested {
+ if m != nil {
+ return m.Terrain
+ }
+ return nil
+}
+
+func (m *Message) GetProto2Field() *testdata.SubDefaults {
+ if m != nil {
+ return m.Proto2Field
+ }
+ return nil
+}
+
+func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults {
+ if m != nil {
+ return m.Proto2Value
+ }
+ return nil
+}
+
+func (m *Message) GetAnything() *google_protobuf.Any {
+ if m != nil {
+ return m.Anything
+ }
+ return nil
+}
+
+func (m *Message) GetManyThings() []*google_protobuf.Any {
+ if m != nil {
+ return m.ManyThings
+ }
+ return nil
+}
+
+func (m *Message) GetSubmessage() *Message {
+ if m != nil {
+ return m.Submessage
+ }
+ return nil
+}
+
+func (m *Message) GetChildren() []*Message {
+ if m != nil {
+ return m.Children
+ }
+ return nil
+}
+
+type Nested struct {
+ Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"`
+ Cute bool `protobuf:"varint,2,opt,name=cute" json:"cute,omitempty"`
+}
+
+func (m *Nested) Reset() { *m = Nested{} }
+func (m *Nested) String() string { return proto.CompactTextString(m) }
+func (*Nested) ProtoMessage() {}
+func (*Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *Nested) GetBunny() string {
+ if m != nil {
+ return m.Bunny
+ }
+ return ""
+}
+
+func (m *Nested) GetCute() bool {
+ if m != nil {
+ return m.Cute
+ }
+ return false
+}
+
+type MessageWithMap struct {
+ ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
+func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
+func (*MessageWithMap) ProtoMessage() {}
+func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
+ if m != nil {
+ return m.ByteMapping
+ }
+ return nil
+}
+
+type IntMap struct {
+ Rtt map[int32]int32 `protobuf:"bytes,1,rep,name=rtt" json:"rtt,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
+}
+
+func (m *IntMap) Reset() { *m = IntMap{} }
+func (m *IntMap) String() string { return proto.CompactTextString(m) }
+func (*IntMap) ProtoMessage() {}
+func (*IntMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *IntMap) GetRtt() map[int32]int32 {
+ if m != nil {
+ return m.Rtt
+ }
+ return nil
+}
+
+type IntMaps struct {
+ Maps []*IntMap `protobuf:"bytes,1,rep,name=maps" json:"maps,omitempty"`
+}
+
+func (m *IntMaps) Reset() { *m = IntMaps{} }
+func (m *IntMaps) String() string { return proto.CompactTextString(m) }
+func (*IntMaps) ProtoMessage() {}
+func (*IntMaps) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *IntMaps) GetMaps() []*IntMap {
+ if m != nil {
+ return m.Maps
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Message)(nil), "proto3_proto.Message")
+ proto.RegisterType((*Nested)(nil), "proto3_proto.Nested")
+ proto.RegisterType((*MessageWithMap)(nil), "proto3_proto.MessageWithMap")
+ proto.RegisterType((*IntMap)(nil), "proto3_proto.IntMap")
+ proto.RegisterType((*IntMaps)(nil), "proto3_proto.IntMaps")
+ proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value)
+}
+
+func init() { proto.RegisterFile("proto3_proto/proto3.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 733 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x53, 0x6d, 0x6f, 0xf3, 0x34,
+ 0x14, 0x25, 0x4d, 0x5f, 0xd2, 0x9b, 0x74, 0x0b, 0x5e, 0x91, 0xbc, 0x02, 0x52, 0x28, 0x12, 0x8a,
+ 0x78, 0x49, 0xa1, 0xd3, 0xd0, 0x84, 0x10, 0x68, 0x1b, 0x9b, 0xa8, 0xd6, 0x95, 0xca, 0xdd, 0x98,
+ 0xf8, 0x14, 0xa5, 0xad, 0xdb, 0x46, 0x34, 0x4e, 0x49, 0x1c, 0xa4, 0xfc, 0x1d, 0xfe, 0x28, 0x8f,
+ 0x6c, 0xa7, 0x5d, 0x36, 0x65, 0xcf, 0xf3, 0x29, 0xf6, 0xf1, 0xb9, 0xf7, 0x9c, 0x1c, 0x5f, 0xc3,
+ 0xe9, 0x2e, 0x89, 0x79, 0x7c, 0xe6, 0xcb, 0xcf, 0x40, 0x6d, 0x3c, 0xf9, 0x41, 0x56, 0xf9, 0xa8,
+ 0x77, 0xba, 0x8e, 0xe3, 0xf5, 0x96, 0x2a, 0xca, 0x3c, 0x5b, 0x0d, 0x02, 0x96, 0x2b, 0x62, 0xef,
+ 0x84, 0xd3, 0x94, 0x2f, 0x03, 0x1e, 0x0c, 0xc4, 0x42, 0x81, 0xfd, 0xff, 0x5b, 0xd0, 0xba, 0xa7,
+ 0x69, 0x1a, 0xac, 0x29, 0x42, 0x50, 0x67, 0x41, 0x44, 0xb1, 0xe6, 0x68, 0x6e, 0x9b, 0xc8, 0x35,
+ 0xba, 0x00, 0x63, 0x13, 0x6e, 0x83, 0x24, 0xe4, 0x39, 0xae, 0x39, 0x9a, 0x7b, 0x34, 0xfc, 0xcc,
+ 0x2b, 0x0b, 0x7a, 0x45, 0xb1, 0xf7, 0x7b, 0x16, 0xc5, 0x59, 0x42, 0x0e, 0x6c, 0xe4, 0x80, 0xb5,
+ 0xa1, 0xe1, 0x7a, 0xc3, 0xfd, 0x90, 0xf9, 0x8b, 0x08, 0xeb, 0x8e, 0xe6, 0x76, 0x08, 0x28, 0x6c,
+ 0xc4, 0xae, 0x23, 0xa1, 0x27, 0xec, 0xe0, 0xba, 0xa3, 0xb9, 0x16, 0x91, 0x6b, 0xf4, 0x05, 0x58,
+ 0x09, 0x4d, 0xb3, 0x2d, 0xf7, 0x17, 0x71, 0xc6, 0x38, 0x6e, 0x39, 0x9a, 0xab, 0x13, 0x53, 0x61,
+ 0xd7, 0x02, 0x42, 0x5f, 0x42, 0x87, 0x27, 0x19, 0xf5, 0xd3, 0x45, 0xcc, 0xd3, 0x28, 0x60, 0xd8,
+ 0x70, 0x34, 0xd7, 0x20, 0x96, 0x00, 0x67, 0x05, 0x86, 0xba, 0xd0, 0x48, 0x17, 0x71, 0x42, 0x71,
+ 0xdb, 0xd1, 0xdc, 0x1a, 0x51, 0x1b, 0x64, 0x83, 0xfe, 0x37, 0xcd, 0x71, 0xc3, 0xd1, 0xdd, 0x3a,
+ 0x11, 0x4b, 0xf4, 0x29, 0xb4, 0xd3, 0x4d, 0x9c, 0x70, 0x5f, 0xe0, 0x27, 0x8e, 0xee, 0x36, 0x88,
+ 0x21, 0x81, 0x3b, 0x9a, 0xa3, 0x6f, 0xa1, 0xc9, 0x68, 0xca, 0xe9, 0x12, 0x37, 0x1d, 0xcd, 0x35,
+ 0x87, 0xdd, 0x97, 0xbf, 0x3e, 0x91, 0x67, 0xa4, 0xe0, 0xa0, 0x73, 0x68, 0x25, 0xfe, 0x2a, 0x63,
+ 0x2c, 0xc7, 0xb6, 0xa3, 0x7f, 0x30, 0xa9, 0x66, 0x72, 0x2b, 0xb8, 0xe8, 0x67, 0x68, 0x71, 0x9a,
+ 0x24, 0x41, 0xc8, 0x30, 0x38, 0xba, 0x6b, 0x0e, 0xfb, 0xd5, 0x65, 0x0f, 0x8a, 0x74, 0xc3, 0x78,
+ 0x92, 0x93, 0x7d, 0x09, 0xba, 0x00, 0x75, 0xff, 0x43, 0x7f, 0x15, 0xd2, 0xed, 0x12, 0x9b, 0xd2,
+ 0xe8, 0x27, 0xde, 0xfe, 0xae, 0xbd, 0x59, 0x36, 0xff, 0x8d, 0xae, 0x82, 0x6c, 0xcb, 0x53, 0x62,
+ 0x2a, 0xea, 0xad, 0x60, 0xa2, 0xd1, 0xa1, 0xf2, 0xdf, 0x60, 0x9b, 0x51, 0xdc, 0x91, 0xe2, 0x5f,
+ 0x55, 0x8b, 0x4f, 0x25, 0xf3, 0x4f, 0x41, 0x54, 0x06, 0x8a, 0x56, 0x12, 0x41, 0xdf, 0x83, 0x11,
+ 0xb0, 0x9c, 0x6f, 0x42, 0xb6, 0xc6, 0x47, 0x45, 0x52, 0x6a, 0x0e, 0xbd, 0xfd, 0x1c, 0x7a, 0x97,
+ 0x2c, 0x27, 0x07, 0x16, 0x3a, 0x07, 0x33, 0x0a, 0x58, 0xee, 0xcb, 0x5d, 0x8a, 0x8f, 0xa5, 0x76,
+ 0x75, 0x11, 0x08, 0xe2, 0x83, 0xe4, 0xa1, 0x73, 0x80, 0x34, 0x9b, 0x47, 0xca, 0x14, 0xfe, 0xb8,
+ 0xf8, 0xd7, 0x2a, 0xc7, 0xa4, 0x44, 0x44, 0x3f, 0x80, 0xb1, 0xd8, 0x84, 0xdb, 0x65, 0x42, 0x19,
+ 0x46, 0x52, 0xea, 0x8d, 0xa2, 0x03, 0xad, 0x37, 0x05, 0xab, 0x1c, 0xf8, 0x7e, 0x72, 0xd4, 0xd3,
+ 0x90, 0x93, 0xf3, 0x35, 0x34, 0x54, 0x70, 0xb5, 0xf7, 0xcc, 0x86, 0xa2, 0xfc, 0x54, 0xbb, 0xd0,
+ 0x7a, 0x8f, 0x60, 0xbf, 0x4e, 0xb1, 0xa2, 0xeb, 0x37, 0x2f, 0xbb, 0xbe, 0x71, 0x91, 0xcf, 0x6d,
+ 0xfb, 0xbf, 0x42, 0x53, 0x0d, 0x14, 0x32, 0xa1, 0xf5, 0x38, 0xb9, 0x9b, 0xfc, 0xf1, 0x34, 0xb1,
+ 0x3f, 0x42, 0x06, 0xd4, 0xa7, 0x8f, 0x93, 0x99, 0xad, 0xa1, 0x0e, 0xb4, 0x67, 0xe3, 0xcb, 0xe9,
+ 0xec, 0x61, 0x74, 0x7d, 0x67, 0xd7, 0xd0, 0x31, 0x98, 0x57, 0xa3, 0xf1, 0xd8, 0xbf, 0xba, 0x1c,
+ 0x8d, 0x6f, 0xfe, 0xb2, 0xf5, 0xfe, 0x10, 0x9a, 0xca, 0xac, 0x78, 0x33, 0x73, 0x39, 0xbe, 0xca,
+ 0x8f, 0xda, 0x88, 0x57, 0xba, 0xc8, 0xb8, 0x32, 0x64, 0x10, 0xb9, 0xee, 0xff, 0xa7, 0xc1, 0x51,
+ 0x91, 0xd9, 0x53, 0xc8, 0x37, 0xf7, 0xc1, 0x0e, 0x4d, 0xc1, 0x9a, 0xe7, 0x9c, 0xfa, 0x51, 0xb0,
+ 0xdb, 0x89, 0x39, 0xd0, 0x64, 0xce, 0xdf, 0x55, 0xe6, 0x5c, 0xd4, 0x78, 0x57, 0x39, 0xa7, 0xf7,
+ 0x8a, 0x5f, 0x4c, 0xd5, 0xfc, 0x19, 0xe9, 0xfd, 0x02, 0xf6, 0x6b, 0x42, 0x39, 0x30, 0x43, 0x05,
+ 0xd6, 0x2d, 0x07, 0x66, 0x95, 0x93, 0xf9, 0x07, 0x9a, 0x23, 0xc6, 0x85, 0xb7, 0x01, 0xe8, 0x09,
+ 0xe7, 0x85, 0xa5, 0xcf, 0x5f, 0x5a, 0x52, 0x14, 0x8f, 0x70, 0xae, 0x2c, 0x08, 0x66, 0xef, 0x47,
+ 0x30, 0xf6, 0x40, 0x59, 0xb2, 0x51, 0x21, 0xd9, 0x28, 0x4b, 0x9e, 0x41, 0x4b, 0xf5, 0x4b, 0x91,
+ 0x0b, 0xf5, 0x28, 0xd8, 0xa5, 0x85, 0x68, 0xb7, 0x4a, 0x94, 0x48, 0xc6, 0xbc, 0xa9, 0x8e, 0xde,
+ 0x05, 0x00, 0x00, 0xff, 0xff, 0x75, 0x38, 0xad, 0x84, 0xe4, 0x05, 0x00, 0x00,
+}
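The fileDescriptor0 blob above is the gzip-compressed FileDescriptorProto for proto3.proto, stored under the name passed to proto.RegisterFile. A minimal sketch of recovering the raw descriptor bytes through the proto.FileDescriptor accessor defined earlier in this diff; unmarshalling the descriptor itself is omitted to avoid assuming additional packages:

package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io/ioutil"

    "github.com/golang/protobuf/proto"
    _ "github.com/golang/protobuf/proto/proto3_proto" // init() registers the descriptor
)

func main() {
    compressed := proto.FileDescriptor("proto3_proto/proto3.proto")
    zr, err := gzip.NewReader(bytes.NewReader(compressed))
    if err != nil {
        panic(err)
    }
    defer zr.Close()

    raw, err := ioutil.ReadAll(zr)
    if err != nil {
        panic(err)
    }
    // raw now holds the uncompressed, serialized FileDescriptorProto.
    fmt.Printf("uncompressed descriptor: %d bytes\n", len(raw))
}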
diff --git a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto
new file mode 100644
index 0000000..2048655
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto
@@ -0,0 +1,87 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+import "google/protobuf/any.proto";
+import "testdata/test.proto";
+
+package proto3_proto;
+
+message Message {
+ enum Humour {
+ UNKNOWN = 0;
+ PUNS = 1;
+ SLAPSTICK = 2;
+ BILL_BAILEY = 3;
+ }
+
+ string name = 1;
+ Humour hilarity = 2;
+ uint32 height_in_cm = 3;
+ bytes data = 4;
+ int64 result_count = 7;
+ bool true_scotsman = 8;
+ float score = 9;
+
+ repeated uint64 key = 5;
+ repeated int32 short_key = 19;
+ Nested nested = 6;
+ repeated Humour r_funny = 16;
+
+ map<string, Nested> terrain = 10;
+ testdata.SubDefaults proto2_field = 11;
+ map<string, testdata.SubDefaults> proto2_value = 13;
+
+ google.protobuf.Any anything = 14;
+ repeated google.protobuf.Any many_things = 15;
+
+ Message submessage = 17;
+ repeated Message children = 18;
+}
+
+message Nested {
+ string bunny = 1;
+ bool cute = 2;
+}
+
+message MessageWithMap {
+ map<bool, bytes> byte_mapping = 1;
+}
+
+message IntMap {
+ map<int32, int32> rtt = 1;
+}
+
+message IntMaps {
+ repeated IntMap maps = 1;
+}
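For reference, the schema above maps directly onto the generated Go types from proto3.pb.go earlier in this diff: map fields become Go maps, message fields become pointers, and scalar fields are plain values with proto3 zero-value semantics. A minimal sketch under those assumptions (field values are illustrative):

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    pb "github.com/golang/protobuf/proto/proto3_proto"
)

func main() {
    m := &pb.Message{
        Name:     "example",
        Hilarity: pb.Message_PUNS,
        Nested:   &pb.Nested{Bunny: "Monty", Cute: true},
        Terrain: map[string]*pb.Nested{
            "meadow": {Bunny: "Flopsy"},
        },
    }
    b, err := proto.Marshal(m)
    if err != nil {
        panic(err)
    }
    fmt.Printf("encoded %d bytes\n", len(b))
}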
diff --git a/vendor/github.com/golang/protobuf/proto/proto3_test.go b/vendor/github.com/golang/protobuf/proto/proto3_test.go
new file mode 100644
index 0000000..735837f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto3_test.go
@@ -0,0 +1,135 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ pb "github.com/golang/protobuf/proto/proto3_proto"
+ tpb "github.com/golang/protobuf/proto/testdata"
+)
+
+func TestProto3ZeroValues(t *testing.T) {
+ tests := []struct {
+ desc string
+ m proto.Message
+ }{
+ {"zero message", &pb.Message{}},
+ {"empty bytes field", &pb.Message{Data: []byte{}}},
+ }
+ for _, test := range tests {
+ b, err := proto.Marshal(test.m)
+ if err != nil {
+ t.Errorf("%s: proto.Marshal: %v", test.desc, err)
+ continue
+ }
+ if len(b) > 0 {
+ t.Errorf("%s: Encoding is non-empty: %q", test.desc, b)
+ }
+ }
+}
+
+func TestRoundTripProto3(t *testing.T) {
+ m := &pb.Message{
+ Name: "David", // (2 | 1<<3): 0x0a 0x05 "David"
+ Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01
+ HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01
+ Data: []byte("roboto"), // (2 | 4<<3): 0x22 0x06 "roboto"
+ ResultCount: 47, // (0 | 7<<3): 0x38 0x2f
+ TrueScotsman: true, // (0 | 8<<3): 0x40 0x01
+ Score: 8.1, // (5 | 9<<3): 0x4d <8.1>
+
+ Key: []uint64{1, 0xdeadbeef},
+ Nested: &pb.Nested{
+ Bunny: "Monty",
+ },
+ }
+ t.Logf(" m: %v", m)
+
+ b, err := proto.Marshal(m)
+ if err != nil {
+ t.Fatalf("proto.Marshal: %v", err)
+ }
+ t.Logf(" b: %q", b)
+
+ m2 := new(pb.Message)
+ if err := proto.Unmarshal(b, m2); err != nil {
+ t.Fatalf("proto.Unmarshal: %v", err)
+ }
+ t.Logf("m2: %v", m2)
+
+ if !proto.Equal(m, m2) {
+ t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2)
+ }
+}
+
+func TestGettersForBasicTypesExist(t *testing.T) {
+ var m pb.Message
+ if got := m.GetNested().GetBunny(); got != "" {
+ t.Errorf("m.GetNested().GetBunny() = %q, want empty string", got)
+ }
+ if got := m.GetNested().GetCute(); got {
+ t.Errorf("m.GetNested().GetCute() = %t, want false", got)
+ }
+}
+
+func TestProto3SetDefaults(t *testing.T) {
+ in := &pb.Message{
+ Terrain: map[string]*pb.Nested{
+ "meadow": new(pb.Nested),
+ },
+ Proto2Field: new(tpb.SubDefaults),
+ Proto2Value: map[string]*tpb.SubDefaults{
+ "badlands": new(tpb.SubDefaults),
+ },
+ }
+
+ got := proto.Clone(in).(*pb.Message)
+ proto.SetDefaults(got)
+
+ // There are no defaults in proto3. Everything should be the zero value, but
+ // we need to remember to set defaults for nested proto2 messages.
+ want := &pb.Message{
+ Terrain: map[string]*pb.Nested{
+ "meadow": new(pb.Nested),
+ },
+ Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)},
+ Proto2Value: map[string]*tpb.SubDefaults{
+ "badlands": &tpb.SubDefaults{N: proto.Int64(7)},
+ },
+ }
+
+ if !proto.Equal(got, want) {
+ t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want)
+ }
+}
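The zero-value behaviour exercised by TestProto3ZeroValues above can be seen directly: a proto3 field set to its zero value encodes to nothing, so on the wire it is indistinguishable from an unset field. A minimal sketch using the ResultCount field (tag 7, wire bytes 0x38 0x2f per the round-trip test above):

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    pb "github.com/golang/protobuf/proto/proto3_proto"
)

func main() {
    zero, _ := proto.Marshal(&pb.Message{ResultCount: 0}) // zero value: nothing emitted
    set, _ := proto.Marshal(&pb.Message{ResultCount: 47}) // non-zero: 0x38 0x2f
    fmt.Printf("zero value: %d bytes, non-zero: %d bytes\n", len(zero), len(set)) // 0 and 2
}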
diff --git a/vendor/github.com/golang/protobuf/proto/size2_test.go b/vendor/github.com/golang/protobuf/proto/size2_test.go
new file mode 100644
index 0000000..a2729c3
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/size2_test.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "testing"
+)
+
+// This is a separate file and package from size_test.go because that one uses
+// generated messages and thus may not be in package proto without creating a
+// circular dependency, whereas this file tests unexported details of size.go.
+
+func TestVarintSize(t *testing.T) {
+ // Check the edge cases carefully.
+ testCases := []struct {
+ n uint64
+ size int
+ }{
+ {0, 1},
+ {1, 1},
+ {127, 1},
+ {128, 2},
+ {16383, 2},
+ {16384, 3},
+ {1<<63 - 1, 9},
+ {1 << 63, 10},
+ }
+ for _, tc := range testCases {
+ size := sizeVarint(tc.n)
+ if size != tc.size {
+ t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size)
+ }
+ }
+}
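The table in TestVarintSize above follows from how protobuf varints are encoded: seven payload bits per byte, with the high bit as a continuation flag, so the encoded size is the number of 7-bit groups needed (minimum one). A standalone sketch of that calculation, with illustrative names rather than the package's unexported sizeVarint:

package main

import "fmt"

// varintSize reports how many bytes the varint encoding of n occupies:
// one byte per 7 bits of payload, at least one byte for zero.
func varintSize(n uint64) int {
    size := 1
    for n >= 1<<7 {
        n >>= 7
        size++
    }
    return size
}

func main() {
    for _, n := range []uint64{0, 127, 128, 16383, 16384, 1<<63 - 1, 1 << 63} {
        fmt.Printf("varintSize(%d) = %d\n", n, varintSize(n)) // matches the test table: 1,1,2,2,3,9,10
    }
}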
diff --git a/vendor/github.com/golang/protobuf/proto/size_test.go b/vendor/github.com/golang/protobuf/proto/size_test.go
new file mode 100644
index 0000000..af1034d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/size_test.go
@@ -0,0 +1,164 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "log"
+ "strings"
+ "testing"
+
+ . "github.com/golang/protobuf/proto"
+ proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+ pb "github.com/golang/protobuf/proto/testdata"
+)
+
+var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)}
+
+// messageWithExtension2 is in equal_test.go.
+var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)}
+
+func init() {
+ if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil {
+ log.Panicf("SetExtension: %v", err)
+ }
+ if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil {
+ log.Panicf("SetExtension: %v", err)
+ }
+
+ // Force messageWithExtension3 to have the extension encoded.
+ Marshal(messageWithExtension3)
+}
+
+var SizeTests = []struct {
+ desc string
+ pb Message
+}{
+ {"empty", &pb.OtherMessage{}},
+ // Basic types.
+ {"bool", &pb.Defaults{F_Bool: Bool(true)}},
+ {"int32", &pb.Defaults{F_Int32: Int32(12)}},
+ {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}},
+ {"small int64", &pb.Defaults{F_Int64: Int64(1)}},
+ {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}},
+ {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}},
+ {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}},
+ {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}},
+ {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}},
+ {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}},
+ {"float", &pb.Defaults{F_Float: Float32(12.6)}},
+ {"double", &pb.Defaults{F_Double: Float64(13.9)}},
+ {"string", &pb.Defaults{F_String: String("niles")}},
+ {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}},
+ {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}},
+ {"sint32", &pb.Defaults{F_Sint32: Int32(65)}},
+ {"sint64", &pb.Defaults{F_Sint64: Int64(67)}},
+ {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}},
+ // Repeated.
+ {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}},
+ {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}},
+ {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}},
+ {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}},
+ {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}},
+ {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{
+ // Need enough large numbers to verify that the header is counting the number of bytes
+ // for the field, not the number of elements.
+ 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
+ 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
+ }}},
+ {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}},
+ {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}},
+ // Nested.
+ {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}},
+ {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}},
+ // Other things.
+ {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}},
+ {"extension (unencoded)", messageWithExtension1},
+ {"extension (encoded)", messageWithExtension3},
+ // proto3 message
+ {"proto3 empty", &proto3pb.Message{}},
+ {"proto3 bool", &proto3pb.Message{TrueScotsman: true}},
+ {"proto3 int64", &proto3pb.Message{ResultCount: 1}},
+ {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}},
+ {"proto3 float", &proto3pb.Message{Score: 12.6}},
+ {"proto3 string", &proto3pb.Message{Name: "Snezana"}},
+ {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}},
+ {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}},
+ {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
+ {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}},
+
+ {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}},
+ {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}},
+ {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}},
+ {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}},
+
+ {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}},
+ {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}},
+ {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}},
+
+ {"oneof not set", &pb.Oneof{}},
+ {"oneof bool", &pb.Oneof{Union: &pb.Oneof_F_Bool{true}}},
+ {"oneof zero int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{0}}},
+ {"oneof big int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{1 << 20}}},
+ {"oneof int64", &pb.Oneof{Union: &pb.Oneof_F_Int64{42}}},
+ {"oneof fixed32", &pb.Oneof{Union: &pb.Oneof_F_Fixed32{43}}},
+ {"oneof fixed64", &pb.Oneof{Union: &pb.Oneof_F_Fixed64{44}}},
+ {"oneof uint32", &pb.Oneof{Union: &pb.Oneof_F_Uint32{45}}},
+ {"oneof uint64", &pb.Oneof{Union: &pb.Oneof_F_Uint64{46}}},
+ {"oneof float", &pb.Oneof{Union: &pb.Oneof_F_Float{47.1}}},
+ {"oneof double", &pb.Oneof{Union: &pb.Oneof_F_Double{48.9}}},
+ {"oneof string", &pb.Oneof{Union: &pb.Oneof_F_String{"Rhythmic Fman"}}},
+ {"oneof bytes", &pb.Oneof{Union: &pb.Oneof_F_Bytes{[]byte("let go")}}},
+ {"oneof sint32", &pb.Oneof{Union: &pb.Oneof_F_Sint32{50}}},
+ {"oneof sint64", &pb.Oneof{Union: &pb.Oneof_F_Sint64{51}}},
+ {"oneof enum", &pb.Oneof{Union: &pb.Oneof_F_Enum{pb.MyMessage_BLUE}}},
+ {"message for oneof", &pb.GoTestField{Label: String("k"), Type: String("v")}},
+ {"oneof message", &pb.Oneof{Union: &pb.Oneof_F_Message{&pb.GoTestField{Label: String("k"), Type: String("v")}}}},
+ {"oneof group", &pb.Oneof{Union: &pb.Oneof_FGroup{&pb.Oneof_F_Group{X: Int32(52)}}}},
+ {"oneof largest tag", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{1}}},
+ {"multiple oneofs", &pb.Oneof{Union: &pb.Oneof_F_Int32{1}, Tormato: &pb.Oneof_Value{2}}},
+}
+
+func TestSize(t *testing.T) {
+ for _, tc := range SizeTests {
+ size := Size(tc.pb)
+ b, err := Marshal(tc.pb)
+ if err != nil {
+ t.Errorf("%v: Marshal failed: %v", tc.desc, err)
+ continue
+ }
+ if size != len(b) {
+ t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b))
+ t.Logf("%v: bytes: %#v", tc.desc, b)
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/testdata/Makefile b/vendor/github.com/golang/protobuf/proto/testdata/Makefile
new file mode 100644
index 0000000..fc28862
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/testdata/Makefile
@@ -0,0 +1,50 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+include ../../Make.protobuf
+
+all: regenerate
+
+regenerate:
+ rm -f test.pb.go
+ make test.pb.go
+
+# The following rules are just aids to development. Not needed for typical testing.
+
+diff: regenerate
+ git diff test.pb.go
+
+restore:
+ cp test.pb.go.golden test.pb.go
+
+preserve:
+ cp test.pb.go test.pb.go.golden
diff --git a/vendor/github.com/golang/protobuf/proto/testdata/golden_test.go b/vendor/github.com/golang/protobuf/proto/testdata/golden_test.go
new file mode 100644
index 0000000..7172d0e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/testdata/golden_test.go
@@ -0,0 +1,86 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Verify that the compiler output for test.proto is unchanged.
+
+package testdata
+
+import (
+ "crypto/sha1"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+)
+
+// sum returns the SHA-1 hash of the named file, in string form for easy comparison.
+func sum(t *testing.T, name string) string {
+ data, err := ioutil.ReadFile(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("sum(%q): length is %d", name, len(data))
+ hash := sha1.New()
+ _, err = hash.Write(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return fmt.Sprintf("% x", hash.Sum(nil))
+}
+
+func run(t *testing.T, name string, args ...string) {
+ cmd := exec.Command(name, args...)
+ cmd.Stdin = os.Stdin
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ err := cmd.Run()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestGolden(t *testing.T) {
+ // Compute the original checksum.
+ goldenSum := sum(t, "test.pb.go")
+ // Run the proto compiler.
+ run(t, "protoc", "--go_out="+os.TempDir(), "test.proto")
+ newFile := filepath.Join(os.TempDir(), "test.pb.go")
+ defer os.Remove(newFile)
+ // Compute the new checksum.
+ newSum := sum(t, newFile)
+ // Verify that the regenerated file matches the checked-in golden copy.
+ if newSum != goldenSum {
+ run(t, "diff", "-u", "test.pb.go", newFile)
+ t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go")
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go b/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go
new file mode 100644
index 0000000..e980d1a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go
@@ -0,0 +1,4147 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: test.proto
+
+/*
+Package testdata is a generated protocol buffer package.
+
+It is generated from these files:
+ test.proto
+
+It has these top-level messages:
+ GoEnum
+ GoTestField
+ GoTest
+ GoTestRequiredGroupField
+ GoSkipTest
+ NonPackedTest
+ PackedTest
+ MaxTag
+ OldMessage
+ NewMessage
+ InnerMessage
+ OtherMessage
+ RequiredInnerMessage
+ MyMessage
+ Ext
+ ComplexExtension
+ DefaultsMessage
+ MyMessageSet
+ Empty
+ MessageList
+ Strings
+ Defaults
+ SubDefaults
+ RepeatedEnum
+ MoreRepeated
+ GroupOld
+ GroupNew
+ FloatingPoint
+ MessageWithMap
+ Oneof
+ Communique
+*/
+package testdata
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type FOO int32
+
+const (
+ FOO_FOO1 FOO = 1
+)
+
+var FOO_name = map[int32]string{
+ 1: "FOO1",
+}
+var FOO_value = map[string]int32{
+ "FOO1": 1,
+}
+
+func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+}
+func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+}
+func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+}
+func (FOO) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+// An enum, for completeness.
+type GoTest_KIND int32
+
+const (
+ GoTest_VOID GoTest_KIND = 0
+ // Basic types
+ GoTest_BOOL GoTest_KIND = 1
+ GoTest_BYTES GoTest_KIND = 2
+ GoTest_FINGERPRINT GoTest_KIND = 3
+ GoTest_FLOAT GoTest_KIND = 4
+ GoTest_INT GoTest_KIND = 5
+ GoTest_STRING GoTest_KIND = 6
+ GoTest_TIME GoTest_KIND = 7
+ // Groupings
+ GoTest_TUPLE GoTest_KIND = 8
+ GoTest_ARRAY GoTest_KIND = 9
+ GoTest_MAP GoTest_KIND = 10
+ // Table types
+ GoTest_TABLE GoTest_KIND = 11
+ // Functions
+ GoTest_FUNCTION GoTest_KIND = 12
+)
+
+var GoTest_KIND_name = map[int32]string{
+ 0: "VOID",
+ 1: "BOOL",
+ 2: "BYTES",
+ 3: "FINGERPRINT",
+ 4: "FLOAT",
+ 5: "INT",
+ 6: "STRING",
+ 7: "TIME",
+ 8: "TUPLE",
+ 9: "ARRAY",
+ 10: "MAP",
+ 11: "TABLE",
+ 12: "FUNCTION",
+}
+var GoTest_KIND_value = map[string]int32{
+ "VOID": 0,
+ "BOOL": 1,
+ "BYTES": 2,
+ "FINGERPRINT": 3,
+ "FLOAT": 4,
+ "INT": 5,
+ "STRING": 6,
+ "TIME": 7,
+ "TUPLE": 8,
+ "ARRAY": 9,
+ "MAP": 10,
+ "TABLE": 11,
+ "FUNCTION": 12,
+}
+
+func (x GoTest_KIND) Enum() *GoTest_KIND {
+ p := new(GoTest_KIND)
+ *p = x
+ return p
+}
+func (x GoTest_KIND) String() string {
+ return proto.EnumName(GoTest_KIND_name, int32(x))
+}
+func (x *GoTest_KIND) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND")
+ if err != nil {
+ return err
+ }
+ *x = GoTest_KIND(value)
+ return nil
+}
+func (GoTest_KIND) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
+
+type MyMessage_Color int32
+
+const (
+ MyMessage_RED MyMessage_Color = 0
+ MyMessage_GREEN MyMessage_Color = 1
+ MyMessage_BLUE MyMessage_Color = 2
+)
+
+var MyMessage_Color_name = map[int32]string{
+ 0: "RED",
+ 1: "GREEN",
+ 2: "BLUE",
+}
+var MyMessage_Color_value = map[string]int32{
+ "RED": 0,
+ "GREEN": 1,
+ "BLUE": 2,
+}
+
+func (x MyMessage_Color) Enum() *MyMessage_Color {
+ p := new(MyMessage_Color)
+ *p = x
+ return p
+}
+func (x MyMessage_Color) String() string {
+ return proto.EnumName(MyMessage_Color_name, int32(x))
+}
+func (x *MyMessage_Color) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color")
+ if err != nil {
+ return err
+ }
+ *x = MyMessage_Color(value)
+ return nil
+}
+func (MyMessage_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} }
+
+type DefaultsMessage_DefaultsEnum int32
+
+const (
+ DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0
+ DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1
+ DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2
+)
+
+var DefaultsMessage_DefaultsEnum_name = map[int32]string{
+ 0: "ZERO",
+ 1: "ONE",
+ 2: "TWO",
+}
+var DefaultsMessage_DefaultsEnum_value = map[string]int32{
+ "ZERO": 0,
+ "ONE": 1,
+ "TWO": 2,
+}
+
+func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum {
+ p := new(DefaultsMessage_DefaultsEnum)
+ *p = x
+ return p
+}
+func (x DefaultsMessage_DefaultsEnum) String() string {
+ return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x))
+}
+func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum")
+ if err != nil {
+ return err
+ }
+ *x = DefaultsMessage_DefaultsEnum(value)
+ return nil
+}
+func (DefaultsMessage_DefaultsEnum) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{16, 0}
+}
+
+type Defaults_Color int32
+
+const (
+ Defaults_RED Defaults_Color = 0
+ Defaults_GREEN Defaults_Color = 1
+ Defaults_BLUE Defaults_Color = 2
+)
+
+var Defaults_Color_name = map[int32]string{
+ 0: "RED",
+ 1: "GREEN",
+ 2: "BLUE",
+}
+var Defaults_Color_value = map[string]int32{
+ "RED": 0,
+ "GREEN": 1,
+ "BLUE": 2,
+}
+
+func (x Defaults_Color) Enum() *Defaults_Color {
+ p := new(Defaults_Color)
+ *p = x
+ return p
+}
+func (x Defaults_Color) String() string {
+ return proto.EnumName(Defaults_Color_name, int32(x))
+}
+func (x *Defaults_Color) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color")
+ if err != nil {
+ return err
+ }
+ *x = Defaults_Color(value)
+ return nil
+}
+func (Defaults_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{21, 0} }
+
+type RepeatedEnum_Color int32
+
+const (
+ RepeatedEnum_RED RepeatedEnum_Color = 1
+)
+
+var RepeatedEnum_Color_name = map[int32]string{
+ 1: "RED",
+}
+var RepeatedEnum_Color_value = map[string]int32{
+ "RED": 1,
+}
+
+func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color {
+ p := new(RepeatedEnum_Color)
+ *p = x
+ return p
+}
+func (x RepeatedEnum_Color) String() string {
+ return proto.EnumName(RepeatedEnum_Color_name, int32(x))
+}
+func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color")
+ if err != nil {
+ return err
+ }
+ *x = RepeatedEnum_Color(value)
+ return nil
+}
+func (RepeatedEnum_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{23, 0} }
+
+type GoEnum struct {
+ Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoEnum) Reset() { *m = GoEnum{} }
+func (m *GoEnum) String() string { return proto.CompactTextString(m) }
+func (*GoEnum) ProtoMessage() {}
+func (*GoEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *GoEnum) GetFoo() FOO {
+ if m != nil && m.Foo != nil {
+ return *m.Foo
+ }
+ return FOO_FOO1
+}
+
+type GoTestField struct {
+ Label *string `protobuf:"bytes,1,req,name=Label" json:"Label,omitempty"`
+ Type *string `protobuf:"bytes,2,req,name=Type" json:"Type,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTestField) Reset() { *m = GoTestField{} }
+func (m *GoTestField) String() string { return proto.CompactTextString(m) }
+func (*GoTestField) ProtoMessage() {}
+func (*GoTestField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *GoTestField) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+}
+
+func (m *GoTestField) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+type GoTest struct {
+ // Some typical parameters
+ Kind *GoTest_KIND `protobuf:"varint,1,req,name=Kind,enum=testdata.GoTest_KIND" json:"Kind,omitempty"`
+ Table *string `protobuf:"bytes,2,opt,name=Table" json:"Table,omitempty"`
+ Param *int32 `protobuf:"varint,3,opt,name=Param" json:"Param,omitempty"`
+ // Required, repeated and optional foreign fields.
+ RequiredField *GoTestField `protobuf:"bytes,4,req,name=RequiredField" json:"RequiredField,omitempty"`
+ RepeatedField []*GoTestField `protobuf:"bytes,5,rep,name=RepeatedField" json:"RepeatedField,omitempty"`
+ OptionalField *GoTestField `protobuf:"bytes,6,opt,name=OptionalField" json:"OptionalField,omitempty"`
+ // Required fields of all basic types
+ F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required,json=FBoolRequired" json:"F_Bool_required,omitempty"`
+ F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required,json=FInt32Required" json:"F_Int32_required,omitempty"`
+ F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required,json=FInt64Required" json:"F_Int64_required,omitempty"`
+ F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required,json=FFixed32Required" json:"F_Fixed32_required,omitempty"`
+ F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required,json=FFixed64Required" json:"F_Fixed64_required,omitempty"`
+ F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required,json=FUint32Required" json:"F_Uint32_required,omitempty"`
+ F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required,json=FUint64Required" json:"F_Uint64_required,omitempty"`
+ F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required,json=FFloatRequired" json:"F_Float_required,omitempty"`
+ F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required,json=FDoubleRequired" json:"F_Double_required,omitempty"`
+ F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required,json=FStringRequired" json:"F_String_required,omitempty"`
+ F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required,json=FBytesRequired" json:"F_Bytes_required,omitempty"`
+ F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required,json=FSint32Required" json:"F_Sint32_required,omitempty"`
+ F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required,json=FSint64Required" json:"F_Sint64_required,omitempty"`
+ // Repeated fields of all basic types
+ F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated,json=FBoolRepeated" json:"F_Bool_repeated,omitempty"`
+ F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated,json=FInt32Repeated" json:"F_Int32_repeated,omitempty"`
+ F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated,json=FInt64Repeated" json:"F_Int64_repeated,omitempty"`
+ F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated,json=FFixed32Repeated" json:"F_Fixed32_repeated,omitempty"`
+ F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated,json=FFixed64Repeated" json:"F_Fixed64_repeated,omitempty"`
+ F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated,json=FUint32Repeated" json:"F_Uint32_repeated,omitempty"`
+ F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated,json=FUint64Repeated" json:"F_Uint64_repeated,omitempty"`
+ F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated,json=FFloatRepeated" json:"F_Float_repeated,omitempty"`
+ F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated,json=FDoubleRepeated" json:"F_Double_repeated,omitempty"`
+ F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated,json=FStringRepeated" json:"F_String_repeated,omitempty"`
+ F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated,json=FBytesRepeated" json:"F_Bytes_repeated,omitempty"`
+ F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated,json=FSint32Repeated" json:"F_Sint32_repeated,omitempty"`
+ F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated,json=FSint64Repeated" json:"F_Sint64_repeated,omitempty"`
+ // Optional fields of all basic types
+ F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional,json=FBoolOptional" json:"F_Bool_optional,omitempty"`
+ F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional,json=FInt32Optional" json:"F_Int32_optional,omitempty"`
+ F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional,json=FInt64Optional" json:"F_Int64_optional,omitempty"`
+ F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional,json=FFixed32Optional" json:"F_Fixed32_optional,omitempty"`
+ F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional,json=FFixed64Optional" json:"F_Fixed64_optional,omitempty"`
+ F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional,json=FUint32Optional" json:"F_Uint32_optional,omitempty"`
+ F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional,json=FUint64Optional" json:"F_Uint64_optional,omitempty"`
+ F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional,json=FFloatOptional" json:"F_Float_optional,omitempty"`
+ F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional,json=FDoubleOptional" json:"F_Double_optional,omitempty"`
+ F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional,json=FStringOptional" json:"F_String_optional,omitempty"`
+ F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional,json=FBytesOptional" json:"F_Bytes_optional,omitempty"`
+ F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional,json=FSint32Optional" json:"F_Sint32_optional,omitempty"`
+ F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional,json=FSint64Optional" json:"F_Sint64_optional,omitempty"`
+ // Default-valued fields of all basic types
+ F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,json=FBoolDefaulted,def=1" json:"F_Bool_defaulted,omitempty"`
+ F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,json=FInt32Defaulted,def=32" json:"F_Int32_defaulted,omitempty"`
+ F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,json=FInt64Defaulted,def=64" json:"F_Int64_defaulted,omitempty"`
+ F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,json=FFixed32Defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"`
+ F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,json=FFixed64Defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"`
+ F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,json=FUint32Defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"`
+ F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,json=FUint64Defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"`
+ F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,json=FFloatDefaulted,def=314159" json:"F_Float_defaulted,omitempty"`
+ F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,json=FDoubleDefaulted,def=271828" json:"F_Double_defaulted,omitempty"`
+ F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,json=FStringDefaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"`
+ F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,json=FBytesDefaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"`
+ F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,json=FSint32Defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"`
+ F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,json=FSint64Defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"`
+ // Packed repeated fields (no string or bytes).
+ F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed,json=FBoolRepeatedPacked" json:"F_Bool_repeated_packed,omitempty"`
+ F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed,json=FInt32RepeatedPacked" json:"F_Int32_repeated_packed,omitempty"`
+ F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed,json=FInt64RepeatedPacked" json:"F_Int64_repeated_packed,omitempty"`
+ F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed,json=FFixed32RepeatedPacked" json:"F_Fixed32_repeated_packed,omitempty"`
+ F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed,json=FFixed64RepeatedPacked" json:"F_Fixed64_repeated_packed,omitempty"`
+ F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed,json=FUint32RepeatedPacked" json:"F_Uint32_repeated_packed,omitempty"`
+ F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed,json=FUint64RepeatedPacked" json:"F_Uint64_repeated_packed,omitempty"`
+ F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed,json=FFloatRepeatedPacked" json:"F_Float_repeated_packed,omitempty"`
+ F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed,json=FDoubleRepeatedPacked" json:"F_Double_repeated_packed,omitempty"`
+ F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed,json=FSint32RepeatedPacked" json:"F_Sint32_repeated_packed,omitempty"`
+ F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed,json=FSint64RepeatedPacked" json:"F_Sint64_repeated_packed,omitempty"`
+ Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup,json=requiredgroup" json:"requiredgroup,omitempty"`
+ Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup,json=repeatedgroup" json:"repeatedgroup,omitempty"`
+ Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup,json=optionalgroup" json:"optionalgroup,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTest) Reset() { *m = GoTest{} }
+func (m *GoTest) String() string { return proto.CompactTextString(m) }
+func (*GoTest) ProtoMessage() {}
+func (*GoTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+const Default_GoTest_F_BoolDefaulted bool = true
+const Default_GoTest_F_Int32Defaulted int32 = 32
+const Default_GoTest_F_Int64Defaulted int64 = 64
+const Default_GoTest_F_Fixed32Defaulted uint32 = 320
+const Default_GoTest_F_Fixed64Defaulted uint64 = 640
+const Default_GoTest_F_Uint32Defaulted uint32 = 3200
+const Default_GoTest_F_Uint64Defaulted uint64 = 6400
+const Default_GoTest_F_FloatDefaulted float32 = 314159
+const Default_GoTest_F_DoubleDefaulted float64 = 271828
+const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n"
+
+var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose")
+
+const Default_GoTest_F_Sint32Defaulted int32 = -32
+const Default_GoTest_F_Sint64Defaulted int64 = -64
+
+func (m *GoTest) GetKind() GoTest_KIND {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return GoTest_VOID
+}
+
+func (m *GoTest) GetTable() string {
+ if m != nil && m.Table != nil {
+ return *m.Table
+ }
+ return ""
+}
+
+func (m *GoTest) GetParam() int32 {
+ if m != nil && m.Param != nil {
+ return *m.Param
+ }
+ return 0
+}
+
+func (m *GoTest) GetRequiredField() *GoTestField {
+ if m != nil {
+ return m.RequiredField
+ }
+ return nil
+}
+
+func (m *GoTest) GetRepeatedField() []*GoTestField {
+ if m != nil {
+ return m.RepeatedField
+ }
+ return nil
+}
+
+func (m *GoTest) GetOptionalField() *GoTestField {
+ if m != nil {
+ return m.OptionalField
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_BoolRequired() bool {
+ if m != nil && m.F_BoolRequired != nil {
+ return *m.F_BoolRequired
+ }
+ return false
+}
+
+func (m *GoTest) GetF_Int32Required() int32 {
+ if m != nil && m.F_Int32Required != nil {
+ return *m.F_Int32Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Int64Required() int64 {
+ if m != nil && m.F_Int64Required != nil {
+ return *m.F_Int64Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Fixed32Required() uint32 {
+ if m != nil && m.F_Fixed32Required != nil {
+ return *m.F_Fixed32Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Fixed64Required() uint64 {
+ if m != nil && m.F_Fixed64Required != nil {
+ return *m.F_Fixed64Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Uint32Required() uint32 {
+ if m != nil && m.F_Uint32Required != nil {
+ return *m.F_Uint32Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Uint64Required() uint64 {
+ if m != nil && m.F_Uint64Required != nil {
+ return *m.F_Uint64Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_FloatRequired() float32 {
+ if m != nil && m.F_FloatRequired != nil {
+ return *m.F_FloatRequired
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_DoubleRequired() float64 {
+ if m != nil && m.F_DoubleRequired != nil {
+ return *m.F_DoubleRequired
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_StringRequired() string {
+ if m != nil && m.F_StringRequired != nil {
+ return *m.F_StringRequired
+ }
+ return ""
+}
+
+func (m *GoTest) GetF_BytesRequired() []byte {
+ if m != nil {
+ return m.F_BytesRequired
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint32Required() int32 {
+ if m != nil && m.F_Sint32Required != nil {
+ return *m.F_Sint32Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Sint64Required() int64 {
+ if m != nil && m.F_Sint64Required != nil {
+ return *m.F_Sint64Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_BoolRepeated() []bool {
+ if m != nil {
+ return m.F_BoolRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Int32Repeated() []int32 {
+ if m != nil {
+ return m.F_Int32Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Int64Repeated() []int64 {
+ if m != nil {
+ return m.F_Int64Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Fixed32Repeated() []uint32 {
+ if m != nil {
+ return m.F_Fixed32Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Fixed64Repeated() []uint64 {
+ if m != nil {
+ return m.F_Fixed64Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Uint32Repeated() []uint32 {
+ if m != nil {
+ return m.F_Uint32Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Uint64Repeated() []uint64 {
+ if m != nil {
+ return m.F_Uint64Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_FloatRepeated() []float32 {
+ if m != nil {
+ return m.F_FloatRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_DoubleRepeated() []float64 {
+ if m != nil {
+ return m.F_DoubleRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_StringRepeated() []string {
+ if m != nil {
+ return m.F_StringRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_BytesRepeated() [][]byte {
+ if m != nil {
+ return m.F_BytesRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint32Repeated() []int32 {
+ if m != nil {
+ return m.F_Sint32Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint64Repeated() []int64 {
+ if m != nil {
+ return m.F_Sint64Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_BoolOptional() bool {
+ if m != nil && m.F_BoolOptional != nil {
+ return *m.F_BoolOptional
+ }
+ return false
+}
+
+func (m *GoTest) GetF_Int32Optional() int32 {
+ if m != nil && m.F_Int32Optional != nil {
+ return *m.F_Int32Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Int64Optional() int64 {
+ if m != nil && m.F_Int64Optional != nil {
+ return *m.F_Int64Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Fixed32Optional() uint32 {
+ if m != nil && m.F_Fixed32Optional != nil {
+ return *m.F_Fixed32Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Fixed64Optional() uint64 {
+ if m != nil && m.F_Fixed64Optional != nil {
+ return *m.F_Fixed64Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Uint32Optional() uint32 {
+ if m != nil && m.F_Uint32Optional != nil {
+ return *m.F_Uint32Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Uint64Optional() uint64 {
+ if m != nil && m.F_Uint64Optional != nil {
+ return *m.F_Uint64Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_FloatOptional() float32 {
+ if m != nil && m.F_FloatOptional != nil {
+ return *m.F_FloatOptional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_DoubleOptional() float64 {
+ if m != nil && m.F_DoubleOptional != nil {
+ return *m.F_DoubleOptional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_StringOptional() string {
+ if m != nil && m.F_StringOptional != nil {
+ return *m.F_StringOptional
+ }
+ return ""
+}
+
+func (m *GoTest) GetF_BytesOptional() []byte {
+ if m != nil {
+ return m.F_BytesOptional
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint32Optional() int32 {
+ if m != nil && m.F_Sint32Optional != nil {
+ return *m.F_Sint32Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Sint64Optional() int64 {
+ if m != nil && m.F_Sint64Optional != nil {
+ return *m.F_Sint64Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_BoolDefaulted() bool {
+ if m != nil && m.F_BoolDefaulted != nil {
+ return *m.F_BoolDefaulted
+ }
+ return Default_GoTest_F_BoolDefaulted
+}
+
+func (m *GoTest) GetF_Int32Defaulted() int32 {
+ if m != nil && m.F_Int32Defaulted != nil {
+ return *m.F_Int32Defaulted
+ }
+ return Default_GoTest_F_Int32Defaulted
+}
+
+func (m *GoTest) GetF_Int64Defaulted() int64 {
+ if m != nil && m.F_Int64Defaulted != nil {
+ return *m.F_Int64Defaulted
+ }
+ return Default_GoTest_F_Int64Defaulted
+}
+
+func (m *GoTest) GetF_Fixed32Defaulted() uint32 {
+ if m != nil && m.F_Fixed32Defaulted != nil {
+ return *m.F_Fixed32Defaulted
+ }
+ return Default_GoTest_F_Fixed32Defaulted
+}
+
+func (m *GoTest) GetF_Fixed64Defaulted() uint64 {
+ if m != nil && m.F_Fixed64Defaulted != nil {
+ return *m.F_Fixed64Defaulted
+ }
+ return Default_GoTest_F_Fixed64Defaulted
+}
+
+func (m *GoTest) GetF_Uint32Defaulted() uint32 {
+ if m != nil && m.F_Uint32Defaulted != nil {
+ return *m.F_Uint32Defaulted
+ }
+ return Default_GoTest_F_Uint32Defaulted
+}
+
+func (m *GoTest) GetF_Uint64Defaulted() uint64 {
+ if m != nil && m.F_Uint64Defaulted != nil {
+ return *m.F_Uint64Defaulted
+ }
+ return Default_GoTest_F_Uint64Defaulted
+}
+
+func (m *GoTest) GetF_FloatDefaulted() float32 {
+ if m != nil && m.F_FloatDefaulted != nil {
+ return *m.F_FloatDefaulted
+ }
+ return Default_GoTest_F_FloatDefaulted
+}
+
+func (m *GoTest) GetF_DoubleDefaulted() float64 {
+ if m != nil && m.F_DoubleDefaulted != nil {
+ return *m.F_DoubleDefaulted
+ }
+ return Default_GoTest_F_DoubleDefaulted
+}
+
+func (m *GoTest) GetF_StringDefaulted() string {
+ if m != nil && m.F_StringDefaulted != nil {
+ return *m.F_StringDefaulted
+ }
+ return Default_GoTest_F_StringDefaulted
+}
+
+func (m *GoTest) GetF_BytesDefaulted() []byte {
+ if m != nil && m.F_BytesDefaulted != nil {
+ return m.F_BytesDefaulted
+ }
+ return append([]byte(nil), Default_GoTest_F_BytesDefaulted...)
+}
+
+func (m *GoTest) GetF_Sint32Defaulted() int32 {
+ if m != nil && m.F_Sint32Defaulted != nil {
+ return *m.F_Sint32Defaulted
+ }
+ return Default_GoTest_F_Sint32Defaulted
+}
+
+func (m *GoTest) GetF_Sint64Defaulted() int64 {
+ if m != nil && m.F_Sint64Defaulted != nil {
+ return *m.F_Sint64Defaulted
+ }
+ return Default_GoTest_F_Sint64Defaulted
+}
+
+func (m *GoTest) GetF_BoolRepeatedPacked() []bool {
+ if m != nil {
+ return m.F_BoolRepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Int32RepeatedPacked() []int32 {
+ if m != nil {
+ return m.F_Int32RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Int64RepeatedPacked() []int64 {
+ if m != nil {
+ return m.F_Int64RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 {
+ if m != nil {
+ return m.F_Fixed32RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 {
+ if m != nil {
+ return m.F_Fixed64RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 {
+ if m != nil {
+ return m.F_Uint32RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 {
+ if m != nil {
+ return m.F_Uint64RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_FloatRepeatedPacked() []float32 {
+ if m != nil {
+ return m.F_FloatRepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 {
+ if m != nil {
+ return m.F_DoubleRepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 {
+ if m != nil {
+ return m.F_Sint32RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 {
+ if m != nil {
+ return m.F_Sint64RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup {
+ if m != nil {
+ return m.Requiredgroup
+ }
+ return nil
+}
+
+func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup {
+ if m != nil {
+ return m.Repeatedgroup
+ }
+ return nil
+}
+
+func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+}
+
+// Required, repeated, and optional groups.
+type GoTest_RequiredGroup struct {
+ RequiredField *string `protobuf:"bytes,71,req,name=RequiredField" json:"RequiredField,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} }
+func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) }
+func (*GoTest_RequiredGroup) ProtoMessage() {}
+func (*GoTest_RequiredGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
+
+func (m *GoTest_RequiredGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+}
+
+type GoTest_RepeatedGroup struct {
+ RequiredField *string `protobuf:"bytes,81,req,name=RequiredField" json:"RequiredField,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} }
+func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) }
+func (*GoTest_RepeatedGroup) ProtoMessage() {}
+func (*GoTest_RepeatedGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} }
+
+func (m *GoTest_RepeatedGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+}
+
+type GoTest_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,91,req,name=RequiredField" json:"RequiredField,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} }
+func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) }
+func (*GoTest_OptionalGroup) ProtoMessage() {}
+func (*GoTest_OptionalGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 2} }
+
+func (m *GoTest_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+}
+
+// For testing a group containing a required field.
+type GoTestRequiredGroupField struct {
+ Group *GoTestRequiredGroupField_Group `protobuf:"group,1,req,name=Group,json=group" json:"group,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTestRequiredGroupField) Reset() { *m = GoTestRequiredGroupField{} }
+func (m *GoTestRequiredGroupField) String() string { return proto.CompactTextString(m) }
+func (*GoTestRequiredGroupField) ProtoMessage() {}
+func (*GoTestRequiredGroupField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *GoTestRequiredGroupField) GetGroup() *GoTestRequiredGroupField_Group {
+ if m != nil {
+ return m.Group
+ }
+ return nil
+}
+
+type GoTestRequiredGroupField_Group struct {
+ Field *int32 `protobuf:"varint,2,req,name=Field" json:"Field,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTestRequiredGroupField_Group) Reset() { *m = GoTestRequiredGroupField_Group{} }
+func (m *GoTestRequiredGroupField_Group) String() string { return proto.CompactTextString(m) }
+func (*GoTestRequiredGroupField_Group) ProtoMessage() {}
+func (*GoTestRequiredGroupField_Group) Descriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{3, 0}
+}
+
+func (m *GoTestRequiredGroupField_Group) GetField() int32 {
+ if m != nil && m.Field != nil {
+ return *m.Field
+ }
+ return 0
+}
+
+// For testing skipping of unrecognized fields.
+// Numbers are all big, larger than tag numbers in GoTestField,
+// the message used in the corresponding test.
+type GoSkipTest struct {
+ SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32,json=skipInt32" json:"skip_int32,omitempty"`
+ SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32,json=skipFixed32" json:"skip_fixed32,omitempty"`
+ SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64,json=skipFixed64" json:"skip_fixed64,omitempty"`
+ SkipString *string `protobuf:"bytes,14,req,name=skip_string,json=skipString" json:"skip_string,omitempty"`
+ Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup,json=skipgroup" json:"skipgroup,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoSkipTest) Reset() { *m = GoSkipTest{} }
+func (m *GoSkipTest) String() string { return proto.CompactTextString(m) }
+func (*GoSkipTest) ProtoMessage() {}
+func (*GoSkipTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *GoSkipTest) GetSkipInt32() int32 {
+ if m != nil && m.SkipInt32 != nil {
+ return *m.SkipInt32
+ }
+ return 0
+}
+
+func (m *GoSkipTest) GetSkipFixed32() uint32 {
+ if m != nil && m.SkipFixed32 != nil {
+ return *m.SkipFixed32
+ }
+ return 0
+}
+
+func (m *GoSkipTest) GetSkipFixed64() uint64 {
+ if m != nil && m.SkipFixed64 != nil {
+ return *m.SkipFixed64
+ }
+ return 0
+}
+
+func (m *GoSkipTest) GetSkipString() string {
+ if m != nil && m.SkipString != nil {
+ return *m.SkipString
+ }
+ return ""
+}
+
+func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup {
+ if m != nil {
+ return m.Skipgroup
+ }
+ return nil
+}
+
+type GoSkipTest_SkipGroup struct {
+ GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32,json=groupInt32" json:"group_int32,omitempty"`
+ GroupString *string `protobuf:"bytes,17,req,name=group_string,json=groupString" json:"group_string,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} }
+func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) }
+func (*GoSkipTest_SkipGroup) ProtoMessage() {}
+func (*GoSkipTest_SkipGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} }
+
+func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 {
+ if m != nil && m.GroupInt32 != nil {
+ return *m.GroupInt32
+ }
+ return 0
+}
+
+func (m *GoSkipTest_SkipGroup) GetGroupString() string {
+ if m != nil && m.GroupString != nil {
+ return *m.GroupString
+ }
+ return ""
+}
+
+// For testing packed/non-packed decoder switching.
+// A serialized instance of one should be deserializable as the other.
+type NonPackedTest struct {
+ A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NonPackedTest) Reset() { *m = NonPackedTest{} }
+func (m *NonPackedTest) String() string { return proto.CompactTextString(m) }
+func (*NonPackedTest) ProtoMessage() {}
+func (*NonPackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+func (m *NonPackedTest) GetA() []int32 {
+ if m != nil {
+ return m.A
+ }
+ return nil
+}
+
+type PackedTest struct {
+ B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PackedTest) Reset() { *m = PackedTest{} }
+func (m *PackedTest) String() string { return proto.CompactTextString(m) }
+func (*PackedTest) ProtoMessage() {}
+func (*PackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+func (m *PackedTest) GetB() []int32 {
+ if m != nil {
+ return m.B
+ }
+ return nil
+}
+
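+// Illustrative sketch only (not generator output; the function name is
+// hypothetical): the comment above says a serialized instance of the packed
+// form should be deserializable as the non-packed form. A minimal sketch,
+// assuming proto.Marshal/proto.Unmarshal from the proto import used above:
+func examplePackedInterop() (*NonPackedTest, error) {
+ // Encode using the packed declaration of the repeated field (tag 1).
+ buf, err := proto.Marshal(&PackedTest{B: []int32{1, 2, 3}})
+ if err != nil {
+ return nil, err
+ }
+ // The same bytes decode into the non-packed declaration of tag 1.
+ dst := new(NonPackedTest)
+ if err := proto.Unmarshal(buf, dst); err != nil {
+ return nil, err
+ }
+ return dst, nil // dst.GetA() holds 1, 2, 3
+}
+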
+type MaxTag struct {
+ // Maximum possible tag number.
+ LastField *string `protobuf:"bytes,536870911,opt,name=last_field,json=lastField" json:"last_field,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MaxTag) Reset() { *m = MaxTag{} }
+func (m *MaxTag) String() string { return proto.CompactTextString(m) }
+func (*MaxTag) ProtoMessage() {}
+func (*MaxTag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+
+func (m *MaxTag) GetLastField() string {
+ if m != nil && m.LastField != nil {
+ return *m.LastField
+ }
+ return ""
+}
+
+type OldMessage struct {
+ Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"`
+ Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OldMessage) Reset() { *m = OldMessage{} }
+func (m *OldMessage) String() string { return proto.CompactTextString(m) }
+func (*OldMessage) ProtoMessage() {}
+func (*OldMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+
+func (m *OldMessage) GetNested() *OldMessage_Nested {
+ if m != nil {
+ return m.Nested
+ }
+ return nil
+}
+
+func (m *OldMessage) GetNum() int32 {
+ if m != nil && m.Num != nil {
+ return *m.Num
+ }
+ return 0
+}
+
+type OldMessage_Nested struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} }
+func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) }
+func (*OldMessage_Nested) ProtoMessage() {}
+func (*OldMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 0} }
+
+func (m *OldMessage_Nested) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+// NewMessage is wire compatible with OldMessage;
+// imagine it as a future version.
+type NewMessage struct {
+ Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"`
+ // This is an int32 in OldMessage.
+ Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NewMessage) Reset() { *m = NewMessage{} }
+func (m *NewMessage) String() string { return proto.CompactTextString(m) }
+func (*NewMessage) ProtoMessage() {}
+func (*NewMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+
+func (m *NewMessage) GetNested() *NewMessage_Nested {
+ if m != nil {
+ return m.Nested
+ }
+ return nil
+}
+
+func (m *NewMessage) GetNum() int64 {
+ if m != nil && m.Num != nil {
+ return *m.Num
+ }
+ return 0
+}
+
+type NewMessage_Nested struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ FoodGroup *string `protobuf:"bytes,2,opt,name=food_group,json=foodGroup" json:"food_group,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} }
+func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) }
+func (*NewMessage_Nested) ProtoMessage() {}
+func (*NewMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} }
+
+func (m *NewMessage_Nested) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *NewMessage_Nested) GetFoodGroup() string {
+ if m != nil && m.FoodGroup != nil {
+ return *m.FoodGroup
+ }
+ return ""
+}
+
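+// Illustrative sketch only (not generator output; the function name is
+// hypothetical): NewMessage is described above as wire compatible with
+// OldMessage, with num widening from int32 to int64. A minimal sketch,
+// assuming proto.Marshal/proto.Unmarshal/proto.Int32 from the proto import:
+func exampleOldToNew() (*NewMessage, error) {
+ // Serialize the "old" shape; field 2 is written as an int32 varint.
+ buf, err := proto.Marshal(&OldMessage{Num: proto.Int32(7)})
+ if err != nil {
+ return nil, err
+ }
+ // The "new" shape reads the same varint as an int64.
+ nm := new(NewMessage)
+ if err := proto.Unmarshal(buf, nm); err != nil {
+ return nil, err
+ }
+ return nm, nil // nm.GetNum() == 7
+}
+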
+type InnerMessage struct {
+ Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"`
+ Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"`
+ Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InnerMessage) Reset() { *m = InnerMessage{} }
+func (m *InnerMessage) String() string { return proto.CompactTextString(m) }
+func (*InnerMessage) ProtoMessage() {}
+func (*InnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+const Default_InnerMessage_Port int32 = 4000
+
+func (m *InnerMessage) GetHost() string {
+ if m != nil && m.Host != nil {
+ return *m.Host
+ }
+ return ""
+}
+
+func (m *InnerMessage) GetPort() int32 {
+ if m != nil && m.Port != nil {
+ return *m.Port
+ }
+ return Default_InnerMessage_Port
+}
+
+func (m *InnerMessage) GetConnected() bool {
+ if m != nil && m.Connected != nil {
+ return *m.Connected
+ }
+ return false
+}
+
+type OtherMessage struct {
+ Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"`
+ Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OtherMessage) Reset() { *m = OtherMessage{} }
+func (m *OtherMessage) String() string { return proto.CompactTextString(m) }
+func (*OtherMessage) ProtoMessage() {}
+func (*OtherMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+var extRange_OtherMessage = []proto.ExtensionRange{
+ {100, 536870911},
+}
+
+func (*OtherMessage) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_OtherMessage
+}
+
+func (m *OtherMessage) GetKey() int64 {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return 0
+}
+
+func (m *OtherMessage) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *OtherMessage) GetWeight() float32 {
+ if m != nil && m.Weight != nil {
+ return *m.Weight
+ }
+ return 0
+}
+
+func (m *OtherMessage) GetInner() *InnerMessage {
+ if m != nil {
+ return m.Inner
+ }
+ return nil
+}
+
+type RequiredInnerMessage struct {
+ LeoFinallyWonAnOscar *InnerMessage `protobuf:"bytes,1,req,name=leo_finally_won_an_oscar,json=leoFinallyWonAnOscar" json:"leo_finally_won_an_oscar,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RequiredInnerMessage) Reset() { *m = RequiredInnerMessage{} }
+func (m *RequiredInnerMessage) String() string { return proto.CompactTextString(m) }
+func (*RequiredInnerMessage) ProtoMessage() {}
+func (*RequiredInnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+
+func (m *RequiredInnerMessage) GetLeoFinallyWonAnOscar() *InnerMessage {
+ if m != nil {
+ return m.LeoFinallyWonAnOscar
+ }
+ return nil
+}
+
+type MyMessage struct {
+ Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"`
+ Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
+ Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"`
+ Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"`
+ Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"`
+ Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"`
+ WeMustGoDeeper *RequiredInnerMessage `protobuf:"bytes,13,opt,name=we_must_go_deeper,json=weMustGoDeeper" json:"we_must_go_deeper,omitempty"`
+ RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner,json=repInner" json:"rep_inner,omitempty"`
+ Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"`
+ Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"`
+ // This field becomes [][]byte in the generated code.
+ RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes,json=repBytes" json:"rep_bytes,omitempty"`
+ Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MyMessage) Reset() { *m = MyMessage{} }
+func (m *MyMessage) String() string { return proto.CompactTextString(m) }
+func (*MyMessage) ProtoMessage() {}
+func (*MyMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+
+var extRange_MyMessage = []proto.ExtensionRange{
+ {100, 536870911},
+}
+
+func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MyMessage
+}
+
+func (m *MyMessage) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *MyMessage) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MyMessage) GetQuote() string {
+ if m != nil && m.Quote != nil {
+ return *m.Quote
+ }
+ return ""
+}
+
+func (m *MyMessage) GetPet() []string {
+ if m != nil {
+ return m.Pet
+ }
+ return nil
+}
+
+func (m *MyMessage) GetInner() *InnerMessage {
+ if m != nil {
+ return m.Inner
+ }
+ return nil
+}
+
+func (m *MyMessage) GetOthers() []*OtherMessage {
+ if m != nil {
+ return m.Others
+ }
+ return nil
+}
+
+func (m *MyMessage) GetWeMustGoDeeper() *RequiredInnerMessage {
+ if m != nil {
+ return m.WeMustGoDeeper
+ }
+ return nil
+}
+
+func (m *MyMessage) GetRepInner() []*InnerMessage {
+ if m != nil {
+ return m.RepInner
+ }
+ return nil
+}
+
+func (m *MyMessage) GetBikeshed() MyMessage_Color {
+ if m != nil && m.Bikeshed != nil {
+ return *m.Bikeshed
+ }
+ return MyMessage_RED
+}
+
+func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup {
+ if m != nil {
+ return m.Somegroup
+ }
+ return nil
+}
+
+func (m *MyMessage) GetRepBytes() [][]byte {
+ if m != nil {
+ return m.RepBytes
+ }
+ return nil
+}
+
+func (m *MyMessage) GetBigfloat() float64 {
+ if m != nil && m.Bigfloat != nil {
+ return *m.Bigfloat
+ }
+ return 0
+}
+
+type MyMessage_SomeGroup struct {
+ GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} }
+func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) }
+func (*MyMessage_SomeGroup) ProtoMessage() {}
+func (*MyMessage_SomeGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} }
+
+func (m *MyMessage_SomeGroup) GetGroupField() int32 {
+ if m != nil && m.GroupField != nil {
+ return *m.GroupField
+ }
+ return 0
+}
+
+type Ext struct {
+ Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Ext) Reset() { *m = Ext{} }
+func (m *Ext) String() string { return proto.CompactTextString(m) }
+func (*Ext) ProtoMessage() {}
+func (*Ext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
+func (m *Ext) GetData() string {
+ if m != nil && m.Data != nil {
+ return *m.Data
+ }
+ return ""
+}
+
+var E_Ext_More = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessage)(nil),
+ ExtensionType: (*Ext)(nil),
+ Field: 103,
+ Name: "testdata.Ext.more",
+ Tag: "bytes,103,opt,name=more",
+ Filename: "test.proto",
+}
+
+var E_Ext_Text = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessage)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 104,
+ Name: "testdata.Ext.text",
+ Tag: "bytes,104,opt,name=text",
+ Filename: "test.proto",
+}
+
+var E_Ext_Number = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 105,
+ Name: "testdata.Ext.number",
+ Tag: "varint,105,opt,name=number",
+ Filename: "test.proto",
+}
+
+type ComplexExtension struct {
+ First *int32 `protobuf:"varint,1,opt,name=first" json:"first,omitempty"`
+ Second *int32 `protobuf:"varint,2,opt,name=second" json:"second,omitempty"`
+ Third []int32 `protobuf:"varint,3,rep,name=third" json:"third,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ComplexExtension) Reset() { *m = ComplexExtension{} }
+func (m *ComplexExtension) String() string { return proto.CompactTextString(m) }
+func (*ComplexExtension) ProtoMessage() {}
+func (*ComplexExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+
+func (m *ComplexExtension) GetFirst() int32 {
+ if m != nil && m.First != nil {
+ return *m.First
+ }
+ return 0
+}
+
+func (m *ComplexExtension) GetSecond() int32 {
+ if m != nil && m.Second != nil {
+ return *m.Second
+ }
+ return 0
+}
+
+func (m *ComplexExtension) GetThird() []int32 {
+ if m != nil {
+ return m.Third
+ }
+ return nil
+}
+
+type DefaultsMessage struct {
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} }
+func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) }
+func (*DefaultsMessage) ProtoMessage() {}
+func (*DefaultsMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+
+var extRange_DefaultsMessage = []proto.ExtensionRange{
+ {100, 536870911},
+}
+
+func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_DefaultsMessage
+}
+
+type MyMessageSet struct {
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MyMessageSet) Reset() { *m = MyMessageSet{} }
+func (m *MyMessageSet) String() string { return proto.CompactTextString(m) }
+func (*MyMessageSet) ProtoMessage() {}
+func (*MyMessageSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+
+func (m *MyMessageSet) Marshal() ([]byte, error) {
+ return proto.MarshalMessageSet(&m.XXX_InternalExtensions)
+}
+func (m *MyMessageSet) Unmarshal(buf []byte) error {
+ return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions)
+}
+func (m *MyMessageSet) MarshalJSON() ([]byte, error) {
+ return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions)
+}
+func (m *MyMessageSet) UnmarshalJSON(buf []byte) error {
+ return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions)
+}
+
+// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler
+var _ proto.Marshaler = (*MyMessageSet)(nil)
+var _ proto.Unmarshaler = (*MyMessageSet)(nil)
+
+var extRange_MyMessageSet = []proto.ExtensionRange{
+ {100, 2147483646},
+}
+
+func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MyMessageSet
+}
+
+type Empty struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Empty) Reset() { *m = Empty{} }
+func (m *Empty) String() string { return proto.CompactTextString(m) }
+func (*Empty) ProtoMessage() {}
+func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+
+type MessageList struct {
+ Message []*MessageList_Message `protobuf:"group,1,rep,name=Message,json=message" json:"message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageList) Reset() { *m = MessageList{} }
+func (m *MessageList) String() string { return proto.CompactTextString(m) }
+func (*MessageList) ProtoMessage() {}
+func (*MessageList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+
+func (m *MessageList) GetMessage() []*MessageList_Message {
+ if m != nil {
+ return m.Message
+ }
+ return nil
+}
+
+type MessageList_Message struct {
+ Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"`
+ Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageList_Message) Reset() { *m = MessageList_Message{} }
+func (m *MessageList_Message) String() string { return proto.CompactTextString(m) }
+func (*MessageList_Message) ProtoMessage() {}
+func (*MessageList_Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} }
+
+func (m *MessageList_Message) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MessageList_Message) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+type Strings struct {
+ StringField *string `protobuf:"bytes,1,opt,name=string_field,json=stringField" json:"string_field,omitempty"`
+ BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field,json=bytesField" json:"bytes_field,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Strings) Reset() { *m = Strings{} }
+func (m *Strings) String() string { return proto.CompactTextString(m) }
+func (*Strings) ProtoMessage() {}
+func (*Strings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+
+func (m *Strings) GetStringField() string {
+ if m != nil && m.StringField != nil {
+ return *m.StringField
+ }
+ return ""
+}
+
+func (m *Strings) GetBytesField() []byte {
+ if m != nil {
+ return m.BytesField
+ }
+ return nil
+}
+
+type Defaults struct {
+ // Default-valued fields of all basic types.
+ // Same as GoTest, but copied here to make testing easier.
+ F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,def=1" json:"F_Bool,omitempty"`
+ F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,def=32" json:"F_Int32,omitempty"`
+ F_Int64 *int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,def=64" json:"F_Int64,omitempty"`
+ F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,def=320" json:"F_Fixed32,omitempty"`
+ F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,def=640" json:"F_Fixed64,omitempty"`
+ F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,def=3200" json:"F_Uint32,omitempty"`
+ F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,def=6400" json:"F_Uint64,omitempty"`
+ F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,def=314159" json:"F_Float,omitempty"`
+ F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,def=271828" json:"F_Double,omitempty"`
+ F_String *string `protobuf:"bytes,10,opt,name=F_String,json=FString,def=hello, \"world!\"\n" json:"F_String,omitempty"`
+ F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,def=Bignose" json:"F_Bytes,omitempty"`
+ F_Sint32 *int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,def=-32" json:"F_Sint32,omitempty"`
+ F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,def=-64" json:"F_Sint64,omitempty"`
+ F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"`
+ // More fields with crazy defaults.
+ F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,json=FPinf,def=inf" json:"F_Pinf,omitempty"`
+ F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,json=FNinf,def=-inf" json:"F_Ninf,omitempty"`
+ F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,json=FNan,def=nan" json:"F_Nan,omitempty"`
+ // Sub-message.
+ Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"`
+ // Redundant but explicit defaults.
+ StrZero *string `protobuf:"bytes,19,opt,name=str_zero,json=strZero,def=" json:"str_zero,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Defaults) Reset() { *m = Defaults{} }
+func (m *Defaults) String() string { return proto.CompactTextString(m) }
+func (*Defaults) ProtoMessage() {}
+func (*Defaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+
+const Default_Defaults_F_Bool bool = true
+const Default_Defaults_F_Int32 int32 = 32
+const Default_Defaults_F_Int64 int64 = 64
+const Default_Defaults_F_Fixed32 uint32 = 320
+const Default_Defaults_F_Fixed64 uint64 = 640
+const Default_Defaults_F_Uint32 uint32 = 3200
+const Default_Defaults_F_Uint64 uint64 = 6400
+const Default_Defaults_F_Float float32 = 314159
+const Default_Defaults_F_Double float64 = 271828
+const Default_Defaults_F_String string = "hello, \"world!\"\n"
+
+var Default_Defaults_F_Bytes []byte = []byte("Bignose")
+
+const Default_Defaults_F_Sint32 int32 = -32
+const Default_Defaults_F_Sint64 int64 = -64
+const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN
+
+var Default_Defaults_F_Pinf float32 = float32(math.Inf(1))
+var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1))
+var Default_Defaults_F_Nan float32 = float32(math.NaN())
+
+func (m *Defaults) GetF_Bool() bool {
+ if m != nil && m.F_Bool != nil {
+ return *m.F_Bool
+ }
+ return Default_Defaults_F_Bool
+}
+
+func (m *Defaults) GetF_Int32() int32 {
+ if m != nil && m.F_Int32 != nil {
+ return *m.F_Int32
+ }
+ return Default_Defaults_F_Int32
+}
+
+func (m *Defaults) GetF_Int64() int64 {
+ if m != nil && m.F_Int64 != nil {
+ return *m.F_Int64
+ }
+ return Default_Defaults_F_Int64
+}
+
+func (m *Defaults) GetF_Fixed32() uint32 {
+ if m != nil && m.F_Fixed32 != nil {
+ return *m.F_Fixed32
+ }
+ return Default_Defaults_F_Fixed32
+}
+
+func (m *Defaults) GetF_Fixed64() uint64 {
+ if m != nil && m.F_Fixed64 != nil {
+ return *m.F_Fixed64
+ }
+ return Default_Defaults_F_Fixed64
+}
+
+func (m *Defaults) GetF_Uint32() uint32 {
+ if m != nil && m.F_Uint32 != nil {
+ return *m.F_Uint32
+ }
+ return Default_Defaults_F_Uint32
+}
+
+func (m *Defaults) GetF_Uint64() uint64 {
+ if m != nil && m.F_Uint64 != nil {
+ return *m.F_Uint64
+ }
+ return Default_Defaults_F_Uint64
+}
+
+func (m *Defaults) GetF_Float() float32 {
+ if m != nil && m.F_Float != nil {
+ return *m.F_Float
+ }
+ return Default_Defaults_F_Float
+}
+
+func (m *Defaults) GetF_Double() float64 {
+ if m != nil && m.F_Double != nil {
+ return *m.F_Double
+ }
+ return Default_Defaults_F_Double
+}
+
+func (m *Defaults) GetF_String() string {
+ if m != nil && m.F_String != nil {
+ return *m.F_String
+ }
+ return Default_Defaults_F_String
+}
+
+func (m *Defaults) GetF_Bytes() []byte {
+ if m != nil && m.F_Bytes != nil {
+ return m.F_Bytes
+ }
+ return append([]byte(nil), Default_Defaults_F_Bytes...)
+}
+
+func (m *Defaults) GetF_Sint32() int32 {
+ if m != nil && m.F_Sint32 != nil {
+ return *m.F_Sint32
+ }
+ return Default_Defaults_F_Sint32
+}
+
+func (m *Defaults) GetF_Sint64() int64 {
+ if m != nil && m.F_Sint64 != nil {
+ return *m.F_Sint64
+ }
+ return Default_Defaults_F_Sint64
+}
+
+func (m *Defaults) GetF_Enum() Defaults_Color {
+ if m != nil && m.F_Enum != nil {
+ return *m.F_Enum
+ }
+ return Default_Defaults_F_Enum
+}
+
+func (m *Defaults) GetF_Pinf() float32 {
+ if m != nil && m.F_Pinf != nil {
+ return *m.F_Pinf
+ }
+ return Default_Defaults_F_Pinf
+}
+
+func (m *Defaults) GetF_Ninf() float32 {
+ if m != nil && m.F_Ninf != nil {
+ return *m.F_Ninf
+ }
+ return Default_Defaults_F_Ninf
+}
+
+func (m *Defaults) GetF_Nan() float32 {
+ if m != nil && m.F_Nan != nil {
+ return *m.F_Nan
+ }
+ return Default_Defaults_F_Nan
+}
+
+func (m *Defaults) GetSub() *SubDefaults {
+ if m != nil {
+ return m.Sub
+ }
+ return nil
+}
+
+func (m *Defaults) GetStrZero() string {
+ if m != nil && m.StrZero != nil {
+ return *m.StrZero
+ }
+ return ""
+}
+
+type SubDefaults struct {
+ N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SubDefaults) Reset() { *m = SubDefaults{} }
+func (m *SubDefaults) String() string { return proto.CompactTextString(m) }
+func (*SubDefaults) ProtoMessage() {}
+func (*SubDefaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+
+const Default_SubDefaults_N int64 = 7
+
+func (m *SubDefaults) GetN() int64 {
+ if m != nil && m.N != nil {
+ return *m.N
+ }
+ return Default_SubDefaults_N
+}
+
+type RepeatedEnum struct {
+ Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} }
+func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) }
+func (*RepeatedEnum) ProtoMessage() {}
+func (*RepeatedEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+
+func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color {
+ if m != nil {
+ return m.Color
+ }
+ return nil
+}
+
+type MoreRepeated struct {
+ Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"`
+ BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed,json=boolsPacked" json:"bools_packed,omitempty"`
+ Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"`
+ IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed,json=intsPacked" json:"ints_packed,omitempty"`
+ Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed,json=int64sPacked" json:"int64s_packed,omitempty"`
+ Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"`
+ Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MoreRepeated) Reset() { *m = MoreRepeated{} }
+func (m *MoreRepeated) String() string { return proto.CompactTextString(m) }
+func (*MoreRepeated) ProtoMessage() {}
+func (*MoreRepeated) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+
+func (m *MoreRepeated) GetBools() []bool {
+ if m != nil {
+ return m.Bools
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetBoolsPacked() []bool {
+ if m != nil {
+ return m.BoolsPacked
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetInts() []int32 {
+ if m != nil {
+ return m.Ints
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetIntsPacked() []int32 {
+ if m != nil {
+ return m.IntsPacked
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetInt64SPacked() []int64 {
+ if m != nil {
+ return m.Int64SPacked
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetStrings() []string {
+ if m != nil {
+ return m.Strings
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetFixeds() []uint32 {
+ if m != nil {
+ return m.Fixeds
+ }
+ return nil
+}
+
+type GroupOld struct {
+ G *GroupOld_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GroupOld) Reset() { *m = GroupOld{} }
+func (m *GroupOld) String() string { return proto.CompactTextString(m) }
+func (*GroupOld) ProtoMessage() {}
+func (*GroupOld) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+
+func (m *GroupOld) GetG() *GroupOld_G {
+ if m != nil {
+ return m.G
+ }
+ return nil
+}
+
+type GroupOld_G struct {
+ X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GroupOld_G) Reset() { *m = GroupOld_G{} }
+func (m *GroupOld_G) String() string { return proto.CompactTextString(m) }
+func (*GroupOld_G) ProtoMessage() {}
+func (*GroupOld_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25, 0} }
+
+func (m *GroupOld_G) GetX() int32 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+type GroupNew struct {
+ G *GroupNew_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GroupNew) Reset() { *m = GroupNew{} }
+func (m *GroupNew) String() string { return proto.CompactTextString(m) }
+func (*GroupNew) ProtoMessage() {}
+func (*GroupNew) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+
+func (m *GroupNew) GetG() *GroupNew_G {
+ if m != nil {
+ return m.G
+ }
+ return nil
+}
+
+type GroupNew_G struct {
+ X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"`
+ Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GroupNew_G) Reset() { *m = GroupNew_G{} }
+func (m *GroupNew_G) String() string { return proto.CompactTextString(m) }
+func (*GroupNew_G) ProtoMessage() {}
+func (*GroupNew_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26, 0} }
+
+func (m *GroupNew_G) GetX() int32 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+func (m *GroupNew_G) GetY() int32 {
+ if m != nil && m.Y != nil {
+ return *m.Y
+ }
+ return 0
+}
+
+type FloatingPoint struct {
+ F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"`
+ Exact *bool `protobuf:"varint,2,opt,name=exact" json:"exact,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FloatingPoint) Reset() { *m = FloatingPoint{} }
+func (m *FloatingPoint) String() string { return proto.CompactTextString(m) }
+func (*FloatingPoint) ProtoMessage() {}
+func (*FloatingPoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+
+func (m *FloatingPoint) GetF() float64 {
+ if m != nil && m.F != nil {
+ return *m.F
+ }
+ return 0
+}
+
+func (m *FloatingPoint) GetExact() bool {
+ if m != nil && m.Exact != nil {
+ return *m.Exact
+ }
+ return false
+}
+
+type MessageWithMap struct {
+ NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str,json=strToStr" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
+func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
+func (*MessageWithMap) ProtoMessage() {}
+func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+
+func (m *MessageWithMap) GetNameMapping() map[int32]string {
+ if m != nil {
+ return m.NameMapping
+ }
+ return nil
+}
+
+func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint {
+ if m != nil {
+ return m.MsgMapping
+ }
+ return nil
+}
+
+func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
+ if m != nil {
+ return m.ByteMapping
+ }
+ return nil
+}
+
+func (m *MessageWithMap) GetStrToStr() map[string]string {
+ if m != nil {
+ return m.StrToStr
+ }
+ return nil
+}
+
+type Oneof struct {
+ // Types that are valid to be assigned to Union:
+ // *Oneof_F_Bool
+ // *Oneof_F_Int32
+ // *Oneof_F_Int64
+ // *Oneof_F_Fixed32
+ // *Oneof_F_Fixed64
+ // *Oneof_F_Uint32
+ // *Oneof_F_Uint64
+ // *Oneof_F_Float
+ // *Oneof_F_Double
+ // *Oneof_F_String
+ // *Oneof_F_Bytes
+ // *Oneof_F_Sint32
+ // *Oneof_F_Sint64
+ // *Oneof_F_Enum
+ // *Oneof_F_Message
+ // *Oneof_FGroup
+ // *Oneof_F_Largest_Tag
+ Union isOneof_Union `protobuf_oneof:"union"`
+ // Types that are valid to be assigned to Tormato:
+ // *Oneof_Value
+ Tormato isOneof_Tormato `protobuf_oneof:"tormato"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Oneof) Reset() { *m = Oneof{} }
+func (m *Oneof) String() string { return proto.CompactTextString(m) }
+func (*Oneof) ProtoMessage() {}
+func (*Oneof) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+
+type isOneof_Union interface {
+ isOneof_Union()
+}
+type isOneof_Tormato interface {
+ isOneof_Tormato()
+}
+
+type Oneof_F_Bool struct {
+ F_Bool bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,oneof"`
+}
+type Oneof_F_Int32 struct {
+ F_Int32 int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,oneof"`
+}
+type Oneof_F_Int64 struct {
+ F_Int64 int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,oneof"`
+}
+type Oneof_F_Fixed32 struct {
+ F_Fixed32 uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,oneof"`
+}
+type Oneof_F_Fixed64 struct {
+ F_Fixed64 uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,oneof"`
+}
+type Oneof_F_Uint32 struct {
+ F_Uint32 uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,oneof"`
+}
+type Oneof_F_Uint64 struct {
+ F_Uint64 uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,oneof"`
+}
+type Oneof_F_Float struct {
+ F_Float float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,oneof"`
+}
+type Oneof_F_Double struct {
+ F_Double float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,oneof"`
+}
+type Oneof_F_String struct {
+ F_String string `protobuf:"bytes,10,opt,name=F_String,json=FString,oneof"`
+}
+type Oneof_F_Bytes struct {
+ F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,oneof"`
+}
+type Oneof_F_Sint32 struct {
+ F_Sint32 int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,oneof"`
+}
+type Oneof_F_Sint64 struct {
+ F_Sint64 int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,oneof"`
+}
+type Oneof_F_Enum struct {
+ F_Enum MyMessage_Color `protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=testdata.MyMessage_Color,oneof"`
+}
+type Oneof_F_Message struct {
+ F_Message *GoTestField `protobuf:"bytes,15,opt,name=F_Message,json=FMessage,oneof"`
+}
+type Oneof_FGroup struct {
+ FGroup *Oneof_F_Group `protobuf:"group,16,opt,name=F_Group,json=fGroup,oneof"`
+}
+type Oneof_F_Largest_Tag struct {
+ F_Largest_Tag int32 `protobuf:"varint,536870911,opt,name=F_Largest_Tag,json=FLargestTag,oneof"`
+}
+type Oneof_Value struct {
+ Value int32 `protobuf:"varint,100,opt,name=value,oneof"`
+}
+
+func (*Oneof_F_Bool) isOneof_Union() {}
+func (*Oneof_F_Int32) isOneof_Union() {}
+func (*Oneof_F_Int64) isOneof_Union() {}
+func (*Oneof_F_Fixed32) isOneof_Union() {}
+func (*Oneof_F_Fixed64) isOneof_Union() {}
+func (*Oneof_F_Uint32) isOneof_Union() {}
+func (*Oneof_F_Uint64) isOneof_Union() {}
+func (*Oneof_F_Float) isOneof_Union() {}
+func (*Oneof_F_Double) isOneof_Union() {}
+func (*Oneof_F_String) isOneof_Union() {}
+func (*Oneof_F_Bytes) isOneof_Union() {}
+func (*Oneof_F_Sint32) isOneof_Union() {}
+func (*Oneof_F_Sint64) isOneof_Union() {}
+func (*Oneof_F_Enum) isOneof_Union() {}
+func (*Oneof_F_Message) isOneof_Union() {}
+func (*Oneof_FGroup) isOneof_Union() {}
+func (*Oneof_F_Largest_Tag) isOneof_Union() {}
+func (*Oneof_Value) isOneof_Tormato() {}
+
+func (m *Oneof) GetUnion() isOneof_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+}
+func (m *Oneof) GetTormato() isOneof_Tormato {
+ if m != nil {
+ return m.Tormato
+ }
+ return nil
+}
+
+func (m *Oneof) GetF_Bool() bool {
+ if x, ok := m.GetUnion().(*Oneof_F_Bool); ok {
+ return x.F_Bool
+ }
+ return false
+}
+
+func (m *Oneof) GetF_Int32() int32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Int32); ok {
+ return x.F_Int32
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Int64() int64 {
+ if x, ok := m.GetUnion().(*Oneof_F_Int64); ok {
+ return x.F_Int64
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Fixed32() uint32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Fixed32); ok {
+ return x.F_Fixed32
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Fixed64() uint64 {
+ if x, ok := m.GetUnion().(*Oneof_F_Fixed64); ok {
+ return x.F_Fixed64
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Uint32() uint32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Uint32); ok {
+ return x.F_Uint32
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Uint64() uint64 {
+ if x, ok := m.GetUnion().(*Oneof_F_Uint64); ok {
+ return x.F_Uint64
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Float() float32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Float); ok {
+ return x.F_Float
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Double() float64 {
+ if x, ok := m.GetUnion().(*Oneof_F_Double); ok {
+ return x.F_Double
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_String() string {
+ if x, ok := m.GetUnion().(*Oneof_F_String); ok {
+ return x.F_String
+ }
+ return ""
+}
+
+func (m *Oneof) GetF_Bytes() []byte {
+ if x, ok := m.GetUnion().(*Oneof_F_Bytes); ok {
+ return x.F_Bytes
+ }
+ return nil
+}
+
+func (m *Oneof) GetF_Sint32() int32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Sint32); ok {
+ return x.F_Sint32
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Sint64() int64 {
+ if x, ok := m.GetUnion().(*Oneof_F_Sint64); ok {
+ return x.F_Sint64
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Enum() MyMessage_Color {
+ if x, ok := m.GetUnion().(*Oneof_F_Enum); ok {
+ return x.F_Enum
+ }
+ return MyMessage_RED
+}
+
+func (m *Oneof) GetF_Message() *GoTestField {
+ if x, ok := m.GetUnion().(*Oneof_F_Message); ok {
+ return x.F_Message
+ }
+ return nil
+}
+
+func (m *Oneof) GetFGroup() *Oneof_F_Group {
+ if x, ok := m.GetUnion().(*Oneof_FGroup); ok {
+ return x.FGroup
+ }
+ return nil
+}
+
+func (m *Oneof) GetF_Largest_Tag() int32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Largest_Tag); ok {
+ return x.F_Largest_Tag
+ }
+ return 0
+}
+
+func (m *Oneof) GetValue() int32 {
+ if x, ok := m.GetTormato().(*Oneof_Value); ok {
+ return x.Value
+ }
+ return 0
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Oneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Oneof_OneofMarshaler, _Oneof_OneofUnmarshaler, _Oneof_OneofSizer, []interface{}{
+ (*Oneof_F_Bool)(nil),
+ (*Oneof_F_Int32)(nil),
+ (*Oneof_F_Int64)(nil),
+ (*Oneof_F_Fixed32)(nil),
+ (*Oneof_F_Fixed64)(nil),
+ (*Oneof_F_Uint32)(nil),
+ (*Oneof_F_Uint64)(nil),
+ (*Oneof_F_Float)(nil),
+ (*Oneof_F_Double)(nil),
+ (*Oneof_F_String)(nil),
+ (*Oneof_F_Bytes)(nil),
+ (*Oneof_F_Sint32)(nil),
+ (*Oneof_F_Sint64)(nil),
+ (*Oneof_F_Enum)(nil),
+ (*Oneof_F_Message)(nil),
+ (*Oneof_FGroup)(nil),
+ (*Oneof_F_Largest_Tag)(nil),
+ (*Oneof_Value)(nil),
+ }
+}
+
+func _Oneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Oneof)
+ // union
+ switch x := m.Union.(type) {
+ case *Oneof_F_Bool:
+ t := uint64(0)
+ if x.F_Bool {
+ t = 1
+ }
+ b.EncodeVarint(1<<3 | proto.WireVarint)
+ b.EncodeVarint(t)
+ case *Oneof_F_Int32:
+ b.EncodeVarint(2<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Int32))
+ case *Oneof_F_Int64:
+ b.EncodeVarint(3<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Int64))
+ case *Oneof_F_Fixed32:
+ b.EncodeVarint(4<<3 | proto.WireFixed32)
+ b.EncodeFixed32(uint64(x.F_Fixed32))
+ case *Oneof_F_Fixed64:
+ b.EncodeVarint(5<<3 | proto.WireFixed64)
+ b.EncodeFixed64(uint64(x.F_Fixed64))
+ case *Oneof_F_Uint32:
+ b.EncodeVarint(6<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Uint32))
+ case *Oneof_F_Uint64:
+ b.EncodeVarint(7<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Uint64))
+ case *Oneof_F_Float:
+ b.EncodeVarint(8<<3 | proto.WireFixed32)
+ b.EncodeFixed32(uint64(math.Float32bits(x.F_Float)))
+ case *Oneof_F_Double:
+ b.EncodeVarint(9<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.F_Double))
+ case *Oneof_F_String:
+ b.EncodeVarint(10<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.F_String)
+ case *Oneof_F_Bytes:
+ b.EncodeVarint(11<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.F_Bytes)
+ case *Oneof_F_Sint32:
+ b.EncodeVarint(12<<3 | proto.WireVarint)
+ b.EncodeZigzag32(uint64(x.F_Sint32))
+ case *Oneof_F_Sint64:
+ b.EncodeVarint(13<<3 | proto.WireVarint)
+ b.EncodeZigzag64(uint64(x.F_Sint64))
+ case *Oneof_F_Enum:
+ b.EncodeVarint(14<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Enum))
+ case *Oneof_F_Message:
+ b.EncodeVarint(15<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.F_Message); err != nil {
+ return err
+ }
+ case *Oneof_FGroup:
+ b.EncodeVarint(16<<3 | proto.WireStartGroup)
+ if err := b.Marshal(x.FGroup); err != nil {
+ return err
+ }
+ b.EncodeVarint(16<<3 | proto.WireEndGroup)
+ case *Oneof_F_Largest_Tag:
+ b.EncodeVarint(536870911<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Largest_Tag))
+ case nil:
+ default:
+ return fmt.Errorf("Oneof.Union has unexpected type %T", x)
+ }
+ // tormato
+ switch x := m.Tormato.(type) {
+ case *Oneof_Value:
+ b.EncodeVarint(100<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Value))
+ case nil:
+ default:
+ return fmt.Errorf("Oneof.Tormato has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Oneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Oneof)
+ switch tag {
+ case 1: // union.F_Bool
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Bool{x != 0}
+ return true, err
+ case 2: // union.F_Int32
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Int32{int32(x)}
+ return true, err
+ case 3: // union.F_Int64
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Int64{int64(x)}
+ return true, err
+ case 4: // union.F_Fixed32
+ if wire != proto.WireFixed32 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed32()
+ m.Union = &Oneof_F_Fixed32{uint32(x)}
+ return true, err
+ case 5: // union.F_Fixed64
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Union = &Oneof_F_Fixed64{x}
+ return true, err
+ case 6: // union.F_Uint32
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Uint32{uint32(x)}
+ return true, err
+ case 7: // union.F_Uint64
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Uint64{x}
+ return true, err
+ case 8: // union.F_Float
+ if wire != proto.WireFixed32 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed32()
+ m.Union = &Oneof_F_Float{math.Float32frombits(uint32(x))}
+ return true, err
+ case 9: // union.F_Double
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Union = &Oneof_F_Double{math.Float64frombits(x)}
+ return true, err
+ case 10: // union.F_String
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Union = &Oneof_F_String{x}
+ return true, err
+ case 11: // union.F_Bytes
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.Union = &Oneof_F_Bytes{x}
+ return true, err
+ case 12: // union.F_Sint32
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeZigzag32()
+ m.Union = &Oneof_F_Sint32{int32(x)}
+ return true, err
+ case 13: // union.F_Sint64
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeZigzag64()
+ m.Union = &Oneof_F_Sint64{int64(x)}
+ return true, err
+ case 14: // union.F_Enum
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Enum{MyMessage_Color(x)}
+ return true, err
+ case 15: // union.F_Message
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(GoTestField)
+ err := b.DecodeMessage(msg)
+ m.Union = &Oneof_F_Message{msg}
+ return true, err
+ case 16: // union.f_group
+ if wire != proto.WireStartGroup {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Oneof_F_Group)
+ err := b.DecodeGroup(msg)
+ m.Union = &Oneof_FGroup{msg}
+ return true, err
+ case 536870911: // union.F_Largest_Tag
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Largest_Tag{int32(x)}
+ return true, err
+ case 100: // tormato.value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Tormato = &Oneof_Value{int32(x)}
+ return true, err
+ default:
+ return false, nil
+ }
+}
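+
+// The unmarshaler above reports whether it consumed the field: it returns
+// (true, err) for any tag belonging to one of the message's oneofs, including
+// wire-type mismatches via proto.ErrInternalBadWireType, and (false, nil) for
+// unknown tags so the regular decoder can handle them.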
+
+func _Oneof_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Oneof)
+ // union
+ switch x := m.Union.(type) {
+ case *Oneof_F_Bool:
+ n += proto.SizeVarint(1<<3 | proto.WireVarint)
+ n += 1
+ case *Oneof_F_Int32:
+ n += proto.SizeVarint(2<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Int32))
+ case *Oneof_F_Int64:
+ n += proto.SizeVarint(3<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Int64))
+ case *Oneof_F_Fixed32:
+ n += proto.SizeVarint(4<<3 | proto.WireFixed32)
+ n += 4
+ case *Oneof_F_Fixed64:
+ n += proto.SizeVarint(5<<3 | proto.WireFixed64)
+ n += 8
+ case *Oneof_F_Uint32:
+ n += proto.SizeVarint(6<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Uint32))
+ case *Oneof_F_Uint64:
+ n += proto.SizeVarint(7<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Uint64))
+ case *Oneof_F_Float:
+ n += proto.SizeVarint(8<<3 | proto.WireFixed32)
+ n += 4
+ case *Oneof_F_Double:
+ n += proto.SizeVarint(9<<3 | proto.WireFixed64)
+ n += 8
+ case *Oneof_F_String:
+ n += proto.SizeVarint(10<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.F_String)))
+ n += len(x.F_String)
+ case *Oneof_F_Bytes:
+ n += proto.SizeVarint(11<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.F_Bytes)))
+ n += len(x.F_Bytes)
+ case *Oneof_F_Sint32:
+ n += proto.SizeVarint(12<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64((uint32(x.F_Sint32) << 1) ^ uint32((int32(x.F_Sint32) >> 31))))
+ case *Oneof_F_Sint64:
+ n += proto.SizeVarint(13<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(uint64(x.F_Sint64<<1) ^ uint64((int64(x.F_Sint64) >> 63))))
+ case *Oneof_F_Enum:
+ n += proto.SizeVarint(14<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Enum))
+ case *Oneof_F_Message:
+ s := proto.Size(x.F_Message)
+ n += proto.SizeVarint(15<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Oneof_FGroup:
+ n += proto.SizeVarint(16<<3 | proto.WireStartGroup)
+ n += proto.Size(x.FGroup)
+ n += proto.SizeVarint(16<<3 | proto.WireEndGroup)
+ case *Oneof_F_Largest_Tag:
+ n += proto.SizeVarint(536870911<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Largest_Tag))
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ // tormato
+ switch x := m.Tormato.(type) {
+ case *Oneof_Value:
+ n += proto.SizeVarint(100<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Value))
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
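+
+// The sizer above counts exactly the bytes the marshaler emits: the varint
+// key plus the payload size for each case, with sint32/sint64 zigzag-encoded
+// ((v<<1)^(v>>31) and (v<<1)^(v>>63)) before proto.SizeVarint, matching
+// EncodeZigzag32 and EncodeZigzag64 in the marshaler.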
+
+type Oneof_F_Group struct {
+ X *int32 `protobuf:"varint,17,opt,name=x" json:"x,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Oneof_F_Group) Reset() { *m = Oneof_F_Group{} }
+func (m *Oneof_F_Group) String() string { return proto.CompactTextString(m) }
+func (*Oneof_F_Group) ProtoMessage() {}
+func (*Oneof_F_Group) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29, 0} }
+
+func (m *Oneof_F_Group) GetX() int32 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+type Communique struct {
+ MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"`
+ // This is a oneof, called "union".
+ //
+ // Types that are valid to be assigned to Union:
+ // *Communique_Number
+ // *Communique_Name
+ // *Communique_Data
+ // *Communique_TempC
+ // *Communique_Col
+ // *Communique_Msg
+ Union isCommunique_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Communique) Reset() { *m = Communique{} }
+func (m *Communique) String() string { return proto.CompactTextString(m) }
+func (*Communique) ProtoMessage() {}
+func (*Communique) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
+
+type isCommunique_Union interface {
+ isCommunique_Union()
+}
+
+type Communique_Number struct {
+ Number int32 `protobuf:"varint,5,opt,name=number,oneof"`
+}
+type Communique_Name struct {
+ Name string `protobuf:"bytes,6,opt,name=name,oneof"`
+}
+type Communique_Data struct {
+ Data []byte `protobuf:"bytes,7,opt,name=data,oneof"`
+}
+type Communique_TempC struct {
+ TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"`
+}
+type Communique_Col struct {
+ Col MyMessage_Color `protobuf:"varint,9,opt,name=col,enum=testdata.MyMessage_Color,oneof"`
+}
+type Communique_Msg struct {
+ Msg *Strings `protobuf:"bytes,10,opt,name=msg,oneof"`
+}
+
+func (*Communique_Number) isCommunique_Union() {}
+func (*Communique_Name) isCommunique_Union() {}
+func (*Communique_Data) isCommunique_Union() {}
+func (*Communique_TempC) isCommunique_Union() {}
+func (*Communique_Col) isCommunique_Union() {}
+func (*Communique_Msg) isCommunique_Union() {}
+
+func (m *Communique) GetUnion() isCommunique_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+}
+
+func (m *Communique) GetMakeMeCry() bool {
+ if m != nil && m.MakeMeCry != nil {
+ return *m.MakeMeCry
+ }
+ return false
+}
+
+func (m *Communique) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Communique_Number); ok {
+ return x.Number
+ }
+ return 0
+}
+
+func (m *Communique) GetName() string {
+ if x, ok := m.GetUnion().(*Communique_Name); ok {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *Communique) GetData() []byte {
+ if x, ok := m.GetUnion().(*Communique_Data); ok {
+ return x.Data
+ }
+ return nil
+}
+
+func (m *Communique) GetTempC() float64 {
+ if x, ok := m.GetUnion().(*Communique_TempC); ok {
+ return x.TempC
+ }
+ return 0
+}
+
+func (m *Communique) GetCol() MyMessage_Color {
+ if x, ok := m.GetUnion().(*Communique_Col); ok {
+ return x.Col
+ }
+ return MyMessage_RED
+}
+
+func (m *Communique) GetMsg() *Strings {
+ if x, ok := m.GetUnion().(*Communique_Msg); ok {
+ return x.Msg
+ }
+ return nil
+}
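+
+// Illustrative use of the accessors above (a hypothetical snippet, not emitted
+// by the generator): assigning one wrapper type to Union selects that case,
+// and the other getters fall back to their zero values.
+//
+//	c := &Communique{Union: &Communique_Number{Number: 42}}
+//	n, name := c.GetNumber(), c.GetName() // 42, ""
+//	_, _ = n, name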
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{
+ (*Communique_Number)(nil),
+ (*Communique_Name)(nil),
+ (*Communique_Data)(nil),
+ (*Communique_TempC)(nil),
+ (*Communique_Col)(nil),
+ (*Communique_Msg)(nil),
+ }
+}
+
+func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Communique)
+ // union
+ switch x := m.Union.(type) {
+ case *Communique_Number:
+ b.EncodeVarint(5<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Number))
+ case *Communique_Name:
+ b.EncodeVarint(6<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.Name)
+ case *Communique_Data:
+ b.EncodeVarint(7<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.Data)
+ case *Communique_TempC:
+ b.EncodeVarint(8<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.TempC))
+ case *Communique_Col:
+ b.EncodeVarint(9<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Col))
+ case *Communique_Msg:
+ b.EncodeVarint(10<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Msg); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("Communique.Union has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Communique)
+ switch tag {
+ case 5: // union.number
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Communique_Number{int32(x)}
+ return true, err
+ case 6: // union.name
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Union = &Communique_Name{x}
+ return true, err
+ case 7: // union.data
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.Union = &Communique_Data{x}
+ return true, err
+ case 8: // union.temp_c
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Union = &Communique_TempC{math.Float64frombits(x)}
+ return true, err
+ case 9: // union.col
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Communique_Col{MyMessage_Color(x)}
+ return true, err
+ case 10: // union.msg
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Strings)
+ err := b.DecodeMessage(msg)
+ m.Union = &Communique_Msg{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Communique_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Communique)
+ // union
+ switch x := m.Union.(type) {
+ case *Communique_Number:
+ n += proto.SizeVarint(5<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Number))
+ case *Communique_Name:
+ n += proto.SizeVarint(6<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.Name)))
+ n += len(x.Name)
+ case *Communique_Data:
+ n += proto.SizeVarint(7<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.Data)))
+ n += len(x.Data)
+ case *Communique_TempC:
+ n += proto.SizeVarint(8<<3 | proto.WireFixed64)
+ n += 8
+ case *Communique_Col:
+ n += proto.SizeVarint(9<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Col))
+ case *Communique_Msg:
+ s := proto.Size(x.Msg)
+ n += proto.SizeVarint(10<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+var E_Greeting = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessage)(nil),
+ ExtensionType: ([]string)(nil),
+ Field: 106,
+ Name: "testdata.greeting",
+ Tag: "bytes,106,rep,name=greeting",
+ Filename: "test.proto",
+}
+
+var E_Complex = &proto.ExtensionDesc{
+ ExtendedType: (*OtherMessage)(nil),
+ ExtensionType: (*ComplexExtension)(nil),
+ Field: 200,
+ Name: "testdata.complex",
+ Tag: "bytes,200,opt,name=complex",
+ Filename: "test.proto",
+}
+
+var E_RComplex = &proto.ExtensionDesc{
+ ExtendedType: (*OtherMessage)(nil),
+ ExtensionType: ([]*ComplexExtension)(nil),
+ Field: 201,
+ Name: "testdata.r_complex",
+ Tag: "bytes,201,rep,name=r_complex,json=rComplex",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultDouble = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*float64)(nil),
+ Field: 101,
+ Name: "testdata.no_default_double",
+ Tag: "fixed64,101,opt,name=no_default_double,json=noDefaultDouble",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultFloat = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*float32)(nil),
+ Field: 102,
+ Name: "testdata.no_default_float",
+ Tag: "fixed32,102,opt,name=no_default_float,json=noDefaultFloat",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultInt32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 103,
+ Name: "testdata.no_default_int32",
+ Tag: "varint,103,opt,name=no_default_int32,json=noDefaultInt32",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultInt64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 104,
+ Name: "testdata.no_default_int64",
+ Tag: "varint,104,opt,name=no_default_int64,json=noDefaultInt64",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultUint32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint32)(nil),
+ Field: 105,
+ Name: "testdata.no_default_uint32",
+ Tag: "varint,105,opt,name=no_default_uint32,json=noDefaultUint32",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultUint64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint64)(nil),
+ Field: 106,
+ Name: "testdata.no_default_uint64",
+ Tag: "varint,106,opt,name=no_default_uint64,json=noDefaultUint64",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultSint32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 107,
+ Name: "testdata.no_default_sint32",
+ Tag: "zigzag32,107,opt,name=no_default_sint32,json=noDefaultSint32",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultSint64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 108,
+ Name: "testdata.no_default_sint64",
+ Tag: "zigzag64,108,opt,name=no_default_sint64,json=noDefaultSint64",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultFixed32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint32)(nil),
+ Field: 109,
+ Name: "testdata.no_default_fixed32",
+ Tag: "fixed32,109,opt,name=no_default_fixed32,json=noDefaultFixed32",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultFixed64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint64)(nil),
+ Field: 110,
+ Name: "testdata.no_default_fixed64",
+ Tag: "fixed64,110,opt,name=no_default_fixed64,json=noDefaultFixed64",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultSfixed32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 111,
+ Name: "testdata.no_default_sfixed32",
+ Tag: "fixed32,111,opt,name=no_default_sfixed32,json=noDefaultSfixed32",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultSfixed64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 112,
+ Name: "testdata.no_default_sfixed64",
+ Tag: "fixed64,112,opt,name=no_default_sfixed64,json=noDefaultSfixed64",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultBool = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 113,
+ Name: "testdata.no_default_bool",
+ Tag: "varint,113,opt,name=no_default_bool,json=noDefaultBool",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultString = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 114,
+ Name: "testdata.no_default_string",
+ Tag: "bytes,114,opt,name=no_default_string,json=noDefaultString",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultBytes = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: ([]byte)(nil),
+ Field: 115,
+ Name: "testdata.no_default_bytes",
+ Tag: "bytes,115,opt,name=no_default_bytes,json=noDefaultBytes",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultEnum = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil),
+ Field: 116,
+ Name: "testdata.no_default_enum",
+ Tag: "varint,116,opt,name=no_default_enum,json=noDefaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum",
+ Filename: "test.proto",
+}
+
+var E_DefaultDouble = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*float64)(nil),
+ Field: 201,
+ Name: "testdata.default_double",
+ Tag: "fixed64,201,opt,name=default_double,json=defaultDouble,def=3.1415",
+ Filename: "test.proto",
+}
+
+var E_DefaultFloat = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*float32)(nil),
+ Field: 202,
+ Name: "testdata.default_float",
+ Tag: "fixed32,202,opt,name=default_float,json=defaultFloat,def=3.14",
+ Filename: "test.proto",
+}
+
+var E_DefaultInt32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 203,
+ Name: "testdata.default_int32",
+ Tag: "varint,203,opt,name=default_int32,json=defaultInt32,def=42",
+ Filename: "test.proto",
+}
+
+var E_DefaultInt64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 204,
+ Name: "testdata.default_int64",
+ Tag: "varint,204,opt,name=default_int64,json=defaultInt64,def=43",
+ Filename: "test.proto",
+}
+
+var E_DefaultUint32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint32)(nil),
+ Field: 205,
+ Name: "testdata.default_uint32",
+ Tag: "varint,205,opt,name=default_uint32,json=defaultUint32,def=44",
+ Filename: "test.proto",
+}
+
+var E_DefaultUint64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint64)(nil),
+ Field: 206,
+ Name: "testdata.default_uint64",
+ Tag: "varint,206,opt,name=default_uint64,json=defaultUint64,def=45",
+ Filename: "test.proto",
+}
+
+var E_DefaultSint32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 207,
+ Name: "testdata.default_sint32",
+ Tag: "zigzag32,207,opt,name=default_sint32,json=defaultSint32,def=46",
+ Filename: "test.proto",
+}
+
+var E_DefaultSint64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 208,
+ Name: "testdata.default_sint64",
+ Tag: "zigzag64,208,opt,name=default_sint64,json=defaultSint64,def=47",
+ Filename: "test.proto",
+}
+
+var E_DefaultFixed32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint32)(nil),
+ Field: 209,
+ Name: "testdata.default_fixed32",
+ Tag: "fixed32,209,opt,name=default_fixed32,json=defaultFixed32,def=48",
+ Filename: "test.proto",
+}
+
+var E_DefaultFixed64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint64)(nil),
+ Field: 210,
+ Name: "testdata.default_fixed64",
+ Tag: "fixed64,210,opt,name=default_fixed64,json=defaultFixed64,def=49",
+ Filename: "test.proto",
+}
+
+var E_DefaultSfixed32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 211,
+ Name: "testdata.default_sfixed32",
+ Tag: "fixed32,211,opt,name=default_sfixed32,json=defaultSfixed32,def=50",
+ Filename: "test.proto",
+}
+
+var E_DefaultSfixed64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 212,
+ Name: "testdata.default_sfixed64",
+ Tag: "fixed64,212,opt,name=default_sfixed64,json=defaultSfixed64,def=51",
+ Filename: "test.proto",
+}
+
+var E_DefaultBool = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 213,
+ Name: "testdata.default_bool",
+ Tag: "varint,213,opt,name=default_bool,json=defaultBool,def=1",
+ Filename: "test.proto",
+}
+
+var E_DefaultString = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 214,
+ Name: "testdata.default_string",
+ Tag: "bytes,214,opt,name=default_string,json=defaultString,def=Hello, string",
+ Filename: "test.proto",
+}
+
+var E_DefaultBytes = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: ([]byte)(nil),
+ Field: 215,
+ Name: "testdata.default_bytes",
+ Tag: "bytes,215,opt,name=default_bytes,json=defaultBytes,def=Hello, bytes",
+ Filename: "test.proto",
+}
+
+var E_DefaultEnum = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil),
+ Field: 216,
+ Name: "testdata.default_enum",
+ Tag: "varint,216,opt,name=default_enum,json=defaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1",
+ Filename: "test.proto",
+}
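+
+// Illustrative use of the extension descriptors above via the proto package's
+// extension API (a sketch, not part of the generated file):
+//
+//	m := &DefaultsMessage{}
+//	if err := proto.SetExtension(m, E_DefaultString, proto.String("hello")); err != nil {
+//		panic(err)
+//	}
+//	v, err := proto.GetExtension(m, E_DefaultString) // v.(*string) when err == nil
+//	_, _ = v, err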
+
+var E_X201 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 201,
+ Name: "testdata.x201",
+ Tag: "bytes,201,opt,name=x201",
+ Filename: "test.proto",
+}
+
+var E_X202 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 202,
+ Name: "testdata.x202",
+ Tag: "bytes,202,opt,name=x202",
+ Filename: "test.proto",
+}
+
+var E_X203 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 203,
+ Name: "testdata.x203",
+ Tag: "bytes,203,opt,name=x203",
+ Filename: "test.proto",
+}
+
+var E_X204 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 204,
+ Name: "testdata.x204",
+ Tag: "bytes,204,opt,name=x204",
+ Filename: "test.proto",
+}
+
+var E_X205 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 205,
+ Name: "testdata.x205",
+ Tag: "bytes,205,opt,name=x205",
+ Filename: "test.proto",
+}
+
+var E_X206 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 206,
+ Name: "testdata.x206",
+ Tag: "bytes,206,opt,name=x206",
+ Filename: "test.proto",
+}
+
+var E_X207 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 207,
+ Name: "testdata.x207",
+ Tag: "bytes,207,opt,name=x207",
+ Filename: "test.proto",
+}
+
+var E_X208 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 208,
+ Name: "testdata.x208",
+ Tag: "bytes,208,opt,name=x208",
+ Filename: "test.proto",
+}
+
+var E_X209 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 209,
+ Name: "testdata.x209",
+ Tag: "bytes,209,opt,name=x209",
+ Filename: "test.proto",
+}
+
+var E_X210 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 210,
+ Name: "testdata.x210",
+ Tag: "bytes,210,opt,name=x210",
+ Filename: "test.proto",
+}
+
+var E_X211 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 211,
+ Name: "testdata.x211",
+ Tag: "bytes,211,opt,name=x211",
+ Filename: "test.proto",
+}
+
+var E_X212 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 212,
+ Name: "testdata.x212",
+ Tag: "bytes,212,opt,name=x212",
+ Filename: "test.proto",
+}
+
+var E_X213 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 213,
+ Name: "testdata.x213",
+ Tag: "bytes,213,opt,name=x213",
+ Filename: "test.proto",
+}
+
+var E_X214 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 214,
+ Name: "testdata.x214",
+ Tag: "bytes,214,opt,name=x214",
+ Filename: "test.proto",
+}
+
+var E_X215 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 215,
+ Name: "testdata.x215",
+ Tag: "bytes,215,opt,name=x215",
+ Filename: "test.proto",
+}
+
+var E_X216 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 216,
+ Name: "testdata.x216",
+ Tag: "bytes,216,opt,name=x216",
+ Filename: "test.proto",
+}
+
+var E_X217 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 217,
+ Name: "testdata.x217",
+ Tag: "bytes,217,opt,name=x217",
+ Filename: "test.proto",
+}
+
+var E_X218 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 218,
+ Name: "testdata.x218",
+ Tag: "bytes,218,opt,name=x218",
+ Filename: "test.proto",
+}
+
+var E_X219 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 219,
+ Name: "testdata.x219",
+ Tag: "bytes,219,opt,name=x219",
+ Filename: "test.proto",
+}
+
+var E_X220 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 220,
+ Name: "testdata.x220",
+ Tag: "bytes,220,opt,name=x220",
+ Filename: "test.proto",
+}
+
+var E_X221 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 221,
+ Name: "testdata.x221",
+ Tag: "bytes,221,opt,name=x221",
+ Filename: "test.proto",
+}
+
+var E_X222 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 222,
+ Name: "testdata.x222",
+ Tag: "bytes,222,opt,name=x222",
+ Filename: "test.proto",
+}
+
+var E_X223 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 223,
+ Name: "testdata.x223",
+ Tag: "bytes,223,opt,name=x223",
+ Filename: "test.proto",
+}
+
+var E_X224 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 224,
+ Name: "testdata.x224",
+ Tag: "bytes,224,opt,name=x224",
+ Filename: "test.proto",
+}
+
+var E_X225 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 225,
+ Name: "testdata.x225",
+ Tag: "bytes,225,opt,name=x225",
+ Filename: "test.proto",
+}
+
+var E_X226 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 226,
+ Name: "testdata.x226",
+ Tag: "bytes,226,opt,name=x226",
+ Filename: "test.proto",
+}
+
+var E_X227 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 227,
+ Name: "testdata.x227",
+ Tag: "bytes,227,opt,name=x227",
+ Filename: "test.proto",
+}
+
+var E_X228 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 228,
+ Name: "testdata.x228",
+ Tag: "bytes,228,opt,name=x228",
+ Filename: "test.proto",
+}
+
+var E_X229 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 229,
+ Name: "testdata.x229",
+ Tag: "bytes,229,opt,name=x229",
+ Filename: "test.proto",
+}
+
+var E_X230 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 230,
+ Name: "testdata.x230",
+ Tag: "bytes,230,opt,name=x230",
+ Filename: "test.proto",
+}
+
+var E_X231 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 231,
+ Name: "testdata.x231",
+ Tag: "bytes,231,opt,name=x231",
+ Filename: "test.proto",
+}
+
+var E_X232 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 232,
+ Name: "testdata.x232",
+ Tag: "bytes,232,opt,name=x232",
+ Filename: "test.proto",
+}
+
+var E_X233 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 233,
+ Name: "testdata.x233",
+ Tag: "bytes,233,opt,name=x233",
+ Filename: "test.proto",
+}
+
+var E_X234 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 234,
+ Name: "testdata.x234",
+ Tag: "bytes,234,opt,name=x234",
+ Filename: "test.proto",
+}
+
+var E_X235 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 235,
+ Name: "testdata.x235",
+ Tag: "bytes,235,opt,name=x235",
+ Filename: "test.proto",
+}
+
+var E_X236 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 236,
+ Name: "testdata.x236",
+ Tag: "bytes,236,opt,name=x236",
+ Filename: "test.proto",
+}
+
+var E_X237 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 237,
+ Name: "testdata.x237",
+ Tag: "bytes,237,opt,name=x237",
+ Filename: "test.proto",
+}
+
+var E_X238 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 238,
+ Name: "testdata.x238",
+ Tag: "bytes,238,opt,name=x238",
+ Filename: "test.proto",
+}
+
+var E_X239 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 239,
+ Name: "testdata.x239",
+ Tag: "bytes,239,opt,name=x239",
+ Filename: "test.proto",
+}
+
+var E_X240 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 240,
+ Name: "testdata.x240",
+ Tag: "bytes,240,opt,name=x240",
+ Filename: "test.proto",
+}
+
+var E_X241 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 241,
+ Name: "testdata.x241",
+ Tag: "bytes,241,opt,name=x241",
+ Filename: "test.proto",
+}
+
+var E_X242 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 242,
+ Name: "testdata.x242",
+ Tag: "bytes,242,opt,name=x242",
+ Filename: "test.proto",
+}
+
+var E_X243 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 243,
+ Name: "testdata.x243",
+ Tag: "bytes,243,opt,name=x243",
+ Filename: "test.proto",
+}
+
+var E_X244 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 244,
+ Name: "testdata.x244",
+ Tag: "bytes,244,opt,name=x244",
+ Filename: "test.proto",
+}
+
+var E_X245 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 245,
+ Name: "testdata.x245",
+ Tag: "bytes,245,opt,name=x245",
+ Filename: "test.proto",
+}
+
+var E_X246 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 246,
+ Name: "testdata.x246",
+ Tag: "bytes,246,opt,name=x246",
+ Filename: "test.proto",
+}
+
+var E_X247 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 247,
+ Name: "testdata.x247",
+ Tag: "bytes,247,opt,name=x247",
+ Filename: "test.proto",
+}
+
+var E_X248 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 248,
+ Name: "testdata.x248",
+ Tag: "bytes,248,opt,name=x248",
+ Filename: "test.proto",
+}
+
+var E_X249 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 249,
+ Name: "testdata.x249",
+ Tag: "bytes,249,opt,name=x249",
+ Filename: "test.proto",
+}
+
+var E_X250 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 250,
+ Name: "testdata.x250",
+ Tag: "bytes,250,opt,name=x250",
+ Filename: "test.proto",
+}
+
+func init() {
+ proto.RegisterType((*GoEnum)(nil), "testdata.GoEnum")
+ proto.RegisterType((*GoTestField)(nil), "testdata.GoTestField")
+ proto.RegisterType((*GoTest)(nil), "testdata.GoTest")
+ proto.RegisterType((*GoTest_RequiredGroup)(nil), "testdata.GoTest.RequiredGroup")
+ proto.RegisterType((*GoTest_RepeatedGroup)(nil), "testdata.GoTest.RepeatedGroup")
+ proto.RegisterType((*GoTest_OptionalGroup)(nil), "testdata.GoTest.OptionalGroup")
+ proto.RegisterType((*GoTestRequiredGroupField)(nil), "testdata.GoTestRequiredGroupField")
+ proto.RegisterType((*GoTestRequiredGroupField_Group)(nil), "testdata.GoTestRequiredGroupField.Group")
+ proto.RegisterType((*GoSkipTest)(nil), "testdata.GoSkipTest")
+ proto.RegisterType((*GoSkipTest_SkipGroup)(nil), "testdata.GoSkipTest.SkipGroup")
+ proto.RegisterType((*NonPackedTest)(nil), "testdata.NonPackedTest")
+ proto.RegisterType((*PackedTest)(nil), "testdata.PackedTest")
+ proto.RegisterType((*MaxTag)(nil), "testdata.MaxTag")
+ proto.RegisterType((*OldMessage)(nil), "testdata.OldMessage")
+ proto.RegisterType((*OldMessage_Nested)(nil), "testdata.OldMessage.Nested")
+ proto.RegisterType((*NewMessage)(nil), "testdata.NewMessage")
+ proto.RegisterType((*NewMessage_Nested)(nil), "testdata.NewMessage.Nested")
+ proto.RegisterType((*InnerMessage)(nil), "testdata.InnerMessage")
+ proto.RegisterType((*OtherMessage)(nil), "testdata.OtherMessage")
+ proto.RegisterType((*RequiredInnerMessage)(nil), "testdata.RequiredInnerMessage")
+ proto.RegisterType((*MyMessage)(nil), "testdata.MyMessage")
+ proto.RegisterType((*MyMessage_SomeGroup)(nil), "testdata.MyMessage.SomeGroup")
+ proto.RegisterType((*Ext)(nil), "testdata.Ext")
+ proto.RegisterType((*ComplexExtension)(nil), "testdata.ComplexExtension")
+ proto.RegisterType((*DefaultsMessage)(nil), "testdata.DefaultsMessage")
+ proto.RegisterType((*MyMessageSet)(nil), "testdata.MyMessageSet")
+ proto.RegisterType((*Empty)(nil), "testdata.Empty")
+ proto.RegisterType((*MessageList)(nil), "testdata.MessageList")
+ proto.RegisterType((*MessageList_Message)(nil), "testdata.MessageList.Message")
+ proto.RegisterType((*Strings)(nil), "testdata.Strings")
+ proto.RegisterType((*Defaults)(nil), "testdata.Defaults")
+ proto.RegisterType((*SubDefaults)(nil), "testdata.SubDefaults")
+ proto.RegisterType((*RepeatedEnum)(nil), "testdata.RepeatedEnum")
+ proto.RegisterType((*MoreRepeated)(nil), "testdata.MoreRepeated")
+ proto.RegisterType((*GroupOld)(nil), "testdata.GroupOld")
+ proto.RegisterType((*GroupOld_G)(nil), "testdata.GroupOld.G")
+ proto.RegisterType((*GroupNew)(nil), "testdata.GroupNew")
+ proto.RegisterType((*GroupNew_G)(nil), "testdata.GroupNew.G")
+ proto.RegisterType((*FloatingPoint)(nil), "testdata.FloatingPoint")
+ proto.RegisterType((*MessageWithMap)(nil), "testdata.MessageWithMap")
+ proto.RegisterType((*Oneof)(nil), "testdata.Oneof")
+ proto.RegisterType((*Oneof_F_Group)(nil), "testdata.Oneof.F_Group")
+ proto.RegisterType((*Communique)(nil), "testdata.Communique")
+ proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value)
+ proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value)
+ proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value)
+ proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value)
+ proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value)
+ proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value)
+ proto.RegisterExtension(E_Ext_More)
+ proto.RegisterExtension(E_Ext_Text)
+ proto.RegisterExtension(E_Ext_Number)
+ proto.RegisterExtension(E_Greeting)
+ proto.RegisterExtension(E_Complex)
+ proto.RegisterExtension(E_RComplex)
+ proto.RegisterExtension(E_NoDefaultDouble)
+ proto.RegisterExtension(E_NoDefaultFloat)
+ proto.RegisterExtension(E_NoDefaultInt32)
+ proto.RegisterExtension(E_NoDefaultInt64)
+ proto.RegisterExtension(E_NoDefaultUint32)
+ proto.RegisterExtension(E_NoDefaultUint64)
+ proto.RegisterExtension(E_NoDefaultSint32)
+ proto.RegisterExtension(E_NoDefaultSint64)
+ proto.RegisterExtension(E_NoDefaultFixed32)
+ proto.RegisterExtension(E_NoDefaultFixed64)
+ proto.RegisterExtension(E_NoDefaultSfixed32)
+ proto.RegisterExtension(E_NoDefaultSfixed64)
+ proto.RegisterExtension(E_NoDefaultBool)
+ proto.RegisterExtension(E_NoDefaultString)
+ proto.RegisterExtension(E_NoDefaultBytes)
+ proto.RegisterExtension(E_NoDefaultEnum)
+ proto.RegisterExtension(E_DefaultDouble)
+ proto.RegisterExtension(E_DefaultFloat)
+ proto.RegisterExtension(E_DefaultInt32)
+ proto.RegisterExtension(E_DefaultInt64)
+ proto.RegisterExtension(E_DefaultUint32)
+ proto.RegisterExtension(E_DefaultUint64)
+ proto.RegisterExtension(E_DefaultSint32)
+ proto.RegisterExtension(E_DefaultSint64)
+ proto.RegisterExtension(E_DefaultFixed32)
+ proto.RegisterExtension(E_DefaultFixed64)
+ proto.RegisterExtension(E_DefaultSfixed32)
+ proto.RegisterExtension(E_DefaultSfixed64)
+ proto.RegisterExtension(E_DefaultBool)
+ proto.RegisterExtension(E_DefaultString)
+ proto.RegisterExtension(E_DefaultBytes)
+ proto.RegisterExtension(E_DefaultEnum)
+ proto.RegisterExtension(E_X201)
+ proto.RegisterExtension(E_X202)
+ proto.RegisterExtension(E_X203)
+ proto.RegisterExtension(E_X204)
+ proto.RegisterExtension(E_X205)
+ proto.RegisterExtension(E_X206)
+ proto.RegisterExtension(E_X207)
+ proto.RegisterExtension(E_X208)
+ proto.RegisterExtension(E_X209)
+ proto.RegisterExtension(E_X210)
+ proto.RegisterExtension(E_X211)
+ proto.RegisterExtension(E_X212)
+ proto.RegisterExtension(E_X213)
+ proto.RegisterExtension(E_X214)
+ proto.RegisterExtension(E_X215)
+ proto.RegisterExtension(E_X216)
+ proto.RegisterExtension(E_X217)
+ proto.RegisterExtension(E_X218)
+ proto.RegisterExtension(E_X219)
+ proto.RegisterExtension(E_X220)
+ proto.RegisterExtension(E_X221)
+ proto.RegisterExtension(E_X222)
+ proto.RegisterExtension(E_X223)
+ proto.RegisterExtension(E_X224)
+ proto.RegisterExtension(E_X225)
+ proto.RegisterExtension(E_X226)
+ proto.RegisterExtension(E_X227)
+ proto.RegisterExtension(E_X228)
+ proto.RegisterExtension(E_X229)
+ proto.RegisterExtension(E_X230)
+ proto.RegisterExtension(E_X231)
+ proto.RegisterExtension(E_X232)
+ proto.RegisterExtension(E_X233)
+ proto.RegisterExtension(E_X234)
+ proto.RegisterExtension(E_X235)
+ proto.RegisterExtension(E_X236)
+ proto.RegisterExtension(E_X237)
+ proto.RegisterExtension(E_X238)
+ proto.RegisterExtension(E_X239)
+ proto.RegisterExtension(E_X240)
+ proto.RegisterExtension(E_X241)
+ proto.RegisterExtension(E_X242)
+ proto.RegisterExtension(E_X243)
+ proto.RegisterExtension(E_X244)
+ proto.RegisterExtension(E_X245)
+ proto.RegisterExtension(E_X246)
+ proto.RegisterExtension(E_X247)
+ proto.RegisterExtension(E_X248)
+ proto.RegisterExtension(E_X249)
+ proto.RegisterExtension(E_X250)
+}
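+
+// The registrations above make the generated messages, enums and extensions
+// resolvable by their fully-qualified names, e.g. (a sketch):
+//
+//	t := proto.MessageType("testdata.Communique") // reflect.Type of *Communique
+//	_ = t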
+
+func init() { proto.RegisterFile("test.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 4453 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x5a, 0xc9, 0x77, 0xdb, 0x48,
+ 0x7a, 0x37, 0xc0, 0xfd, 0x23, 0x25, 0x42, 0x65, 0xb5, 0x9b, 0x96, 0xbc, 0xc0, 0x9c, 0xe9, 0x6e,
+ 0x7a, 0xd3, 0x48, 0x20, 0x44, 0xdb, 0x74, 0xa7, 0xdf, 0xf3, 0x42, 0xca, 0x7a, 0x63, 0x89, 0x0a,
+ 0xa4, 0xee, 0x7e, 0xd3, 0x39, 0xf0, 0x51, 0x22, 0x44, 0xb3, 0x4d, 0x02, 0x34, 0x09, 0xc5, 0x52,
+ 0x72, 0xe9, 0x4b, 0x72, 0xcd, 0x76, 0xc9, 0x35, 0xa7, 0x9c, 0x92, 0xbc, 0x97, 0x7f, 0x22, 0xe9,
+ 0xee, 0x59, 0x7b, 0xd6, 0xac, 0x93, 0x7d, 0x99, 0xec, 0xdb, 0x4c, 0x92, 0x4b, 0xcf, 0xab, 0xaf,
+ 0x0a, 0x40, 0x01, 0x24, 0x20, 0xf9, 0x24, 0x56, 0xd5, 0xef, 0xf7, 0xd5, 0xf6, 0xab, 0xef, 0xab,
+ 0xaf, 0x20, 0x00, 0xc7, 0x9c, 0x38, 0x2b, 0xa3, 0xb1, 0xed, 0xd8, 0x24, 0x4b, 0x7f, 0x77, 0x3b,
+ 0x4e, 0xa7, 0x7c, 0x1d, 0xd2, 0x1b, 0x76, 0xc3, 0x3a, 0x1a, 0x92, 0xab, 0x90, 0x38, 0xb4, 0xed,
+ 0x92, 0xa4, 0xca, 0x95, 0x79, 0x6d, 0x6e, 0xc5, 0x45, 0xac, 0x34, 0x5b, 0x2d, 0x83, 0xb6, 0x94,
+ 0xef, 0x40, 0x7e, 0xc3, 0xde, 0x33, 0x27, 0x4e, 0xb3, 0x6f, 0x0e, 0xba, 0x64, 0x11, 0x52, 0x4f,
+ 0x3b, 0xfb, 0xe6, 0x00, 0x19, 0x39, 0x83, 0x15, 0x08, 0x81, 0xe4, 0xde, 0xc9, 0xc8, 0x2c, 0xc9,
+ 0x58, 0x89, 0xbf, 0xcb, 0xbf, 0x72, 0x85, 0x76, 0x42, 0x99, 0xe4, 0x3a, 0x24, 0xbf, 0xdc, 0xb7,
+ 0xba, 0xbc, 0x97, 0xd7, 0xfc, 0x5e, 0x58, 0xfb, 0xca, 0x97, 0x37, 0xb7, 0x1f, 0x1b, 0x08, 0xa1,
+ 0xf6, 0xf7, 0x3a, 0xfb, 0x03, 0x6a, 0x4a, 0xa2, 0xf6, 0xb1, 0x40, 0x6b, 0x77, 0x3a, 0xe3, 0xce,
+ 0xb0, 0x94, 0x50, 0xa5, 0x4a, 0xca, 0x60, 0x05, 0x72, 0x1f, 0xe6, 0x0c, 0xf3, 0xc5, 0x51, 0x7f,
+ 0x6c, 0x76, 0x71, 0x70, 0xa5, 0xa4, 0x2a, 0x57, 0xf2, 0xd3, 0xf6, 0xb1, 0xd1, 0x08, 0x62, 0x19,
+ 0x79, 0x64, 0x76, 0x1c, 0x97, 0x9c, 0x52, 0x13, 0xb1, 0x64, 0x01, 0x4b, 0xc9, 0xad, 0x91, 0xd3,
+ 0xb7, 0xad, 0xce, 0x80, 0x91, 0xd3, 0xaa, 0x14, 0x43, 0x0e, 0x60, 0xc9, 0x9b, 0x50, 0x6c, 0xb6,
+ 0x1f, 0xda, 0xf6, 0xa0, 0x3d, 0xe6, 0x23, 0x2a, 0x81, 0x2a, 0x57, 0xb2, 0xc6, 0x5c, 0x93, 0xd6,
+ 0xba, 0xc3, 0x24, 0x15, 0x50, 0x9a, 0xed, 0x4d, 0xcb, 0xa9, 0x6a, 0x3e, 0x30, 0xaf, 0xca, 0x95,
+ 0x94, 0x31, 0xdf, 0xc4, 0xea, 0x29, 0x64, 0x4d, 0xf7, 0x91, 0x05, 0x55, 0xae, 0x24, 0x18, 0xb2,
+ 0xa6, 0x7b, 0xc8, 0x5b, 0x40, 0x9a, 0xed, 0x66, 0xff, 0xd8, 0xec, 0x8a, 0x56, 0xe7, 0x54, 0xb9,
+ 0x92, 0x31, 0x94, 0x26, 0x6f, 0x98, 0x81, 0x16, 0x2d, 0xcf, 0xab, 0x72, 0x25, 0xed, 0xa2, 0x05,
+ 0xdb, 0x37, 0x60, 0xa1, 0xd9, 0x7e, 0xb7, 0x1f, 0x1c, 0x70, 0x51, 0x95, 0x2b, 0x73, 0x46, 0xb1,
+ 0xc9, 0xea, 0xa7, 0xb1, 0xa2, 0x61, 0x45, 0x95, 0x2b, 0x49, 0x8e, 0x15, 0xec, 0xe2, 0xec, 0x9a,
+ 0x03, 0xbb, 0xe3, 0xf8, 0xd0, 0x05, 0x55, 0xae, 0xc8, 0xc6, 0x7c, 0x13, 0xab, 0x83, 0x56, 0x1f,
+ 0xdb, 0x47, 0xfb, 0x03, 0xd3, 0x87, 0x12, 0x55, 0xae, 0x48, 0x46, 0xb1, 0xc9, 0xea, 0x83, 0xd8,
+ 0x5d, 0x67, 0xdc, 0xb7, 0x7a, 0x3e, 0xf6, 0x3c, 0xea, 0xb7, 0xd8, 0x64, 0xf5, 0xc1, 0x11, 0x3c,
+ 0x3c, 0x71, 0xcc, 0x89, 0x0f, 0x35, 0x55, 0xb9, 0x52, 0x30, 0xe6, 0x9b, 0x58, 0x1d, 0xb2, 0x1a,
+ 0x5a, 0x83, 0x43, 0x55, 0xae, 0x2c, 0x50, 0xab, 0x33, 0xd6, 0x60, 0x37, 0xb4, 0x06, 0x3d, 0x55,
+ 0xae, 0x10, 0x8e, 0x15, 0xd6, 0x40, 0xd4, 0x0c, 0x13, 0x62, 0x69, 0x51, 0x4d, 0x08, 0x9a, 0x61,
+ 0x95, 0x41, 0xcd, 0x70, 0xe0, 0x6b, 0x6a, 0x42, 0xd4, 0x4c, 0x08, 0x89, 0x9d, 0x73, 0xe4, 0x05,
+ 0x35, 0x21, 0x6a, 0x86, 0x23, 0x43, 0x9a, 0xe1, 0xd8, 0xd7, 0xd5, 0x44, 0x50, 0x33, 0x53, 0x68,
+ 0xd1, 0x72, 0x49, 0x4d, 0x04, 0x35, 0xc3, 0xd1, 0x41, 0xcd, 0x70, 0xf0, 0x45, 0x35, 0x11, 0xd0,
+ 0x4c, 0x18, 0x2b, 0x1a, 0x5e, 0x52, 0x13, 0x01, 0xcd, 0x88, 0xb3, 0x73, 0x35, 0xc3, 0xa1, 0xcb,
+ 0x6a, 0x42, 0xd4, 0x8c, 0x68, 0xd5, 0xd3, 0x0c, 0x87, 0x5e, 0x52, 0x13, 0x01, 0xcd, 0x88, 0x58,
+ 0x4f, 0x33, 0x1c, 0x7b, 0x59, 0x4d, 0x04, 0x34, 0xc3, 0xb1, 0xd7, 0x45, 0xcd, 0x70, 0xe8, 0xc7,
+ 0x92, 0x9a, 0x10, 0x45, 0xc3, 0xa1, 0x37, 0x03, 0xa2, 0xe1, 0xd8, 0x4f, 0x28, 0x56, 0x54, 0x4d,
+ 0x18, 0x2c, 0xae, 0xc2, 0xa7, 0x14, 0x2c, 0xca, 0x86, 0x83, 0x7d, 0xd9, 0xd8, 0xdc, 0x05, 0x95,
+ 0xae, 0xa8, 0x92, 0x27, 0x1b, 0xd7, 0x2f, 0x89, 0xb2, 0xf1, 0x80, 0x57, 0xd1, 0xd5, 0x72, 0xd9,
+ 0x4c, 0x21, 0x6b, 0xba, 0x8f, 0x54, 0x55, 0xc9, 0x97, 0x8d, 0x87, 0x0c, 0xc8, 0xc6, 0xc3, 0x5e,
+ 0x53, 0x25, 0x51, 0x36, 0x33, 0xd0, 0xa2, 0xe5, 0xb2, 0x2a, 0x89, 0xb2, 0xf1, 0xd0, 0xa2, 0x6c,
+ 0x3c, 0xf0, 0x17, 0x54, 0x49, 0x90, 0xcd, 0x34, 0x56, 0x34, 0xfc, 0x45, 0x55, 0x12, 0x64, 0x13,
+ 0x9c, 0x1d, 0x93, 0x8d, 0x07, 0x7d, 0x43, 0x95, 0x7c, 0xd9, 0x04, 0xad, 0x72, 0xd9, 0x78, 0xd0,
+ 0x37, 0x55, 0x49, 0x90, 0x4d, 0x10, 0xcb, 0x65, 0xe3, 0x61, 0xdf, 0xc2, 0xf8, 0xe6, 0xca, 0xc6,
+ 0xc3, 0x0a, 0xb2, 0xf1, 0xa0, 0xbf, 0x43, 0x63, 0xa1, 0x27, 0x1b, 0x0f, 0x2a, 0xca, 0xc6, 0xc3,
+ 0xfe, 0x2e, 0xc5, 0xfa, 0xb2, 0x99, 0x06, 0x8b, 0xab, 0xf0, 0x7b, 0x14, 0xec, 0xcb, 0xc6, 0x03,
+ 0xaf, 0xe0, 0x20, 0xa8, 0x6c, 0xba, 0xe6, 0x61, 0xe7, 0x68, 0x40, 0x25, 0x56, 0xa1, 0xba, 0xa9,
+ 0x27, 0x9d, 0xf1, 0x91, 0x49, 0x47, 0x62, 0xdb, 0x83, 0xc7, 0x6e, 0x1b, 0x59, 0xa1, 0xc6, 0x99,
+ 0x7c, 0x7c, 0xc2, 0x75, 0xaa, 0x9f, 0xba, 0x5c, 0xd5, 0x8c, 0x22, 0xd3, 0xd0, 0x34, 0xbe, 0xa6,
+ 0x0b, 0xf8, 0x1b, 0x54, 0x45, 0x75, 0xb9, 0xa6, 0x33, 0x7c, 0x4d, 0xf7, 0xf1, 0x55, 0x38, 0xef,
+ 0x4b, 0xc9, 0x67, 0xdc, 0xa4, 0x5a, 0xaa, 0x27, 0xaa, 0xda, 0xaa, 0xb1, 0xe0, 0x0a, 0x6a, 0x16,
+ 0x29, 0xd0, 0xcd, 0x2d, 0x2a, 0xa9, 0x7a, 0xa2, 0xa6, 0x7b, 0x24, 0xb1, 0x27, 0x8d, 0xca, 0x90,
+ 0x0b, 0xcb, 0xe7, 0xdc, 0xa6, 0xca, 0xaa, 0x27, 0xab, 0xda, 0xea, 0xaa, 0xa1, 0x70, 0x7d, 0xcd,
+ 0xe0, 0x04, 0xfa, 0x59, 0xa1, 0x0a, 0xab, 0x27, 0x6b, 0xba, 0xc7, 0x09, 0xf6, 0xb3, 0xe0, 0x0a,
+ 0xcd, 0xa7, 0x7c, 0x89, 0x2a, 0xad, 0x9e, 0xae, 0xae, 0xe9, 0x6b, 0xeb, 0xf7, 0x8c, 0x22, 0x53,
+ 0x9c, 0xcf, 0xd1, 0x69, 0x3f, 0x5c, 0x72, 0x3e, 0x69, 0x95, 0x6a, 0xae, 0x9e, 0xd6, 0xee, 0xac,
+ 0xdd, 0xd5, 0xee, 0x1a, 0x0a, 0xd7, 0x9e, 0xcf, 0x7a, 0x87, 0xb2, 0xb8, 0xf8, 0x7c, 0xd6, 0x1a,
+ 0x55, 0x5f, 0x5d, 0x79, 0x66, 0x0e, 0x06, 0xf6, 0x2d, 0xb5, 0xfc, 0xd2, 0x1e, 0x0f, 0xba, 0xd7,
+ 0xca, 0x60, 0x28, 0x5c, 0x8f, 0x62, 0xaf, 0x0b, 0xae, 0x20, 0x7d, 0xfa, 0xaf, 0xd1, 0x7b, 0x58,
+ 0xa1, 0x9e, 0x79, 0xd8, 0xef, 0x59, 0xf6, 0xc4, 0x34, 0x8a, 0x4c, 0x9a, 0xa1, 0x35, 0xd9, 0x0d,
+ 0xaf, 0xe3, 0xaf, 0x53, 0xda, 0x42, 0x3d, 0x71, 0xbb, 0xaa, 0xd1, 0x9e, 0x66, 0xad, 0xe3, 0x6e,
+ 0x78, 0x1d, 0x7f, 0x83, 0x72, 0x48, 0x3d, 0x71, 0xbb, 0xa6, 0x73, 0x8e, 0xb8, 0x8e, 0x77, 0xe0,
+ 0x42, 0x28, 0x2e, 0xb6, 0x47, 0x9d, 0x83, 0xe7, 0x66, 0xb7, 0xa4, 0xd1, 0xf0, 0xf8, 0x50, 0x56,
+ 0x24, 0xe3, 0x7c, 0x20, 0x44, 0xee, 0x60, 0x33, 0xb9, 0x07, 0xaf, 0x87, 0x03, 0xa5, 0xcb, 0xac,
+ 0xd2, 0x78, 0x89, 0xcc, 0xc5, 0x60, 0xcc, 0x0c, 0x51, 0x05, 0x07, 0xec, 0x52, 0x75, 0x1a, 0x40,
+ 0x7d, 0xaa, 0xef, 0x89, 0x39, 0xf5, 0x67, 0xe0, 0xe2, 0x74, 0x28, 0x75, 0xc9, 0xeb, 0x34, 0xa2,
+ 0x22, 0xf9, 0x42, 0x38, 0xaa, 0x4e, 0xd1, 0x67, 0xf4, 0x5d, 0xa3, 0x21, 0x56, 0xa4, 0x4f, 0xf5,
+ 0x7e, 0x1f, 0x4a, 0x53, 0xc1, 0xd6, 0x65, 0xdf, 0xa1, 0x31, 0x17, 0xd9, 0xaf, 0x85, 0xe2, 0x6e,
+ 0x98, 0x3c, 0xa3, 0xeb, 0xbb, 0x34, 0x08, 0x0b, 0xe4, 0xa9, 0x9e, 0x71, 0xc9, 0x82, 0xe1, 0xd8,
+ 0xe5, 0xde, 0xa3, 0x51, 0x99, 0x2f, 0x59, 0x20, 0x32, 0x8b, 0xfd, 0x86, 0xe2, 0xb3, 0xcb, 0xad,
+ 0xd3, 0x30, 0xcd, 0xfb, 0x0d, 0x86, 0x6a, 0x4e, 0x7e, 0x9b, 0x92, 0x77, 0x67, 0xcf, 0xf8, 0xc7,
+ 0x09, 0x1a, 0x60, 0x39, 0x7b, 0x77, 0xd6, 0x94, 0x3d, 0xf6, 0x8c, 0x29, 0xff, 0x84, 0xb2, 0x89,
+ 0xc0, 0x9e, 0x9a, 0xf3, 0x63, 0x98, 0x73, 0x6f, 0x75, 0xbd, 0xb1, 0x7d, 0x34, 0x2a, 0x35, 0x55,
+ 0xb9, 0x02, 0xda, 0x95, 0xa9, 0xec, 0xc7, 0xbd, 0xe4, 0x6d, 0x50, 0x94, 0x11, 0x24, 0x31, 0x2b,
+ 0xcc, 0x2e, 0xb3, 0xb2, 0xa3, 0x26, 0x22, 0xac, 0x30, 0x94, 0x67, 0x45, 0x20, 0x51, 0x2b, 0xae,
+ 0xd3, 0x67, 0x56, 0x3e, 0x50, 0xa5, 0x99, 0x56, 0xdc, 0x10, 0xc0, 0xad, 0x04, 0x48, 0x4b, 0xeb,
+ 0x7e, 0xbe, 0x85, 0xed, 0xe4, 0x8b, 0xe1, 0x04, 0x6c, 0x03, 0xef, 0xcf, 0xc1, 0x4a, 0x46, 0x13,
+ 0x06, 0x37, 0x4d, 0xfb, 0xd9, 0x08, 0x5a, 0x60, 0x34, 0xd3, 0xb4, 0x9f, 0x9b, 0x41, 0x2b, 0xff,
+ 0xa6, 0x04, 0x49, 0x9a, 0x4f, 0x92, 0x2c, 0x24, 0xdf, 0x6b, 0x6d, 0x3e, 0x56, 0xce, 0xd1, 0x5f,
+ 0x0f, 0x5b, 0xad, 0xa7, 0x8a, 0x44, 0x72, 0x90, 0x7a, 0xf8, 0x95, 0xbd, 0xc6, 0xae, 0x22, 0x93,
+ 0x22, 0xe4, 0x9b, 0x9b, 0xdb, 0x1b, 0x0d, 0x63, 0xc7, 0xd8, 0xdc, 0xde, 0x53, 0x12, 0xb4, 0xad,
+ 0xf9, 0xb4, 0xf5, 0x60, 0x4f, 0x49, 0x92, 0x0c, 0x24, 0x68, 0x5d, 0x8a, 0x00, 0xa4, 0x77, 0xf7,
+ 0x8c, 0xcd, 0xed, 0x0d, 0x25, 0x4d, 0xad, 0xec, 0x6d, 0x6e, 0x35, 0x94, 0x0c, 0x45, 0xee, 0xbd,
+ 0xbb, 0xf3, 0xb4, 0xa1, 0x64, 0xe9, 0xcf, 0x07, 0x86, 0xf1, 0xe0, 0x2b, 0x4a, 0x8e, 0x92, 0xb6,
+ 0x1e, 0xec, 0x28, 0x80, 0xcd, 0x0f, 0x1e, 0x3e, 0x6d, 0x28, 0x79, 0x52, 0x80, 0x6c, 0xf3, 0xdd,
+ 0xed, 0x47, 0x7b, 0x9b, 0xad, 0x6d, 0xa5, 0x50, 0x3e, 0x81, 0x12, 0x5b, 0xe6, 0xc0, 0x2a, 0xb2,
+ 0xa4, 0xf0, 0x1d, 0x48, 0xb1, 0x9d, 0x91, 0x50, 0x25, 0x95, 0xf0, 0xce, 0x4c, 0x53, 0x56, 0xd8,
+ 0x1e, 0x31, 0xda, 0xd2, 0x65, 0x48, 0xb1, 0x55, 0x5a, 0x84, 0x14, 0x5b, 0x1d, 0x19, 0x53, 0x45,
+ 0x56, 0x28, 0xff, 0x96, 0x0c, 0xb0, 0x61, 0xef, 0x3e, 0xef, 0x8f, 0x30, 0x21, 0xbf, 0x0c, 0x30,
+ 0x79, 0xde, 0x1f, 0xb5, 0x51, 0xf5, 0x3c, 0xa9, 0xcc, 0xd1, 0x1a, 0xf4, 0x77, 0xe4, 0x1a, 0x14,
+ 0xb0, 0xf9, 0x90, 0x79, 0x21, 0xcc, 0x25, 0x33, 0x46, 0x9e, 0xd6, 0x71, 0xc7, 0x14, 0x84, 0xd4,
+ 0x74, 0x4c, 0x21, 0xd3, 0x02, 0xa4, 0xa6, 0x93, 0xab, 0x80, 0xc5, 0xf6, 0x04, 0x23, 0x0a, 0xa6,
+ 0x8d, 0x39, 0x03, 0xfb, 0x65, 0x31, 0x86, 0xbc, 0x0d, 0xd8, 0x27, 0x9b, 0x77, 0x71, 0xfa, 0x74,
+ 0xb8, 0xc3, 0x5d, 0xa1, 0x3f, 0xd8, 0x6c, 0x7d, 0xc2, 0x52, 0x0b, 0x72, 0x5e, 0x3d, 0xed, 0x0b,
+ 0x6b, 0xf9, 0x8c, 0x14, 0x9c, 0x11, 0x60, 0x95, 0x37, 0x25, 0x06, 0xe0, 0xa3, 0x59, 0xc0, 0xd1,
+ 0x30, 0x12, 0x1b, 0x4e, 0xf9, 0x32, 0xcc, 0x6d, 0xdb, 0x16, 0x3b, 0xbd, 0xb8, 0x4a, 0x05, 0x90,
+ 0x3a, 0x25, 0x09, 0xb3, 0x27, 0xa9, 0x53, 0xbe, 0x02, 0x20, 0xb4, 0x29, 0x20, 0xed, 0xb3, 0x36,
+ 0xf4, 0x01, 0xd2, 0x7e, 0xf9, 0x26, 0xa4, 0xb7, 0x3a, 0xc7, 0x7b, 0x9d, 0x1e, 0xb9, 0x06, 0x30,
+ 0xe8, 0x4c, 0x9c, 0xf6, 0x21, 0xee, 0xc3, 0xe7, 0x9f, 0x7f, 0xfe, 0xb9, 0x84, 0x97, 0xbd, 0x1c,
+ 0xad, 0x65, 0xfb, 0xf1, 0x02, 0xa0, 0x35, 0xe8, 0x6e, 0x99, 0x93, 0x49, 0xa7, 0x67, 0x92, 0x2a,
+ 0xa4, 0x2d, 0x73, 0x42, 0xa3, 0x9d, 0x84, 0xef, 0x08, 0xcb, 0xfe, 0x2a, 0xf8, 0xa8, 0x95, 0x6d,
+ 0x84, 0x18, 0x1c, 0x4a, 0x14, 0x48, 0x58, 0x47, 0x43, 0x7c, 0x27, 0x49, 0x19, 0xf4, 0xe7, 0xd2,
+ 0x25, 0x48, 0x33, 0x0c, 0x21, 0x90, 0xb4, 0x3a, 0x43, 0xb3, 0xc4, 0xfa, 0xc5, 0xdf, 0xe5, 0x5f,
+ 0x95, 0x00, 0xb6, 0xcd, 0x97, 0x67, 0xe8, 0xd3, 0x47, 0xc5, 0xf4, 0x99, 0x60, 0x7d, 0xde, 0x8f,
+ 0xeb, 0x93, 0xea, 0xec, 0xd0, 0xb6, 0xbb, 0x6d, 0xb6, 0xc5, 0xec, 0x49, 0x27, 0x47, 0x6b, 0x70,
+ 0xd7, 0xca, 0x1f, 0x40, 0x61, 0xd3, 0xb2, 0xcc, 0xb1, 0x3b, 0x26, 0x02, 0xc9, 0x67, 0xf6, 0xc4,
+ 0xe1, 0x6f, 0x4b, 0xf8, 0x9b, 0x94, 0x20, 0x39, 0xb2, 0xc7, 0x0e, 0x9b, 0x67, 0x3d, 0xa9, 0xaf,
+ 0xae, 0xae, 0x1a, 0x58, 0x43, 0x2e, 0x41, 0xee, 0xc0, 0xb6, 0x2c, 0xf3, 0x80, 0x4e, 0x22, 0x81,
+ 0x69, 0x8d, 0x5f, 0x51, 0xfe, 0x65, 0x09, 0x0a, 0x2d, 0xe7, 0x99, 0x6f, 0x5c, 0x81, 0xc4, 0x73,
+ 0xf3, 0x04, 0x87, 0x97, 0x30, 0xe8, 0x4f, 0x7a, 0x54, 0x7e, 0xbe, 0x33, 0x38, 0x62, 0x6f, 0x4d,
+ 0x05, 0x83, 0x15, 0xc8, 0x05, 0x48, 0xbf, 0x34, 0xfb, 0xbd, 0x67, 0x0e, 0xda, 0x94, 0x0d, 0x5e,
+ 0x22, 0xb7, 0x20, 0xd5, 0xa7, 0x83, 0x2d, 0x25, 0x71, 0xbd, 0x2e, 0xf8, 0xeb, 0x25, 0xce, 0xc1,
+ 0x60, 0xa0, 0x1b, 0xd9, 0x6c, 0x57, 0xf9, 0xe8, 0xa3, 0x8f, 0x3e, 0x92, 0xcb, 0x87, 0xb0, 0xe8,
+ 0x1e, 0xde, 0xc0, 0x64, 0xb7, 0xa1, 0x34, 0x30, 0xed, 0xf6, 0x61, 0xdf, 0xea, 0x0c, 0x06, 0x27,
+ 0xed, 0x97, 0xb6, 0xd5, 0xee, 0x58, 0x6d, 0x7b, 0x72, 0xd0, 0x19, 0xe3, 0x02, 0x44, 0x77, 0xb1,
+ 0x38, 0x30, 0xed, 0x26, 0xa3, 0xbd, 0x6f, 0x5b, 0x0f, 0xac, 0x16, 0xe5, 0x94, 0xff, 0x20, 0x09,
+ 0xb9, 0xad, 0x13, 0xd7, 0xfa, 0x22, 0xa4, 0x0e, 0xec, 0x23, 0x8b, 0xad, 0x65, 0xca, 0x60, 0x05,
+ 0x6f, 0x8f, 0x64, 0x61, 0x8f, 0x16, 0x21, 0xf5, 0xe2, 0xc8, 0x76, 0x4c, 0x9c, 0x6e, 0xce, 0x60,
+ 0x05, 0xba, 0x5a, 0x23, 0xd3, 0x29, 0x25, 0x31, 0xb9, 0xa5, 0x3f, 0xfd, 0xf9, 0xa7, 0xce, 0x30,
+ 0x7f, 0xb2, 0x02, 0x69, 0x9b, 0xae, 0xfe, 0xa4, 0x94, 0xc6, 0x77, 0x35, 0x01, 0x2e, 0xee, 0x8a,
+ 0xc1, 0x51, 0x64, 0x13, 0x16, 0x5e, 0x9a, 0xed, 0xe1, 0xd1, 0xc4, 0x69, 0xf7, 0xec, 0x76, 0xd7,
+ 0x34, 0x47, 0xe6, 0xb8, 0x34, 0x87, 0x3d, 0x09, 0x3e, 0x61, 0xd6, 0x42, 0x1a, 0xf3, 0x2f, 0xcd,
+ 0xad, 0xa3, 0x89, 0xb3, 0x61, 0x3f, 0x46, 0x16, 0xa9, 0x42, 0x6e, 0x6c, 0x52, 0x4f, 0x40, 0x07,
+ 0x5b, 0x08, 0xf7, 0x1e, 0xa0, 0x66, 0xc7, 0xe6, 0x08, 0x2b, 0xc8, 0x3a, 0x64, 0xf7, 0xfb, 0xcf,
+ 0xcd, 0xc9, 0x33, 0xb3, 0x5b, 0xca, 0xa8, 0x52, 0x65, 0x5e, 0xbb, 0xe8, 0x73, 0xbc, 0x65, 0x5d,
+ 0x79, 0x64, 0x0f, 0xec, 0xb1, 0xe1, 0x41, 0xc9, 0x7d, 0xc8, 0x4d, 0xec, 0xa1, 0xc9, 0xf4, 0x9d,
+ 0xc5, 0xa0, 0x7a, 0x79, 0x16, 0x6f, 0xd7, 0x1e, 0x9a, 0xae, 0x07, 0x73, 0xf1, 0x64, 0x99, 0x0d,
+ 0x74, 0x9f, 0x5e, 0x9d, 0x4b, 0x80, 0x4f, 0x03, 0x74, 0x40, 0x78, 0x95, 0x26, 0x4b, 0x74, 0x40,
+ 0xbd, 0x43, 0x7a, 0x23, 0x2a, 0xe5, 0x31, 0xaf, 0xf4, 0xca, 0x4b, 0xb7, 0x20, 0xe7, 0x19, 0xf4,
+ 0x5d, 0x1f, 0x73, 0x37, 0x39, 0xf4, 0x07, 0xcc, 0xf5, 0x31, 0x5f, 0xf3, 0x06, 0xa4, 0x70, 0xd8,
+ 0x34, 0x42, 0x19, 0x0d, 0x1a, 0x10, 0x73, 0x90, 0xda, 0x30, 0x1a, 0x8d, 0x6d, 0x45, 0xc2, 0xd8,
+ 0xf8, 0xf4, 0xdd, 0x86, 0x22, 0x0b, 0x8a, 0xfd, 0x6d, 0x09, 0x12, 0x8d, 0x63, 0x54, 0x0b, 0x9d,
+ 0x86, 0x7b, 0xa2, 0xe9, 0x6f, 0xad, 0x06, 0xc9, 0xa1, 0x3d, 0x36, 0xc9, 0xf9, 0x19, 0xb3, 0x2c,
+ 0xf5, 0x70, 0xbf, 0x84, 0x57, 0xe4, 0xc6, 0xb1, 0x63, 0x20, 0x5e, 0x7b, 0x0b, 0x92, 0x8e, 0x79,
+ 0xec, 0xcc, 0xe6, 0x3d, 0x63, 0x1d, 0x50, 0x80, 0x76, 0x13, 0xd2, 0xd6, 0xd1, 0x70, 0xdf, 0x1c,
+ 0xcf, 0x86, 0xf6, 0x71, 0x7a, 0x1c, 0x52, 0x7e, 0x0f, 0x94, 0x47, 0xf6, 0x70, 0x34, 0x30, 0x8f,
+ 0x1b, 0xc7, 0x8e, 0x69, 0x4d, 0xfa, 0xb6, 0x45, 0xf5, 0x7c, 0xd8, 0x1f, 0xa3, 0x17, 0xc1, 0xb7,
+ 0x62, 0x2c, 0xd0, 0x53, 0x3d, 0x31, 0x0f, 0x6c, 0xab, 0xcb, 0x1d, 0x26, 0x2f, 0x51, 0xb4, 0xf3,
+ 0xac, 0x3f, 0xa6, 0x0e, 0x84, 0xfa, 0x79, 0x56, 0x28, 0x6f, 0x40, 0x91, 0xe7, 0x18, 0x13, 0xde,
+ 0x71, 0xf9, 0x06, 0x14, 0xdc, 0x2a, 0x7c, 0x38, 0xcf, 0x42, 0xf2, 0x83, 0x86, 0xd1, 0x52, 0xce,
+ 0xd1, 0x65, 0x6d, 0x6d, 0x37, 0x14, 0x89, 0xfe, 0xd8, 0x7b, 0xbf, 0x15, 0x58, 0xca, 0x4b, 0x50,
+ 0xf0, 0xc6, 0xbe, 0x6b, 0x3a, 0xd8, 0x42, 0x03, 0x42, 0xa6, 0x2e, 0x67, 0xa5, 0x72, 0x06, 0x52,
+ 0x8d, 0xe1, 0xc8, 0x39, 0x29, 0xff, 0x22, 0xe4, 0x39, 0xe8, 0x69, 0x7f, 0xe2, 0x90, 0x3b, 0x90,
+ 0x19, 0xf2, 0xf9, 0x4a, 0x78, 0xdd, 0x13, 0x35, 0xe5, 0xe3, 0xdc, 0xdf, 0x86, 0x8b, 0x5e, 0xaa,
+ 0x42, 0x46, 0xf0, 0xa5, 0xfc, 0xa8, 0xcb, 0xe2, 0x51, 0x67, 0x4e, 0x21, 0x21, 0x38, 0x85, 0xf2,
+ 0x16, 0x64, 0x58, 0x04, 0x9c, 0x60, 0x54, 0x67, 0xa9, 0x22, 0x13, 0x13, 0xdb, 0xf9, 0x3c, 0xab,
+ 0x63, 0x17, 0x95, 0xab, 0x90, 0x47, 0xc1, 0x72, 0x04, 0x73, 0x9d, 0x80, 0x55, 0x4c, 0x6e, 0xbf,
+ 0x9f, 0x82, 0xac, 0xbb, 0x52, 0x64, 0x19, 0xd2, 0x2c, 0x3f, 0x43, 0x53, 0xee, 0xfb, 0x41, 0x0a,
+ 0x33, 0x32, 0xb2, 0x0c, 0x19, 0x9e, 0x83, 0x71, 0xef, 0x2e, 0x57, 0x35, 0x23, 0xcd, 0x72, 0x2e,
+ 0xaf, 0xb1, 0xa6, 0xa3, 0x63, 0x62, 0x2f, 0x03, 0x69, 0x96, 0x55, 0x11, 0x15, 0x72, 0x5e, 0x1e,
+ 0x85, 0xfe, 0x98, 0x3f, 0x03, 0x64, 0xdd, 0xc4, 0x49, 0x40, 0xd4, 0x74, 0xf4, 0x58, 0x3c, 0xe7,
+ 0xcf, 0x36, 0xfd, 0xeb, 0x49, 0xd6, 0xcd, 0x86, 0xf0, 0xf9, 0xde, 0x4d, 0xf0, 0x33, 0x3c, 0xff,
+ 0xf1, 0x01, 0x35, 0x1d, 0x5d, 0x82, 0x9b, 0xcd, 0x67, 0x78, 0x8e, 0x43, 0xae, 0xd2, 0x21, 0x62,
+ 0xce, 0x82, 0x47, 0xdf, 0x4f, 0xdd, 0xd3, 0x2c, 0x93, 0x21, 0xd7, 0xa8, 0x05, 0x96, 0x98, 0xe0,
+ 0xb9, 0xf4, 0xf3, 0xf4, 0x0c, 0xcf, 0x57, 0xc8, 0x4d, 0x0a, 0x61, 0xcb, 0x5f, 0x82, 0x88, 0xa4,
+ 0x3c, 0xc3, 0x93, 0x72, 0xa2, 0xd2, 0x0e, 0xd1, 0x3d, 0xa0, 0x4b, 0x10, 0x12, 0xf0, 0x34, 0x4b,
+ 0xc0, 0xc9, 0x15, 0x34, 0xc7, 0x26, 0x55, 0xf0, 0x93, 0xed, 0x0c, 0x4f, 0x70, 0xfc, 0x76, 0xbc,
+ 0xb2, 0x79, 0x89, 0x75, 0x86, 0xa7, 0x30, 0xa4, 0x46, 0xf7, 0x8b, 0xea, 0xbb, 0x34, 0x8f, 0x4e,
+ 0xb0, 0xe4, 0x0b, 0xcf, 0xdd, 0x53, 0xe6, 0x03, 0xeb, 0xcc, 0x83, 0x18, 0xa9, 0x26, 0x9e, 0x86,
+ 0x25, 0xca, 0xdb, 0xe9, 0x5b, 0x87, 0xa5, 0x22, 0xae, 0x44, 0xa2, 0x6f, 0x1d, 0x1a, 0xa9, 0x26,
+ 0xad, 0x61, 0x1a, 0xd8, 0xa6, 0x6d, 0x0a, 0xb6, 0x25, 0x6f, 0xb3, 0x46, 0x5a, 0x45, 0x4a, 0x90,
+ 0x6a, 0xb6, 0xb7, 0x3b, 0x56, 0x69, 0x81, 0xf1, 0xac, 0x8e, 0x65, 0x24, 0x9b, 0xdb, 0x1d, 0x8b,
+ 0xbc, 0x05, 0x89, 0xc9, 0xd1, 0x7e, 0x89, 0x84, 0xbf, 0xac, 0xec, 0x1e, 0xed, 0xbb, 0x43, 0x31,
+ 0x28, 0x82, 0x2c, 0x43, 0x76, 0xe2, 0x8c, 0xdb, 0xbf, 0x60, 0x8e, 0xed, 0xd2, 0x79, 0x5c, 0xc2,
+ 0x73, 0x46, 0x66, 0xe2, 0x8c, 0x3f, 0x30, 0xc7, 0xf6, 0x19, 0x9d, 0x5f, 0xf9, 0x0a, 0xe4, 0x05,
+ 0xbb, 0xa4, 0x08, 0x92, 0xc5, 0x6e, 0x0a, 0x75, 0xe9, 0x8e, 0x21, 0x59, 0xe5, 0x3d, 0x28, 0xb8,
+ 0x39, 0x0c, 0xce, 0x57, 0xa3, 0x27, 0x69, 0x60, 0x8f, 0xf1, 0x7c, 0xce, 0x6b, 0x97, 0xc4, 0x10,
+ 0xe5, 0xc3, 0x78, 0xb8, 0x60, 0xd0, 0xb2, 0x12, 0x1a, 0x8a, 0x54, 0xfe, 0xa1, 0x04, 0x85, 0x2d,
+ 0x7b, 0xec, 0x3f, 0x30, 0x2f, 0x42, 0x6a, 0xdf, 0xb6, 0x07, 0x13, 0x34, 0x9b, 0x35, 0x58, 0x81,
+ 0xbc, 0x01, 0x05, 0xfc, 0xe1, 0xe6, 0x9e, 0xb2, 0xf7, 0xb4, 0x91, 0xc7, 0x7a, 0x9e, 0x70, 0x12,
+ 0x48, 0xf6, 0x2d, 0x67, 0xc2, 0x3d, 0x19, 0xfe, 0x26, 0x5f, 0x80, 0x3c, 0xfd, 0xeb, 0x32, 0x93,
+ 0xde, 0x85, 0x15, 0x68, 0x35, 0x27, 0xbe, 0x05, 0x73, 0xb8, 0xfb, 0x1e, 0x2c, 0xe3, 0x3d, 0x63,
+ 0x14, 0x58, 0x03, 0x07, 0x96, 0x20, 0xc3, 0x5c, 0xc1, 0x04, 0xbf, 0x96, 0xe5, 0x0c, 0xb7, 0x48,
+ 0xdd, 0x2b, 0x66, 0x02, 0x2c, 0xdc, 0x67, 0x0c, 0x5e, 0x2a, 0x3f, 0x80, 0x2c, 0x46, 0xa9, 0xd6,
+ 0xa0, 0x4b, 0xca, 0x20, 0xf5, 0x4a, 0x26, 0xc6, 0xc8, 0x45, 0xe1, 0x9a, 0xcf, 0x9b, 0x57, 0x36,
+ 0x0c, 0xa9, 0xb7, 0xb4, 0x00, 0xd2, 0x06, 0xbd, 0x77, 0x1f, 0x73, 0x37, 0x2d, 0x1d, 0x97, 0x5b,
+ 0xdc, 0xc4, 0xb6, 0xf9, 0x32, 0xce, 0xc4, 0xb6, 0xf9, 0x92, 0x99, 0xb8, 0x3a, 0x65, 0x82, 0x96,
+ 0x4e, 0xf8, 0xa7, 0x43, 0xe9, 0xa4, 0x5c, 0x85, 0x39, 0x3c, 0x9e, 0x7d, 0xab, 0xb7, 0x63, 0xf7,
+ 0x2d, 0xbc, 0xe7, 0x1f, 0xe2, 0x3d, 0x49, 0x32, 0xa4, 0x43, 0xba, 0x07, 0xe6, 0x71, 0xe7, 0x80,
+ 0xdd, 0x38, 0xb3, 0x06, 0x2b, 0x94, 0x3f, 0x4b, 0xc2, 0x3c, 0x77, 0xad, 0xef, 0xf7, 0x9d, 0x67,
+ 0x5b, 0x9d, 0x11, 0x79, 0x0a, 0x05, 0xea, 0x55, 0xdb, 0xc3, 0xce, 0x68, 0x44, 0x8f, 0xaf, 0x84,
+ 0x57, 0x8d, 0xeb, 0x53, 0xae, 0x9a, 0xe3, 0x57, 0xb6, 0x3b, 0x43, 0x73, 0x8b, 0x61, 0x1b, 0x96,
+ 0x33, 0x3e, 0x31, 0xf2, 0x96, 0x5f, 0x43, 0x36, 0x21, 0x3f, 0x9c, 0xf4, 0x3c, 0x63, 0x32, 0x1a,
+ 0xab, 0x44, 0x1a, 0xdb, 0x9a, 0xf4, 0x02, 0xb6, 0x60, 0xe8, 0x55, 0xd0, 0x81, 0x51, 0x7f, 0xec,
+ 0xd9, 0x4a, 0x9c, 0x32, 0x30, 0xea, 0x3a, 0x82, 0x03, 0xdb, 0xf7, 0x6b, 0xc8, 0x63, 0x00, 0x7a,
+ 0xbc, 0x1c, 0x9b, 0xa6, 0x4e, 0xa8, 0xa0, 0xbc, 0xf6, 0x66, 0xa4, 0xad, 0x5d, 0x67, 0xbc, 0x67,
+ 0xef, 0x3a, 0x63, 0x66, 0x88, 0x1e, 0x4c, 0x2c, 0x2e, 0xbd, 0x03, 0x4a, 0x78, 0xfe, 0xe2, 0x8d,
+ 0x3c, 0x35, 0xe3, 0x46, 0x9e, 0xe3, 0x37, 0xf2, 0xba, 0x7c, 0x57, 0x5a, 0x7a, 0x0f, 0x8a, 0xa1,
+ 0x29, 0x8b, 0x74, 0xc2, 0xe8, 0xb7, 0x45, 0x7a, 0x5e, 0x7b, 0x5d, 0xf8, 0x9c, 0x2d, 0x6e, 0xb8,
+ 0x68, 0xf7, 0x1d, 0x50, 0xc2, 0xd3, 0x17, 0x0d, 0x67, 0x63, 0x32, 0x05, 0xe4, 0xdf, 0x87, 0xb9,
+ 0xc0, 0x94, 0x45, 0x72, 0xee, 0x94, 0x49, 0x95, 0x7f, 0x29, 0x05, 0xa9, 0x96, 0x65, 0xda, 0x87,
+ 0xe4, 0xf5, 0x60, 0x9c, 0x7c, 0x72, 0xce, 0x8d, 0x91, 0x17, 0x43, 0x31, 0xf2, 0xc9, 0x39, 0x2f,
+ 0x42, 0x5e, 0x0c, 0x45, 0x48, 0xb7, 0xa9, 0xa6, 0x93, 0xcb, 0x53, 0xf1, 0xf1, 0xc9, 0x39, 0x21,
+ 0x38, 0x5e, 0x9e, 0x0a, 0x8e, 0x7e, 0x73, 0x4d, 0xa7, 0x0e, 0x35, 0x18, 0x19, 0x9f, 0x9c, 0xf3,
+ 0xa3, 0xe2, 0x72, 0x38, 0x2a, 0x7a, 0x8d, 0x35, 0x9d, 0x0d, 0x49, 0x88, 0x88, 0x38, 0x24, 0x16,
+ 0x0b, 0x97, 0xc3, 0xb1, 0x10, 0x79, 0x3c, 0x0a, 0x2e, 0x87, 0xa3, 0x20, 0x36, 0xf2, 0xa8, 0x77,
+ 0x31, 0x14, 0xf5, 0xd0, 0x28, 0x0b, 0x77, 0xcb, 0xe1, 0x70, 0xc7, 0x78, 0xc2, 0x48, 0xc5, 0x58,
+ 0xe7, 0x35, 0xd6, 0x74, 0xa2, 0x85, 0x02, 0x5d, 0xf4, 0x6d, 0x1f, 0xf7, 0x02, 0x9d, 0xbe, 0x4e,
+ 0x97, 0xcd, 0xbd, 0x88, 0x16, 0x63, 0xbe, 0xf8, 0xe3, 0x6a, 0xba, 0x17, 0x31, 0x0d, 0x32, 0x87,
+ 0x3c, 0x01, 0x56, 0xd0, 0x73, 0x09, 0xb2, 0xc4, 0xcd, 0x5f, 0x69, 0xb6, 0xd1, 0x83, 0xd1, 0x79,
+ 0x1d, 0xb2, 0x3b, 0x7d, 0x05, 0xe6, 0x9a, 0xed, 0xa7, 0x9d, 0x71, 0xcf, 0x9c, 0x38, 0xed, 0xbd,
+ 0x4e, 0xcf, 0x7b, 0x44, 0xa0, 0xfb, 0x9f, 0x6f, 0xf2, 0x96, 0xbd, 0x4e, 0x8f, 0x5c, 0x70, 0xc5,
+ 0xd5, 0xc5, 0x56, 0x89, 0xcb, 0x6b, 0xe9, 0x75, 0xba, 0x68, 0xcc, 0x18, 0xfa, 0xc2, 0x05, 0xee,
+ 0x0b, 0x1f, 0x66, 0x20, 0x75, 0x64, 0xf5, 0x6d, 0xeb, 0x61, 0x0e, 0x32, 0x8e, 0x3d, 0x1e, 0x76,
+ 0x1c, 0xbb, 0xfc, 0x23, 0x09, 0xe0, 0x91, 0x3d, 0x1c, 0x1e, 0x59, 0xfd, 0x17, 0x47, 0x26, 0xb9,
+ 0x02, 0xf9, 0x61, 0xe7, 0xb9, 0xd9, 0x1e, 0x9a, 0xed, 0x83, 0xb1, 0x7b, 0x0e, 0x72, 0xb4, 0x6a,
+ 0xcb, 0x7c, 0x34, 0x3e, 0x21, 0x25, 0xf7, 0x8a, 0x8e, 0xda, 0x41, 0x49, 0xf2, 0x2b, 0xfb, 0x22,
+ 0xbf, 0x74, 0xa6, 0xf9, 0x1e, 0xba, 0xd7, 0x4e, 0x96, 0x47, 0x64, 0xf8, 0xee, 0x61, 0x89, 0x4a,
+ 0xde, 0x31, 0x87, 0xa3, 0xf6, 0x01, 0x4a, 0x85, 0xca, 0x21, 0x45, 0xcb, 0x8f, 0xc8, 0x6d, 0x48,
+ 0x1c, 0xd8, 0x03, 0x14, 0xc9, 0x29, 0xfb, 0x42, 0x71, 0xe4, 0x0d, 0x48, 0x0c, 0x27, 0x4c, 0x36,
+ 0x79, 0x6d, 0x41, 0xb8, 0x27, 0xb0, 0xd0, 0x44, 0x61, 0xc3, 0x49, 0xcf, 0x9b, 0xf7, 0x8d, 0x22,
+ 0x24, 0x9a, 0xad, 0x16, 0x8d, 0xfd, 0xcd, 0x56, 0x6b, 0x4d, 0x91, 0xea, 0x5f, 0x82, 0x6c, 0x6f,
+ 0x6c, 0x9a, 0xd4, 0x3d, 0xcc, 0xce, 0x39, 0x3e, 0xc4, 0x58, 0xe7, 0x81, 0xea, 0x5b, 0x90, 0x39,
+ 0x60, 0x59, 0x07, 0x89, 0x48, 0x6b, 0x4b, 0x7f, 0xc8, 0x1e, 0x55, 0x96, 0xfc, 0xe6, 0x70, 0x9e,
+ 0x62, 0xb8, 0x36, 0xea, 0x3b, 0x90, 0x1b, 0xb7, 0x4f, 0x33, 0xf8, 0x31, 0x8b, 0x2e, 0x71, 0x06,
+ 0xb3, 0x63, 0x5e, 0x55, 0x6f, 0xc0, 0x82, 0x65, 0xbb, 0xdf, 0x50, 0xda, 0x5d, 0x76, 0xc6, 0x2e,
+ 0x4e, 0x5f, 0xe5, 0x5c, 0xe3, 0x26, 0xfb, 0x6e, 0x69, 0xd9, 0xbc, 0x81, 0x9d, 0xca, 0xfa, 0x23,
+ 0x50, 0x04, 0x33, 0x98, 0x7a, 0xc6, 0x59, 0x39, 0x64, 0x1f, 0x4a, 0x3d, 0x2b, 0x78, 0xee, 0x43,
+ 0x46, 0xd8, 0xc9, 0x8c, 0x31, 0xd2, 0x63, 0x5f, 0x9d, 0x3d, 0x23, 0xe8, 0xea, 0xa6, 0x8d, 0x50,
+ 0x5f, 0x13, 0x6d, 0xe4, 0x19, 0xfb, 0x20, 0x2d, 0x1a, 0xa9, 0xe9, 0xa1, 0x55, 0x39, 0x3a, 0x75,
+ 0x28, 0x7d, 0xf6, 0x3d, 0xd9, 0xb3, 0xc2, 0x1c, 0xe0, 0x0c, 0x33, 0xf1, 0x83, 0xf9, 0x90, 0x7d,
+ 0x6a, 0x0e, 0x98, 0x99, 0x1a, 0xcd, 0xe4, 0xd4, 0xd1, 0x3c, 0x67, 0xdf, 0x75, 0x3d, 0x33, 0xbb,
+ 0xb3, 0x46, 0x33, 0x39, 0x75, 0x34, 0x03, 0xf6, 0xc5, 0x37, 0x60, 0xa6, 0xa6, 0xd7, 0x37, 0x80,
+ 0x88, 0x5b, 0xcd, 0xe3, 0x44, 0x8c, 0x9d, 0x21, 0xfb, 0x8e, 0xef, 0x6f, 0x36, 0xa3, 0xcc, 0x32,
+ 0x14, 0x3f, 0x20, 0x8b, 0x7d, 0xe2, 0x0f, 0x1a, 0xaa, 0xe9, 0xf5, 0x4d, 0x38, 0x2f, 0x4e, 0xec,
+ 0x0c, 0x43, 0xb2, 0x55, 0xa9, 0x52, 0x34, 0x16, 0xfc, 0xa9, 0x71, 0xce, 0x4c, 0x53, 0xf1, 0x83,
+ 0x1a, 0xa9, 0x52, 0x45, 0x99, 0x32, 0x55, 0xd3, 0xeb, 0x0f, 0xa0, 0x28, 0x98, 0xda, 0xc7, 0x08,
+ 0x1d, 0x6d, 0xe6, 0x05, 0xfb, 0x5f, 0x0b, 0xcf, 0x0c, 0x8d, 0xe8, 0xe1, 0x1d, 0xe3, 0x31, 0x2e,
+ 0xda, 0xc8, 0x98, 0xfd, 0xa3, 0x80, 0x3f, 0x16, 0x64, 0x84, 0x8e, 0x04, 0xe6, 0xdf, 0x71, 0x56,
+ 0x26, 0xec, 0x5f, 0x08, 0xfc, 0xa1, 0x50, 0x42, 0xbd, 0x1f, 0x98, 0x8e, 0x49, 0x83, 0x5c, 0x8c,
+ 0x0d, 0x07, 0x3d, 0xf2, 0x9b, 0x91, 0x80, 0x15, 0xf1, 0x81, 0x44, 0x98, 0x36, 0x2d, 0xd6, 0x37,
+ 0x61, 0xfe, 0xec, 0x0e, 0xe9, 0x63, 0x89, 0x65, 0xcb, 0xd5, 0x15, 0x9a, 0x50, 0x1b, 0x73, 0xdd,
+ 0x80, 0x5f, 0x6a, 0xc0, 0xdc, 0x99, 0x9d, 0xd2, 0x27, 0x12, 0xcb, 0x39, 0xa9, 0x25, 0xa3, 0xd0,
+ 0x0d, 0x7a, 0xa6, 0xb9, 0x33, 0xbb, 0xa5, 0x4f, 0x25, 0xf6, 0x40, 0xa1, 0x6b, 0x9e, 0x11, 0xd7,
+ 0x33, 0xcd, 0x9d, 0xd9, 0x2d, 0x7d, 0x95, 0x65, 0x94, 0xb2, 0x5e, 0x15, 0x8d, 0xa0, 0x2f, 0x98,
+ 0x3f, 0xbb, 0x5b, 0xfa, 0x9a, 0x84, 0x8f, 0x15, 0xb2, 0xae, 0x7b, 0xeb, 0xe2, 0x79, 0xa6, 0xf9,
+ 0xb3, 0xbb, 0xa5, 0xaf, 0x4b, 0xf8, 0xa4, 0x21, 0xeb, 0xeb, 0x01, 0x33, 0xc1, 0xd1, 0x9c, 0xee,
+ 0x96, 0xbe, 0x21, 0xe1, 0x2b, 0x83, 0xac, 0xd7, 0x3c, 0x33, 0xbb, 0x53, 0xa3, 0x39, 0xdd, 0x2d,
+ 0x7d, 0x13, 0x6f, 0xf1, 0x75, 0x59, 0xbf, 0x13, 0x30, 0x83, 0x9e, 0xa9, 0xf8, 0x0a, 0x6e, 0xe9,
+ 0x5b, 0x12, 0x3e, 0x06, 0xc9, 0xfa, 0x5d, 0xc3, 0xed, 0xdd, 0xf7, 0x4c, 0xc5, 0x57, 0x70, 0x4b,
+ 0x9f, 0x49, 0xf8, 0x66, 0x24, 0xeb, 0xf7, 0x82, 0x86, 0xd0, 0x33, 0x29, 0xaf, 0xe2, 0x96, 0xbe,
+ 0x4d, 0x2d, 0x15, 0xeb, 0xf2, 0xfa, 0xaa, 0xe1, 0x0e, 0x40, 0xf0, 0x4c, 0xca, 0xab, 0xb8, 0xa5,
+ 0xef, 0x50, 0x53, 0x4a, 0x5d, 0x5e, 0x5f, 0x0b, 0x99, 0xaa, 0xe9, 0xf5, 0x47, 0x50, 0x38, 0xab,
+ 0x5b, 0xfa, 0xae, 0xf8, 0x16, 0x97, 0xef, 0x0a, 0xbe, 0x69, 0x47, 0xd8, 0xb3, 0x53, 0x1d, 0xd3,
+ 0xf7, 0x30, 0xc7, 0xa9, 0xcf, 0x3d, 0x61, 0xef, 0x55, 0x8c, 0xe0, 0x6f, 0x1f, 0x73, 0x53, 0x5b,
+ 0xfe, 0xf9, 0x38, 0xd5, 0x47, 0x7d, 0x5f, 0xc2, 0x47, 0xad, 0x02, 0x37, 0x88, 0x78, 0xef, 0xa4,
+ 0x30, 0x87, 0xf5, 0xa1, 0x3f, 0xcb, 0xd3, 0xbc, 0xd5, 0x0f, 0xa4, 0x57, 0x71, 0x57, 0xf5, 0x44,
+ 0x6b, 0xbb, 0xe1, 0x2d, 0x06, 0xd6, 0xbc, 0x0d, 0xc9, 0x63, 0x6d, 0x75, 0x4d, 0xbc, 0x92, 0x89,
+ 0x6f, 0xb9, 0xcc, 0x49, 0xe5, 0xb5, 0xa2, 0xf0, 0xdc, 0x3d, 0x1c, 0x39, 0x27, 0x06, 0xb2, 0x38,
+ 0x5b, 0x8b, 0x64, 0x7f, 0x12, 0xc3, 0xd6, 0x38, 0xbb, 0x1a, 0xc9, 0xfe, 0x34, 0x86, 0x5d, 0xe5,
+ 0x6c, 0x3d, 0x92, 0xfd, 0xd5, 0x18, 0xb6, 0xce, 0xd9, 0xeb, 0x91, 0xec, 0xaf, 0xc5, 0xb0, 0xd7,
+ 0x39, 0xbb, 0x16, 0xc9, 0xfe, 0x7a, 0x0c, 0xbb, 0xc6, 0xd9, 0x77, 0x22, 0xd9, 0xdf, 0x88, 0x61,
+ 0xdf, 0xe1, 0xec, 0xbb, 0x91, 0xec, 0x6f, 0xc6, 0xb0, 0xef, 0x72, 0xf6, 0xbd, 0x48, 0xf6, 0xb7,
+ 0x62, 0xd8, 0xf7, 0x18, 0x7b, 0x6d, 0x35, 0x92, 0xfd, 0x59, 0x34, 0x7b, 0x6d, 0x95, 0xb3, 0xa3,
+ 0xb5, 0xf6, 0xed, 0x18, 0x36, 0xd7, 0xda, 0x5a, 0xb4, 0xd6, 0xbe, 0x13, 0xc3, 0xe6, 0x5a, 0x5b,
+ 0x8b, 0xd6, 0xda, 0x77, 0x63, 0xd8, 0x5c, 0x6b, 0x6b, 0xd1, 0x5a, 0xfb, 0x5e, 0x0c, 0x9b, 0x6b,
+ 0x6d, 0x2d, 0x5a, 0x6b, 0xdf, 0x8f, 0x61, 0x73, 0xad, 0xad, 0x45, 0x6b, 0xed, 0x07, 0x31, 0x6c,
+ 0xae, 0xb5, 0xb5, 0x68, 0xad, 0xfd, 0x51, 0x0c, 0x9b, 0x6b, 0x6d, 0x2d, 0x5a, 0x6b, 0x7f, 0x1c,
+ 0xc3, 0xe6, 0x5a, 0x5b, 0x8b, 0xd6, 0xda, 0x9f, 0xc4, 0xb0, 0xb9, 0xd6, 0xb4, 0x68, 0xad, 0xfd,
+ 0x69, 0x34, 0x5b, 0xe3, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6, 0x67, 0x31, 0x6c, 0xae, 0x35, 0x2d, 0x5a,
+ 0x6b, 0x7f, 0x1e, 0xc3, 0xe6, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6, 0xc3, 0x18, 0x36, 0xd7, 0x9a, 0x16,
+ 0xad, 0xb5, 0xbf, 0x88, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xcb, 0x18, 0x36, 0xd7, 0x9a,
+ 0x16, 0xad, 0xb5, 0xbf, 0x8a, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xeb, 0x18, 0x36, 0xd7,
+ 0x9a, 0x16, 0xad, 0xb5, 0xbf, 0x89, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xdb, 0x18, 0x36,
+ 0xd7, 0x5a, 0x35, 0x5a, 0x6b, 0x7f, 0x17, 0xcd, 0xae, 0x72, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0xf7,
+ 0x31, 0x6c, 0xae, 0xb5, 0x6a, 0xb4, 0xd6, 0xfe, 0x21, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda,
+ 0x3f, 0xc6, 0xb0, 0xb9, 0xd6, 0xaa, 0xd1, 0x5a, 0xfb, 0x51, 0x0c, 0x9b, 0x6b, 0xad, 0x1a, 0xad,
+ 0xb5, 0x7f, 0x8a, 0x61, 0x73, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0xcf, 0x31, 0x6c, 0xae, 0xb5, 0x6a,
+ 0xb4, 0xd6, 0xfe, 0x25, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda, 0xbf, 0xc6, 0xb0, 0xb9, 0xd6,
+ 0xaa, 0xd1, 0x5a, 0xfb, 0xb7, 0x18, 0x36, 0xd7, 0x9a, 0x1e, 0xad, 0xb5, 0x7f, 0x8f, 0x66, 0xeb,
+ 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x23, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed, 0x3f, 0x63,
+ 0xd8, 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x2b, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed, 0xbf,
+ 0x63, 0xd8, 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x27, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed,
+ 0xc7, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, 0x6b, 0x3f, 0x89, 0x61, 0x73, 0xad, 0xe9, 0xd1, 0x5a,
+ 0xfb, 0xdf, 0x18, 0x36, 0xd7, 0x9a, 0x1e, 0xad, 0xb5, 0xff, 0x8b, 0x61, 0x73, 0xad, 0xad, 0x47,
+ 0x6b, 0xed, 0xff, 0xa3, 0xd9, 0xeb, 0xab, 0x3f, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x00, 0xcd,
+ 0x32, 0x57, 0x39, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/proto/testdata/test.proto b/vendor/github.com/golang/protobuf/proto/testdata/test.proto
new file mode 100644
index 0000000..70e3cfc
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/testdata/test.proto
@@ -0,0 +1,548 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A feature-rich test file for the protocol compiler and libraries.
+
+syntax = "proto2";
+
+package testdata;
+
+enum FOO { FOO1 = 1; };
+
+message GoEnum {
+ required FOO foo = 1;
+}
+
+message GoTestField {
+ required string Label = 1;
+ required string Type = 2;
+}
+
+message GoTest {
+ // An enum, for completeness.
+ enum KIND {
+ VOID = 0;
+
+ // Basic types
+ BOOL = 1;
+ BYTES = 2;
+ FINGERPRINT = 3;
+ FLOAT = 4;
+ INT = 5;
+ STRING = 6;
+ TIME = 7;
+
+ // Groupings
+ TUPLE = 8;
+ ARRAY = 9;
+ MAP = 10;
+
+ // Table types
+ TABLE = 11;
+
+ // Functions
+ FUNCTION = 12; // last tag
+ };
+
+ // Some typical parameters
+ required KIND Kind = 1;
+ optional string Table = 2;
+ optional int32 Param = 3;
+
+ // Required, repeated and optional foreign fields.
+ required GoTestField RequiredField = 4;
+ repeated GoTestField RepeatedField = 5;
+ optional GoTestField OptionalField = 6;
+
+ // Required fields of all basic types
+ required bool F_Bool_required = 10;
+ required int32 F_Int32_required = 11;
+ required int64 F_Int64_required = 12;
+ required fixed32 F_Fixed32_required = 13;
+ required fixed64 F_Fixed64_required = 14;
+ required uint32 F_Uint32_required = 15;
+ required uint64 F_Uint64_required = 16;
+ required float F_Float_required = 17;
+ required double F_Double_required = 18;
+ required string F_String_required = 19;
+ required bytes F_Bytes_required = 101;
+ required sint32 F_Sint32_required = 102;
+ required sint64 F_Sint64_required = 103;
+
+ // Repeated fields of all basic types
+ repeated bool F_Bool_repeated = 20;
+ repeated int32 F_Int32_repeated = 21;
+ repeated int64 F_Int64_repeated = 22;
+ repeated fixed32 F_Fixed32_repeated = 23;
+ repeated fixed64 F_Fixed64_repeated = 24;
+ repeated uint32 F_Uint32_repeated = 25;
+ repeated uint64 F_Uint64_repeated = 26;
+ repeated float F_Float_repeated = 27;
+ repeated double F_Double_repeated = 28;
+ repeated string F_String_repeated = 29;
+ repeated bytes F_Bytes_repeated = 201;
+ repeated sint32 F_Sint32_repeated = 202;
+ repeated sint64 F_Sint64_repeated = 203;
+
+ // Optional fields of all basic types
+ optional bool F_Bool_optional = 30;
+ optional int32 F_Int32_optional = 31;
+ optional int64 F_Int64_optional = 32;
+ optional fixed32 F_Fixed32_optional = 33;
+ optional fixed64 F_Fixed64_optional = 34;
+ optional uint32 F_Uint32_optional = 35;
+ optional uint64 F_Uint64_optional = 36;
+ optional float F_Float_optional = 37;
+ optional double F_Double_optional = 38;
+ optional string F_String_optional = 39;
+ optional bytes F_Bytes_optional = 301;
+ optional sint32 F_Sint32_optional = 302;
+ optional sint64 F_Sint64_optional = 303;
+
+ // Default-valued fields of all basic types
+ optional bool F_Bool_defaulted = 40 [default=true];
+ optional int32 F_Int32_defaulted = 41 [default=32];
+ optional int64 F_Int64_defaulted = 42 [default=64];
+ optional fixed32 F_Fixed32_defaulted = 43 [default=320];
+ optional fixed64 F_Fixed64_defaulted = 44 [default=640];
+ optional uint32 F_Uint32_defaulted = 45 [default=3200];
+ optional uint64 F_Uint64_defaulted = 46 [default=6400];
+ optional float F_Float_defaulted = 47 [default=314159.];
+ optional double F_Double_defaulted = 48 [default=271828.];
+ optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"];
+ optional bytes F_Bytes_defaulted = 401 [default="Bignose"];
+ optional sint32 F_Sint32_defaulted = 402 [default = -32];
+ optional sint64 F_Sint64_defaulted = 403 [default = -64];
+
+ // Packed repeated fields (no string or bytes).
+ repeated bool F_Bool_repeated_packed = 50 [packed=true];
+ repeated int32 F_Int32_repeated_packed = 51 [packed=true];
+ repeated int64 F_Int64_repeated_packed = 52 [packed=true];
+ repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true];
+ repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true];
+ repeated uint32 F_Uint32_repeated_packed = 55 [packed=true];
+ repeated uint64 F_Uint64_repeated_packed = 56 [packed=true];
+ repeated float F_Float_repeated_packed = 57 [packed=true];
+ repeated double F_Double_repeated_packed = 58 [packed=true];
+ repeated sint32 F_Sint32_repeated_packed = 502 [packed=true];
+ repeated sint64 F_Sint64_repeated_packed = 503 [packed=true];
+
+ // Required, repeated, and optional groups.
+ required group RequiredGroup = 70 {
+ required string RequiredField = 71;
+ };
+
+ repeated group RepeatedGroup = 80 {
+ required string RequiredField = 81;
+ };
+
+ optional group OptionalGroup = 90 {
+ required string RequiredField = 91;
+ };
+}
+
+// For testing a group containing a required field.
+message GoTestRequiredGroupField {
+ required group Group = 1 {
+ required int32 Field = 2;
+ };
+}
+
+// For testing skipping of unrecognized fields.
+// Numbers are all big, larger than tag numbers in GoTestField,
+// the message used in the corresponding test.
+message GoSkipTest {
+ required int32 skip_int32 = 11;
+ required fixed32 skip_fixed32 = 12;
+ required fixed64 skip_fixed64 = 13;
+ required string skip_string = 14;
+ required group SkipGroup = 15 {
+ required int32 group_int32 = 16;
+ required string group_string = 17;
+ }
+}
+
+// For testing packed/non-packed decoder switching.
+// A serialized instance of one should be deserializable as the other.
+message NonPackedTest {
+ repeated int32 a = 1;
+}
+
+message PackedTest {
+ repeated int32 b = 1 [packed=true];
+}
+
+message MaxTag {
+ // Maximum possible tag number.
+ optional string last_field = 536870911;
+}
+
+message OldMessage {
+ message Nested {
+ optional string name = 1;
+ }
+ optional Nested nested = 1;
+
+ optional int32 num = 2;
+}
+
+// NewMessage is wire compatible with OldMessage;
+// imagine it as a future version.
+message NewMessage {
+ message Nested {
+ optional string name = 1;
+ optional string food_group = 2;
+ }
+ optional Nested nested = 1;
+
+ // This is an int32 in OldMessage.
+ optional int64 num = 2;
+}
+
+// Smaller tests for ASCII formatting.
+
+message InnerMessage {
+ required string host = 1;
+ optional int32 port = 2 [default=4000];
+ optional bool connected = 3;
+}
+
+message OtherMessage {
+ optional int64 key = 1;
+ optional bytes value = 2;
+ optional float weight = 3;
+ optional InnerMessage inner = 4;
+
+ extensions 100 to max;
+}
+
+message RequiredInnerMessage {
+ required InnerMessage leo_finally_won_an_oscar = 1;
+}
+
+message MyMessage {
+ required int32 count = 1;
+ optional string name = 2;
+ optional string quote = 3;
+ repeated string pet = 4;
+ optional InnerMessage inner = 5;
+ repeated OtherMessage others = 6;
+ optional RequiredInnerMessage we_must_go_deeper = 13;
+ repeated InnerMessage rep_inner = 12;
+
+ enum Color {
+ RED = 0;
+ GREEN = 1;
+ BLUE = 2;
+ };
+ optional Color bikeshed = 7;
+
+ optional group SomeGroup = 8 {
+ optional int32 group_field = 9;
+ }
+
+ // This field becomes [][]byte in the generated code.
+ repeated bytes rep_bytes = 10;
+
+ optional double bigfloat = 11;
+
+ extensions 100 to max;
+}
+
+message Ext {
+ extend MyMessage {
+ optional Ext more = 103;
+ optional string text = 104;
+ optional int32 number = 105;
+ }
+
+ optional string data = 1;
+}
+
+extend MyMessage {
+ repeated string greeting = 106;
+}
+
+message ComplexExtension {
+ optional int32 first = 1;
+ optional int32 second = 2;
+ repeated int32 third = 3;
+}
+
+extend OtherMessage {
+ optional ComplexExtension complex = 200;
+ repeated ComplexExtension r_complex = 201;
+}
+
+message DefaultsMessage {
+ enum DefaultsEnum {
+ ZERO = 0;
+ ONE = 1;
+ TWO = 2;
+ };
+ extensions 100 to max;
+}
+
+extend DefaultsMessage {
+ optional double no_default_double = 101;
+ optional float no_default_float = 102;
+ optional int32 no_default_int32 = 103;
+ optional int64 no_default_int64 = 104;
+ optional uint32 no_default_uint32 = 105;
+ optional uint64 no_default_uint64 = 106;
+ optional sint32 no_default_sint32 = 107;
+ optional sint64 no_default_sint64 = 108;
+ optional fixed32 no_default_fixed32 = 109;
+ optional fixed64 no_default_fixed64 = 110;
+ optional sfixed32 no_default_sfixed32 = 111;
+ optional sfixed64 no_default_sfixed64 = 112;
+ optional bool no_default_bool = 113;
+ optional string no_default_string = 114;
+ optional bytes no_default_bytes = 115;
+ optional DefaultsMessage.DefaultsEnum no_default_enum = 116;
+
+ optional double default_double = 201 [default = 3.1415];
+ optional float default_float = 202 [default = 3.14];
+ optional int32 default_int32 = 203 [default = 42];
+ optional int64 default_int64 = 204 [default = 43];
+ optional uint32 default_uint32 = 205 [default = 44];
+ optional uint64 default_uint64 = 206 [default = 45];
+ optional sint32 default_sint32 = 207 [default = 46];
+ optional sint64 default_sint64 = 208 [default = 47];
+ optional fixed32 default_fixed32 = 209 [default = 48];
+ optional fixed64 default_fixed64 = 210 [default = 49];
+ optional sfixed32 default_sfixed32 = 211 [default = 50];
+ optional sfixed64 default_sfixed64 = 212 [default = 51];
+ optional bool default_bool = 213 [default = true];
+ optional string default_string = 214 [default = "Hello, string"];
+ optional bytes default_bytes = 215 [default = "Hello, bytes"];
+ optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE];
+}
+
+message MyMessageSet {
+ option message_set_wire_format = true;
+ extensions 100 to max;
+}
+
+message Empty {
+}
+
+extend MyMessageSet {
+ optional Empty x201 = 201;
+ optional Empty x202 = 202;
+ optional Empty x203 = 203;
+ optional Empty x204 = 204;
+ optional Empty x205 = 205;
+ optional Empty x206 = 206;
+ optional Empty x207 = 207;
+ optional Empty x208 = 208;
+ optional Empty x209 = 209;
+ optional Empty x210 = 210;
+ optional Empty x211 = 211;
+ optional Empty x212 = 212;
+ optional Empty x213 = 213;
+ optional Empty x214 = 214;
+ optional Empty x215 = 215;
+ optional Empty x216 = 216;
+ optional Empty x217 = 217;
+ optional Empty x218 = 218;
+ optional Empty x219 = 219;
+ optional Empty x220 = 220;
+ optional Empty x221 = 221;
+ optional Empty x222 = 222;
+ optional Empty x223 = 223;
+ optional Empty x224 = 224;
+ optional Empty x225 = 225;
+ optional Empty x226 = 226;
+ optional Empty x227 = 227;
+ optional Empty x228 = 228;
+ optional Empty x229 = 229;
+ optional Empty x230 = 230;
+ optional Empty x231 = 231;
+ optional Empty x232 = 232;
+ optional Empty x233 = 233;
+ optional Empty x234 = 234;
+ optional Empty x235 = 235;
+ optional Empty x236 = 236;
+ optional Empty x237 = 237;
+ optional Empty x238 = 238;
+ optional Empty x239 = 239;
+ optional Empty x240 = 240;
+ optional Empty x241 = 241;
+ optional Empty x242 = 242;
+ optional Empty x243 = 243;
+ optional Empty x244 = 244;
+ optional Empty x245 = 245;
+ optional Empty x246 = 246;
+ optional Empty x247 = 247;
+ optional Empty x248 = 248;
+ optional Empty x249 = 249;
+ optional Empty x250 = 250;
+}
+
+message MessageList {
+ repeated group Message = 1 {
+ required string name = 2;
+ required int32 count = 3;
+ }
+}
+
+message Strings {
+ optional string string_field = 1;
+ optional bytes bytes_field = 2;
+}
+
+message Defaults {
+ enum Color {
+ RED = 0;
+ GREEN = 1;
+ BLUE = 2;
+ }
+
+ // Default-valued fields of all basic types.
+ // Same as GoTest, but copied here to make testing easier.
+ optional bool F_Bool = 1 [default=true];
+ optional int32 F_Int32 = 2 [default=32];
+ optional int64 F_Int64 = 3 [default=64];
+ optional fixed32 F_Fixed32 = 4 [default=320];
+ optional fixed64 F_Fixed64 = 5 [default=640];
+ optional uint32 F_Uint32 = 6 [default=3200];
+ optional uint64 F_Uint64 = 7 [default=6400];
+ optional float F_Float = 8 [default=314159.];
+ optional double F_Double = 9 [default=271828.];
+ optional string F_String = 10 [default="hello, \"world!\"\n"];
+ optional bytes F_Bytes = 11 [default="Bignose"];
+ optional sint32 F_Sint32 = 12 [default=-32];
+ optional sint64 F_Sint64 = 13 [default=-64];
+ optional Color F_Enum = 14 [default=GREEN];
+
+ // More fields with crazy defaults.
+ optional float F_Pinf = 15 [default=inf];
+ optional float F_Ninf = 16 [default=-inf];
+ optional float F_Nan = 17 [default=nan];
+
+ // Sub-message.
+ optional SubDefaults sub = 18;
+
+ // Redundant but explicit defaults.
+ optional string str_zero = 19 [default=""];
+}
+
+message SubDefaults {
+ optional int64 n = 1 [default=7];
+}
+
+message RepeatedEnum {
+ enum Color {
+ RED = 1;
+ }
+ repeated Color color = 1;
+}
+
+message MoreRepeated {
+ repeated bool bools = 1;
+ repeated bool bools_packed = 2 [packed=true];
+ repeated int32 ints = 3;
+ repeated int32 ints_packed = 4 [packed=true];
+ repeated int64 int64s_packed = 7 [packed=true];
+ repeated string strings = 5;
+ repeated fixed32 fixeds = 6;
+}
+
+// GroupOld and GroupNew have the same wire format.
+// GroupNew has a new field inside a group.
+
+message GroupOld {
+ optional group G = 101 {
+ optional int32 x = 2;
+ }
+}
+
+message GroupNew {
+ optional group G = 101 {
+ optional int32 x = 2;
+ optional int32 y = 3;
+ }
+}
+
+message FloatingPoint {
+ required double f = 1;
+ optional bool exact = 2;
+}
+
+message MessageWithMap {
+ map<int32, string> name_mapping = 1;
+ map<sint64, FloatingPoint> msg_mapping = 2;
+ map<bool, bytes> byte_mapping = 3;
+ map<string, string> str_to_str = 4;
+}
+
+message Oneof {
+ oneof union {
+ bool F_Bool = 1;
+ int32 F_Int32 = 2;
+ int64 F_Int64 = 3;
+ fixed32 F_Fixed32 = 4;
+ fixed64 F_Fixed64 = 5;
+ uint32 F_Uint32 = 6;
+ uint64 F_Uint64 = 7;
+ float F_Float = 8;
+ double F_Double = 9;
+ string F_String = 10;
+ bytes F_Bytes = 11;
+ sint32 F_Sint32 = 12;
+ sint64 F_Sint64 = 13;
+ MyMessage.Color F_Enum = 14;
+ GoTestField F_Message = 15;
+ group F_Group = 16 {
+ optional int32 x = 17;
+ }
+ int32 F_Largest_Tag = 536870911;
+ }
+
+ oneof tormato {
+ int32 value = 100;
+ }
+}
+
+message Communique {
+ optional bool make_me_cry = 1;
+
+ // This is a oneof, called "union".
+ oneof union {
+ int32 number = 5;
+ string name = 6;
+ bytes data = 7;
+ double temp_c = 8;
+ MyMessage.Color col = 9;
+ Strings msg = 10;
+ }
+}
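
The NonPackedTest/PackedTest pair above encodes the claim that packed and non-packed encodings of the same repeated field are mutually decodable. A minimal sketch of that round trip (editorial, not part of the vendored diff), assuming the generated testdata package is importable at its usual path:

package main

import (
    "fmt"
    "log"

    "github.com/golang/protobuf/proto"
    pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
    // Serialize the non-packed form of the repeated field...
    b, err := proto.Marshal(&pb.NonPackedTest{A: []int32{1, 2, 3}})
    if err != nil {
        log.Fatal(err)
    }
    // ...and deserialize the same bytes as the packed message.
    var packed pb.PackedTest
    if err := proto.Unmarshal(b, &packed); err != nil {
        log.Fatal(err)
    }
    fmt.Println(packed.B) // [1 2 3]
}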
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000..965876b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,854 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Print("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func requiresQuotes(u string) bool {
+	// When the type URL contains any character other than [0-9A-Za-z./_], it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+ type wkt interface {
+ XXX_WellKnownType() string
+ }
+ t, ok := sv.Addr().Interface().(wkt)
+ return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if the sv value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+ turl := sv.FieldByName("TypeUrl")
+ val := sv.FieldByName("Value")
+ if !turl.IsValid() || !val.IsValid() {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ b, ok := val.Interface().([]byte)
+ if !ok {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ parts := strings.Split(turl.String(), "/")
+ mt := MessageType(parts[len(parts)-1])
+ if mt == nil {
+ return false, nil
+ }
+ m := reflect.New(mt.Elem())
+ if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ u := turl.String()
+ if requiresQuotes(u) {
+ writeString(w, u)
+ } else {
+ w.Write([]byte(u))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.ind++
+ }
+ if err := tm.writeStruct(w, m.Elem()); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.ind--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+ if tm.ExpandAny && isAny(sv) {
+ if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+ return err
+ }
+ }
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("<nil>\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := tm.writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := tm.writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if _, ok := extendable(pv.Interface()); ok {
+ if err := tm.writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep, _ := extendable(pv.Interface())
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m, mu := ep.extensionsRead()
+ if m == nil {
+ return nil
+ }
+ mu.Lock()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ mu.Unlock()
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line).
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte("<nil>"))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: tm.Compact,
+ }
+
+ if etm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := tm.writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+ var buf bytes.Buffer
+ tm.Marshal(&buf, pb)
+ return buf.String()
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
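
A short usage sketch for the text marshaler defined in this file (editorial, not part of the vendored diff), reusing the testdata messages from test.proto above; the helper constructors proto.Int32 and proto.String come from this package, and the output shown in comments is indicative rather than byte-exact:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
    msg := &pb.MyMessage{
        Count: proto.Int32(4),
        Name:  proto.String("Dave"),
        Inner: &pb.InnerMessage{Host: proto.String("localhost")},
    }

    // Multi-line text format: one "name: value" per line, sub-messages in <...>.
    fmt.Print(proto.MarshalTextString(msg))
    // count: 4
    // name: "Dave"
    // inner: <
    //   host: "localhost"
    // >

    // Compact variant: the whole message on a single line.
    fmt.Println(proto.CompactTextString(msg))
    // count:4 name:"Dave" inner:<host:"localhost" >
}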
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..5e14513
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,895 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension or an Any.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ extName, err := p.consumeExtName()
+ if err != nil {
+ return err
+ }
+
+ if s := strings.LastIndex(extName, "/"); s >= 0 {
+ // If it contains a slash, it's an Any type URL.
+ messageName := extName[s+1:]
+ mt := MessageType(messageName)
+ if mt == nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+ }
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ v := reflect.New(mt.Elem())
+ if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+ return pe
+ }
+ b, err := Marshal(v.Interface().(Message))
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+ }
+ if fieldSet["type_url"] {
+ return p.errorf(anyRepeatedlyUnpacked, "type_url")
+ }
+ if fieldSet["value"] {
+ return p.errorf(anyRepeatedlyUnpacked, "value")
+ }
+ sv.FieldByName("TypeUrl").SetString(extName)
+ sv.FieldByName("Value").SetBytes(b)
+ fieldSet["type_url"] = true
+ fieldSet["value"] = true
+ continue
+ }
+
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == extName {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", extName)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(Message)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ field := sv.Field(oop.Field)
+ if !field.IsNil() {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+ }
+ field.Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order. See b/28924776 for a time
+ // this went wrong.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ case "value":
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ default:
+ p.back()
+ return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If extension name or type url is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("expected ']' or ',', found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // true/1/t/True or false/f/0/False.
+ switch tok.value {
+ case "true", "1", "t", "True":
+ fv.SetBool(true)
+ return nil
+ case "false", "0", "f", "False":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ err := um.UnmarshalText([]byte(s))
+ return err
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
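+
+// Editor's sketch (not upstream documentation): typical client-side use of
+// UnmarshalText, where yourpb.Person is a hypothetical protoc-gen-go message
+// type and the text literal is an assumed input.
+//
+//    msg := new(yourpb.Person)
+//    if err := proto.UnmarshalText(`name: "Dave" id: 42`, msg); err != nil {
+//        // A *RequiredNotSetError means only a required field was missing;
+//        // any other error indicates malformed text input.
+//        log.Fatal(err)
+//    }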
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser_test.go b/vendor/github.com/golang/protobuf/proto/text_parser_test.go
new file mode 100644
index 0000000..8f7cb4d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser_test.go
@@ -0,0 +1,673 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "math"
+ "reflect"
+ "testing"
+
+ . "github.com/golang/protobuf/proto"
+ proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+ . "github.com/golang/protobuf/proto/testdata"
+)
+
+type UnmarshalTextTest struct {
+ in string
+ err string // if "", no error expected
+ out *MyMessage
+}
+
+func buildExtStructTest(text string) UnmarshalTextTest {
+ msg := &MyMessage{
+ Count: Int32(42),
+ }
+ SetExtension(msg, E_Ext_More, &Ext{
+ Data: String("Hello, world!"),
+ })
+ return UnmarshalTextTest{in: text, out: msg}
+}
+
+func buildExtDataTest(text string) UnmarshalTextTest {
+ msg := &MyMessage{
+ Count: Int32(42),
+ }
+ SetExtension(msg, E_Ext_Text, String("Hello, world!"))
+ SetExtension(msg, E_Ext_Number, Int32(1729))
+ return UnmarshalTextTest{in: text, out: msg}
+}
+
+func buildExtRepStringTest(text string) UnmarshalTextTest {
+ msg := &MyMessage{
+ Count: Int32(42),
+ }
+ if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil {
+ panic(err)
+ }
+ return UnmarshalTextTest{in: text, out: msg}
+}
+
+var unMarshalTextTests = []UnmarshalTextTest{
+ // Basic
+ {
+ in: " count:42\n name:\"Dave\" ",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("Dave"),
+ },
+ },
+
+ // Empty quoted string
+ {
+ in: `count:42 name:""`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String(""),
+ },
+ },
+
+ // Quoted string concatenation with double quotes
+ {
+ in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("My name is elsewhere"),
+ },
+ },
+
+ // Quoted string concatenation with single quotes
+ {
+ in: "count:42 name: 'My name is '\n'elsewhere'",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("My name is elsewhere"),
+ },
+ },
+
+ // Quoted string concatenations with mixed quotes
+ {
+ in: "count:42 name: 'My name is '\n\"elsewhere\"",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("My name is elsewhere"),
+ },
+ },
+ {
+ in: "count:42 name: \"My name is \"\n'elsewhere'",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("My name is elsewhere"),
+ },
+ },
+
+ // Quoted string with escaped apostrophe
+ {
+ in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("HOLIDAY - New Year's Day"),
+ },
+ },
+
+ // Quoted string with single quote
+ {
+ in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String(`Roger "The Ramster" Ramjet`),
+ },
+ },
+
+ // Quoted string with all the accepted special characters from the C++ test
+ {
+ in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"),
+ },
+ },
+
+ // Quoted string with quoted backslash
+ {
+ in: `count:42 name: "\\'xyz"`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String(`\'xyz`),
+ },
+ },
+
+ // Quoted string with UTF-8 bytes.
+ {
+ in: "count:42 name: '\303\277\302\201\xAB'",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("\303\277\302\201\xAB"),
+ },
+ },
+
+ // Bad quoted string
+ {
+ in: `inner: < host: "\0" >` + "\n",
+ err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`,
+ },
+
+ // Number too large for int64
+ {
+ in: "count: 1 others { key: 123456789012345678901 }",
+ err: "line 1.23: invalid int64: 123456789012345678901",
+ },
+
+ // Number too large for int32
+ {
+ in: "count: 1234567890123",
+ err: "line 1.7: invalid int32: 1234567890123",
+ },
+
+ // Number in hexadecimal
+ {
+ in: "count: 0x2beef",
+ out: &MyMessage{
+ Count: Int32(0x2beef),
+ },
+ },
+
+ // Number in octal
+ {
+ in: "count: 024601",
+ out: &MyMessage{
+ Count: Int32(024601),
+ },
+ },
+
+ // Floating point number with "f" suffix
+ {
+ in: "count: 4 others:< weight: 17.0f >",
+ out: &MyMessage{
+ Count: Int32(4),
+ Others: []*OtherMessage{
+ {
+ Weight: Float32(17),
+ },
+ },
+ },
+ },
+
+ // Floating point positive infinity
+ {
+ in: "count: 4 bigfloat: inf",
+ out: &MyMessage{
+ Count: Int32(4),
+ Bigfloat: Float64(math.Inf(1)),
+ },
+ },
+
+ // Floating point negative infinity
+ {
+ in: "count: 4 bigfloat: -inf",
+ out: &MyMessage{
+ Count: Int32(4),
+ Bigfloat: Float64(math.Inf(-1)),
+ },
+ },
+
+ // Number too large for float32
+ {
+ in: "others:< weight: 12345678901234567890123456789012345678901234567890 >",
+ err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
+ },
+
+ // Number posing as a quoted string
+ {
+ in: `inner: < host: 12 >` + "\n",
+ err: `line 1.15: invalid string: 12`,
+ },
+
+ // Quoted string posing as int32
+ {
+ in: `count: "12"`,
+ err: `line 1.7: invalid int32: "12"`,
+ },
+
+ // Quoted string posing as a float32
+ {
+ in: `others:< weight: "17.4" >`,
+ err: `line 1.17: invalid float32: "17.4"`,
+ },
+
+ // Enum
+ {
+ in: `count:42 bikeshed: BLUE`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Bikeshed: MyMessage_BLUE.Enum(),
+ },
+ },
+
+ // Repeated field
+ {
+ in: `count:42 pet: "horsey" pet:"bunny"`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Pet: []string{"horsey", "bunny"},
+ },
+ },
+
+ // Repeated field with list notation
+ {
+ in: `count:42 pet: ["horsey", "bunny"]`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Pet: []string{"horsey", "bunny"},
+ },
+ },
+
+ // Repeated message with/without colon and <>/{}
+ {
+ in: `count:42 others:{} others{} others:<> others:{}`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Others: []*OtherMessage{
+ {},
+ {},
+ {},
+ {},
+ },
+ },
+ },
+
+ // Missing colon for inner message
+ {
+ in: `count:42 inner < host: "cauchy.syd" >`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("cauchy.syd"),
+ },
+ },
+ },
+
+ // Missing colon for string field
+ {
+ in: `name "Dave"`,
+ err: `line 1.5: expected ':', found "\"Dave\""`,
+ },
+
+ // Missing colon for int32 field
+ {
+ in: `count 42`,
+ err: `line 1.6: expected ':', found "42"`,
+ },
+
+ // Missing required field
+ {
+ in: `name: "Pawel"`,
+ err: `proto: required field "testdata.MyMessage.count" not set`,
+ out: &MyMessage{
+ Name: String("Pawel"),
+ },
+ },
+
+ // Missing required field in a required submessage
+ {
+ in: `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`,
+ err: `proto: required field "testdata.InnerMessage.host" not set`,
+ out: &MyMessage{
+ Count: Int32(42),
+ WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}},
+ },
+ },
+
+ // Repeated non-repeated field
+ {
+ in: `name: "Rob" name: "Russ"`,
+ err: `line 1.12: non-repeated field "name" was repeated`,
+ },
+
+ // Group
+ {
+ in: `count: 17 SomeGroup { group_field: 12 }`,
+ out: &MyMessage{
+ Count: Int32(17),
+ Somegroup: &MyMessage_SomeGroup{
+ GroupField: Int32(12),
+ },
+ },
+ },
+
+ // Semicolon between fields
+ {
+ in: `count:3;name:"Calvin"`,
+ out: &MyMessage{
+ Count: Int32(3),
+ Name: String("Calvin"),
+ },
+ },
+ // Comma between fields
+ {
+ in: `count:4,name:"Ezekiel"`,
+ out: &MyMessage{
+ Count: Int32(4),
+ Name: String("Ezekiel"),
+ },
+ },
+
+ // Boolean false
+ {
+ in: `count:42 inner { host: "example.com" connected: false }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(false),
+ },
+ },
+ },
+ // Boolean true
+ {
+ in: `count:42 inner { host: "example.com" connected: true }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(true),
+ },
+ },
+ },
+ // Boolean 0
+ {
+ in: `count:42 inner { host: "example.com" connected: 0 }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(false),
+ },
+ },
+ },
+ // Boolean 1
+ {
+ in: `count:42 inner { host: "example.com" connected: 1 }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(true),
+ },
+ },
+ },
+ // Boolean f
+ {
+ in: `count:42 inner { host: "example.com" connected: f }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(false),
+ },
+ },
+ },
+ // Boolean t
+ {
+ in: `count:42 inner { host: "example.com" connected: t }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(true),
+ },
+ },
+ },
+ // Boolean False
+ {
+ in: `count:42 inner { host: "example.com" connected: False }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(false),
+ },
+ },
+ },
+ // Boolean True
+ {
+ in: `count:42 inner { host: "example.com" connected: True }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(true),
+ },
+ },
+ },
+
+ // Extension
+ buildExtStructTest(`count: 42 [testdata.Ext.more]:<data:"Hello, world!" >`),
+ buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`),
+ buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`),
+ buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`),
+
+ // Big all-in-one
+ {
+ in: "count:42 # Meaning\n" +
+ `name:"Dave" ` +
+ `quote:"\"I didn't want to go.\"" ` +
+ `pet:"bunny" ` +
+ `pet:"kitty" ` +
+ `pet:"horsey" ` +
+ `inner:<` +
+ ` host:"footrest.syd" ` +
+ ` port:7001 ` +
+ ` connected:true ` +
+ `> ` +
+ `others:<` +
+ ` key:3735928559 ` +
+ ` value:"\x01A\a\f" ` +
+ `> ` +
+ `others:<` +
+ " weight:58.9 # Atomic weight of Co\n" +
+ ` inner:<` +
+ ` host:"lesha.mtv" ` +
+ ` port:8002 ` +
+ ` >` +
+ `>`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("Dave"),
+ Quote: String(`"I didn't want to go."`),
+ Pet: []string{"bunny", "kitty", "horsey"},
+ Inner: &InnerMessage{
+ Host: String("footrest.syd"),
+ Port: Int32(7001),
+ Connected: Bool(true),
+ },
+ Others: []*OtherMessage{
+ {
+ Key: Int64(3735928559),
+ Value: []byte{0x1, 'A', '\a', '\f'},
+ },
+ {
+ Weight: Float32(58.9),
+ Inner: &InnerMessage{
+ Host: String("lesha.mtv"),
+ Port: Int32(8002),
+ },
+ },
+ },
+ },
+ },
+}
+
+func TestUnmarshalText(t *testing.T) {
+ for i, test := range unMarshalTextTests {
+ pb := new(MyMessage)
+ err := UnmarshalText(test.in, pb)
+ if test.err == "" {
+ // We don't expect failure.
+ if err != nil {
+ t.Errorf("Test %d: Unexpected error: %v", i, err)
+ } else if !reflect.DeepEqual(pb, test.out) {
+ t.Errorf("Test %d: Incorrectly populated\nHave: %v\nWant: %v",
+ i, pb, test.out)
+ }
+ } else {
+ // We do expect failure.
+ if err == nil {
+ t.Errorf("Test %d: Didn't get expected error: %v", i, test.err)
+ } else if err.Error() != test.err {
+ t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v",
+ i, err.Error(), test.err)
+ } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) {
+ t.Errorf("Test %d: Incorrectly populated\nHave: %v\nWant: %v",
+ i, pb, test.out)
+ }
+ }
+ }
+}
+
+func TestUnmarshalTextCustomMessage(t *testing.T) {
+ msg := &textMessage{}
+ if err := UnmarshalText("custom", msg); err != nil {
+ t.Errorf("Unexpected error from custom unmarshal: %v", err)
+ }
+ if UnmarshalText("not custom", msg) == nil {
+ t.Errorf("Didn't get expected error from custom unmarshal")
+ }
+}
+
+// Regression test; this caused a panic.
+func TestRepeatedEnum(t *testing.T) {
+ pb := new(RepeatedEnum)
+ if err := UnmarshalText("color: RED", pb); err != nil {
+ t.Fatal(err)
+ }
+ exp := &RepeatedEnum{
+ Color: []RepeatedEnum_Color{RepeatedEnum_RED},
+ }
+ if !Equal(pb, exp) {
+ t.Errorf("Incorrectly populated\nHave: %v\nWant: %v", pb, exp)
+ }
+}
+
+func TestProto3TextParsing(t *testing.T) {
+ m := new(proto3pb.Message)
+ const in = `name: "Wallace" true_scotsman: true`
+ want := &proto3pb.Message{
+ Name: "Wallace",
+ TrueScotsman: true,
+ }
+ if err := UnmarshalText(in, m); err != nil {
+ t.Fatal(err)
+ }
+ if !Equal(m, want) {
+ t.Errorf("\n got %v\nwant %v", m, want)
+ }
+}
+
+func TestMapParsing(t *testing.T) {
+ m := new(MessageWithMap)
+ const in = `name_mapping:<key:1234 value:"Feist"> name_mapping:<key:1 value:"Beatles">` +
+ `msg_mapping:<key:-4, value:<f: 2.0>,>` + // separating commas are okay
+ `msg_mapping<key:-2 value<f: 4.0>>` + // no colon after "value"
+ `msg_mapping:<value:<f: 5.0>>` + // omitted key
+ `msg_mapping:<key:1>` + // omitted value
+ `byte_mapping:<key:true value:"so be it">` +
+ `byte_mapping:<>` // omitted key and value
+ want := &MessageWithMap{
+ NameMapping: map[int32]string{
+ 1: "Beatles",
+ 1234: "Feist",
+ },
+ MsgMapping: map[int64]*FloatingPoint{
+ -4: {F: Float64(2.0)},
+ -2: {F: Float64(4.0)},
+ 0: {F: Float64(5.0)},
+ 1: nil,
+ },
+ ByteMapping: map[bool][]byte{
+ false: nil,
+ true: []byte("so be it"),
+ },
+ }
+ if err := UnmarshalText(in, m); err != nil {
+ t.Fatal(err)
+ }
+ if !Equal(m, want) {
+ t.Errorf("\n got %v\nwant %v", m, want)
+ }
+}
+
+func TestOneofParsing(t *testing.T) {
+ const in = `name:"Shrek"`
+ m := new(Communique)
+ want := &Communique{Union: &Communique_Name{"Shrek"}}
+ if err := UnmarshalText(in, m); err != nil {
+ t.Fatal(err)
+ }
+ if !Equal(m, want) {
+ t.Errorf("\n got %v\nwant %v", m, want)
+ }
+
+ const inOverwrite = `name:"Shrek" number:42`
+ m = new(Communique)
+ testErr := "line 1.13: field 'number' would overwrite already parsed oneof 'Union'"
+ if err := UnmarshalText(inOverwrite, m); err == nil {
+ t.Errorf("TestOneofParsing: Didn't get expected error: %v", testErr)
+ } else if err.Error() != testErr {
+ t.Errorf("TestOneofParsing: Incorrect error.\nHave: %v\nWant: %v",
+ err.Error(), testErr)
+ }
+
+}
+
+var benchInput string
+
+func init() {
+ benchInput = "count: 4\n"
+ for i := 0; i < 1000; i++ {
+ benchInput += "pet: \"fido\"\n"
+ }
+
+ // Check it is valid input.
+ pb := new(MyMessage)
+ err := UnmarshalText(benchInput, pb)
+ if err != nil {
+ panic("Bad benchmark input: " + err.Error())
+ }
+}
+
+func BenchmarkUnmarshalText(b *testing.B) {
+ pb := new(MyMessage)
+ for i := 0; i < b.N; i++ {
+ UnmarshalText(benchInput, pb)
+ }
+ b.SetBytes(int64(len(benchInput)))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text_test.go b/vendor/github.com/golang/protobuf/proto/text_test.go
new file mode 100644
index 0000000..3eabaca
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_test.go
@@ -0,0 +1,474 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "bytes"
+ "errors"
+ "io/ioutil"
+ "math"
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+ pb "github.com/golang/protobuf/proto/testdata"
+)
+
+// textMessage implements the methods that allow it to marshal and unmarshal
+// itself as text.
+type textMessage struct {
+}
+
+func (*textMessage) MarshalText() ([]byte, error) {
+ return []byte("custom"), nil
+}
+
+func (*textMessage) UnmarshalText(bytes []byte) error {
+ if string(bytes) != "custom" {
+ return errors.New("expected 'custom'")
+ }
+ return nil
+}
+
+func (*textMessage) Reset() {}
+func (*textMessage) String() string { return "" }
+func (*textMessage) ProtoMessage() {}
+
+func newTestMessage() *pb.MyMessage {
+ msg := &pb.MyMessage{
+ Count: proto.Int32(42),
+ Name: proto.String("Dave"),
+ Quote: proto.String(`"I didn't want to go."`),
+ Pet: []string{"bunny", "kitty", "horsey"},
+ Inner: &pb.InnerMessage{
+ Host: proto.String("footrest.syd"),
+ Port: proto.Int32(7001),
+ Connected: proto.Bool(true),
+ },
+ Others: []*pb.OtherMessage{
+ {
+ Key: proto.Int64(0xdeadbeef),
+ Value: []byte{1, 65, 7, 12},
+ },
+ {
+ Weight: proto.Float32(6.022),
+ Inner: &pb.InnerMessage{
+ Host: proto.String("lesha.mtv"),
+ Port: proto.Int32(8002),
+ },
+ },
+ },
+ Bikeshed: pb.MyMessage_BLUE.Enum(),
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: proto.Int32(8),
+ },
+ // One normally wouldn't do this.
+ // This is an undeclared tag 13, as a varint (wire type 0) with value 4.
+ XXX_unrecognized: []byte{13<<3 | 0, 4},
+ }
+ ext := &pb.Ext{
+ Data: proto.String("Big gobs for big rats"),
+ }
+ if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
+ panic(err)
+ }
+ greetings := []string{"adg", "easy", "cow"}
+ if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {
+ panic(err)
+ }
+
+ // Add an unknown extension. We marshal a pb.Ext, and fake the ID.
+ b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")})
+ if err != nil {
+ panic(err)
+ }
+ b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)
+ proto.SetRawExtension(msg, 201, b)
+
+ // Extensions can be plain fields, too, so let's test that.
+ b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
+ proto.SetRawExtension(msg, 202, b)
+
+ return msg
+}
+
+const text = `count: 42
+name: "Dave"
+quote: "\"I didn't want to go.\""
+pet: "bunny"
+pet: "kitty"
+pet: "horsey"
+inner: <
+ host: "footrest.syd"
+ port: 7001
+ connected: true
+>
+others: <
+ key: 3735928559
+ value: "\001A\007\014"
+>
+others: <
+ weight: 6.022
+ inner: <
+ host: "lesha.mtv"
+ port: 8002
+ >
+>
+bikeshed: BLUE
+SomeGroup {
+ group_field: 8
+}
+/* 2 unknown bytes */
+13: 4
+[testdata.Ext.more]: <
+ data: "Big gobs for big rats"
+>
+[testdata.greeting]: "adg"
+[testdata.greeting]: "easy"
+[testdata.greeting]: "cow"
+/* 13 unknown bytes */
+201: "\t3G skiing"
+/* 3 unknown bytes */
+202: 19
+`
+
+func TestMarshalText(t *testing.T) {
+ buf := new(bytes.Buffer)
+ if err := proto.MarshalText(buf, newTestMessage()); err != nil {
+ t.Fatalf("proto.MarshalText: %v", err)
+ }
+ s := buf.String()
+ if s != text {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
+ }
+}
+
+func TestMarshalTextCustomMessage(t *testing.T) {
+ buf := new(bytes.Buffer)
+ if err := proto.MarshalText(buf, &textMessage{}); err != nil {
+ t.Fatalf("proto.MarshalText: %v", err)
+ }
+ s := buf.String()
+ if s != "custom" {
+ t.Errorf("Got %q, expected %q", s, "custom")
+ }
+}
+
+func TestMarshalTextNil(t *testing.T) {
+ want := "<nil>"
+ tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
+ for i, test := range tests {
+ buf := new(bytes.Buffer)
+ if err := proto.MarshalText(buf, test); err != nil {
+ t.Fatal(err)
+ }
+ if got := buf.String(); got != want {
+ t.Errorf("%d: got %q want %q", i, got, want)
+ }
+ }
+}
+
+func TestMarshalTextUnknownEnum(t *testing.T) {
+ // The Color enum only specifies values 0-2.
+ m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
+ got := m.String()
+ const want = `bikeshed:3 `
+ if got != want {
+ t.Errorf("\n got %q\nwant %q", got, want)
+ }
+}
+
+func TestTextOneof(t *testing.T) {
+ tests := []struct {
+ m proto.Message
+ want string
+ }{
+ // zero message
+ {&pb.Communique{}, ``},
+ // scalar field
+ {&pb.Communique{Union: &pb.Communique_Number{4}}, `number:4`},
+ // message field
+ {&pb.Communique{Union: &pb.Communique_Msg{
+ &pb.Strings{StringField: proto.String("why hello!")},
+ }}, `msg:<string_field:"why hello!" >`},
+ // bad oneof (should not panic)
+ {&pb.Communique{Union: &pb.Communique_Msg{nil}}, `msg:/* nil */`},
+ }
+ for _, test := range tests {
+ got := strings.TrimSpace(test.m.String())
+ if got != test.want {
+ t.Errorf("\n got %s\nwant %s", got, test.want)
+ }
+ }
+}
+
+func BenchmarkMarshalTextBuffered(b *testing.B) {
+ buf := new(bytes.Buffer)
+ m := newTestMessage()
+ for i := 0; i < b.N; i++ {
+ buf.Reset()
+ proto.MarshalText(buf, m)
+ }
+}
+
+func BenchmarkMarshalTextUnbuffered(b *testing.B) {
+ w := ioutil.Discard
+ m := newTestMessage()
+ for i := 0; i < b.N; i++ {
+ proto.MarshalText(w, m)
+ }
+}
+
+func compact(src string) string {
+ // Roughly s/[ \n]+/ /g; also strips /* ... */ comments.
+ dst := make([]byte, len(src))
+ space, comment := false, false
+ j := 0
+ for i := 0; i < len(src); i++ {
+ if strings.HasPrefix(src[i:], "/*") {
+ comment = true
+ i++
+ continue
+ }
+ if comment && strings.HasPrefix(src[i:], "*/") {
+ comment = false
+ i++
+ continue
+ }
+ if comment {
+ continue
+ }
+ c := src[i]
+ if c == ' ' || c == '\n' {
+ space = true
+ continue
+ }
+ if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
+ space = false
+ }
+ if c == '{' {
+ space = false
+ }
+ if space {
+ dst[j] = ' '
+ j++
+ space = false
+ }
+ dst[j] = c
+ j++
+ }
+ if space {
+ dst[j] = ' '
+ j++
+ }
+ return string(dst[0:j])
+}
+
+var compactText = compact(text)
+
+func TestCompactText(t *testing.T) {
+ s := proto.CompactTextString(newTestMessage())
+ if s != compactText {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText)
+ }
+}
+
+func TestStringEscaping(t *testing.T) {
+ testCases := []struct {
+ in *pb.Strings
+ out string
+ }{
+ {
+ // Test data from C++ test (TextFormatTest.StringEscape).
+ // Single divergence: we don't escape apostrophes.
+ &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")},
+ "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n",
+ },
+ {
+ // Test data from the same C++ test.
+ &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")},
+ "string_field: \"\\350\\260\\267\\346\\255\\214\"\n",
+ },
+ {
+ // Some UTF-8.
+ &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")},
+ `string_field: "\000\001\377\201"` + "\n",
+ },
+ }
+
+ for i, tc := range testCases {
+ var buf bytes.Buffer
+ if err := proto.MarshalText(&buf, tc.in); err != nil {
+ t.Errorf("proto.MarshalText: %v", err)
+ continue
+ }
+ s := buf.String()
+ if s != tc.out {
+ t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out)
+ continue
+ }
+
+ // Check round-trip.
+ pb := new(pb.Strings)
+ if err := proto.UnmarshalText(s, pb); err != nil {
+ t.Errorf("#%d: UnmarshalText: %v", i, err)
+ continue
+ }
+ if !proto.Equal(pb, tc.in) {
+ t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb)
+ }
+ }
+}
+
+// A limitedWriter accepts some output before it fails.
+// This is a proxy for something like a nearly-full or imminently-failing disk,
+// or a network connection that is about to die.
+type limitedWriter struct {
+ b bytes.Buffer
+ limit int
+}
+
+var outOfSpace = errors.New("proto: insufficient space")
+
+func (w *limitedWriter) Write(p []byte) (n int, err error) {
+ var avail = w.limit - w.b.Len()
+ if avail <= 0 {
+ return 0, outOfSpace
+ }
+ if len(p) <= avail {
+ return w.b.Write(p)
+ }
+ n, _ = w.b.Write(p[:avail])
+ return n, outOfSpace
+}
+
+func TestMarshalTextFailing(t *testing.T) {
+ // Try lots of different sizes to exercise more error code-paths.
+ for lim := 0; lim < len(text); lim++ {
+ buf := new(limitedWriter)
+ buf.limit = lim
+ err := proto.MarshalText(buf, newTestMessage())
+ // We expect a certain error, but also some partial results in the buffer.
+ if err != outOfSpace {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
+ }
+ s := buf.b.String()
+ x := text[:buf.limit]
+ if s != x {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
+ }
+ }
+}
+
+func TestFloats(t *testing.T) {
+ tests := []struct {
+ f float64
+ want string
+ }{
+ {0, "0"},
+ {4.7, "4.7"},
+ {math.Inf(1), "inf"},
+ {math.Inf(-1), "-inf"},
+ {math.NaN(), "nan"},
+ }
+ for _, test := range tests {
+ msg := &pb.FloatingPoint{F: &test.f}
+ got := strings.TrimSpace(msg.String())
+ want := `f:` + test.want
+ if got != want {
+ t.Errorf("f=%f: got %q, want %q", test.f, got, want)
+ }
+ }
+}
+
+func TestRepeatedNilText(t *testing.T) {
+ m := &pb.MessageList{
+ Message: []*pb.MessageList_Message{
+ nil,
+ &pb.MessageList_Message{
+ Name: proto.String("Horse"),
+ },
+ nil,
+ },
+ }
+ want := `Message <nil>
+Message {
+ name: "Horse"
+}
+Message <nil>
+`
+ if s := proto.MarshalTextString(m); s != want {
+ t.Errorf(" got: %s\nwant: %s", s, want)
+ }
+}
+
+func TestProto3Text(t *testing.T) {
+ tests := []struct {
+ m proto.Message
+ want string
+ }{
+ // zero message
+ {&proto3pb.Message{}, ``},
+ // zero message except for an empty byte slice
+ {&proto3pb.Message{Data: []byte{}}, ``},
+ // trivial case
+ {&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
+ // empty map
+ {&pb.MessageWithMap{}, ``},
+ // non-empty map; map format is the same as a repeated struct,
+ // and they are sorted by key (numerically for numeric keys).
+ {
+ &pb.MessageWithMap{NameMapping: map[int32]string{
+ -1: "Negatory",
+ 7: "Lucky",
+ 1234: "Feist",
+ 6345789: "Otis",
+ }},
+ `name_mapping:<key:-1 value:"Negatory" > ` +
+ `name_mapping:<key:7 value:"Lucky" > ` +
+ `name_mapping:<key:1234 value:"Feist" > ` +
+ `name_mapping:<key:6345789 value:"Otis" >`,
+ },
+ // map with nil value; not well-defined, but we shouldn't crash
+ {
+ &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}},
+ `msg_mapping:<key:7 >`,
+ },
+ }
+ for _, test := range tests {
+ got := strings.TrimSpace(test.m.String())
+ if got != test.want {
+ t.Errorf("\n got %s\nwant %s", got, test.want)
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile
new file mode 100644
index 0000000..a42cc37
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile
@@ -0,0 +1,33 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+test:
+ cd testdata && make test
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
new file mode 100644
index 0000000..f706871
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
@@ -0,0 +1,37 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/
+# at src/google/protobuf/descriptor.proto
+regenerate:
+ @echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION
+ cp $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto .
+ protoc --go_out=../../../../.. -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
new file mode 100644
index 0000000..c6a91bc
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
@@ -0,0 +1,2215 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/descriptor.proto
+
+/*
+Package descriptor is a generated protocol buffer package.
+
+It is generated from these files:
+ google/protobuf/descriptor.proto
+
+It has these top-level messages:
+ FileDescriptorSet
+ FileDescriptorProto
+ DescriptorProto
+ ExtensionRangeOptions
+ FieldDescriptorProto
+ OneofDescriptorProto
+ EnumDescriptorProto
+ EnumValueDescriptorProto
+ ServiceDescriptorProto
+ MethodDescriptorProto
+ FileOptions
+ MessageOptions
+ FieldOptions
+ OneofOptions
+ EnumOptions
+ EnumValueOptions
+ ServiceOptions
+ MethodOptions
+ UninterpretedOption
+ SourceCodeInfo
+ GeneratedCodeInfo
+*/
+package descriptor
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type FieldDescriptorProto_Type int32
+
+const (
+ // 0 is reserved for errors.
+ // Order is weird for historical reasons.
+ FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
+ FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
+ // negative values are likely.
+ FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3
+ FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
+ // negative values are likely.
+ FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5
+ FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6
+ FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
+ FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8
+ FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9
+ // Tag-delimited aggregate.
+ // Group type is deprecated and not supported in proto3. However, Proto3
+ // implementations should still be able to parse the group wire format and
+ // treat group fields as unknown fields.
+ FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10
+ FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
+ // New in version 2.
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12
+ FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13
+ FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14
+ FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15
+ FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16
+ FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17
+ FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18
+)
+
+var FieldDescriptorProto_Type_name = map[int32]string{
+ 1: "TYPE_DOUBLE",
+ 2: "TYPE_FLOAT",
+ 3: "TYPE_INT64",
+ 4: "TYPE_UINT64",
+ 5: "TYPE_INT32",
+ 6: "TYPE_FIXED64",
+ 7: "TYPE_FIXED32",
+ 8: "TYPE_BOOL",
+ 9: "TYPE_STRING",
+ 10: "TYPE_GROUP",
+ 11: "TYPE_MESSAGE",
+ 12: "TYPE_BYTES",
+ 13: "TYPE_UINT32",
+ 14: "TYPE_ENUM",
+ 15: "TYPE_SFIXED32",
+ 16: "TYPE_SFIXED64",
+ 17: "TYPE_SINT32",
+ 18: "TYPE_SINT64",
+}
+var FieldDescriptorProto_Type_value = map[string]int32{
+ "TYPE_DOUBLE": 1,
+ "TYPE_FLOAT": 2,
+ "TYPE_INT64": 3,
+ "TYPE_UINT64": 4,
+ "TYPE_INT32": 5,
+ "TYPE_FIXED64": 6,
+ "TYPE_FIXED32": 7,
+ "TYPE_BOOL": 8,
+ "TYPE_STRING": 9,
+ "TYPE_GROUP": 10,
+ "TYPE_MESSAGE": 11,
+ "TYPE_BYTES": 12,
+ "TYPE_UINT32": 13,
+ "TYPE_ENUM": 14,
+ "TYPE_SFIXED32": 15,
+ "TYPE_SFIXED64": 16,
+ "TYPE_SINT32": 17,
+ "TYPE_SINT64": 18,
+}
+
+func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type {
+ p := new(FieldDescriptorProto_Type)
+ *p = x
+ return p
+}
+func (x FieldDescriptorProto_Type) String() string {
+ return proto.EnumName(FieldDescriptorProto_Type_name, int32(x))
+}
+func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type")
+ if err != nil {
+ return err
+ }
+ *x = FieldDescriptorProto_Type(value)
+ return nil
+}
+func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} }
+
+type FieldDescriptorProto_Label int32
+
+const (
+ // 0 is reserved for errors
+ FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
+ FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
+ FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
+)
+
+var FieldDescriptorProto_Label_name = map[int32]string{
+ 1: "LABEL_OPTIONAL",
+ 2: "LABEL_REQUIRED",
+ 3: "LABEL_REPEATED",
+}
+var FieldDescriptorProto_Label_value = map[string]int32{
+ "LABEL_OPTIONAL": 1,
+ "LABEL_REQUIRED": 2,
+ "LABEL_REPEATED": 3,
+}
+
+func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label {
+ p := new(FieldDescriptorProto_Label)
+ *p = x
+ return p
+}
+func (x FieldDescriptorProto_Label) String() string {
+ return proto.EnumName(FieldDescriptorProto_Label_name, int32(x))
+}
+func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label")
+ if err != nil {
+ return err
+ }
+ *x = FieldDescriptorProto_Label(value)
+ return nil
+}
+func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{4, 1}
+}
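+
+// Editor's note (illustrative sketch, not generated code): the Enum and String
+// helpers emitted for each enum type are typically used like this:
+//
+//    lbl := FieldDescriptorProto_LABEL_OPTIONAL.Enum() // *FieldDescriptorProto_Label
+//    _ = lbl.String()                                  // "LABEL_OPTIONAL"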
+
+// Generated classes can be optimized for speed or code size.
+type FileOptions_OptimizeMode int32
+
+const (
+ FileOptions_SPEED FileOptions_OptimizeMode = 1
+ // etc.
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2
+ FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3
+)
+
+var FileOptions_OptimizeMode_name = map[int32]string{
+ 1: "SPEED",
+ 2: "CODE_SIZE",
+ 3: "LITE_RUNTIME",
+}
+var FileOptions_OptimizeMode_value = map[string]int32{
+ "SPEED": 1,
+ "CODE_SIZE": 2,
+ "LITE_RUNTIME": 3,
+}
+
+func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode {
+ p := new(FileOptions_OptimizeMode)
+ *p = x
+ return p
+}
+func (x FileOptions_OptimizeMode) String() string {
+ return proto.EnumName(FileOptions_OptimizeMode_name, int32(x))
+}
+func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode")
+ if err != nil {
+ return err
+ }
+ *x = FileOptions_OptimizeMode(value)
+ return nil
+}
+func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} }
+
+type FieldOptions_CType int32
+
+const (
+ // Default mode.
+ FieldOptions_STRING FieldOptions_CType = 0
+ FieldOptions_CORD FieldOptions_CType = 1
+ FieldOptions_STRING_PIECE FieldOptions_CType = 2
+)
+
+var FieldOptions_CType_name = map[int32]string{
+ 0: "STRING",
+ 1: "CORD",
+ 2: "STRING_PIECE",
+}
+var FieldOptions_CType_value = map[string]int32{
+ "STRING": 0,
+ "CORD": 1,
+ "STRING_PIECE": 2,
+}
+
+func (x FieldOptions_CType) Enum() *FieldOptions_CType {
+ p := new(FieldOptions_CType)
+ *p = x
+ return p
+}
+func (x FieldOptions_CType) String() string {
+ return proto.EnumName(FieldOptions_CType_name, int32(x))
+}
+func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType")
+ if err != nil {
+ return err
+ }
+ *x = FieldOptions_CType(value)
+ return nil
+}
+func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} }
+
+type FieldOptions_JSType int32
+
+const (
+ // Use the default type.
+ FieldOptions_JS_NORMAL FieldOptions_JSType = 0
+ // Use JavaScript strings.
+ FieldOptions_JS_STRING FieldOptions_JSType = 1
+ // Use JavaScript numbers.
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2
+)
+
+var FieldOptions_JSType_name = map[int32]string{
+ 0: "JS_NORMAL",
+ 1: "JS_STRING",
+ 2: "JS_NUMBER",
+}
+var FieldOptions_JSType_value = map[string]int32{
+ "JS_NORMAL": 0,
+ "JS_STRING": 1,
+ "JS_NUMBER": 2,
+}
+
+func (x FieldOptions_JSType) Enum() *FieldOptions_JSType {
+ p := new(FieldOptions_JSType)
+ *p = x
+ return p
+}
+func (x FieldOptions_JSType) String() string {
+ return proto.EnumName(FieldOptions_JSType_name, int32(x))
+}
+func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType")
+ if err != nil {
+ return err
+ }
+ *x = FieldOptions_JSType(value)
+ return nil
+}
+func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 1} }
+
+// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+// or neither? HTTP based RPC implementation may choose GET verb for safe
+// methods, and PUT verb for idempotent methods instead of the default POST.
+type MethodOptions_IdempotencyLevel int32
+
+const (
+ MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0
+ MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1
+ MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2
+)
+
+var MethodOptions_IdempotencyLevel_name = map[int32]string{
+ 0: "IDEMPOTENCY_UNKNOWN",
+ 1: "NO_SIDE_EFFECTS",
+ 2: "IDEMPOTENT",
+}
+var MethodOptions_IdempotencyLevel_value = map[string]int32{
+ "IDEMPOTENCY_UNKNOWN": 0,
+ "NO_SIDE_EFFECTS": 1,
+ "IDEMPOTENT": 2,
+}
+
+func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel {
+ p := new(MethodOptions_IdempotencyLevel)
+ *p = x
+ return p
+}
+func (x MethodOptions_IdempotencyLevel) String() string {
+ return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x))
+}
+func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel")
+ if err != nil {
+ return err
+ }
+ *x = MethodOptions_IdempotencyLevel(value)
+ return nil
+}
+func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{17, 0}
+}
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+type FileDescriptorSet struct {
+ File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} }
+func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorSet) ProtoMessage() {}
+func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto {
+ if m != nil {
+ return m.File
+ }
+ return nil
+}
+
+// Describes a complete .proto file.
+type FileDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"`
+ // Names of files imported by this file.
+ Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
+ // Indexes of the public imported files in the dependency list above.
+ PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"`
+ // Indexes of the weak imported files in the dependency list.
+ // For Google-internal migration only. Do not use.
+ WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+ // All top-level definitions in this file.
+ MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
+ EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+ Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"`
+ Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"`
+ Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+ // This field contains optional information about the original source code.
+ // You may safely remove this entire field without harming runtime
+ // functionality of the descriptors -- the information is needed only by
+ // development tools.
+ SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
+ // The syntax of the proto file.
+ // The supported values are "proto2" and "proto3".
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} }
+func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorProto) ProtoMessage() {}
+func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *FileDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FileDescriptorProto) GetPackage() string {
+ if m != nil && m.Package != nil {
+ return *m.Package
+ }
+ return ""
+}
+
+func (m *FileDescriptorProto) GetDependency() []string {
+ if m != nil {
+ return m.Dependency
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetPublicDependency() []int32 {
+ if m != nil {
+ return m.PublicDependency
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetWeakDependency() []int32 {
+ if m != nil {
+ return m.WeakDependency
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto {
+ if m != nil {
+ return m.MessageType
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto {
+ if m != nil {
+ return m.EnumType
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto {
+ if m != nil {
+ return m.Service
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto {
+ if m != nil {
+ return m.Extension
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetOptions() *FileOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo {
+ if m != nil {
+ return m.SourceCodeInfo
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetSyntax() string {
+ if m != nil && m.Syntax != nil {
+ return *m.Syntax
+ }
+ return ""
+}
+
+// Describes a message type.
+type DescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
+ Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
+ NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"`
+ EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+ ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"`
+ OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"`
+ Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
+ ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
+ // Reserved field names, which may not be used by fields in the same message.
+ // A given name may only be reserved once.
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DescriptorProto) Reset() { *m = DescriptorProto{} }
+func (m *DescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto) ProtoMessage() {}
+func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *DescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *DescriptorProto) GetField() []*FieldDescriptorProto {
+ if m != nil {
+ return m.Field
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto {
+ if m != nil {
+ return m.Extension
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetNestedType() []*DescriptorProto {
+ if m != nil {
+ return m.NestedType
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto {
+ if m != nil {
+ return m.EnumType
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange {
+ if m != nil {
+ return m.ExtensionRange
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto {
+ if m != nil {
+ return m.OneofDecl
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetOptions() *MessageOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange {
+ if m != nil {
+ return m.ReservedRange
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetReservedName() []string {
+ if m != nil {
+ return m.ReservedName
+ }
+ return nil
+}
+
+type DescriptorProto_ExtensionRange struct {
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+ Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} }
+func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
+func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{2, 0}
+}
+
+func (m *DescriptorProto_ExtensionRange) GetStart() int32 {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return 0
+}
+
+func (m *DescriptorProto_ExtensionRange) GetEnd() int32 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
+
+func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// Range of reserved tag numbers. Reserved tag numbers may not be used by
+// fields or extension ranges in the same message. Reserved ranges may
+// not overlap.
+type DescriptorProto_ReservedRange struct {
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} }
+func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ReservedRange) ProtoMessage() {}
+func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{2, 1}
+}
+
+func (m *DescriptorProto_ReservedRange) GetStart() int32 {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return 0
+}
+
+func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
+
+type ExtensionRangeOptions struct {
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} }
+func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) }
+func (*ExtensionRangeOptions) ProtoMessage() {}
+func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+var extRange_ExtensionRangeOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_ExtensionRangeOptions
+}
+
+func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+// Describes a field within a message.
+type FieldDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
+ Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
+ // If type_name is set, this need not be set. If both this and type_name
+ // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+ Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"`
+ // For message and enum types, this is the name of the type. If the name
+ // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
+ // rules are used to find the type (i.e. first the nested types within this
+ // message are searched, then within the parent, on up to the root
+ // namespace).
+ TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"`
+ // For extensions, this is the name of the type being extended. It is
+ // resolved in the same manner as type_name.
+ Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"`
+ // For numeric types, contains the original text representation of the value.
+ // For booleans, "true" or "false".
+ // For strings, contains the default text contents (not escaped in any way).
+ // For bytes, contains the C escaped value. All bytes >= 128 are escaped.
+ // TODO(kenton): Base-64 encode?
+ DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
+ // If set, gives the index of a oneof in the containing type's oneof_decl
+ // list. This field is a member of that oneof.
+ OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
+ // JSON name of this field. The value is set by protocol compiler. If the
+ // user has set a "json_name" option on this field, that option's value
+ // will be used. Otherwise, it's deduced from the field's name by converting
+ // it to camelCase.
+ JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
+ Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} }
+func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FieldDescriptorProto) ProtoMessage() {}
+func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *FieldDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetNumber() int32 {
+ if m != nil && m.Number != nil {
+ return *m.Number
+ }
+ return 0
+}
+
+func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return FieldDescriptorProto_LABEL_OPTIONAL
+}
+
+func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return FieldDescriptorProto_TYPE_DOUBLE
+}
+
+func (m *FieldDescriptorProto) GetTypeName() string {
+ if m != nil && m.TypeName != nil {
+ return *m.TypeName
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetExtendee() string {
+ if m != nil && m.Extendee != nil {
+ return *m.Extendee
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetDefaultValue() string {
+ if m != nil && m.DefaultValue != nil {
+ return *m.DefaultValue
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetOneofIndex() int32 {
+ if m != nil && m.OneofIndex != nil {
+ return *m.OneofIndex
+ }
+ return 0
+}
+
+func (m *FieldDescriptorProto) GetJsonName() string {
+ if m != nil && m.JsonName != nil {
+ return *m.JsonName
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetOptions() *FieldOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
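+
+// Illustrative sketch (not part of the generated code): how a JSON name is
+// typically deduced from a field name by converting snake_case to camelCase,
+// mirroring the json_name comment above. This is a re-implementation for
+// illustration only, not the protocol compiler's code.
+//
+//    func jsonName(field string) string {
+//        out := make([]byte, 0, len(field))
+//        upper := false
+//        for i := 0; i < len(field); i++ {
+//            c := field[i]
+//            if c == '_' {
+//                upper = true
+//                continue
+//            }
+//            if upper && 'a' <= c && c <= 'z' {
+//                c -= 'a' - 'A'
+//            }
+//            upper = false
+//            out = append(out, c)
+//        }
+//        return string(out) // e.g. "json_name" -> "jsonName"
+//    }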
+
+// Describes a oneof.
+type OneofDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} }
+func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*OneofDescriptorProto) ProtoMessage() {}
+func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+func (m *OneofDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *OneofDescriptorProto) GetOptions() *OneofOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// Describes an enum type.
+type EnumDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+ Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} }
+func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumDescriptorProto) ProtoMessage() {}
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+func (m *EnumDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *EnumDescriptorProto) GetOptions() *EnumOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// Describes a value within an enum.
+type EnumValueDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
+ Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} }
+func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumValueDescriptorProto) ProtoMessage() {}
+func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+
+func (m *EnumValueDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *EnumValueDescriptorProto) GetNumber() int32 {
+ if m != nil && m.Number != nil {
+ return *m.Number
+ }
+ return 0
+}
+
+func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// Describes a service.
+type ServiceDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
+ Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} }
+func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*ServiceDescriptorProto) ProtoMessage() {}
+func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+
+func (m *ServiceDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto {
+ if m != nil {
+ return m.Method
+ }
+ return nil
+}
+
+func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// Describes a method of a service.
+type MethodDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ // Input and output type names. These are resolved in the same way as
+ // FieldDescriptorProto.type_name, but must refer to a message type.
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
+ OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
+ Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
+ // Identifies whether the client streams multiple client messages
+ ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
+ // Identifies whether the server streams multiple server messages
+ ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} }
+func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*MethodDescriptorProto) ProtoMessage() {}
+func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+
+const Default_MethodDescriptorProto_ClientStreaming bool = false
+const Default_MethodDescriptorProto_ServerStreaming bool = false
+
+func (m *MethodDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MethodDescriptorProto) GetInputType() string {
+ if m != nil && m.InputType != nil {
+ return *m.InputType
+ }
+ return ""
+}
+
+func (m *MethodDescriptorProto) GetOutputType() string {
+ if m != nil && m.OutputType != nil {
+ return *m.OutputType
+ }
+ return ""
+}
+
+func (m *MethodDescriptorProto) GetOptions() *MethodOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *MethodDescriptorProto) GetClientStreaming() bool {
+ if m != nil && m.ClientStreaming != nil {
+ return *m.ClientStreaming
+ }
+ return Default_MethodDescriptorProto_ClientStreaming
+}
+
+func (m *MethodDescriptorProto) GetServerStreaming() bool {
+ if m != nil && m.ServerStreaming != nil {
+ return *m.ServerStreaming
+ }
+ return Default_MethodDescriptorProto_ServerStreaming
+}
+
+type FileOptions struct {
+ // Sets the Java package where classes generated from this .proto will be
+ // placed. By default, the proto package is used, but this is often
+ // inappropriate because proto packages do not normally start with backwards
+ // domain names.
+ JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
+ // If set, all the classes from the .proto file are wrapped in a single
+ // outer class with the given name. This applies to both Proto1
+ // (equivalent to the old "--one_java_file" option) and Proto2 (where
+ // a .proto always translates to a single class, but you may want to
+ // explicitly choose the class name).
+ JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
+ // If set to true, then the Java code generator will generate a separate .java
+ // file for each top-level message, enum, and service defined in the .proto
+ // file. Thus, these types will *not* be nested inside the outer class
+ // named by java_outer_classname. However, the outer class will still be
+ // generated to contain the file's getDescriptor() method as well as any
+ // top-level extensions defined in the file.
+ JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
+ // This option does nothing.
+ JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"`
+ // If set to true, then the Java2 code generator will generate code that
+ // throws an exception whenever an attempt is made to assign a non-UTF-8
+ // byte sequence to a string field.
+ // Message reflection will do the same.
+ // However, an extension field still accepts non-UTF-8 byte sequences.
+ // This option has no effect when used with the lite runtime.
+ JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
+ OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+ // Sets the Go package where structs generated from this .proto will be
+ // placed. If omitted, the Go package will be derived from the following:
+ // - The basename of the package import path, if provided.
+ // - Otherwise, the package statement in the .proto file, if present.
+ // - Otherwise, the basename of the .proto file, without extension.
+ GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
+ // Should generic services be generated in each language? "Generic" services
+ // are not specific to any particular RPC system. They are generated by the
+ // main code generators in each language (without additional plugins).
+ // Generic services were the only kind of service generation supported by
+ // early versions of google.protobuf.
+ //
+ // Generic services are now considered deprecated in favor of using plugins
+ // that generate code specific to your particular RPC system. Therefore,
+ // these default to false. Old code which depends on generic services should
+ // explicitly set them to true.
+ CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
+ JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
+ PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+ PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
+ // Is this file deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for everything in the file, or it will be completely ignored; at the very
+ // least, this is a formalization for deprecating files.
+ Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Enables the use of arenas for the proto messages in this file. This applies
+ // only to generated classes for C++.
+ CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
+ // Sets the objective c class prefix which is prepended to all objective c
+ // generated classes from this .proto. There is no default.
+ ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
+ // Namespace for generated classes; defaults to the package.
+ CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
+ // By default, Swift generators will take the proto package, CamelCase it
+ // (replacing '.' with underscore), and use that to prefix the types/symbols
+ // defined. When this option is provided, they will use this value instead
+ // to prefix the types/symbols defined.
+ SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
+ // Sets the php class prefix which is prepended to all php generated classes
+ // from this .proto. Default is empty.
+ PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
+ // Use this option to change the namespace of php generated classes. Default
+ // is empty. When this option is empty, the package name will be used for
+ // determining the namespace.
+ PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FileOptions) Reset() { *m = FileOptions{} }
+func (m *FileOptions) String() string { return proto.CompactTextString(m) }
+func (*FileOptions) ProtoMessage() {}
+func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+var extRange_FileOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_FileOptions
+}
+
+const Default_FileOptions_JavaMultipleFiles bool = false
+const Default_FileOptions_JavaStringCheckUtf8 bool = false
+const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
+const Default_FileOptions_CcGenericServices bool = false
+const Default_FileOptions_JavaGenericServices bool = false
+const Default_FileOptions_PyGenericServices bool = false
+const Default_FileOptions_PhpGenericServices bool = false
+const Default_FileOptions_Deprecated bool = false
+const Default_FileOptions_CcEnableArenas bool = false
+
+func (m *FileOptions) GetJavaPackage() string {
+ if m != nil && m.JavaPackage != nil {
+ return *m.JavaPackage
+ }
+ return ""
+}
+
+func (m *FileOptions) GetJavaOuterClassname() string {
+ if m != nil && m.JavaOuterClassname != nil {
+ return *m.JavaOuterClassname
+ }
+ return ""
+}
+
+func (m *FileOptions) GetJavaMultipleFiles() bool {
+ if m != nil && m.JavaMultipleFiles != nil {
+ return *m.JavaMultipleFiles
+ }
+ return Default_FileOptions_JavaMultipleFiles
+}
+
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
+ if m != nil && m.JavaGenerateEqualsAndHash != nil {
+ return *m.JavaGenerateEqualsAndHash
+ }
+ return false
+}
+
+func (m *FileOptions) GetJavaStringCheckUtf8() bool {
+ if m != nil && m.JavaStringCheckUtf8 != nil {
+ return *m.JavaStringCheckUtf8
+ }
+ return Default_FileOptions_JavaStringCheckUtf8
+}
+
+func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
+ if m != nil && m.OptimizeFor != nil {
+ return *m.OptimizeFor
+ }
+ return Default_FileOptions_OptimizeFor
+}
+
+func (m *FileOptions) GetGoPackage() string {
+ if m != nil && m.GoPackage != nil {
+ return *m.GoPackage
+ }
+ return ""
+}
+
+func (m *FileOptions) GetCcGenericServices() bool {
+ if m != nil && m.CcGenericServices != nil {
+ return *m.CcGenericServices
+ }
+ return Default_FileOptions_CcGenericServices
+}
+
+func (m *FileOptions) GetJavaGenericServices() bool {
+ if m != nil && m.JavaGenericServices != nil {
+ return *m.JavaGenericServices
+ }
+ return Default_FileOptions_JavaGenericServices
+}
+
+func (m *FileOptions) GetPyGenericServices() bool {
+ if m != nil && m.PyGenericServices != nil {
+ return *m.PyGenericServices
+ }
+ return Default_FileOptions_PyGenericServices
+}
+
+func (m *FileOptions) GetPhpGenericServices() bool {
+ if m != nil && m.PhpGenericServices != nil {
+ return *m.PhpGenericServices
+ }
+ return Default_FileOptions_PhpGenericServices
+}
+
+func (m *FileOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_FileOptions_Deprecated
+}
+
+func (m *FileOptions) GetCcEnableArenas() bool {
+ if m != nil && m.CcEnableArenas != nil {
+ return *m.CcEnableArenas
+ }
+ return Default_FileOptions_CcEnableArenas
+}
+
+func (m *FileOptions) GetObjcClassPrefix() string {
+ if m != nil && m.ObjcClassPrefix != nil {
+ return *m.ObjcClassPrefix
+ }
+ return ""
+}
+
+func (m *FileOptions) GetCsharpNamespace() string {
+ if m != nil && m.CsharpNamespace != nil {
+ return *m.CsharpNamespace
+ }
+ return ""
+}
+
+func (m *FileOptions) GetSwiftPrefix() string {
+ if m != nil && m.SwiftPrefix != nil {
+ return *m.SwiftPrefix
+ }
+ return ""
+}
+
+func (m *FileOptions) GetPhpClassPrefix() string {
+ if m != nil && m.PhpClassPrefix != nil {
+ return *m.PhpClassPrefix
+ }
+ return ""
+}
+
+func (m *FileOptions) GetPhpNamespace() string {
+ if m != nil && m.PhpNamespace != nil {
+ return *m.PhpNamespace
+ }
+ return ""
+}
+
+func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
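+
+// Illustrative sketch (not part of the generated code): the getters above
+// fall back to the declared defaults when a field is unset, so a zero-value
+// FileOptions still reports SPEED optimization and disabled generic services.
+// Assumes the fmt and proto packages are imported; the go_package value shown
+// is hypothetical.
+//
+//    func fileOptionDefaults() {
+//        var opts FileOptions
+//        fmt.Println(opts.GetOptimizeFor())       // SPEED (Default_FileOptions_OptimizeFor)
+//        fmt.Println(opts.GetCcGenericServices()) // false
+//        opts.GoPackage = proto.String("github.com/example/foo;foo")
+//        fmt.Println(opts.GetGoPackage())
+//    }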
+
+type MessageOptions struct {
+ // Set true to use the old proto1 MessageSet wire format for extensions.
+ // This is provided for backwards-compatibility with the MessageSet wire
+ // format. You should not use this for any other reason: It's less
+ // efficient, has fewer features, and is more complicated.
+ //
+ // The message must be defined exactly as follows:
+ // message Foo {
+ // option message_set_wire_format = true;
+ // extensions 4 to max;
+ // }
+ // Note that the message cannot have any defined fields; MessageSets only
+ // have extensions.
+ //
+ // All extensions of your type must be singular messages; e.g. they cannot
+ // be int32s, enums, or repeated messages.
+ //
+ // Because this is an option, the above two restrictions are not enforced by
+ // the protocol compiler.
+ MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
+ // Disables the generation of the standard "descriptor()" accessor, which can
+ // conflict with a field of the same name. This is meant to make migration
+ // from proto1 easier; new code should avoid fields named "descriptor".
+ NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+ // Is this message deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the message, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating messages.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Whether the message is an automatically generated map entry type for the
+ // maps field.
+ //
+ // For maps fields:
+ // map<KeyType, ValueType> map_field = 1;
+ // The parsed descriptor looks like:
+ // message MapFieldEntry {
+ // option map_entry = true;
+ // optional KeyType key = 1;
+ // optional ValueType value = 2;
+ // }
+ // repeated MapFieldEntry map_field = 1;
+ //
+ // Implementations may choose not to generate the map_entry=true message, but
+ // use a native map in the target language to hold the keys and values.
+ // The reflection APIs in such implementations still need to work as
+ // if the field is a repeated message field.
+ //
+ // NOTE: Do not set the option in .proto files. Always use the maps syntax
+ // instead. The option should only be implicitly set by the proto compiler
+ // parser.
+ MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageOptions) Reset() { *m = MessageOptions{} }
+func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
+func (*MessageOptions) ProtoMessage() {}
+func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+var extRange_MessageOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MessageOptions
+}
+
+const Default_MessageOptions_MessageSetWireFormat bool = false
+const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
+const Default_MessageOptions_Deprecated bool = false
+
+func (m *MessageOptions) GetMessageSetWireFormat() bool {
+ if m != nil && m.MessageSetWireFormat != nil {
+ return *m.MessageSetWireFormat
+ }
+ return Default_MessageOptions_MessageSetWireFormat
+}
+
+func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
+ if m != nil && m.NoStandardDescriptorAccessor != nil {
+ return *m.NoStandardDescriptorAccessor
+ }
+ return Default_MessageOptions_NoStandardDescriptorAccessor
+}
+
+func (m *MessageOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_MessageOptions_Deprecated
+}
+
+func (m *MessageOptions) GetMapEntry() bool {
+ if m != nil && m.MapEntry != nil {
+ return *m.MapEntry
+ }
+ return false
+}
+
+func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
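+
+// Illustrative sketch (not part of the generated code): when walking a
+// DescriptorProto tree, the synthesized map entry types described in the
+// map_entry comment above can be recognized and skipped like this.
+//
+//    func isMapEntry(msg *DescriptorProto) bool {
+//        return msg.GetOptions().GetMapEntry()
+//    }
+//
+//    func namedMessages(msg *DescriptorProto) []string {
+//        var names []string
+//        for _, nested := range msg.GetNestedType() {
+//            if isMapEntry(nested) {
+//                continue // auto-generated *Entry type for a map field
+//            }
+//            names = append(names, nested.GetName())
+//        }
+//        return names
+//    }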
+
+type FieldOptions struct {
+ // The ctype option instructs the C++ code generator to use a different
+ // representation of the field than it normally would. See the specific
+ // options below. This option is not yet implemented in the open source
+ // release -- sorry, we'll try to include it in a future version!
+ Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
+ // The packed option can be enabled for repeated primitive fields to enable
+ // a more efficient representation on the wire. Rather than repeatedly
+ // writing the tag and type for each element, the entire array is encoded as
+ // a single length-delimited blob. In proto3, only explicitly setting it to
+ // false will avoid using packed encoding.
+ Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
+ // The jstype option determines the JavaScript type used for values of the
+ // field. The option is permitted only for 64 bit integral and fixed types
+ // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
+ // is represented as a JavaScript string, which avoids loss of precision that
+ // can happen when a large value is converted to a floating point JavaScript
+ // number.
+ // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+ // use the JavaScript "number" type. The behavior of the default option
+ // JS_NORMAL is implementation dependent.
+ //
+ // This option is an enum to permit additional types to be added, e.g.
+ // goog.math.Integer.
+ Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+ // Should this field be parsed lazily? Lazy applies only to message-type
+ // fields. It means that when the outer message is initially parsed, the
+ // inner message's contents will not be parsed but instead stored in encoded
+ // form. The inner message will actually be parsed when it is first accessed.
+ //
+ // This is only a hint. Implementations are free to choose whether to use
+ // eager or lazy parsing regardless of the value of this option. However,
+ // setting this option true suggests that the protocol author believes that
+ // using lazy parsing on this field is worth the additional bookkeeping
+ // overhead typically needed to implement it.
+ //
+ // This option does not affect the public interface of any generated code;
+ // all method signatures remain the same. Furthermore, thread-safety of the
+ // interface is not affected by this option; const methods remain safe to
+ // call from multiple threads concurrently, while non-const methods continue
+ // to require exclusive access.
+ //
+ //
+ // Note that implementations may choose not to check required fields within
+ // a lazy sub-message. That is, calling IsInitialized() on the outer message
+ // may return true even if the inner message has missing required fields.
+ // This is necessary because otherwise the inner message would have to be
+ // parsed in order to perform the check, defeating the purpose of lazy
+ // parsing. An implementation which chooses not to check required fields
+ // must be consistent about it. That is, for any particular sub-message, the
+ // implementation must either *always* check its required fields, or *never*
+ // check its required fields, regardless of whether or not the message has
+ // been parsed.
+ Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+ // Is this field deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for accessors, or it will be completely ignored; at the very least, this
+ // is a formalization for deprecating fields.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // For Google-internal migration only. Do not use.
+ Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldOptions) Reset() { *m = FieldOptions{} }
+func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
+func (*FieldOptions) ProtoMessage() {}
+func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+
+var extRange_FieldOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_FieldOptions
+}
+
+const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
+const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
+const Default_FieldOptions_Lazy bool = false
+const Default_FieldOptions_Deprecated bool = false
+const Default_FieldOptions_Weak bool = false
+
+func (m *FieldOptions) GetCtype() FieldOptions_CType {
+ if m != nil && m.Ctype != nil {
+ return *m.Ctype
+ }
+ return Default_FieldOptions_Ctype
+}
+
+func (m *FieldOptions) GetPacked() bool {
+ if m != nil && m.Packed != nil {
+ return *m.Packed
+ }
+ return false
+}
+
+func (m *FieldOptions) GetJstype() FieldOptions_JSType {
+ if m != nil && m.Jstype != nil {
+ return *m.Jstype
+ }
+ return Default_FieldOptions_Jstype
+}
+
+func (m *FieldOptions) GetLazy() bool {
+ if m != nil && m.Lazy != nil {
+ return *m.Lazy
+ }
+ return Default_FieldOptions_Lazy
+}
+
+func (m *FieldOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_FieldOptions_Deprecated
+}
+
+func (m *FieldOptions) GetWeak() bool {
+ if m != nil && m.Weak != nil {
+ return *m.Weak
+ }
+ return Default_FieldOptions_Weak
+}
+
+func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
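+
+// Illustrative sketch (not part of the generated code): FieldOptions getters
+// fall back to the declared defaults (STRING ctype, JS_NORMAL jstype, lazy
+// off), and explicit values are set through pointer helpers such as
+// proto.Bool. Assumes the fmt and proto packages are imported.
+//
+//    func fieldOptionDefaults() {
+//        var opts FieldOptions
+//        fmt.Println(opts.GetCtype(), opts.GetJstype(), opts.GetLazy())
+//        opts.Packed = proto.Bool(false) // proto3: only an explicit false disables packing
+//        fmt.Println(opts.GetPacked())
+//    }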
+
+type OneofOptions struct {
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OneofOptions) Reset() { *m = OneofOptions{} }
+func (m *OneofOptions) String() string { return proto.CompactTextString(m) }
+func (*OneofOptions) ProtoMessage() {}
+func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+
+var extRange_OneofOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_OneofOptions
+}
+
+func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type EnumOptions struct {
+ // Set this option to true to allow mapping different tag names to the same
+ // value.
+ AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
+ // Is this enum deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the enum, or it will be completely ignored; at the very least, this
+ // is a formalization for deprecating enums.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EnumOptions) Reset() { *m = EnumOptions{} }
+func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumOptions) ProtoMessage() {}
+func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
+var extRange_EnumOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_EnumOptions
+}
+
+const Default_EnumOptions_Deprecated bool = false
+
+func (m *EnumOptions) GetAllowAlias() bool {
+ if m != nil && m.AllowAlias != nil {
+ return *m.AllowAlias
+ }
+ return false
+}
+
+func (m *EnumOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_EnumOptions_Deprecated
+}
+
+func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type EnumValueOptions struct {
+ // Is this enum value deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the enum value, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating enum values.
+ Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} }
+func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumValueOptions) ProtoMessage() {}
+func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+
+var extRange_EnumValueOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_EnumValueOptions
+}
+
+const Default_EnumValueOptions_Deprecated bool = false
+
+func (m *EnumValueOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_EnumValueOptions_Deprecated
+}
+
+func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type ServiceOptions struct {
+ // Is this service deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the service, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating services.
+ Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ServiceOptions) Reset() { *m = ServiceOptions{} }
+func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
+func (*ServiceOptions) ProtoMessage() {}
+func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+
+var extRange_ServiceOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_ServiceOptions
+}
+
+const Default_ServiceOptions_Deprecated bool = false
+
+func (m *ServiceOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_ServiceOptions_Deprecated
+}
+
+func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type MethodOptions struct {
+ // Is this method deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the method, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating methods.
+ Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MethodOptions) Reset() { *m = MethodOptions{} }
+func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
+func (*MethodOptions) ProtoMessage() {}
+func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+
+var extRange_MethodOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MethodOptions
+}
+
+const Default_MethodOptions_Deprecated bool = false
+const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
+
+func (m *MethodOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_MethodOptions_Deprecated
+}
+
+func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
+ if m != nil && m.IdempotencyLevel != nil {
+ return *m.IdempotencyLevel
+ }
+ return Default_MethodOptions_IdempotencyLevel
+}
+
+func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+type UninterpretedOption struct {
+ Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+ // The value of the uninterpreted option, in whatever type the tokenizer
+ // identified it as during parsing. Exactly one of these should be set.
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
+ PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
+ NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
+ DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
+ StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
+ AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} }
+func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption) ProtoMessage() {}
+func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+
+func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *UninterpretedOption) GetIdentifierValue() string {
+ if m != nil && m.IdentifierValue != nil {
+ return *m.IdentifierValue
+ }
+ return ""
+}
+
+func (m *UninterpretedOption) GetPositiveIntValue() uint64 {
+ if m != nil && m.PositiveIntValue != nil {
+ return *m.PositiveIntValue
+ }
+ return 0
+}
+
+func (m *UninterpretedOption) GetNegativeIntValue() int64 {
+ if m != nil && m.NegativeIntValue != nil {
+ return *m.NegativeIntValue
+ }
+ return 0
+}
+
+func (m *UninterpretedOption) GetDoubleValue() float64 {
+ if m != nil && m.DoubleValue != nil {
+ return *m.DoubleValue
+ }
+ return 0
+}
+
+func (m *UninterpretedOption) GetStringValue() []byte {
+ if m != nil {
+ return m.StringValue
+ }
+ return nil
+}
+
+func (m *UninterpretedOption) GetAggregateValue() string {
+ if m != nil && m.AggregateValue != nil {
+ return *m.AggregateValue
+ }
+ return ""
+}
+
+// The name of the uninterpreted option. Each string represents a segment in
+// a dot-separated name. is_extension is true iff a segment represents an
+// extension (denoted with parentheses in options specs in .proto files).
+// E.g., { ["foo", false], ["bar.baz", true], ["qux", false] } represents
+// "foo.(bar.baz).qux".
+type UninterpretedOption_NamePart struct {
+ NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
+ IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} }
+func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption_NamePart) ProtoMessage() {}
+func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{18, 0}
+}
+
+func (m *UninterpretedOption_NamePart) GetNamePart() string {
+ if m != nil && m.NamePart != nil {
+ return *m.NamePart
+ }
+ return ""
+}
+
+func (m *UninterpretedOption_NamePart) GetIsExtension() bool {
+ if m != nil && m.IsExtension != nil {
+ return *m.IsExtension
+ }
+ return false
+}
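+
+// Illustrative sketch (not part of the generated code): rebuilding the
+// dotted option name "foo.(bar.baz).qux" from the NamePart segments shown in
+// the comment above. Assumes the strings package is imported.
+//
+//    func fullOptionName(parts []*UninterpretedOption_NamePart) string {
+//        segs := make([]string, 0, len(parts))
+//        for _, p := range parts {
+//            if p.GetIsExtension() {
+//                segs = append(segs, "("+p.GetNamePart()+")")
+//            } else {
+//                segs = append(segs, p.GetNamePart())
+//            }
+//        }
+//        return strings.Join(segs, ".")
+//    }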
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+type SourceCodeInfo struct {
+ // A Location identifies a piece of source code in a .proto file which
+ // corresponds to a particular definition. This information is intended
+ // to be useful to IDEs, code indexers, documentation generators, and similar
+ // tools.
+ //
+ // For example, say we have a file like:
+ // message Foo {
+ // optional string foo = 1;
+ // }
+ // Let's look at just the field definition:
+ // optional string foo = 1;
+ // ^ ^^ ^^ ^ ^^^
+ // a bc de f ghi
+ // We have the following locations:
+ // span path represents
+ // [a,i) [ 4, 0, 2, 0 ] The whole field definition.
+ // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
+ // [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
+ // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
+ // [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
+ //
+ // Notes:
+ // - A location may refer to a repeated field itself (i.e. not to any
+ // particular index within it). This is used whenever a set of elements are
+ // logically enclosed in a single code segment. For example, an entire
+ // extend block (possibly containing multiple extension definitions) will
+ // have an outer location whose path refers to the "extensions" repeated
+ // field without an index.
+ // - Multiple locations may have the same path. This happens when a single
+ // logical declaration is spread out across multiple places. The most
+ // obvious example is the "extend" block again -- there may be multiple
+ // extend blocks in the same scope, each of which will have the same path.
+ // - A location's span is not always a subset of its parent's span. For
+ // example, the "extendee" of an extension declaration appears at the
+ // beginning of the "extend" block and is shared by all extensions within
+ // the block.
+ // - Just because a location's span is a subset of some other location's span
+ // does not mean that it is a descendant. For example, a "group" defines
+ // both a type and a field in a single declaration. Thus, the locations
+ // corresponding to the type and field and their components will overlap.
+ // - Code which tries to interpret locations should probably be designed to
+ // ignore those that it doesn't understand, as more types of locations could
+ // be recorded in the future.
+ Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} }
+func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo) ProtoMessage() {}
+func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+
+func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
+ if m != nil {
+ return m.Location
+ }
+ return nil
+}
+
+type SourceCodeInfo_Location struct {
+ // Identifies which part of the FileDescriptorProto was defined at this
+ // location.
+ //
+ // Each element is a field number or an index. They form a path from
+ // the root FileDescriptorProto to the place where the definition occurs. For
+ // example, this path:
+ // [ 4, 3, 2, 7, 1 ]
+ // refers to:
+ // file.message_type(3) // 4, 3
+ // .field(7) // 2, 7
+ // .name() // 1
+ // This is because FileDescriptorProto.message_type has field number 4:
+ // repeated DescriptorProto message_type = 4;
+ // and DescriptorProto.field has field number 2:
+ // repeated FieldDescriptorProto field = 2;
+ // and FieldDescriptorProto.name has field number 1:
+ // optional string name = 1;
+ //
+ // Thus, the above path gives the location of a field name. If we removed
+ // the last element:
+ // [ 4, 3, 2, 7 ]
+ // this path refers to the whole field declaration (from the beginning
+ // of the label to the terminating semicolon).
+ Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+ // Always has exactly three or four elements: start line, start column,
+ // end line (optional, otherwise assumed same as start line), end column.
+ // These are packed into a single field for efficiency. Note that line
+ // and column numbers are zero-based -- typically you will want to add
+ // 1 to each before displaying to a user.
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
+ // If this SourceCodeInfo represents a complete declaration, these are any
+ // comments appearing before and after the declaration which appear to be
+ // attached to the declaration.
+ //
+ // A series of line comments appearing on consecutive lines, with no other
+ // tokens appearing on those lines, will be treated as a single comment.
+ //
+ // leading_detached_comments will keep paragraphs of comments that appear
+ // before (but not connected to) the current element. Each paragraph,
+ // separated by empty lines, will be one comment element in the repeated
+ // field.
+ //
+ // Only the comment content is provided; comment markers (e.g. //) are
+ // stripped out. For block comments, leading whitespace and an asterisk
+ // will be stripped from the beginning of each line other than the first.
+ // Newlines are included in the output.
+ //
+ // Examples:
+ //
+ // optional int32 foo = 1; // Comment attached to foo.
+ // // Comment attached to bar.
+ // optional int32 bar = 2;
+ //
+ // optional string baz = 3;
+ // // Comment attached to baz.
+ // // Another line attached to baz.
+ //
+ // // Comment attached to qux.
+ // //
+ // // Another line attached to qux.
+ // optional double qux = 4;
+ //
+ // // Detached comment for corge. This is not leading or trailing comments
+ // // to qux or corge because there are blank lines separating it from
+ // // both.
+ //
+ // // Detached comment for corge paragraph 2.
+ //
+ // optional string corge = 5;
+ // /* Block comment attached
+ // * to corge. Leading asterisks
+ // * will be removed. */
+ // /* Block comment attached to
+ // * grault. */
+ // optional int32 grault = 6;
+ //
+ // // ignored detached comments.
+ LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
+ TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
+ LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} }
+func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo_Location) ProtoMessage() {}
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} }
+
+func (m *SourceCodeInfo_Location) GetPath() []int32 {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+func (m *SourceCodeInfo_Location) GetSpan() []int32 {
+ if m != nil {
+ return m.Span
+ }
+ return nil
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingComments() string {
+ if m != nil && m.LeadingComments != nil {
+ return *m.LeadingComments
+ }
+ return ""
+}
+
+func (m *SourceCodeInfo_Location) GetTrailingComments() string {
+ if m != nil && m.TrailingComments != nil {
+ return *m.TrailingComments
+ }
+ return ""
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
+ if m != nil {
+ return m.LeadingDetachedComments
+ }
+ return nil
+}
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+type GeneratedCodeInfo struct {
+ // An Annotation connects some span of text in generated code to an element
+ // of its generating .proto file.
+ Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} }
+func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*GeneratedCodeInfo) ProtoMessage() {}
+func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+
+func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
+ if m != nil {
+ return m.Annotation
+ }
+ return nil
+}
+
+type GeneratedCodeInfo_Annotation struct {
+ // Identifies the element in the original source .proto file. This field
+ // is formatted the same as SourceCodeInfo.Location.path.
+ Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+ // Identifies the filesystem path to the original source .proto.
+ SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"`
+ // Identifies the starting offset in bytes in the generated code
+ // that relates to the identified object.
+ Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"`
+ // Identifies the ending offset in bytes in the generated code that
+ // relates to the identified offset. The end offset should be one past
+ // the last relevant byte (so the length of the text = end - begin).
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} }
+func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) }
+func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
+func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{20, 0}
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string {
+ if m != nil && m.SourceFile != nil {
+ return *m.SourceFile
+ }
+ return ""
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 {
+ if m != nil && m.Begin != nil {
+ return *m.Begin
+ }
+ return 0
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
+ proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
+ proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
+ proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
+ proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
+ proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions")
+ proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
+ proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
+ proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
+ proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto")
+ proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto")
+ proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto")
+ proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions")
+ proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions")
+ proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions")
+ proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions")
+ proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions")
+ proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions")
+ proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions")
+ proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions")
+ proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption")
+ proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart")
+ proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo")
+ proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
+ proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo")
+ proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation")
+ proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
+ proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
+ proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
+ proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
+ proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
+ proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value)
+}
+
+func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 2519 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7,
+ 0x15, 0x0e, 0x7f, 0x45, 0x1e, 0x52, 0xd4, 0x68, 0xa4, 0xd8, 0x6b, 0xe5, 0xc7, 0x32, 0xf3, 0x63,
+ 0xd9, 0x69, 0xa8, 0x40, 0xb1, 0x1d, 0x47, 0x29, 0xd2, 0x52, 0xe4, 0x5a, 0xa1, 0x4a, 0x91, 0xec,
+ 0x92, 0x6a, 0x7e, 0x6e, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, 0xb4, 0xad,
+ 0xa0, 0x17, 0x06, 0x7a, 0x55, 0xa0, 0x0f, 0x50, 0x14, 0x45, 0x2f, 0x72, 0x13, 0xa0, 0x0f, 0x50,
+ 0x20, 0x77, 0x7d, 0x82, 0x02, 0x79, 0x83, 0xa2, 0x28, 0xd0, 0x3e, 0x46, 0x31, 0x33, 0xbb, 0xcb,
+ 0x5d, 0xfe, 0xc4, 0x6a, 0x80, 0x38, 0x57, 0xe4, 0x7c, 0xe7, 0x3b, 0x67, 0xce, 0x9c, 0x39, 0x33,
+ 0x73, 0x66, 0x16, 0x76, 0x47, 0xb6, 0x3d, 0x32, 0xe9, 0xbe, 0xe3, 0xda, 0xbe, 0x7d, 0x3e, 0x1d,
+ 0xee, 0xeb, 0xd4, 0xd3, 0x5c, 0xc3, 0xf1, 0x6d, 0xb7, 0xc6, 0x31, 0xbc, 0x21, 0x18, 0xb5, 0x90,
+ 0x51, 0x3d, 0x85, 0xcd, 0x07, 0x86, 0x49, 0x9b, 0x11, 0xb1, 0x4f, 0x7d, 0x7c, 0x1f, 0xb2, 0x43,
+ 0xc3, 0xa4, 0x52, 0x6a, 0x37, 0xb3, 0x57, 0x3a, 0x78, 0xb3, 0x36, 0xa7, 0x54, 0x4b, 0x6a, 0xf4,
+ 0x18, 0xac, 0x70, 0x8d, 0xea, 0xbf, 0xb3, 0xb0, 0xb5, 0x44, 0x8a, 0x31, 0x64, 0x2d, 0x32, 0x61,
+ 0x16, 0x53, 0x7b, 0x45, 0x85, 0xff, 0xc7, 0x12, 0xac, 0x39, 0x44, 0x7b, 0x44, 0x46, 0x54, 0x4a,
+ 0x73, 0x38, 0x6c, 0xe2, 0xd7, 0x01, 0x74, 0xea, 0x50, 0x4b, 0xa7, 0x96, 0x76, 0x21, 0x65, 0x76,
+ 0x33, 0x7b, 0x45, 0x25, 0x86, 0xe0, 0x77, 0x60, 0xd3, 0x99, 0x9e, 0x9b, 0x86, 0xa6, 0xc6, 0x68,
+ 0xb0, 0x9b, 0xd9, 0xcb, 0x29, 0x48, 0x08, 0x9a, 0x33, 0xf2, 0x4d, 0xd8, 0x78, 0x42, 0xc9, 0xa3,
+ 0x38, 0xb5, 0xc4, 0xa9, 0x15, 0x06, 0xc7, 0x88, 0x0d, 0x28, 0x4f, 0xa8, 0xe7, 0x91, 0x11, 0x55,
+ 0xfd, 0x0b, 0x87, 0x4a, 0x59, 0x3e, 0xfa, 0xdd, 0x85, 0xd1, 0xcf, 0x8f, 0xbc, 0x14, 0x68, 0x0d,
+ 0x2e, 0x1c, 0x8a, 0xeb, 0x50, 0xa4, 0xd6, 0x74, 0x22, 0x2c, 0xe4, 0x56, 0xc4, 0x4f, 0xb6, 0xa6,
+ 0x93, 0x79, 0x2b, 0x05, 0xa6, 0x16, 0x98, 0x58, 0xf3, 0xa8, 0xfb, 0xd8, 0xd0, 0xa8, 0x94, 0xe7,
+ 0x06, 0x6e, 0x2e, 0x18, 0xe8, 0x0b, 0xf9, 0xbc, 0x8d, 0x50, 0x0f, 0x37, 0xa0, 0x48, 0x9f, 0xfa,
+ 0xd4, 0xf2, 0x0c, 0xdb, 0x92, 0xd6, 0xb8, 0x91, 0xb7, 0x96, 0xcc, 0x22, 0x35, 0xf5, 0x79, 0x13,
+ 0x33, 0x3d, 0x7c, 0x0f, 0xd6, 0x6c, 0xc7, 0x37, 0x6c, 0xcb, 0x93, 0x0a, 0xbb, 0xa9, 0xbd, 0xd2,
+ 0xc1, 0xab, 0x4b, 0x13, 0xa1, 0x2b, 0x38, 0x4a, 0x48, 0xc6, 0x2d, 0x40, 0x9e, 0x3d, 0x75, 0x35,
+ 0xaa, 0x6a, 0xb6, 0x4e, 0x55, 0xc3, 0x1a, 0xda, 0x52, 0x91, 0x1b, 0xb8, 0xbe, 0x38, 0x10, 0x4e,
+ 0x6c, 0xd8, 0x3a, 0x6d, 0x59, 0x43, 0x5b, 0xa9, 0x78, 0x89, 0x36, 0xbe, 0x02, 0x79, 0xef, 0xc2,
+ 0xf2, 0xc9, 0x53, 0xa9, 0xcc, 0x33, 0x24, 0x68, 0x55, 0xbf, 0xcd, 0xc3, 0xc6, 0x65, 0x52, 0xec,
+ 0x23, 0xc8, 0x0d, 0xd9, 0x28, 0xa5, 0xf4, 0xff, 0x13, 0x03, 0xa1, 0x93, 0x0c, 0x62, 0xfe, 0x07,
+ 0x06, 0xb1, 0x0e, 0x25, 0x8b, 0x7a, 0x3e, 0xd5, 0x45, 0x46, 0x64, 0x2e, 0x99, 0x53, 0x20, 0x94,
+ 0x16, 0x53, 0x2a, 0xfb, 0x83, 0x52, 0xea, 0x33, 0xd8, 0x88, 0x5c, 0x52, 0x5d, 0x62, 0x8d, 0xc2,
+ 0xdc, 0xdc, 0x7f, 0x9e, 0x27, 0x35, 0x39, 0xd4, 0x53, 0x98, 0x9a, 0x52, 0xa1, 0x89, 0x36, 0x6e,
+ 0x02, 0xd8, 0x16, 0xb5, 0x87, 0xaa, 0x4e, 0x35, 0x53, 0x2a, 0xac, 0x88, 0x52, 0x97, 0x51, 0x16,
+ 0xa2, 0x64, 0x0b, 0x54, 0x33, 0xf1, 0x87, 0xb3, 0x54, 0x5b, 0x5b, 0x91, 0x29, 0xa7, 0x62, 0x91,
+ 0x2d, 0x64, 0xdb, 0x19, 0x54, 0x5c, 0xca, 0xf2, 0x9e, 0xea, 0xc1, 0xc8, 0x8a, 0xdc, 0x89, 0xda,
+ 0x73, 0x47, 0xa6, 0x04, 0x6a, 0x62, 0x60, 0xeb, 0x6e, 0xbc, 0x89, 0xdf, 0x80, 0x08, 0x50, 0x79,
+ 0x5a, 0x01, 0xdf, 0x85, 0xca, 0x21, 0xd8, 0x21, 0x13, 0xba, 0xf3, 0x15, 0x54, 0x92, 0xe1, 0xc1,
+ 0xdb, 0x90, 0xf3, 0x7c, 0xe2, 0xfa, 0x3c, 0x0b, 0x73, 0x8a, 0x68, 0x60, 0x04, 0x19, 0x6a, 0xe9,
+ 0x7c, 0x97, 0xcb, 0x29, 0xec, 0x2f, 0xfe, 0xe5, 0x6c, 0xc0, 0x19, 0x3e, 0xe0, 0xb7, 0x17, 0x67,
+ 0x34, 0x61, 0x79, 0x7e, 0xdc, 0x3b, 0x1f, 0xc0, 0x7a, 0x62, 0x00, 0x97, 0xed, 0xba, 0xfa, 0x5b,
+ 0x78, 0x79, 0xa9, 0x69, 0xfc, 0x19, 0x6c, 0x4f, 0x2d, 0xc3, 0xf2, 0xa9, 0xeb, 0xb8, 0x94, 0x65,
+ 0xac, 0xe8, 0x4a, 0xfa, 0xcf, 0xda, 0x8a, 0x9c, 0x3b, 0x8b, 0xb3, 0x85, 0x15, 0x65, 0x6b, 0xba,
+ 0x08, 0xde, 0x2e, 0x16, 0xfe, 0xbb, 0x86, 0x9e, 0x3d, 0x7b, 0xf6, 0x2c, 0x5d, 0xfd, 0x63, 0x1e,
+ 0xb6, 0x97, 0xad, 0x99, 0xa5, 0xcb, 0xf7, 0x0a, 0xe4, 0xad, 0xe9, 0xe4, 0x9c, 0xba, 0x3c, 0x48,
+ 0x39, 0x25, 0x68, 0xe1, 0x3a, 0xe4, 0x4c, 0x72, 0x4e, 0x4d, 0x29, 0xbb, 0x9b, 0xda, 0xab, 0x1c,
+ 0xbc, 0x73, 0xa9, 0x55, 0x59, 0x6b, 0x33, 0x15, 0x45, 0x68, 0xe2, 0x8f, 0x21, 0x1b, 0x6c, 0xd1,
+ 0xcc, 0xc2, 0xed, 0xcb, 0x59, 0x60, 0x6b, 0x49, 0xe1, 0x7a, 0xf8, 0x15, 0x28, 0xb2, 0x5f, 0x91,
+ 0x1b, 0x79, 0xee, 0x73, 0x81, 0x01, 0x2c, 0x2f, 0xf0, 0x0e, 0x14, 0xf8, 0x32, 0xd1, 0x69, 0x78,
+ 0xb4, 0x45, 0x6d, 0x96, 0x58, 0x3a, 0x1d, 0x92, 0xa9, 0xe9, 0xab, 0x8f, 0x89, 0x39, 0xa5, 0x3c,
+ 0xe1, 0x8b, 0x4a, 0x39, 0x00, 0x7f, 0xc3, 0x30, 0x7c, 0x1d, 0x4a, 0x62, 0x55, 0x19, 0x96, 0x4e,
+ 0x9f, 0xf2, 0xdd, 0x33, 0xa7, 0x88, 0x85, 0xd6, 0x62, 0x08, 0xeb, 0xfe, 0xa1, 0x67, 0x5b, 0x61,
+ 0x6a, 0xf2, 0x2e, 0x18, 0xc0, 0xbb, 0xff, 0x60, 0x7e, 0xe3, 0x7e, 0x6d, 0xf9, 0xf0, 0xe6, 0x73,
+ 0xaa, 0xfa, 0xb7, 0x34, 0x64, 0xf9, 0x7e, 0xb1, 0x01, 0xa5, 0xc1, 0xe7, 0x3d, 0x59, 0x6d, 0x76,
+ 0xcf, 0x8e, 0xda, 0x32, 0x4a, 0xe1, 0x0a, 0x00, 0x07, 0x1e, 0xb4, 0xbb, 0xf5, 0x01, 0x4a, 0x47,
+ 0xed, 0x56, 0x67, 0x70, 0xef, 0x0e, 0xca, 0x44, 0x0a, 0x67, 0x02, 0xc8, 0xc6, 0x09, 0xef, 0x1f,
+ 0xa0, 0x1c, 0x46, 0x50, 0x16, 0x06, 0x5a, 0x9f, 0xc9, 0xcd, 0x7b, 0x77, 0x50, 0x3e, 0x89, 0xbc,
+ 0x7f, 0x80, 0xd6, 0xf0, 0x3a, 0x14, 0x39, 0x72, 0xd4, 0xed, 0xb6, 0x51, 0x21, 0xb2, 0xd9, 0x1f,
+ 0x28, 0xad, 0xce, 0x31, 0x2a, 0x46, 0x36, 0x8f, 0x95, 0xee, 0x59, 0x0f, 0x41, 0x64, 0xe1, 0x54,
+ 0xee, 0xf7, 0xeb, 0xc7, 0x32, 0x2a, 0x45, 0x8c, 0xa3, 0xcf, 0x07, 0x72, 0x1f, 0x95, 0x13, 0x6e,
+ 0xbd, 0x7f, 0x80, 0xd6, 0xa3, 0x2e, 0xe4, 0xce, 0xd9, 0x29, 0xaa, 0xe0, 0x4d, 0x58, 0x17, 0x5d,
+ 0x84, 0x4e, 0x6c, 0xcc, 0x41, 0xf7, 0xee, 0x20, 0x34, 0x73, 0x44, 0x58, 0xd9, 0x4c, 0x00, 0xf7,
+ 0xee, 0x20, 0x5c, 0x6d, 0x40, 0x8e, 0x67, 0x17, 0xc6, 0x50, 0x69, 0xd7, 0x8f, 0xe4, 0xb6, 0xda,
+ 0xed, 0x0d, 0x5a, 0xdd, 0x4e, 0xbd, 0x8d, 0x52, 0x33, 0x4c, 0x91, 0x7f, 0x7d, 0xd6, 0x52, 0xe4,
+ 0x26, 0x4a, 0xc7, 0xb1, 0x9e, 0x5c, 0x1f, 0xc8, 0x4d, 0x94, 0xa9, 0x6a, 0xb0, 0xbd, 0x6c, 0x9f,
+ 0x5c, 0xba, 0x32, 0x62, 0x53, 0x9c, 0x5e, 0x31, 0xc5, 0xdc, 0xd6, 0xc2, 0x14, 0x7f, 0x9d, 0x82,
+ 0xad, 0x25, 0x67, 0xc5, 0xd2, 0x4e, 0x7e, 0x01, 0x39, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb5, 0xf4,
+ 0xd0, 0xe1, 0x09, 0xbb, 0x70, 0x82, 0x72, 0xbd, 0x78, 0x05, 0x91, 0x59, 0x51, 0x41, 0x30, 0x13,
+ 0x0b, 0x4e, 0xfe, 0x2e, 0x05, 0xd2, 0x2a, 0xdb, 0xcf, 0xd9, 0x28, 0xd2, 0x89, 0x8d, 0xe2, 0xa3,
+ 0x79, 0x07, 0x6e, 0xac, 0x1e, 0xc3, 0x82, 0x17, 0xdf, 0xa4, 0xe0, 0xca, 0xf2, 0x42, 0x6b, 0xa9,
+ 0x0f, 0x1f, 0x43, 0x7e, 0x42, 0xfd, 0xb1, 0x1d, 0x16, 0x1b, 0x6f, 0x2f, 0x39, 0xc2, 0x98, 0x78,
+ 0x3e, 0x56, 0x81, 0x56, 0xfc, 0x0c, 0xcc, 0xac, 0xaa, 0x96, 0x84, 0x37, 0x0b, 0x9e, 0xfe, 0x3e,
+ 0x0d, 0x2f, 0x2f, 0x35, 0xbe, 0xd4, 0xd1, 0xd7, 0x00, 0x0c, 0xcb, 0x99, 0xfa, 0xa2, 0xa0, 0x10,
+ 0xfb, 0x53, 0x91, 0x23, 0x7c, 0xed, 0xb3, 0xbd, 0x67, 0xea, 0x47, 0xf2, 0x0c, 0x97, 0x83, 0x80,
+ 0x38, 0xe1, 0xfe, 0xcc, 0xd1, 0x2c, 0x77, 0xf4, 0xf5, 0x15, 0x23, 0x5d, 0x38, 0xab, 0xdf, 0x03,
+ 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x26, 0x86, 0x35, 0xe2, 0x1b, 0x70, 0xe1,
+ 0x30, 0x37, 0x24, 0xa6, 0x47, 0x95, 0x0d, 0x21, 0xee, 0x87, 0x52, 0xa6, 0xc1, 0xcf, 0x38, 0x37,
+ 0xa6, 0x91, 0x4f, 0x68, 0x08, 0x71, 0xa4, 0x51, 0xfd, 0xb6, 0x00, 0xa5, 0x58, 0x59, 0x8a, 0x6f,
+ 0x40, 0xf9, 0x21, 0x79, 0x4c, 0xd4, 0xf0, 0xaa, 0x21, 0x22, 0x51, 0x62, 0x58, 0x2f, 0xb8, 0x6e,
+ 0xbc, 0x07, 0xdb, 0x9c, 0x62, 0x4f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, 0x3c, 0x8f, 0x07, 0xad, 0xc0,
+ 0xa9, 0x98, 0xc9, 0xba, 0x4c, 0xd4, 0x08, 0x25, 0xf8, 0x2e, 0x6c, 0x71, 0x8d, 0xc9, 0xd4, 0xf4,
+ 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xfc, 0x78, 0x7c, 0x23, 0x8e, 0x3c, 0xdb, 0x64, 0x8c, 0xd3, 0x80,
+ 0xc0, 0x3c, 0xf2, 0x70, 0x13, 0x5e, 0xe3, 0x6a, 0x23, 0x6a, 0x51, 0x97, 0xf8, 0x54, 0xa5, 0x5f,
+ 0x4e, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x31, 0xf1, 0xc6, 0xd2, 0x36, 0x33, 0x70, 0x94, 0x96,
+ 0x52, 0xca, 0x35, 0x46, 0x3c, 0x0e, 0x78, 0x32, 0xa7, 0xd5, 0x2d, 0xfd, 0x13, 0xe2, 0x8d, 0xf1,
+ 0x21, 0x5c, 0xe1, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x91, 0xaa, 0x8d, 0xa9, 0xf6, 0x48, 0x9d, 0xfa,
+ 0xc3, 0xfb, 0xd2, 0x2b, 0xf1, 0xfe, 0xb9, 0x87, 0x7d, 0xce, 0x69, 0x30, 0xca, 0x99, 0x3f, 0xbc,
+ 0x8f, 0xfb, 0x50, 0x66, 0x93, 0x31, 0x31, 0xbe, 0xa2, 0xea, 0xd0, 0x76, 0xf9, 0xc9, 0x52, 0x59,
+ 0xb2, 0xb2, 0x63, 0x11, 0xac, 0x75, 0x03, 0x85, 0x53, 0x5b, 0xa7, 0x87, 0xb9, 0x7e, 0x4f, 0x96,
+ 0x9b, 0x4a, 0x29, 0xb4, 0xf2, 0xc0, 0x76, 0x59, 0x42, 0x8d, 0xec, 0x28, 0xc0, 0x25, 0x91, 0x50,
+ 0x23, 0x3b, 0x0c, 0xef, 0x5d, 0xd8, 0xd2, 0x34, 0x31, 0x66, 0x43, 0x53, 0x83, 0x2b, 0x8a, 0x27,
+ 0xa1, 0x44, 0xb0, 0x34, 0xed, 0x58, 0x10, 0x82, 0x1c, 0xf7, 0xf0, 0x87, 0xf0, 0xf2, 0x2c, 0x58,
+ 0x71, 0xc5, 0xcd, 0x85, 0x51, 0xce, 0xab, 0xde, 0x85, 0x2d, 0xe7, 0x62, 0x51, 0x11, 0x27, 0x7a,
+ 0x74, 0x2e, 0xe6, 0xd5, 0x3e, 0x80, 0x6d, 0x67, 0xec, 0x2c, 0xea, 0xdd, 0x8e, 0xeb, 0x61, 0x67,
+ 0xec, 0xcc, 0x2b, 0xbe, 0xc5, 0xef, 0xab, 0x2e, 0xd5, 0x88, 0x4f, 0x75, 0xe9, 0x6a, 0x9c, 0x1e,
+ 0x13, 0xe0, 0x7d, 0x40, 0x9a, 0xa6, 0x52, 0x8b, 0x9c, 0x9b, 0x54, 0x25, 0x2e, 0xb5, 0x88, 0x27,
+ 0x5d, 0x8f, 0x93, 0x2b, 0x9a, 0x26, 0x73, 0x69, 0x9d, 0x0b, 0xf1, 0x6d, 0xd8, 0xb4, 0xcf, 0x1f,
+ 0x6a, 0x22, 0x25, 0x55, 0xc7, 0xa5, 0x43, 0xe3, 0xa9, 0xf4, 0x26, 0x8f, 0xef, 0x06, 0x13, 0xf0,
+ 0x84, 0xec, 0x71, 0x18, 0xdf, 0x02, 0xa4, 0x79, 0x63, 0xe2, 0x3a, 0xbc, 0x26, 0xf0, 0x1c, 0xa2,
+ 0x51, 0xe9, 0x2d, 0x41, 0x15, 0x78, 0x27, 0x84, 0xd9, 0x92, 0xf0, 0x9e, 0x18, 0x43, 0x3f, 0xb4,
+ 0x78, 0x53, 0x2c, 0x09, 0x8e, 0x05, 0xd6, 0xf6, 0x00, 0xb1, 0x50, 0x24, 0x3a, 0xde, 0xe3, 0xb4,
+ 0x8a, 0x33, 0x76, 0xe2, 0xfd, 0xbe, 0x01, 0xeb, 0x8c, 0x39, 0xeb, 0xf4, 0x96, 0xa8, 0x67, 0x9c,
+ 0x71, 0xac, 0xc7, 0x1f, 0xad, 0xb4, 0xac, 0x1e, 0x42, 0x39, 0x9e, 0x9f, 0xb8, 0x08, 0x22, 0x43,
+ 0x51, 0x8a, 0x9d, 0xf5, 0x8d, 0x6e, 0x93, 0x9d, 0xd2, 0x5f, 0xc8, 0x28, 0xcd, 0xaa, 0x85, 0x76,
+ 0x6b, 0x20, 0xab, 0xca, 0x59, 0x67, 0xd0, 0x3a, 0x95, 0x51, 0x26, 0x56, 0x96, 0x9e, 0x64, 0x0b,
+ 0x6f, 0xa3, 0x9b, 0xd5, 0xef, 0xd2, 0x50, 0x49, 0xde, 0x33, 0xf0, 0xcf, 0xe1, 0x6a, 0xf8, 0x28,
+ 0xe0, 0x51, 0x5f, 0x7d, 0x62, 0xb8, 0x7c, 0xe1, 0x4c, 0x88, 0xa8, 0xb3, 0xa3, 0xa9, 0xdb, 0x0e,
+ 0x58, 0x7d, 0xea, 0x7f, 0x6a, 0xb8, 0x6c, 0x59, 0x4c, 0x88, 0x8f, 0xdb, 0x70, 0xdd, 0xb2, 0x55,
+ 0xcf, 0x27, 0x96, 0x4e, 0x5c, 0x5d, 0x9d, 0x3d, 0xc7, 0xa8, 0x44, 0xd3, 0xa8, 0xe7, 0xd9, 0xe2,
+ 0xc0, 0x8a, 0xac, 0xbc, 0x6a, 0xd9, 0xfd, 0x80, 0x3c, 0xdb, 0xc9, 0xeb, 0x01, 0x75, 0x2e, 0xcd,
+ 0x32, 0xab, 0xd2, 0xec, 0x15, 0x28, 0x4e, 0x88, 0xa3, 0x52, 0xcb, 0x77, 0x2f, 0x78, 0x75, 0x59,
+ 0x50, 0x0a, 0x13, 0xe2, 0xc8, 0xac, 0xfd, 0x42, 0x8a, 0xfc, 0x93, 0x6c, 0xa1, 0x80, 0x8a, 0x27,
+ 0xd9, 0x42, 0x11, 0x41, 0xf5, 0x5f, 0x19, 0x28, 0xc7, 0xab, 0x4d, 0x56, 0xbc, 0x6b, 0xfc, 0x64,
+ 0x49, 0xf1, 0xbd, 0xe7, 0x8d, 0xef, 0xad, 0x4d, 0x6b, 0x0d, 0x76, 0xe4, 0x1c, 0xe6, 0x45, 0x0d,
+ 0xa8, 0x08, 0x4d, 0x76, 0xdc, 0xb3, 0xdd, 0x86, 0x8a, 0x7b, 0x4d, 0x41, 0x09, 0x5a, 0xf8, 0x18,
+ 0xf2, 0x0f, 0x3d, 0x6e, 0x3b, 0xcf, 0x6d, 0xbf, 0xf9, 0xfd, 0xb6, 0x4f, 0xfa, 0xdc, 0x78, 0xf1,
+ 0xa4, 0xaf, 0x76, 0xba, 0xca, 0x69, 0xbd, 0xad, 0x04, 0xea, 0xf8, 0x1a, 0x64, 0x4d, 0xf2, 0xd5,
+ 0x45, 0xf2, 0x70, 0xe2, 0xd0, 0x65, 0x27, 0xe1, 0x1a, 0x64, 0x9f, 0x50, 0xf2, 0x28, 0x79, 0x24,
+ 0x70, 0xe8, 0x47, 0x5c, 0x0c, 0xfb, 0x90, 0xe3, 0xf1, 0xc2, 0x00, 0x41, 0xc4, 0xd0, 0x4b, 0xb8,
+ 0x00, 0xd9, 0x46, 0x57, 0x61, 0x0b, 0x02, 0x41, 0x59, 0xa0, 0x6a, 0xaf, 0x25, 0x37, 0x64, 0x94,
+ 0xae, 0xde, 0x85, 0xbc, 0x08, 0x02, 0x5b, 0x2c, 0x51, 0x18, 0xd0, 0x4b, 0x41, 0x33, 0xb0, 0x91,
+ 0x0a, 0xa5, 0x67, 0xa7, 0x47, 0xb2, 0x82, 0xd2, 0xc9, 0xa9, 0xce, 0xa2, 0x5c, 0xd5, 0x83, 0x72,
+ 0xbc, 0xdc, 0x7c, 0x31, 0x57, 0xc9, 0xbf, 0xa7, 0xa0, 0x14, 0x2b, 0x1f, 0x59, 0xe1, 0x42, 0x4c,
+ 0xd3, 0x7e, 0xa2, 0x12, 0xd3, 0x20, 0x5e, 0x90, 0x1a, 0xc0, 0xa1, 0x3a, 0x43, 0x2e, 0x3b, 0x75,
+ 0x2f, 0x68, 0x89, 0xe4, 0x50, 0xbe, 0xfa, 0x97, 0x14, 0xa0, 0xf9, 0x02, 0x74, 0xce, 0xcd, 0xd4,
+ 0x4f, 0xe9, 0x66, 0xf5, 0xcf, 0x29, 0xa8, 0x24, 0xab, 0xce, 0x39, 0xf7, 0x6e, 0xfc, 0xa4, 0xee,
+ 0xfd, 0x33, 0x0d, 0xeb, 0x89, 0x5a, 0xf3, 0xb2, 0xde, 0x7d, 0x09, 0x9b, 0x86, 0x4e, 0x27, 0x8e,
+ 0xed, 0x53, 0x4b, 0xbb, 0x50, 0x4d, 0xfa, 0x98, 0x9a, 0x52, 0x95, 0x6f, 0x1a, 0xfb, 0xdf, 0x5f,
+ 0xcd, 0xd6, 0x5a, 0x33, 0xbd, 0x36, 0x53, 0x3b, 0xdc, 0x6a, 0x35, 0xe5, 0xd3, 0x5e, 0x77, 0x20,
+ 0x77, 0x1a, 0x9f, 0xab, 0x67, 0x9d, 0x5f, 0x75, 0xba, 0x9f, 0x76, 0x14, 0x64, 0xcc, 0xd1, 0x7e,
+ 0xc4, 0x65, 0xdf, 0x03, 0x34, 0xef, 0x14, 0xbe, 0x0a, 0xcb, 0xdc, 0x42, 0x2f, 0xe1, 0x2d, 0xd8,
+ 0xe8, 0x74, 0xd5, 0x7e, 0xab, 0x29, 0xab, 0xf2, 0x83, 0x07, 0x72, 0x63, 0xd0, 0x17, 0xd7, 0xfb,
+ 0x88, 0x3d, 0x48, 0x2c, 0xf0, 0xea, 0x9f, 0x32, 0xb0, 0xb5, 0xc4, 0x13, 0x5c, 0x0f, 0x6e, 0x16,
+ 0xe2, 0xb2, 0xf3, 0xee, 0x65, 0xbc, 0xaf, 0xb1, 0x82, 0xa0, 0x47, 0x5c, 0x3f, 0xb8, 0x88, 0xdc,
+ 0x02, 0x16, 0x25, 0xcb, 0x37, 0x86, 0x06, 0x75, 0x83, 0xd7, 0x10, 0x71, 0xdd, 0xd8, 0x98, 0xe1,
+ 0xe2, 0x41, 0xe4, 0x67, 0x80, 0x1d, 0xdb, 0x33, 0x7c, 0xe3, 0x31, 0x55, 0x0d, 0x2b, 0x7c, 0x3a,
+ 0x61, 0xd7, 0x8f, 0xac, 0x82, 0x42, 0x49, 0xcb, 0xf2, 0x23, 0xb6, 0x45, 0x47, 0x64, 0x8e, 0xcd,
+ 0x36, 0xf3, 0x8c, 0x82, 0x42, 0x49, 0xc4, 0xbe, 0x01, 0x65, 0xdd, 0x9e, 0xb2, 0x9a, 0x4c, 0xf0,
+ 0xd8, 0xd9, 0x91, 0x52, 0x4a, 0x02, 0x8b, 0x28, 0x41, 0xb5, 0x3d, 0x7b, 0xb3, 0x29, 0x2b, 0x25,
+ 0x81, 0x09, 0xca, 0x4d, 0xd8, 0x20, 0xa3, 0x91, 0xcb, 0x8c, 0x87, 0x86, 0xc4, 0xfd, 0xa1, 0x12,
+ 0xc1, 0x9c, 0xb8, 0x73, 0x02, 0x85, 0x30, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0x50, 0x1d, 0xf1, 0x6e,
+ 0x97, 0xde, 0x2b, 0x2a, 0x05, 0x2b, 0x14, 0xde, 0x80, 0xb2, 0xe1, 0xa9, 0xb3, 0x27, 0xe8, 0xf4,
+ 0x6e, 0x7a, 0xaf, 0xa0, 0x94, 0x0c, 0x2f, 0x7a, 0xbe, 0xab, 0x7e, 0x93, 0x86, 0x4a, 0xf2, 0x09,
+ 0x1d, 0x37, 0xa1, 0x60, 0xda, 0x1a, 0xe1, 0xa9, 0x25, 0xbe, 0xdf, 0xec, 0x3d, 0xe7, 0xd5, 0xbd,
+ 0xd6, 0x0e, 0xf8, 0x4a, 0xa4, 0xb9, 0xf3, 0x8f, 0x14, 0x14, 0x42, 0x18, 0x5f, 0x81, 0xac, 0x43,
+ 0xfc, 0x31, 0x37, 0x97, 0x3b, 0x4a, 0xa3, 0x94, 0xc2, 0xdb, 0x0c, 0xf7, 0x1c, 0x62, 0xf1, 0x14,
+ 0x08, 0x70, 0xd6, 0x66, 0xf3, 0x6a, 0x52, 0xa2, 0xf3, 0xcb, 0x89, 0x3d, 0x99, 0x50, 0xcb, 0xf7,
+ 0xc2, 0x79, 0x0d, 0xf0, 0x46, 0x00, 0xe3, 0x77, 0x60, 0xd3, 0x77, 0x89, 0x61, 0x26, 0xb8, 0x59,
+ 0xce, 0x45, 0xa1, 0x20, 0x22, 0x1f, 0xc2, 0xb5, 0xd0, 0xae, 0x4e, 0x7d, 0xa2, 0x8d, 0xa9, 0x3e,
+ 0x53, 0xca, 0xf3, 0xf7, 0xd9, 0xab, 0x01, 0xa1, 0x19, 0xc8, 0x43, 0xdd, 0xea, 0x77, 0x29, 0xd8,
+ 0x0c, 0xaf, 0x53, 0x7a, 0x14, 0xac, 0x53, 0x00, 0x62, 0x59, 0xb6, 0x1f, 0x0f, 0xd7, 0x62, 0x2a,
+ 0x2f, 0xe8, 0xd5, 0xea, 0x91, 0x92, 0x12, 0x33, 0xb0, 0x33, 0x01, 0x98, 0x49, 0x56, 0x86, 0xed,
+ 0x3a, 0x94, 0x82, 0xef, 0x23, 0xfc, 0x23, 0x9b, 0xb8, 0x80, 0x83, 0x80, 0xd8, 0xbd, 0x0b, 0x6f,
+ 0x43, 0xee, 0x9c, 0x8e, 0x0c, 0x2b, 0x78, 0xf5, 0x14, 0x8d, 0xf0, 0x25, 0x37, 0x1b, 0xbd, 0xe4,
+ 0x1e, 0xfd, 0x21, 0x05, 0x5b, 0x9a, 0x3d, 0x99, 0xf7, 0xf7, 0x08, 0xcd, 0xbd, 0x02, 0x78, 0x9f,
+ 0xa4, 0xbe, 0xf8, 0x78, 0x64, 0xf8, 0xe3, 0xe9, 0x79, 0x4d, 0xb3, 0x27, 0xfb, 0x23, 0xdb, 0x24,
+ 0xd6, 0x68, 0xf6, 0x95, 0x90, 0xff, 0xd1, 0xde, 0x1d, 0x51, 0xeb, 0xdd, 0x91, 0x1d, 0xfb, 0x66,
+ 0xf8, 0xd1, 0xec, 0xef, 0xd7, 0xe9, 0xcc, 0x71, 0xef, 0xe8, 0xaf, 0xe9, 0x9d, 0x63, 0xd1, 0x57,
+ 0x2f, 0x8c, 0x8d, 0x42, 0x87, 0x26, 0xd5, 0xd8, 0x78, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x0c,
+ 0xab, 0xb6, 0x37, 0x7e, 0x1c, 0x00, 0x00,
+}
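
Editorial note (not part of the vendored diff): the accessors generated above (GetLocation, GetPath, GetSpan, GetLeadingComments and friends) are nil-safe, which makes walking a file's recorded source locations straightforward. The sketch below assumes a serialized FileDescriptorSet produced elsewhere, for example with protoc's --include_source_info and --descriptor_set_out flags, and decodes it with the vendored proto package; it is an illustration, not code from this commit.

package main

import (
    "fmt"
    "io/ioutil"
    "log"
    "os"

    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
    // os.Args[1] names a file holding a serialized FileDescriptorSet.
    data, err := ioutil.ReadFile(os.Args[1])
    if err != nil {
        log.Fatal(err)
    }
    set := &descriptor.FileDescriptorSet{}
    if err := proto.Unmarshal(data, set); err != nil {
        log.Fatal(err)
    }
    for _, fd := range set.GetFile() {
        // GetSourceCodeInfo and GetLocation are nil-safe, so files compiled
        // without --include_source_info simply yield no locations.
        for _, loc := range fd.GetSourceCodeInfo().GetLocation() {
            span := loc.GetSpan()
            if len(span) < 3 {
                continue // a span always has three or four elements
            }
            // Line and column values are zero-based; add 1 for display.
            fmt.Printf("%s: path=%v line=%d col=%d\n",
                fd.GetName(), loc.GetPath(), span[0]+1, span[1]+1)
            if c := loc.GetLeadingComments(); c != "" {
                fmt.Printf("  leading comment: %q\n", c)
            }
        }
    }
}
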
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
new file mode 100644
index 0000000..4d4fb37
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
@@ -0,0 +1,849 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+// Based on original Protocol Buffers design by
+// Sanjay Ghemawat, Jeff Dean, and others.
+//
+// The messages in this file describe the definitions found in .proto files.
+// A valid .proto file can be translated directly to a FileDescriptorProto
+// without any other information (e.g. without reading its imports).
+
+
+syntax = "proto2";
+
+package google.protobuf;
+option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DescriptorProtos";
+option csharp_namespace = "Google.Protobuf.Reflection";
+option objc_class_prefix = "GPB";
+
+// descriptor.proto must be optimized for speed because reflection-based
+// algorithms don't work during bootstrapping.
+option optimize_for = SPEED;
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+message FileDescriptorSet {
+ repeated FileDescriptorProto file = 1;
+}
+
+// Describes a complete .proto file.
+message FileDescriptorProto {
+ optional string name = 1; // file name, relative to root of source tree
+ optional string package = 2; // e.g. "foo", "foo.bar", etc.
+
+ // Names of files imported by this file.
+ repeated string dependency = 3;
+ // Indexes of the public imported files in the dependency list above.
+ repeated int32 public_dependency = 10;
+ // Indexes of the weak imported files in the dependency list.
+ // For Google-internal migration only. Do not use.
+ repeated int32 weak_dependency = 11;
+
+ // All top-level definitions in this file.
+ repeated DescriptorProto message_type = 4;
+ repeated EnumDescriptorProto enum_type = 5;
+ repeated ServiceDescriptorProto service = 6;
+ repeated FieldDescriptorProto extension = 7;
+
+ optional FileOptions options = 8;
+
+ // This field contains optional information about the original source code.
+ // You may safely remove this entire field without harming runtime
+ // functionality of the descriptors -- the information is needed only by
+ // development tools.
+ optional SourceCodeInfo source_code_info = 9;
+
+ // The syntax of the proto file.
+ // The supported values are "proto2" and "proto3".
+ optional string syntax = 12;
+}
+
+// Describes a message type.
+message DescriptorProto {
+ optional string name = 1;
+
+ repeated FieldDescriptorProto field = 2;
+ repeated FieldDescriptorProto extension = 6;
+
+ repeated DescriptorProto nested_type = 3;
+ repeated EnumDescriptorProto enum_type = 4;
+
+ message ExtensionRange {
+ optional int32 start = 1;
+ optional int32 end = 2;
+
+ optional ExtensionRangeOptions options = 3;
+ }
+ repeated ExtensionRange extension_range = 5;
+
+ repeated OneofDescriptorProto oneof_decl = 8;
+
+ optional MessageOptions options = 7;
+
+ // Range of reserved tag numbers. Reserved tag numbers may not be used by
+ // fields or extension ranges in the same message. Reserved ranges may
+ // not overlap.
+ message ReservedRange {
+ optional int32 start = 1; // Inclusive.
+ optional int32 end = 2; // Exclusive.
+ }
+ repeated ReservedRange reserved_range = 9;
+ // Reserved field names, which may not be used by fields in the same message.
+ // A given name may only be reserved once.
+ repeated string reserved_name = 10;
+}
+
+message ExtensionRangeOptions {
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+// Describes a field within a message.
+message FieldDescriptorProto {
+ enum Type {
+ // 0 is reserved for errors.
+ // Order is weird for historical reasons.
+ TYPE_DOUBLE = 1;
+ TYPE_FLOAT = 2;
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
+ // negative values are likely.
+ TYPE_INT64 = 3;
+ TYPE_UINT64 = 4;
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
+ // negative values are likely.
+ TYPE_INT32 = 5;
+ TYPE_FIXED64 = 6;
+ TYPE_FIXED32 = 7;
+ TYPE_BOOL = 8;
+ TYPE_STRING = 9;
+ // Tag-delimited aggregate.
+ // Group type is deprecated and not supported in proto3. However, Proto3
+ // implementations should still be able to parse the group wire format and
+ // treat group fields as unknown fields.
+ TYPE_GROUP = 10;
+ TYPE_MESSAGE = 11; // Length-delimited aggregate.
+
+ // New in version 2.
+ TYPE_BYTES = 12;
+ TYPE_UINT32 = 13;
+ TYPE_ENUM = 14;
+ TYPE_SFIXED32 = 15;
+ TYPE_SFIXED64 = 16;
+ TYPE_SINT32 = 17; // Uses ZigZag encoding.
+ TYPE_SINT64 = 18; // Uses ZigZag encoding.
+ };
+
+ enum Label {
+ // 0 is reserved for errors
+ LABEL_OPTIONAL = 1;
+ LABEL_REQUIRED = 2;
+ LABEL_REPEATED = 3;
+ };
+
+ optional string name = 1;
+ optional int32 number = 3;
+ optional Label label = 4;
+
+ // If type_name is set, this need not be set. If both this and type_name
+ // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+ optional Type type = 5;
+
+ // For message and enum types, this is the name of the type. If the name
+ // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
+ // rules are used to find the type (i.e. first the nested types within this
+ // message are searched, then within the parent, on up to the root
+ // namespace).
+ optional string type_name = 6;
+
+ // For extensions, this is the name of the type being extended. It is
+ // resolved in the same manner as type_name.
+ optional string extendee = 2;
+
+ // For numeric types, contains the original text representation of the value.
+ // For booleans, "true" or "false".
+ // For strings, contains the default text contents (not escaped in any way).
+ // For bytes, contains the C escaped value. All bytes >= 128 are escaped.
+ // TODO(kenton): Base-64 encode?
+ optional string default_value = 7;
+
+ // If set, gives the index of a oneof in the containing type's oneof_decl
+ // list. This field is a member of that oneof.
+ optional int32 oneof_index = 9;
+
+ // JSON name of this field. The value is set by the protocol compiler. If the
+ // user has set a "json_name" option on this field, that option's value
+ // will be used. Otherwise, it's deduced from the field's name by converting
+ // it to camelCase.
+ optional string json_name = 10;
+
+ optional FieldOptions options = 8;
+}
+
+// Describes a oneof.
+message OneofDescriptorProto {
+ optional string name = 1;
+ optional OneofOptions options = 2;
+}
+
+// Describes an enum type.
+message EnumDescriptorProto {
+ optional string name = 1;
+
+ repeated EnumValueDescriptorProto value = 2;
+
+ optional EnumOptions options = 3;
+}
+
+// Describes a value within an enum.
+message EnumValueDescriptorProto {
+ optional string name = 1;
+ optional int32 number = 2;
+
+ optional EnumValueOptions options = 3;
+}
+
+// Describes a service.
+message ServiceDescriptorProto {
+ optional string name = 1;
+ repeated MethodDescriptorProto method = 2;
+
+ optional ServiceOptions options = 3;
+}
+
+// Describes a method of a service.
+message MethodDescriptorProto {
+ optional string name = 1;
+
+ // Input and output type names. These are resolved in the same way as
+ // FieldDescriptorProto.type_name, but must refer to a message type.
+ optional string input_type = 2;
+ optional string output_type = 3;
+
+ optional MethodOptions options = 4;
+
+ // Identifies if client streams multiple client messages
+ optional bool client_streaming = 5 [default=false];
+ // Identifies if server streams multiple server messages
+ optional bool server_streaming = 6 [default=false];
+}
+
+
+// ===================================================================
+// Options
+
+// Each of the definitions above may have "options" attached. These are
+// just annotations which may cause code to be generated slightly differently
+// or may contain hints for code that manipulates protocol messages.
+//
+// Clients may define custom options as extensions of the *Options messages.
+// These extensions may not yet be known at parsing time, so the parser cannot
+// store the values in them. Instead it stores them in a field in the *Options
+// message called uninterpreted_option. This field must have the same name
+// across all *Options messages. We then use this field to populate the
+// extensions when we build a descriptor, at which point all protos have been
+// parsed and so all extensions are known.
+//
+// Extension numbers for custom options may be chosen as follows:
+// * For options which will only be used within a single application or
+// organization, or for experimental options, use field numbers 50000
+// through 99999. It is up to you to ensure that you do not use the
+// same number for multiple options.
+// * For options which will be published and used publicly by multiple
+// independent entities, e-mail protobuf-global-extension-registry@google.com
+// to reserve extension numbers. Simply provide your project name (e.g.
+// Objective-C plugin) and your project website (if available) -- there's no
+// need to explain how you intend to use them. Usually you only need one
+// extension number. You can declare multiple options with only one extension
+// number by putting them in a sub-message. See the Custom Options section of
+// the docs for examples:
+// https://developers.google.com/protocol-buffers/docs/proto#options
+// If this turns out to be popular, a web service will be set up
+// to automatically assign option numbers.
+
+
+message FileOptions {
+
+ // Sets the Java package where classes generated from this .proto will be
+ // placed. By default, the proto package is used, but this is often
+ // inappropriate because proto packages do not normally start with backwards
+ // domain names.
+ optional string java_package = 1;
+
+
+ // If set, all the classes from the .proto file are wrapped in a single
+ // outer class with the given name. This applies to both Proto1
+ // (equivalent to the old "--one_java_file" option) and Proto2 (where
+ // a .proto always translates to a single class, but you may want to
+ // explicitly choose the class name).
+ optional string java_outer_classname = 8;
+
+ // If set true, then the Java code generator will generate a separate .java
+ // file for each top-level message, enum, and service defined in the .proto
+ // file. Thus, these types will *not* be nested inside the outer class
+ // named by java_outer_classname. However, the outer class will still be
+ // generated to contain the file's getDescriptor() method as well as any
+ // top-level extensions defined in the file.
+ optional bool java_multiple_files = 10 [default=false];
+
+ // This option does nothing.
+ optional bool java_generate_equals_and_hash = 20 [deprecated=true];
+
+ // If set true, then the Java2 code generator will generate code that
+ // throws an exception whenever an attempt is made to assign a non-UTF-8
+ // byte sequence to a string field.
+ // Message reflection will do the same.
+ // However, an extension field still accepts non-UTF-8 byte sequences.
+ // This option has no effect when used with the lite runtime.
+ optional bool java_string_check_utf8 = 27 [default=false];
+
+
+ // Generated classes can be optimized for speed or code size.
+ enum OptimizeMode {
+ SPEED = 1; // Generate complete code for parsing, serialization,
+ // etc.
+ CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
+ LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
+ }
+ optional OptimizeMode optimize_for = 9 [default=SPEED];
+
+ // Sets the Go package where structs generated from this .proto will be
+ // placed. If omitted, the Go package will be derived from the following:
+ // - The basename of the package import path, if provided.
+ // - Otherwise, the package statement in the .proto file, if present.
+ // - Otherwise, the basename of the .proto file, without extension.
+ optional string go_package = 11;
+
+
+
+ // Should generic services be generated in each language? "Generic" services
+ // are not specific to any particular RPC system. They are generated by the
+ // main code generators in each language (without additional plugins).
+ // Generic services were the only kind of service generation supported by
+ // early versions of google.protobuf.
+ //
+ // Generic services are now considered deprecated in favor of using plugins
+ // that generate code specific to your particular RPC system. Therefore,
+ // these default to false. Old code which depends on generic services should
+ // explicitly set them to true.
+ optional bool cc_generic_services = 16 [default=false];
+ optional bool java_generic_services = 17 [default=false];
+ optional bool py_generic_services = 18 [default=false];
+ optional bool php_generic_services = 42 [default=false];
+
+ // Is this file deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for everything in the file, or it will be completely ignored; at the very
+ // least, this is a formalization for deprecating files.
+ optional bool deprecated = 23 [default=false];
+
+ // Enables the use of arenas for the proto messages in this file. This applies
+ // only to generated classes for C++.
+ optional bool cc_enable_arenas = 31 [default=false];
+
+
+ // Sets the Objective-C class prefix which is prepended to all Objective-C
+ // generated classes from this .proto. There is no default.
+ optional string objc_class_prefix = 36;
+
+ // Namespace for generated classes; defaults to the package.
+ optional string csharp_namespace = 37;
+
+ // By default Swift generators will take the proto package and CamelCase it
+ // replacing '.' with underscore and use that to prefix the types/symbols
+ // defined. When this option is provided, they will use this value instead
+ // to prefix the types/symbols defined.
+ optional string swift_prefix = 39;
+
+ // Sets the php class prefix which is prepended to all php generated classes
+ // from this .proto. Default is empty.
+ optional string php_class_prefix = 40;
+
+ // Use this option to change the namespace of php generated classes. Default
+ // is empty. When this option is empty, the package name will be used for
+ // determining the namespace.
+ optional string php_namespace = 41;
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+
+ reserved 38;
+}
+
+message MessageOptions {
+ // Set true to use the old proto1 MessageSet wire format for extensions.
+ // This is provided for backwards-compatibility with the MessageSet wire
+ // format. You should not use this for any other reason: It's less
+ // efficient, has fewer features, and is more complicated.
+ //
+ // The message must be defined exactly as follows:
+ // message Foo {
+ // option message_set_wire_format = true;
+ // extensions 4 to max;
+ // }
+ // Note that the message cannot have any defined fields; MessageSets only
+ // have extensions.
+ //
+ // All extensions of your type must be singular messages; e.g. they cannot
+ // be int32s, enums, or repeated messages.
+ //
+ // Because this is an option, the above two restrictions are not enforced by
+ // the protocol compiler.
+ optional bool message_set_wire_format = 1 [default=false];
+
+ // Disables the generation of the standard "descriptor()" accessor, which can
+ // conflict with a field of the same name. This is meant to make migration
+ // from proto1 easier; new code should avoid fields named "descriptor".
+ optional bool no_standard_descriptor_accessor = 2 [default=false];
+
+ // Is this message deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the message, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating messages.
+ optional bool deprecated = 3 [default=false];
+
+ // Whether the message is an automatically generated map entry type for the
+ // maps field.
+ //
+ // For maps fields:
+ // map<KeyType, ValueType> map_field = 1;
+ // The parsed descriptor looks like:
+ // message MapFieldEntry {
+ // option map_entry = true;
+ // optional KeyType key = 1;
+ // optional ValueType value = 2;
+ // }
+ // repeated MapFieldEntry map_field = 1;
+ //
+ // Implementations may choose not to generate the map_entry=true message, but
+ // use a native map in the target language to hold the keys and values.
+ // The reflection APIs in such implementations still need to work as
+ // if the field is a repeated message field.
+ //
+ // NOTE: Do not set the option in .proto files. Always use the maps syntax
+ // instead. The option should only be implicitly set by the proto compiler
+ // parser.
+ optional bool map_entry = 7;
+
+ reserved 8; // javalite_serializable
+ reserved 9; // javanano_as_lite
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message FieldOptions {
+ // The ctype option instructs the C++ code generator to use a different
+ // representation of the field than it normally would. See the specific
+ // options below. This option is not yet implemented in the open source
+ // release -- sorry, we'll try to include it in a future version!
+ optional CType ctype = 1 [default = STRING];
+ enum CType {
+ // Default mode.
+ STRING = 0;
+
+ CORD = 1;
+
+ STRING_PIECE = 2;
+ }
+ // The packed option can be enabled for repeated primitive fields to enable
+ // a more efficient representation on the wire. Rather than repeatedly
+ // writing the tag and type for each element, the entire array is encoded as
+ // a single length-delimited blob. In proto3, only explicitly setting it to
+ // false will avoid using packed encoding.
+ optional bool packed = 2;
+
+ // The jstype option determines the JavaScript type used for values of the
+ // field. The option is permitted only for 64 bit integral and fixed types
+ // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
+ // is represented as a JavaScript string, which avoids loss of precision that
+ // can happen when a large value is converted to a floating point JavaScript number.
+ // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+ // use the JavaScript "number" type. The behavior of the default option
+ // JS_NORMAL is implementation dependent.
+ //
+ // This option is an enum to permit additional types to be added, e.g.
+ // goog.math.Integer.
+ optional JSType jstype = 6 [default = JS_NORMAL];
+ enum JSType {
+ // Use the default type.
+ JS_NORMAL = 0;
+
+ // Use JavaScript strings.
+ JS_STRING = 1;
+
+ // Use JavaScript numbers.
+ JS_NUMBER = 2;
+ }
+
+ // Should this field be parsed lazily? Lazy applies only to message-type
+ // fields. It means that when the outer message is initially parsed, the
+ // inner message's contents will not be parsed but instead stored in encoded
+ // form. The inner message will actually be parsed when it is first accessed.
+ //
+ // This is only a hint. Implementations are free to choose whether to use
+ // eager or lazy parsing regardless of the value of this option. However,
+ // setting this option true suggests that the protocol author believes that
+ // using lazy parsing on this field is worth the additional bookkeeping
+ // overhead typically needed to implement it.
+ //
+ // This option does not affect the public interface of any generated code;
+ // all method signatures remain the same. Furthermore, thread-safety of the
+ // interface is not affected by this option; const methods remain safe to
+ // call from multiple threads concurrently, while non-const methods continue
+ // to require exclusive access.
+ //
+ //
+ // Note that implementations may choose not to check required fields within
+ // a lazy sub-message. That is, calling IsInitialized() on the outer message
+ // may return true even if the inner message has missing required fields.
+ // This is necessary because otherwise the inner message would have to be
+ // parsed in order to perform the check, defeating the purpose of lazy
+ // parsing. An implementation which chooses not to check required fields
+ // must be consistent about it. That is, for any particular sub-message, the
+ // implementation must either *always* check its required fields, or *never*
+ // check its required fields, regardless of whether or not the message has
+ // been parsed.
+ optional bool lazy = 5 [default=false];
+
+ // Is this field deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for accessors, or it will be completely ignored; at the very least, this
+ // is a formalization for deprecating fields.
+ optional bool deprecated = 3 [default=false];
+
+ // For Google-internal migration only. Do not use.
+ optional bool weak = 10 [default=false];
+
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+
+ reserved 4; // removed jtype
+}
+
+message OneofOptions {
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message EnumOptions {
+
+ // Set this option to true to allow mapping different tag names to the same
+ // value.
+ optional bool allow_alias = 2;
+
+ // Is this enum deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the enum, or it will be completely ignored; at the very least, this
+ // is a formalization for deprecating enums.
+ optional bool deprecated = 3 [default=false];
+
+ reserved 5; // javanano_as_lite
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message EnumValueOptions {
+ // Is this enum value deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the enum value, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating enum values.
+ optional bool deprecated = 1 [default=false];
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message ServiceOptions {
+
+ // Note: Field numbers 1 through 32 are reserved for Google's internal RPC
+ // framework. We apologize for hoarding these numbers to ourselves, but
+ // we were already using them long before we decided to release Protocol
+ // Buffers.
+
+ // Is this service deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the service, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating services.
+ optional bool deprecated = 33 [default=false];
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message MethodOptions {
+
+ // Note: Field numbers 1 through 32 are reserved for Google's internal RPC
+ // framework. We apologize for hoarding these numbers to ourselves, but
+ // we were already using them long before we decided to release Protocol
+ // Buffers.
+
+ // Is this method deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the method, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating methods.
+ optional bool deprecated = 33 [default=false];
+
+ // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+ // or neither? An HTTP-based RPC implementation may choose the GET verb for safe
+ // methods and the PUT verb for idempotent methods, instead of the default POST.
+ enum IdempotencyLevel {
+ IDEMPOTENCY_UNKNOWN = 0;
+ NO_SIDE_EFFECTS = 1; // implies idempotent
+ IDEMPOTENT = 2; // idempotent, but may have side effects
+ }
+ optional IdempotencyLevel idempotency_level =
+ 34 [default=IDEMPOTENCY_UNKNOWN];
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+message UninterpretedOption {
+ // The name of the uninterpreted option. Each string represents a segment in
+ // a dot-separated name. is_extension is true iff a segment represents an
+ // extension (denoted with parentheses in options specs in .proto files).
+ // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
+ // "foo.(bar.baz).qux".
+ message NamePart {
+ required string name_part = 1;
+ required bool is_extension = 2;
+ }
+ repeated NamePart name = 2;
+
+ // The value of the uninterpreted option, in whatever type the tokenizer
+ // identified it as during parsing. Exactly one of these should be set.
+ optional string identifier_value = 3;
+ optional uint64 positive_int_value = 4;
+ optional int64 negative_int_value = 5;
+ optional double double_value = 6;
+ optional bytes string_value = 7;
+ optional string aggregate_value = 8;
+}
+
+// ===================================================================
+// Optional source code info
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+message SourceCodeInfo {
+ // A Location identifies a piece of source code in a .proto file which
+ // corresponds to a particular definition. This information is intended
+ // to be useful to IDEs, code indexers, documentation generators, and similar
+ // tools.
+ //
+ // For example, say we have a file like:
+ // message Foo {
+ // optional string foo = 1;
+ // }
+ // Let's look at just the field definition:
+ // optional string foo = 1;
+ // ^ ^^ ^^ ^ ^^^
+ // a bc de f ghi
+ // We have the following locations:
+ // span path represents
+ // [a,i) [ 4, 0, 2, 0 ] The whole field definition.
+ // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
+ // [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
+ // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
+ // [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
+ //
+ // Notes:
+ // - A location may refer to a repeated field itself (i.e. not to any
+ // particular index within it). This is used whenever a set of elements is
+ // logically enclosed in a single code segment. For example, an entire
+ // extend block (possibly containing multiple extension definitions) will
+ // have an outer location whose path refers to the "extensions" repeated
+ // field without an index.
+ // - Multiple locations may have the same path. This happens when a single
+ // logical declaration is spread out across multiple places. The most
+ // obvious example is the "extend" block again -- there may be multiple
+ // extend blocks in the same scope, each of which will have the same path.
+ // - A location's span is not always a subset of its parent's span. For
+ // example, the "extendee" of an extension declaration appears at the
+ // beginning of the "extend" block and is shared by all extensions within
+ // the block.
+ // - Just because a location's span is a subset of some other location's span
+ // does not mean that it is a descendant. For example, a "group" defines
+ // both a type and a field in a single declaration. Thus, the locations
+ // corresponding to the type and field and their components will overlap.
+ // - Code which tries to interpret locations should probably be designed to
+ // ignore those that it doesn't understand, as more types of locations could
+ // be recorded in the future.
+ repeated Location location = 1;
+ message Location {
+ // Identifies which part of the FileDescriptorProto was defined at this
+ // location.
+ //
+ // Each element is a field number or an index. They form a path from
+ // the root FileDescriptorProto to the place where the definition appears. For
+ // example, this path:
+ // [ 4, 3, 2, 7, 1 ]
+ // refers to:
+ // file.message_type(3) // 4, 3
+ // .field(7) // 2, 7
+ // .name() // 1
+ // This is because FileDescriptorProto.message_type has field number 4:
+ // repeated DescriptorProto message_type = 4;
+ // and DescriptorProto.field has field number 2:
+ // repeated FieldDescriptorProto field = 2;
+ // and FieldDescriptorProto.name has field number 1:
+ // optional string name = 1;
+ //
+ // Thus, the above path gives the location of a field name. If we removed
+ // the last element:
+ // [ 4, 3, 2, 7 ]
+ // this path refers to the whole field declaration (from the beginning
+ // of the label to the terminating semicolon).
+ repeated int32 path = 1 [packed=true];
+
+ // Always has exactly three or four elements: start line, start column,
+ // end line (optional, otherwise assumed same as start line), end column.
+ // These are packed into a single field for efficiency. Note that line
+ // and column numbers are zero-based -- typically you will want to add
+ // 1 to each before displaying to a user.
+ repeated int32 span = 2 [packed=true];
+
+ // If this SourceCodeInfo represents a complete declaration, these are any
+ // comments appearing before and after the declaration which appear to be
+ // attached to the declaration.
+ //
+ // A series of line comments appearing on consecutive lines, with no other
+ // tokens appearing on those lines, will be treated as a single comment.
+ //
+ // leading_detached_comments will keep paragraphs of comments that appear
+ // before (but not connected to) the current element. Each paragraph,
+ // separated by empty lines, will be one comment element in the repeated
+ // field.
+ //
+ // Only the comment content is provided; comment markers (e.g. //) are
+ // stripped out. For block comments, leading whitespace and an asterisk
+ // will be stripped from the beginning of each line other than the first.
+ // Newlines are included in the output.
+ //
+ // Examples:
+ //
+ // optional int32 foo = 1; // Comment attached to foo.
+ // // Comment attached to bar.
+ // optional int32 bar = 2;
+ //
+ // optional string baz = 3;
+ // // Comment attached to baz.
+ // // Another line attached to baz.
+ //
+ // // Comment attached to qux.
+ // //
+ // // Another line attached to qux.
+ // optional double qux = 4;
+ //
+ // // Detached comment for corge. This is not leading or trailing comments
+ // // to qux or corge because there are blank lines separating it from
+ // // both.
+ //
+ // // Detached comment for corge paragraph 2.
+ //
+ // optional string corge = 5;
+ // /* Block comment attached
+ // * to corge. Leading asterisks
+ // * will be removed. */
+ // /* Block comment attached to
+ // * grault. */
+ // optional int32 grault = 6;
+ //
+ // // ignored detached comments.
+ optional string leading_comments = 3;
+ optional string trailing_comments = 4;
+ repeated string leading_detached_comments = 6;
+ }
+}
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+message GeneratedCodeInfo {
+ // An Annotation connects some span of text in generated code to an element
+ // of its generating .proto file.
+ repeated Annotation annotation = 1;
+ message Annotation {
+ // Identifies the element in the original source .proto file. This field
+ // is formatted the same as SourceCodeInfo.Location.path.
+ repeated int32 path = 1 [packed=true];
+
+ // Identifies the filesystem path to the original source .proto.
+ optional string source_file = 2;
+
+ // Identifies the starting offset in bytes in the generated code
+ // that relates to the identified object.
+ optional int32 begin = 3;
+
+ // Identifies the ending offset in bytes in the generated code that
+ // relates to the identified object. The end offset should be one past
+ // the last relevant byte (so the length of the text = end - begin).
+ optional int32 end = 4;
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go
new file mode 100644
index 0000000..0d6055d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go
@@ -0,0 +1,51 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+ A plugin for the Google protocol buffer compiler to generate Go code.
+ Run it by building this program and putting it in your path with the name
+ protoc-gen-go
+ That word 'go' at the end becomes part of the option string set for the
+ protocol compiler, so once the protocol compiler (protoc) is installed
+ you can run
+ protoc --go_out=output_directory input_directory/file.proto
+ to generate Go bindings for the protocol defined by file.proto.
+ With that input, the output will be written to
+ output_directory/file.pb.go
+
+ The generated code is documented in the package comment for
+ the library.
+
+ See the README and documentation for protocol buffers to learn more:
+ https://developers.google.com/protocol-buffers/
+
+*/
+package documentation
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile
new file mode 100644
index 0000000..b5715c3
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile
@@ -0,0 +1,40 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(GOROOT)/src/Make.inc
+
+TARG=github.com/golang/protobuf/compiler/generator
+GOFILES=\
+ generator.go\
+
+DEPS=../descriptor ../plugin ../../proto
+
+include $(GOROOT)/src/Make.pkg
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
new file mode 100644
index 0000000..60d5246
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
@@ -0,0 +1,2866 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+ The code generator for the plugin for the Google protocol buffer compiler.
+ It generates Go code from the protocol buffer description files read by the
+ main routine.
+*/
+package generator
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "log"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
+)
+
+// generatedCodeVersion indicates a version of the generated code.
+// It is incremented whenever an incompatibility between the generated code and
+// proto package is introduced; the generated code references
+// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion).
+const generatedCodeVersion = 2
+
+// A Plugin provides functionality to add to the output during Go code generation,
+// such as to produce RPC stubs.
+type Plugin interface {
+ // Name identifies the plugin.
+ Name() string
+ // Init is called once after data structures are built but before
+ // code generation begins.
+ Init(g *Generator)
+ // Generate produces the code generated by the plugin for this file,
+ // except for the imports, by calling the generator's methods P, In, and Out.
+ Generate(file *FileDescriptor)
+ // GenerateImports produces the import declarations for this file.
+ // It is called after Generate.
+ GenerateImports(file *FileDescriptor)
+}
+
+var plugins []Plugin
+
+// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated.
+// It is typically called during initialization.
+func RegisterPlugin(p Plugin) {
+ plugins = append(plugins, p)
+}
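+
+// Illustrative sketch (editor's note, not part of the upstream generator): a
+// minimal Plugin satisfies the four methods above and registers itself from an
+// init function. The plugin name "hello" and the emitted comment are arbitrary;
+// such a plugin only runs if enabled via the plugins= parameter handled by
+// CommandLineParameters below.
+//
+//	type helloPlugin struct{ gen *Generator }
+//
+//	func (p *helloPlugin) Name() string                      { return "hello" }
+//	func (p *helloPlugin) Init(g *Generator)                 { p.gen = g }
+//	func (p *helloPlugin) Generate(f *FileDescriptor)        { p.gen.P("// seen by hello plugin: ", f.GetName()) }
+//	func (p *helloPlugin) GenerateImports(f *FileDescriptor) {}
+//
+//	func init() { RegisterPlugin(new(helloPlugin)) }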
+
+// Each type we import as a protocol buffer (other than FileDescriptorProto) needs
+// a pointer to the FileDescriptorProto that represents it. These types achieve that
+// wrapping by placing each Proto inside a struct with the pointer to its File. The
+// structs have the same names as their contents, with "Proto" removed.
+// FileDescriptor is used to store the things that it points to.
+
+// The file and package name method are common to messages and enums.
+type common struct {
+ file *descriptor.FileDescriptorProto // File this object comes from.
+}
+
+// PackageName is the name in the package clause in the generated file.
+func (c *common) PackageName() string { return uniquePackageOf(c.file) }
+
+func (c *common) File() *descriptor.FileDescriptorProto { return c.file }
+
+func fileIsProto3(file *descriptor.FileDescriptorProto) bool {
+ return file.GetSyntax() == "proto3"
+}
+
+func (c *common) proto3() bool { return fileIsProto3(c.file) }
+
+// Descriptor represents a protocol buffer message.
+type Descriptor struct {
+ common
+ *descriptor.DescriptorProto
+ parent *Descriptor // The containing message, if any.
+ nested []*Descriptor // Inner messages, if any.
+ enums []*EnumDescriptor // Inner enums, if any.
+ ext []*ExtensionDescriptor // Extensions, if any.
+ typename []string // Cached typename vector.
+ index int // The index into the container, whether the file or another message.
+ path string // The SourceCodeInfo path as comma-separated integers.
+ group bool
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
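+// For example, a message Baz nested in Bar, which is itself nested in Foo,
+// yields []string{"Foo", "Bar", "Baz"}.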
+func (d *Descriptor) TypeName() []string {
+ if d.typename != nil {
+ return d.typename
+ }
+ n := 0
+ for parent := d; parent != nil; parent = parent.parent {
+ n++
+ }
+ s := make([]string, n, n)
+ for parent := d; parent != nil; parent = parent.parent {
+ n--
+ s[n] = parent.GetName()
+ }
+ d.typename = s
+ return s
+}
+
+// EnumDescriptor describes an enum. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type EnumDescriptor struct {
+ common
+ *descriptor.EnumDescriptorProto
+ parent *Descriptor // The containing message, if any.
+ typename []string // Cached typename vector.
+ index int // The index into the container, whether the file or a message.
+ path string // The SourceCodeInfo path as comma-separated integers.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *EnumDescriptor) TypeName() (s []string) {
+ if e.typename != nil {
+ return e.typename
+ }
+ name := e.GetName()
+ if e.parent == nil {
+ s = make([]string, 1)
+ } else {
+ pname := e.parent.TypeName()
+ s = make([]string, len(pname)+1)
+ copy(s, pname)
+ }
+ s[len(s)-1] = name
+ e.typename = s
+ return s
+}
+
+// Everything but the last element of the full type name, CamelCased.
+// The values of type Foo.Bar are called Foo_value1... not Foo_Bar_value1... .
+func (e *EnumDescriptor) prefix() string {
+ if e.parent == nil {
+ // If the enum is not part of a message, the prefix is just the type name.
+ return CamelCase(*e.Name) + "_"
+ }
+ typeName := e.TypeName()
+ return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_"
+}
+
+// The integer value of the named constant in this enumerated type.
+func (e *EnumDescriptor) integerValueAsString(name string) string {
+ for _, c := range e.Value {
+ if c.GetName() == name {
+ return fmt.Sprint(c.GetNumber())
+ }
+ }
+ log.Fatal("cannot find value for enum constant")
+ return ""
+}
+
+// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type ExtensionDescriptor struct {
+ common
+ *descriptor.FieldDescriptorProto
+ parent *Descriptor // The containing message, if any.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *ExtensionDescriptor) TypeName() (s []string) {
+ name := e.GetName()
+ if e.parent == nil {
+ // top-level extension
+ s = make([]string, 1)
+ } else {
+ pname := e.parent.TypeName()
+ s = make([]string, len(pname)+1)
+ copy(s, pname)
+ }
+ s[len(s)-1] = name
+ return s
+}
+
+// DescName returns the variable name used for the generated descriptor.
+func (e *ExtensionDescriptor) DescName() string {
+ // The full type name.
+ typeName := e.TypeName()
+ // Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix.
+ for i, s := range typeName {
+ typeName[i] = CamelCase(s)
+ }
+ return "E_" + strings.Join(typeName, "_")
+}
+
+// ImportedDescriptor describes a type that has been publicly imported from another file.
+type ImportedDescriptor struct {
+ common
+ o Object
+}
+
+func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() }
+
+// FileDescriptor describes a protocol buffer descriptor file (.proto).
+// It includes slices of all the messages and enums defined within it.
+// Those slices are constructed by WrapTypes.
+type FileDescriptor struct {
+ *descriptor.FileDescriptorProto
+ desc []*Descriptor // All the messages defined in this file.
+ enum []*EnumDescriptor // All the enums defined in this file.
+ ext []*ExtensionDescriptor // All the top-level extensions defined in this file.
+ imp []*ImportedDescriptor // All types defined in files publicly imported by this file.
+
+ // Comments, stored as a map of path (comma-separated integers) to the comment.
+ comments map[string]*descriptor.SourceCodeInfo_Location
+
+ // The full list of symbols that are exported,
+ // as a map from the exported object to its symbols.
+ // This is used for supporting public imports.
+ exported map[Object][]symbol
+
+ index int // The index of this file in the list of files to generate code for
+
+ proto3 bool // whether to generate proto3 code for this file
+}
+
+// PackageName is the package name we'll use in the generated code to refer to this file.
+func (d *FileDescriptor) PackageName() string { return uniquePackageOf(d.FileDescriptorProto) }
+
+// VarName is the variable name we'll use in the generated code to refer
+// to the compressed bytes of this descriptor. It is not exported, so
+// it is only valid inside the generated package.
+func (d *FileDescriptor) VarName() string { return fmt.Sprintf("fileDescriptor%d", d.index) }
+
+// goPackageOption interprets the file's go_package option.
+// If there is no go_package, it returns ("", "", false).
+// If there's a simple name, it returns ("", pkg, true).
+// If the option implies an import path, it returns (impPath, pkg, true).
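+// For example (illustrative values):
+//	option go_package = "foo";                        // ("", "foo", true)
+//	option go_package = "github.com/example/foo";     // ("github.com/example/foo", "foo", true)
+//	option go_package = "github.com/example/foo;bar"; // ("github.com/example/foo", "bar", true)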
+func (d *FileDescriptor) goPackageOption() (impPath, pkg string, ok bool) {
+ pkg = d.GetOptions().GetGoPackage()
+ if pkg == "" {
+ return
+ }
+ ok = true
+ // The presence of a slash implies there's an import path.
+ slash := strings.LastIndex(pkg, "/")
+ if slash < 0 {
+ return
+ }
+ impPath, pkg = pkg, pkg[slash+1:]
+ // A semicolon-delimited suffix overrides the package name.
+ sc := strings.IndexByte(impPath, ';')
+ if sc < 0 {
+ return
+ }
+ impPath, pkg = impPath[:sc], impPath[sc+1:]
+ return
+}
+
+// goPackageName returns the Go package name to use in the
+// generated Go file. The result explicit reports whether the name
+// came from a go_package option. If explicit is false,
+// the name was derived from the protocol buffer's package statement
+// or the input file name.
+func (d *FileDescriptor) goPackageName() (name string, explicit bool) {
+ // Does the file have a "go_package" option?
+ if _, pkg, ok := d.goPackageOption(); ok {
+ return pkg, true
+ }
+
+ // Does the file have a package clause?
+ if pkg := d.GetPackage(); pkg != "" {
+ return pkg, false
+ }
+ // Use the file base name.
+ return baseName(d.GetName()), false
+}
+
+// goFileName returns the output name for the generated Go file.
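+// For example (illustrative), "dir/file.proto" becomes "dir/file.pb.go", and
+// with option go_package = "github.com/example/foo" it becomes
+// "github.com/example/foo/file.pb.go".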
+func (d *FileDescriptor) goFileName() string {
+ name := *d.Name
+ if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" {
+ name = name[:len(name)-len(ext)]
+ }
+ name += ".pb.go"
+
+ // Does the file have a "go_package" option?
+ // If it does, it may override the filename.
+ if impPath, _, ok := d.goPackageOption(); ok && impPath != "" {
+ // Replace the existing dirname with the declared import path.
+ _, name = path.Split(name)
+ name = path.Join(impPath, name)
+ return name
+ }
+
+ return name
+}
+
+func (d *FileDescriptor) addExport(obj Object, sym symbol) {
+ d.exported[obj] = append(d.exported[obj], sym)
+}
+
+// symbol is an interface representing an exported Go symbol.
+type symbol interface {
+ // GenerateAlias should generate an appropriate alias
+ // for the symbol from the named package.
+ GenerateAlias(g *Generator, pkg string)
+}
+
+type messageSymbol struct {
+ sym string
+ hasExtensions, isMessageSet bool
+ hasOneof bool
+ getters []getterSymbol
+}
+
+type getterSymbol struct {
+ name string
+ typ string
+ typeName string // canonical name in proto world; empty for proto.Message and similar
+ genType bool // whether typ contains a generated type (message/group/enum)
+}
+
+func (ms *messageSymbol) GenerateAlias(g *Generator, pkg string) {
+ remoteSym := pkg + "." + ms.sym
+
+ g.P("type ", ms.sym, " ", remoteSym)
+ g.P("func (m *", ms.sym, ") Reset() { (*", remoteSym, ")(m).Reset() }")
+ g.P("func (m *", ms.sym, ") String() string { return (*", remoteSym, ")(m).String() }")
+ g.P("func (*", ms.sym, ") ProtoMessage() {}")
+ if ms.hasExtensions {
+ g.P("func (*", ms.sym, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange ",
+ "{ return (*", remoteSym, ")(nil).ExtensionRangeArray() }")
+ if ms.isMessageSet {
+ g.P("func (m *", ms.sym, ") Marshal() ([]byte, error) ",
+ "{ return (*", remoteSym, ")(m).Marshal() }")
+ g.P("func (m *", ms.sym, ") Unmarshal(buf []byte) error ",
+ "{ return (*", remoteSym, ")(m).Unmarshal(buf) }")
+ }
+ }
+ if ms.hasOneof {
+ // Oneofs and public imports do not mix well.
+ // We can make them work okay for the binary format,
+ // but they're going to break weirdly for text/JSON.
+ enc := "_" + ms.sym + "_OneofMarshaler"
+ dec := "_" + ms.sym + "_OneofUnmarshaler"
+ size := "_" + ms.sym + "_OneofSizer"
+ encSig := "(msg " + g.Pkg["proto"] + ".Message, b *" + g.Pkg["proto"] + ".Buffer) error"
+ decSig := "(msg " + g.Pkg["proto"] + ".Message, tag, wire int, b *" + g.Pkg["proto"] + ".Buffer) (bool, error)"
+ sizeSig := "(msg " + g.Pkg["proto"] + ".Message) int"
+ g.P("func (m *", ms.sym, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", func", sizeSig, ", []interface{}) {")
+ g.P("return ", enc, ", ", dec, ", ", size, ", nil")
+ g.P("}")
+
+ g.P("func ", enc, encSig, " {")
+ g.P("m := msg.(*", ms.sym, ")")
+ g.P("m0 := (*", remoteSym, ")(m)")
+ g.P("enc, _, _, _ := m0.XXX_OneofFuncs()")
+ g.P("return enc(m0, b)")
+ g.P("}")
+
+ g.P("func ", dec, decSig, " {")
+ g.P("m := msg.(*", ms.sym, ")")
+ g.P("m0 := (*", remoteSym, ")(m)")
+ g.P("_, dec, _, _ := m0.XXX_OneofFuncs()")
+ g.P("return dec(m0, tag, wire, b)")
+ g.P("}")
+
+ g.P("func ", size, sizeSig, " {")
+ g.P("m := msg.(*", ms.sym, ")")
+ g.P("m0 := (*", remoteSym, ")(m)")
+ g.P("_, _, size, _ := m0.XXX_OneofFuncs()")
+ g.P("return size(m0)")
+ g.P("}")
+ }
+ for _, get := range ms.getters {
+
+ if get.typeName != "" {
+ g.RecordTypeUse(get.typeName)
+ }
+ typ := get.typ
+ val := "(*" + remoteSym + ")(m)." + get.name + "()"
+ if get.genType {
+ // typ will be "*pkg.T" (message/group) or "pkg.T" (enum)
+ // or "map[t]*pkg.T" (map to message/enum).
+ // The first two of those might have a "[]" prefix if it is repeated.
+ // Drop any package qualifier since we have hoisted the type into this package.
+ rep := strings.HasPrefix(typ, "[]")
+ if rep {
+ typ = typ[2:]
+ }
+ isMap := strings.HasPrefix(typ, "map[")
+ star := typ[0] == '*'
+ if !isMap { // map types handled lower down
+ typ = typ[strings.Index(typ, ".")+1:]
+ }
+ if star {
+ typ = "*" + typ
+ }
+ if rep {
+ // Go does not permit conversion between slice types where both
+ // element types are named. That means we need to generate a bit
+ // of code in this situation.
+ // typ is the element type.
+ // val is the expression to get the slice from the imported type.
+
+ ctyp := typ // conversion type expression; "Foo" or "(*Foo)"
+ if star {
+ ctyp = "(" + typ + ")"
+ }
+
+ g.P("func (m *", ms.sym, ") ", get.name, "() []", typ, " {")
+ g.In()
+ g.P("o := ", val)
+ g.P("if o == nil {")
+ g.In()
+ g.P("return nil")
+ g.Out()
+ g.P("}")
+ g.P("s := make([]", typ, ", len(o))")
+ g.P("for i, x := range o {")
+ g.In()
+ g.P("s[i] = ", ctyp, "(x)")
+ g.Out()
+ g.P("}")
+ g.P("return s")
+ g.Out()
+ g.P("}")
+ continue
+ }
+ if isMap {
+ // Split map[keyTyp]valTyp.
+ bra, ket := strings.Index(typ, "["), strings.Index(typ, "]")
+ keyTyp, valTyp := typ[bra+1:ket], typ[ket+1:]
+ // Drop any package qualifier.
+ // Only the value type may be foreign.
+ star := valTyp[0] == '*'
+ valTyp = valTyp[strings.Index(valTyp, ".")+1:]
+ if star {
+ valTyp = "*" + valTyp
+ }
+
+ typ := "map[" + keyTyp + "]" + valTyp
+ g.P("func (m *", ms.sym, ") ", get.name, "() ", typ, " {")
+ g.P("o := ", val)
+ g.P("if o == nil { return nil }")
+ g.P("s := make(", typ, ", len(o))")
+ g.P("for k, v := range o {")
+ g.P("s[k] = (", valTyp, ")(v)")
+ g.P("}")
+ g.P("return s")
+ g.P("}")
+ continue
+ }
+ // Convert imported type into the forwarding type.
+ val = "(" + typ + ")(" + val + ")"
+ }
+
+ g.P("func (m *", ms.sym, ") ", get.name, "() ", typ, " { return ", val, " }")
+ }
+
+}
+
+type enumSymbol struct {
+ name string
+ proto3 bool // Whether this came from a proto3 file.
+}
+
+func (es enumSymbol) GenerateAlias(g *Generator, pkg string) {
+ s := es.name
+ g.P("type ", s, " ", pkg, ".", s)
+ g.P("var ", s, "_name = ", pkg, ".", s, "_name")
+ g.P("var ", s, "_value = ", pkg, ".", s, "_value")
+ g.P("func (x ", s, ") String() string { return (", pkg, ".", s, ")(x).String() }")
+ if !es.proto3 {
+ g.P("func (x ", s, ") Enum() *", s, "{ return (*", s, ")((", pkg, ".", s, ")(x).Enum()) }")
+ g.P("func (x *", s, ") UnmarshalJSON(data []byte) error { return (*", pkg, ".", s, ")(x).UnmarshalJSON(data) }")
+ }
+}
+
+type constOrVarSymbol struct {
+ sym string
+ typ string // either "const" or "var"
+ cast string // if non-empty, a type cast is required (used for enums)
+}
+
+func (cs constOrVarSymbol) GenerateAlias(g *Generator, pkg string) {
+ v := pkg + "." + cs.sym
+ if cs.cast != "" {
+ v = cs.cast + "(" + v + ")"
+ }
+ g.P(cs.typ, " ", cs.sym, " = ", v)
+}
+
+// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects.
+type Object interface {
+ PackageName() string // The name we use in our output (a_b_c), possibly renamed for uniqueness.
+ TypeName() []string
+ File() *descriptor.FileDescriptorProto
+}
+
+// Each package name we generate must be unique. The package we're generating
+// gets its own name but every other package must have a unique name that does
+// not conflict in the code we generate. These names are chosen globally (although
+// they don't have to be, it simplifies things to do them globally).
+func uniquePackageOf(fd *descriptor.FileDescriptorProto) string {
+ s, ok := uniquePackageName[fd]
+ if !ok {
+ log.Fatal("internal error: no package name defined for " + fd.GetName())
+ }
+ return s
+}
+
+// Generator is the type whose methods generate the output, stored in the associated response structure.
+type Generator struct {
+ *bytes.Buffer
+
+ Request *plugin.CodeGeneratorRequest // The input.
+ Response *plugin.CodeGeneratorResponse // The output.
+
+ Param map[string]string // Command-line parameters.
+ PackageImportPath string // Go import path of the package we're generating code for
+ ImportPrefix string // String to prefix to imported package file names.
+ ImportMap map[string]string // Mapping from .proto file name to import path
+
+ Pkg map[string]string // The names under which we import support packages
+
+ packageName string // What we're calling ourselves.
+ allFiles []*FileDescriptor // All files in the tree
+ allFilesByName map[string]*FileDescriptor // All files by filename.
+ genFiles []*FileDescriptor // Those files we will generate output for.
+ file *FileDescriptor // The file we are compiling now.
+ usedPackages map[string]bool // Names of packages used in current file.
+ typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax.
+ init []string // Lines to emit in the init function.
+ indent string
+ writeOutput bool
+}
+
+// New creates a new generator and allocates the request and response protobufs.
+func New() *Generator {
+ g := new(Generator)
+ g.Buffer = new(bytes.Buffer)
+ g.Request = new(plugin.CodeGeneratorRequest)
+ g.Response = new(plugin.CodeGeneratorResponse)
+ return g
+}
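+
+// Illustrative usage sketch (editor's note, not part of the upstream file): a
+// driver such as protoc-gen-go's main program is expected to invoke the
+// Generator methods in the order their doc comments require, roughly as
+// follows (error handling omitted):
+//
+//	g := New()
+//	data, _ := ioutil.ReadAll(os.Stdin) // the CodeGeneratorRequest from protoc
+//	proto.Unmarshal(data, g.Request)
+//	g.CommandLineParameters(g.Request.GetParameter())
+//	g.WrapTypes()
+//	g.SetPackageNames()
+//	g.BuildTypeNameMap()
+//	g.GenerateAllFiles()
+//	data, _ = proto.Marshal(g.Response)
+//	os.Stdout.Write(data)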
+
+// Error reports a problem, including an error, and exits the program.
+func (g *Generator) Error(err error, msgs ...string) {
+ s := strings.Join(msgs, " ") + ":" + err.Error()
+ log.Print("protoc-gen-go: error:", s)
+ os.Exit(1)
+}
+
+// Fail reports a problem and exits the program.
+func (g *Generator) Fail(msgs ...string) {
+ s := strings.Join(msgs, " ")
+ log.Print("protoc-gen-go: error:", s)
+ os.Exit(1)
+}
+
+// CommandLineParameters breaks the comma-separated list of key=value pairs
+// in the parameter (a member of the request protobuf) into a key/value map.
+// It then sets file name mappings defined by those entries.
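+// For example (illustrative), the parameter string
+//	"plugins=grpc,import_path=github.com/example/foo,Mfoo.proto=github.com/example/foopb"
+// enables only the plugin named "grpc", sets PackageImportPath to
+// "github.com/example/foo", and maps foo.proto to the import path
+// "github.com/example/foopb".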
+func (g *Generator) CommandLineParameters(parameter string) {
+ g.Param = make(map[string]string)
+ for _, p := range strings.Split(parameter, ",") {
+ if i := strings.Index(p, "="); i < 0 {
+ g.Param[p] = ""
+ } else {
+ g.Param[p[0:i]] = p[i+1:]
+ }
+ }
+
+ g.ImportMap = make(map[string]string)
+ pluginList := "none" // Default list of plugin names to enable (empty means all).
+ for k, v := range g.Param {
+ switch k {
+ case "import_prefix":
+ g.ImportPrefix = v
+ case "import_path":
+ g.PackageImportPath = v
+ case "plugins":
+ pluginList = v
+ default:
+ if len(k) > 0 && k[0] == 'M' {
+ g.ImportMap[k[1:]] = v
+ }
+ }
+ }
+ if pluginList != "" {
+ // Amend the set of plugins.
+ enabled := make(map[string]bool)
+ for _, name := range strings.Split(pluginList, "+") {
+ enabled[name] = true
+ }
+ var nplugins []Plugin
+ for _, p := range plugins {
+ if enabled[p.Name()] {
+ nplugins = append(nplugins, p)
+ }
+ }
+ plugins = nplugins
+ }
+}
+
+// DefaultPackageName returns the package name printed for the object.
+// If its file is in a different package, it returns the package name we're using for the object's file, plus ".".
+// Otherwise it returns the empty string.
+func (g *Generator) DefaultPackageName(obj Object) string {
+ pkg := obj.PackageName()
+ if pkg == g.packageName {
+ return ""
+ }
+ return pkg + "."
+}
+
+// For each input file, the unique package name to use, underscored.
+var uniquePackageName = make(map[*descriptor.FileDescriptorProto]string)
+
+// Package names already registered. Key is the name from the .proto file;
+// value is the name that appears in the generated code.
+var pkgNamesInUse = make(map[string]bool)
+
+// Create and remember a guaranteed unique package name for this file descriptor.
+// Pkg is the candidate name. If f is nil, it's a builtin package like "proto" and
+// has no file descriptor.
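+// For example (illustrative), registering "my.pkg" yields "my_pkg", and
+// registering "foo" twice yields "foo" and then "foo1".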
+func RegisterUniquePackageName(pkg string, f *FileDescriptor) string {
+ // Convert dots to underscores before finding a unique alias.
+ pkg = strings.Map(badToUnderscore, pkg)
+
+ for i, orig := 1, pkg; pkgNamesInUse[pkg]; i++ {
+ // It's a duplicate; must rename.
+ pkg = orig + strconv.Itoa(i)
+ }
+ // Install it.
+ pkgNamesInUse[pkg] = true
+ if f != nil {
+ uniquePackageName[f.FileDescriptorProto] = pkg
+ }
+ return pkg
+}
+
+var isGoKeyword = map[string]bool{
+ "break": true,
+ "case": true,
+ "chan": true,
+ "const": true,
+ "continue": true,
+ "default": true,
+ "else": true,
+ "defer": true,
+ "fallthrough": true,
+ "for": true,
+ "func": true,
+ "go": true,
+ "goto": true,
+ "if": true,
+ "import": true,
+ "interface": true,
+ "map": true,
+ "package": true,
+ "range": true,
+ "return": true,
+ "select": true,
+ "struct": true,
+ "switch": true,
+ "type": true,
+ "var": true,
+}
+
+// defaultGoPackage returns the package name to use,
+// derived from the import path of the package we're building code for.
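+// For example (illustrative), an import path of "github.com/example/users"
+// yields "users", "github.com/example/type" yields "_type" because "type" is a
+// Go keyword, and a path ending in "9lives" yields "_9lives" because an
+// identifier must not begin with a digit.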
+func (g *Generator) defaultGoPackage() string {
+ p := g.PackageImportPath
+ if i := strings.LastIndex(p, "/"); i >= 0 {
+ p = p[i+1:]
+ }
+ if p == "" {
+ return ""
+ }
+
+ p = strings.Map(badToUnderscore, p)
+ // Identifier must not be keyword: insert _.
+ if isGoKeyword[p] {
+ p = "_" + p
+ }
+ // Identifier must not begin with digit: insert _.
+ if r, _ := utf8.DecodeRuneInString(p); unicode.IsDigit(r) {
+ p = "_" + p
+ }
+ return p
+}
+
+// SetPackageNames sets the package name for this run.
+// The package name must agree across all files being generated.
+// It also defines unique package names for all imported files.
+func (g *Generator) SetPackageNames() {
+ // Register the name for this package. It will be the first name
+ // registered so is guaranteed to be unmodified.
+ pkg, explicit := g.genFiles[0].goPackageName()
+
+ // Check all files for an explicit go_package option.
+ for _, f := range g.genFiles {
+ thisPkg, thisExplicit := f.goPackageName()
+ if thisExplicit {
+ if !explicit {
+ // Let this file's go_package option serve for all input files.
+ pkg, explicit = thisPkg, true
+ } else if thisPkg != pkg {
+ g.Fail("inconsistent package names:", thisPkg, pkg)
+ }
+ }
+ }
+
+ // If we don't have an explicit go_package option but we have an
+ // import path, use that.
+ if !explicit {
+ p := g.defaultGoPackage()
+ if p != "" {
+ pkg, explicit = p, true
+ }
+ }
+
+ // If there was no go_package and no import path to use,
+ // double-check that all the inputs have the same implicit
+ // Go package name.
+ if !explicit {
+ for _, f := range g.genFiles {
+ thisPkg, _ := f.goPackageName()
+ if thisPkg != pkg {
+ g.Fail("inconsistent package names:", thisPkg, pkg)
+ }
+ }
+ }
+
+ g.packageName = RegisterUniquePackageName(pkg, g.genFiles[0])
+
+ // Register the support package names. They might collide with the
+ // name of a package we import.
+ g.Pkg = map[string]string{
+ "fmt": RegisterUniquePackageName("fmt", nil),
+ "math": RegisterUniquePackageName("math", nil),
+ "proto": RegisterUniquePackageName("proto", nil),
+ }
+
+AllFiles:
+ for _, f := range g.allFiles {
+ for _, genf := range g.genFiles {
+ if f == genf {
+ // In this package already.
+ uniquePackageName[f.FileDescriptorProto] = g.packageName
+ continue AllFiles
+ }
+ }
+ // The file is a dependency, so we want to ignore its go_package option
+ // because that is only relevant for its specific generated output.
+ pkg := f.GetPackage()
+ if pkg == "" {
+ pkg = baseName(*f.Name)
+ }
+ RegisterUniquePackageName(pkg, f)
+ }
+}
+
+// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos
+// and FileDescriptorProtos into file-referenced objects within the Generator.
+// It also creates the list of files to generate and so should be called before GenerateAllFiles.
+func (g *Generator) WrapTypes() {
+ g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile))
+ g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles))
+ for _, f := range g.Request.ProtoFile {
+ // We must wrap the descriptors before we wrap the enums
+ descs := wrapDescriptors(f)
+ g.buildNestedDescriptors(descs)
+ enums := wrapEnumDescriptors(f, descs)
+ g.buildNestedEnums(descs, enums)
+ exts := wrapExtensions(f)
+ fd := &FileDescriptor{
+ FileDescriptorProto: f,
+ desc: descs,
+ enum: enums,
+ ext: exts,
+ exported: make(map[Object][]symbol),
+ proto3: fileIsProto3(f),
+ }
+ extractComments(fd)
+ g.allFiles = append(g.allFiles, fd)
+ g.allFilesByName[f.GetName()] = fd
+ }
+ for _, fd := range g.allFiles {
+ fd.imp = wrapImported(fd.FileDescriptorProto, g)
+ }
+
+ g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate))
+ for _, fileName := range g.Request.FileToGenerate {
+ fd := g.allFilesByName[fileName]
+ if fd == nil {
+ g.Fail("could not find file named", fileName)
+ }
+ fd.index = len(g.genFiles)
+ g.genFiles = append(g.genFiles, fd)
+ }
+}
+
+// Scan the descriptors in this file. For each one, build the slice of nested descriptors
+func (g *Generator) buildNestedDescriptors(descs []*Descriptor) {
+ for _, desc := range descs {
+ if len(desc.NestedType) != 0 {
+ for _, nest := range descs {
+ if nest.parent == desc {
+ desc.nested = append(desc.nested, nest)
+ }
+ }
+ if len(desc.nested) != len(desc.NestedType) {
+ g.Fail("internal error: nesting failure for", desc.GetName())
+ }
+ }
+ }
+}
+
+func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) {
+ for _, desc := range descs {
+ if len(desc.EnumType) != 0 {
+ for _, enum := range enums {
+ if enum.parent == desc {
+ desc.enums = append(desc.enums, enum)
+ }
+ }
+ if len(desc.enums) != len(desc.EnumType) {
+ g.Fail("internal error: enum nesting failure for", desc.GetName())
+ }
+ }
+ }
+}
+
+// Construct the Descriptor
+func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) *Descriptor {
+ d := &Descriptor{
+ common: common{file},
+ DescriptorProto: desc,
+ parent: parent,
+ index: index,
+ }
+ if parent == nil {
+ d.path = fmt.Sprintf("%d,%d", messagePath, index)
+ } else {
+ d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index)
+ }
+
+ // The only way to distinguish a group from a message is whether
+ // the containing message has a TYPE_GROUP field that matches.
+ if parent != nil {
+ parts := d.TypeName()
+ if file.Package != nil {
+ parts = append([]string{*file.Package}, parts...)
+ }
+ exp := "." + strings.Join(parts, ".")
+ for _, field := range parent.Field {
+ if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp {
+ d.group = true
+ break
+ }
+ }
+ }
+
+ for _, field := range desc.Extension {
+ d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d})
+ }
+
+ return d
+}
+
+// Return a slice of all the Descriptors defined within this file
+func wrapDescriptors(file *descriptor.FileDescriptorProto) []*Descriptor {
+ sl := make([]*Descriptor, 0, len(file.MessageType)+10)
+ for i, desc := range file.MessageType {
+ sl = wrapThisDescriptor(sl, desc, nil, file, i)
+ }
+ return sl
+}
+
+// Wrap this Descriptor, recursively
+func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) []*Descriptor {
+ sl = append(sl, newDescriptor(desc, parent, file, index))
+ me := sl[len(sl)-1]
+ for i, nested := range desc.NestedType {
+ sl = wrapThisDescriptor(sl, nested, me, file, i)
+ }
+ return sl
+}
+
+// Construct the EnumDescriptor
+func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) *EnumDescriptor {
+ ed := &EnumDescriptor{
+ common: common{file},
+ EnumDescriptorProto: desc,
+ parent: parent,
+ index: index,
+ }
+ if parent == nil {
+ ed.path = fmt.Sprintf("%d,%d", enumPath, index)
+ } else {
+ ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index)
+ }
+ return ed
+}
+
+// Return a slice of all the EnumDescriptors defined within this file
+func wrapEnumDescriptors(file *descriptor.FileDescriptorProto, descs []*Descriptor) []*EnumDescriptor {
+ sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10)
+ // Top-level enums.
+ for i, enum := range file.EnumType {
+ sl = append(sl, newEnumDescriptor(enum, nil, file, i))
+ }
+ // Enums within messages. Enums within embedded messages appear in the outer-most message.
+ for _, nested := range descs {
+ for i, enum := range nested.EnumType {
+ sl = append(sl, newEnumDescriptor(enum, nested, file, i))
+ }
+ }
+ return sl
+}
+
+// Return a slice of all the top-level ExtensionDescriptors defined within this file.
+func wrapExtensions(file *descriptor.FileDescriptorProto) []*ExtensionDescriptor {
+ var sl []*ExtensionDescriptor
+ for _, field := range file.Extension {
+ sl = append(sl, &ExtensionDescriptor{common{file}, field, nil})
+ }
+ return sl
+}
+
+// Return a slice of all the types that are publicly imported into this file.
+func wrapImported(file *descriptor.FileDescriptorProto, g *Generator) (sl []*ImportedDescriptor) {
+ for _, index := range file.PublicDependency {
+ df := g.fileByName(file.Dependency[index])
+ for _, d := range df.desc {
+ if d.GetOptions().GetMapEntry() {
+ continue
+ }
+ sl = append(sl, &ImportedDescriptor{common{file}, d})
+ }
+ for _, e := range df.enum {
+ sl = append(sl, &ImportedDescriptor{common{file}, e})
+ }
+ for _, ext := range df.ext {
+ sl = append(sl, &ImportedDescriptor{common{file}, ext})
+ }
+ }
+ return
+}
+
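+// extractComments builds the comments map, keyed by the comma-joined path
+// described in descriptor.proto. For example (illustrative), since
+// message_type is field 4 of FileDescriptorProto and field is field 2 of
+// DescriptorProto, the leading comment on a file's first top-level message is
+// stored under the key "4,0", and the comment on that message's second field
+// under "4,0,2,1".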
+func extractComments(file *FileDescriptor) {
+ file.comments = make(map[string]*descriptor.SourceCodeInfo_Location)
+ for _, loc := range file.GetSourceCodeInfo().GetLocation() {
+ if loc.LeadingComments == nil {
+ continue
+ }
+ var p []string
+ for _, n := range loc.Path {
+ p = append(p, strconv.Itoa(int(n)))
+ }
+ file.comments[strings.Join(p, ",")] = loc
+ }
+}
+
+// BuildTypeNameMap builds the map from fully qualified type names to objects.
+// The key names for the map come from the input data, which puts a period at the beginning.
+// It should be called after SetPackageNames and before GenerateAllFiles.
+func (g *Generator) BuildTypeNameMap() {
+ g.typeNameToObject = make(map[string]Object)
+ for _, f := range g.allFiles {
+ // The names in this loop are defined by the proto world, not us, so the
+ // package name may be empty. If so, the dotted package name of X will
+ // be ".X"; otherwise it will be ".pkg.X".
+ dottedPkg := "." + f.GetPackage()
+ if dottedPkg != "." {
+ dottedPkg += "."
+ }
+ for _, enum := range f.enum {
+ name := dottedPkg + dottedSlice(enum.TypeName())
+ g.typeNameToObject[name] = enum
+ }
+ for _, desc := range f.desc {
+ name := dottedPkg + dottedSlice(desc.TypeName())
+ g.typeNameToObject[name] = desc
+ }
+ }
+}
+
+// ObjectNamed, given a fully-qualified input type name as it appears in the input data,
+// returns the descriptor for the message or enum with that name.
+func (g *Generator) ObjectNamed(typeName string) Object {
+ o, ok := g.typeNameToObject[typeName]
+ if !ok {
+ g.Fail("can't find object with type", typeName)
+ }
+
+ // If the file of this object isn't a direct dependency of the current file,
+ // or in the current file, then this object has been publicly imported into
+ // a dependency of the current file.
+ // We should return the ImportedDescriptor object for it instead.
+ direct := *o.File().Name == *g.file.Name
+ if !direct {
+ for _, dep := range g.file.Dependency {
+ if *g.fileByName(dep).Name == *o.File().Name {
+ direct = true
+ break
+ }
+ }
+ }
+ if !direct {
+ found := false
+ Loop:
+ for _, dep := range g.file.Dependency {
+ df := g.fileByName(*g.fileByName(dep).Name)
+ for _, td := range df.imp {
+ if td.o == o {
+ // Found it!
+ o = td
+ found = true
+ break Loop
+ }
+ }
+ }
+ if !found {
+ log.Printf("protoc-gen-go: WARNING: failed finding publicly imported dependency for %v, used in %v", typeName, *g.file.Name)
+ }
+ }
+
+ return o
+}
+
+// P prints the arguments to the generated output. It handles strings and int32s, plus
+// handling indirections because they may be *string, etc.
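+// For example (illustrative), g.P("type ", "Foo", " int32") writes the current
+// indentation followed by "type Foo int32" and a trailing newline.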
+func (g *Generator) P(str ...interface{}) {
+ if !g.writeOutput {
+ return
+ }
+ g.WriteString(g.indent)
+ for _, v := range str {
+ switch s := v.(type) {
+ case string:
+ g.WriteString(s)
+ case *string:
+ g.WriteString(*s)
+ case bool:
+ fmt.Fprintf(g, "%t", s)
+ case *bool:
+ fmt.Fprintf(g, "%t", *s)
+ case int:
+ fmt.Fprintf(g, "%d", s)
+ case *int32:
+ fmt.Fprintf(g, "%d", *s)
+ case *int64:
+ fmt.Fprintf(g, "%d", *s)
+ case float64:
+ fmt.Fprintf(g, "%g", s)
+ case *float64:
+ fmt.Fprintf(g, "%g", *s)
+ default:
+ g.Fail(fmt.Sprintf("unknown type in printer: %T", v))
+ }
+ }
+ g.WriteByte('\n')
+}
+
+// addInitf stores the given statement to be printed inside the file's init function.
+// The statement is given as a format specifier and arguments.
+func (g *Generator) addInitf(stmt string, a ...interface{}) {
+ g.init = append(g.init, fmt.Sprintf(stmt, a...))
+}
+
+// In indents the output one tab stop.
+func (g *Generator) In() { g.indent += "\t" }
+
+// Out unindents the output one tab stop.
+func (g *Generator) Out() {
+ if len(g.indent) > 0 {
+ g.indent = g.indent[1:]
+ }
+}
+
+// GenerateAllFiles generates the output for all the files we're outputting.
+func (g *Generator) GenerateAllFiles() {
+ // Initialize the plugins
+ for _, p := range plugins {
+ p.Init(g)
+ }
+ // Generate the output. The generator runs for every file, even the files
+ // that we don't generate output for, so that we can collate the full list
+ // of exported symbols to support public imports.
+ genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles))
+ for _, file := range g.genFiles {
+ genFileMap[file] = true
+ }
+ for _, file := range g.allFiles {
+ g.Reset()
+ g.writeOutput = genFileMap[file]
+ g.generate(file)
+ if !g.writeOutput {
+ continue
+ }
+ g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
+ Name: proto.String(file.goFileName()),
+ Content: proto.String(g.String()),
+ })
+ }
+}
+
+// Run all the plugins associated with the file.
+func (g *Generator) runPlugins(file *FileDescriptor) {
+ for _, p := range plugins {
+ p.Generate(file)
+ }
+}
+
+// FileOf returns the FileDescriptor for this FileDescriptorProto.
+func (g *Generator) FileOf(fd *descriptor.FileDescriptorProto) *FileDescriptor {
+ for _, file := range g.allFiles {
+ if file.FileDescriptorProto == fd {
+ return file
+ }
+ }
+ g.Fail("could not find file in table:", fd.GetName())
+ return nil
+}
+
+// generate fills the buffer with the generated output for a single file;
+// GenerateAllFiles later copies that output into the response protocol buffer.
+func (g *Generator) generate(file *FileDescriptor) {
+ g.file = g.FileOf(file.FileDescriptorProto)
+ g.usedPackages = make(map[string]bool)
+
+ if g.file.index == 0 {
+ // For one file in the package, assert version compatibility.
+ g.P("// This is a compile-time assertion to ensure that this generated file")
+ g.P("// is compatible with the proto package it is being compiled against.")
+ g.P("// A compilation error at this line likely means your copy of the")
+ g.P("// proto package needs to be updated.")
+ g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package")
+ g.P()
+ }
+ for _, td := range g.file.imp {
+ g.generateImported(td)
+ }
+ for _, enum := range g.file.enum {
+ g.generateEnum(enum)
+ }
+ for _, desc := range g.file.desc {
+ // Don't generate virtual messages for maps.
+ if desc.GetOptions().GetMapEntry() {
+ continue
+ }
+ g.generateMessage(desc)
+ }
+ for _, ext := range g.file.ext {
+ g.generateExtension(ext)
+ }
+ g.generateInitFunction()
+
+ // Run the plugins before the imports so we know which imports are necessary.
+ g.runPlugins(file)
+
+ g.generateFileDescriptor(file)
+
+ // Generate header and imports last, though they appear first in the output.
+ rem := g.Buffer
+ g.Buffer = new(bytes.Buffer)
+ g.generateHeader()
+ g.generateImports()
+ if !g.writeOutput {
+ return
+ }
+ g.Write(rem.Bytes())
+
+ // Reformat generated code.
+ fset := token.NewFileSet()
+ raw := g.Bytes()
+ ast, err := parser.ParseFile(fset, "", g, parser.ParseComments)
+ if err != nil {
+ // Print out the bad code with line numbers.
+ // This should never happen in practice, but it can while changing generated code,
+ // so consider this a debugging aid.
+ var src bytes.Buffer
+ s := bufio.NewScanner(bytes.NewReader(raw))
+ for line := 1; s.Scan(); line++ {
+ fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes())
+ }
+ g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String())
+ }
+ g.Reset()
+ err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, ast)
+ if err != nil {
+ g.Fail("generated Go source code could not be reformatted:", err.Error())
+ }
+}
+
+// Generate the header, including package definition
+func (g *Generator) generateHeader() {
+ g.P("// Code generated by protoc-gen-go. DO NOT EDIT.")
+ g.P("// source: ", g.file.Name)
+ g.P()
+
+ name := g.file.PackageName()
+
+ if g.file.index == 0 {
+ // Generate package docs for the first file in the package.
+ g.P("/*")
+ g.P("Package ", name, " is a generated protocol buffer package.")
+ g.P()
+ if loc, ok := g.file.comments[strconv.Itoa(packagePath)]; ok {
+ // not using g.PrintComments because this is a /* */ comment block.
+ text := strings.TrimSuffix(loc.GetLeadingComments(), "\n")
+ for _, line := range strings.Split(text, "\n") {
+ line = strings.TrimPrefix(line, " ")
+ // ensure we don't escape from the block comment
+ line = strings.Replace(line, "*/", "* /", -1)
+ g.P(line)
+ }
+ g.P()
+ }
+ var topMsgs []string
+ g.P("It is generated from these files:")
+ for _, f := range g.genFiles {
+ g.P("\t", f.Name)
+ for _, msg := range f.desc {
+ if msg.parent != nil {
+ continue
+ }
+ topMsgs = append(topMsgs, CamelCaseSlice(msg.TypeName()))
+ }
+ }
+ g.P()
+ g.P("It has these top-level messages:")
+ for _, msg := range topMsgs {
+ g.P("\t", msg)
+ }
+ g.P("*/")
+ }
+
+ g.P("package ", name)
+ g.P()
+}
+
+// PrintComments prints any comments from the source .proto file.
+// The path is a comma-separated list of integers.
+// It returns an indication of whether any comments were printed.
+// See descriptor.proto for its format.
+func (g *Generator) PrintComments(path string) bool {
+ if !g.writeOutput {
+ return false
+ }
+ if loc, ok := g.file.comments[path]; ok {
+ text := strings.TrimSuffix(loc.GetLeadingComments(), "\n")
+ for _, line := range strings.Split(text, "\n") {
+ g.P("// ", strings.TrimPrefix(line, " "))
+ }
+ return true
+ }
+ return false
+}
+
+func (g *Generator) fileByName(filename string) *FileDescriptor {
+ return g.allFilesByName[filename]
+}
+
+// weak returns whether the ith import of the current file is a weak import.
+func (g *Generator) weak(i int32) bool {
+ for _, j := range g.file.WeakDependency {
+ if j == i {
+ return true
+ }
+ }
+ return false
+}
+
+// Generate the imports
+func (g *Generator) generateImports() {
+ // We almost always need a proto import. Rather than computing when we
+ // do, which is tricky when there's a plugin, just import it and
+ // reference it later. The same argument applies to the fmt and math packages.
+ g.P("import " + g.Pkg["proto"] + " " + strconv.Quote(g.ImportPrefix+"github.com/golang/protobuf/proto"))
+ g.P("import " + g.Pkg["fmt"] + ` "fmt"`)
+ g.P("import " + g.Pkg["math"] + ` "math"`)
+ for i, s := range g.file.Dependency {
+ fd := g.fileByName(s)
+ // Do not import our own package.
+ if fd.PackageName() == g.packageName {
+ continue
+ }
+ filename := fd.goFileName()
+ // By default, import path is the dirname of the Go filename.
+ importPath := path.Dir(filename)
+ if substitution, ok := g.ImportMap[s]; ok {
+ importPath = substitution
+ }
+ importPath = g.ImportPrefix + importPath
+ // Skip weak imports.
+ if g.weak(int32(i)) {
+ g.P("// skipping weak import ", fd.PackageName(), " ", strconv.Quote(importPath))
+ continue
+ }
+ // We need to import all the dependencies, even if we don't reference them,
+ // because other code and tools depend on having the full transitive closure
+ // of protocol buffer types in the binary.
+ pname := fd.PackageName()
+ if _, ok := g.usedPackages[pname]; !ok {
+ pname = "_"
+ }
+ g.P("import ", pname, " ", strconv.Quote(importPath))
+ }
+ g.P()
+ // TODO: may need to worry about uniqueness across plugins
+ for _, p := range plugins {
+ p.GenerateImports(g.file)
+ g.P()
+ }
+ g.P("// Reference imports to suppress errors if they are not otherwise used.")
+ g.P("var _ = ", g.Pkg["proto"], ".Marshal")
+ g.P("var _ = ", g.Pkg["fmt"], ".Errorf")
+ g.P("var _ = ", g.Pkg["math"], ".Inf")
+ g.P()
+}
+
+func (g *Generator) generateImported(id *ImportedDescriptor) {
+ // Don't generate public import symbols for files that we are generating
+ // code for, since those symbols will already be in this package.
+ // We can't simply avoid creating the ImportedDescriptor objects,
+ // because g.genFiles isn't populated at that stage.
+ tn := id.TypeName()
+ sn := tn[len(tn)-1]
+ df := g.FileOf(id.o.File())
+ filename := *df.Name
+ for _, fd := range g.genFiles {
+ if *fd.Name == filename {
+ g.P("// Ignoring public import of ", sn, " from ", filename)
+ g.P()
+ return
+ }
+ }
+ g.P("// ", sn, " from public import ", filename)
+ g.usedPackages[df.PackageName()] = true
+
+ for _, sym := range df.exported[id.o] {
+ sym.GenerateAlias(g, df.PackageName())
+ }
+
+ g.P()
+}
+
+// Generate the enum definitions for this EnumDescriptor.
+func (g *Generator) generateEnum(enum *EnumDescriptor) {
+ // The full type name
+ typeName := enum.TypeName()
+ // The full type name, CamelCased.
+ ccTypeName := CamelCaseSlice(typeName)
+ ccPrefix := enum.prefix()
+
+ g.PrintComments(enum.path)
+ g.P("type ", ccTypeName, " int32")
+ g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()})
+ g.P("const (")
+ g.In()
+ for i, e := range enum.Value {
+ g.PrintComments(fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i))
+
+ name := ccPrefix + *e.Name
+ g.P(name, " ", ccTypeName, " = ", e.Number)
+ g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName})
+ }
+ g.Out()
+ g.P(")")
+ g.P("var ", ccTypeName, "_name = map[int32]string{")
+ g.In()
+ generated := make(map[int32]bool) // avoid duplicate values
+ for _, e := range enum.Value {
+ duplicate := ""
+ if _, present := generated[*e.Number]; present {
+ duplicate = "// Duplicate value: "
+ }
+ g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",")
+ generated[*e.Number] = true
+ }
+ g.Out()
+ g.P("}")
+ g.P("var ", ccTypeName, "_value = map[string]int32{")
+ g.In()
+ for _, e := range enum.Value {
+ g.P(strconv.Quote(*e.Name), ": ", e.Number, ",")
+ }
+ g.Out()
+ g.P("}")
+
+ if !enum.proto3() {
+ g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {")
+ g.In()
+ g.P("p := new(", ccTypeName, ")")
+ g.P("*p = x")
+ g.P("return p")
+ g.Out()
+ g.P("}")
+ }
+
+ g.P("func (x ", ccTypeName, ") String() string {")
+ g.In()
+ g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))")
+ g.Out()
+ g.P("}")
+
+ if !enum.proto3() {
+ g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {")
+ g.In()
+ g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`)
+ g.P("if err != nil {")
+ g.In()
+ g.P("return err")
+ g.Out()
+ g.P("}")
+ g.P("*x = ", ccTypeName, "(value)")
+ g.P("return nil")
+ g.Out()
+ g.P("}")
+ }
+
+ var indexes []string
+ for m := enum.parent; m != nil; m = m.parent {
+ // XXX: skip groups?
+ indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
+ }
+ indexes = append(indexes, strconv.Itoa(enum.index))
+ g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) { return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "} }")
+ if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" {
+ g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`)
+ }
+
+ g.P()
+}
+
+// The tag is a string like "varint,2,opt,name=fieldname,def=7" that
+// identifies details of the field for the protocol buffer marshaling and unmarshaling
+// code. The fields are:
+// wire encoding
+// protocol tag number
+// opt,req,rep for optional, required, or repeated
+// packed whether the encoding is "packed" (optional; repeated primitives only)
+// name= the original declared name
+// enum= the name of the enum type if it is an enum-typed field.
+// proto3 if this field is in a proto3 message
+// def= string representation of the default value, if any.
+// The default value must be in a representation that can be used at run-time
+// to generate the default value. Thus bools become 0 and 1, for instance.
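+// For example (illustrative), an optional int32 field declared in a proto2
+// message as
+//	optional int32 id = 1 [default=7];
+// produces the tag "varint,1,opt,name=id,def=7".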
+func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string {
+ optrepreq := ""
+ switch {
+ case isOptional(field):
+ optrepreq = "opt"
+ case isRequired(field):
+ optrepreq = "req"
+ case isRepeated(field):
+ optrepreq = "rep"
+ }
+ var defaultValue string
+ if dv := field.DefaultValue; dv != nil { // set means an explicit default
+ defaultValue = *dv
+ // Some types need tweaking.
+ switch *field.Type {
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ if defaultValue == "true" {
+ defaultValue = "1"
+ } else {
+ defaultValue = "0"
+ }
+ case descriptor.FieldDescriptorProto_TYPE_STRING,
+ descriptor.FieldDescriptorProto_TYPE_BYTES:
+ // Nothing to do. Quoting is done for the whole tag.
+ case descriptor.FieldDescriptorProto_TYPE_ENUM:
+ // For enums we need to provide the integer constant.
+ obj := g.ObjectNamed(field.GetTypeName())
+ if id, ok := obj.(*ImportedDescriptor); ok {
+ // It is an enum that was publicly imported.
+ // We need the underlying type.
+ obj = id.o
+ }
+ enum, ok := obj.(*EnumDescriptor)
+ if !ok {
+ log.Printf("obj is a %T", obj)
+ if id, ok := obj.(*ImportedDescriptor); ok {
+ log.Printf("id.o is a %T", id.o)
+ }
+ g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName()))
+ }
+ defaultValue = enum.integerValueAsString(defaultValue)
+ }
+ defaultValue = ",def=" + defaultValue
+ }
+ enum := ""
+ if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM {
+ // We avoid using obj.PackageName(), because we want to use the
+ // original (proto-world) package name.
+ obj := g.ObjectNamed(field.GetTypeName())
+ if id, ok := obj.(*ImportedDescriptor); ok {
+ obj = id.o
+ }
+ enum = ",enum="
+ if pkg := obj.File().GetPackage(); pkg != "" {
+ enum += pkg + "."
+ }
+ enum += CamelCaseSlice(obj.TypeName())
+ }
+ packed := ""
+ if (field.Options != nil && field.Options.GetPacked()) ||
+ // Per https://developers.google.com/protocol-buffers/docs/proto3#simple:
+ // "In proto3, repeated fields of scalar numeric types use packed encoding by default."
+ (message.proto3() && (field.Options == nil || field.Options.Packed == nil) &&
+ isRepeated(field) && isScalar(field)) {
+ packed = ",packed"
+ }
+ fieldName := field.GetName()
+ name := fieldName
+ if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP {
+ // We must use the type name for groups instead of
+ // the field name to preserve capitalization.
+ // type_name in FieldDescriptorProto is fully-qualified,
+ // but we only want the local part.
+ name = *field.TypeName
+ if i := strings.LastIndex(name, "."); i >= 0 {
+ name = name[i+1:]
+ }
+ }
+ if json := field.GetJsonName(); json != "" && json != name {
+ // TODO: escaping might be needed, in which case
+ // perhaps this should be in its own "json" tag.
+ name += ",json=" + json
+ }
+ name = ",name=" + name
+ if message.proto3() {
+ // We only need the extra tag for []byte fields;
+ // no need to add noise for the others.
+ if *field.Type == descriptor.FieldDescriptorProto_TYPE_BYTES {
+ name += ",proto3"
+ }
+
+ }
+ oneof := ""
+ if field.OneofIndex != nil {
+ oneof = ",oneof"
+ }
+ return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s",
+ wiretype,
+ field.GetNumber(),
+ optrepreq,
+ packed,
+ name,
+ enum,
+ oneof,
+ defaultValue))
+}
+
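+// needsStar reports whether a singular proto2 field of the given type needs an
+// extra pointer in the generated struct to distinguish "unset" from the zero
+// value. Message and group fields are already generated as pointers and bytes
+// fields as []byte, so nil can represent "unset" without further indirection.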
+func needsStar(typ descriptor.FieldDescriptorProto_Type) bool {
+ switch typ {
+ case descriptor.FieldDescriptorProto_TYPE_GROUP:
+ return false
+ case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+ return false
+ case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ return false
+ }
+ return true
+}
+
+// TypeName is the printed name appropriate for an item. If the object is in the current file,
+// TypeName drops the package name and underscores the rest.
+// Otherwise the object is from another package; and the result is the underscored
+// package name followed by the item name.
+// The result always has an initial capital.
+func (g *Generator) TypeName(obj Object) string {
+ return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName())
+}
+
+// TypeNameWithPackage is like TypeName, but always includes the package
+// name even if the object is in our own package.
+func (g *Generator) TypeNameWithPackage(obj Object) string {
+ return obj.PackageName() + CamelCaseSlice(obj.TypeName())
+}
+
+// GoType returns a string representing the type name, and the wire type
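+// For example, a repeated sfixed32 field maps to ("[]int32", "fixed32"), a
+// proto2 optional string to ("*string", "bytes"), and a proto3 string to
+// ("string", "bytes").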
+func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) {
+ // TODO: Options.
+ switch *field.Type {
+ case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ typ, wire = "float64", "fixed64"
+ case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ typ, wire = "float32", "fixed32"
+ case descriptor.FieldDescriptorProto_TYPE_INT64:
+ typ, wire = "int64", "varint"
+ case descriptor.FieldDescriptorProto_TYPE_UINT64:
+ typ, wire = "uint64", "varint"
+ case descriptor.FieldDescriptorProto_TYPE_INT32:
+ typ, wire = "int32", "varint"
+ case descriptor.FieldDescriptorProto_TYPE_UINT32:
+ typ, wire = "uint32", "varint"
+ case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+ typ, wire = "uint64", "fixed64"
+ case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+ typ, wire = "uint32", "fixed32"
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ typ, wire = "bool", "varint"
+ case descriptor.FieldDescriptorProto_TYPE_STRING:
+ typ, wire = "string", "bytes"
+ case descriptor.FieldDescriptorProto_TYPE_GROUP:
+ desc := g.ObjectNamed(field.GetTypeName())
+ typ, wire = "*"+g.TypeName(desc), "group"
+ case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+ desc := g.ObjectNamed(field.GetTypeName())
+ typ, wire = "*"+g.TypeName(desc), "bytes"
+ case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ typ, wire = "[]byte", "bytes"
+ case descriptor.FieldDescriptorProto_TYPE_ENUM:
+ desc := g.ObjectNamed(field.GetTypeName())
+ typ, wire = g.TypeName(desc), "varint"
+ case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+ typ, wire = "int32", "fixed32"
+ case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+ typ, wire = "int64", "fixed64"
+ case descriptor.FieldDescriptorProto_TYPE_SINT32:
+ typ, wire = "int32", "zigzag32"
+ case descriptor.FieldDescriptorProto_TYPE_SINT64:
+ typ, wire = "int64", "zigzag64"
+ default:
+ g.Fail("unknown type for", field.GetName())
+ }
+ if isRepeated(field) {
+ typ = "[]" + typ
+ } else if message != nil && message.proto3() {
+ return
+ } else if field.OneofIndex != nil && message != nil {
+ return
+ } else if needsStar(*field.Type) {
+ typ = "*" + typ
+ }
+ return
+}
+
+func (g *Generator) RecordTypeUse(t string) {
+ if obj, ok := g.typeNameToObject[t]; ok {
+ // Call ObjectNamed to get the true object to record the use.
+ obj = g.ObjectNamed(t)
+ g.usedPackages[obj.PackageName()] = true
+ }
+}
+
+// Method names that may be generated. Fields with these names get an
+// underscore appended. Any change to this set is a potential incompatible
+// API change because it changes generated field names.
+var methodNames = [...]string{
+ "Reset",
+ "String",
+ "ProtoMessage",
+ "Marshal",
+ "Unmarshal",
+ "ExtensionRangeArray",
+ "ExtensionMap",
+ "Descriptor",
+}
+
+// Names of messages in the `google.protobuf` package for which
+// we will generate XXX_WellKnownType methods.
+var wellKnownTypes = map[string]bool{
+ "Any": true,
+ "Duration": true,
+ "Empty": true,
+ "Struct": true,
+ "Timestamp": true,
+
+ "Value": true,
+ "ListValue": true,
+ "DoubleValue": true,
+ "FloatValue": true,
+ "Int64Value": true,
+ "UInt64Value": true,
+ "Int32Value": true,
+ "UInt32Value": true,
+ "BoolValue": true,
+ "StringValue": true,
+ "BytesValue": true,
+}
+
+// Generate the type and default constant definitions for this Descriptor.
+func (g *Generator) generateMessage(message *Descriptor) {
+ // The full type name
+ typeName := message.TypeName()
+ // The full type name, CamelCased.
+ ccTypeName := CamelCaseSlice(typeName)
+
+ usedNames := make(map[string]bool)
+ for _, n := range methodNames {
+ usedNames[n] = true
+ }
+ fieldNames := make(map[*descriptor.FieldDescriptorProto]string)
+ fieldGetterNames := make(map[*descriptor.FieldDescriptorProto]string)
+ fieldTypes := make(map[*descriptor.FieldDescriptorProto]string)
+ mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string)
+
+ oneofFieldName := make(map[int32]string) // indexed by oneof_index field of FieldDescriptorProto
+ oneofDisc := make(map[int32]string) // name of discriminator method
+ oneofTypeName := make(map[*descriptor.FieldDescriptorProto]string) // without star
+ oneofInsertPoints := make(map[int32]int) // oneof_index => offset of g.Buffer
+
+ g.PrintComments(message.path)
+ g.P("type ", ccTypeName, " struct {")
+ g.In()
+
+ // allocNames finds a conflict-free variation of the given strings,
+ // consistently mutating their suffixes.
+ // It returns the same number of strings.
+ allocNames := func(ns ...string) []string {
+ Loop:
+ for {
+ for _, n := range ns {
+ if usedNames[n] {
+ for i := range ns {
+ ns[i] += "_"
+ }
+ continue Loop
+ }
+ }
+ for _, n := range ns {
+ usedNames[n] = true
+ }
+ return ns
+ }
+ }
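+ // For example, a hypothetical field named "string" would collide with the
+ // generated String method, so allocNames("String", "GetString") renames both
+ // to "String_" and "GetString_", keeping the field and its getter consistent.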
+
+ for i, field := range message.Field {
+ // Allocate the getter and the field at the same time so that name
+ // collisions produce consistent field and method names.
+ // TODO: This allocation occurs based on the order of the fields
+ // in the proto file, meaning that a change in the field
+ // ordering can change generated Method/Field names.
+ base := CamelCase(*field.Name)
+ ns := allocNames(base, "Get"+base)
+ fieldName, fieldGetterName := ns[0], ns[1]
+ typename, wiretype := g.GoType(message, field)
+ jsonName := *field.Name
+ tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty")
+
+ fieldNames[field] = fieldName
+ fieldGetterNames[field] = fieldGetterName
+
+ oneof := field.OneofIndex != nil
+ if oneof && oneofFieldName[*field.OneofIndex] == "" {
+ odp := message.OneofDecl[int(*field.OneofIndex)]
+ fname := allocNames(CamelCase(odp.GetName()))[0]
+
+ // This is the first field of a oneof we haven't seen before.
+ // Generate the union field.
+ com := g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex))
+ if com {
+ g.P("//")
+ }
+ g.P("// Types that are valid to be assigned to ", fname, ":")
+ // Generate the rest of this comment later,
+ // when we've computed any disambiguation.
+ oneofInsertPoints[*field.OneofIndex] = g.Buffer.Len()
+
+ dname := "is" + ccTypeName + "_" + fname
+ oneofFieldName[*field.OneofIndex] = fname
+ oneofDisc[*field.OneofIndex] = dname
+ tag := `protobuf_oneof:"` + odp.GetName() + `"`
+ g.P(fname, " ", dname, " `", tag, "`")
+ }
+
+ if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
+ desc := g.ObjectNamed(field.GetTypeName())
+ if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() {
+ // Figure out the Go types and tags for the key and value types.
+ keyField, valField := d.Field[0], d.Field[1]
+ keyType, keyWire := g.GoType(d, keyField)
+ valType, valWire := g.GoType(d, valField)
+ keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire)
+
+ // We don't use stars, except for message-typed values.
+ // Message and enum types are the only two possibly foreign types used in maps,
+ // so record their use. They are not permitted as map keys.
+ keyType = strings.TrimPrefix(keyType, "*")
+ switch *valField.Type {
+ case descriptor.FieldDescriptorProto_TYPE_ENUM:
+ valType = strings.TrimPrefix(valType, "*")
+ g.RecordTypeUse(valField.GetTypeName())
+ case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+ g.RecordTypeUse(valField.GetTypeName())
+ default:
+ valType = strings.TrimPrefix(valType, "*")
+ }
+
+ typename = fmt.Sprintf("map[%s]%s", keyType, valType)
+ mapFieldTypes[field] = typename // record for the getter generation
+
+ tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag)
+ }
+ }
+
+ fieldTypes[field] = typename
+
+ if oneof {
+ tname := ccTypeName + "_" + fieldName
+ // It is possible for this to collide with a message or enum
+ // nested in this message. Check for collisions.
+ for {
+ ok := true
+ for _, desc := range message.nested {
+ if CamelCaseSlice(desc.TypeName()) == tname {
+ ok = false
+ break
+ }
+ }
+ for _, enum := range message.enums {
+ if CamelCaseSlice(enum.TypeName()) == tname {
+ ok = false
+ break
+ }
+ }
+ if !ok {
+ tname += "_"
+ continue
+ }
+ break
+ }
+
+ oneofTypeName[field] = tname
+ continue
+ }
+
+ g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i))
+ g.P(fieldName, "\t", typename, "\t`", tag, "`")
+ g.RecordTypeUse(field.GetTypeName())
+ }
+ if len(message.ExtensionRange) > 0 {
+ g.P(g.Pkg["proto"], ".XXX_InternalExtensions `json:\"-\"`")
+ }
+ if !message.proto3() {
+ g.P("XXX_unrecognized\t[]byte `json:\"-\"`")
+ }
+ g.Out()
+ g.P("}")
+
+ // Update g.Buffer to list valid oneof types.
+ // We do this down here, after we've disambiguated the oneof type names.
+ // We go in reverse order of insertion point to avoid invalidating offsets.
+ for oi := int32(len(message.OneofDecl)); oi >= 0; oi-- {
+ ip := oneofInsertPoints[oi]
+ all := g.Buffer.Bytes()
+ rem := all[ip:]
+ g.Buffer = bytes.NewBuffer(all[:ip:ip]) // set cap so we don't scribble on rem
+ for _, field := range message.Field {
+ if field.OneofIndex == nil || *field.OneofIndex != oi {
+ continue
+ }
+ g.P("//\t*", oneofTypeName[field])
+ }
+ g.Buffer.Write(rem)
+ }
+
+ // Reset, String and ProtoMessage methods.
+ g.P("func (m *", ccTypeName, ") Reset() { *m = ", ccTypeName, "{} }")
+ g.P("func (m *", ccTypeName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }")
+ g.P("func (*", ccTypeName, ") ProtoMessage() {}")
+ var indexes []string
+ for m := message; m != nil; m = m.parent {
+ indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
+ }
+ g.P("func (*", ccTypeName, ") Descriptor() ([]byte, []int) { return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "} }")
+ // TODO: Revisit the decision to use a XXX_WellKnownType method
+ // if we change proto.MessageName to work with multiple equivalents.
+ if message.file.GetPackage() == "google.protobuf" && wellKnownTypes[message.GetName()] {
+ g.P("func (*", ccTypeName, `) XXX_WellKnownType() string { return "`, message.GetName(), `" }`)
+ }
+
+ // Extension support methods
+ var hasExtensions, isMessageSet bool
+ if len(message.ExtensionRange) > 0 {
+ hasExtensions = true
+ // message_set_wire_format only makes sense when extensions are defined.
+ if opts := message.Options; opts != nil && opts.GetMessageSetWireFormat() {
+ isMessageSet = true
+ g.P()
+ g.P("func (m *", ccTypeName, ") Marshal() ([]byte, error) {")
+ g.In()
+ g.P("return ", g.Pkg["proto"], ".MarshalMessageSet(&m.XXX_InternalExtensions)")
+ g.Out()
+ g.P("}")
+ g.P("func (m *", ccTypeName, ") Unmarshal(buf []byte) error {")
+ g.In()
+ g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSet(buf, &m.XXX_InternalExtensions)")
+ g.Out()
+ g.P("}")
+ g.P("func (m *", ccTypeName, ") MarshalJSON() ([]byte, error) {")
+ g.In()
+ g.P("return ", g.Pkg["proto"], ".MarshalMessageSetJSON(&m.XXX_InternalExtensions)")
+ g.Out()
+ g.P("}")
+ g.P("func (m *", ccTypeName, ") UnmarshalJSON(buf []byte) error {")
+ g.In()
+ g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions)")
+ g.Out()
+ g.P("}")
+ g.P("// ensure ", ccTypeName, " satisfies proto.Marshaler and proto.Unmarshaler")
+ g.P("var _ ", g.Pkg["proto"], ".Marshaler = (*", ccTypeName, ")(nil)")
+ g.P("var _ ", g.Pkg["proto"], ".Unmarshaler = (*", ccTypeName, ")(nil)")
+ }
+
+ g.P()
+ g.P("var extRange_", ccTypeName, " = []", g.Pkg["proto"], ".ExtensionRange{")
+ g.In()
+ for _, r := range message.ExtensionRange {
+ end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends
+ g.P("{", r.Start, ", ", end, "},")
+ }
+ g.Out()
+ g.P("}")
+ g.P("func (*", ccTypeName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {")
+ g.In()
+ g.P("return extRange_", ccTypeName)
+ g.Out()
+ g.P("}")
+ }
+
+ // Default constants
+ defNames := make(map[*descriptor.FieldDescriptorProto]string)
+ for _, field := range message.Field {
+ def := field.GetDefaultValue()
+ if def == "" {
+ continue
+ }
+ fieldname := "Default_" + ccTypeName + "_" + CamelCase(*field.Name)
+ defNames[field] = fieldname
+ typename, _ := g.GoType(message, field)
+ if typename[0] == '*' {
+ typename = typename[1:]
+ }
+ kind := "const "
+ switch {
+ case typename == "bool":
+ case typename == "string":
+ def = strconv.Quote(def)
+ case typename == "[]byte":
+ def = "[]byte(" + strconv.Quote(unescape(def)) + ")"
+ kind = "var "
+ case def == "inf", def == "-inf", def == "nan":
+ // These names are known to, and defined by, the protocol language.
+ switch def {
+ case "inf":
+ def = "math.Inf(1)"
+ case "-inf":
+ def = "math.Inf(-1)"
+ case "nan":
+ def = "math.NaN()"
+ }
+ if *field.Type == descriptor.FieldDescriptorProto_TYPE_FLOAT {
+ def = "float32(" + def + ")"
+ }
+ kind = "var "
+ case *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM:
+ // Must be an enum. Need to construct the prefixed name.
+ obj := g.ObjectNamed(field.GetTypeName())
+ var enum *EnumDescriptor
+ if id, ok := obj.(*ImportedDescriptor); ok {
+ // The enum type has been publicly imported.
+ enum, _ = id.o.(*EnumDescriptor)
+ } else {
+ enum, _ = obj.(*EnumDescriptor)
+ }
+ if enum == nil {
+ log.Printf("don't know how to generate constant for %s", fieldname)
+ continue
+ }
+ def = g.DefaultPackageName(obj) + enum.prefix() + def
+ }
+ g.P(kind, fieldname, " ", typename, " = ", def)
+ g.file.addExport(message, constOrVarSymbol{fieldname, kind, ""})
+ }
+ g.P()
+
+ // Oneof per-field types, discriminants and getters.
+ //
+ // Generate unexported named types for the discriminant interfaces.
+ // We shouldn't have to do this, but there was (~19 Aug 2015) a compiler/linker bug
+ // that was triggered by using anonymous interfaces here.
+ // TODO: Revisit this and consider reverting back to anonymous interfaces.
+ for oi := range message.OneofDecl {
+ dname := oneofDisc[int32(oi)]
+ g.P("type ", dname, " interface { ", dname, "() }")
+ }
+ g.P()
+ for _, field := range message.Field {
+ if field.OneofIndex == nil {
+ continue
+ }
+ _, wiretype := g.GoType(message, field)
+ tag := "protobuf:" + g.goTag(message, field, wiretype)
+ g.P("type ", oneofTypeName[field], " struct{ ", fieldNames[field], " ", fieldTypes[field], " `", tag, "` }")
+ g.RecordTypeUse(field.GetTypeName())
+ }
+ g.P()
+ for _, field := range message.Field {
+ if field.OneofIndex == nil {
+ continue
+ }
+ g.P("func (*", oneofTypeName[field], ") ", oneofDisc[*field.OneofIndex], "() {}")
+ }
+ g.P()
+ for oi := range message.OneofDecl {
+ fname := oneofFieldName[int32(oi)]
+ g.P("func (m *", ccTypeName, ") Get", fname, "() ", oneofDisc[int32(oi)], " {")
+ g.P("if m != nil { return m.", fname, " }")
+ g.P("return nil")
+ g.P("}")
+ }
+ g.P()
+
+ // Field getters
+ var getters []getterSymbol
+ for _, field := range message.Field {
+ oneof := field.OneofIndex != nil
+
+ fname := fieldNames[field]
+ typename, _ := g.GoType(message, field)
+ if t, ok := mapFieldTypes[field]; ok {
+ typename = t
+ }
+ mname := fieldGetterNames[field]
+ star := ""
+ if needsStar(*field.Type) && typename[0] == '*' {
+ typename = typename[1:]
+ star = "*"
+ }
+
+ // Only export getter symbols for basic types,
+ // and for messages and enums in the same package.
+ // Groups are not exported.
+ // Foreign types can't be hoisted through a public import because
+ // the importer may not already be importing the defining .proto.
+ // As an example, imagine we have an import tree like this:
+ // A.proto -> B.proto -> C.proto
+ // If A publicly imports B, we need to generate the getters from B in A's output,
+ // but if one such getter returns something from C then we cannot do that
+ // because A is not importing C already.
+ var getter, genType bool
+ switch *field.Type {
+ case descriptor.FieldDescriptorProto_TYPE_GROUP:
+ getter = false
+ case descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_ENUM:
+ // Only export getter if its return type is in this package.
+ getter = g.ObjectNamed(field.GetTypeName()).PackageName() == message.PackageName()
+ genType = true
+ default:
+ getter = true
+ }
+ if getter {
+ getters = append(getters, getterSymbol{
+ name: mname,
+ typ: typename,
+ typeName: field.GetTypeName(),
+ genType: genType,
+ })
+ }
+
+ g.P("func (m *", ccTypeName, ") "+mname+"() "+typename+" {")
+ g.In()
+ def, hasDef := defNames[field]
+ typeDefaultIsNil := false // whether this field type's default value is a literal nil unless specified
+ switch *field.Type {
+ case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ typeDefaultIsNil = !hasDef
+ case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+ typeDefaultIsNil = true
+ }
+ if isRepeated(field) {
+ typeDefaultIsNil = true
+ }
+ if typeDefaultIsNil && !oneof {
+ // A bytes field with no explicit default needs less generated code,
+ // as does a message or group field, or a repeated field.
+ g.P("if m != nil {")
+ g.In()
+ g.P("return m." + fname)
+ g.Out()
+ g.P("}")
+ g.P("return nil")
+ g.Out()
+ g.P("}")
+ g.P()
+ continue
+ }
+ if !oneof {
+ if message.proto3() {
+ g.P("if m != nil {")
+ } else {
+ g.P("if m != nil && m." + fname + " != nil {")
+ }
+ g.In()
+ g.P("return " + star + "m." + fname)
+ g.Out()
+ g.P("}")
+ } else {
+ uname := oneofFieldName[*field.OneofIndex]
+ tname := oneofTypeName[field]
+ g.P("if x, ok := m.Get", uname, "().(*", tname, "); ok {")
+ g.P("return x.", fname)
+ g.P("}")
+ }
+ if hasDef {
+ if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES {
+ g.P("return " + def)
+ } else {
+ // The default is a []byte var.
+ // Make a copy when returning it to be safe.
+ g.P("return append([]byte(nil), ", def, "...)")
+ }
+ } else {
+ switch *field.Type {
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ g.P("return false")
+ case descriptor.FieldDescriptorProto_TYPE_STRING:
+ g.P(`return ""`)
+ case descriptor.FieldDescriptorProto_TYPE_GROUP,
+ descriptor.FieldDescriptorProto_TYPE_MESSAGE,
+ descriptor.FieldDescriptorProto_TYPE_BYTES:
+ // This is only possible for oneof fields.
+ g.P("return nil")
+ case descriptor.FieldDescriptorProto_TYPE_ENUM:
+ // The default default for an enum is the first value in the enum,
+ // not zero.
+ obj := g.ObjectNamed(field.GetTypeName())
+ var enum *EnumDescriptor
+ if id, ok := obj.(*ImportedDescriptor); ok {
+ // The enum type has been publicly imported.
+ enum, _ = id.o.(*EnumDescriptor)
+ } else {
+ enum, _ = obj.(*EnumDescriptor)
+ }
+ if enum == nil {
+ log.Printf("don't know how to generate getter for %s", field.GetName())
+ continue
+ }
+ if len(enum.Value) == 0 {
+ g.P("return 0 // empty enum")
+ } else {
+ first := enum.Value[0].GetName()
+ g.P("return ", g.DefaultPackageName(obj)+enum.prefix()+first)
+ }
+ default:
+ g.P("return 0")
+ }
+ }
+ g.Out()
+ g.P("}")
+ g.P()
+ }
+
+ if !message.group {
+ ms := &messageSymbol{
+ sym: ccTypeName,
+ hasExtensions: hasExtensions,
+ isMessageSet: isMessageSet,
+ hasOneof: len(message.OneofDecl) > 0,
+ getters: getters,
+ }
+ g.file.addExport(message, ms)
+ }
+
+ // Oneof functions
+ if len(message.OneofDecl) > 0 {
+ fieldWire := make(map[*descriptor.FieldDescriptorProto]string)
+
+ // method
+ enc := "_" + ccTypeName + "_OneofMarshaler"
+ dec := "_" + ccTypeName + "_OneofUnmarshaler"
+ size := "_" + ccTypeName + "_OneofSizer"
+ encSig := "(msg " + g.Pkg["proto"] + ".Message, b *" + g.Pkg["proto"] + ".Buffer) error"
+ decSig := "(msg " + g.Pkg["proto"] + ".Message, tag, wire int, b *" + g.Pkg["proto"] + ".Buffer) (bool, error)"
+ sizeSig := "(msg " + g.Pkg["proto"] + ".Message) (n int)"
+
+ g.P("// XXX_OneofFuncs is for the internal use of the proto package.")
+ g.P("func (*", ccTypeName, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", func", sizeSig, ", []interface{}) {")
+ g.P("return ", enc, ", ", dec, ", ", size, ", []interface{}{")
+ for _, field := range message.Field {
+ if field.OneofIndex == nil {
+ continue
+ }
+ g.P("(*", oneofTypeName[field], ")(nil),")
+ }
+ g.P("}")
+ g.P("}")
+ g.P()
+
+ // marshaler
+ g.P("func ", enc, encSig, " {")
+ g.P("m := msg.(*", ccTypeName, ")")
+ for oi, odp := range message.OneofDecl {
+ g.P("// ", odp.GetName())
+ fname := oneofFieldName[int32(oi)]
+ g.P("switch x := m.", fname, ".(type) {")
+ for _, field := range message.Field {
+ if field.OneofIndex == nil || int(*field.OneofIndex) != oi {
+ continue
+ }
+ g.P("case *", oneofTypeName[field], ":")
+ var wire, pre, post string
+ val := "x." + fieldNames[field] // overridden for TYPE_BOOL
+ canFail := false // only TYPE_MESSAGE and TYPE_GROUP can fail
+ switch *field.Type {
+ case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ wire = "WireFixed64"
+ pre = "b.EncodeFixed64(" + g.Pkg["math"] + ".Float64bits("
+ post = "))"
+ case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ wire = "WireFixed32"
+ pre = "b.EncodeFixed32(uint64(" + g.Pkg["math"] + ".Float32bits("
+ post = ")))"
+ case descriptor.FieldDescriptorProto_TYPE_INT64,
+ descriptor.FieldDescriptorProto_TYPE_UINT64:
+ wire = "WireVarint"
+ pre, post = "b.EncodeVarint(uint64(", "))"
+ case descriptor.FieldDescriptorProto_TYPE_INT32,
+ descriptor.FieldDescriptorProto_TYPE_UINT32,
+ descriptor.FieldDescriptorProto_TYPE_ENUM:
+ wire = "WireVarint"
+ pre, post = "b.EncodeVarint(uint64(", "))"
+ case descriptor.FieldDescriptorProto_TYPE_FIXED64,
+ descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+ wire = "WireFixed64"
+ pre, post = "b.EncodeFixed64(uint64(", "))"
+ case descriptor.FieldDescriptorProto_TYPE_FIXED32,
+ descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+ wire = "WireFixed32"
+ pre, post = "b.EncodeFixed32(uint64(", "))"
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ // bool needs special handling.
+ g.P("t := uint64(0)")
+ g.P("if ", val, " { t = 1 }")
+ val = "t"
+ wire = "WireVarint"
+ pre, post = "b.EncodeVarint(", ")"
+ case descriptor.FieldDescriptorProto_TYPE_STRING:
+ wire = "WireBytes"
+ pre, post = "b.EncodeStringBytes(", ")"
+ case descriptor.FieldDescriptorProto_TYPE_GROUP:
+ wire = "WireStartGroup"
+ pre, post = "b.Marshal(", ")"
+ canFail = true
+ case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+ wire = "WireBytes"
+ pre, post = "b.EncodeMessage(", ")"
+ canFail = true
+ case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ wire = "WireBytes"
+ pre, post = "b.EncodeRawBytes(", ")"
+ case descriptor.FieldDescriptorProto_TYPE_SINT32:
+ wire = "WireVarint"
+ pre, post = "b.EncodeZigzag32(uint64(", "))"
+ case descriptor.FieldDescriptorProto_TYPE_SINT64:
+ wire = "WireVarint"
+ pre, post = "b.EncodeZigzag64(uint64(", "))"
+ default:
+ g.Fail("unhandled oneof field type ", field.Type.String())
+ }
+ fieldWire[field] = wire
+ g.P("b.EncodeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".", wire, ")")
+ if !canFail {
+ g.P(pre, val, post)
+ } else {
+ g.P("if err := ", pre, val, post, "; err != nil {")
+ g.P("return err")
+ g.P("}")
+ }
+ if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP {
+ g.P("b.EncodeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".WireEndGroup)")
+ }
+ }
+ g.P("case nil:")
+ g.P("default: return ", g.Pkg["fmt"], `.Errorf("`, ccTypeName, ".", fname, ` has unexpected type %T", x)`)
+ g.P("}")
+ }
+ g.P("return nil")
+ g.P("}")
+ g.P()
+
+ // unmarshaler
+ g.P("func ", dec, decSig, " {")
+ g.P("m := msg.(*", ccTypeName, ")")
+ g.P("switch tag {")
+ for _, field := range message.Field {
+ if field.OneofIndex == nil {
+ continue
+ }
+ odp := message.OneofDecl[int(*field.OneofIndex)]
+ g.P("case ", field.Number, ": // ", odp.GetName(), ".", *field.Name)
+ g.P("if wire != ", g.Pkg["proto"], ".", fieldWire[field], " {")
+ g.P("return true, ", g.Pkg["proto"], ".ErrInternalBadWireType")
+ g.P("}")
+ lhs := "x, err" // overridden for TYPE_MESSAGE and TYPE_GROUP
+ var dec, cast, cast2 string
+ switch *field.Type {
+ case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ dec, cast = "b.DecodeFixed64()", g.Pkg["math"]+".Float64frombits"
+ case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ dec, cast, cast2 = "b.DecodeFixed32()", "uint32", g.Pkg["math"]+".Float32frombits"
+ case descriptor.FieldDescriptorProto_TYPE_INT64:
+ dec, cast = "b.DecodeVarint()", "int64"
+ case descriptor.FieldDescriptorProto_TYPE_UINT64:
+ dec = "b.DecodeVarint()"
+ case descriptor.FieldDescriptorProto_TYPE_INT32:
+ dec, cast = "b.DecodeVarint()", "int32"
+ case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+ dec = "b.DecodeFixed64()"
+ case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+ dec, cast = "b.DecodeFixed32()", "uint32"
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ dec = "b.DecodeVarint()"
+ // handled specially below
+ case descriptor.FieldDescriptorProto_TYPE_STRING:
+ dec = "b.DecodeStringBytes()"
+ case descriptor.FieldDescriptorProto_TYPE_GROUP:
+ g.P("msg := new(", fieldTypes[field][1:], ")") // drop star
+ lhs = "err"
+ dec = "b.DecodeGroup(msg)"
+ // handled specially below
+ case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+ g.P("msg := new(", fieldTypes[field][1:], ")") // drop star
+ lhs = "err"
+ dec = "b.DecodeMessage(msg)"
+ // handled specially below
+ case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ dec = "b.DecodeRawBytes(true)"
+ case descriptor.FieldDescriptorProto_TYPE_UINT32:
+ dec, cast = "b.DecodeVarint()", "uint32"
+ case descriptor.FieldDescriptorProto_TYPE_ENUM:
+ dec, cast = "b.DecodeVarint()", fieldTypes[field]
+ case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+ dec, cast = "b.DecodeFixed32()", "int32"
+ case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+ dec, cast = "b.DecodeFixed64()", "int64"
+ case descriptor.FieldDescriptorProto_TYPE_SINT32:
+ dec, cast = "b.DecodeZigzag32()", "int32"
+ case descriptor.FieldDescriptorProto_TYPE_SINT64:
+ dec, cast = "b.DecodeZigzag64()", "int64"
+ default:
+ g.Fail("unhandled oneof field type ", field.Type.String())
+ }
+ g.P(lhs, " := ", dec)
+ val := "x"
+ if cast != "" {
+ val = cast + "(" + val + ")"
+ }
+ if cast2 != "" {
+ val = cast2 + "(" + val + ")"
+ }
+ switch *field.Type {
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ val += " != 0"
+ case descriptor.FieldDescriptorProto_TYPE_GROUP,
+ descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+ val = "msg"
+ }
+ g.P("m.", oneofFieldName[*field.OneofIndex], " = &", oneofTypeName[field], "{", val, "}")
+ g.P("return true, err")
+ }
+ g.P("default: return false, nil")
+ g.P("}")
+ g.P("}")
+ g.P()
+
+ // sizer
+ g.P("func ", size, sizeSig, " {")
+ g.P("m := msg.(*", ccTypeName, ")")
+ for oi, odp := range message.OneofDecl {
+ g.P("// ", odp.GetName())
+ fname := oneofFieldName[int32(oi)]
+ g.P("switch x := m.", fname, ".(type) {")
+ for _, field := range message.Field {
+ if field.OneofIndex == nil || int(*field.OneofIndex) != oi {
+ continue
+ }
+ g.P("case *", oneofTypeName[field], ":")
+ val := "x." + fieldNames[field]
+ var wire, varint, fixed string
+ switch *field.Type {
+ case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ wire = "WireFixed64"
+ fixed = "8"
+ case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ wire = "WireFixed32"
+ fixed = "4"
+ case descriptor.FieldDescriptorProto_TYPE_INT64,
+ descriptor.FieldDescriptorProto_TYPE_UINT64,
+ descriptor.FieldDescriptorProto_TYPE_INT32,
+ descriptor.FieldDescriptorProto_TYPE_UINT32,
+ descriptor.FieldDescriptorProto_TYPE_ENUM:
+ wire = "WireVarint"
+ varint = val
+ case descriptor.FieldDescriptorProto_TYPE_FIXED64,
+ descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+ wire = "WireFixed64"
+ fixed = "8"
+ case descriptor.FieldDescriptorProto_TYPE_FIXED32,
+ descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+ wire = "WireFixed32"
+ fixed = "4"
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ wire = "WireVarint"
+ fixed = "1"
+ case descriptor.FieldDescriptorProto_TYPE_STRING:
+ wire = "WireBytes"
+ fixed = "len(" + val + ")"
+ varint = fixed
+ case descriptor.FieldDescriptorProto_TYPE_GROUP:
+ wire = "WireStartGroup"
+ fixed = g.Pkg["proto"] + ".Size(" + val + ")"
+ case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+ wire = "WireBytes"
+ g.P("s := ", g.Pkg["proto"], ".Size(", val, ")")
+ fixed = "s"
+ varint = fixed
+ case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ wire = "WireBytes"
+ fixed = "len(" + val + ")"
+ varint = fixed
+ case descriptor.FieldDescriptorProto_TYPE_SINT32:
+ wire = "WireVarint"
+ varint = "(uint32(" + val + ") << 1) ^ uint32((int32(" + val + ") >> 31))"
+ case descriptor.FieldDescriptorProto_TYPE_SINT64:
+ wire = "WireVarint"
+ varint = "uint64(" + val + " << 1) ^ uint64((int64(" + val + ") >> 63))"
+ default:
+ g.Fail("unhandled oneof field type ", field.Type.String())
+ }
+ g.P("n += ", g.Pkg["proto"], ".SizeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".", wire, ")")
+ if varint != "" {
+ g.P("n += ", g.Pkg["proto"], ".SizeVarint(uint64(", varint, "))")
+ }
+ if fixed != "" {
+ g.P("n += ", fixed)
+ }
+ if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP {
+ g.P("n += ", g.Pkg["proto"], ".SizeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".WireEndGroup)")
+ }
+ }
+ g.P("case nil:")
+ g.P("default:")
+ g.P("panic(", g.Pkg["fmt"], ".Sprintf(\"proto: unexpected type %T in oneof\", x))")
+ g.P("}")
+ }
+ g.P("return n")
+ g.P("}")
+ g.P()
+ }
+
+ for _, ext := range message.ext {
+ g.generateExtension(ext)
+ }
+
+ fullName := strings.Join(message.TypeName(), ".")
+ if g.file.Package != nil {
+ fullName = *g.file.Package + "." + fullName
+ }
+
+ g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], ccTypeName, fullName)
+}
+
+var escapeChars = [256]byte{
+ 'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?',
+}
+
+// unescape reverses the "C" escaping that protoc does for default values of bytes fields.
+// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape
+// sequences are conveyed, unmodified, into the decoded result.
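+// For example, the escaped default `\101\x42\n` decodes to the bytes
+// 'A', 'B', '\n', while an unrecognized sequence such as `\q` is left as "\q".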
+func unescape(s string) string {
+ // NB: Sadly, we can't use strconv.Unquote because protoc will escape both
+ // single and double quotes, but strconv.Unquote only allows one or the
+ // other (based on actual surrounding quotes of its input argument).
+
+ var out []byte
+ for len(s) > 0 {
+ // regular character, or too short to be valid escape
+ if s[0] != '\\' || len(s) < 2 {
+ out = append(out, s[0])
+ s = s[1:]
+ } else if c := escapeChars[s[1]]; c != 0 {
+ // escape sequence
+ out = append(out, c)
+ s = s[2:]
+ } else if s[1] == 'x' || s[1] == 'X' {
+ // hex escape, e.g. "\x80"
+ if len(s) < 4 {
+ // too short to be valid
+ out = append(out, s[:2]...)
+ s = s[2:]
+ continue
+ }
+ v, err := strconv.ParseUint(s[2:4], 16, 8)
+ if err != nil {
+ out = append(out, s[:4]...)
+ } else {
+ out = append(out, byte(v))
+ }
+ s = s[4:]
+ } else if '0' <= s[1] && s[1] <= '7' {
+ // octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164"
+ // so consume up to 2 more bytes or up to end-of-string
+ n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567"))
+ if n > 3 {
+ n = 3
+ }
+ v, err := strconv.ParseUint(s[1:1+n], 8, 8)
+ if err != nil {
+ out = append(out, s[:1+n]...)
+ } else {
+ out = append(out, byte(v))
+ }
+ s = s[1+n:]
+ } else {
+ // bad escape, just propagate the slash as-is
+ out = append(out, s[0])
+ s = s[1:]
+ }
+ }
+
+ return string(out)
+}
+
+func (g *Generator) generateExtension(ext *ExtensionDescriptor) {
+ ccTypeName := ext.DescName()
+
+ extObj := g.ObjectNamed(*ext.Extendee)
+ var extDesc *Descriptor
+ if id, ok := extObj.(*ImportedDescriptor); ok {
+ // This is extending a publicly imported message.
+ // We need the underlying type for goTag.
+ extDesc = id.o.(*Descriptor)
+ } else {
+ extDesc = extObj.(*Descriptor)
+ }
+ extendedType := "*" + g.TypeName(extObj) // always use the original
+ field := ext.FieldDescriptorProto
+ fieldType, wireType := g.GoType(ext.parent, field)
+ tag := g.goTag(extDesc, field, wireType)
+ g.RecordTypeUse(*ext.Extendee)
+ if n := ext.FieldDescriptorProto.TypeName; n != nil {
+ // foreign extension type
+ g.RecordTypeUse(*n)
+ }
+
+ typeName := ext.TypeName()
+
+ // Special case for proto2 message sets: If this extension is extending
+ // proto2_bridge.MessageSet, and its final name component is "message_set_extension",
+ // then drop that last component.
+ mset := false
+ if extendedType == "*proto2_bridge.MessageSet" && typeName[len(typeName)-1] == "message_set_extension" {
+ typeName = typeName[:len(typeName)-1]
+ mset = true
+ }
+
+ // For text formatting, the package must be exactly what the .proto file declares,
+ // ignoring overrides such as the go_package option, and with no dot/underscore mapping.
+ extName := strings.Join(typeName, ".")
+ if g.file.Package != nil {
+ extName = *g.file.Package + "." + extName
+ }
+
+ g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{")
+ g.In()
+ g.P("ExtendedType: (", extendedType, ")(nil),")
+ g.P("ExtensionType: (", fieldType, ")(nil),")
+ g.P("Field: ", field.Number, ",")
+ g.P(`Name: "`, extName, `",`)
+ g.P("Tag: ", tag, ",")
+ g.P(`Filename: "`, g.file.GetName(), `",`)
+
+ g.Out()
+ g.P("}")
+ g.P()
+
+ if mset {
+ // Generate a bit more code to register with message_set.go.
+ g.addInitf("%s.RegisterMessageSetType((%s)(nil), %d, %q)", g.Pkg["proto"], fieldType, *field.Number, extName)
+ }
+
+ g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""})
+}
+
+func (g *Generator) generateInitFunction() {
+ for _, enum := range g.file.enum {
+ g.generateEnumRegistration(enum)
+ }
+ for _, d := range g.file.desc {
+ for _, ext := range d.ext {
+ g.generateExtensionRegistration(ext)
+ }
+ }
+ for _, ext := range g.file.ext {
+ g.generateExtensionRegistration(ext)
+ }
+ if len(g.init) == 0 {
+ return
+ }
+ g.P("func init() {")
+ g.In()
+ for _, l := range g.init {
+ g.P(l)
+ }
+ g.Out()
+ g.P("}")
+ g.init = nil
+}
+
+func (g *Generator) generateFileDescriptor(file *FileDescriptor) {
+ // Make a copy and trim source_code_info data.
+ // TODO: Trim this more when we know exactly what we need.
+ pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto)
+ pb.SourceCodeInfo = nil
+
+ b, err := proto.Marshal(pb)
+ if err != nil {
+ g.Fail(err.Error())
+ }
+
+ var buf bytes.Buffer
+ w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
+ w.Write(b)
+ w.Close()
+ b = buf.Bytes()
+
+ v := file.VarName()
+ g.P()
+ g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }")
+ g.P("var ", v, " = []byte{")
+ g.In()
+ g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto")
+ for len(b) > 0 {
+ n := 16
+ if n > len(b) {
+ n = len(b)
+ }
+
+ s := ""
+ for _, c := range b[:n] {
+ s += fmt.Sprintf("0x%02x,", c)
+ }
+ g.P(s)
+
+ b = b[n:]
+ }
+ g.Out()
+ g.P("}")
+}
+
+func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) {
+ // We always print the full (proto-world) package name here.
+ pkg := enum.File().GetPackage()
+ if pkg != "" {
+ pkg += "."
+ }
+ // The full type name
+ typeName := enum.TypeName()
+ // The full type name, CamelCased.
+ ccTypeName := CamelCaseSlice(typeName)
+ g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName)
+}
+
+func (g *Generator) generateExtensionRegistration(ext *ExtensionDescriptor) {
+ g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName())
+}
+
+// And now lots of helper functions.
+
+// Is c an ASCII lower-case letter?
+func isASCIILower(c byte) bool {
+ return 'a' <= c && c <= 'z'
+}
+
+// Is c an ASCII digit?
+func isASCIIDigit(c byte) bool {
+ return '0' <= c && c <= '9'
+}
+
+// CamelCase returns the CamelCased name.
+// If there is an interior underscore followed by a lower case letter,
+// drop the underscore and convert the letter to upper case.
+// There is a remote possibility of this rewrite causing a name collision,
+// but it's so remote we're prepared to pretend it's nonexistent - since the
+// C++ generator lowercases names, it's extremely unlikely to have two fields
+// with different capitalizations.
+// In short, _my_field_name_2 becomes XMyFieldName_2.
+func CamelCase(s string) string {
+ if s == "" {
+ return ""
+ }
+ t := make([]byte, 0, 32)
+ i := 0
+ if s[0] == '_' {
+ // Need a capital letter; drop the '_'.
+ t = append(t, 'X')
+ i++
+ }
+ // Invariant: if the next letter is lower case, it must be converted
+ // to upper case.
+ // That is, we process a word at a time, where words are marked by _ or
+ // upper case letter. Digits are treated as words.
+ for ; i < len(s); i++ {
+ c := s[i]
+ if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) {
+ continue // Skip the underscore in s.
+ }
+ if isASCIIDigit(c) {
+ t = append(t, c)
+ continue
+ }
+ // Assume we have a letter now - if not, it's a bogus identifier.
+ // The next word is a sequence of characters that must start upper case.
+ if isASCIILower(c) {
+ c ^= ' ' // Make it a capital letter.
+ }
+ t = append(t, c) // Guaranteed not lower case.
+ // Accept lower case sequence that follows.
+ for i+1 < len(s) && isASCIILower(s[i+1]) {
+ i++
+ t = append(t, s[i])
+ }
+ }
+ return string(t)
+}
+
+// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to
+// be joined with "_".
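+// For example, CamelCaseSlice([]string{"Outer", "InnerMsg"}) returns
+// "Outer_InnerMsg", which is how nested types receive their generated Go names.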
+func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) }
+
+// dottedSlice turns a sliced name into a dotted name.
+func dottedSlice(elem []string) string { return strings.Join(elem, ".") }
+
+// Is this field optional?
+func isOptional(field *descriptor.FieldDescriptorProto) bool {
+ return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL
+}
+
+// Is this field required?
+func isRequired(field *descriptor.FieldDescriptorProto) bool {
+ return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED
+}
+
+// Is this field repeated?
+func isRepeated(field *descriptor.FieldDescriptorProto) bool {
+ return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED
+}
+
+// Is this field a scalar numeric type?
+func isScalar(field *descriptor.FieldDescriptorProto) bool {
+ if field.Type == nil {
+ return false
+ }
+ switch *field.Type {
+ case descriptor.FieldDescriptorProto_TYPE_DOUBLE,
+ descriptor.FieldDescriptorProto_TYPE_FLOAT,
+ descriptor.FieldDescriptorProto_TYPE_INT64,
+ descriptor.FieldDescriptorProto_TYPE_UINT64,
+ descriptor.FieldDescriptorProto_TYPE_INT32,
+ descriptor.FieldDescriptorProto_TYPE_FIXED64,
+ descriptor.FieldDescriptorProto_TYPE_FIXED32,
+ descriptor.FieldDescriptorProto_TYPE_BOOL,
+ descriptor.FieldDescriptorProto_TYPE_UINT32,
+ descriptor.FieldDescriptorProto_TYPE_ENUM,
+ descriptor.FieldDescriptorProto_TYPE_SFIXED32,
+ descriptor.FieldDescriptorProto_TYPE_SFIXED64,
+ descriptor.FieldDescriptorProto_TYPE_SINT32,
+ descriptor.FieldDescriptorProto_TYPE_SINT64:
+ return true
+ default:
+ return false
+ }
+}
+
+// badToUnderscore is the mapping function used to generate Go names from package names,
+// which can be dotted in the input .proto file. It replaces non-identifier characters such as
+// dot or dash with underscore.
+func badToUnderscore(r rune) rune {
+ if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
+ return r
+ }
+ return '_'
+}
+
+// baseName returns the last path element of the name, with the last dotted suffix removed.
+func baseName(name string) string {
+ // First, find the last element
+ if i := strings.LastIndex(name, "/"); i >= 0 {
+ name = name[i+1:]
+ }
+ // Now drop the suffix
+ if i := strings.LastIndex(name, "."); i >= 0 {
+ name = name[0:i]
+ }
+ return name
+}
+
+// The SourceCodeInfo message describes the location of elements of a parsed
+// .proto file by way of a "path", which is a sequence of integers that
+// describe the route from a FileDescriptorProto to the relevant submessage.
+// The path alternates between a field number of a repeated field, and an index
+// into that repeated field. The constants below define the field numbers that
+// are used.
+//
+// See descriptor.proto for more information about this.
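+//
+// For example, the path "4,3,2,7" addresses message_type[3].field[7], i.e. the
+// eighth field of the fourth top-level message in the file.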
+const (
+ // tag numbers in FileDescriptorProto
+ packagePath = 2 // package
+ messagePath = 4 // message_type
+ enumPath = 5 // enum_type
+ // tag numbers in DescriptorProto
+ messageFieldPath = 2 // field
+ messageMessagePath = 3 // nested_type
+ messageEnumPath = 4 // enum_type
+ messageOneofPath = 8 // oneof_decl
+ // tag numbers in EnumDescriptorProto
+ enumValuePath = 2 // value
+)
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go
new file mode 100644
index 0000000..76808f3
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go
@@ -0,0 +1,114 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2013 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package generator
+
+import (
+ "testing"
+
+ "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+func TestCamelCase(t *testing.T) {
+ tests := []struct {
+ in, want string
+ }{
+ {"one", "One"},
+ {"one_two", "OneTwo"},
+ {"_my_field_name_2", "XMyFieldName_2"},
+ {"Something_Capped", "Something_Capped"},
+ {"my_Name", "My_Name"},
+ {"OneTwo", "OneTwo"},
+ {"_", "X"},
+ {"_a_", "XA_"},
+ }
+ for _, tc := range tests {
+ if got := CamelCase(tc.in); got != tc.want {
+ t.Errorf("CamelCase(%q) = %q, want %q", tc.in, got, tc.want)
+ }
+ }
+}
+
+func TestGoPackageOption(t *testing.T) {
+ tests := []struct {
+ in string
+ impPath, pkg string
+ ok bool
+ }{
+ {"", "", "", false},
+ {"foo", "", "foo", true},
+ {"github.com/golang/bar", "github.com/golang/bar", "bar", true},
+ {"github.com/golang/bar;baz", "github.com/golang/bar", "baz", true},
+ }
+ for _, tc := range tests {
+ d := &FileDescriptor{
+ FileDescriptorProto: &descriptor.FileDescriptorProto{
+ Options: &descriptor.FileOptions{
+ GoPackage: &tc.in,
+ },
+ },
+ }
+ impPath, pkg, ok := d.goPackageOption()
+ if impPath != tc.impPath || pkg != tc.pkg || ok != tc.ok {
+ t.Errorf("go_package = %q => (%q, %q, %t), want (%q, %q, %t)", tc.in,
+ impPath, pkg, ok, tc.impPath, tc.pkg, tc.ok)
+ }
+ }
+}
+
+func TestUnescape(t *testing.T) {
+ tests := []struct {
+ in string
+ out string
+ }{
+ // successful cases, including all kinds of escapes
+ {"", ""},
+ {"foo bar baz frob nitz", "foo bar baz frob nitz"},
+ {`\000\001\002\003\004\005\006\007`, string([]byte{0, 1, 2, 3, 4, 5, 6, 7})},
+ {`\a\b\f\n\r\t\v\\\?\'\"`, string([]byte{'\a', '\b', '\f', '\n', '\r', '\t', '\v', '\\', '?', '\'', '"'})},
+ {`\x10\x20\x30\x40\x50\x60\x70\x80`, string([]byte{16, 32, 48, 64, 80, 96, 112, 128})},
+ // variable length octal escapes
+ {`\0\018\222\377\3\04\005\6\07`, string([]byte{0, 1, '8', 0222, 255, 3, 4, 5, 6, 7})},
+ // malformed escape sequences left as is
+ {"foo \\g bar", "foo \\g bar"},
+ {"foo \\xg0 bar", "foo \\xg0 bar"},
+ {"\\", "\\"},
+ {"\\x", "\\x"},
+ {"\\xf", "\\xf"},
+ {"\\777", "\\777"}, // overflows byte
+ }
+ for _, tc := range tests {
+ s := unescape(tc.in)
+ if s != tc.out {
+ t.Errorf("doUnescape(%q) = %q; should have been %q", tc.in, s, tc.out)
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go
new file mode 100644
index 0000000..2660e47
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go
@@ -0,0 +1,463 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package grpc outputs gRPC service descriptions in Go code.
+// It runs as a plugin inside the Go protocol buffer compiler plugin,
+// protoc-gen-go, into which it is linked.
+package grpc
+
+import (
+ "fmt"
+ "path"
+ "strconv"
+ "strings"
+
+ pb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ "github.com/golang/protobuf/protoc-gen-go/generator"
+)
+
+// generatedCodeVersion indicates a version of the generated code.
+// It is incremented whenever an incompatibility between the generated code and
+// the grpc package is introduced; the generated code references
+// a constant, grpc.SupportPackageIsVersionN (where N is generatedCodeVersion).
+const generatedCodeVersion = 4
+
+// Paths for packages used by code generated in this file,
+// relative to the import_prefix of the generator.Generator.
+const (
+ contextPkgPath = "golang.org/x/net/context"
+ grpcPkgPath = "google.golang.org/grpc"
+)
+
+func init() {
+ generator.RegisterPlugin(new(grpc))
+}
+
+// grpc is an implementation of the Go protocol buffer compiler's
+// plugin architecture. It generates bindings for gRPC support.
+type grpc struct {
+ gen *generator.Generator
+}
+
+// Name returns the name of this plugin, "grpc".
+func (g *grpc) Name() string {
+ return "grpc"
+}
+
+// The names for packages imported in the generated code.
+// They may vary from the final path component of the import path
+// if the name is used by other packages.
+var (
+ contextPkg string
+ grpcPkg string
+)
+
+// Init initializes the plugin.
+func (g *grpc) Init(gen *generator.Generator) {
+ g.gen = gen
+ contextPkg = generator.RegisterUniquePackageName("context", nil)
+ grpcPkg = generator.RegisterUniquePackageName("grpc", nil)
+}
+
+// Given a type name defined in a .proto, return its object.
+// Also record that we're using it, to guarantee the associated import.
+func (g *grpc) objectNamed(name string) generator.Object {
+ g.gen.RecordTypeUse(name)
+ return g.gen.ObjectNamed(name)
+}
+
+// Given a type name defined in a .proto, return its name as we will print it.
+func (g *grpc) typeName(str string) string {
+ return g.gen.TypeName(g.objectNamed(str))
+}
+
+// P forwards to g.gen.P.
+func (g *grpc) P(args ...interface{}) { g.gen.P(args...) }
+
+// Generate generates code for the services in the given file.
+func (g *grpc) Generate(file *generator.FileDescriptor) {
+ if len(file.FileDescriptorProto.Service) == 0 {
+ return
+ }
+
+ g.P("// Reference imports to suppress errors if they are not otherwise used.")
+ g.P("var _ ", contextPkg, ".Context")
+ g.P("var _ ", grpcPkg, ".ClientConn")
+ g.P()
+
+ // Assert version compatibility.
+ g.P("// This is a compile-time assertion to ensure that this generated file")
+ g.P("// is compatible with the grpc package it is being compiled against.")
+ g.P("const _ = ", grpcPkg, ".SupportPackageIsVersion", generatedCodeVersion)
+ g.P()
+
+ for i, service := range file.FileDescriptorProto.Service {
+ g.generateService(file, service, i)
+ }
+}
+
+// GenerateImports generates the import declaration for this file.
+func (g *grpc) GenerateImports(file *generator.FileDescriptor) {
+ if len(file.FileDescriptorProto.Service) == 0 {
+ return
+ }
+ g.P("import (")
+ g.P(contextPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, contextPkgPath)))
+ g.P(grpcPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, grpcPkgPath)))
+ g.P(")")
+ g.P()
+}
+
+// reservedClientName records whether a client name is reserved on the client side.
+var reservedClientName = map[string]bool{
+// TODO: do we need any in gRPC?
+}
+
+func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] }
+
+// generateService generates all the code for the named service.
+func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) {
+ path := fmt.Sprintf("6,%d", index) // 6 means service.
+
+ origServName := service.GetName()
+ fullServName := origServName
+ if pkg := file.GetPackage(); pkg != "" {
+ fullServName = pkg + "." + fullServName
+ }
+ servName := generator.CamelCase(origServName)
+
+ g.P()
+ g.P("// Client API for ", servName, " service")
+ g.P()
+
+ // Client interface.
+ g.P("type ", servName, "Client interface {")
+ for i, method := range service.Method {
+ g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
+ g.P(g.generateClientSignature(servName, method))
+ }
+ g.P("}")
+ g.P()
+
+ // Client structure.
+ g.P("type ", unexport(servName), "Client struct {")
+ g.P("cc *", grpcPkg, ".ClientConn")
+ g.P("}")
+ g.P()
+
+ // NewClient factory.
+ g.P("func New", servName, "Client (cc *", grpcPkg, ".ClientConn) ", servName, "Client {")
+ g.P("return &", unexport(servName), "Client{cc}")
+ g.P("}")
+ g.P()
+
+ var methodIndex, streamIndex int
+ serviceDescVar := "_" + servName + "_serviceDesc"
+ // Client method implementations.
+ for _, method := range service.Method {
+ var descExpr string
+ if !method.GetServerStreaming() && !method.GetClientStreaming() {
+ // Unary RPC method
+ descExpr = fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex)
+ methodIndex++
+ } else {
+ // Streaming RPC method
+ descExpr = fmt.Sprintf("&%s.Streams[%d]", serviceDescVar, streamIndex)
+ streamIndex++
+ }
+ g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr)
+ }
+
+ g.P("// Server API for ", servName, " service")
+ g.P()
+
+ // Server interface.
+ serverType := servName + "Server"
+ g.P("type ", serverType, " interface {")
+ for i, method := range service.Method {
+ g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
+ g.P(g.generateServerSignature(servName, method))
+ }
+ g.P("}")
+ g.P()
+
+ // Server registration.
+ g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {")
+ g.P("s.RegisterService(&", serviceDescVar, `, srv)`)
+ g.P("}")
+ g.P()
+
+ // Server handler implementations.
+ var handlerNames []string
+ for _, method := range service.Method {
+ hname := g.generateServerMethod(servName, fullServName, method)
+ handlerNames = append(handlerNames, hname)
+ }
+
+ // Service descriptor.
+ g.P("var ", serviceDescVar, " = ", grpcPkg, ".ServiceDesc {")
+ g.P("ServiceName: ", strconv.Quote(fullServName), ",")
+ g.P("HandlerType: (*", serverType, ")(nil),")
+ g.P("Methods: []", grpcPkg, ".MethodDesc{")
+ for i, method := range service.Method {
+ if method.GetServerStreaming() || method.GetClientStreaming() {
+ continue
+ }
+ g.P("{")
+ g.P("MethodName: ", strconv.Quote(method.GetName()), ",")
+ g.P("Handler: ", handlerNames[i], ",")
+ g.P("},")
+ }
+ g.P("},")
+ g.P("Streams: []", grpcPkg, ".StreamDesc{")
+ for i, method := range service.Method {
+ if !method.GetServerStreaming() && !method.GetClientStreaming() {
+ continue
+ }
+ g.P("{")
+ g.P("StreamName: ", strconv.Quote(method.GetName()), ",")
+ g.P("Handler: ", handlerNames[i], ",")
+ if method.GetServerStreaming() {
+ g.P("ServerStreams: true,")
+ }
+ if method.GetClientStreaming() {
+ g.P("ClientStreams: true,")
+ }
+ g.P("},")
+ }
+ g.P("},")
+ g.P("Metadata: \"", file.GetName(), "\",")
+ g.P("}")
+ g.P()
+}
+
+// generateClientSignature returns the client-side signature for a method.
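+// With the default package aliases, a hypothetical unary method GetFeed taking
+// GetFeedRequest and returning Feed produces:
+//   GetFeed(ctx context.Context, in *GetFeedRequest, opts ...grpc.CallOption) (*Feed, error)
+// Streaming methods return a <Service>_<Method>Client stream type instead of
+// the response message.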
+func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string {
+ origMethName := method.GetName()
+ methName := generator.CamelCase(origMethName)
+ if reservedClientName[methName] {
+ methName += "_"
+ }
+ reqArg := ", in *" + g.typeName(method.GetInputType())
+ if method.GetClientStreaming() {
+ reqArg = ""
+ }
+ respName := "*" + g.typeName(method.GetOutputType())
+ if method.GetServerStreaming() || method.GetClientStreaming() {
+ respName = servName + "_" + generator.CamelCase(origMethName) + "Client"
+ }
+ return fmt.Sprintf("%s(ctx %s.Context%s, opts ...%s.CallOption) (%s, error)", methName, contextPkg, reqArg, grpcPkg, respName)
+}
+
+func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) {
+ sname := fmt.Sprintf("/%s/%s", fullServName, method.GetName())
+ methName := generator.CamelCase(method.GetName())
+ inType := g.typeName(method.GetInputType())
+ outType := g.typeName(method.GetOutputType())
+
+ g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{")
+ if !method.GetServerStreaming() && !method.GetClientStreaming() {
+ g.P("out := new(", outType, ")")
+ // TODO: Pass descExpr to Invoke.
+ g.P("err := ", grpcPkg, `.Invoke(ctx, "`, sname, `", in, out, c.cc, opts...)`)
+ g.P("if err != nil { return nil, err }")
+ g.P("return out, nil")
+ g.P("}")
+ g.P()
+ return
+ }
+ streamType := unexport(servName) + methName + "Client"
+ g.P("stream, err := ", grpcPkg, ".NewClientStream(ctx, ", descExpr, `, c.cc, "`, sname, `", opts...)`)
+ g.P("if err != nil { return nil, err }")
+ g.P("x := &", streamType, "{stream}")
+ if !method.GetClientStreaming() {
+ g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }")
+ g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
+ }
+ g.P("return x, nil")
+ g.P("}")
+ g.P()
+
+ genSend := method.GetClientStreaming()
+ genRecv := method.GetServerStreaming()
+ genCloseAndRecv := !method.GetServerStreaming()
+
+ // Stream auxiliary types and methods.
+ g.P("type ", servName, "_", methName, "Client interface {")
+ if genSend {
+ g.P("Send(*", inType, ") error")
+ }
+ if genRecv {
+ g.P("Recv() (*", outType, ", error)")
+ }
+ if genCloseAndRecv {
+ g.P("CloseAndRecv() (*", outType, ", error)")
+ }
+ g.P(grpcPkg, ".ClientStream")
+ g.P("}")
+ g.P()
+
+ g.P("type ", streamType, " struct {")
+ g.P(grpcPkg, ".ClientStream")
+ g.P("}")
+ g.P()
+
+ if genSend {
+ g.P("func (x *", streamType, ") Send(m *", inType, ") error {")
+ g.P("return x.ClientStream.SendMsg(m)")
+ g.P("}")
+ g.P()
+ }
+ if genRecv {
+ g.P("func (x *", streamType, ") Recv() (*", outType, ", error) {")
+ g.P("m := new(", outType, ")")
+ g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
+ g.P("return m, nil")
+ g.P("}")
+ g.P()
+ }
+ if genCloseAndRecv {
+ g.P("func (x *", streamType, ") CloseAndRecv() (*", outType, ", error) {")
+ g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
+ g.P("m := new(", outType, ")")
+ g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
+ g.P("return m, nil")
+ g.P("}")
+ g.P()
+ }
+}
+
+// generateServerSignature returns the server-side signature for a method.
+func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string {
+ origMethName := method.GetName()
+ methName := generator.CamelCase(origMethName)
+ if reservedClientName[methName] {
+ methName += "_"
+ }
+
+ var reqArgs []string
+ ret := "error"
+ if !method.GetServerStreaming() && !method.GetClientStreaming() {
+ reqArgs = append(reqArgs, contextPkg+".Context")
+ ret = "(*" + g.typeName(method.GetOutputType()) + ", error)"
+ }
+ if !method.GetClientStreaming() {
+ reqArgs = append(reqArgs, "*"+g.typeName(method.GetInputType()))
+ }
+ if method.GetServerStreaming() || method.GetClientStreaming() {
+ reqArgs = append(reqArgs, servName+"_"+generator.CamelCase(origMethName)+"Server")
+ }
+
+ return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret
+}
+
+func (g *grpc) generateServerMethod(servName, fullServName string, method *pb.MethodDescriptorProto) string {
+ methName := generator.CamelCase(method.GetName())
+ hname := fmt.Sprintf("_%s_%s_Handler", servName, methName)
+ inType := g.typeName(method.GetInputType())
+ outType := g.typeName(method.GetOutputType())
+
+ if !method.GetServerStreaming() && !method.GetClientStreaming() {
+ g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error, interceptor ", grpcPkg, ".UnaryServerInterceptor) (interface{}, error) {")
+ g.P("in := new(", inType, ")")
+ g.P("if err := dec(in); err != nil { return nil, err }")
+ g.P("if interceptor == nil { return srv.(", servName, "Server).", methName, "(ctx, in) }")
+ g.P("info := &", grpcPkg, ".UnaryServerInfo{")
+ g.P("Server: srv,")
+ g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", fullServName, methName)), ",")
+ g.P("}")
+ g.P("handler := func(ctx ", contextPkg, ".Context, req interface{}) (interface{}, error) {")
+ g.P("return srv.(", servName, "Server).", methName, "(ctx, req.(*", inType, "))")
+ g.P("}")
+ g.P("return interceptor(ctx, in, info, handler)")
+ g.P("}")
+ g.P()
+ return hname
+ }
+ streamType := unexport(servName) + methName + "Server"
+ g.P("func ", hname, "(srv interface{}, stream ", grpcPkg, ".ServerStream) error {")
+ if !method.GetClientStreaming() {
+ g.P("m := new(", inType, ")")
+ g.P("if err := stream.RecvMsg(m); err != nil { return err }")
+ g.P("return srv.(", servName, "Server).", methName, "(m, &", streamType, "{stream})")
+ } else {
+ g.P("return srv.(", servName, "Server).", methName, "(&", streamType, "{stream})")
+ }
+ g.P("}")
+ g.P()
+
+ genSend := method.GetServerStreaming()
+ genSendAndClose := !method.GetServerStreaming()
+ genRecv := method.GetClientStreaming()
+
+ // Stream auxiliary types and methods.
+ g.P("type ", servName, "_", methName, "Server interface {")
+ if genSend {
+ g.P("Send(*", outType, ") error")
+ }
+ if genSendAndClose {
+ g.P("SendAndClose(*", outType, ") error")
+ }
+ if genRecv {
+ g.P("Recv() (*", inType, ", error)")
+ }
+ g.P(grpcPkg, ".ServerStream")
+ g.P("}")
+ g.P()
+
+ g.P("type ", streamType, " struct {")
+ g.P(grpcPkg, ".ServerStream")
+ g.P("}")
+ g.P()
+
+ if genSend {
+ g.P("func (x *", streamType, ") Send(m *", outType, ") error {")
+ g.P("return x.ServerStream.SendMsg(m)")
+ g.P("}")
+ g.P()
+ }
+ if genSendAndClose {
+ g.P("func (x *", streamType, ") SendAndClose(m *", outType, ") error {")
+ g.P("return x.ServerStream.SendMsg(m)")
+ g.P("}")
+ g.P()
+ }
+ if genRecv {
+ g.P("func (x *", streamType, ") Recv() (*", inType, ", error) {")
+ g.P("m := new(", inType, ")")
+ g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }")
+ g.P("return m, nil")
+ g.P("}")
+ g.P()
+ }
+
+ return hname
+}
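
For the unary case, the client and server pieces above combine into output of roughly the following shape. The example uses the Test service and its UnaryCall method from the testdata grpc.proto further down in this diff; treat it as a sketch of generated output, not code that appears in this change.

    func (c *testClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) {
        out := new(SimpleResponse)
        err := grpc.Invoke(ctx, "/grpc.testing.Test/UnaryCall", in, out, c.cc, opts...)
        if err != nil {
            return nil, err
        }
        return out, nil
    }

    func _Test_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
        in := new(SimpleRequest)
        if err := dec(in); err != nil {
            return nil, err
        }
        if interceptor == nil {
            return srv.(TestServer).UnaryCall(ctx, in)
        }
        info := &grpc.UnaryServerInfo{
            Server:     srv,
            FullMethod: "/grpc.testing.Test/UnaryCall",
        }
        handler := func(ctx context.Context, req interface{}) (interface{}, error) {
            return srv.(TestServer).UnaryCall(ctx, req.(*SimpleRequest))
        }
        return interceptor(ctx, in, info, handler)
    }

Streaming methods instead go through grpc.NewClientStream on the client side and a thin wrapper struct embedding grpc.ClientStream or grpc.ServerStream, exactly as the Send/Recv/CloseAndRecv helpers above spell out.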
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go
new file mode 100644
index 0000000..532a550
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go
@@ -0,0 +1,34 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package main
+
+import _ "github.com/golang/protobuf/protoc-gen-go/grpc"
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/main.go b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go
new file mode 100644
index 0000000..8e2486d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go
@@ -0,0 +1,98 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate
+// Go code. Run it by building this program and putting it in your path with
+// the name
+// protoc-gen-go
+// That word 'go' at the end becomes part of the option string set for the
+// protocol compiler, so once the protocol compiler (protoc) is installed
+// you can run
+// protoc --go_out=output_directory input_directory/file.proto
+// to generate Go bindings for the protocol defined by file.proto.
+// With that input, the output will be written to
+// output_directory/file.pb.go
+//
+// The generated code is documented in the package comment for
+// the library.
+//
+// See the README and documentation for protocol buffers to learn more:
+// https://developers.google.com/protocol-buffers/
+package main
+
+import (
+ "io/ioutil"
+ "os"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/protoc-gen-go/generator"
+)
+
+func main() {
+ // Begin by allocating a generator. The request and response structures are stored there
+ // so we can do error handling easily - the response structure contains the field to
+ // report failure.
+ g := generator.New()
+
+ data, err := ioutil.ReadAll(os.Stdin)
+ if err != nil {
+ g.Error(err, "reading input")
+ }
+
+ if err := proto.Unmarshal(data, g.Request); err != nil {
+ g.Error(err, "parsing input proto")
+ }
+
+ if len(g.Request.FileToGenerate) == 0 {
+ g.Fail("no files to generate")
+ }
+
+ g.CommandLineParameters(g.Request.GetParameter())
+
+ // Create a wrapped version of the Descriptors and EnumDescriptors that
+ // point to the file that defines them.
+ g.WrapTypes()
+
+ g.SetPackageNames()
+ g.BuildTypeNameMap()
+
+ g.GenerateAllFiles()
+
+ // Send back the results.
+ data, err = proto.Marshal(g.Response)
+ if err != nil {
+ g.Error(err, "failed to marshal output proto")
+ }
+ _, err = os.Stdout.Write(data)
+ if err != nil {
+ g.Error(err, "failed to write output proto")
+ }
+}
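
The flow in main.go is the whole plugin protocol: read a serialized CodeGeneratorRequest from stdin, do the work, write a serialized CodeGeneratorResponse to stdout. A minimal custom plugin can speak the same protocol directly; the sketch below is an illustration only (the "echo" plugin and its output names are made up) and is not part of this package.

    package main

    import (
        "io/ioutil"
        "log"
        "os"

        "github.com/golang/protobuf/proto"
        plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
    )

    func main() {
        // protoc hands the request to the plugin on stdin.
        data, err := ioutil.ReadAll(os.Stdin)
        if err != nil {
            log.Fatal(err)
        }
        req := new(plugin.CodeGeneratorRequest)
        if err := proto.Unmarshal(data, req); err != nil {
            log.Fatal(err)
        }

        // Emit one trivial file per .proto named on the command line.
        resp := new(plugin.CodeGeneratorResponse)
        for _, name := range req.GetFileToGenerate() {
            resp.File = append(resp.File, &plugin.CodeGeneratorResponse_File{
                Name:    proto.String(name + ".echo.txt"),
                Content: proto.String("saw " + name),
            })
        }

        // The response goes back to protoc on stdout.
        out, err := proto.Marshal(resp)
        if err != nil {
            log.Fatal(err)
        }
        os.Stdout.Write(out)
    }

Built and installed as protoc-gen-echo somewhere on $PATH, it would be driven by protoc --echo_out=. file.proto, following the protoc-gen-$NAME naming rule described in plugin.proto below.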
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile
new file mode 100644
index 0000000..bc0463d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile
@@ -0,0 +1,45 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Not stored here, but plugin.proto is in https://github.com/google/protobuf/
+# at src/google/protobuf/compiler/plugin.proto
+# Also we need to fix an import.
+regenerate:
+ @echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION
+ cp $(HOME)/src/protobuf/include/google/protobuf/compiler/plugin.proto .
+ protoc --go_out=Mgoogle/protobuf/descriptor.proto=github.com/golang/protobuf/protoc-gen-go/descriptor:../../../../.. \
+ -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/compiler/plugin.proto
+
+restore:
+ cp plugin.pb.golden plugin.pb.go
+
+preserve:
+ cp plugin.pb.go plugin.pb.golden
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go
new file mode 100644
index 0000000..c608a24
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go
@@ -0,0 +1,293 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/compiler/plugin.proto
+
+/*
+Package plugin_go is a generated protocol buffer package.
+
+It is generated from these files:
+ google/protobuf/compiler/plugin.proto
+
+It has these top-level messages:
+ Version
+ CodeGeneratorRequest
+ CodeGeneratorResponse
+*/
+package plugin_go
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The version number of protocol compiler.
+type Version struct {
+ Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
+ Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
+ Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
+ // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+ // be empty for mainline stable releases.
+ Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Version) Reset() { *m = Version{} }
+func (m *Version) String() string { return proto.CompactTextString(m) }
+func (*Version) ProtoMessage() {}
+func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *Version) GetMajor() int32 {
+ if m != nil && m.Major != nil {
+ return *m.Major
+ }
+ return 0
+}
+
+func (m *Version) GetMinor() int32 {
+ if m != nil && m.Minor != nil {
+ return *m.Minor
+ }
+ return 0
+}
+
+func (m *Version) GetPatch() int32 {
+ if m != nil && m.Patch != nil {
+ return *m.Patch
+ }
+ return 0
+}
+
+func (m *Version) GetSuffix() string {
+ if m != nil && m.Suffix != nil {
+ return *m.Suffix
+ }
+ return ""
+}
+
+// An encoded CodeGeneratorRequest is written to the plugin's stdin.
+type CodeGeneratorRequest struct {
+ // The .proto files that were explicitly listed on the command-line. The
+ // code generator should generate code only for these files. Each file's
+ // descriptor will be included in proto_file, below.
+ FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"`
+ // The generator parameter passed on the command-line.
+ Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
+ // FileDescriptorProtos for all files in files_to_generate and everything
+ // they import. The files will appear in topological order, so each file
+ // appears before any file that imports it.
+ //
+ // protoc guarantees that all proto_files will be written after
+ // the fields above, even though this is not technically guaranteed by the
+ // protobuf wire format. This theoretically could allow a plugin to stream
+ // in the FileDescriptorProtos and handle them one by one rather than read
+ // the entire set into memory at once. However, as of this writing, this
+ // is not similarly optimized on protoc's end -- it will store all fields in
+ // memory at once before sending them to the plugin.
+ //
+ // Type names of fields and extensions in the FileDescriptorProto are always
+ // fully qualified.
+ ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"`
+ // The version number of protocol compiler.
+ CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} }
+func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) }
+func (*CodeGeneratorRequest) ProtoMessage() {}
+func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *CodeGeneratorRequest) GetFileToGenerate() []string {
+ if m != nil {
+ return m.FileToGenerate
+ }
+ return nil
+}
+
+func (m *CodeGeneratorRequest) GetParameter() string {
+ if m != nil && m.Parameter != nil {
+ return *m.Parameter
+ }
+ return ""
+}
+
+func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto {
+ if m != nil {
+ return m.ProtoFile
+ }
+ return nil
+}
+
+func (m *CodeGeneratorRequest) GetCompilerVersion() *Version {
+ if m != nil {
+ return m.CompilerVersion
+ }
+ return nil
+}
+
+// The plugin writes an encoded CodeGeneratorResponse to stdout.
+type CodeGeneratorResponse struct {
+ // Error message. If non-empty, code generation failed. The plugin process
+ // should exit with status code zero even if it reports an error in this way.
+ //
+ // This should be used to indicate errors in .proto files which prevent the
+ // code generator from generating correct code. Errors which indicate a
+ // problem in protoc itself -- such as the input CodeGeneratorRequest being
+ // unparseable -- should be reported by writing a message to stderr and
+ // exiting with a non-zero status code.
+ Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
+ File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} }
+func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) }
+func (*CodeGeneratorResponse) ProtoMessage() {}
+func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *CodeGeneratorResponse) GetError() string {
+ if m != nil && m.Error != nil {
+ return *m.Error
+ }
+ return ""
+}
+
+func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File {
+ if m != nil {
+ return m.File
+ }
+ return nil
+}
+
+// Represents a single generated file.
+type CodeGeneratorResponse_File struct {
+ // The file name, relative to the output directory. The name must not
+ // contain "." or ".." components and must be relative, not be absolute (so,
+ // the file cannot lie outside the output directory). "/" must be used as
+ // the path separator, not "\".
+ //
+ // If the name is omitted, the content will be appended to the previous
+ // file. This allows the generator to break large files into small chunks,
+ // and allows the generated text to be streamed back to protoc so that large
+ // files need not reside completely in memory at one time. Note that as of
+ // this writing protoc does not optimize for this -- it will read the entire
+ // CodeGeneratorResponse before writing files to disk.
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ // If non-empty, indicates that the named file should already exist, and the
+ // content here is to be inserted into that file at a defined insertion
+ // point. This feature allows a code generator to extend the output
+ // produced by another code generator. The original generator may provide
+ // insertion points by placing special annotations in the file that look
+ // like:
+ // @@protoc_insertion_point(NAME)
+ // The annotation can have arbitrary text before and after it on the line,
+ // which allows it to be placed in a comment. NAME should be replaced with
+ // an identifier naming the point -- this is what other generators will use
+ // as the insertion_point. Code inserted at this point will be placed
+ // immediately above the line containing the insertion point (thus multiple
+ // insertions to the same point will come out in the order they were added).
+ // The double-@ is intended to make it unlikely that the generated code
+ // could contain things that look like insertion points by accident.
+ //
+ // For example, the C++ code generator places the following line in the
+ // .pb.h files that it generates:
+ // // @@protoc_insertion_point(namespace_scope)
+ // This line appears within the scope of the file's package namespace, but
+ // outside of any particular class. Another plugin can then specify the
+ // insertion_point "namespace_scope" to generate additional classes or
+ // other declarations that should be placed in this scope.
+ //
+ // Note that if the line containing the insertion point begins with
+ // whitespace, the same whitespace will be added to every line of the
+ // inserted text. This is useful for languages like Python, where
+ // indentation matters. In these languages, the insertion point comment
+ // should be indented the same amount as any inserted code will need to be
+ // in order to work correctly in that context.
+ //
+ // The code generator that generates the initial file and the one which
+ // inserts into it must both run as part of a single invocation of protoc.
+ // Code generators are executed in the order in which they appear on the
+ // command line.
+ //
+ // If |insertion_point| is present, |name| must also be present.
+ InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"`
+ // The file contents.
+ Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} }
+func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) }
+func (*CodeGeneratorResponse_File) ProtoMessage() {}
+func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
+
+func (m *CodeGeneratorResponse_File) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *CodeGeneratorResponse_File) GetInsertionPoint() string {
+ if m != nil && m.InsertionPoint != nil {
+ return *m.InsertionPoint
+ }
+ return ""
+}
+
+func (m *CodeGeneratorResponse_File) GetContent() string {
+ if m != nil && m.Content != nil {
+ return *m.Content
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version")
+ proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest")
+ proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse")
+ proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File")
+}
+
+func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 417 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41,
+ 0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2,
+ 0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30,
+ 0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa,
+ 0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91,
+ 0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63,
+ 0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb,
+ 0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55,
+ 0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8,
+ 0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1,
+ 0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f,
+ 0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d,
+ 0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2,
+ 0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a,
+ 0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2,
+ 0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d,
+ 0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda,
+ 0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed,
+ 0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34,
+ 0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79,
+ 0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45,
+ 0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4,
+ 0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e,
+ 0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92,
+ 0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d,
+ 0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00,
+ 0x00,
+}
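
One detail worth noticing in the generated accessors above: every getter checks both the receiver and the field pointer for nil before dereferencing, so proto2 optional fields can be read through a possibly-nil message without explicit guards. A small self-contained illustration (the parameter value is arbitrary):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
    )

    func main() {
        // A nil request is still safe to read through the generated getters.
        var req *plugin.CodeGeneratorRequest
        fmt.Println(req.GetParameter())                  // ""
        fmt.Println(req.GetCompilerVersion().GetMajor()) // 0

        req = &plugin.CodeGeneratorRequest{Parameter: proto.String("plugins=grpc")}
        fmt.Println(req.GetParameter()) // plugins=grpc
    }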
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
new file mode 100644
index 0000000..8953d0f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
@@ -0,0 +1,83 @@
+// Code generated by protoc-gen-go.
+// source: google/protobuf/compiler/plugin.proto
+// DO NOT EDIT!
+
+package google_protobuf_compiler
+
+import proto "github.com/golang/protobuf/proto"
+import "math"
+import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+// Reference proto and math imports to suppress error if they are not otherwise used.
+var _ = proto.GetString
+var _ = math.Inf
+
+type CodeGeneratorRequest struct {
+ FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"`
+ Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
+ ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} }
+func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorRequest) ProtoMessage() {}
+
+func (this *CodeGeneratorRequest) GetParameter() string {
+ if this != nil && this.Parameter != nil {
+ return *this.Parameter
+ }
+ return ""
+}
+
+type CodeGeneratorResponse struct {
+ Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
+ File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} }
+func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorResponse) ProtoMessage() {}
+
+func (this *CodeGeneratorResponse) GetError() string {
+ if this != nil && this.Error != nil {
+ return *this.Error
+ }
+ return ""
+}
+
+type CodeGeneratorResponse_File struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"`
+ Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} }
+func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorResponse_File) ProtoMessage() {}
+
+func (this *CodeGeneratorResponse_File) GetName() string {
+ if this != nil && this.Name != nil {
+ return *this.Name
+ }
+ return ""
+}
+
+func (this *CodeGeneratorResponse_File) GetInsertionPoint() string {
+ if this != nil && this.InsertionPoint != nil {
+ return *this.InsertionPoint
+ }
+ return ""
+}
+
+func (this *CodeGeneratorResponse_File) GetContent() string {
+ if this != nil && this.Content != nil {
+ return *this.Content
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
new file mode 100644
index 0000000..5b55745
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
@@ -0,0 +1,167 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//
+// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to
+// change.
+//
+// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is
+// just a program that reads a CodeGeneratorRequest from stdin and writes a
+// CodeGeneratorResponse to stdout.
+//
+// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
+// of dealing with the raw protocol defined here.
+//
+// A plugin executable needs only to be placed somewhere in the path. The
+// plugin should be named "protoc-gen-$NAME", and will then be used when the
+// flag "--${NAME}_out" is passed to protoc.
+
+syntax = "proto2";
+package google.protobuf.compiler;
+option java_package = "com.google.protobuf.compiler";
+option java_outer_classname = "PluginProtos";
+
+option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go";
+
+import "google/protobuf/descriptor.proto";
+
+// The version number of protocol compiler.
+message Version {
+ optional int32 major = 1;
+ optional int32 minor = 2;
+ optional int32 patch = 3;
+ // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+ // be empty for mainline stable releases.
+ optional string suffix = 4;
+}
+
+// An encoded CodeGeneratorRequest is written to the plugin's stdin.
+message CodeGeneratorRequest {
+ // The .proto files that were explicitly listed on the command-line. The
+ // code generator should generate code only for these files. Each file's
+ // descriptor will be included in proto_file, below.
+ repeated string file_to_generate = 1;
+
+ // The generator parameter passed on the command-line.
+ optional string parameter = 2;
+
+ // FileDescriptorProtos for all files in files_to_generate and everything
+ // they import. The files will appear in topological order, so each file
+ // appears before any file that imports it.
+ //
+ // protoc guarantees that all proto_files will be written after
+ // the fields above, even though this is not technically guaranteed by the
+ // protobuf wire format. This theoretically could allow a plugin to stream
+ // in the FileDescriptorProtos and handle them one by one rather than read
+ // the entire set into memory at once. However, as of this writing, this
+ // is not similarly optimized on protoc's end -- it will store all fields in
+ // memory at once before sending them to the plugin.
+ //
+ // Type names of fields and extensions in the FileDescriptorProto are always
+ // fully qualified.
+ repeated FileDescriptorProto proto_file = 15;
+
+ // The version number of protocol compiler.
+ optional Version compiler_version = 3;
+
+}
+
+// The plugin writes an encoded CodeGeneratorResponse to stdout.
+message CodeGeneratorResponse {
+ // Error message. If non-empty, code generation failed. The plugin process
+ // should exit with status code zero even if it reports an error in this way.
+ //
+ // This should be used to indicate errors in .proto files which prevent the
+ // code generator from generating correct code. Errors which indicate a
+ // problem in protoc itself -- such as the input CodeGeneratorRequest being
+ // unparseable -- should be reported by writing a message to stderr and
+ // exiting with a non-zero status code.
+ optional string error = 1;
+
+ // Represents a single generated file.
+ message File {
+ // The file name, relative to the output directory. The name must not
+ // contain "." or ".." components and must be relative, not be absolute (so,
+ // the file cannot lie outside the output directory). "/" must be used as
+ // the path separator, not "\".
+ //
+ // If the name is omitted, the content will be appended to the previous
+ // file. This allows the generator to break large files into small chunks,
+ // and allows the generated text to be streamed back to protoc so that large
+ // files need not reside completely in memory at one time. Note that as of
+ // this writing protoc does not optimize for this -- it will read the entire
+ // CodeGeneratorResponse before writing files to disk.
+ optional string name = 1;
+
+ // If non-empty, indicates that the named file should already exist, and the
+ // content here is to be inserted into that file at a defined insertion
+ // point. This feature allows a code generator to extend the output
+ // produced by another code generator. The original generator may provide
+ // insertion points by placing special annotations in the file that look
+ // like:
+ // @@protoc_insertion_point(NAME)
+ // The annotation can have arbitrary text before and after it on the line,
+ // which allows it to be placed in a comment. NAME should be replaced with
+ // an identifier naming the point -- this is what other generators will use
+ // as the insertion_point. Code inserted at this point will be placed
+ // immediately above the line containing the insertion point (thus multiple
+ // insertions to the same point will come out in the order they were added).
+ // The double-@ is intended to make it unlikely that the generated code
+ // could contain things that look like insertion points by accident.
+ //
+ // For example, the C++ code generator places the following line in the
+ // .pb.h files that it generates:
+ // // @@protoc_insertion_point(namespace_scope)
+ // This line appears within the scope of the file's package namespace, but
+ // outside of any particular class. Another plugin can then specify the
+ // insertion_point "namespace_scope" to generate additional classes or
+ // other declarations that should be placed in this scope.
+ //
+ // Note that if the line containing the insertion point begins with
+ // whitespace, the same whitespace will be added to every line of the
+ // inserted text. This is useful for languages like Python, where
+ // indentation matters. In these languages, the insertion point comment
+ // should be indented the same amount as any inserted code will need to be
+ // in order to work correctly in that context.
+ //
+ // The code generator that generates the initial file and the one which
+ // inserts into it must both run as part of a single invocation of protoc.
+ // Code generators are executed in the order in which they appear on the
+ // command line.
+ //
+ // If |insertion_point| is present, |name| must also be present.
+ optional string insertion_point = 2;
+
+ // The file contents.
+ optional string content = 15;
+ }
+ repeated File file = 15;
+}
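
The insertion_point mechanism documented above is how one generator extends another's output within a single protoc run: the second plugin names an existing file plus a point, and protoc splices the content in just above the @@protoc_insertion_point(NAME) line. Below is a hedged sketch of the response such a follow-up plugin would build; the file name and the "imports" point are hypothetical, chosen only to show the fields involved.

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
    )

    func main() {
        // Splice an extra import into a file produced earlier in the same
        // protoc invocation; both generators must run in that one invocation.
        resp := &plugin.CodeGeneratorResponse{
            File: []*plugin.CodeGeneratorResponse_File{{
                Name:           proto.String("example.pb.go"),
                InsertionPoint: proto.String("imports"),
                Content:        proto.String(`import _ "example.com/sideeffect"`),
            }},
        }
        fmt.Println(proto.CompactTextString(resp))
    }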
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile
new file mode 100644
index 0000000..a0bf9fe
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile
@@ -0,0 +1,73 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+all:
+ @echo run make test
+
+include ../../Make.protobuf
+
+test: golden testbuild
+
+#test: golden testbuild extension_test
+# ./extension_test
+# @echo PASS
+
+my_test/test.pb.go: my_test/test.proto
+ protoc --go_out=Mmulti/multi1.proto=github.com/golang/protobuf/protoc-gen-go/testdata/multi:. $<
+
+golden:
+ make -B my_test/test.pb.go
+ sed -i -e '/return.*fileDescriptor/d' my_test/test.pb.go
+ sed -i -e '/^var fileDescriptor/,/^}/d' my_test/test.pb.go
+ sed -i -e '/proto.RegisterFile.*fileDescriptor/d' my_test/test.pb.go
+ gofmt -w my_test/test.pb.go
+ diff -w my_test/test.pb.go my_test/test.pb.go.golden
+
+nuke: clean
+
+testbuild: regenerate
+ go test
+
+regenerate:
+ # Invoke protoc once to generate three independent .pb.go files in the same package.
+ protoc --go_out=. multi/multi1.proto multi/multi2.proto multi/multi3.proto
+
+#extension_test: extension_test.$O
+# $(LD) -L. -o $@ $<
+
+#multi.a: multi3.pb.$O multi2.pb.$O multi1.pb.$O
+# rm -f multi.a
+# $(QUOTED_GOBIN)/gopack grc $@ $<
+
+#test.pb.go: imp.pb.go
+#multi1.pb.go: multi2.pb.go multi3.pb.go
+#main.$O: imp.pb.$O test.pb.$O multi.a
+#extension_test.$O: extension_base.pb.$O extension_extra.pb.$O extension_user.pb.$O
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto
new file mode 100644
index 0000000..94acfc1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto
@@ -0,0 +1,46 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+
+package extension_base;
+
+message BaseMessage {
+ optional int32 height = 1;
+ extensions 4 to 9;
+ extensions 16 to max;
+}
+
+// Another message that may be extended, using message_set_wire_format.
+message OldStyleMessage {
+ option message_set_wire_format = true;
+ extensions 100 to max;
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto
new file mode 100644
index 0000000..fca7f60
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto
@@ -0,0 +1,38 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+
+package extension_extra;
+
+message ExtraMessage {
+ optional int32 width = 1;
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go
new file mode 100644
index 0000000..86e9c11
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go
@@ -0,0 +1,210 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that we can use protocol buffers that use extensions.
+
+package testdata
+
+/*
+
+import (
+ "bytes"
+ "regexp"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ base "extension_base.pb"
+ user "extension_user.pb"
+)
+
+func TestSingleFieldExtension(t *testing.T) {
+ bm := &base.BaseMessage{
+ Height: proto.Int32(178),
+ }
+
+ // Use extension within scope of another type.
+ vol := proto.Uint32(11)
+ err := proto.SetExtension(bm, user.E_LoudMessage_Volume, vol)
+ if err != nil {
+ t.Fatal("Failed setting extension:", err)
+ }
+ buf, err := proto.Marshal(bm)
+ if err != nil {
+ t.Fatal("Failed encoding message with extension:", err)
+ }
+ bm_new := new(base.BaseMessage)
+ if err := proto.Unmarshal(buf, bm_new); err != nil {
+ t.Fatal("Failed decoding message with extension:", err)
+ }
+ if !proto.HasExtension(bm_new, user.E_LoudMessage_Volume) {
+ t.Fatal("Decoded message didn't contain extension.")
+ }
+ vol_out, err := proto.GetExtension(bm_new, user.E_LoudMessage_Volume)
+ if err != nil {
+ t.Fatal("Failed getting extension:", err)
+ }
+ if v := vol_out.(*uint32); *v != *vol {
+ t.Errorf("vol_out = %v, expected %v", *v, *vol)
+ }
+ proto.ClearExtension(bm_new, user.E_LoudMessage_Volume)
+ if proto.HasExtension(bm_new, user.E_LoudMessage_Volume) {
+ t.Fatal("Failed clearing extension.")
+ }
+}
+
+func TestMessageExtension(t *testing.T) {
+ bm := &base.BaseMessage{
+ Height: proto.Int32(179),
+ }
+
+ // Use extension that is itself a message.
+ um := &user.UserMessage{
+ Name: proto.String("Dave"),
+ Rank: proto.String("Major"),
+ }
+ err := proto.SetExtension(bm, user.E_LoginMessage_UserMessage, um)
+ if err != nil {
+ t.Fatal("Failed setting extension:", err)
+ }
+ buf, err := proto.Marshal(bm)
+ if err != nil {
+ t.Fatal("Failed encoding message with extension:", err)
+ }
+ bm_new := new(base.BaseMessage)
+ if err := proto.Unmarshal(buf, bm_new); err != nil {
+ t.Fatal("Failed decoding message with extension:", err)
+ }
+ if !proto.HasExtension(bm_new, user.E_LoginMessage_UserMessage) {
+ t.Fatal("Decoded message didn't contain extension.")
+ }
+ um_out, err := proto.GetExtension(bm_new, user.E_LoginMessage_UserMessage)
+ if err != nil {
+ t.Fatal("Failed getting extension:", err)
+ }
+ if n := um_out.(*user.UserMessage).Name; *n != *um.Name {
+ t.Errorf("um_out.Name = %q, expected %q", *n, *um.Name)
+ }
+ if r := um_out.(*user.UserMessage).Rank; *r != *um.Rank {
+ t.Errorf("um_out.Rank = %q, expected %q", *r, *um.Rank)
+ }
+ proto.ClearExtension(bm_new, user.E_LoginMessage_UserMessage)
+ if proto.HasExtension(bm_new, user.E_LoginMessage_UserMessage) {
+ t.Fatal("Failed clearing extension.")
+ }
+}
+
+func TestTopLevelExtension(t *testing.T) {
+ bm := &base.BaseMessage{
+ Height: proto.Int32(179),
+ }
+
+ width := proto.Int32(17)
+ err := proto.SetExtension(bm, user.E_Width, width)
+ if err != nil {
+ t.Fatal("Failed setting extension:", err)
+ }
+ buf, err := proto.Marshal(bm)
+ if err != nil {
+ t.Fatal("Failed encoding message with extension:", err)
+ }
+ bm_new := new(base.BaseMessage)
+ if err := proto.Unmarshal(buf, bm_new); err != nil {
+ t.Fatal("Failed decoding message with extension:", err)
+ }
+ if !proto.HasExtension(bm_new, user.E_Width) {
+ t.Fatal("Decoded message didn't contain extension.")
+ }
+ width_out, err := proto.GetExtension(bm_new, user.E_Width)
+ if err != nil {
+ t.Fatal("Failed getting extension:", err)
+ }
+ if w := width_out.(*int32); *w != *width {
+ t.Errorf("width_out = %v, expected %v", *w, *width)
+ }
+ proto.ClearExtension(bm_new, user.E_Width)
+ if proto.HasExtension(bm_new, user.E_Width) {
+ t.Fatal("Failed clearing extension.")
+ }
+}
+
+func TestMessageSetWireFormat(t *testing.T) {
+ osm := new(base.OldStyleMessage)
+ osp := &user.OldStyleParcel{
+ Name: proto.String("Dave"),
+ Height: proto.Int32(178),
+ }
+
+ err := proto.SetExtension(osm, user.E_OldStyleParcel_MessageSetExtension, osp)
+ if err != nil {
+ t.Fatal("Failed setting extension:", err)
+ }
+
+ buf, err := proto.Marshal(osm)
+ if err != nil {
+ t.Fatal("Failed encoding message:", err)
+ }
+
+ // Data generated from Python implementation.
+ expected := []byte{
+ 11, 16, 209, 15, 26, 9, 10, 4, 68, 97, 118, 101, 16, 178, 1, 12,
+ }
+
+ if !bytes.Equal(expected, buf) {
+ t.Errorf("Encoding mismatch.\nwant %+v\n got %+v", expected, buf)
+ }
+
+ // Check that it is restored correctly.
+ osm = new(base.OldStyleMessage)
+ if err := proto.Unmarshal(buf, osm); err != nil {
+ t.Fatal("Failed decoding message:", err)
+ }
+ osp_out, err := proto.GetExtension(osm, user.E_OldStyleParcel_MessageSetExtension)
+ if err != nil {
+ t.Fatal("Failed getting extension:", err)
+ }
+ osp = osp_out.(*user.OldStyleParcel)
+ if *osp.Name != "Dave" || *osp.Height != 178 {
+ t.Errorf("Retrieved extension from decoded message is not correct: %+v", osp)
+ }
+}
+
+func main() {
+ // simpler than rigging up gotest
+ testing.Main(regexp.MatchString, []testing.InternalTest{
+ {"TestSingleFieldExtension", TestSingleFieldExtension},
+ {"TestMessageExtension", TestMessageExtension},
+ {"TestTopLevelExtension", TestTopLevelExtension},
+ },
+ []testing.InternalBenchmark{},
+ []testing.InternalExample{})
+}
+
+*/
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto
new file mode 100644
index 0000000..ff65873
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto
@@ -0,0 +1,100 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+
+import "extension_base.proto";
+import "extension_extra.proto";
+
+package extension_user;
+
+message UserMessage {
+ optional string name = 1;
+ optional string rank = 2;
+}
+
+// Extend with a message
+extend extension_base.BaseMessage {
+ optional UserMessage user_message = 5;
+}
+
+// Extend with a foreign message
+extend extension_base.BaseMessage {
+ optional extension_extra.ExtraMessage extra_message = 9;
+}
+
+// Extend with some primitive types
+extend extension_base.BaseMessage {
+ optional int32 width = 6;
+ optional int64 area = 7;
+}
+
+// Extend inside the scope of another type
+message LoudMessage {
+ extend extension_base.BaseMessage {
+ optional uint32 volume = 8;
+ }
+ extensions 100 to max;
+}
+
+// Extend inside the scope of another type, using a message.
+message LoginMessage {
+ extend extension_base.BaseMessage {
+ optional UserMessage user_message = 16;
+ }
+}
+
+// Extend with a repeated field
+extend extension_base.BaseMessage {
+ repeated Detail detail = 17;
+}
+
+message Detail {
+ optional string color = 1;
+}
+
+// An extension of an extension
+message Announcement {
+ optional string words = 1;
+ extend LoudMessage {
+ optional Announcement loud_ext = 100;
+ }
+}
+
+// Something that can be put in a message set.
+message OldStyleParcel {
+ extend extension_base.OldStyleMessage {
+ optional OldStyleParcel message_set_extension = 2001;
+ }
+
+ required string name = 1;
+ optional int32 height = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto
new file mode 100644
index 0000000..b8bc41a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto
@@ -0,0 +1,59 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package grpc.testing;
+
+message SimpleRequest {
+}
+
+message SimpleResponse {
+}
+
+message StreamMsg {
+}
+
+message StreamMsg2 {
+}
+
+service Test {
+ rpc UnaryCall(SimpleRequest) returns (SimpleResponse);
+
+ // This RPC streams from the server only.
+ rpc Downstream(SimpleRequest) returns (stream StreamMsg);
+
+ // This RPC streams from the client.
+ rpc Upstream(stream StreamMsg) returns (SimpleResponse);
+
+ // This one streams in both directions.
+ rpc Bidi(stream StreamMsg) returns (stream StreamMsg2);
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden
new file mode 100644
index 0000000..784a4f8
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden
@@ -0,0 +1,113 @@
+// Code generated by protoc-gen-go.
+// source: imp.proto
+// DO NOT EDIT!
+
+package imp
+
+import proto "github.com/golang/protobuf/proto"
+import "math"
+import "os"
+import imp1 "imp2.pb"
+
+// Reference proto & math imports to suppress error if they are not otherwise used.
+var _ = proto.GetString
+var _ = math.Inf
+
+// Types from public import imp2.proto
+type PubliclyImportedMessage imp1.PubliclyImportedMessage
+
+func (this *PubliclyImportedMessage) Reset() { (*imp1.PubliclyImportedMessage)(this).Reset() }
+func (this *PubliclyImportedMessage) String() string {
+ return (*imp1.PubliclyImportedMessage)(this).String()
+}
+
+// PubliclyImportedMessage from public import imp.proto
+
+type ImportedMessage_Owner int32
+
+const (
+ ImportedMessage_DAVE ImportedMessage_Owner = 1
+ ImportedMessage_MIKE ImportedMessage_Owner = 2
+)
+
+var ImportedMessage_Owner_name = map[int32]string{
+ 1: "DAVE",
+ 2: "MIKE",
+}
+var ImportedMessage_Owner_value = map[string]int32{
+ "DAVE": 1,
+ "MIKE": 2,
+}
+
+// NewImportedMessage_Owner is deprecated. Use x.Enum() instead.
+func NewImportedMessage_Owner(x ImportedMessage_Owner) *ImportedMessage_Owner {
+ e := ImportedMessage_Owner(x)
+ return &e
+}
+func (x ImportedMessage_Owner) Enum() *ImportedMessage_Owner {
+ p := new(ImportedMessage_Owner)
+ *p = x
+ return p
+}
+func (x ImportedMessage_Owner) String() string {
+ return proto.EnumName(ImportedMessage_Owner_name, int32(x))
+}
+
+type ImportedMessage struct {
+ Field *int64 `protobuf:"varint,1,req,name=field" json:"field,omitempty"`
+ XXX_extensions map[int32][]byte `json:",omitempty"`
+ XXX_unrecognized []byte `json:",omitempty"`
+}
+
+func (this *ImportedMessage) Reset() { *this = ImportedMessage{} }
+func (this *ImportedMessage) String() string { return proto.CompactTextString(this) }
+
+var extRange_ImportedMessage = []proto.ExtensionRange{
+ proto.ExtensionRange{90, 100},
+}
+
+func (*ImportedMessage) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_ImportedMessage
+}
+func (this *ImportedMessage) ExtensionMap() map[int32][]byte {
+ if this.XXX_extensions == nil {
+ this.XXX_extensions = make(map[int32][]byte)
+ }
+ return this.XXX_extensions
+}
+
+type ImportedExtendable struct {
+ XXX_extensions map[int32][]byte `json:",omitempty"`
+ XXX_unrecognized []byte `json:",omitempty"`
+}
+
+func (this *ImportedExtendable) Reset() { *this = ImportedExtendable{} }
+func (this *ImportedExtendable) String() string { return proto.CompactTextString(this) }
+
+func (this *ImportedExtendable) Marshal() ([]byte, error) {
+ return proto.MarshalMessageSet(this.ExtensionMap())
+}
+func (this *ImportedExtendable) Unmarshal(buf []byte) error {
+ return proto.UnmarshalMessageSet(buf, this.ExtensionMap())
+}
+// ensure ImportedExtendable satisfies proto.Marshaler and proto.Unmarshaler
+var _ proto.Marshaler = (*ImportedExtendable)(nil)
+var _ proto.Unmarshaler = (*ImportedExtendable)(nil)
+
+var extRange_ImportedExtendable = []proto.ExtensionRange{
+ proto.ExtensionRange{100, 536870911},
+}
+
+func (*ImportedExtendable) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_ImportedExtendable
+}
+func (this *ImportedExtendable) ExtensionMap() map[int32][]byte {
+ if this.XXX_extensions == nil {
+ this.XXX_extensions = make(map[int32][]byte)
+ }
+ return this.XXX_extensions
+}
+
+func init() {
+ proto.RegisterEnum("imp.ImportedMessage_Owner", ImportedMessage_Owner_name, ImportedMessage_Owner_value)
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto
new file mode 100644
index 0000000..156e078
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto
@@ -0,0 +1,70 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+
+package imp;
+
+import "imp2.proto";
+import "imp3.proto";
+
+message ImportedMessage {
+ required int64 field = 1;
+
+ // The forwarded getters for these fields are fiddly to get right.
+ optional ImportedMessage2 local_msg = 2;
+ optional ForeignImportedMessage foreign_msg = 3; // in imp3.proto
+ optional Owner enum_field = 4;
+ oneof union {
+ int32 state = 9;
+ }
+
+ repeated string name = 5;
+ repeated Owner boss = 6;
+ repeated ImportedMessage2 memo = 7;
+
+ map<string, ImportedMessage2> msg_map = 8;
+
+ enum Owner {
+ DAVE = 1;
+ MIKE = 2;
+ }
+
+ extensions 90 to 100;
+}
+
+message ImportedMessage2 {
+}
+
+message ImportedExtendable {
+ option message_set_wire_format = true;
+ extensions 100 to max;
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto
new file mode 100644
index 0000000..3bb0632
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto
@@ -0,0 +1,43 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+
+package imp;
+
+message PubliclyImportedMessage {
+ optional int64 field = 1;
+}
+
+enum PubliclyImportedEnum {
+ GLASSES = 1;
+ HAIR = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto
new file mode 100644
index 0000000..58fc759
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto
@@ -0,0 +1,38 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+
+package imp;
+
+message ForeignImportedMessage {
+ optional string tuber = 1;
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go
new file mode 100644
index 0000000..f9b5ccf
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go
@@ -0,0 +1,46 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A simple binary to link together the protocol buffers in this test.
+
+package testdata
+
+import (
+ "testing"
+
+ mytestpb "./my_test"
+ multipb "github.com/golang/protobuf/protoc-gen-go/testdata/multi"
+)
+
+func TestLink(t *testing.T) {
+ _ = &multipb.Multi1{}
+ _ = &mytestpb.Request{}
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto
new file mode 100644
index 0000000..0da6e0a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto
@@ -0,0 +1,44 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+
+import "multi/multi2.proto";
+import "multi/multi3.proto";
+
+package multitest;
+
+message Multi1 {
+ required Multi2 multi2 = 1;
+ optional Multi2.Color color = 2;
+ optional Multi3.HatType hat_type = 3;
+}
+
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto
new file mode 100644
index 0000000..e6bfc71
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto
@@ -0,0 +1,46 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+
+package multitest;
+
+message Multi2 {
+ required int32 required_value = 1;
+
+ enum Color {
+ BLUE = 1;
+ GREEN = 2;
+ RED = 3;
+ };
+ optional Color color = 2;
+}
+
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto
new file mode 100644
index 0000000..146c255
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto
@@ -0,0 +1,43 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+
+package multitest;
+
+message Multi3 {
+ enum HatType {
+ FEDORA = 1;
+ FEZ = 2;
+ };
+ optional HatType hat_type = 1;
+}
+
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go
new file mode 100644
index 0000000..1954e3f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go
@@ -0,0 +1,870 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: my_test/test.proto
+
+/*
+Package my_test is a generated protocol buffer package.
+
+This package holds interesting messages.
+
+It is generated from these files:
+ my_test/test.proto
+
+It has these top-level messages:
+ Request
+ Reply
+ OtherBase
+ ReplyExtensions
+ OtherReplyExtensions
+ OldReply
+ Communique
+*/
+package my_test
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/golang/protobuf/protoc-gen-go/testdata/multi"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type HatType int32
+
+const (
+ // deliberately skipping 0
+ HatType_FEDORA HatType = 1
+ HatType_FEZ HatType = 2
+)
+
+var HatType_name = map[int32]string{
+ 1: "FEDORA",
+ 2: "FEZ",
+}
+var HatType_value = map[string]int32{
+ "FEDORA": 1,
+ "FEZ": 2,
+}
+
+func (x HatType) Enum() *HatType {
+ p := new(HatType)
+ *p = x
+ return p
+}
+func (x HatType) String() string {
+ return proto.EnumName(HatType_name, int32(x))
+}
+func (x *HatType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(HatType_value, data, "HatType")
+ if err != nil {
+ return err
+ }
+ *x = HatType(value)
+ return nil
+}
+
+// This enum represents days of the week.
+type Days int32
+
+const (
+ Days_MONDAY Days = 1
+ Days_TUESDAY Days = 2
+ Days_LUNDI Days = 1
+)
+
+var Days_name = map[int32]string{
+ 1: "MONDAY",
+ 2: "TUESDAY",
+ // Duplicate value: 1: "LUNDI",
+}
+var Days_value = map[string]int32{
+ "MONDAY": 1,
+ "TUESDAY": 2,
+ "LUNDI": 1,
+}
+
+func (x Days) Enum() *Days {
+ p := new(Days)
+ *p = x
+ return p
+}
+func (x Days) String() string {
+ return proto.EnumName(Days_name, int32(x))
+}
+func (x *Days) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Days_value, data, "Days")
+ if err != nil {
+ return err
+ }
+ *x = Days(value)
+ return nil
+}
+
+type Request_Color int32
+
+const (
+ Request_RED Request_Color = 0
+ Request_GREEN Request_Color = 1
+ Request_BLUE Request_Color = 2
+)
+
+var Request_Color_name = map[int32]string{
+ 0: "RED",
+ 1: "GREEN",
+ 2: "BLUE",
+}
+var Request_Color_value = map[string]int32{
+ "RED": 0,
+ "GREEN": 1,
+ "BLUE": 2,
+}
+
+func (x Request_Color) Enum() *Request_Color {
+ p := new(Request_Color)
+ *p = x
+ return p
+}
+func (x Request_Color) String() string {
+ return proto.EnumName(Request_Color_name, int32(x))
+}
+func (x *Request_Color) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Request_Color_value, data, "Request_Color")
+ if err != nil {
+ return err
+ }
+ *x = Request_Color(value)
+ return nil
+}
+
+type Reply_Entry_Game int32
+
+const (
+ Reply_Entry_FOOTBALL Reply_Entry_Game = 1
+ Reply_Entry_TENNIS Reply_Entry_Game = 2
+)
+
+var Reply_Entry_Game_name = map[int32]string{
+ 1: "FOOTBALL",
+ 2: "TENNIS",
+}
+var Reply_Entry_Game_value = map[string]int32{
+ "FOOTBALL": 1,
+ "TENNIS": 2,
+}
+
+func (x Reply_Entry_Game) Enum() *Reply_Entry_Game {
+ p := new(Reply_Entry_Game)
+ *p = x
+ return p
+}
+func (x Reply_Entry_Game) String() string {
+ return proto.EnumName(Reply_Entry_Game_name, int32(x))
+}
+func (x *Reply_Entry_Game) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Reply_Entry_Game_value, data, "Reply_Entry_Game")
+ if err != nil {
+ return err
+ }
+ *x = Reply_Entry_Game(value)
+ return nil
+}
+
+// This is a message that might be sent somewhere.
+type Request struct {
+ Key []int64 `protobuf:"varint,1,rep,name=key" json:"key,omitempty"`
+ // optional imp.ImportedMessage imported_message = 2;
+ Hue *Request_Color `protobuf:"varint,3,opt,name=hue,enum=my.test.Request_Color" json:"hue,omitempty"`
+ Hat *HatType `protobuf:"varint,4,opt,name=hat,enum=my.test.HatType,def=1" json:"hat,omitempty"`
+ // optional imp.ImportedMessage.Owner owner = 6;
+ Deadline *float32 `protobuf:"fixed32,7,opt,name=deadline,def=inf" json:"deadline,omitempty"`
+ Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"`
+ // This is a map field. It will generate map[int32]string.
+ NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ // This is a map field whose value type is a message.
+ MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Reset_ *int32 `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"`
+ // This field should not conflict with any getters.
+ GetKey_ *string `protobuf:"bytes,16,opt,name=get_key,json=getKey" json:"get_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+
+const Default_Request_Hat HatType = HatType_FEDORA
+
+var Default_Request_Deadline float32 = float32(math.Inf(1))
+
+func (m *Request) GetKey() []int64 {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *Request) GetHue() Request_Color {
+ if m != nil && m.Hue != nil {
+ return *m.Hue
+ }
+ return Request_RED
+}
+
+func (m *Request) GetHat() HatType {
+ if m != nil && m.Hat != nil {
+ return *m.Hat
+ }
+ return Default_Request_Hat
+}
+
+func (m *Request) GetDeadline() float32 {
+ if m != nil && m.Deadline != nil {
+ return *m.Deadline
+ }
+ return Default_Request_Deadline
+}
+
+func (m *Request) GetSomegroup() *Request_SomeGroup {
+ if m != nil {
+ return m.Somegroup
+ }
+ return nil
+}
+
+func (m *Request) GetNameMapping() map[int32]string {
+ if m != nil {
+ return m.NameMapping
+ }
+ return nil
+}
+
+func (m *Request) GetMsgMapping() map[int64]*Reply {
+ if m != nil {
+ return m.MsgMapping
+ }
+ return nil
+}
+
+func (m *Request) GetReset_() int32 {
+ if m != nil && m.Reset_ != nil {
+ return *m.Reset_
+ }
+ return 0
+}
+
+func (m *Request) GetGetKey_() string {
+ if m != nil && m.GetKey_ != nil {
+ return *m.GetKey_
+ }
+ return ""
+}
+
+type Request_SomeGroup struct {
+ GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Request_SomeGroup) Reset() { *m = Request_SomeGroup{} }
+func (m *Request_SomeGroup) String() string { return proto.CompactTextString(m) }
+func (*Request_SomeGroup) ProtoMessage() {}
+
+func (m *Request_SomeGroup) GetGroupField() int32 {
+ if m != nil && m.GroupField != nil {
+ return *m.GroupField
+ }
+ return 0
+}
+
+type Reply struct {
+ Found []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"`
+ CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Reply) Reset() { *m = Reply{} }
+func (m *Reply) String() string { return proto.CompactTextString(m) }
+func (*Reply) ProtoMessage() {}
+
+var extRange_Reply = []proto.ExtensionRange{
+ {100, 536870911},
+}
+
+func (*Reply) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_Reply
+}
+
+func (m *Reply) GetFound() []*Reply_Entry {
+ if m != nil {
+ return m.Found
+ }
+ return nil
+}
+
+func (m *Reply) GetCompactKeys() []int32 {
+ if m != nil {
+ return m.CompactKeys
+ }
+ return nil
+}
+
+type Reply_Entry struct {
+ KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"`
+ Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"`
+ XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=MyFieldName2" json:"_my_field_name_2,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Reply_Entry) Reset() { *m = Reply_Entry{} }
+func (m *Reply_Entry) String() string { return proto.CompactTextString(m) }
+func (*Reply_Entry) ProtoMessage() {}
+
+const Default_Reply_Entry_Value int64 = 7
+
+func (m *Reply_Entry) GetKeyThatNeeds_1234Camel_CasIng() int64 {
+ if m != nil && m.KeyThatNeeds_1234Camel_CasIng != nil {
+ return *m.KeyThatNeeds_1234Camel_CasIng
+ }
+ return 0
+}
+
+func (m *Reply_Entry) GetValue() int64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return Default_Reply_Entry_Value
+}
+
+func (m *Reply_Entry) GetXMyFieldName_2() int64 {
+ if m != nil && m.XMyFieldName_2 != nil {
+ return *m.XMyFieldName_2
+ }
+ return 0
+}
+
+type OtherBase struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OtherBase) Reset() { *m = OtherBase{} }
+func (m *OtherBase) String() string { return proto.CompactTextString(m) }
+func (*OtherBase) ProtoMessage() {}
+
+var extRange_OtherBase = []proto.ExtensionRange{
+ {100, 536870911},
+}
+
+func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_OtherBase
+}
+
+func (m *OtherBase) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type ReplyExtensions struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ReplyExtensions) Reset() { *m = ReplyExtensions{} }
+func (m *ReplyExtensions) String() string { return proto.CompactTextString(m) }
+func (*ReplyExtensions) ProtoMessage() {}
+
+var E_ReplyExtensions_Time = &proto.ExtensionDesc{
+ ExtendedType: (*Reply)(nil),
+ ExtensionType: (*float64)(nil),
+ Field: 101,
+ Name: "my.test.ReplyExtensions.time",
+ Tag: "fixed64,101,opt,name=time",
+ Filename: "my_test/test.proto",
+}
+
+var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{
+ ExtendedType: (*Reply)(nil),
+ ExtensionType: (*ReplyExtensions)(nil),
+ Field: 105,
+ Name: "my.test.ReplyExtensions.carrot",
+ Tag: "bytes,105,opt,name=carrot",
+ Filename: "my_test/test.proto",
+}
+
+var E_ReplyExtensions_Donut = &proto.ExtensionDesc{
+ ExtendedType: (*OtherBase)(nil),
+ ExtensionType: (*ReplyExtensions)(nil),
+ Field: 101,
+ Name: "my.test.ReplyExtensions.donut",
+ Tag: "bytes,101,opt,name=donut",
+ Filename: "my_test/test.proto",
+}
+
+type OtherReplyExtensions struct {
+ Key *int32 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OtherReplyExtensions) Reset() { *m = OtherReplyExtensions{} }
+func (m *OtherReplyExtensions) String() string { return proto.CompactTextString(m) }
+func (*OtherReplyExtensions) ProtoMessage() {}
+
+func (m *OtherReplyExtensions) GetKey() int32 {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return 0
+}
+
+type OldReply struct {
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OldReply) Reset() { *m = OldReply{} }
+func (m *OldReply) String() string { return proto.CompactTextString(m) }
+func (*OldReply) ProtoMessage() {}
+
+func (m *OldReply) Marshal() ([]byte, error) {
+ return proto.MarshalMessageSet(&m.XXX_InternalExtensions)
+}
+func (m *OldReply) Unmarshal(buf []byte) error {
+ return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions)
+}
+func (m *OldReply) MarshalJSON() ([]byte, error) {
+ return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions)
+}
+func (m *OldReply) UnmarshalJSON(buf []byte) error {
+ return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions)
+}
+
+// ensure OldReply satisfies proto.Marshaler and proto.Unmarshaler
+var _ proto.Marshaler = (*OldReply)(nil)
+var _ proto.Unmarshaler = (*OldReply)(nil)
+
+var extRange_OldReply = []proto.ExtensionRange{
+ {100, 2147483646},
+}
+
+func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_OldReply
+}
+
+type Communique struct {
+ MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"`
+ // This is a oneof, called "union".
+ //
+ // Types that are valid to be assigned to Union:
+ // *Communique_Number
+ // *Communique_Name
+ // *Communique_Data
+ // *Communique_TempC
+ // *Communique_Height
+ // *Communique_Today
+ // *Communique_Maybe
+ // *Communique_Delta_
+ // *Communique_Msg
+ // *Communique_Somegroup
+ Union isCommunique_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Communique) Reset() { *m = Communique{} }
+func (m *Communique) String() string { return proto.CompactTextString(m) }
+func (*Communique) ProtoMessage() {}
+
+type isCommunique_Union interface {
+ isCommunique_Union()
+}
+
+type Communique_Number struct {
+ Number int32 `protobuf:"varint,5,opt,name=number,oneof"`
+}
+type Communique_Name struct {
+ Name string `protobuf:"bytes,6,opt,name=name,oneof"`
+}
+type Communique_Data struct {
+ Data []byte `protobuf:"bytes,7,opt,name=data,oneof"`
+}
+type Communique_TempC struct {
+ TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"`
+}
+type Communique_Height struct {
+ Height float32 `protobuf:"fixed32,9,opt,name=height,oneof"`
+}
+type Communique_Today struct {
+ Today Days `protobuf:"varint,10,opt,name=today,enum=my.test.Days,oneof"`
+}
+type Communique_Maybe struct {
+ Maybe bool `protobuf:"varint,11,opt,name=maybe,oneof"`
+}
+type Communique_Delta_ struct {
+ Delta int32 `protobuf:"zigzag32,12,opt,name=delta,oneof"`
+}
+type Communique_Msg struct {
+ Msg *Reply `protobuf:"bytes,13,opt,name=msg,oneof"`
+}
+type Communique_Somegroup struct {
+ Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,json=somegroup,oneof"`
+}
+
+func (*Communique_Number) isCommunique_Union() {}
+func (*Communique_Name) isCommunique_Union() {}
+func (*Communique_Data) isCommunique_Union() {}
+func (*Communique_TempC) isCommunique_Union() {}
+func (*Communique_Height) isCommunique_Union() {}
+func (*Communique_Today) isCommunique_Union() {}
+func (*Communique_Maybe) isCommunique_Union() {}
+func (*Communique_Delta_) isCommunique_Union() {}
+func (*Communique_Msg) isCommunique_Union() {}
+func (*Communique_Somegroup) isCommunique_Union() {}
+
+func (m *Communique) GetUnion() isCommunique_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+}
+
+func (m *Communique) GetMakeMeCry() bool {
+ if m != nil && m.MakeMeCry != nil {
+ return *m.MakeMeCry
+ }
+ return false
+}
+
+func (m *Communique) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Communique_Number); ok {
+ return x.Number
+ }
+ return 0
+}
+
+func (m *Communique) GetName() string {
+ if x, ok := m.GetUnion().(*Communique_Name); ok {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *Communique) GetData() []byte {
+ if x, ok := m.GetUnion().(*Communique_Data); ok {
+ return x.Data
+ }
+ return nil
+}
+
+func (m *Communique) GetTempC() float64 {
+ if x, ok := m.GetUnion().(*Communique_TempC); ok {
+ return x.TempC
+ }
+ return 0
+}
+
+func (m *Communique) GetHeight() float32 {
+ if x, ok := m.GetUnion().(*Communique_Height); ok {
+ return x.Height
+ }
+ return 0
+}
+
+func (m *Communique) GetToday() Days {
+ if x, ok := m.GetUnion().(*Communique_Today); ok {
+ return x.Today
+ }
+ return Days_MONDAY
+}
+
+func (m *Communique) GetMaybe() bool {
+ if x, ok := m.GetUnion().(*Communique_Maybe); ok {
+ return x.Maybe
+ }
+ return false
+}
+
+func (m *Communique) GetDelta() int32 {
+ if x, ok := m.GetUnion().(*Communique_Delta_); ok {
+ return x.Delta
+ }
+ return 0
+}
+
+func (m *Communique) GetMsg() *Reply {
+ if x, ok := m.GetUnion().(*Communique_Msg); ok {
+ return x.Msg
+ }
+ return nil
+}
+
+func (m *Communique) GetSomegroup() *Communique_SomeGroup {
+ if x, ok := m.GetUnion().(*Communique_Somegroup); ok {
+ return x.Somegroup
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{
+ (*Communique_Number)(nil),
+ (*Communique_Name)(nil),
+ (*Communique_Data)(nil),
+ (*Communique_TempC)(nil),
+ (*Communique_Height)(nil),
+ (*Communique_Today)(nil),
+ (*Communique_Maybe)(nil),
+ (*Communique_Delta_)(nil),
+ (*Communique_Msg)(nil),
+ (*Communique_Somegroup)(nil),
+ }
+}
+
+func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Communique)
+ // union
+ switch x := m.Union.(type) {
+ case *Communique_Number:
+ b.EncodeVarint(5<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Number))
+ case *Communique_Name:
+ b.EncodeVarint(6<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.Name)
+ case *Communique_Data:
+ b.EncodeVarint(7<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.Data)
+ case *Communique_TempC:
+ b.EncodeVarint(8<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.TempC))
+ case *Communique_Height:
+ b.EncodeVarint(9<<3 | proto.WireFixed32)
+ b.EncodeFixed32(uint64(math.Float32bits(x.Height)))
+ case *Communique_Today:
+ b.EncodeVarint(10<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Today))
+ case *Communique_Maybe:
+ t := uint64(0)
+ if x.Maybe {
+ t = 1
+ }
+ b.EncodeVarint(11<<3 | proto.WireVarint)
+ b.EncodeVarint(t)
+ case *Communique_Delta_:
+ b.EncodeVarint(12<<3 | proto.WireVarint)
+ b.EncodeZigzag32(uint64(x.Delta))
+ case *Communique_Msg:
+ b.EncodeVarint(13<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Msg); err != nil {
+ return err
+ }
+ case *Communique_Somegroup:
+ b.EncodeVarint(14<<3 | proto.WireStartGroup)
+ if err := b.Marshal(x.Somegroup); err != nil {
+ return err
+ }
+ b.EncodeVarint(14<<3 | proto.WireEndGroup)
+ case nil:
+ default:
+ return fmt.Errorf("Communique.Union has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Communique)
+ switch tag {
+ case 5: // union.number
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Communique_Number{int32(x)}
+ return true, err
+ case 6: // union.name
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Union = &Communique_Name{x}
+ return true, err
+ case 7: // union.data
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.Union = &Communique_Data{x}
+ return true, err
+ case 8: // union.temp_c
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Union = &Communique_TempC{math.Float64frombits(x)}
+ return true, err
+ case 9: // union.height
+ if wire != proto.WireFixed32 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed32()
+ m.Union = &Communique_Height{math.Float32frombits(uint32(x))}
+ return true, err
+ case 10: // union.today
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Communique_Today{Days(x)}
+ return true, err
+ case 11: // union.maybe
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Communique_Maybe{x != 0}
+ return true, err
+ case 12: // union.delta
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeZigzag32()
+ m.Union = &Communique_Delta_{int32(x)}
+ return true, err
+ case 13: // union.msg
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Reply)
+ err := b.DecodeMessage(msg)
+ m.Union = &Communique_Msg{msg}
+ return true, err
+ case 14: // union.somegroup
+ if wire != proto.WireStartGroup {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Communique_SomeGroup)
+ err := b.DecodeGroup(msg)
+ m.Union = &Communique_Somegroup{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Communique_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Communique)
+ // union
+ switch x := m.Union.(type) {
+ case *Communique_Number:
+ n += proto.SizeVarint(5<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Number))
+ case *Communique_Name:
+ n += proto.SizeVarint(6<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.Name)))
+ n += len(x.Name)
+ case *Communique_Data:
+ n += proto.SizeVarint(7<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.Data)))
+ n += len(x.Data)
+ case *Communique_TempC:
+ n += proto.SizeVarint(8<<3 | proto.WireFixed64)
+ n += 8
+ case *Communique_Height:
+ n += proto.SizeVarint(9<<3 | proto.WireFixed32)
+ n += 4
+ case *Communique_Today:
+ n += proto.SizeVarint(10<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Today))
+ case *Communique_Maybe:
+ n += proto.SizeVarint(11<<3 | proto.WireVarint)
+ n += 1
+ case *Communique_Delta_:
+ n += proto.SizeVarint(12<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64((uint32(x.Delta) << 1) ^ uint32((int32(x.Delta) >> 31))))
+ case *Communique_Msg:
+ s := proto.Size(x.Msg)
+ n += proto.SizeVarint(13<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Communique_Somegroup:
+ n += proto.SizeVarint(14<<3 | proto.WireStartGroup)
+ n += proto.Size(x.Somegroup)
+ n += proto.SizeVarint(14<<3 | proto.WireEndGroup)
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+type Communique_SomeGroup struct {
+ Member *string `protobuf:"bytes,15,opt,name=member" json:"member,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Communique_SomeGroup) Reset() { *m = Communique_SomeGroup{} }
+func (m *Communique_SomeGroup) String() string { return proto.CompactTextString(m) }
+func (*Communique_SomeGroup) ProtoMessage() {}
+
+func (m *Communique_SomeGroup) GetMember() string {
+ if m != nil && m.Member != nil {
+ return *m.Member
+ }
+ return ""
+}
+
+type Communique_Delta struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Communique_Delta) Reset() { *m = Communique_Delta{} }
+func (m *Communique_Delta) String() string { return proto.CompactTextString(m) }
+func (*Communique_Delta) ProtoMessage() {}
+
+var E_Tag = &proto.ExtensionDesc{
+ ExtendedType: (*Reply)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 103,
+ Name: "my.test.tag",
+ Tag: "bytes,103,opt,name=tag",
+ Filename: "my_test/test.proto",
+}
+
+var E_Donut = &proto.ExtensionDesc{
+ ExtendedType: (*Reply)(nil),
+ ExtensionType: (*OtherReplyExtensions)(nil),
+ Field: 106,
+ Name: "my.test.donut",
+ Tag: "bytes,106,opt,name=donut",
+ Filename: "my_test/test.proto",
+}
+
+func init() {
+ proto.RegisterType((*Request)(nil), "my.test.Request")
+ proto.RegisterType((*Request_SomeGroup)(nil), "my.test.Request.SomeGroup")
+ proto.RegisterType((*Reply)(nil), "my.test.Reply")
+ proto.RegisterType((*Reply_Entry)(nil), "my.test.Reply.Entry")
+ proto.RegisterType((*OtherBase)(nil), "my.test.OtherBase")
+ proto.RegisterType((*ReplyExtensions)(nil), "my.test.ReplyExtensions")
+ proto.RegisterType((*OtherReplyExtensions)(nil), "my.test.OtherReplyExtensions")
+ proto.RegisterType((*OldReply)(nil), "my.test.OldReply")
+ proto.RegisterType((*Communique)(nil), "my.test.Communique")
+ proto.RegisterType((*Communique_SomeGroup)(nil), "my.test.Communique.SomeGroup")
+ proto.RegisterType((*Communique_Delta)(nil), "my.test.Communique.Delta")
+ proto.RegisterEnum("my.test.HatType", HatType_name, HatType_value)
+ proto.RegisterEnum("my.test.Days", Days_name, Days_value)
+ proto.RegisterEnum("my.test.Request_Color", Request_Color_name, Request_Color_value)
+ proto.RegisterEnum("my.test.Reply_Entry_Game", Reply_Entry_Game_name, Reply_Entry_Game_value)
+ proto.RegisterExtension(E_ReplyExtensions_Time)
+ proto.RegisterExtension(E_ReplyExtensions_Carrot)
+ proto.RegisterExtension(E_ReplyExtensions_Donut)
+ proto.RegisterExtension(E_Tag)
+ proto.RegisterExtension(E_Donut)
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden
new file mode 100644
index 0000000..1954e3f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden
@@ -0,0 +1,870 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: my_test/test.proto
+
+/*
+Package my_test is a generated protocol buffer package.
+
+This package holds interesting messages.
+
+It is generated from these files:
+ my_test/test.proto
+
+It has these top-level messages:
+ Request
+ Reply
+ OtherBase
+ ReplyExtensions
+ OtherReplyExtensions
+ OldReply
+ Communique
+*/
+package my_test
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/golang/protobuf/protoc-gen-go/testdata/multi"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type HatType int32
+
+const (
+ // deliberately skipping 0
+ HatType_FEDORA HatType = 1
+ HatType_FEZ HatType = 2
+)
+
+var HatType_name = map[int32]string{
+ 1: "FEDORA",
+ 2: "FEZ",
+}
+var HatType_value = map[string]int32{
+ "FEDORA": 1,
+ "FEZ": 2,
+}
+
+func (x HatType) Enum() *HatType {
+ p := new(HatType)
+ *p = x
+ return p
+}
+func (x HatType) String() string {
+ return proto.EnumName(HatType_name, int32(x))
+}
+func (x *HatType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(HatType_value, data, "HatType")
+ if err != nil {
+ return err
+ }
+ *x = HatType(value)
+ return nil
+}
+
+// This enum represents days of the week.
+type Days int32
+
+const (
+ Days_MONDAY Days = 1
+ Days_TUESDAY Days = 2
+ Days_LUNDI Days = 1
+)
+
+var Days_name = map[int32]string{
+ 1: "MONDAY",
+ 2: "TUESDAY",
+ // Duplicate value: 1: "LUNDI",
+}
+var Days_value = map[string]int32{
+ "MONDAY": 1,
+ "TUESDAY": 2,
+ "LUNDI": 1,
+}
+
+func (x Days) Enum() *Days {
+ p := new(Days)
+ *p = x
+ return p
+}
+func (x Days) String() string {
+ return proto.EnumName(Days_name, int32(x))
+}
+func (x *Days) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Days_value, data, "Days")
+ if err != nil {
+ return err
+ }
+ *x = Days(value)
+ return nil
+}
+
+type Request_Color int32
+
+const (
+ Request_RED Request_Color = 0
+ Request_GREEN Request_Color = 1
+ Request_BLUE Request_Color = 2
+)
+
+var Request_Color_name = map[int32]string{
+ 0: "RED",
+ 1: "GREEN",
+ 2: "BLUE",
+}
+var Request_Color_value = map[string]int32{
+ "RED": 0,
+ "GREEN": 1,
+ "BLUE": 2,
+}
+
+func (x Request_Color) Enum() *Request_Color {
+ p := new(Request_Color)
+ *p = x
+ return p
+}
+func (x Request_Color) String() string {
+ return proto.EnumName(Request_Color_name, int32(x))
+}
+func (x *Request_Color) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Request_Color_value, data, "Request_Color")
+ if err != nil {
+ return err
+ }
+ *x = Request_Color(value)
+ return nil
+}
+
+type Reply_Entry_Game int32
+
+const (
+ Reply_Entry_FOOTBALL Reply_Entry_Game = 1
+ Reply_Entry_TENNIS Reply_Entry_Game = 2
+)
+
+var Reply_Entry_Game_name = map[int32]string{
+ 1: "FOOTBALL",
+ 2: "TENNIS",
+}
+var Reply_Entry_Game_value = map[string]int32{
+ "FOOTBALL": 1,
+ "TENNIS": 2,
+}
+
+func (x Reply_Entry_Game) Enum() *Reply_Entry_Game {
+ p := new(Reply_Entry_Game)
+ *p = x
+ return p
+}
+func (x Reply_Entry_Game) String() string {
+ return proto.EnumName(Reply_Entry_Game_name, int32(x))
+}
+func (x *Reply_Entry_Game) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Reply_Entry_Game_value, data, "Reply_Entry_Game")
+ if err != nil {
+ return err
+ }
+ *x = Reply_Entry_Game(value)
+ return nil
+}
+
+// This is a message that might be sent somewhere.
+type Request struct {
+ Key []int64 `protobuf:"varint,1,rep,name=key" json:"key,omitempty"`
+ // optional imp.ImportedMessage imported_message = 2;
+ Hue *Request_Color `protobuf:"varint,3,opt,name=hue,enum=my.test.Request_Color" json:"hue,omitempty"`
+ Hat *HatType `protobuf:"varint,4,opt,name=hat,enum=my.test.HatType,def=1" json:"hat,omitempty"`
+ // optional imp.ImportedMessage.Owner owner = 6;
+ Deadline *float32 `protobuf:"fixed32,7,opt,name=deadline,def=inf" json:"deadline,omitempty"`
+ Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"`
+ // This is a map field. It will generate map[int32]string.
+ NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ // This is a map field whose value type is a message.
+ MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Reset_ *int32 `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"`
+ // This field should not conflict with any getters.
+ GetKey_ *string `protobuf:"bytes,16,opt,name=get_key,json=getKey" json:"get_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+
+const Default_Request_Hat HatType = HatType_FEDORA
+
+var Default_Request_Deadline float32 = float32(math.Inf(1))
+
+func (m *Request) GetKey() []int64 {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *Request) GetHue() Request_Color {
+ if m != nil && m.Hue != nil {
+ return *m.Hue
+ }
+ return Request_RED
+}
+
+func (m *Request) GetHat() HatType {
+ if m != nil && m.Hat != nil {
+ return *m.Hat
+ }
+ return Default_Request_Hat
+}
+
+func (m *Request) GetDeadline() float32 {
+ if m != nil && m.Deadline != nil {
+ return *m.Deadline
+ }
+ return Default_Request_Deadline
+}
+
+func (m *Request) GetSomegroup() *Request_SomeGroup {
+ if m != nil {
+ return m.Somegroup
+ }
+ return nil
+}
+
+func (m *Request) GetNameMapping() map[int32]string {
+ if m != nil {
+ return m.NameMapping
+ }
+ return nil
+}
+
+func (m *Request) GetMsgMapping() map[int64]*Reply {
+ if m != nil {
+ return m.MsgMapping
+ }
+ return nil
+}
+
+func (m *Request) GetReset_() int32 {
+ if m != nil && m.Reset_ != nil {
+ return *m.Reset_
+ }
+ return 0
+}
+
+func (m *Request) GetGetKey_() string {
+ if m != nil && m.GetKey_ != nil {
+ return *m.GetKey_
+ }
+ return ""
+}
+
+type Request_SomeGroup struct {
+ GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Request_SomeGroup) Reset() { *m = Request_SomeGroup{} }
+func (m *Request_SomeGroup) String() string { return proto.CompactTextString(m) }
+func (*Request_SomeGroup) ProtoMessage() {}
+
+func (m *Request_SomeGroup) GetGroupField() int32 {
+ if m != nil && m.GroupField != nil {
+ return *m.GroupField
+ }
+ return 0
+}
+
+type Reply struct {
+ Found []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"`
+ CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Reply) Reset() { *m = Reply{} }
+func (m *Reply) String() string { return proto.CompactTextString(m) }
+func (*Reply) ProtoMessage() {}
+
+var extRange_Reply = []proto.ExtensionRange{
+ {100, 536870911},
+}
+
+func (*Reply) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_Reply
+}
+
+func (m *Reply) GetFound() []*Reply_Entry {
+ if m != nil {
+ return m.Found
+ }
+ return nil
+}
+
+func (m *Reply) GetCompactKeys() []int32 {
+ if m != nil {
+ return m.CompactKeys
+ }
+ return nil
+}
+
+type Reply_Entry struct {
+ KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"`
+ Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"`
+ XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=MyFieldName2" json:"_my_field_name_2,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Reply_Entry) Reset() { *m = Reply_Entry{} }
+func (m *Reply_Entry) String() string { return proto.CompactTextString(m) }
+func (*Reply_Entry) ProtoMessage() {}
+
+const Default_Reply_Entry_Value int64 = 7
+
+func (m *Reply_Entry) GetKeyThatNeeds_1234Camel_CasIng() int64 {
+ if m != nil && m.KeyThatNeeds_1234Camel_CasIng != nil {
+ return *m.KeyThatNeeds_1234Camel_CasIng
+ }
+ return 0
+}
+
+func (m *Reply_Entry) GetValue() int64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return Default_Reply_Entry_Value
+}
+
+func (m *Reply_Entry) GetXMyFieldName_2() int64 {
+ if m != nil && m.XMyFieldName_2 != nil {
+ return *m.XMyFieldName_2
+ }
+ return 0
+}
+
+type OtherBase struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OtherBase) Reset() { *m = OtherBase{} }
+func (m *OtherBase) String() string { return proto.CompactTextString(m) }
+func (*OtherBase) ProtoMessage() {}
+
+var extRange_OtherBase = []proto.ExtensionRange{
+ {100, 536870911},
+}
+
+func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_OtherBase
+}
+
+func (m *OtherBase) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type ReplyExtensions struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ReplyExtensions) Reset() { *m = ReplyExtensions{} }
+func (m *ReplyExtensions) String() string { return proto.CompactTextString(m) }
+func (*ReplyExtensions) ProtoMessage() {}
+
+var E_ReplyExtensions_Time = &proto.ExtensionDesc{
+ ExtendedType: (*Reply)(nil),
+ ExtensionType: (*float64)(nil),
+ Field: 101,
+ Name: "my.test.ReplyExtensions.time",
+ Tag: "fixed64,101,opt,name=time",
+ Filename: "my_test/test.proto",
+}
+
+var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{
+ ExtendedType: (*Reply)(nil),
+ ExtensionType: (*ReplyExtensions)(nil),
+ Field: 105,
+ Name: "my.test.ReplyExtensions.carrot",
+ Tag: "bytes,105,opt,name=carrot",
+ Filename: "my_test/test.proto",
+}
+
+var E_ReplyExtensions_Donut = &proto.ExtensionDesc{
+ ExtendedType: (*OtherBase)(nil),
+ ExtensionType: (*ReplyExtensions)(nil),
+ Field: 101,
+ Name: "my.test.ReplyExtensions.donut",
+ Tag: "bytes,101,opt,name=donut",
+ Filename: "my_test/test.proto",
+}
+
+type OtherReplyExtensions struct {
+ Key *int32 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OtherReplyExtensions) Reset() { *m = OtherReplyExtensions{} }
+func (m *OtherReplyExtensions) String() string { return proto.CompactTextString(m) }
+func (*OtherReplyExtensions) ProtoMessage() {}
+
+func (m *OtherReplyExtensions) GetKey() int32 {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return 0
+}
+
+type OldReply struct {
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OldReply) Reset() { *m = OldReply{} }
+func (m *OldReply) String() string { return proto.CompactTextString(m) }
+func (*OldReply) ProtoMessage() {}
+
+func (m *OldReply) Marshal() ([]byte, error) {
+ return proto.MarshalMessageSet(&m.XXX_InternalExtensions)
+}
+func (m *OldReply) Unmarshal(buf []byte) error {
+ return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions)
+}
+func (m *OldReply) MarshalJSON() ([]byte, error) {
+ return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions)
+}
+func (m *OldReply) UnmarshalJSON(buf []byte) error {
+ return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions)
+}
+
+// ensure OldReply satisfies proto.Marshaler and proto.Unmarshaler
+var _ proto.Marshaler = (*OldReply)(nil)
+var _ proto.Unmarshaler = (*OldReply)(nil)
+
+var extRange_OldReply = []proto.ExtensionRange{
+ {100, 2147483646},
+}
+
+func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_OldReply
+}
+
+type Communique struct {
+ MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"`
+ // This is a oneof, called "union".
+ //
+ // Types that are valid to be assigned to Union:
+ // *Communique_Number
+ // *Communique_Name
+ // *Communique_Data
+ // *Communique_TempC
+ // *Communique_Height
+ // *Communique_Today
+ // *Communique_Maybe
+ // *Communique_Delta_
+ // *Communique_Msg
+ // *Communique_Somegroup
+ Union isCommunique_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Communique) Reset() { *m = Communique{} }
+func (m *Communique) String() string { return proto.CompactTextString(m) }
+func (*Communique) ProtoMessage() {}
+
+type isCommunique_Union interface {
+ isCommunique_Union()
+}
+
+type Communique_Number struct {
+ Number int32 `protobuf:"varint,5,opt,name=number,oneof"`
+}
+type Communique_Name struct {
+ Name string `protobuf:"bytes,6,opt,name=name,oneof"`
+}
+type Communique_Data struct {
+ Data []byte `protobuf:"bytes,7,opt,name=data,oneof"`
+}
+type Communique_TempC struct {
+ TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"`
+}
+type Communique_Height struct {
+ Height float32 `protobuf:"fixed32,9,opt,name=height,oneof"`
+}
+type Communique_Today struct {
+ Today Days `protobuf:"varint,10,opt,name=today,enum=my.test.Days,oneof"`
+}
+type Communique_Maybe struct {
+ Maybe bool `protobuf:"varint,11,opt,name=maybe,oneof"`
+}
+type Communique_Delta_ struct {
+ Delta int32 `protobuf:"zigzag32,12,opt,name=delta,oneof"`
+}
+type Communique_Msg struct {
+ Msg *Reply `protobuf:"bytes,13,opt,name=msg,oneof"`
+}
+type Communique_Somegroup struct {
+ Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,json=somegroup,oneof"`
+}
+
+func (*Communique_Number) isCommunique_Union() {}
+func (*Communique_Name) isCommunique_Union() {}
+func (*Communique_Data) isCommunique_Union() {}
+func (*Communique_TempC) isCommunique_Union() {}
+func (*Communique_Height) isCommunique_Union() {}
+func (*Communique_Today) isCommunique_Union() {}
+func (*Communique_Maybe) isCommunique_Union() {}
+func (*Communique_Delta_) isCommunique_Union() {}
+func (*Communique_Msg) isCommunique_Union() {}
+func (*Communique_Somegroup) isCommunique_Union() {}
+
+func (m *Communique) GetUnion() isCommunique_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+}
+
+func (m *Communique) GetMakeMeCry() bool {
+ if m != nil && m.MakeMeCry != nil {
+ return *m.MakeMeCry
+ }
+ return false
+}
+
+func (m *Communique) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Communique_Number); ok {
+ return x.Number
+ }
+ return 0
+}
+
+func (m *Communique) GetName() string {
+ if x, ok := m.GetUnion().(*Communique_Name); ok {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *Communique) GetData() []byte {
+ if x, ok := m.GetUnion().(*Communique_Data); ok {
+ return x.Data
+ }
+ return nil
+}
+
+func (m *Communique) GetTempC() float64 {
+ if x, ok := m.GetUnion().(*Communique_TempC); ok {
+ return x.TempC
+ }
+ return 0
+}
+
+func (m *Communique) GetHeight() float32 {
+ if x, ok := m.GetUnion().(*Communique_Height); ok {
+ return x.Height
+ }
+ return 0
+}
+
+func (m *Communique) GetToday() Days {
+ if x, ok := m.GetUnion().(*Communique_Today); ok {
+ return x.Today
+ }
+ return Days_MONDAY
+}
+
+func (m *Communique) GetMaybe() bool {
+ if x, ok := m.GetUnion().(*Communique_Maybe); ok {
+ return x.Maybe
+ }
+ return false
+}
+
+func (m *Communique) GetDelta() int32 {
+ if x, ok := m.GetUnion().(*Communique_Delta_); ok {
+ return x.Delta
+ }
+ return 0
+}
+
+func (m *Communique) GetMsg() *Reply {
+ if x, ok := m.GetUnion().(*Communique_Msg); ok {
+ return x.Msg
+ }
+ return nil
+}
+
+func (m *Communique) GetSomegroup() *Communique_SomeGroup {
+ if x, ok := m.GetUnion().(*Communique_Somegroup); ok {
+ return x.Somegroup
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{
+ (*Communique_Number)(nil),
+ (*Communique_Name)(nil),
+ (*Communique_Data)(nil),
+ (*Communique_TempC)(nil),
+ (*Communique_Height)(nil),
+ (*Communique_Today)(nil),
+ (*Communique_Maybe)(nil),
+ (*Communique_Delta_)(nil),
+ (*Communique_Msg)(nil),
+ (*Communique_Somegroup)(nil),
+ }
+}
+
+func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Communique)
+ // union
+ switch x := m.Union.(type) {
+ case *Communique_Number:
+ b.EncodeVarint(5<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Number))
+ case *Communique_Name:
+ b.EncodeVarint(6<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.Name)
+ case *Communique_Data:
+ b.EncodeVarint(7<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.Data)
+ case *Communique_TempC:
+ b.EncodeVarint(8<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.TempC))
+ case *Communique_Height:
+ b.EncodeVarint(9<<3 | proto.WireFixed32)
+ b.EncodeFixed32(uint64(math.Float32bits(x.Height)))
+ case *Communique_Today:
+ b.EncodeVarint(10<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Today))
+ case *Communique_Maybe:
+ t := uint64(0)
+ if x.Maybe {
+ t = 1
+ }
+ b.EncodeVarint(11<<3 | proto.WireVarint)
+ b.EncodeVarint(t)
+ case *Communique_Delta_:
+ b.EncodeVarint(12<<3 | proto.WireVarint)
+ b.EncodeZigzag32(uint64(x.Delta))
+ case *Communique_Msg:
+ b.EncodeVarint(13<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Msg); err != nil {
+ return err
+ }
+ case *Communique_Somegroup:
+ b.EncodeVarint(14<<3 | proto.WireStartGroup)
+ if err := b.Marshal(x.Somegroup); err != nil {
+ return err
+ }
+ b.EncodeVarint(14<<3 | proto.WireEndGroup)
+ case nil:
+ default:
+ return fmt.Errorf("Communique.Union has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Communique)
+ switch tag {
+ case 5: // union.number
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Communique_Number{int32(x)}
+ return true, err
+ case 6: // union.name
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Union = &Communique_Name{x}
+ return true, err
+ case 7: // union.data
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.Union = &Communique_Data{x}
+ return true, err
+ case 8: // union.temp_c
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Union = &Communique_TempC{math.Float64frombits(x)}
+ return true, err
+ case 9: // union.height
+ if wire != proto.WireFixed32 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed32()
+ m.Union = &Communique_Height{math.Float32frombits(uint32(x))}
+ return true, err
+ case 10: // union.today
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Communique_Today{Days(x)}
+ return true, err
+ case 11: // union.maybe
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Communique_Maybe{x != 0}
+ return true, err
+ case 12: // union.delta
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeZigzag32()
+ m.Union = &Communique_Delta_{int32(x)}
+ return true, err
+ case 13: // union.msg
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Reply)
+ err := b.DecodeMessage(msg)
+ m.Union = &Communique_Msg{msg}
+ return true, err
+ case 14: // union.somegroup
+ if wire != proto.WireStartGroup {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Communique_SomeGroup)
+ err := b.DecodeGroup(msg)
+ m.Union = &Communique_Somegroup{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Communique_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Communique)
+ // union
+ switch x := m.Union.(type) {
+ case *Communique_Number:
+ n += proto.SizeVarint(5<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Number))
+ case *Communique_Name:
+ n += proto.SizeVarint(6<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.Name)))
+ n += len(x.Name)
+ case *Communique_Data:
+ n += proto.SizeVarint(7<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.Data)))
+ n += len(x.Data)
+ case *Communique_TempC:
+ n += proto.SizeVarint(8<<3 | proto.WireFixed64)
+ n += 8
+ case *Communique_Height:
+ n += proto.SizeVarint(9<<3 | proto.WireFixed32)
+ n += 4
+ case *Communique_Today:
+ n += proto.SizeVarint(10<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Today))
+ case *Communique_Maybe:
+ n += proto.SizeVarint(11<<3 | proto.WireVarint)
+ n += 1
+ case *Communique_Delta_:
+ n += proto.SizeVarint(12<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64((uint32(x.Delta) << 1) ^ uint32((int32(x.Delta) >> 31))))
+ case *Communique_Msg:
+ s := proto.Size(x.Msg)
+ n += proto.SizeVarint(13<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Communique_Somegroup:
+ n += proto.SizeVarint(14<<3 | proto.WireStartGroup)
+ n += proto.Size(x.Somegroup)
+ n += proto.SizeVarint(14<<3 | proto.WireEndGroup)
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+type Communique_SomeGroup struct {
+ Member *string `protobuf:"bytes,15,opt,name=member" json:"member,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Communique_SomeGroup) Reset() { *m = Communique_SomeGroup{} }
+func (m *Communique_SomeGroup) String() string { return proto.CompactTextString(m) }
+func (*Communique_SomeGroup) ProtoMessage() {}
+
+func (m *Communique_SomeGroup) GetMember() string {
+ if m != nil && m.Member != nil {
+ return *m.Member
+ }
+ return ""
+}
+
+type Communique_Delta struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Communique_Delta) Reset() { *m = Communique_Delta{} }
+func (m *Communique_Delta) String() string { return proto.CompactTextString(m) }
+func (*Communique_Delta) ProtoMessage() {}
+
+var E_Tag = &proto.ExtensionDesc{
+ ExtendedType: (*Reply)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 103,
+ Name: "my.test.tag",
+ Tag: "bytes,103,opt,name=tag",
+ Filename: "my_test/test.proto",
+}
+
+var E_Donut = &proto.ExtensionDesc{
+ ExtendedType: (*Reply)(nil),
+ ExtensionType: (*OtherReplyExtensions)(nil),
+ Field: 106,
+ Name: "my.test.donut",
+ Tag: "bytes,106,opt,name=donut",
+ Filename: "my_test/test.proto",
+}
+
+func init() {
+ proto.RegisterType((*Request)(nil), "my.test.Request")
+ proto.RegisterType((*Request_SomeGroup)(nil), "my.test.Request.SomeGroup")
+ proto.RegisterType((*Reply)(nil), "my.test.Reply")
+ proto.RegisterType((*Reply_Entry)(nil), "my.test.Reply.Entry")
+ proto.RegisterType((*OtherBase)(nil), "my.test.OtherBase")
+ proto.RegisterType((*ReplyExtensions)(nil), "my.test.ReplyExtensions")
+ proto.RegisterType((*OtherReplyExtensions)(nil), "my.test.OtherReplyExtensions")
+ proto.RegisterType((*OldReply)(nil), "my.test.OldReply")
+ proto.RegisterType((*Communique)(nil), "my.test.Communique")
+ proto.RegisterType((*Communique_SomeGroup)(nil), "my.test.Communique.SomeGroup")
+ proto.RegisterType((*Communique_Delta)(nil), "my.test.Communique.Delta")
+ proto.RegisterEnum("my.test.HatType", HatType_name, HatType_value)
+ proto.RegisterEnum("my.test.Days", Days_name, Days_value)
+ proto.RegisterEnum("my.test.Request_Color", Request_Color_name, Request_Color_value)
+ proto.RegisterEnum("my.test.Reply_Entry_Game", Reply_Entry_Game_name, Reply_Entry_Game_value)
+ proto.RegisterExtension(E_ReplyExtensions_Time)
+ proto.RegisterExtension(E_ReplyExtensions_Carrot)
+ proto.RegisterExtension(E_ReplyExtensions_Donut)
+ proto.RegisterExtension(E_Tag)
+ proto.RegisterExtension(E_Donut)
+}
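The generated code above turns each case of the Communique "union" oneof into its own wrapper struct (Communique_Number, Communique_Name, and so on) behind the isCommunique_Union interface, with per-case getters and a type-switchable GetUnion accessor. The following is a hypothetical usage sketch, not part of the vendored diff; the package clause is an assumption made so the generated identifiers resolve without guessing the testdata import path.

// Hypothetical sketch, not part of the vendored files. The package name is an
// assumption; the snippet is meant to sit next to the generated test.pb.go.
package test

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func communiqueOneofSketch() {
	// Selecting a oneof case means assigning one of the generated wrapper
	// structs to the Union field.
	c := &Communique{
		MakeMeCry: proto.Bool(true),
		Union:     &Communique_Number{Number: 42},
	}

	// Per-case getters return the zero value when a different case is set.
	fmt.Println(c.GetNumber()) // 42
	fmt.Println(c.GetName())   // "" (the "name" case is not set)

	// A type switch over GetUnion distinguishes the cases explicitly.
	switch u := c.GetUnion().(type) {
	case *Communique_Number:
		fmt.Println("number:", u.Number)
	case *Communique_Name:
		fmt.Println("name:", u.Name)
	case nil:
		fmt.Println("union not set")
	}
}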
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto
new file mode 100644
index 0000000..8e70946
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto
@@ -0,0 +1,156 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+
+// This package holds interesting messages.
+package my.test; // dotted package name
+
+//import "imp.proto";
+import "multi/multi1.proto"; // unused import
+
+enum HatType {
+ // deliberately skipping 0
+ FEDORA = 1;
+ FEZ = 2;
+}
+
+// This enum represents days of the week.
+enum Days {
+ option allow_alias = true;
+
+ MONDAY = 1;
+ TUESDAY = 2;
+ LUNDI = 1; // same value as MONDAY
+}
+
+// This is a message that might be sent somewhere.
+message Request {
+ enum Color {
+ RED = 0;
+ GREEN = 1;
+ BLUE = 2;
+ }
+ repeated int64 key = 1;
+// optional imp.ImportedMessage imported_message = 2;
+ optional Color hue = 3; // no default
+ optional HatType hat = 4 [default=FEDORA];
+// optional imp.ImportedMessage.Owner owner = 6;
+ optional float deadline = 7 [default=inf];
+ optional group SomeGroup = 8 {
+ optional int32 group_field = 9;
+ }
+
+ // These foreign types are in imp2.proto,
+ // which is publicly imported by imp.proto.
+// optional imp.PubliclyImportedMessage pub = 10;
+// optional imp.PubliclyImportedEnum pub_enum = 13 [default=HAIR];
+
+
+ // This is a map field. It will generate map[int32]string.
+ map<int32, string> name_mapping = 14;
+ // This is a map field whose value type is a message.
+ map<sint64, Reply> msg_mapping = 15;
+
+ optional int32 reset = 12;
+ // This field should not conflict with any getters.
+ optional string get_key = 16;
+}
+
+message Reply {
+ message Entry {
+ required int64 key_that_needs_1234camel_CasIng = 1;
+ optional int64 value = 2 [default=7];
+ optional int64 _my_field_name_2 = 3;
+ enum Game {
+ FOOTBALL = 1;
+ TENNIS = 2;
+ }
+ }
+ repeated Entry found = 1;
+ repeated int32 compact_keys = 2 [packed=true];
+ extensions 100 to max;
+}
+
+message OtherBase {
+ optional string name = 1;
+ extensions 100 to max;
+}
+
+message ReplyExtensions {
+ extend Reply {
+ optional double time = 101;
+ optional ReplyExtensions carrot = 105;
+ }
+ extend OtherBase {
+ optional ReplyExtensions donut = 101;
+ }
+}
+
+message OtherReplyExtensions {
+ optional int32 key = 1;
+}
+
+// top-level extension
+extend Reply {
+ optional string tag = 103;
+ optional OtherReplyExtensions donut = 106;
+// optional imp.ImportedMessage elephant = 107; // extend with message from another file.
+}
+
+message OldReply {
+ // Extensions will be encoded in MessageSet wire format.
+ option message_set_wire_format = true;
+ extensions 100 to max;
+}
+
+message Communique {
+ optional bool make_me_cry = 1;
+
+ // This is a oneof, called "union".
+ oneof union {
+ int32 number = 5;
+ string name = 6;
+ bytes data = 7;
+ double temp_c = 8;
+ float height = 9;
+ Days today = 10;
+ bool maybe = 11;
+ sint32 delta = 12; // name will conflict with Delta below
+ Reply msg = 13;
+ group SomeGroup = 14 {
+ optional string member = 15;
+ }
+ }
+
+ message Delta {}
+}
+
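The proto file above also declares extensions (the nested ReplyExtensions extends plus the top-level `tag` and `donut` extends of Reply), which surface in the generated Go as ExtensionDesc values such as E_Tag. Below is a hedged sketch of writing and reading one of them through the proto package's extension API; it is illustrative only, with the package clause assumed as in the previous sketch.

// Hypothetical sketch, not part of the vendored files; assumes the same
// package as the generated test.pb.go so Reply and E_Tag are in scope.
package test

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
)

func replyExtensionSketch() {
	r := &Reply{}

	// Attach the my.test.tag extension (field 103, declared as optional string).
	if err := proto.SetExtension(r, E_Tag, proto.String("important")); err != nil {
		log.Fatal(err)
	}

	// GetExtension returns an interface{} holding the extension value, here *string.
	v, err := proto.GetExtension(r, E_Tag)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*v.(*string)) // important
}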
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto
new file mode 100644
index 0000000..869b9af
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto
@@ -0,0 +1,53 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package proto3;
+
+message Request {
+ enum Flavour {
+ SWEET = 0;
+ SOUR = 1;
+ UMAMI = 2;
+ GOPHERLICIOUS = 3;
+ }
+ string name = 1;
+ repeated int64 key = 2;
+ Flavour taste = 3;
+ Book book = 4;
+ repeated int64 unpacked = 5 [packed=false];
+}
+
+message Book {
+ string title = 1;
+ bytes raw_data = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
new file mode 100644
index 0000000..b2af97f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -0,0 +1,139 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements functions to marshal proto.Message to/from
+// google.protobuf.Any message.
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes/any"
+)
+
+const googleApis = "type.googleapis.com/"
+
+// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
+//
+// Note that regular type assertions should be done using the Is
+// function. AnyMessageName is provided for less common use cases like filtering a
+// sequence of Any messages based on a set of allowed message type names.
+func AnyMessageName(any *any.Any) (string, error) {
+ if any == nil {
+ return "", fmt.Errorf("message is nil")
+ }
+ slash := strings.LastIndex(any.TypeUrl, "/")
+ if slash < 0 {
+ return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
+ }
+ return any.TypeUrl[slash+1:], nil
+}
+
+// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
+func MarshalAny(pb proto.Message) (*any.Any, error) {
+ value, err := proto.Marshal(pb)
+ if err != nil {
+ return nil, err
+ }
+ return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in a google.protobuf.Any
+// message. The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+//
+// var x ptypes.DynamicAny
+// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+// fmt.Printf("unmarshaled message: %v", x.Message)
+type DynamicAny struct {
+ proto.Message
+}
+
+// Empty returns a new proto.Message of the type specified in a
+// google.protobuf.Any message. It returns an error if the corresponding
+// message type isn't linked in.
+func Empty(any *any.Any) (proto.Message, error) {
+ aname, err := AnyMessageName(any)
+ if err != nil {
+ return nil, err
+ }
+
+ t := proto.MessageType(aname)
+ if t == nil {
+ return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
+ }
+ return reflect.New(t.Elem()).Interface().(proto.Message), nil
+}
+
+// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
+// message and places the decoded result in pb. It returns an error if the type
+// of the contents of the Any message does not match the type of the pb message.
+//
+// pb can be a proto.Message, or a *DynamicAny.
+func UnmarshalAny(any *any.Any, pb proto.Message) error {
+ if d, ok := pb.(*DynamicAny); ok {
+ if d.Message == nil {
+ var err error
+ d.Message, err = Empty(any)
+ if err != nil {
+ return err
+ }
+ }
+ return UnmarshalAny(any, d.Message)
+ }
+
+ aname, err := AnyMessageName(any)
+ if err != nil {
+ return err
+ }
+
+ mname := proto.MessageName(pb)
+ if aname != mname {
+ return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
+ }
+ return proto.Unmarshal(any.Value, pb)
+}
+
+// Is returns true if the Any value contains a message of the given type.
+func Is(any *any.Any, pb proto.Message) bool {
+ aname, err := AnyMessageName(any)
+ if err != nil {
+ return false
+ }
+
+ return aname == proto.MessageName(pb)
+}
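Taken together, the helpers in this file cover the common Any workflows: MarshalAny packs a message, Is checks the packed type by name, and UnmarshalAny unpacks into a known type or, via DynamicAny, into a type resolved at runtime. A short usage sketch follows; it is illustrative only and uses descriptor.FileDescriptorProto merely as a convenient, already-registered message type.

// Illustrative only; not part of the vendored files.
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/protoc-gen-go/descriptor"
	"github.com/golang/protobuf/ptypes"
)

func main() {
	// Pack a concrete message into a google.protobuf.Any.
	orig := &pb.FileDescriptorProto{Name: proto.String("example.proto")}
	a, err := ptypes.MarshalAny(orig)
	if err != nil {
		log.Fatal(err)
	}

	// Is checks the packed type without unmarshaling the payload.
	fmt.Println(ptypes.Is(a, &pb.FileDescriptorProto{})) // true

	// Unpack into a known concrete type.
	var got pb.FileDescriptorProto
	if err := ptypes.UnmarshalAny(a, &got); err != nil {
		log.Fatal(err)
	}

	// Or let DynamicAny allocate the right type by name, provided that type
	// is linked into the binary.
	var dyn ptypes.DynamicAny
	if err := ptypes.UnmarshalAny(a, &dyn); err != nil {
		log.Fatal(err)
	}
	fmt.Println(proto.MessageName(dyn.Message)) // google.protobuf.FileDescriptorProto
}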
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
new file mode 100644
index 0000000..f346017
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -0,0 +1,178 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/any.proto
+
+/*
+Package any is a generated protocol buffer package.
+
+It is generated from these files:
+ google/protobuf/any.proto
+
+It has these top-level messages:
+ Any
+*/
+package any
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// The Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+// Foo foo = ...;
+// Any any;
+// any.PackFrom(foo);
+// ...
+// if (any.UnpackTo(&foo)) {
+// ...
+// }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+// Foo foo = ...;
+// Any any = Any.pack(foo);
+// ...
+// if (any.is(Foo.class)) {
+// foo = any.unpack(Foo.class);
+// }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+// foo = Foo(...)
+// any = Any()
+// any.Pack(foo)
+// ...
+// if any.Is(Foo.DESCRIPTOR):
+// any.Unpack(foo)
+// ...
+//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := ptypes.MarshalAny(foo)
+// ...
+// foo := &pb.Foo{}
+// if err := ptypes.UnmarshalAny(any, foo); err != nil {
+// ...
+// }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+// package google.profile;
+// message Person {
+// string first_name = 1;
+// string last_name = 2;
+// }
+//
+// {
+// "@type": "type.googleapis.com/google.profile.Person",
+// "firstName": <string>,
+// "lastName": <string>
+// }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded, adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+// {
+// "@type": "type.googleapis.com/google.protobuf.Duration",
+// "value": "1.212s"
+// }
+//
+type Any struct {
+ // A URL/resource name whose content describes the type of the
+ // serialized protocol buffer message.
+ //
+ // For URLs which use the scheme `http`, `https`, or no scheme, the
+ // following restrictions and interpretations apply:
+ //
+ // * If no scheme is provided, `https` is assumed.
+ // * The last segment of the URL's path must represent the fully
+ // qualified name of the type (as in `path/google.protobuf.Duration`).
+ // The name should be in a canonical form (e.g., leading "." is
+ // not accepted).
+ // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+ // value in binary format, or produce an error.
+ // * Applications are allowed to cache lookup results based on the
+ // URL, or have them precompiled into a binary to avoid any
+ // lookup. Therefore, binary compatibility needs to be preserved
+ // on changes to types. (Use versioned type names to manage
+ // breaking changes.)
+ //
+ // Schemes other than `http`, `https` (or the empty scheme) might be
+ // used with implementation specific semantics.
+ //
+ TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
+ // Must be a valid serialized protocol buffer of the above specified type.
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *Any) Reset() { *m = Any{} }
+func (m *Any) String() string { return proto.CompactTextString(m) }
+func (*Any) ProtoMessage() {}
+func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Any) XXX_WellKnownType() string { return "Any" }
+
+func (m *Any) GetTypeUrl() string {
+ if m != nil {
+ return m.TypeUrl
+ }
+ return ""
+}
+
+func (m *Any) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Any)(nil), "google.protobuf.Any")
+}
+
+func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 185 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
+ 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
+ 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
+ 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
+ 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
+ 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
+ 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
+ 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
+ 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
+ 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
+ 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
new file mode 100644
index 0000000..c748667
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
@@ -0,0 +1,149 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/any";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "AnyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// The Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+// Foo foo = ...;
+// Any any;
+// any.PackFrom(foo);
+// ...
+// if (any.UnpackTo(&foo)) {
+// ...
+// }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+// Foo foo = ...;
+// Any any = Any.pack(foo);
+// ...
+// if (any.is(Foo.class)) {
+// foo = any.unpack(Foo.class);
+// }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+// foo = Foo(...)
+// any = Any()
+// any.Pack(foo)
+// ...
+// if any.Is(Foo.DESCRIPTOR):
+// any.Unpack(foo)
+// ...
+//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := ptypes.MarshalAny(foo)
+// ...
+// foo := &pb.Foo{}
+// if err := ptypes.UnmarshalAny(any, foo); err != nil {
+// ...
+// }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+// package google.profile;
+// message Person {
+// string first_name = 1;
+// string last_name = 2;
+// }
+//
+// {
+// "@type": "type.googleapis.com/google.profile.Person",
+// "firstName": <string>,
+// "lastName": <string>
+// }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded, adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+// {
+// "@type": "type.googleapis.com/google.protobuf.Duration",
+// "value": "1.212s"
+// }
+//
+message Any {
+ // A URL/resource name whose content describes the type of the
+ // serialized protocol buffer message.
+ //
+ // For URLs which use the scheme `http`, `https`, or no scheme, the
+ // following restrictions and interpretations apply:
+ //
+ // * If no scheme is provided, `https` is assumed.
+ // * The last segment of the URL's path must represent the fully
+ // qualified name of the type (as in `path/google.protobuf.Duration`).
+ // The name should be in a canonical form (e.g., leading "." is
+ // not accepted).
+ // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+ // value in binary format, or produce an error.
+ // * Applications are allowed to cache lookup results based on the
+ // URL, or have them precompiled into a binary to avoid any
+ // lookup. Therefore, binary compatibility needs to be preserved
+ // on changes to types. (Use versioned type names to manage
+ // breaking changes.)
+ //
+ // Schemes other than `http`, `https` (or the empty scheme) might be
+ // used with implementation specific semantics.
+ //
+ string type_url = 1;
+
+ // Must be a valid serialized protocol buffer of the above specified type.
+ bytes value = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any_test.go b/vendor/github.com/golang/protobuf/ptypes/any_test.go
new file mode 100644
index 0000000..ed675b4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any_test.go
@@ -0,0 +1,113 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+import (
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ pb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ "github.com/golang/protobuf/ptypes/any"
+)
+
+func TestMarshalUnmarshal(t *testing.T) {
+ orig := &any.Any{Value: []byte("test")}
+
+ packed, err := MarshalAny(orig)
+ if err != nil {
+ t.Errorf("MarshalAny(%+v): got: _, %v exp: _, nil", orig, err)
+ }
+
+ unpacked := &any.Any{}
+ err = UnmarshalAny(packed, unpacked)
+ if err != nil || !proto.Equal(unpacked, orig) {
+ t.Errorf("got: %v, %+v; want nil, %+v", err, unpacked, orig)
+ }
+}
+
+func TestIs(t *testing.T) {
+ a, err := MarshalAny(&pb.FileDescriptorProto{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if Is(a, &pb.DescriptorProto{}) {
+ t.Error("FileDescriptorProto is not a DescriptorProto, but Is says it is")
+ }
+ if !Is(a, &pb.FileDescriptorProto{}) {
+ t.Error("FileDescriptorProto is indeed a FileDescriptorProto, but Is says it is not")
+ }
+}
+
+func TestIsDifferentUrlPrefixes(t *testing.T) {
+ m := &pb.FileDescriptorProto{}
+ a := &any.Any{TypeUrl: "foo/bar/" + proto.MessageName(m)}
+ if !Is(a, m) {
+ t.Errorf("message with type url %q didn't satisfy Is for type %q", a.TypeUrl, proto.MessageName(m))
+ }
+}
+
+func TestUnmarshalDynamic(t *testing.T) {
+ want := &pb.FileDescriptorProto{Name: proto.String("foo")}
+ a, err := MarshalAny(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var got DynamicAny
+ if err := UnmarshalAny(a, &got); err != nil {
+ t.Fatal(err)
+ }
+ if !proto.Equal(got.Message, want) {
+ t.Errorf("invalid result from UnmarshalAny, got %q want %q", got.Message, want)
+ }
+}
+
+func TestEmpty(t *testing.T) {
+ want := &pb.FileDescriptorProto{}
+ a, err := MarshalAny(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := Empty(a)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !proto.Equal(got, want) {
+ t.Errorf("unequal empty message, got %q, want %q", got, want)
+ }
+
+ // that's a valid type_url for a message which shouldn't be linked into this
+ // test binary. We want an error.
+ a.TypeUrl = "type.googleapis.com/google.protobuf.FieldMask"
+ if _, err := Empty(a); err == nil {
+ t.Errorf("got no error for an attempt to create a message of type %q, which shouldn't be linked in", a.TypeUrl)
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
new file mode 100644
index 0000000..c0d595d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -0,0 +1,35 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package ptypes contains code for interacting with well-known types.
+*/
+package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
new file mode 100644
index 0000000..65cb0f8
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -0,0 +1,102 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements conversions between google.protobuf.Duration
+// and time.Duration.
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ durpb "github.com/golang/protobuf/ptypes/duration"
+)
+
+const (
+ // Range of a durpb.Duration in seconds, as specified in
+ // google/protobuf/duration.proto. This is about 10,000 years in seconds.
+ maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+ minSeconds = -maxSeconds
+)
+
+// validateDuration determines whether the durpb.Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid durpb.Duration
+// may still be too large to fit into a time.Duration (the range of durpb.Duration
+// is about 10,000 years, and the range of time.Duration is about 290 years).
+func validateDuration(d *durpb.Duration) error {
+ if d == nil {
+ return errors.New("duration: nil Duration")
+ }
+ if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+ return fmt.Errorf("duration: %v: seconds out of range", d)
+ }
+ if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+ return fmt.Errorf("duration: %v: nanos out of range", d)
+ }
+ // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+ if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+ return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
+ }
+ return nil
+}
+
+// Duration converts a durpb.Duration to a time.Duration. Duration
+// returns an error if the durpb.Duration is invalid or is too large to be
+// represented in a time.Duration.
+func Duration(p *durpb.Duration) (time.Duration, error) {
+ if err := validateDuration(p); err != nil {
+ return 0, err
+ }
+ d := time.Duration(p.Seconds) * time.Second
+ if int64(d/time.Second) != p.Seconds {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+ }
+ if p.Nanos != 0 {
+ d += time.Duration(p.Nanos)
+ if (d < 0) != (p.Nanos < 0) {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+ }
+ }
+ return d, nil
+}
+
+// DurationProto converts a time.Duration to a durpb.Duration.
+func DurationProto(d time.Duration) *durpb.Duration {
+ nanos := d.Nanoseconds()
+ secs := nanos / 1e9
+ nanos -= secs * 1e9
+ return &durpb.Duration{
+ Seconds: secs,
+ Nanos: int32(nanos),
+ }
+}
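A small round-trip sketch of the two helpers above, DurationProto and Duration (illustrative only, not part of the vendored code):

// Illustrative only; not part of the vendored files.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Duration -> google.protobuf.Duration
	p := ptypes.DurationProto(90*time.Second + 500*time.Millisecond)
	fmt.Println(p.Seconds, p.Nanos) // 90 500000000

	// google.protobuf.Duration -> time.Duration, after the validation checks.
	d, err := ptypes.Duration(p)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d) // 1m30.5s
}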
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
new file mode 100644
index 0000000..b2410a0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -0,0 +1,144 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/duration.proto
+
+/*
+Package duration is a generated protocol buffer package.
+
+It is generated from these files:
+ google/protobuf/duration.proto
+
+It has these top-level messages:
+ Duration
+*/
+package duration
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+// Timestamp start = ...;
+// Timestamp end = ...;
+// Duration duration = ...;
+//
+// duration.seconds = end.seconds - start.seconds;
+// duration.nanos = end.nanos - start.nanos;
+//
+// if (duration.seconds < 0 && duration.nanos > 0) {
+// duration.seconds += 1;
+// duration.nanos -= 1000000000;
+// } else if (duration.seconds > 0 && duration.nanos < 0) {
+// duration.seconds -= 1;
+// duration.nanos += 1000000000;
+// }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+// Timestamp start = ...;
+// Duration duration = ...;
+// Timestamp end = ...;
+//
+// end.seconds = start.seconds + duration.seconds;
+// end.nanos = start.nanos + duration.nanos;
+//
+// if (end.nanos < 0) {
+// end.seconds -= 1;
+// end.nanos += 1000000000;
+// } else if (end.nanos >= 1000000000) {
+// end.seconds += 1;
+// end.nanos -= 1000000000;
+// }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+// td = datetime.timedelta(days=3, minutes=10)
+// duration = Duration()
+// duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+type Duration struct {
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+ // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+}
+
+func (m *Duration) Reset() { *m = Duration{} }
+func (m *Duration) String() string { return proto.CompactTextString(m) }
+func (*Duration) ProtoMessage() {}
+func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Duration) XXX_WellKnownType() string { return "Duration" }
+
+func (m *Duration) GetSeconds() int64 {
+ if m != nil {
+ return m.Seconds
+ }
+ return 0
+}
+
+func (m *Duration) GetNanos() int32 {
+ if m != nil {
+ return m.Nanos
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
+}
+
+func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 190 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
+ 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
+ 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
+ 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
+ 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
+ 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
+ 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
+ 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
+ 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
+ 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
+ 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
new file mode 100644
index 0000000..975fce4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
@@ -0,0 +1,117 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/duration";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DurationProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+// Timestamp start = ...;
+// Timestamp end = ...;
+// Duration duration = ...;
+//
+// duration.seconds = end.seconds - start.seconds;
+// duration.nanos = end.nanos - start.nanos;
+//
+// if (duration.seconds < 0 && duration.nanos > 0) {
+// duration.seconds += 1;
+// duration.nanos -= 1000000000;
+// } else if (duration.seconds > 0 && duration.nanos < 0) {
+// duration.seconds -= 1;
+// duration.nanos += 1000000000;
+// }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+// Timestamp start = ...;
+// Duration duration = ...;
+// Timestamp end = ...;
+//
+// end.seconds = start.seconds + duration.seconds;
+// end.nanos = start.nanos + duration.nanos;
+//
+// if (end.nanos < 0) {
+// end.seconds -= 1;
+// end.nanos += 1000000000;
+// } else if (end.nanos >= 1000000000) {
+// end.seconds += 1;
+// end.nanos -= 1000000000;
+// }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+// td = datetime.timedelta(days=3, minutes=10)
+// duration = Duration()
+// duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+message Duration {
+
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+ // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+ int64 seconds = 1;
+
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ int32 nanos = 2;
+}
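
The normalization rules spelled out in Example 1 above translate directly to Go against the generated types in this diff. The helper below is a hypothetical rendering of that pseudocode, not part of the library:

    package example

    import (
        durpb "github.com/golang/protobuf/ptypes/duration"
        tspb "github.com/golang/protobuf/ptypes/timestamp"
    )

    // durationBetween mirrors Example 1: subtract two Timestamps field by field,
    // then borrow/carry so that Seconds and Nanos agree in sign.
    func durationBetween(start, end *tspb.Timestamp) *durpb.Duration {
        d := &durpb.Duration{
            Seconds: end.Seconds - start.Seconds,
            Nanos:   end.Nanos - start.Nanos,
        }
        if d.Seconds < 0 && d.Nanos > 0 {
            d.Seconds++
            d.Nanos -= 1000000000
        } else if d.Seconds > 0 && d.Nanos < 0 {
            d.Seconds--
            d.Nanos += 1000000000
        }
        return d
    }
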
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration_test.go b/vendor/github.com/golang/protobuf/ptypes/duration_test.go
new file mode 100644
index 0000000..e00491a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration_test.go
@@ -0,0 +1,121 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+import (
+ "math"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ durpb "github.com/golang/protobuf/ptypes/duration"
+)
+
+const (
+ minGoSeconds = math.MinInt64 / int64(1e9)
+ maxGoSeconds = math.MaxInt64 / int64(1e9)
+)
+
+var durationTests = []struct {
+ proto *durpb.Duration
+ isValid bool
+ inRange bool
+ dur time.Duration
+}{
+ // The zero duration.
+ {&durpb.Duration{Seconds: 0, Nanos: 0}, true, true, 0},
+ // Some ordinary non-zero durations.
+ {&durpb.Duration{Seconds: 100, Nanos: 0}, true, true, 100 * time.Second},
+ {&durpb.Duration{Seconds: -100, Nanos: 0}, true, true, -100 * time.Second},
+ {&durpb.Duration{Seconds: 100, Nanos: 987}, true, true, 100*time.Second + 987},
+ {&durpb.Duration{Seconds: -100, Nanos: -987}, true, true, -(100*time.Second + 987)},
+ // The largest duration representable in Go.
+ {&durpb.Duration{Seconds: maxGoSeconds, Nanos: int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, true, math.MaxInt64},
+ // The smallest duration representable in Go.
+ {&durpb.Duration{Seconds: minGoSeconds, Nanos: int32(math.MinInt64 - 1e9*minGoSeconds)}, true, true, math.MinInt64},
+ {nil, false, false, 0},
+ {&durpb.Duration{Seconds: -100, Nanos: 987}, false, false, 0},
+ {&durpb.Duration{Seconds: 100, Nanos: -987}, false, false, 0},
+ {&durpb.Duration{Seconds: math.MinInt64, Nanos: 0}, false, false, 0},
+ {&durpb.Duration{Seconds: math.MaxInt64, Nanos: 0}, false, false, 0},
+ // The largest valid duration.
+ {&durpb.Duration{Seconds: maxSeconds, Nanos: 1e9 - 1}, true, false, 0},
+ // The smallest valid duration.
+ {&durpb.Duration{Seconds: minSeconds, Nanos: -(1e9 - 1)}, true, false, 0},
+ // The smallest invalid duration above the valid range.
+ {&durpb.Duration{Seconds: maxSeconds + 1, Nanos: 0}, false, false, 0},
+ // The largest invalid duration below the valid range.
+ {&durpb.Duration{Seconds: minSeconds - 1, Nanos: -(1e9 - 1)}, false, false, 0},
+ // One nanosecond past the largest duration representable in Go.
+ {&durpb.Duration{Seconds: maxGoSeconds, Nanos: int32(math.MaxInt64-1e9*maxGoSeconds) + 1}, true, false, 0},
+ // One nanosecond past the smallest duration representable in Go.
+ {&durpb.Duration{Seconds: minGoSeconds, Nanos: int32(math.MinInt64-1e9*minGoSeconds) - 1}, true, false, 0},
+ // One second past the largest duration representable in Go.
+ {&durpb.Duration{Seconds: maxGoSeconds + 1, Nanos: int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, false, 0},
+ // One second past the smallest duration representable in Go.
+ {&durpb.Duration{Seconds: minGoSeconds - 1, Nanos: int32(math.MinInt64 - 1e9*minGoSeconds)}, true, false, 0},
+}
+
+func TestValidateDuration(t *testing.T) {
+ for _, test := range durationTests {
+ err := validateDuration(test.proto)
+ gotValid := (err == nil)
+ if gotValid != test.isValid {
+ t.Errorf("validateDuration(%v) = %t, want %t", test.proto, gotValid, test.isValid)
+ }
+ }
+}
+
+func TestDuration(t *testing.T) {
+ for _, test := range durationTests {
+ got, err := Duration(test.proto)
+ gotOK := (err == nil)
+ wantOK := test.isValid && test.inRange
+ if gotOK != wantOK {
+ t.Errorf("Duration(%v) ok = %t, want %t", test.proto, gotOK, wantOK)
+ }
+ if err == nil && got != test.dur {
+ t.Errorf("Duration(%v) = %v, want %v", test.proto, got, test.dur)
+ }
+ }
+}
+
+func TestDurationProto(t *testing.T) {
+ for _, test := range durationTests {
+ if test.isValid && test.inRange {
+ got := DurationProto(test.dur)
+ if !proto.Equal(got, test.proto) {
+ t.Errorf("DurationProto(%v) = %v, want %v", test.dur, got, test.proto)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
new file mode 100644
index 0000000..e877b72
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
@@ -0,0 +1,66 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/empty.proto
+
+/*
+Package empty is a generated protocol buffer package.
+
+It is generated from these files:
+ google/protobuf/empty.proto
+
+It has these top-level messages:
+ Empty
+*/
+package empty
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+// service Foo {
+// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+// }
+//
+// The JSON representation for `Empty` is an empty JSON object `{}`.
+type Empty struct {
+}
+
+func (m *Empty) Reset() { *m = Empty{} }
+func (m *Empty) String() string { return proto.CompactTextString(m) }
+func (*Empty) ProtoMessage() {}
+func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Empty) XXX_WellKnownType() string { return "Empty" }
+
+func init() {
+ proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
+}
+
+func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 148 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28,
+ 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57,
+ 0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36,
+ 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf,
+ 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c,
+ 0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10,
+ 0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40,
+ 0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6,
+ 0xb7, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
new file mode 100644
index 0000000..03cacd2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
@@ -0,0 +1,52 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/empty";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "EmptyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+option cc_enable_arenas = true;
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+// service Foo {
+// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+// }
+//
+// The JSON representation for `Empty` is an empty JSON object `{}`.
+message Empty {}
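
In Go the message is just a field-less struct, so an RPC that neither takes nor returns data passes &empty.Empty{} in both directions. The interface below is a hypothetical hand-written sketch of what a server for the Foo/Bar example might look like; it is not generated code, and the BarServer name is invented for illustration:

    package example

    import (
        "context"

        "github.com/golang/protobuf/ptypes/empty"
    )

    // BarServer is an illustrative interface, not generated code.
    type BarServer interface {
        Bar(ctx context.Context, req *empty.Empty) (*empty.Empty, error)
    }

    type server struct{}

    func (server) Bar(ctx context.Context, _ *empty.Empty) (*empty.Empty, error) {
        // Nothing to read from the request and nothing to report back.
        return &empty.Empty{}, nil
    }
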
diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh
new file mode 100755
index 0000000..b50a941
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/regen.sh
@@ -0,0 +1,43 @@
+#!/bin/bash -e
+#
+# This script fetches and rebuilds the "well-known types" protocol buffers.
+# To run this you will need protoc and goprotobuf installed;
+# see https://github.com/golang/protobuf for instructions.
+# You also need Go and Git installed.
+
+PKG=github.com/golang/protobuf/ptypes
+UPSTREAM=https://github.com/google/protobuf
+UPSTREAM_SUBDIR=src/google/protobuf
+PROTO_FILES=(any duration empty struct timestamp wrappers)
+
+function die() {
+ echo 1>&2 $*
+ exit 1
+}
+
+# Sanity check that the right tools are accessible.
+for tool in go git protoc protoc-gen-go; do
+ q=$(which $tool) || die "didn't find $tool"
+ echo 1>&2 "$tool: $q"
+done
+
+tmpdir=$(mktemp -d -t regen-wkt.XXXXXX)
+trap 'rm -rf $tmpdir' EXIT
+
+echo -n 1>&2 "finding package dir... "
+pkgdir=$(go list -f '{{.Dir}}' $PKG)
+echo 1>&2 $pkgdir
+base=$(echo $pkgdir | sed "s,/$PKG\$,,")
+echo 1>&2 "base: $base"
+cd "$base"
+
+echo 1>&2 "fetching latest protos... "
+git clone -q $UPSTREAM $tmpdir
+
+for file in ${PROTO_FILES[@]}; do
+ echo 1>&2 "* $file"
+ protoc --go_out=. -I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die
+ cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file
+done
+
+echo 1>&2 "All OK"
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
new file mode 100644
index 0000000..4cfe608
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
@@ -0,0 +1,380 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/struct.proto
+
+/*
+Package structpb is a generated protocol buffer package.
+
+It is generated from these files:
+ google/protobuf/struct.proto
+
+It has these top-level messages:
+ Struct
+ Value
+ ListValue
+*/
+package structpb
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+type NullValue int32
+
+const (
+ // Null value.
+ NullValue_NULL_VALUE NullValue = 0
+)
+
+var NullValue_name = map[int32]string{
+ 0: "NULL_VALUE",
+}
+var NullValue_value = map[string]int32{
+ "NULL_VALUE": 0,
+}
+
+func (x NullValue) String() string {
+ return proto.EnumName(NullValue_name, int32(x))
+}
+func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (NullValue) XXX_WellKnownType() string { return "NullValue" }
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is a JSON object.
+type Struct struct {
+ // Unordered map of dynamically typed values.
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *Struct) Reset() { *m = Struct{} }
+func (m *Struct) String() string { return proto.CompactTextString(m) }
+func (*Struct) ProtoMessage() {}
+func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Struct) XXX_WellKnownType() string { return "Struct" }
+
+func (m *Struct) GetFields() map[string]*Value {
+ if m != nil {
+ return m.Fields
+ }
+ return nil
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is a JSON value.
+type Value struct {
+ // The kind of value.
+ //
+ // Types that are valid to be assigned to Kind:
+ // *Value_NullValue
+ // *Value_NumberValue
+ // *Value_StringValue
+ // *Value_BoolValue
+ // *Value_StructValue
+ // *Value_ListValue
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (m *Value) Reset() { *m = Value{} }
+func (m *Value) String() string { return proto.CompactTextString(m) }
+func (*Value) ProtoMessage() {}
+func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (*Value) XXX_WellKnownType() string { return "Value" }
+
+type isValue_Kind interface {
+ isValue_Kind()
+}
+
+type Value_NullValue struct {
+ NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"`
+}
+type Value_NumberValue struct {
+ NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,oneof"`
+}
+type Value_StringValue struct {
+ StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"`
+}
+type Value_BoolValue struct {
+ BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"`
+}
+type Value_StructValue struct {
+ StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"`
+}
+type Value_ListValue struct {
+ ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+func (*Value_NumberValue) isValue_Kind() {}
+func (*Value_StringValue) isValue_Kind() {}
+func (*Value_BoolValue) isValue_Kind() {}
+func (*Value_StructValue) isValue_Kind() {}
+func (*Value_ListValue) isValue_Kind() {}
+
+func (m *Value) GetKind() isValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (m *Value) GetNullValue() NullValue {
+ if x, ok := m.GetKind().(*Value_NullValue); ok {
+ return x.NullValue
+ }
+ return NullValue_NULL_VALUE
+}
+
+func (m *Value) GetNumberValue() float64 {
+ if x, ok := m.GetKind().(*Value_NumberValue); ok {
+ return x.NumberValue
+ }
+ return 0
+}
+
+func (m *Value) GetStringValue() string {
+ if x, ok := m.GetKind().(*Value_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (m *Value) GetBoolValue() bool {
+ if x, ok := m.GetKind().(*Value_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (m *Value) GetStructValue() *Struct {
+ if x, ok := m.GetKind().(*Value_StructValue); ok {
+ return x.StructValue
+ }
+ return nil
+}
+
+func (m *Value) GetListValue() *ListValue {
+ if x, ok := m.GetKind().(*Value_ListValue); ok {
+ return x.ListValue
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{
+ (*Value_NullValue)(nil),
+ (*Value_NumberValue)(nil),
+ (*Value_StringValue)(nil),
+ (*Value_BoolValue)(nil),
+ (*Value_StructValue)(nil),
+ (*Value_ListValue)(nil),
+ }
+}
+
+func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Value)
+ // kind
+ switch x := m.Kind.(type) {
+ case *Value_NullValue:
+ b.EncodeVarint(1<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.NullValue))
+ case *Value_NumberValue:
+ b.EncodeVarint(2<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.NumberValue))
+ case *Value_StringValue:
+ b.EncodeVarint(3<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.StringValue)
+ case *Value_BoolValue:
+ t := uint64(0)
+ if x.BoolValue {
+ t = 1
+ }
+ b.EncodeVarint(4<<3 | proto.WireVarint)
+ b.EncodeVarint(t)
+ case *Value_StructValue:
+ b.EncodeVarint(5<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.StructValue); err != nil {
+ return err
+ }
+ case *Value_ListValue:
+ b.EncodeVarint(6<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ListValue); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("Value.Kind has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Value)
+ switch tag {
+ case 1: // kind.null_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Kind = &Value_NullValue{NullValue(x)}
+ return true, err
+ case 2: // kind.number_value
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Kind = &Value_NumberValue{math.Float64frombits(x)}
+ return true, err
+ case 3: // kind.string_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Kind = &Value_StringValue{x}
+ return true, err
+ case 4: // kind.bool_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Kind = &Value_BoolValue{x != 0}
+ return true, err
+ case 5: // kind.struct_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Struct)
+ err := b.DecodeMessage(msg)
+ m.Kind = &Value_StructValue{msg}
+ return true, err
+ case 6: // kind.list_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(ListValue)
+ err := b.DecodeMessage(msg)
+ m.Kind = &Value_ListValue{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Value_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Value)
+ // kind
+ switch x := m.Kind.(type) {
+ case *Value_NullValue:
+ n += proto.SizeVarint(1<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.NullValue))
+ case *Value_NumberValue:
+ n += proto.SizeVarint(2<<3 | proto.WireFixed64)
+ n += 8
+ case *Value_StringValue:
+ n += proto.SizeVarint(3<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.StringValue)))
+ n += len(x.StringValue)
+ case *Value_BoolValue:
+ n += proto.SizeVarint(4<<3 | proto.WireVarint)
+ n += 1
+ case *Value_StructValue:
+ s := proto.Size(x.StructValue)
+ n += proto.SizeVarint(5<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Value_ListValue:
+ s := proto.Size(x.ListValue)
+ n += proto.SizeVarint(6<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is a JSON array.
+type ListValue struct {
+ // Repeated field of dynamically typed values.
+ Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
+}
+
+func (m *ListValue) Reset() { *m = ListValue{} }
+func (m *ListValue) String() string { return proto.CompactTextString(m) }
+func (*ListValue) ProtoMessage() {}
+func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
+
+func (m *ListValue) GetValues() []*Value {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
+ proto.RegisterType((*Value)(nil), "google.protobuf.Value")
+ proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
+ proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
+}
+
+func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 417 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
+ 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
+ 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
+ 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
+ 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
+ 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
+ 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
+ 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
+ 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
+ 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
+ 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
+ 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
+ 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
+ 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
+ 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
+ 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
+ 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
+ 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
+ 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
+ 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
+ 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
+ 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
+ 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
+ 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
+ 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
+ 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
+ 0x00,
+}
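
Because Kind is a oneof, each variant is a distinct wrapper type (Value_StringValue, Value_NumberValue, and so on), and the Get* accessors fall back to the zero value when a different variant is set. A small hand-rolled sketch of building and reading a Struct; the field names and values are made up for illustration:

    package main

    import (
        "fmt"

        structpb "github.com/golang/protobuf/ptypes/struct"
    )

    func main() {
        // The Go equivalent of the JSON object {"name": "example", "count": 42, "beta": false}.
        s := &structpb.Struct{
            Fields: map[string]*structpb.Value{
                "name":  {Kind: &structpb.Value_StringValue{StringValue: "example"}},
                "count": {Kind: &structpb.Value_NumberValue{NumberValue: 42}},
                "beta":  {Kind: &structpb.Value_BoolValue{BoolValue: false}},
            },
        }

        // Oneof accessors return the zero value when some other variant is set.
        fmt.Println(s.Fields["name"].GetStringValue())  // example
        fmt.Println(s.Fields["count"].GetNumberValue()) // 42
        fmt.Println(s.Fields["name"].GetBoolValue())    // false (not the bool variant)
    }
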
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
new file mode 100644
index 0000000..7d7808e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
@@ -0,0 +1,96 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "StructProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is a JSON object.
+message Struct {
+ // Unordered map of dynamically typed values.
+ map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is a JSON value.
+message Value {
+ // The kind of value.
+ oneof kind {
+ // Represents a null value.
+ NullValue null_value = 1;
+ // Represents a double value.
+ double number_value = 2;
+ // Represents a string value.
+ string string_value = 3;
+ // Represents a boolean value.
+ bool bool_value = 4;
+ // Represents a structured value.
+ Struct struct_value = 5;
+ // Represents a repeated `Value`.
+ ListValue list_value = 6;
+ }
+}
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+enum NullValue {
+ // Null value.
+ NULL_VALUE = 0;
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is a JSON array.
+message ListValue {
+ // Repeated field of dynamically typed values.
+ repeated Value values = 1;
+}
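
Since Struct, Value and ListValue are well-known types, the vendored jsonpb package (elsewhere in this dependency tree) maps them to and from plain JSON rather than the usual message encoding. A sketch of that round trip, assuming jsonpb's UnmarshalString and Marshaler behave as in upstream golang/protobuf; the input object is arbitrary:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/jsonpb"
        structpb "github.com/golang/protobuf/ptypes/struct"
    )

    func main() {
        // Parse an arbitrary JSON object straight into a Struct.
        var s structpb.Struct
        if err := jsonpb.UnmarshalString(`{"enabled": true, "retries": 3}`, &s); err != nil {
            panic(err)
        }
        fmt.Println(s.Fields["retries"].GetNumberValue()) // 3 (JSON numbers become doubles)

        // And back to its JSON form.
        out, err := (&jsonpb.Marshaler{}).MarshalToString(&s)
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // {"enabled":true,"retries":3}
    }
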
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
new file mode 100644
index 0000000..47f10db
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -0,0 +1,134 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements operations on google.protobuf.Timestamp.
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ tspb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+const (
+ // Seconds field of the earliest valid Timestamp.
+ // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ minValidSeconds = -62135596800
+ // Seconds field just after the latest valid Timestamp.
+ // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *tspb.Timestamp) error {
+ if ts == nil {
+ return errors.New("timestamp: nil Timestamp")
+ }
+ if ts.Seconds < minValidSeconds {
+ return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+ }
+ if ts.Seconds >= maxValidSeconds {
+ return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+ }
+ if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+ return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+ }
+ return nil
+}
+
+// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
+ // Don't return the zero value on error, because it corresponds to a valid
+ // timestamp. Instead return whatever time.Unix gives us.
+ var t time.Time
+ if ts == nil {
+ t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+ } else {
+ t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+ }
+ return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+func TimestampNow() *tspb.Timestamp {
+ ts, err := TimestampProto(time.Now())
+ if err != nil {
+ panic("ptypes: time.Now() out of Timestamp range")
+ }
+ return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
+ seconds := t.Unix()
+ nanos := int32(t.Sub(time.Unix(seconds, 0)))
+ ts := &tspb.Timestamp{
+ Seconds: seconds,
+ Nanos: nanos,
+ }
+ if err := validateTimestamp(ts); err != nil {
+ return nil, err
+ }
+ return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
+// Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *tspb.Timestamp) string {
+ t, err := Timestamp(ts)
+ if err != nil {
+ return fmt.Sprintf("(%v)", err)
+ }
+ return t.Format(time.RFC3339Nano)
+}
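
Putting the helpers above together: TimestampNow, TimestampProto, Timestamp and TimestampString cover the usual conversions between time.Time and the proto message. A minimal sketch; the example date is taken from the test data below:

    package main

    import (
        "fmt"
        "time"

        "github.com/golang/protobuf/ptypes"
    )

    func main() {
        // The current time, already checked to be inside the valid Timestamp range.
        now := ptypes.TimestampNow()
        fmt.Println(ptypes.TimestampString(now)) // an RFC 3339 string, nanosecond precision

        // Round-trip an arbitrary time.Time through the proto representation.
        ts, err := ptypes.TimestampProto(time.Date(2011, 1, 26, 3, 25, 45, 0, time.UTC))
        if err != nil {
            panic(err) // only possible outside the years [1, 10000)
        }
        t, err := ptypes.Timestamp(ts)
        if err != nil {
            panic(err)
        }
        fmt.Println(t) // 2011-01-26 03:25:45 +0000 UTC
    }
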
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 0000000..e23e4a2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,160 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/timestamp.proto
+
+/*
+Package timestamp is a generated protocol buffer package.
+
+It is generated from these files:
+ google/protobuf/timestamp.proto
+
+It has these top-level messages:
+ Timestamp
+*/
+package timestamp
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(time(NULL));
+// timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+// struct timeval tv;
+// gettimeofday(&tv, NULL);
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(tv.tv_sec);
+// timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+// FILETIME ft;
+// GetSystemTimeAsFileTime(&ft);
+// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+// Timestamp timestamp;
+// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+// long millis = System.currentTimeMillis();
+//
+// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+// .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+// timestamp = Timestamp()
+// timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required, though only UTC (as indicated by "Z") is presently supported.
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
+// to obtain a formatter capable of generating timestamps in this format.
+//
+//
+type Timestamp struct {
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+}
+
+func (m *Timestamp) Reset() { *m = Timestamp{} }
+func (m *Timestamp) String() string { return proto.CompactTextString(m) }
+func (*Timestamp) ProtoMessage() {}
+func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
+
+func (m *Timestamp) GetSeconds() int64 {
+ if m != nil {
+ return m.Seconds
+ }
+ return 0
+}
+
+func (m *Timestamp) GetNanos() int32 {
+ if m != nil {
+ return m.Nanos
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
+}
+
+func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 191 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
+ 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
+ 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
+ 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
+ 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
+ 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
+ 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
+ 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
+ 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
+ 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
+ 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
new file mode 100644
index 0000000..b7cbd17
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
@@ -0,0 +1,133 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/timestamp";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "TimestampProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(time(NULL));
+// timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+// struct timeval tv;
+// gettimeofday(&tv, NULL);
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(tv.tv_sec);
+// timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+// FILETIME ft;
+// GetSystemTimeAsFileTime(&ft);
+// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+// Timestamp timestamp;
+// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+// long millis = System.currentTimeMillis();
+//
+// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+// .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+// timestamp = Timestamp()
+// timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required, though only UTC (as indicated by "Z") is presently supported.
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
+// to obtain a formatter capable of generating timestamps in this format.
+//
+//
+message Timestamp {
+
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ int64 seconds = 1;
+
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ int32 nanos = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go b/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go
new file mode 100644
index 0000000..6e3c969
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go
@@ -0,0 +1,153 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+import (
+ "math"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ tspb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+var tests = []struct {
+ ts *tspb.Timestamp
+ valid bool
+ t time.Time
+}{
+ // The timestamp representing the Unix epoch date.
+ {&tspb.Timestamp{Seconds: 0, Nanos: 0}, true, utcDate(1970, 1, 1)},
+ // The smallest representable timestamp.
+ {&tspb.Timestamp{Seconds: math.MinInt64, Nanos: math.MinInt32}, false,
+ time.Unix(math.MinInt64, math.MinInt32).UTC()},
+ // The smallest representable timestamp with non-negative nanos.
+ {&tspb.Timestamp{Seconds: math.MinInt64, Nanos: 0}, false, time.Unix(math.MinInt64, 0).UTC()},
+ // The earliest valid timestamp.
+ {&tspb.Timestamp{Seconds: minValidSeconds, Nanos: 0}, true, utcDate(1, 1, 1)},
+ //"0001-01-01T00:00:00Z"},
+ // The largest representable timestamp.
+ {&tspb.Timestamp{Seconds: math.MaxInt64, Nanos: math.MaxInt32}, false,
+ time.Unix(math.MaxInt64, math.MaxInt32).UTC()},
+ // The largest representable timestamp with nanos in range.
+ {&tspb.Timestamp{Seconds: math.MaxInt64, Nanos: 1e9 - 1}, false,
+ time.Unix(math.MaxInt64, 1e9-1).UTC()},
+ // The largest valid timestamp.
+ {&tspb.Timestamp{Seconds: maxValidSeconds - 1, Nanos: 1e9 - 1}, true,
+ time.Date(9999, 12, 31, 23, 59, 59, 1e9-1, time.UTC)},
+ // The smallest invalid timestamp that is larger than the valid range.
+ {&tspb.Timestamp{Seconds: maxValidSeconds, Nanos: 0}, false, time.Unix(maxValidSeconds, 0).UTC()},
+ // A date before the epoch.
+ {&tspb.Timestamp{Seconds: -281836800, Nanos: 0}, true, utcDate(1961, 1, 26)},
+ // A date after the epoch.
+ {&tspb.Timestamp{Seconds: 1296000000, Nanos: 0}, true, utcDate(2011, 1, 26)},
+ // A date after the epoch, in the middle of the day.
+ {&tspb.Timestamp{Seconds: 1296012345, Nanos: 940483}, true,
+ time.Date(2011, 1, 26, 3, 25, 45, 940483, time.UTC)},
+}
+
+func TestValidateTimestamp(t *testing.T) {
+ for _, s := range tests {
+ got := validateTimestamp(s.ts)
+ if (got == nil) != s.valid {
+ t.Errorf("validateTimestamp(%v) = %v, want %v", s.ts, got, s.valid)
+ }
+ }
+}
+
+func TestTimestamp(t *testing.T) {
+ for _, s := range tests {
+ got, err := Timestamp(s.ts)
+ if (err == nil) != s.valid {
+ t.Errorf("Timestamp(%v) error = %v, but valid = %t", s.ts, err, s.valid)
+ } else if s.valid && got != s.t {
+ t.Errorf("Timestamp(%v) = %v, want %v", s.ts, got, s.t)
+ }
+ }
+ // Special case: a nil Timestamp is an error, but returns the 0 Unix time.
+ got, err := Timestamp(nil)
+ want := time.Unix(0, 0).UTC()
+ if got != want {
+ t.Errorf("Timestamp(nil) = %v, want %v", got, want)
+ }
+ if err == nil {
+ t.Errorf("Timestamp(nil) error = nil, expected error")
+ }
+}
+
+func TestTimestampProto(t *testing.T) {
+ for _, s := range tests {
+ got, err := TimestampProto(s.t)
+ if (err == nil) != s.valid {
+ t.Errorf("TimestampProto(%v) error = %v, but valid = %t", s.t, err, s.valid)
+ } else if s.valid && !proto.Equal(got, s.ts) {
+ t.Errorf("TimestampProto(%v) = %v, want %v", s.t, got, s.ts)
+ }
+ }
+ // No corresponding special case here: no time.Time results in a nil Timestamp.
+}
+
+func TestTimestampString(t *testing.T) {
+ for _, test := range []struct {
+ ts *tspb.Timestamp
+ want string
+ }{
+ // Not much testing needed because presumably time.Format is
+ // well-tested.
+ {&tspb.Timestamp{Seconds: 0, Nanos: 0}, "1970-01-01T00:00:00Z"},
+ {&tspb.Timestamp{Seconds: minValidSeconds - 1, Nanos: 0}, "(timestamp: seconds:-62135596801 before 0001-01-01)"},
+ } {
+ got := TimestampString(test.ts)
+ if got != test.want {
+ t.Errorf("TimestampString(%v) = %q, want %q", test.ts, got, test.want)
+ }
+ }
+}
+
+func utcDate(year, month, day int) time.Time {
+ return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
+}
+
+func TestTimestampNow(t *testing.T) {
+ // Bracket the expected time.
+ before := time.Now()
+ ts := TimestampNow()
+ after := time.Now()
+
+ tm, err := Timestamp(ts)
+ if err != nil {
+ t.Errorf("between %v and %v\nTimestampNow() = %v\nwhich is invalid (%v)", before, after, ts, err)
+ }
+ if tm.Before(before) || tm.After(after) {
+ t.Errorf("between %v and %v\nTimestamp(TimestampNow()) = %v", before, after, tm)
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
new file mode 100644
index 0000000..0ed59bf
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
@@ -0,0 +1,260 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/wrappers.proto
+
+/*
+Package wrappers is a generated protocol buffer package.
+
+It is generated from these files:
+ google/protobuf/wrappers.proto
+
+It has these top-level messages:
+ DoubleValue
+ FloatValue
+ Int64Value
+ UInt64Value
+ Int32Value
+ UInt32Value
+ BoolValue
+ StringValue
+ BytesValue
+*/
+package wrappers
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+type DoubleValue struct {
+ // The double value.
+ Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *DoubleValue) Reset() { *m = DoubleValue{} }
+func (m *DoubleValue) String() string { return proto.CompactTextString(m) }
+func (*DoubleValue) ProtoMessage() {}
+func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" }
+
+func (m *DoubleValue) GetValue() float64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+type FloatValue struct {
+ // The float value.
+ Value float32 `protobuf:"fixed32,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *FloatValue) Reset() { *m = FloatValue{} }
+func (m *FloatValue) String() string { return proto.CompactTextString(m) }
+func (*FloatValue) ProtoMessage() {}
+func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" }
+
+func (m *FloatValue) GetValue() float32 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+type Int64Value struct {
+ // The int64 value.
+ Value int64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *Int64Value) Reset() { *m = Int64Value{} }
+func (m *Int64Value) String() string { return proto.CompactTextString(m) }
+func (*Int64Value) ProtoMessage() {}
+func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" }
+
+func (m *Int64Value) GetValue() int64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+type UInt64Value struct {
+ // The uint64 value.
+ Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *UInt64Value) Reset() { *m = UInt64Value{} }
+func (m *UInt64Value) String() string { return proto.CompactTextString(m) }
+func (*UInt64Value) ProtoMessage() {}
+func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" }
+
+func (m *UInt64Value) GetValue() uint64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+type Int32Value struct {
+ // The int32 value.
+ Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *Int32Value) Reset() { *m = Int32Value{} }
+func (m *Int32Value) String() string { return proto.CompactTextString(m) }
+func (*Int32Value) ProtoMessage() {}
+func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" }
+
+func (m *Int32Value) GetValue() int32 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+type UInt32Value struct {
+ // The uint32 value.
+ Value uint32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *UInt32Value) Reset() { *m = UInt32Value{} }
+func (m *UInt32Value) String() string { return proto.CompactTextString(m) }
+func (*UInt32Value) ProtoMessage() {}
+func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" }
+
+func (m *UInt32Value) GetValue() uint32 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+type BoolValue struct {
+ // The bool value.
+ Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *BoolValue) Reset() { *m = BoolValue{} }
+func (m *BoolValue) String() string { return proto.CompactTextString(m) }
+func (*BoolValue) ProtoMessage() {}
+func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" }
+
+func (m *BoolValue) GetValue() bool {
+ if m != nil {
+ return m.Value
+ }
+ return false
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+type StringValue struct {
+ // The string value.
+ Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *StringValue) Reset() { *m = StringValue{} }
+func (m *StringValue) String() string { return proto.CompactTextString(m) }
+func (*StringValue) ProtoMessage() {}
+func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (*StringValue) XXX_WellKnownType() string { return "StringValue" }
+
+func (m *StringValue) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+type BytesValue struct {
+ // The bytes value.
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *BytesValue) Reset() { *m = BytesValue{} }
+func (m *BytesValue) String() string { return proto.CompactTextString(m) }
+func (*BytesValue) ProtoMessage() {}
+func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" }
+
+func (m *BytesValue) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue")
+ proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue")
+ proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value")
+ proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value")
+ proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value")
+ proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value")
+ proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue")
+ proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
+ proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
+}
+
+func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 259 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c,
+ 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca,
+ 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c,
+ 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5,
+ 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13,
+ 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8,
+ 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca,
+ 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a,
+ 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d,
+ 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24,
+ 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f,
+ 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c,
+ 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e,
+ 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b,
+ 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe,
+ 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
new file mode 100644
index 0000000..0194763
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
@@ -0,0 +1,118 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Wrappers for primitive (non-message) types. These types are useful
+// for embedding primitives in the `google.protobuf.Any` type and for places
+// where we need to distinguish between the absence of a primitive
+// typed field and its default value.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/wrappers";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "WrappersProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+message DoubleValue {
+ // The double value.
+ double value = 1;
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+message FloatValue {
+ // The float value.
+ float value = 1;
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+message Int64Value {
+ // The int64 value.
+ int64 value = 1;
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+message UInt64Value {
+ // The uint64 value.
+ uint64 value = 1;
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+message Int32Value {
+ // The int32 value.
+ int32 value = 1;
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+message UInt32Value {
+ // The uint32 value.
+ uint32 value = 1;
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+message BoolValue {
+ // The bool value.
+ bool value = 1;
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+message StringValue {
+ // The string value.
+ string value = 1;
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+message BytesValue {
+ // The bytes value.
+ bytes value = 1;
+}
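The wrapper messages above exist so a field can distinguish "unset" from "set to the default value". A minimal sketch of that distinction using the generated Go types from this package; it relies only on the nil-safe GetValue accessor generated above:

```go
package main

import (
	"fmt"

	wrappers "github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	var unset *wrappers.StringValue           // field absent
	empty := &wrappers.StringValue{Value: ""} // field present, default value

	// GetValue is nil-safe, so plain reads look identical...
	fmt.Println(unset.GetValue() == empty.GetValue()) // true

	// ...but presence is still observable, unlike a bare `string` field.
	fmt.Println(unset == nil, empty == nil) // true false
}
```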
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 3e705a0..4c8ee56 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -652,7 +652,7 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
if err == nil {
return
}
- if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) {
+ if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout {
// Boring, expected errors.
sc.vlogf(format, args...)
} else {
@@ -897,8 +897,11 @@ func (sc *serverConn) sendServeMsg(msg interface{}) {
}
}
-// readPreface reads the ClientPreface greeting from the peer
-// or returns an error on timeout or an invalid greeting.
+var errPrefaceTimeout = errors.New("timeout waiting for client preface")
+
+// readPreface reads the ClientPreface greeting from the peer or
+// returns errPrefaceTimeout on timeout, or an error if the greeting
+// is invalid.
func (sc *serverConn) readPreface() error {
errc := make(chan error, 1)
go func() {
@@ -916,7 +919,7 @@ func (sc *serverConn) readPreface() error {
defer timer.Stop()
select {
case <-timer.C:
- return errors.New("timeout waiting for client preface")
+ return errPrefaceTimeout
case err := <-errc:
if err == nil {
if VerboseLogs {
diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml
new file mode 100644
index 0000000..fa139db
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+ - tip
+
+install:
+ - export GOPATH="$HOME/gopath"
+ - mkdir -p "$GOPATH/src/golang.org/x"
+ - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
+ - go get -v -t -d golang.org/x/oauth2/...
+
+script:
+ - go test -v golang.org/x/oauth2/...
diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md
new file mode 100644
index 0000000..46aa2b1
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing to Go
+
+Go is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+
+## Filing issues
+
+When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+**We do not accept GitHub pull requests**
+(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
+
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
new file mode 100644
index 0000000..eb8dcee
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/README.md
@@ -0,0 +1,77 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2)
+
+The oauth2 package contains a client implementation of the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+Or you can manually git clone the repository to
+`$(go env GOPATH)/src/golang.org/x/oauth2`.
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015), we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package.
+
+This means it's no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package. (You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+
+```go
+import (
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ newappengine "google.golang.org/appengine"
+ newurlfetch "google.golang.org/appengine/urlfetch"
+
+ "appengine"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+ var c appengine.Context = appengine.NewContext(r)
+ c.Infof("Logging a message with the old package")
+
+ var ctx context.Context = newappengine.NewContext(r)
+ client := &http.Client{
+ Transport: &oauth2.Transport{
+ Source: google.AppEngineTokenSource(ctx, "scope"),
+ Base: &newurlfetch.Transport{Context: ctx},
+ },
+ }
+ client.Get("...")
+}
+```
+
+## Report Issues / Send Patches
+
+This repository uses Gerrit for code changes. To learn how to submit changes to
+this repository, see https://golang.org/doc/contribute.html.
+
+The main issue tracker for the oauth2 repository is located at
+https://github.com/golang/oauth2/issues.
diff --git a/vendor/golang.org/x/oauth2/amazon/amazon.go b/vendor/golang.org/x/oauth2/amazon/amazon.go
new file mode 100644
index 0000000..d21da11
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/amazon/amazon.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package amazon provides constants for using OAuth2 to access Amazon.
+package amazon
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is Amazon's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://www.amazon.com/ap/oa",
+ TokenURL: "https://api.amazon.com/auth/o2/token",
+}
diff --git a/vendor/golang.org/x/oauth2/bitbucket/bitbucket.go b/vendor/golang.org/x/oauth2/bitbucket/bitbucket.go
new file mode 100644
index 0000000..44af1f1
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/bitbucket/bitbucket.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bitbucket provides constants for using OAuth2 to access Bitbucket.
+package bitbucket
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is Bitbucket's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://bitbucket.org/site/oauth2/authorize",
+ TokenURL: "https://bitbucket.org/site/oauth2/access_token",
+}
diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go
new file mode 100644
index 0000000..8962c49
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/client_appengine.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+// App Engine hooks.
+
+package oauth2
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2/internal"
+ "google.golang.org/appengine/urlfetch"
+)
+
+func init() {
+ internal.RegisterContextClientFunc(contextClientAppEngine)
+}
+
+func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
+ return urlfetch.Client(ctx), nil
+}
diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go
new file mode 100644
index 0000000..53a96b6
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go
@@ -0,0 +1,104 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package clientcredentials implements the OAuth 2.0 "client credentials" token flow,
+// also known as "two-legged OAuth 2.0".
+//
+// This should be used when the client is acting on its own behalf or when the client
+// is the resource owner. It may also be used when requesting access to protected
+// resources based on an authorization previously arranged with the authorization
+// server.
+//
+// See https://tools.ietf.org/html/rfc6749#section-4.4
+package clientcredentials // import "golang.org/x/oauth2/clientcredentials"
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/internal"
+)
+
+// Config describes a 2-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+type Config struct {
+ // ClientID is the application's ID.
+ ClientID string
+
+ // ClientSecret is the application's secret.
+ ClientSecret string
+
+ // TokenURL is the resource server's token endpoint
+ // URL. This is a constant specific to each server.
+ TokenURL string
+
+ // Scopes specifies optional requested permissions.
+ Scopes []string
+
+ // EndpointParams specifies additional parameters for requests to the token endpoint.
+ EndpointParams url.Values
+}
+
+// Token uses client credentials to retrieve a token.
+// The HTTP client to use is derived from the context.
+// If nil, http.DefaultClient is used.
+func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) {
+ return c.TokenSource(ctx).Token()
+}
+
+// Client returns an HTTP client using the provided token.
+// The token will auto-refresh as necessary. The underlying
+// HTTP transport will be obtained using the provided context.
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context) *http.Client {
+ return oauth2.NewClient(ctx, c.TokenSource(ctx))
+}
+
+// TokenSource returns a TokenSource that returns t until t expires,
+// automatically refreshing it as necessary using the provided context and the
+// client ID and client secret.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
+ source := &tokenSource{
+ ctx: ctx,
+ conf: c,
+ }
+ return oauth2.ReuseTokenSource(nil, source)
+}
+
+type tokenSource struct {
+ ctx context.Context
+ conf *Config
+}
+
+// Token refreshes the token by using a new client credentials request.
+// Tokens received this way do not include a refresh token.
+func (c *tokenSource) Token() (*oauth2.Token, error) {
+ v := url.Values{
+ "grant_type": {"client_credentials"},
+ "scope": internal.CondVal(strings.Join(c.conf.Scopes, " ")),
+ }
+ for k, p := range c.conf.EndpointParams {
+ if _, ok := v[k]; ok {
+ return nil, fmt.Errorf("oauth2: cannot overwrite parameter %q", k)
+ }
+ v[k] = p
+ }
+ tk, err := internal.RetrieveToken(c.ctx, c.conf.ClientID, c.conf.ClientSecret, c.conf.TokenURL, v)
+ if err != nil {
+ return nil, err
+ }
+ t := &oauth2.Token{
+ AccessToken: tk.AccessToken,
+ TokenType: tk.TokenType,
+ RefreshToken: tk.RefreshToken,
+ Expiry: tk.Expiry,
+ }
+ return t.WithExtra(tk.Raw), nil
+}
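As a quick orientation for the two-legged flow implemented above, here is a minimal usage sketch; the client ID, secret, token URL, and scope are placeholders, not values from this repository:

```go
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/clientcredentials"
)

func main() {
	conf := &clientcredentials.Config{
		ClientID:     "YOUR_CLIENT_ID",                        // placeholder
		ClientSecret: "YOUR_CLIENT_SECRET",                    // placeholder
		TokenURL:     "https://provider.example/oauth2/token", // placeholder endpoint
		Scopes:       []string{"scope1"},                      // placeholder scope
	}

	// conf.Client returns an *http.Client that fetches and refreshes
	// tokens with the client-credentials grant as needed.
	client := conf.Client(context.Background())
	if _, err := client.Get("https://provider.example/api/resource"); err != nil {
		log.Fatal(err)
	}
}
```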
diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go
new file mode 100644
index 0000000..108520c
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go
@@ -0,0 +1,97 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package clientcredentials
+
+import (
+ "context"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+)
+
+func newConf(serverURL string) *Config {
+ return &Config{
+ ClientID: "CLIENT_ID",
+ ClientSecret: "CLIENT_SECRET",
+ Scopes: []string{"scope1", "scope2"},
+ TokenURL: serverURL + "/token",
+ EndpointParams: url.Values{"audience": {"audience1"}},
+ }
+}
+
+type mockTransport struct {
+ rt func(req *http.Request) (resp *http.Response, err error)
+}
+
+func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
+ return t.rt(req)
+}
+
+func TestTokenRequest(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() != "/token" {
+ t.Errorf("authenticate client request URL = %q; want %q", r.URL, "/token")
+ }
+ headerAuth := r.Header.Get("Authorization")
+ if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" {
+ t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
+ }
+ if got, want := r.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; got != want {
+ t.Errorf("Content-Type header = %q; want %q", got, want)
+ }
+ body, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ r.Body.Close()
+ }
+ if err != nil {
+ t.Errorf("failed reading request body: %s.", err)
+ }
+ if string(body) != "audience=audience1&grant_type=client_credentials&scope=scope1+scope2" {
+ t.Errorf("payload = %q; want %q", string(body), "grant_type=client_credentials&scope=scope1+scope2")
+ }
+ w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
+ w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&token_type=bearer"))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ tok, err := conf.Token(context.Background())
+ if err != nil {
+ t.Error(err)
+ }
+ if !tok.Valid() {
+ t.Fatalf("token invalid. got: %#v", tok)
+ }
+ if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
+ t.Errorf("Access token = %q; want %q", tok.AccessToken, "90d64460d14870c08c81352a05dedd3465940a7c")
+ }
+ if tok.TokenType != "bearer" {
+ t.Errorf("token type = %q; want %q", tok.TokenType, "bearer")
+ }
+}
+
+func TestTokenRefreshRequest(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() == "/somethingelse" {
+ return
+ }
+ if r.URL.String() != "/token" {
+ t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL)
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ if headerContentType != "application/x-www-form-urlencoded" {
+ t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
+ }
+ body, _ := ioutil.ReadAll(r.Body)
+ if string(body) != "audience=audience1&grant_type=client_credentials&scope=scope1+scope2" {
+ t.Errorf("Unexpected refresh token payload, %v is found.", string(body))
+ }
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ c := conf.Client(context.Background())
+ c.Get(ts.URL + "/somethingelse")
+}
diff --git a/vendor/golang.org/x/oauth2/example_test.go b/vendor/golang.org/x/oauth2/example_test.go
new file mode 100644
index 0000000..fc2f793
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/example_test.go
@@ -0,0 +1,89 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2_test
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "net/http"
+ "time"
+
+ "golang.org/x/oauth2"
+)
+
+func ExampleConfig() {
+ ctx := context.Background()
+ conf := &oauth2.Config{
+ ClientID: "YOUR_CLIENT_ID",
+ ClientSecret: "YOUR_CLIENT_SECRET",
+ Scopes: []string{"SCOPE1", "SCOPE2"},
+ Endpoint: oauth2.Endpoint{
+ AuthURL: "https://provider.com/o/oauth2/auth",
+ TokenURL: "https://provider.com/o/oauth2/token",
+ },
+ }
+
+ // Redirect user to consent page to ask for permission
+ // for the scopes specified above.
+ url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
+ fmt.Printf("Visit the URL for the auth dialog: %v", url)
+
+ // Use the authorization code that is pushed to the redirect
+ // URL. Exchange will do the handshake to retrieve the
+ // initial access token. The HTTP Client returned by
+ // conf.Client will refresh the token as necessary.
+ var code string
+ if _, err := fmt.Scan(&code); err != nil {
+ log.Fatal(err)
+ }
+ tok, err := conf.Exchange(ctx, code)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ client := conf.Client(ctx, tok)
+ client.Get("...")
+}
+
+func ExampleConfig_customHTTP() {
+ ctx := context.Background()
+
+ conf := &oauth2.Config{
+ ClientID: "YOUR_CLIENT_ID",
+ ClientSecret: "YOUR_CLIENT_SECRET",
+ Scopes: []string{"SCOPE1", "SCOPE2"},
+ Endpoint: oauth2.Endpoint{
+ TokenURL: "https://provider.com/o/oauth2/token",
+ AuthURL: "https://provider.com/o/oauth2/auth",
+ },
+ }
+
+ // Redirect user to consent page to ask for permission
+ // for the scopes specified above.
+ url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
+ fmt.Printf("Visit the URL for the auth dialog: %v", url)
+
+ // Use the authorization code that is pushed to the redirect
+ // URL. Exchange will do the handshake to retrieve the
+ // initial access token. The HTTP Client returned by
+ // conf.Client will refresh the token as necessary.
+ var code string
+ if _, err := fmt.Scan(&code); err != nil {
+ log.Fatal(err)
+ }
+
+ // Use the custom HTTP client when requesting a token.
+ httpClient := &http.Client{Timeout: 2 * time.Second}
+ ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient)
+
+ tok, err := conf.Exchange(ctx, code)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ client := conf.Client(ctx, tok)
+ _ = client
+}
diff --git a/vendor/golang.org/x/oauth2/facebook/facebook.go b/vendor/golang.org/x/oauth2/facebook/facebook.go
new file mode 100644
index 0000000..14c801a
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/facebook/facebook.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package facebook provides constants for using OAuth2 to access Facebook.
+package facebook // import "golang.org/x/oauth2/facebook"
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is Facebook's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://www.facebook.com/dialog/oauth",
+ TokenURL: "https://graph.facebook.com/oauth/access_token",
+}
diff --git a/vendor/golang.org/x/oauth2/fitbit/fitbit.go b/vendor/golang.org/x/oauth2/fitbit/fitbit.go
new file mode 100644
index 0000000..b31b82a
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/fitbit/fitbit.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fitbit provides constants for using OAuth2 to access the Fitbit API.
+package fitbit // import "golang.org/x/oauth2/fitbit"
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is the Fitbit API's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://www.fitbit.com/oauth2/authorize",
+ TokenURL: "https://api.fitbit.com/oauth2/token",
+}
diff --git a/vendor/golang.org/x/oauth2/foursquare/foursquare.go b/vendor/golang.org/x/oauth2/foursquare/foursquare.go
new file mode 100644
index 0000000..d2fa099
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/foursquare/foursquare.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package foursquare provides constants for using OAuth2 to access Foursquare.
+package foursquare // import "golang.org/x/oauth2/foursquare"
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is Foursquare's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://foursquare.com/oauth2/authorize",
+ TokenURL: "https://foursquare.com/oauth2/access_token",
+}
diff --git a/vendor/golang.org/x/oauth2/github/github.go b/vendor/golang.org/x/oauth2/github/github.go
new file mode 100644
index 0000000..f297801
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/github/github.go
@@ -0,0 +1,16 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package github provides constants for using OAuth2 to access Github.
+package github // import "golang.org/x/oauth2/github"
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is Github's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://github.com/login/oauth/authorize",
+ TokenURL: "https://github.com/login/oauth/access_token",
+}
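These provider packages only supply Endpoint values; they are meant to be plugged into an oauth2.Config like the one shown in the README above. A minimal sketch with the GitHub endpoint, where the client ID, secret, redirect URL, and scope are placeholders:

```go
package main

import (
	"fmt"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/github"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "YOUR_CLIENT_ID",                      // placeholder
		ClientSecret: "YOUR_CLIENT_SECRET",                  // placeholder
		RedirectURL:  "https://example.com/oauth2/callback", // placeholder
		Scopes:       []string{"read:user"},                 // placeholder scope
		Endpoint:     github.Endpoint,
	}

	// Send the user to this URL; the provider redirects back with a code
	// that conf.Exchange turns into a token.
	fmt.Println(conf.AuthCodeURL("state"))
}
```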
diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go
new file mode 100644
index 0000000..50d918b
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengine.go
@@ -0,0 +1,89 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+)
+
+// appengineFlex is set at init time by appengineflex_hook.go. If true, we are on App Engine Flex.
+var appengineFlex bool
+
+// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
+var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
+
+// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
+var appengineAppIDFunc func(c context.Context) string
+
+// AppEngineTokenSource returns a token source that fetches tokens
+// issued to the current App Engine application's service account.
+// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
+// that involves user accounts, see oauth2.Config instead.
+//
+// The provided context must have come from appengine.NewContext.
+func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
+ if appengineTokenFunc == nil {
+ panic("google: AppEngineTokenSource can only be used on App Engine.")
+ }
+ scopes := append([]string{}, scope...)
+ sort.Strings(scopes)
+ return &appEngineTokenSource{
+ ctx: ctx,
+ scopes: scopes,
+ key: strings.Join(scopes, " "),
+ }
+}
+
+// aeTokens helps the fetched tokens to be reused until their expiration.
+var (
+ aeTokensMu sync.Mutex
+ aeTokens = make(map[string]*tokenLock) // key is space-separated scopes
+)
+
+type tokenLock struct {
+ mu sync.Mutex // guards t; held while fetching or updating t
+ t *oauth2.Token
+}
+
+type appEngineTokenSource struct {
+ ctx context.Context
+ scopes []string
+ key string // to aeTokens map; space-separated scopes
+}
+
+func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
+ if appengineTokenFunc == nil {
+ panic("google: AppEngineTokenSource can only be used on App Engine.")
+ }
+
+ aeTokensMu.Lock()
+ tok, ok := aeTokens[ts.key]
+ if !ok {
+ tok = &tokenLock{}
+ aeTokens[ts.key] = tok
+ }
+ aeTokensMu.Unlock()
+
+ tok.mu.Lock()
+ defer tok.mu.Unlock()
+ if tok.t.Valid() {
+ return tok.t, nil
+ }
+ access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
+ if err != nil {
+ return nil, err
+ }
+ tok.t = &oauth2.Token{
+ AccessToken: access,
+ Expiry: exp,
+ }
+ return tok.t, nil
+}
diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go
new file mode 100644
index 0000000..56669ea
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengine_hook.go
@@ -0,0 +1,14 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine appenginevm
+
+package google
+
+import "google.golang.org/appengine"
+
+func init() {
+ appengineTokenFunc = appengine.AccessToken
+ appengineAppIDFunc = appengine.AppID
+}
diff --git a/vendor/golang.org/x/oauth2/google/appengineflex_hook.go b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go
new file mode 100644
index 0000000..5d0231a
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go
@@ -0,0 +1,11 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appenginevm
+
+package google
+
+func init() {
+ appengineFlex = true // Flex doesn't support appengine.AccessToken; depend on metadata server.
+}
diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go
new file mode 100644
index 0000000..b4b6274
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/default.go
@@ -0,0 +1,137 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "cloud.google.com/go/compute/metadata"
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+)
+
+// DefaultCredentials holds "Application Default Credentials".
+// For more details, see:
+// https://developers.google.com/accounts/docs/application-default-credentials
+type DefaultCredentials struct {
+ ProjectID string // may be empty
+ TokenSource oauth2.TokenSource
+
+ // JSON contains the raw bytes from a JSON credentials file.
+ // This field may be nil if authentication is provided by the
+ // environment and not with a credentials file, e.g. when code is
+ // running on Google Cloud Platform.
+ JSON []byte
+}
+
+// DefaultClient returns an HTTP Client that uses the
+// DefaultTokenSource to obtain authentication credentials.
+func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
+ ts, err := DefaultTokenSource(ctx, scope...)
+ if err != nil {
+ return nil, err
+ }
+ return oauth2.NewClient(ctx, ts), nil
+}
+
+// DefaultTokenSource returns the token source for
+// "Application Default Credentials".
+// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource.
+func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
+ creds, err := FindDefaultCredentials(ctx, scope...)
+ if err != nil {
+ return nil, err
+ }
+ return creds.TokenSource, nil
+}
+
+// FindDefaultCredentials searches for "Application Default Credentials".
+//
+// It looks for credentials in the following places,
+// preferring the first location found:
+//
+// 1. A JSON file whose path is specified by the
+// GOOGLE_APPLICATION_CREDENTIALS environment variable.
+// 2. A JSON file in a location known to the gcloud command-line tool.
+// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
+// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
+// 3. On Google App Engine it uses the appengine.AccessToken function.
+// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches
+// credentials from the metadata server.
+// (In this final case any provided scopes are ignored.)
+func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCredentials, error) {
+ // First, try the environment variable.
+ const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
+ if filename := os.Getenv(envVar); filename != "" {
+ creds, err := readCredentialsFile(ctx, filename, scope)
+ if err != nil {
+ return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
+ }
+ return creds, nil
+ }
+
+ // Second, try a well-known file.
+ filename := wellKnownFile()
+ if creds, err := readCredentialsFile(ctx, filename, scope); err == nil {
+ return creds, nil
+ } else if !os.IsNotExist(err) {
+ return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
+ }
+
+ // Third, if we're on Google App Engine use those credentials.
+ if appengineTokenFunc != nil && !appengineFlex {
+ return &DefaultCredentials{
+ ProjectID: appengineAppIDFunc(ctx),
+ TokenSource: AppEngineTokenSource(ctx, scope...),
+ }, nil
+ }
+
+ // Fourth, if we're on Google Compute Engine use the metadata server.
+ if metadata.OnGCE() {
+ id, _ := metadata.ProjectID()
+ return &DefaultCredentials{
+ ProjectID: id,
+ TokenSource: ComputeTokenSource(""),
+ }, nil
+ }
+
+ // None are found; return helpful error.
+ const url = "https://developers.google.com/accounts/docs/application-default-credentials"
+ return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
+}
+
+func wellKnownFile() string {
+ const f = "application_default_credentials.json"
+ if runtime.GOOS == "windows" {
+ return filepath.Join(os.Getenv("APPDATA"), "gcloud", f)
+ }
+ return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
+}
+
+func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ var f credentialsFile
+ if err := json.Unmarshal(b, &f); err != nil {
+ return nil, err
+ }
+ ts, err := f.tokenSource(ctx, append([]string(nil), scopes...))
+ if err != nil {
+ return nil, err
+ }
+ return &DefaultCredentials{
+ ProjectID: f.ProjectID,
+ TokenSource: ts,
+ JSON: b,
+ }, nil
+}
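FindDefaultCredentials walks the four locations listed above in order. A minimal sketch of calling it directly (rather than through DefaultClient) to reach both the project ID and the token source; the scope is a placeholder:

```go
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()

	// Placeholder scope; ignored when credentials come from the metadata server.
	creds, err := google.FindDefaultCredentials(ctx,
		"https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("project:", creds.ProjectID) // may be empty

	client := oauth2.NewClient(ctx, creds.TokenSource)
	client.Get("...")
}
```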
diff --git a/vendor/golang.org/x/oauth2/google/example_test.go b/vendor/golang.org/x/oauth2/google/example_test.go
new file mode 100644
index 0000000..92bc3b4
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/example_test.go
@@ -0,0 +1,150 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appenginevm appengine
+
+package google_test
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ "golang.org/x/oauth2/jwt"
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/urlfetch"
+)
+
+func ExampleDefaultClient() {
+ client, err := google.DefaultClient(oauth2.NoContext,
+ "https://www.googleapis.com/auth/devstorage.full_control")
+ if err != nil {
+ log.Fatal(err)
+ }
+ client.Get("...")
+}
+
+func Example_webServer() {
+ // Your credentials should be obtained from the Google
+ // Developer Console (https://console.developers.google.com).
+ conf := &oauth2.Config{
+ ClientID: "YOUR_CLIENT_ID",
+ ClientSecret: "YOUR_CLIENT_SECRET",
+ RedirectURL: "YOUR_REDIRECT_URL",
+ Scopes: []string{
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/blogger",
+ },
+ Endpoint: google.Endpoint,
+ }
+ // Redirect user to Google's consent page to ask for permission
+ // for the scopes specified above.
+ url := conf.AuthCodeURL("state")
+ fmt.Printf("Visit the URL for the auth dialog: %v", url)
+
+ // Handle the exchange code to initiate a transport.
+ tok, err := conf.Exchange(oauth2.NoContext, "authorization-code")
+ if err != nil {
+ log.Fatal(err)
+ }
+ client := conf.Client(oauth2.NoContext, tok)
+ client.Get("...")
+}
+
+func ExampleJWTConfigFromJSON() {
+ // Your credentials should be obtained from the Google
+ // Developer Console (https://console.developers.google.com).
+ // Navigate to your project, then see the "Credentials" page
+ // under "APIs & Auth".
+ // To create a service account client, click "Create new Client ID",
+ // select "Service Account", and click "Create Client ID". A JSON
+ // key file will then be downloaded to your computer.
+ data, err := ioutil.ReadFile("/path/to/your-project-key.json")
+ if err != nil {
+ log.Fatal(err)
+ }
+ conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/bigquery")
+ if err != nil {
+ log.Fatal(err)
+ }
+ // Initiate an http.Client. The following GET request will be
+ // authorized and authenticated on behalf of
+ // your service account.
+ client := conf.Client(oauth2.NoContext)
+ client.Get("...")
+}
+
+func ExampleSDKConfig() {
+ // The credentials will be obtained from the first account that
+ // has been authorized with `gcloud auth login`.
+ conf, err := google.NewSDKConfig("")
+ if err != nil {
+ log.Fatal(err)
+ }
+ // Initiate an http.Client. The following GET request will be
+ // authorized and authenticated on behalf of the SDK user.
+ client := conf.Client(oauth2.NoContext)
+ client.Get("...")
+}
+
+func Example_serviceAccount() {
+ // Your credentials should be obtained from the Google
+ // Developer Console (https://console.developers.google.com).
+ conf := &jwt.Config{
+ Email: "xxx@developer.gserviceaccount.com",
+ // The contents of your RSA private key or your PEM file
+ // that contains a private key.
+ // If you have a p12 file instead, you
+ // can use `openssl` to export the private key into a pem file.
+ //
+ // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
+ //
+ // The field only supports PEM containers with no passphrase.
+ // The openssl command will convert p12 keys to passphrase-less PEM containers.
+ PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."),
+ Scopes: []string{
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/blogger",
+ },
+ TokenURL: google.JWTTokenURL,
+ // If you would like to impersonate a user, you can
+ // create a transport with a subject. The following GET
+ // request will be made on behalf of user@example.com.
+ // Optional.
+ Subject: "user@example.com",
+ }
+ // Initiate an http.Client; the following GET request will be
+ // authorized and authenticated on behalf of user@example.com.
+ client := conf.Client(oauth2.NoContext)
+ client.Get("...")
+}
+
+func ExampleAppEngineTokenSource() {
+ var req *http.Request // from the ServeHTTP handler
+ ctx := appengine.NewContext(req)
+ client := &http.Client{
+ Transport: &oauth2.Transport{
+ Source: google.AppEngineTokenSource(ctx, "https://www.googleapis.com/auth/bigquery"),
+ Base: &urlfetch.Transport{
+ Context: ctx,
+ },
+ },
+ }
+ client.Get("...")
+}
+
+func ExampleComputeTokenSource() {
+ client := &http.Client{
+ Transport: &oauth2.Transport{
+ // Fetch from Google Compute Engine's metadata server to retrieve
+ // an access token for the provided account.
+ // If no account is specified, "default" is used.
+ Source: google.ComputeTokenSource(""),
+ },
+ }
+ client.Get("...")
+}
diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go
new file mode 100644
index 0000000..66a8b0e
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/google.go
@@ -0,0 +1,202 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package google provides support for making OAuth2 authorized and
+// authenticated HTTP requests to Google APIs.
+// It supports the Web server flow, client-side credentials, service accounts,
+// Google Compute Engine service accounts, and Google App Engine service
+// accounts.
+//
+// For more information, please read
+// https://developers.google.com/accounts/docs/OAuth2
+// and
+// https://developers.google.com/accounts/docs/application-default-credentials.
+package google // import "golang.org/x/oauth2/google"
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/compute/metadata"
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/jwt"
+)
+
+// Endpoint is Google's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://accounts.google.com/o/oauth2/auth",
+ TokenURL: "https://accounts.google.com/o/oauth2/token",
+}
+
+// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
+const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
+
+// ConfigFromJSON uses a Google Developers Console client_credentials.json
+// file to construct a config.
+// client_credentials.json can be downloaded from
+// https://console.developers.google.com, under "Credentials". Download the Web
+// application credentials in the JSON format and provide the contents of the
+// file as jsonKey.
+func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) {
+ type cred struct {
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ RedirectURIs []string `json:"redirect_uris"`
+ AuthURI string `json:"auth_uri"`
+ TokenURI string `json:"token_uri"`
+ }
+ var j struct {
+ Web *cred `json:"web"`
+ Installed *cred `json:"installed"`
+ }
+ if err := json.Unmarshal(jsonKey, &j); err != nil {
+ return nil, err
+ }
+ var c *cred
+ switch {
+ case j.Web != nil:
+ c = j.Web
+ case j.Installed != nil:
+ c = j.Installed
+ default:
+ return nil, fmt.Errorf("oauth2/google: no credentials found")
+ }
+ if len(c.RedirectURIs) < 1 {
+ return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json")
+ }
+ return &oauth2.Config{
+ ClientID: c.ClientID,
+ ClientSecret: c.ClientSecret,
+ RedirectURL: c.RedirectURIs[0],
+ Scopes: scope,
+ Endpoint: oauth2.Endpoint{
+ AuthURL: c.AuthURI,
+ TokenURL: c.TokenURI,
+ },
+ }, nil
+}
+
+// JWTConfigFromJSON uses a Google Developers service account JSON key file to read
+// the credentials that authorize and authenticate the requests.
+// Create a service account on "Credentials" for your project at
+// https://console.developers.google.com to download a JSON key file.
+func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {
+ var f credentialsFile
+ if err := json.Unmarshal(jsonKey, &f); err != nil {
+ return nil, err
+ }
+ if f.Type != serviceAccountKey {
+ return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey)
+ }
+ scope = append([]string(nil), scope...) // copy
+ return f.jwtConfig(scope), nil
+}
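+
+// exampleServiceAccountTokenSource is an illustrative sketch, not part of the
+// upstream API: given the bytes of a service account JSON key file, it builds
+// a two-legged JWT token source. The BigQuery scope is only an example.
+func exampleServiceAccountTokenSource(ctx context.Context, jsonKey []byte) (oauth2.TokenSource, error) {
+ conf, err := JWTConfigFromJSON(jsonKey, "https://www.googleapis.com/auth/bigquery")
+ if err != nil {
+ return nil, err
+ }
+ // conf.Client(ctx) would wrap this source in an *http.Client; the raw
+ // token source is returned here instead.
+ return conf.TokenSource(ctx), nil
+}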
+
+// JSON key file types.
+const (
+ serviceAccountKey = "service_account"
+ userCredentialsKey = "authorized_user"
+)
+
+// credentialsFile is the unmarshalled representation of a credentials file.
+type credentialsFile struct {
+ Type string `json:"type"` // serviceAccountKey or userCredentialsKey
+
+ // Service Account fields
+ ClientEmail string `json:"client_email"`
+ PrivateKeyID string `json:"private_key_id"`
+ PrivateKey string `json:"private_key"`
+ TokenURL string `json:"token_uri"`
+ ProjectID string `json:"project_id"`
+
+ // User Credential fields
+ // (These typically come from gcloud auth.)
+ ClientSecret string `json:"client_secret"`
+ ClientID string `json:"client_id"`
+ RefreshToken string `json:"refresh_token"`
+}
+
+func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config {
+ cfg := &jwt.Config{
+ Email: f.ClientEmail,
+ PrivateKey: []byte(f.PrivateKey),
+ PrivateKeyID: f.PrivateKeyID,
+ Scopes: scopes,
+ TokenURL: f.TokenURL,
+ }
+ if cfg.TokenURL == "" {
+ cfg.TokenURL = JWTTokenURL
+ }
+ return cfg
+}
+
+func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) {
+ switch f.Type {
+ case serviceAccountKey:
+ cfg := f.jwtConfig(scopes)
+ return cfg.TokenSource(ctx), nil
+ case userCredentialsKey:
+ cfg := &oauth2.Config{
+ ClientID: f.ClientID,
+ ClientSecret: f.ClientSecret,
+ Scopes: scopes,
+ Endpoint: Endpoint,
+ }
+ tok := &oauth2.Token{RefreshToken: f.RefreshToken}
+ return cfg.TokenSource(ctx, tok), nil
+ case "":
+ return nil, errors.New("missing 'type' field in credentials")
+ default:
+ return nil, fmt.Errorf("unknown credential type: %q", f.Type)
+ }
+}
+
+// ComputeTokenSource returns a token source that fetches access tokens
+// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
+// this token source if your program is running on a GCE instance.
+// If no account is specified, "default" is used.
+// Further information about retrieving access tokens from the GCE metadata
+// server can be found at https://cloud.google.com/compute/docs/authentication.
+func ComputeTokenSource(account string) oauth2.TokenSource {
+ return oauth2.ReuseTokenSource(nil, computeSource{account: account})
+}
+
+type computeSource struct {
+ account string
+}
+
+func (cs computeSource) Token() (*oauth2.Token, error) {
+ if !metadata.OnGCE() {
+ return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE")
+ }
+ acct := cs.account
+ if acct == "" {
+ acct = "default"
+ }
+ tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token")
+ if err != nil {
+ return nil, err
+ }
+ var res struct {
+ AccessToken string `json:"access_token"`
+ ExpiresInSec int `json:"expires_in"`
+ TokenType string `json:"token_type"`
+ }
+ err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err)
+ }
+ if res.ExpiresInSec == 0 || res.AccessToken == "" {
+ return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata")
+ }
+ return &oauth2.Token{
+ AccessToken: res.AccessToken,
+ TokenType: res.TokenType,
+ Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
+ }, nil
+}
diff --git a/vendor/golang.org/x/oauth2/google/google_test.go b/vendor/golang.org/x/oauth2/google/google_test.go
new file mode 100644
index 0000000..287c699
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/google_test.go
@@ -0,0 +1,116 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "strings"
+ "testing"
+)
+
+var webJSONKey = []byte(`
+{
+ "web": {
+ "auth_uri": "https://google.com/o/oauth2/auth",
+ "client_secret": "3Oknc4jS_wA2r9i",
+ "token_uri": "https://google.com/o/oauth2/token",
+ "client_email": "222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com",
+ "redirect_uris": ["https://www.example.com/oauth2callback"],
+ "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com",
+ "client_id": "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com",
+ "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+ "javascript_origins": ["https://www.example.com"]
+ }
+}`)
+
+var installedJSONKey = []byte(`{
+ "installed": {
+ "client_id": "222-installed.apps.googleusercontent.com",
+ "redirect_uris": ["https://www.example.com/oauth2callback"]
+ }
+}`)
+
+var jwtJSONKey = []byte(`{
+ "private_key_id": "268f54e43a1af97cfc71731688434f45aca15c8b",
+ "private_key": "super secret key",
+ "client_email": "gopher@developer.gserviceaccount.com",
+ "client_id": "gopher.apps.googleusercontent.com",
+ "token_uri": "https://accounts.google.com/o/gophers/token",
+ "type": "service_account"
+}`)
+
+var jwtJSONKeyNoTokenURL = []byte(`{
+ "private_key_id": "268f54e43a1af97cfc71731688434f45aca15c8b",
+ "private_key": "super secret key",
+ "client_email": "gopher@developer.gserviceaccount.com",
+ "client_id": "gopher.apps.googleusercontent.com",
+ "type": "service_account"
+}`)
+
+func TestConfigFromJSON(t *testing.T) {
+ conf, err := ConfigFromJSON(webJSONKey, "scope1", "scope2")
+ if err != nil {
+ t.Error(err)
+ }
+ if got, want := conf.ClientID, "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com"; got != want {
+ t.Errorf("ClientID = %q; want %q", got, want)
+ }
+ if got, want := conf.ClientSecret, "3Oknc4jS_wA2r9i"; got != want {
+ t.Errorf("ClientSecret = %q; want %q", got, want)
+ }
+ if got, want := conf.RedirectURL, "https://www.example.com/oauth2callback"; got != want {
+ t.Errorf("RedictURL = %q; want %q", got, want)
+ }
+ if got, want := strings.Join(conf.Scopes, ","), "scope1,scope2"; got != want {
+ t.Errorf("Scopes = %q; want %q", got, want)
+ }
+ if got, want := conf.Endpoint.AuthURL, "https://google.com/o/oauth2/auth"; got != want {
+ t.Errorf("AuthURL = %q; want %q", got, want)
+ }
+ if got, want := conf.Endpoint.TokenURL, "https://google.com/o/oauth2/token"; got != want {
+ t.Errorf("TokenURL = %q; want %q", got, want)
+ }
+}
+
+func TestConfigFromJSON_Installed(t *testing.T) {
+ conf, err := ConfigFromJSON(installedJSONKey)
+ if err != nil {
+ t.Error(err)
+ }
+ if got, want := conf.ClientID, "222-installed.apps.googleusercontent.com"; got != want {
+ t.Errorf("ClientID = %q; want %q", got, want)
+ }
+}
+
+func TestJWTConfigFromJSON(t *testing.T) {
+ conf, err := JWTConfigFromJSON(jwtJSONKey, "scope1", "scope2")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got, want := conf.Email, "gopher@developer.gserviceaccount.com"; got != want {
+ t.Errorf("Email = %q, want %q", got, want)
+ }
+ if got, want := string(conf.PrivateKey), "super secret key"; got != want {
+ t.Errorf("PrivateKey = %q, want %q", got, want)
+ }
+ if got, want := conf.PrivateKeyID, "268f54e43a1af97cfc71731688434f45aca15c8b"; got != want {
+ t.Errorf("PrivateKeyID = %q, want %q", got, want)
+ }
+ if got, want := strings.Join(conf.Scopes, ","), "scope1,scope2"; got != want {
+ t.Errorf("Scopes = %q; want %q", got, want)
+ }
+ if got, want := conf.TokenURL, "https://accounts.google.com/o/gophers/token"; got != want {
+ t.Errorf("TokenURL = %q; want %q", got, want)
+ }
+}
+
+func TestJWTConfigFromJSONNoTokenURL(t *testing.T) {
+ conf, err := JWTConfigFromJSON(jwtJSONKeyNoTokenURL, "scope1", "scope2")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got, want := conf.TokenURL, "https://accounts.google.com/o/oauth2/token"; got != want {
+ t.Errorf("TokenURL = %q; want %q", got, want)
+ }
+}
diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go
new file mode 100644
index 0000000..b0fdb3a
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/jwt.go
@@ -0,0 +1,74 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "crypto/rsa"
+ "fmt"
+ "time"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/internal"
+ "golang.org/x/oauth2/jws"
+)
+
+// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON
+// key file to read the credentials that authorize and authenticate the
+// requests, and returns a TokenSource that does not use any OAuth2 flow but
+// instead creates a JWT and sends that as the access token.
+// The audience is typically a URL that specifies the scope of the credentials.
+//
+// Note that this is not a standard OAuth flow, but rather an
+// optimization supported by a few Google services.
+// Unless you know otherwise, you should use JWTConfigFromJSON instead.
+func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) {
+ cfg, err := JWTConfigFromJSON(jsonKey)
+ if err != nil {
+ return nil, fmt.Errorf("google: could not parse JSON key: %v", err)
+ }
+ pk, err := internal.ParseKey(cfg.PrivateKey)
+ if err != nil {
+ return nil, fmt.Errorf("google: could not parse key: %v", err)
+ }
+ ts := &jwtAccessTokenSource{
+ email: cfg.Email,
+ audience: audience,
+ pk: pk,
+ pkID: cfg.PrivateKeyID,
+ }
+ tok, err := ts.Token()
+ if err != nil {
+ return nil, err
+ }
+ return oauth2.ReuseTokenSource(tok, ts), nil
+}
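+
+// exampleJWTAccessToken is an illustrative sketch, not part of the upstream
+// API: it mints a self-signed JWT access token for a single audience,
+// bypassing the OAuth2 token endpoint entirely. The audience value is
+// supplied by the caller and is typically the URL of the target API.
+func exampleJWTAccessToken(jsonKey []byte, audience string) (string, error) {
+ ts, err := JWTAccessTokenSourceFromJSON(jsonKey, audience)
+ if err != nil {
+ return "", err
+ }
+ tok, err := ts.Token()
+ if err != nil {
+ return "", err
+ }
+ // The access token is itself a signed JWT and is sent as a Bearer token.
+ return tok.AccessToken, nil
+}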
+
+type jwtAccessTokenSource struct {
+ email, audience string
+ pk *rsa.PrivateKey
+ pkID string
+}
+
+func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) {
+ iat := time.Now()
+ exp := iat.Add(time.Hour)
+ cs := &jws.ClaimSet{
+ Iss: ts.email,
+ Sub: ts.email,
+ Aud: ts.audience,
+ Iat: iat.Unix(),
+ Exp: exp.Unix(),
+ }
+ hdr := &jws.Header{
+ Algorithm: "RS256",
+ Typ: "JWT",
+ KeyID: string(ts.pkID),
+ }
+ msg, err := jws.Encode(hdr, cs, ts.pk)
+ if err != nil {
+ return nil, fmt.Errorf("google: could not encode JWT: %v", err)
+ }
+ return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil
+}
diff --git a/vendor/golang.org/x/oauth2/google/jwt_test.go b/vendor/golang.org/x/oauth2/google/jwt_test.go
new file mode 100644
index 0000000..f844436
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/jwt_test.go
@@ -0,0 +1,91 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "encoding/pem"
+ "strings"
+ "testing"
+ "time"
+
+ "golang.org/x/oauth2/jws"
+)
+
+func TestJWTAccessTokenSourceFromJSON(t *testing.T) {
+ // Generate a key we can use in the test data.
+ privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Encode the key and substitute into our example JSON.
+ enc := pem.EncodeToMemory(&pem.Block{
+ Type: "PRIVATE KEY",
+ Bytes: x509.MarshalPKCS1PrivateKey(privateKey),
+ })
+ enc, err = json.Marshal(string(enc))
+ if err != nil {
+ t.Fatalf("json.Marshal: %v", err)
+ }
+ jsonKey := bytes.Replace(jwtJSONKey, []byte(`"super secret key"`), enc, 1)
+
+ ts, err := JWTAccessTokenSourceFromJSON(jsonKey, "audience")
+ if err != nil {
+ t.Fatalf("JWTAccessTokenSourceFromJSON: %v\nJSON: %s", err, string(jsonKey))
+ }
+
+ tok, err := ts.Token()
+ if err != nil {
+ t.Fatalf("Token: %v", err)
+ }
+
+ if got, want := tok.TokenType, "Bearer"; got != want {
+ t.Errorf("TokenType = %q, want %q", got, want)
+ }
+ if got := tok.Expiry; tok.Expiry.Before(time.Now()) {
+ t.Errorf("Expiry = %v, should not be expired", got)
+ }
+
+ err = jws.Verify(tok.AccessToken, &privateKey.PublicKey)
+ if err != nil {
+ t.Errorf("jws.Verify on AccessToken: %v", err)
+ }
+
+ claim, err := jws.Decode(tok.AccessToken)
+ if err != nil {
+ t.Fatalf("jws.Decode on AccessToken: %v", err)
+ }
+
+ if got, want := claim.Iss, "gopher@developer.gserviceaccount.com"; got != want {
+ t.Errorf("Iss = %q, want %q", got, want)
+ }
+ if got, want := claim.Sub, "gopher@developer.gserviceaccount.com"; got != want {
+ t.Errorf("Sub = %q, want %q", got, want)
+ }
+ if got, want := claim.Aud, "audience"; got != want {
+ t.Errorf("Aud = %q, want %q", got, want)
+ }
+
+ // Finally, check the header private key.
+ parts := strings.Split(tok.AccessToken, ".")
+ hdrJSON, err := base64.RawURLEncoding.DecodeString(parts[0])
+ if err != nil {
+ t.Fatalf("base64 DecodeString: %v\nString: %q", err, parts[0])
+ }
+ var hdr jws.Header
+ if err := json.Unmarshal([]byte(hdrJSON), &hdr); err != nil {
+ t.Fatalf("json.Unmarshal: %v (%q)", err, hdrJSON)
+ }
+
+ if got, want := hdr.KeyID, "268f54e43a1af97cfc71731688434f45aca15c8b"; got != want {
+ t.Errorf("Header KeyID = %q, want %q", got, want)
+ }
+}
diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go
new file mode 100644
index 0000000..bdc1808
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/sdk.go
@@ -0,0 +1,172 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "os/user"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/internal"
+)
+
+type sdkCredentials struct {
+ Data []struct {
+ Credential struct {
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ AccessToken string `json:"access_token"`
+ RefreshToken string `json:"refresh_token"`
+ TokenExpiry *time.Time `json:"token_expiry"`
+ } `json:"credential"`
+ Key struct {
+ Account string `json:"account"`
+ Scope string `json:"scope"`
+ } `json:"key"`
+ }
+}
+
+// An SDKConfig provides access to tokens from an account already
+// authorized via the Google Cloud SDK.
+type SDKConfig struct {
+ conf oauth2.Config
+ initialToken *oauth2.Token
+}
+
+// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK
+// account. If account is empty, the account currently active in
+// Google Cloud SDK properties is used.
+// Google Cloud SDK credentials must be created by running `gcloud auth`
+// before using this function.
+// The Google Cloud SDK is available at https://cloud.google.com/sdk/.
+func NewSDKConfig(account string) (*SDKConfig, error) {
+ configPath, err := sdkConfigPath()
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
+ }
+ credentialsPath := filepath.Join(configPath, "credentials")
+ f, err := os.Open(credentialsPath)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
+ }
+ defer f.Close()
+
+ var c sdkCredentials
+ if err := json.NewDecoder(f).Decode(&c); err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
+ }
+ if len(c.Data) == 0 {
+ return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
+ }
+ if account == "" {
+ propertiesPath := filepath.Join(configPath, "properties")
+ f, err := os.Open(propertiesPath)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
+ }
+ defer f.Close()
+ ini, err := internal.ParseINI(f)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
+ }
+ core, ok := ini["core"]
+ if !ok {
+ return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
+ }
+ active, ok := core["account"]
+ if !ok {
+ return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
+ }
+ account = active
+ }
+
+ for _, d := range c.Data {
+ if account == "" || d.Key.Account == account {
+ if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
+ return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
+ }
+ var expiry time.Time
+ if d.Credential.TokenExpiry != nil {
+ expiry = *d.Credential.TokenExpiry
+ }
+ return &SDKConfig{
+ conf: oauth2.Config{
+ ClientID: d.Credential.ClientID,
+ ClientSecret: d.Credential.ClientSecret,
+ Scopes: strings.Split(d.Key.Scope, " "),
+ Endpoint: Endpoint,
+ RedirectURL: "oob",
+ },
+ initialToken: &oauth2.Token{
+ AccessToken: d.Credential.AccessToken,
+ RefreshToken: d.Credential.RefreshToken,
+ Expiry: expiry,
+ },
+ }, nil
+ }
+ }
+ return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
+}
+
+// Client returns an HTTP client using Google Cloud SDK credentials to
+// authorize requests. The token will auto-refresh as necessary. The
+// underlying http.RoundTripper will be obtained using the provided
+// context. The returned client and its Transport should not be
+// modified.
+func (c *SDKConfig) Client(ctx context.Context) *http.Client {
+ return &http.Client{
+ Transport: &oauth2.Transport{
+ Source: c.TokenSource(ctx),
+ },
+ }
+}
+
+// TokenSource returns an oauth2.TokenSource that retrieves tokens from
+// Google Cloud SDK credentials using the provided context.
+// It will return the current access token stored in the credentials,
+// and refresh it when it expires, but it won't update the credentials
+// with the new access token.
+func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
+ return c.conf.TokenSource(ctx, c.initialToken)
+}
+
+// Scopes are the OAuth 2.0 scopes the current account is authorized for.
+func (c *SDKConfig) Scopes() []string {
+ return c.conf.Scopes
+}
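+
+// exampleSDKClient is an illustrative sketch, not part of the upstream API:
+// it reuses credentials created by `gcloud auth login` for the currently
+// active account and returns an HTTP client that authorizes requests with
+// them.
+func exampleSDKClient(ctx context.Context) (*http.Client, error) {
+ conf, err := NewSDKConfig("") // "" selects the account active in the gcloud properties
+ if err != nil {
+ return nil, err
+ }
+ return conf.Client(ctx), nil
+}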
+
+// sdkConfigPath tries to guess where the gcloud config is located.
+// It can be overridden during tests.
+var sdkConfigPath = func() (string, error) {
+ if runtime.GOOS == "windows" {
+ return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
+ }
+ homeDir := guessUnixHomeDir()
+ if homeDir == "" {
+ return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
+ }
+ return filepath.Join(homeDir, ".config", "gcloud"), nil
+}
+
+func guessUnixHomeDir() string {
+ // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
+ if v := os.Getenv("HOME"); v != "" {
+ return v
+ }
+ // Else, fall back to user.Current:
+ if u, err := user.Current(); err == nil {
+ return u.HomeDir
+ }
+ return ""
+}
diff --git a/vendor/golang.org/x/oauth2/google/sdk_test.go b/vendor/golang.org/x/oauth2/google/sdk_test.go
new file mode 100644
index 0000000..4489bb9
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/sdk_test.go
@@ -0,0 +1,46 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import "testing"
+
+func TestSDKConfig(t *testing.T) {
+ sdkConfigPath = func() (string, error) {
+ return "testdata/gcloud", nil
+ }
+
+ tests := []struct {
+ account string
+ accessToken string
+ err bool
+ }{
+ {"", "bar_access_token", false},
+ {"foo@example.com", "foo_access_token", false},
+ {"bar@example.com", "bar_access_token", false},
+ {"baz@serviceaccount.example.com", "", true},
+ }
+ for _, tt := range tests {
+ c, err := NewSDKConfig(tt.account)
+ if got, want := err != nil, tt.err; got != want {
+ if !tt.err {
+ t.Errorf("got %v, want nil", err)
+ } else {
+ t.Errorf("got nil, want error")
+ }
+ continue
+ }
+ if err != nil {
+ continue
+ }
+ tok := c.initialToken
+ if tok == nil {
+ t.Errorf("got nil, want %q", tt.accessToken)
+ continue
+ }
+ if tok.AccessToken != tt.accessToken {
+ t.Errorf("got %q, want %q", tok.AccessToken, tt.accessToken)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/oauth2/google/testdata/gcloud/credentials b/vendor/golang.org/x/oauth2/google/testdata/gcloud/credentials
new file mode 100644
index 0000000..ff5eefb
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/testdata/gcloud/credentials
@@ -0,0 +1,122 @@
+{
+ "data": [
+ {
+ "credential": {
+ "_class": "OAuth2Credentials",
+ "_module": "oauth2client.client",
+ "access_token": "foo_access_token",
+ "client_id": "foo_client_id",
+ "client_secret": "foo_client_secret",
+ "id_token": {
+ "at_hash": "foo_at_hash",
+ "aud": "foo_aud",
+ "azp": "foo_azp",
+ "cid": "foo_cid",
+ "email": "foo@example.com",
+ "email_verified": true,
+ "exp": 1420573614,
+ "iat": 1420569714,
+ "id": "1337",
+ "iss": "accounts.google.com",
+ "sub": "1337",
+ "token_hash": "foo_token_hash",
+ "verified_email": true
+ },
+ "invalid": false,
+ "refresh_token": "foo_refresh_token",
+ "revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
+ "token_expiry": "2015-01-09T00:51:51Z",
+ "token_response": {
+ "access_token": "foo_access_token",
+ "expires_in": 3600,
+ "id_token": "foo_id_token",
+ "token_type": "Bearer"
+ },
+ "token_uri": "https://accounts.google.com/o/oauth2/token",
+ "user_agent": "Cloud SDK Command Line Tool"
+ },
+ "key": {
+ "account": "foo@example.com",
+ "clientId": "foo_client_id",
+ "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
+ "type": "google-cloud-sdk"
+ }
+ },
+ {
+ "credential": {
+ "_class": "OAuth2Credentials",
+ "_module": "oauth2client.client",
+ "access_token": "bar_access_token",
+ "client_id": "bar_client_id",
+ "client_secret": "bar_client_secret",
+ "id_token": {
+ "at_hash": "bar_at_hash",
+ "aud": "bar_aud",
+ "azp": "bar_azp",
+ "cid": "bar_cid",
+ "email": "bar@example.com",
+ "email_verified": true,
+ "exp": 1420573614,
+ "iat": 1420569714,
+ "id": "1337",
+ "iss": "accounts.google.com",
+ "sub": "1337",
+ "token_hash": "bar_token_hash",
+ "verified_email": true
+ },
+ "invalid": false,
+ "refresh_token": "bar_refresh_token",
+ "revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
+ "token_expiry": "2015-01-09T00:51:51Z",
+ "token_response": {
+ "access_token": "bar_access_token",
+ "expires_in": 3600,
+ "id_token": "bar_id_token",
+ "token_type": "Bearer"
+ },
+ "token_uri": "https://accounts.google.com/o/oauth2/token",
+ "user_agent": "Cloud SDK Command Line Tool"
+ },
+ "key": {
+ "account": "bar@example.com",
+ "clientId": "bar_client_id",
+ "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
+ "type": "google-cloud-sdk"
+ }
+ },
+ {
+ "credential": {
+ "_class": "ServiceAccountCredentials",
+ "_kwargs": {},
+ "_module": "oauth2client.client",
+ "_private_key_id": "00000000000000000000000000000000",
+ "_private_key_pkcs8_text": "-----BEGIN RSA PRIVATE KEY-----\nMIICWwIBAAKBgQCt3fpiynPSaUhWSIKMGV331zudwJ6GkGmvQtwsoK2S2LbvnSwU\nNxgj4fp08kIDR5p26wF4+t/HrKydMwzftXBfZ9UmLVJgRdSswmS5SmChCrfDS5OE\nvFFcN5+6w1w8/Nu657PF/dse8T0bV95YrqyoR0Osy8WHrUOMSIIbC3hRuwIDAQAB\nAoGAJrGE/KFjn0sQ7yrZ6sXmdLawrM3mObo/2uI9T60+k7SpGbBX0/Pi6nFrJMWZ\nTVONG7P3Mu5aCPzzuVRYJB0j8aldSfzABTY3HKoWCczqw1OztJiEseXGiYz4QOyr\nYU3qDyEpdhS6q6wcoLKGH+hqRmz6pcSEsc8XzOOu7s4xW8kCQQDkc75HjhbarCnd\nJJGMe3U76+6UGmdK67ltZj6k6xoB5WbTNChY9TAyI2JC+ppYV89zv3ssj4L+02u3\nHIHFGxsHAkEAwtU1qYb1tScpchPobnYUFiVKJ7KA8EZaHVaJJODW/cghTCV7BxcJ\nbgVvlmk4lFKn3lPKAgWw7PdQsBTVBUcCrQJATPwoIirizrv3u5soJUQxZIkENAqV\nxmybZx9uetrzP7JTrVbFRf0SScMcyN90hdLJiQL8+i4+gaszgFht7sNMnwJAAbfj\nq0UXcauQwALQ7/h2oONfTg5S+MuGC/AxcXPSMZbMRGGoPh3D5YaCv27aIuS/ukQ+\n6dmm/9AGlCb64fsIWQJAPaokbjIifo+LwC5gyK73Mc4t8nAOSZDenzd/2f6TCq76\nS1dcnKiPxaED7W/y6LJiuBT2rbZiQ2L93NJpFZD/UA==\n-----END RSA PRIVATE KEY-----\n",
+ "_revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
+ "_scopes": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
+ "_service_account_email": "baz@serviceaccount.example.com",
+ "_service_account_id": "baz.serviceaccount.example.com",
+ "_token_uri": "https://accounts.google.com/o/oauth2/token",
+ "_user_agent": "Cloud SDK Command Line Tool",
+ "access_token": null,
+ "assertion_type": null,
+ "client_id": null,
+ "client_secret": null,
+ "id_token": null,
+ "invalid": false,
+ "refresh_token": null,
+ "revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
+ "service_account_name": "baz@serviceaccount.example.com",
+ "token_expiry": null,
+ "token_response": null,
+ "user_agent": "Cloud SDK Command Line Tool"
+ },
+ "key": {
+ "account": "baz@serviceaccount.example.com",
+ "clientId": "baz_client_id",
+ "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
+ "type": "google-cloud-sdk"
+ }
+ }
+ ],
+ "file_version": 1
+}
diff --git a/vendor/golang.org/x/oauth2/google/testdata/gcloud/properties b/vendor/golang.org/x/oauth2/google/testdata/gcloud/properties
new file mode 100644
index 0000000..025de88
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/testdata/gcloud/properties
@@ -0,0 +1,2 @@
+[core]
+account = bar@example.com \ No newline at end of file
diff --git a/vendor/golang.org/x/oauth2/heroku/heroku.go b/vendor/golang.org/x/oauth2/heroku/heroku.go
new file mode 100644
index 0000000..5b4fdb8
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/heroku/heroku.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package heroku provides constants for using OAuth2 to access Heroku.
+package heroku // import "golang.org/x/oauth2/heroku"
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is Heroku's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://id.heroku.com/oauth/authorize",
+ TokenURL: "https://id.heroku.com/oauth/token",
+}
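+
+// exampleConfig is an illustrative sketch, not part of the upstream package:
+// it shows how Endpoint is typically plugged into an oauth2.Config. The
+// client ID, secret, redirect URL, and scope are placeholders.
+func exampleConfig() *oauth2.Config {
+ return &oauth2.Config{
+ ClientID: "YOUR_CLIENT_ID",
+ ClientSecret: "YOUR_CLIENT_SECRET",
+ RedirectURL: "https://example.com/oauth2/callback",
+ Scopes: []string{"global"},
+ Endpoint: Endpoint,
+ }
+}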
diff --git a/vendor/golang.org/x/oauth2/hipchat/hipchat.go b/vendor/golang.org/x/oauth2/hipchat/hipchat.go
new file mode 100644
index 0000000..594fe07
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/hipchat/hipchat.go
@@ -0,0 +1,60 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hipchat provides constants for using OAuth2 to access HipChat.
+package hipchat // import "golang.org/x/oauth2/hipchat"
+
+import (
+ "encoding/json"
+ "errors"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/clientcredentials"
+)
+
+// Endpoint is HipChat's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://www.hipchat.com/users/authorize",
+ TokenURL: "https://api.hipchat.com/v2/oauth/token",
+}
+
+// ServerEndpoint returns a new oauth2.Endpoint for a HipChat Server instance
+// running on the given domain or host.
+func ServerEndpoint(host string) oauth2.Endpoint {
+ return oauth2.Endpoint{
+ AuthURL: "https://" + host + "/users/authorize",
+ TokenURL: "https://" + host + "/v2/oauth/token",
+ }
+}
+
+// ClientCredentialsConfigFromCaps generates a Config from a HipChat API
+// capabilities descriptor. It does not verify the scopes against the
+// capabilities document at this time.
+//
+// For more information see: https://www.hipchat.com/docs/apiv2/method/get_capabilities
+func ClientCredentialsConfigFromCaps(capsJSON []byte, clientID, clientSecret string, scopes ...string) (*clientcredentials.Config, error) {
+ var caps struct {
+ Caps struct {
+ Endpoint struct {
+ TokenURL string `json:"tokenUrl"`
+ } `json:"oauth2Provider"`
+ } `json:"capabilities"`
+ }
+
+ if err := json.Unmarshal(capsJSON, &caps); err != nil {
+ return nil, err
+ }
+
+ // Verify required fields.
+ if caps.Caps.Endpoint.TokenURL == "" {
+ return nil, errors.New("oauth2/hipchat: missing OAuth2 token URL in the capabilities descriptor JSON")
+ }
+
+ return &clientcredentials.Config{
+ ClientID: clientID,
+ ClientSecret: clientSecret,
+ Scopes: scopes,
+ TokenURL: caps.Caps.Endpoint.TokenURL,
+ }, nil
+}
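+
+// exampleServerConfig is an illustrative sketch, not part of the upstream
+// package: it derives a client-credentials config for a self-hosted HipChat
+// Server from its capabilities document. The client ID, secret, and scope
+// are placeholders supplied by the integration.
+func exampleServerConfig(capsJSON []byte) (*clientcredentials.Config, error) {
+ // The returned config's Client and Token methods can then be used to call
+ // the HipChat API directly.
+ return ClientCredentialsConfigFromCaps(capsJSON, "client-id", "client-secret", "send_notification")
+}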
diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go
new file mode 100644
index 0000000..03265e8
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for the oauth2 package.
+package internal
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
new file mode 100644
index 0000000..6978192
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "bufio"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+ block, _ := pem.Decode(key)
+ if block != nil {
+ key = block.Bytes
+ }
+ parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+ if err != nil {
+ parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+ if err != nil {
+ return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err)
+ }
+ }
+ parsed, ok := parsedKey.(*rsa.PrivateKey)
+ if !ok {
+ return nil, errors.New("private key is invalid")
+ }
+ return parsed, nil
+}
+
+func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
+ result := map[string]map[string]string{
+ "": {}, // root section
+ }
+ scanner := bufio.NewScanner(ini)
+ currentSection := ""
+ for scanner.Scan() {
+ line := strings.TrimSpace(scanner.Text())
+ if strings.HasPrefix(line, ";") {
+ // comment.
+ continue
+ }
+ if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+ currentSection = strings.TrimSpace(line[1 : len(line)-1])
+ result[currentSection] = map[string]string{}
+ continue
+ }
+ parts := strings.SplitN(line, "=", 2)
+ if len(parts) == 2 && parts[0] != "" {
+ result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("error scanning ini: %v", err)
+ }
+ return result, nil
+}
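+
+// exampleReadActiveAccount is an illustrative sketch, not part of the
+// upstream API: it shows the shape of ParseINI's result by reading the
+// "account" key from a gcloud-style [core] section.
+func exampleReadActiveAccount(ini string) (string, error) {
+ sections, err := ParseINI(strings.NewReader(ini))
+ if err != nil {
+ return "", err
+ }
+ // Keys that appear before any [section] header live under the "" root section.
+ return sections["core"]["account"], nil
+}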
+
+func CondVal(v string) []string {
+ if v == "" {
+ return nil
+ }
+ return []string{v}
+}
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2_test.go b/vendor/golang.org/x/oauth2/internal/oauth2_test.go
new file mode 100644
index 0000000..07d51c4
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/oauth2_test.go
@@ -0,0 +1,61 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func TestParseINI(t *testing.T) {
+ tests := []struct {
+ ini string
+ want map[string]map[string]string
+ }{
+ {
+ `root = toor
+[foo]
+bar = hop
+ini = nin
+`,
+ map[string]map[string]string{
+ "": {"root": "toor"},
+ "foo": {"bar": "hop", "ini": "nin"},
+ },
+ },
+ {
+ `[empty]
+[section]
+empty=
+`,
+ map[string]map[string]string{
+ "": {},
+ "empty": {},
+ "section": {"empty": ""},
+ },
+ },
+ {
+ `ignore
+[invalid
+=stuff
+;comment=true
+`,
+ map[string]map[string]string{
+ "": {},
+ },
+ },
+ }
+ for _, tt := range tests {
+ result, err := ParseINI(strings.NewReader(tt.ini))
+ if err != nil {
+ t.Errorf("ParseINI(%q) error %v, want: no error", tt.ini, err)
+ continue
+ }
+ if !reflect.DeepEqual(result, tt.want) {
+ t.Errorf("ParseINI(%q) = %#v, want: %#v", tt.ini, result, tt.want)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
new file mode 100644
index 0000000..600dbe6
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -0,0 +1,251 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/context/ctxhttp"
+)
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// This type is a mirror of oauth2.Token and exists to break
+// an otherwise-circular dependency. Other internal packages
+// should convert this Token into an oauth2.Token before use.
+type Token struct {
+ // AccessToken is the token that authorizes and authenticates
+ // the requests.
+ AccessToken string
+
+ // TokenType is the type of token.
+ // The Type method returns either this or "Bearer", the default.
+ TokenType string
+
+ // RefreshToken is a token that's used by the application
+ // (as opposed to the user) to refresh the access token
+ // if it expires.
+ RefreshToken string
+
+ // Expiry is the optional expiration time of the access token.
+ //
+ // If zero, TokenSource implementations will reuse the same
+ // token forever and RefreshToken or equivalent
+ // mechanisms for that TokenSource will not be used.
+ Expiry time.Time
+
+ // Raw optionally contains extra metadata from the server
+ // when updating a token.
+ Raw interface{}
+}
+
+// tokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
+type tokenJSON struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ RefreshToken string `json:"refresh_token"`
+ ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
+ Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in
+}
+
+func (e *tokenJSON) expiry() (t time.Time) {
+ if v := e.ExpiresIn; v != 0 {
+ return time.Now().Add(time.Duration(v) * time.Second)
+ }
+ if v := e.Expires; v != 0 {
+ return time.Now().Add(time.Duration(v) * time.Second)
+ }
+ return
+}
+
+type expirationTime int32
+
+func (e *expirationTime) UnmarshalJSON(b []byte) error {
+ var n json.Number
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+ i, err := n.Int64()
+ if err != nil {
+ return err
+ }
+ *e = expirationTime(i)
+ return nil
+}
+
+var brokenAuthHeaderProviders = []string{
+ "https://accounts.google.com/",
+ "https://api.codeswholesale.com/oauth/token",
+ "https://api.dropbox.com/",
+ "https://api.dropboxapi.com/",
+ "https://api.instagram.com/",
+ "https://api.netatmo.net/",
+ "https://api.odnoklassniki.ru/",
+ "https://api.pushbullet.com/",
+ "https://api.soundcloud.com/",
+ "https://api.twitch.tv/",
+ "https://app.box.com/",
+ "https://connect.stripe.com/",
+ "https://graph.facebook.com", // see https://github.com/golang/oauth2/issues/214
+ "https://login.microsoftonline.com/",
+ "https://login.salesforce.com/",
+ "https://login.windows.net",
+ "https://login.live.com/",
+ "https://oauth.sandbox.trainingpeaks.com/",
+ "https://oauth.trainingpeaks.com/",
+ "https://oauth.vk.com/",
+ "https://openapi.baidu.com/",
+ "https://slack.com/",
+ "https://test-sandbox.auth.corp.google.com",
+ "https://test.salesforce.com/",
+ "https://user.gini.net/",
+ "https://www.douban.com/",
+ "https://www.googleapis.com/",
+ "https://www.linkedin.com/",
+ "https://www.strava.com/oauth/",
+ "https://www.wunderlist.com/oauth/",
+ "https://api.patreon.com/",
+ "https://sandbox.codeswholesale.com/oauth/token",
+ "https://api.sipgate.com/v1/authorization/oauth",
+}
+
+// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints.
+var brokenAuthHeaderDomains = []string{
+ ".force.com",
+ ".myshopify.com",
+ ".okta.com",
+ ".oktapreview.com",
+}
+
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {
+ brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL)
+}
+
+// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
+// implements the OAuth2 spec correctly.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+// In summary:
+// - Reddit only accepts client secret in the Authorization header
+// - Dropbox accepts the client secret in either the URL param or the Auth header, but not both.
+// - Google only accepts URL param (not spec compliant?), not Auth header
+// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
+func providerAuthHeaderWorks(tokenURL string) bool {
+ for _, s := range brokenAuthHeaderProviders {
+ if strings.HasPrefix(tokenURL, s) {
+ // Some sites fail to implement the OAuth2 spec fully.
+ return false
+ }
+ }
+
+ if u, err := url.Parse(tokenURL); err == nil {
+ for _, s := range brokenAuthHeaderDomains {
+ if strings.HasSuffix(u.Host, s) {
+ return false
+ }
+ }
+ }
+
+ // Assume the provider implements the spec properly
+ // otherwise. We can add more exceptions as they're
+ // discovered. We will _not_ be adding configurable hooks
+ // to this package to let users select server bugs.
+ return true
+}
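+
+// exampleRegisterBrokenProvider is an illustrative sketch, not part of the
+// upstream API: a provider whose token endpoint rejects HTTP basic auth can
+// be registered up front so that RetrieveToken sends the client credentials
+// in the request body instead. The URL is a placeholder.
+func exampleRegisterBrokenProvider() {
+ RegisterBrokenAuthHeaderProvider("https://sso.example.com/oauth/token")
+}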
+
+func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) {
+ hc, err := ContextClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+ bustedAuth := !providerAuthHeaderWorks(tokenURL)
+ if bustedAuth {
+ if clientID != "" {
+ v.Set("client_id", clientID)
+ }
+ if clientSecret != "" {
+ v.Set("client_secret", clientSecret)
+ }
+ }
+ req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ if !bustedAuth {
+ req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret))
+ }
+ r, err := ctxhttp.Do(ctx, hc, req)
+ if err != nil {
+ return nil, err
+ }
+ defer r.Body.Close()
+ body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ if code := r.StatusCode; code < 200 || code > 299 {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
+ }
+
+ var token *Token
+ content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
+ switch content {
+ case "application/x-www-form-urlencoded", "text/plain":
+ vals, err := url.ParseQuery(string(body))
+ if err != nil {
+ return nil, err
+ }
+ token = &Token{
+ AccessToken: vals.Get("access_token"),
+ TokenType: vals.Get("token_type"),
+ RefreshToken: vals.Get("refresh_token"),
+ Raw: vals,
+ }
+ e := vals.Get("expires_in")
+ if e == "" {
+ // TODO(jbd): Facebook's OAuth2 implementation is broken and
+ // returns the expires_in field in expires. Remove the fallback to expires
+ // when Facebook fixes their implementation.
+ e = vals.Get("expires")
+ }
+ expires, _ := strconv.Atoi(e)
+ if expires != 0 {
+ token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
+ }
+ default:
+ var tj tokenJSON
+ if err = json.Unmarshal(body, &tj); err != nil {
+ return nil, err
+ }
+ token = &Token{
+ AccessToken: tj.AccessToken,
+ TokenType: tj.TokenType,
+ RefreshToken: tj.RefreshToken,
+ Expiry: tj.expiry(),
+ Raw: make(map[string]interface{}),
+ }
+ json.Unmarshal(body, &token.Raw) // no error checks for optional fields
+ }
+ // Don't overwrite `RefreshToken` with an empty value
+ // if this was a token refreshing request.
+ if token.RefreshToken == "" {
+ token.RefreshToken = v.Get("refresh_token")
+ }
+ return token, nil
+}
diff --git a/vendor/golang.org/x/oauth2/internal/token_test.go b/vendor/golang.org/x/oauth2/internal/token_test.go
new file mode 100644
index 0000000..df5eb26
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/token_test.go
@@ -0,0 +1,104 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+
+ "golang.org/x/net/context"
+)
+
+func TestRegisterBrokenAuthHeaderProvider(t *testing.T) {
+ RegisterBrokenAuthHeaderProvider("https://aaa.com/")
+ tokenURL := "https://aaa.com/token"
+ if providerAuthHeaderWorks(tokenURL) {
+ t.Errorf("got %q as unbroken; want broken", tokenURL)
+ }
+}
+
+func TestRetrieveTokenBustedNoSecret(t *testing.T) {
+ const clientID = "client-id"
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if got, want := r.FormValue("client_id"), clientID; got != want {
+ t.Errorf("client_id = %q; want %q", got, want)
+ }
+ if got, want := r.FormValue("client_secret"), ""; got != want {
+ t.Errorf("client_secret = %q; want empty", got)
+ }
+ }))
+ defer ts.Close()
+
+ RegisterBrokenAuthHeaderProvider(ts.URL)
+ _, err := RetrieveToken(context.Background(), clientID, "", ts.URL, url.Values{})
+ if err != nil {
+ t.Errorf("RetrieveToken = %v; want no error", err)
+ }
+}
+
+func Test_providerAuthHeaderWorks(t *testing.T) {
+ for _, p := range brokenAuthHeaderProviders {
+ if providerAuthHeaderWorks(p) {
+ t.Errorf("got %q as unbroken; want broken", p)
+ }
+ p := fmt.Sprintf("%ssomesuffix", p)
+ if providerAuthHeaderWorks(p) {
+ t.Errorf("got %q as unbroken; want broken", p)
+ }
+ }
+ p := "https://api.not-in-the-list-example.com/"
+ if !providerAuthHeaderWorks(p) {
+ t.Errorf("got %q as unbroken; want broken", p)
+ }
+}
+
+func TestProviderAuthHeaderWorksDomain(t *testing.T) {
+ tests := []struct {
+ tokenURL string
+ wantWorks bool
+ }{
+ {"https://dev-12345.okta.com/token-url", false},
+ {"https://dev-12345.oktapreview.com/token-url", false},
+ {"https://dev-12345.okta.org/token-url", true},
+ {"https://foo.bar.force.com/token-url", false},
+ {"https://foo.force.com/token-url", false},
+ {"https://force.com/token-url", true},
+ }
+
+ for _, test := range tests {
+ got := providerAuthHeaderWorks(test.tokenURL)
+ if got != test.wantWorks {
+ t.Errorf("providerAuthHeaderWorks(%q) = %v; want %v", test.tokenURL, got, test.wantWorks)
+ }
+ }
+}
+
+func TestRetrieveTokenWithContexts(t *testing.T) {
+ const clientID = "client-id"
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
+ defer ts.Close()
+
+ _, err := RetrieveToken(context.Background(), clientID, "", ts.URL, url.Values{})
+ if err != nil {
+ t.Errorf("RetrieveToken (with background context) = %v; want no error", err)
+ }
+
+ ctx, cancelfunc := context.WithCancel(context.Background())
+
+ cancellingts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ cancelfunc()
+ }))
+ defer cancellingts.Close()
+
+ _, err = RetrieveToken(ctx, clientID, "", cancellingts.URL, url.Values{})
+ if err == nil {
+ t.Errorf("RetrieveToken (with cancelled context) = nil; want error")
+ }
+}
diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go
new file mode 100644
index 0000000..783bd98
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/transport.go
@@ -0,0 +1,68 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+)
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient ContextKey
+
+// ContextKey is just an empty struct. It exists so HTTPClient can be
+// an immutable public variable with a unique type. It's immutable
+// because nobody else can create a ContextKey, being unexported.
+type ContextKey struct{}
+
+// ContextClientFunc is a func which tries to return an *http.Client
+// given a Context value. If it returns an error, the search stops
+// with that error. If it returns (nil, nil), the search continues
+// down the list of registered funcs.
+type ContextClientFunc func(context.Context) (*http.Client, error)
+
+var contextClientFuncs []ContextClientFunc
+
+func RegisterContextClientFunc(fn ContextClientFunc) {
+ contextClientFuncs = append(contextClientFuncs, fn)
+}
+
+func ContextClient(ctx context.Context) (*http.Client, error) {
+ if ctx != nil {
+ if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
+ return hc, nil
+ }
+ }
+ for _, fn := range contextClientFuncs {
+ c, err := fn(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if c != nil {
+ return c, nil
+ }
+ }
+ return http.DefaultClient, nil
+}
+
+func ContextTransport(ctx context.Context) http.RoundTripper {
+ hc, err := ContextClient(ctx)
+ // This is a rare error case (somebody using nil on App Engine).
+ if err != nil {
+ return ErrorTransport{err}
+ }
+ return hc.Transport
+}
+
+// ErrorTransport returns the specified error on RoundTrip.
+// This RoundTripper should be used in rare error cases where
+// error handling can be postponed to response handling time.
+type ErrorTransport struct{ Err error }
+
+func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) {
+ return nil, t.Err
+}
diff --git a/vendor/golang.org/x/oauth2/internal/transport_test.go b/vendor/golang.org/x/oauth2/internal/transport_test.go
new file mode 100644
index 0000000..8772ec5
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/transport_test.go
@@ -0,0 +1,38 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "net/http"
+ "testing"
+
+ "golang.org/x/net/context"
+)
+
+func TestContextClient(t *testing.T) {
+ rc := &http.Client{}
+ RegisterContextClientFunc(func(context.Context) (*http.Client, error) {
+ return rc, nil
+ })
+
+ c := &http.Client{}
+ ctx := context.WithValue(context.Background(), HTTPClient, c)
+
+ hc, err := ContextClient(ctx)
+ if err != nil {
+ t.Fatalf("want valid client; got err = %v", err)
+ }
+ if hc != c {
+ t.Fatalf("want context client = %p; got = %p", c, hc)
+ }
+
+ hc, err = ContextClient(context.TODO())
+ if err != nil {
+ t.Fatalf("want valid client; got err = %v", err)
+ }
+ if hc != rc {
+ t.Fatalf("want registered client = %p; got = %p", c, hc)
+ }
+}
diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go
new file mode 100644
index 0000000..683d2d2
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/jws/jws.go
@@ -0,0 +1,182 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jws provides a partial implementation
+// of JSON Web Signature encoding and decoding.
+// It exists to support the golang.org/x/oauth2 package.
+//
+// See RFC 7515.
+//
+// Deprecated: this package is not intended for public use and might be
+// removed in the future. It exists for internal use only.
+// Please switch to another JWS package or copy this package into your own
+// source tree.
+package jws // import "golang.org/x/oauth2/jws"
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// ClaimSet contains information about the JWT signature including the
+// permissions being requested (scopes), the target of the token, the issuer,
+// the time the token was issued, and the lifetime of the token.
+type ClaimSet struct {
+ Iss string `json:"iss"` // email address of the client_id of the application making the access token request
+ Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
+ Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional).
+ Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch)
+ Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch)
+ Typ string `json:"typ,omitempty"` // token type (Optional).
+
+ // Email for which the application is requesting delegated access (Optional).
+ Sub string `json:"sub,omitempty"`
+
+ // The old name of Sub. Client keeps setting Prn to be
+ // compliant with legacy OAuth 2.0 providers. (Optional)
+ Prn string `json:"prn,omitempty"`
+
+ // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
+ // This array is marshalled using custom code (see (c *ClaimSet) encode()).
+ PrivateClaims map[string]interface{} `json:"-"`
+}
+
+func (c *ClaimSet) encode() (string, error) {
+ // Set the issued-at time slightly in the past to tolerate machines whose
+ // clocks are not perfectly in sync. If the client machine's time is in the
+ // future according to Google's servers, an access token will not be issued.
+ now := time.Now().Add(-10 * time.Second)
+ if c.Iat == 0 {
+ c.Iat = now.Unix()
+ }
+ if c.Exp == 0 {
+ c.Exp = now.Add(time.Hour).Unix()
+ }
+ if c.Exp < c.Iat {
+ return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat)
+ }
+
+ b, err := json.Marshal(c)
+ if err != nil {
+ return "", err
+ }
+
+ if len(c.PrivateClaims) == 0 {
+ return base64.RawURLEncoding.EncodeToString(b), nil
+ }
+
+ // Marshal private claim set and then append it to b.
+ prv, err := json.Marshal(c.PrivateClaims)
+ if err != nil {
+ return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
+ }
+
+ // Concatenate public and private claim JSON objects.
+ if !bytes.HasSuffix(b, []byte{'}'}) {
+ return "", fmt.Errorf("jws: invalid JSON %s", b)
+ }
+ if !bytes.HasPrefix(prv, []byte{'{'}) {
+ return "", fmt.Errorf("jws: invalid JSON %s", prv)
+ }
+ b[len(b)-1] = ',' // Replace closing curly brace with a comma.
+ b = append(b, prv[1:]...) // Append private claims.
+ return base64.RawURLEncoding.EncodeToString(b), nil
+}
+
+// Header represents the header for the signed JWS payloads.
+type Header struct {
+ // The algorithm used for signature.
+ Algorithm string `json:"alg"`
+
+ // Represents the token type.
+ Typ string `json:"typ"`
+
+ // The optional hint of which key is being used.
+ KeyID string `json:"kid,omitempty"`
+}
+
+func (h *Header) encode() (string, error) {
+ b, err := json.Marshal(h)
+ if err != nil {
+ return "", err
+ }
+ return base64.RawURLEncoding.EncodeToString(b), nil
+}
+
+// Decode decodes a claim set from a JWS payload.
+func Decode(payload string) (*ClaimSet, error) {
+ // decode returned id token to get expiry
+ s := strings.Split(payload, ".")
+ if len(s) < 2 {
+ // TODO(jbd): Provide more context about the error.
+ return nil, errors.New("jws: invalid token received")
+ }
+ decoded, err := base64.RawURLEncoding.DecodeString(s[1])
+ if err != nil {
+ return nil, err
+ }
+ c := &ClaimSet{}
+ err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c)
+ return c, err
+}
+
+// Signer returns a signature for the given data.
+type Signer func(data []byte) (sig []byte, err error)
+
+// EncodeWithSigner encodes a header and claim set with the provided signer.
+func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) {
+ head, err := header.encode()
+ if err != nil {
+ return "", err
+ }
+ cs, err := c.encode()
+ if err != nil {
+ return "", err
+ }
+ ss := fmt.Sprintf("%s.%s", head, cs)
+ sig, err := sg([]byte(ss))
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil
+}
+
+// Encode encodes a signed JWS with the provided header and claim set.
+// It invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key.
+func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) {
+ sg := func(data []byte) (sig []byte, err error) {
+ h := sha256.New()
+ h.Write(data)
+ return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil))
+ }
+ return EncodeWithSigner(header, c, sg)
+}
+
+// Verify tests whether the provided JWT token's signature was produced by the private key
+// associated with the supplied public key.
+func Verify(token string, key *rsa.PublicKey) error {
+ parts := strings.Split(token, ".")
+ if len(parts) != 3 {
+ return errors.New("jws: invalid token received, token must have 3 parts")
+ }
+
+ signedContent := parts[0] + "." + parts[1]
+ signatureString, err := base64.RawURLEncoding.DecodeString(parts[2])
+ if err != nil {
+ return err
+ }
+
+ h := sha256.New()
+ h.Write([]byte(signedContent))
+ return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString))
+}
diff --git a/vendor/golang.org/x/oauth2/jws/jws_test.go b/vendor/golang.org/x/oauth2/jws/jws_test.go
new file mode 100644
index 0000000..39a136a
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/jws/jws_test.go
@@ -0,0 +1,46 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jws
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "testing"
+)
+
+func TestSignAndVerify(t *testing.T) {
+ header := &Header{
+ Algorithm: "RS256",
+ Typ: "JWT",
+ }
+ payload := &ClaimSet{
+ Iss: "http://google.com/",
+ Aud: "",
+ Exp: 3610,
+ Iat: 10,
+ }
+
+ privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ token, err := Encode(header, payload, privateKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = Verify(token, &privateKey.PublicKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestVerifyFailsOnMalformedClaim(t *testing.T) {
+ err := Verify("abc.def", nil)
+ if err == nil {
+ t.Error("got no errors; want improperly formed JWT not to be verified")
+ }
+}
diff --git a/vendor/golang.org/x/oauth2/jwt/example_test.go b/vendor/golang.org/x/oauth2/jwt/example_test.go
new file mode 100644
index 0000000..58503d8
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/jwt/example_test.go
@@ -0,0 +1,33 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jwt_test
+
+import (
+ "context"
+
+ "golang.org/x/oauth2/jwt"
+)
+
+func ExampleJWTConfig() {
+ ctx := context.Background()
+ conf := &jwt.Config{
+ Email: "xxx@developer.com",
+ // The contents of your RSA private key or your PEM file
+ // that contains a private key.
+ // If you have a p12 file instead, you
+ // can use `openssl` to export the private key into a pem file.
+ //
+ // $ openssl pkcs12 -in key.p12 -out key.pem -nodes
+ //
+ // It only supports PEM containers with no passphrase.
+ PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."),
+ Subject: "user@example.com",
+ TokenURL: "https://provider.com/o/oauth2/token",
+ }
+ // Initialize an http.Client; the following GET request will be
+ // authorized and authenticated on behalf of user@example.com.
+ client := conf.Client(ctx)
+ client.Get("...")
+}
diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go
new file mode 100644
index 0000000..e016db4
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/jwt/jwt.go
@@ -0,0 +1,159 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly
+// known as "two-legged OAuth 2.0".
+//
+// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12
+package jwt
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/internal"
+ "golang.org/x/oauth2/jws"
+)
+
+var (
+ defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+ defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"}
+)
+
+// Config is the configuration for using JWT to fetch tokens,
+// commonly known as "two-legged OAuth 2.0".
+type Config struct {
+ // Email is the OAuth client identifier used when communicating with
+ // the configured OAuth provider.
+ Email string
+
+ // PrivateKey contains the contents of an RSA private key or the
+ // contents of a PEM file that contains a private key. The provided
+ // private key is used to sign JWT payloads.
+ // PEM containers with a passphrase are not supported.
+ // Use the following command to convert a PKCS 12 file into a PEM.
+ //
+ // $ openssl pkcs12 -in key.p12 -out key.pem -nodes
+ //
+ PrivateKey []byte
+
+ // PrivateKeyID contains an optional hint indicating which key is being
+ // used.
+ PrivateKeyID string
+
+ // Subject is the optional user to impersonate.
+ Subject string
+
+ // Scopes optionally specifies a list of requested permission scopes.
+ Scopes []string
+
+ // TokenURL is the endpoint required to complete the 2-legged JWT flow.
+ TokenURL string
+
+ // Expires optionally specifies how long the token is valid for.
+ Expires time.Duration
+}
+
+// TokenSource returns a JWT TokenSource using the configuration
+// in c and the HTTP client from the provided context.
+func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
+ return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
+}
+
+// Client returns an HTTP client wrapping the context's
+// HTTP transport and adding Authorization headers with tokens
+// obtained from c.
+//
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context) *http.Client {
+ return oauth2.NewClient(ctx, c.TokenSource(ctx))
+}
+
+// jwtSource is a source that always does a signed JWT request for a token.
+// It should typically be wrapped with a reuseTokenSource.
+type jwtSource struct {
+ ctx context.Context
+ conf *Config
+}
+
+func (js jwtSource) Token() (*oauth2.Token, error) {
+ pk, err := internal.ParseKey(js.conf.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ hc := oauth2.NewClient(js.ctx, nil)
+ claimSet := &jws.ClaimSet{
+ Iss: js.conf.Email,
+ Scope: strings.Join(js.conf.Scopes, " "),
+ Aud: js.conf.TokenURL,
+ }
+ if subject := js.conf.Subject; subject != "" {
+ claimSet.Sub = subject
+ // prn is the old name of sub. Keep setting it
+ // to be compatible with legacy OAuth 2.0 providers.
+ claimSet.Prn = subject
+ }
+ if t := js.conf.Expires; t > 0 {
+ claimSet.Exp = time.Now().Add(t).Unix()
+ }
+ h := *defaultHeader
+ h.KeyID = js.conf.PrivateKeyID
+ payload, err := jws.Encode(&h, claimSet, pk)
+ if err != nil {
+ return nil, err
+ }
+ v := url.Values{}
+ v.Set("grant_type", defaultGrantType)
+ v.Set("assertion", payload)
+ resp, err := hc.PostForm(js.conf.TokenURL, v)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ if c := resp.StatusCode; c < 200 || c > 299 {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body)
+ }
+ // tokenRes is the JSON response body.
+ var tokenRes struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ IDToken string `json:"id_token"`
+ ExpiresIn int64 `json:"expires_in"` // relative seconds from now
+ }
+ if err := json.Unmarshal(body, &tokenRes); err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ token := &oauth2.Token{
+ AccessToken: tokenRes.AccessToken,
+ TokenType: tokenRes.TokenType,
+ }
+ raw := make(map[string]interface{})
+ json.Unmarshal(body, &raw) // no error checks for optional fields
+ token = token.WithExtra(raw)
+
+ if secs := tokenRes.ExpiresIn; secs > 0 {
+ token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
+ }
+ if v := tokenRes.IDToken; v != "" {
+ // decode returned id token to get expiry
+ claimSet, err := jws.Decode(v)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err)
+ }
+ token.Expiry = time.Unix(claimSet.Exp, 0)
+ }
+ return token, nil
+}
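
Config.Client is the usual entry point for this flow (see example_test.go above), but the TokenSource can also be used on its own when only the bearer token itself is needed, for example to hand it to something other than an HTTP client. A minimal sketch, with the email, private key and token URL as placeholders for real service-account values:

    package main

    import (
        "context"
        "fmt"
        "log"
        "time"

        "golang.org/x/oauth2/jwt"
    )

    func main() {
        conf := &jwt.Config{
            Email:      "service-account@example.com",                // placeholder
            PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."), // placeholder PEM
            Scopes:     []string{"read", "write"},                    // placeholder scopes
            TokenURL:   "https://provider.example.com/o/oauth2/token",
            Expires:    time.Hour, // sets the "exp" claim one hour ahead
        }

        // TokenSource wraps jwtSource in a ReuseTokenSource, so repeated
        // calls reuse the cached token until it expires.
        ts := conf.TokenSource(context.Background())
        tok, err := ts.Token()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(tok.TokenType, tok.Expiry)
    }
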
diff --git a/vendor/golang.org/x/oauth2/jwt/jwt_test.go b/vendor/golang.org/x/oauth2/jwt/jwt_test.go
new file mode 100644
index 0000000..9f82c71
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/jwt/jwt_test.go
@@ -0,0 +1,190 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jwt
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+
+ "golang.org/x/oauth2/jws"
+)
+
+var dummyPrivateKey = []byte(`-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE
+DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY
+fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK
+1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr
+k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9
+/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt
+3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn
+2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3
+nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK
+6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf
+5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e
+DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1
+M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g
+z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y
+1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK
+J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U
+f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx
+QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA
+cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr
+Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw
+5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg
+KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84
+OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd
+mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ
+5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg==
+-----END RSA PRIVATE KEY-----`)
+
+func TestJWTFetch_JSONResponse(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{
+ "access_token": "90d64460d14870c08c81352a05dedd3465940a7c",
+ "scope": "user",
+ "token_type": "bearer",
+ "expires_in": 3600
+ }`))
+ }))
+ defer ts.Close()
+
+ conf := &Config{
+ Email: "aaa@xxx.com",
+ PrivateKey: dummyPrivateKey,
+ TokenURL: ts.URL,
+ }
+ tok, err := conf.TokenSource(context.Background()).Token()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !tok.Valid() {
+ t.Errorf("got invalid token: %v", tok)
+ }
+ if got, want := tok.AccessToken, "90d64460d14870c08c81352a05dedd3465940a7c"; got != want {
+ t.Errorf("access token = %q; want %q", got, want)
+ }
+ if got, want := tok.TokenType, "bearer"; got != want {
+ t.Errorf("token type = %q; want %q", got, want)
+ }
+ if got := tok.Expiry.IsZero(); got {
+ t.Errorf("token expiry = %v, want none", got)
+ }
+ scope := tok.Extra("scope")
+ if got, want := scope, "user"; got != want {
+ t.Errorf("scope = %q; want %q", got, want)
+ }
+}
+
+func TestJWTFetch_BadResponse(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`))
+ }))
+ defer ts.Close()
+
+ conf := &Config{
+ Email: "aaa@xxx.com",
+ PrivateKey: dummyPrivateKey,
+ TokenURL: ts.URL,
+ }
+ tok, err := conf.TokenSource(context.Background()).Token()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if tok == nil {
+ t.Fatalf("got nil token; want token")
+ }
+ if tok.Valid() {
+ t.Errorf("got invalid token: %v", tok)
+ }
+ if got, want := tok.AccessToken, ""; got != want {
+ t.Errorf("access token = %q; want %q", got, want)
+ }
+ if got, want := tok.TokenType, "bearer"; got != want {
+ t.Errorf("token type = %q; want %q", got, want)
+ }
+ scope := tok.Extra("scope")
+ if got, want := scope, "user"; got != want {
+ t.Errorf("token scope = %q; want %q", got, want)
+ }
+}
+
+func TestJWTFetch_BadResponseType(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`))
+ }))
+ defer ts.Close()
+ conf := &Config{
+ Email: "aaa@xxx.com",
+ PrivateKey: dummyPrivateKey,
+ TokenURL: ts.URL,
+ }
+ tok, err := conf.TokenSource(context.Background()).Token()
+ if err == nil {
+ t.Error("got a token; expected error")
+ if got, want := tok.AccessToken, ""; got != want {
+ t.Errorf("access token = %q; want %q", got, want)
+ }
+ }
+}
+
+func TestJWTFetch_Assertion(t *testing.T) {
+ var assertion string
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ r.ParseForm()
+ assertion = r.Form.Get("assertion")
+
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{
+ "access_token": "90d64460d14870c08c81352a05dedd3465940a7c",
+ "scope": "user",
+ "token_type": "bearer",
+ "expires_in": 3600
+ }`))
+ }))
+ defer ts.Close()
+
+ conf := &Config{
+ Email: "aaa@xxx.com",
+ PrivateKey: dummyPrivateKey,
+ PrivateKeyID: "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
+ TokenURL: ts.URL,
+ }
+
+ _, err := conf.TokenSource(context.Background()).Token()
+ if err != nil {
+ t.Fatalf("Failed to fetch token: %v", err)
+ }
+
+ parts := strings.Split(assertion, ".")
+ if len(parts) != 3 {
+ t.Fatalf("assertion = %q; want 3 parts", assertion)
+ }
+ gotjson, err := base64.RawURLEncoding.DecodeString(parts[0])
+ if err != nil {
+ t.Fatalf("invalid token header; err = %v", err)
+ }
+
+ got := jws.Header{}
+ if err := json.Unmarshal(gotjson, &got); err != nil {
+ t.Errorf("failed to unmarshal json token header = %q; err = %v", gotjson, err)
+ }
+
+ want := jws.Header{
+ Algorithm: "RS256",
+ Typ: "JWT",
+ KeyID: "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
+ }
+ if got != want {
+ t.Errorf("access token header = %q; want %q", got, want)
+ }
+}
diff --git a/vendor/golang.org/x/oauth2/linkedin/linkedin.go b/vendor/golang.org/x/oauth2/linkedin/linkedin.go
new file mode 100644
index 0000000..b619f93
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/linkedin/linkedin.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package linkedin provides constants for using OAuth2 to access LinkedIn.
+package linkedin // import "golang.org/x/oauth2/linkedin"
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is LinkedIn's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://www.linkedin.com/uas/oauth2/authorization",
+ TokenURL: "https://www.linkedin.com/uas/oauth2/accessToken",
+}
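
Provider packages such as this one only export Endpoint values; they are meant to be dropped into an oauth2.Config. A brief sketch, with placeholder client credentials, redirect URL and scope:

    package main

    import (
        "fmt"

        "golang.org/x/oauth2"
        "golang.org/x/oauth2/linkedin"
    )

    func main() {
        conf := &oauth2.Config{
            ClientID:     "CLIENT_ID",     // placeholder
            ClientSecret: "CLIENT_SECRET", // placeholder
            RedirectURL:  "https://example.com/callback",
            Scopes:       []string{"profile"}, // placeholder scope
            Endpoint:     linkedin.Endpoint,
        }
        // A real application generates a fresh state value per request and
        // verifies it on the redirect callback.
        fmt.Println(conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline))
    }
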
diff --git a/vendor/golang.org/x/oauth2/mediamath/mediamath.go b/vendor/golang.org/x/oauth2/mediamath/mediamath.go
new file mode 100644
index 0000000..3ebce5d
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/mediamath/mediamath.go
@@ -0,0 +1,22 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package mediamath provides constants for using OAuth2 to access MediaMath.
+package mediamath // import "golang.org/x/oauth2/mediamath"
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is MediaMath's OAuth 2.0 endpoint for production.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://api.mediamath.com/oauth2/v1.0/authorize",
+ TokenURL: "https://api.mediamath.com/oauth2/v1.0/token",
+}
+
+// SandboxEndpoint is MediaMath's OAuth 2.0 endpoint for sandbox.
+var SandboxEndpoint = oauth2.Endpoint{
+ AuthURL: "https://t1sandbox.mediamath.com/oauth2/v1.0/authorize",
+ TokenURL: "https://t1sandbox.mediamath.com/oauth2/v1.0/token",
+}
diff --git a/vendor/golang.org/x/oauth2/microsoft/microsoft.go b/vendor/golang.org/x/oauth2/microsoft/microsoft.go
new file mode 100644
index 0000000..f21b398
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/microsoft/microsoft.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package microsoft provides constants for using OAuth2 to access Windows Live ID.
+package microsoft // import "golang.org/x/oauth2/microsoft"
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// LiveConnectEndpoint is the Windows Live ID OAuth 2.0 endpoint.
+var LiveConnectEndpoint = oauth2.Endpoint{
+ AuthURL: "https://login.live.com/oauth20_authorize.srf",
+ TokenURL: "https://login.live.com/oauth20_token.srf",
+}
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
new file mode 100644
index 0000000..4bafe87
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/oauth2.go
@@ -0,0 +1,344 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package oauth2 provides support for making
+// OAuth2 authorized and authenticated HTTP requests.
+// It can additionally grant authorization with Bearer JWT.
+package oauth2 // import "golang.org/x/oauth2"
+
+import (
+ "bytes"
+ "errors"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2/internal"
+)
+
+// NoContext is the default context you should supply if not using
+// your own context.Context (see https://golang.org/x/net/context).
+//
+// Deprecated: Use context.Background() or context.TODO() instead.
+var NoContext = context.TODO()
+
+// RegisterBrokenAuthHeaderProvider registers an OAuth2 server
+// identified by the tokenURL prefix as an OAuth2 implementation
+// which doesn't support the HTTP Basic authentication
+// scheme to authenticate with the authorization server.
+// Once a server is registered, credentials (client_id and client_secret)
+// will be passed as query parameters rather than being present
+// in the Authorization header.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {
+ internal.RegisterBrokenAuthHeaderProvider(tokenURL)
+}
+
+// Config describes a typical 3-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+// For the client credentials 2-legged OAuth2 flow, see the clientcredentials
+// package (https://golang.org/x/oauth2/clientcredentials).
+type Config struct {
+ // ClientID is the application's ID.
+ ClientID string
+
+ // ClientSecret is the application's secret.
+ ClientSecret string
+
+ // Endpoint contains the resource server's token endpoint
+ // URLs. These are constants specific to each server and are
+ // often available via site-specific packages, such as
+ // google.Endpoint or github.Endpoint.
+ Endpoint Endpoint
+
+ // RedirectURL is the URL to which users are redirected back
+ // after they complete the OAuth flow at the provider.
+ RedirectURL string
+
+ // Scopes specifies optional requested permissions.
+ Scopes []string
+}
+
+// A TokenSource is anything that can return a token.
+type TokenSource interface {
+ // Token returns a token or an error.
+ // Token must be safe for concurrent use by multiple goroutines.
+ // The returned Token must not be modified.
+ Token() (*Token, error)
+}
+
+// Endpoint contains the OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+ AuthURL string
+ TokenURL string
+}
+
+var (
+ // AccessTypeOnline and AccessTypeOffline are options passed
+ // to the Config.AuthCodeURL method. They modify the
+ // "access_type" field that gets sent in the URL returned by
+ // AuthCodeURL.
+ //
+ // Online is the default if neither is specified. If your
+ // application needs to refresh access tokens when the user
+ // is not present at the browser, then use offline. This will
+ // result in your application obtaining a refresh token the
+ // first time your application exchanges an authorization
+ // code for a user.
+ AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online")
+ AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
+
+ // ApprovalForce forces the users to view the consent dialog
+ // and confirm the permissions request at the URL returned
+ // from AuthCodeURL, even if they've already done so.
+ ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
+)
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+ setValue(url.Values)
+}
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// to a provider's authorization endpoint.
+func SetAuthURLParam(key, value string) AuthCodeOption {
+ return setParam{key, value}
+}
+
+// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-zero string and validate that it matches
+// the state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
+ var buf bytes.Buffer
+ buf.WriteString(c.Endpoint.AuthURL)
+ v := url.Values{
+ "response_type": {"code"},
+ "client_id": {c.ClientID},
+ "redirect_uri": internal.CondVal(c.RedirectURL),
+ "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
+ "state": internal.CondVal(state),
+ }
+ for _, opt := range opts {
+ opt.setValue(v)
+ }
+ if strings.Contains(c.Endpoint.AuthURL, "?") {
+ buf.WriteByte('&')
+ } else {
+ buf.WriteByte('?')
+ }
+ buf.WriteString(v.Encode())
+ return buf.String()
+}
+
+// PasswordCredentialsToken converts a resource owner username and password
+// pair into a token.
+//
+// Per the RFC, this grant type should only be used "when there is a high
+// degree of trust between the resource owner and the client (e.g., the client
+// is part of the device operating system or a highly privileged application),
+// and when other authorization grant types are not available."
+// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
+//
+// The HTTP client to use is derived from the context.
+// If nil, http.DefaultClient is used.
+func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
+ return retrieveToken(ctx, c, url.Values{
+ "grant_type": {"password"},
+ "username": {username},
+ "password": {password},
+ "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
+ })
+}
+
+// Exchange converts an authorization code into a token.
+//
+// It is used after a resource provider redirects the user back
+// to the Redirect URI (the URL obtained from AuthCodeURL).
+//
+// The HTTP client to use is derived from the context.
+// If a client is not provided via the context, http.DefaultClient is used.
+//
+// The code will be in the *http.Request.FormValue("code"). Before
+// calling Exchange, be sure to validate FormValue("state").
+func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
+ return retrieveToken(ctx, c, url.Values{
+ "grant_type": {"authorization_code"},
+ "code": {code},
+ "redirect_uri": internal.CondVal(c.RedirectURL),
+ })
+}
+
+// Client returns an HTTP client using the provided token.
+// The token will auto-refresh as necessary. The underlying
+// HTTP transport will be obtained using the provided context.
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
+ return NewClient(ctx, c.TokenSource(ctx, t))
+}
+
+// TokenSource returns a TokenSource that returns t until t expires,
+// automatically refreshing it as necessary using the provided context.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
+ tkr := &tokenRefresher{
+ ctx: ctx,
+ conf: c,
+ }
+ if t != nil {
+ tkr.refreshToken = t.RefreshToken
+ }
+ return &reuseTokenSource{
+ t: t,
+ new: tkr,
+ }
+}
+
+// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
+// HTTP requests to renew a token using a RefreshToken.
+type tokenRefresher struct {
+ ctx context.Context // used to get HTTP requests
+ conf *Config
+ refreshToken string
+}
+
+// WARNING: Token is not safe for concurrent access, as it
+// updates the tokenRefresher's refreshToken field.
+// Within this package, it is used by reuseTokenSource which
+// synchronizes calls to this method with its own mutex.
+func (tf *tokenRefresher) Token() (*Token, error) {
+ if tf.refreshToken == "" {
+ return nil, errors.New("oauth2: token expired and refresh token is not set")
+ }
+
+ tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
+ "grant_type": {"refresh_token"},
+ "refresh_token": {tf.refreshToken},
+ })
+
+ if err != nil {
+ return nil, err
+ }
+ if tf.refreshToken != tk.RefreshToken {
+ tf.refreshToken = tk.RefreshToken
+ }
+ return tk, err
+}
+
+// reuseTokenSource is a TokenSource that holds a single token in memory
+// and validates its expiry before each call to retrieve it with
+// Token. If it's expired, it will be auto-refreshed using the
+// new TokenSource.
+type reuseTokenSource struct {
+ new TokenSource // called when t is expired.
+
+ mu sync.Mutex // guards t
+ t *Token
+}
+
+// Token returns the current token if it's still valid, else it
+// refreshes the token by calling the wrapped TokenSource and
+// returns the new one.
+func (s *reuseTokenSource) Token() (*Token, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.t.Valid() {
+ return s.t, nil
+ }
+ t, err := s.new.Token()
+ if err != nil {
+ return nil, err
+ }
+ s.t = t
+ return t, nil
+}
+
+// StaticTokenSource returns a TokenSource that always returns the same token.
+// Because the provided token t is never refreshed, StaticTokenSource is only
+// useful for tokens that never expire.
+func StaticTokenSource(t *Token) TokenSource {
+ return staticTokenSource{t}
+}
+
+// staticTokenSource is a TokenSource that always returns the same Token.
+type staticTokenSource struct {
+ t *Token
+}
+
+func (s staticTokenSource) Token() (*Token, error) {
+ return s.t, nil
+}
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient internal.ContextKey
+
+// NewClient creates an *http.Client from a Context and TokenSource.
+// The returned client is not valid beyond the lifetime of the context.
+//
+// Note that if a custom *http.Client is provided via the Context it
+// is used only for token acquisition and is not used to configure the
+// *http.Client returned from NewClient.
+//
+// As a special case, if src is nil, a non-OAuth2 client is returned
+// using the provided context. This exists to support related OAuth2
+// packages.
+func NewClient(ctx context.Context, src TokenSource) *http.Client {
+ if src == nil {
+ c, err := internal.ContextClient(ctx)
+ if err != nil {
+ return &http.Client{Transport: internal.ErrorTransport{Err: err}}
+ }
+ return c
+ }
+ return &http.Client{
+ Transport: &Transport{
+ Base: internal.ContextTransport(ctx),
+ Source: ReuseTokenSource(nil, src),
+ },
+ }
+}
+
+// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// same token as long as it's valid, starting with t.
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+ // Don't wrap a reuseTokenSource in itself. That would work,
+ // but cause an unnecessary number of mutex operations.
+ // Just build the equivalent one.
+ if rt, ok := src.(*reuseTokenSource); ok {
+ if t == nil {
+ // Just use it directly.
+ return rt
+ }
+ src = rt.new
+ }
+ return &reuseTokenSource{
+ t: t,
+ new: src,
+ }
+}
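
Putting AuthCodeURL, Exchange and Client together, a typical three-legged flow in a web application looks roughly like the sketch below. The endpoint URLs, credentials and routes are placeholders, and a real handler must generate a random state per login and compare it on the callback instead of using a constant.

    package main

    import (
        "context"
        "fmt"
        "log"
        "net/http"

        "golang.org/x/oauth2"
    )

    var conf = &oauth2.Config{
        ClientID:     "CLIENT_ID",     // placeholder
        ClientSecret: "CLIENT_SECRET", // placeholder
        RedirectURL:  "https://example.com/oauth/callback",
        Scopes:       []string{"profile"}, // placeholder scope
        Endpoint: oauth2.Endpoint{
            AuthURL:  "https://provider.example.com/oauth/authorize",
            TokenURL: "https://provider.example.com/oauth/token",
        },
    }

    func login(w http.ResponseWriter, r *http.Request) {
        // Send the user to the provider's consent page.
        http.Redirect(w, r, conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline), http.StatusFound)
    }

    func callback(w http.ResponseWriter, r *http.Request) {
        // Check r.FormValue("state") against the value issued in login
        // before exchanging the code (omitted here for brevity).
        tok, err := conf.Exchange(context.Background(), r.FormValue("code"))
        if err != nil {
            http.Error(w, err.Error(), http.StatusBadGateway)
            return
        }
        // The returned client injects and refreshes the token automatically.
        client := conf.Client(context.Background(), tok)
        resp, err := client.Get("https://api.provider.example.com/me") // placeholder API
        if err != nil {
            http.Error(w, err.Error(), http.StatusBadGateway)
            return
        }
        defer resp.Body.Close()
        fmt.Fprintln(w, resp.Status)
    }

    func main() {
        http.HandleFunc("/login", login)
        http.HandleFunc("/oauth/callback", callback)
        log.Fatal(http.ListenAndServe(":8080", nil))
    }
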
diff --git a/vendor/golang.org/x/oauth2/oauth2_test.go b/vendor/golang.org/x/oauth2/oauth2_test.go
new file mode 100644
index 0000000..09293ed
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/oauth2_test.go
@@ -0,0 +1,490 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+type mockTransport struct {
+ rt func(req *http.Request) (resp *http.Response, err error)
+}
+
+func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
+ return t.rt(req)
+}
+
+func newConf(url string) *Config {
+ return &Config{
+ ClientID: "CLIENT_ID",
+ ClientSecret: "CLIENT_SECRET",
+ RedirectURL: "REDIRECT_URL",
+ Scopes: []string{"scope1", "scope2"},
+ Endpoint: Endpoint{
+ AuthURL: url + "/auth",
+ TokenURL: url + "/token",
+ },
+ }
+}
+
+func TestAuthCodeURL(t *testing.T) {
+ conf := newConf("server")
+ url := conf.AuthCodeURL("foo", AccessTypeOffline, ApprovalForce)
+ const want = "server/auth?access_type=offline&approval_prompt=force&client_id=CLIENT_ID&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=foo"
+ if got := url; got != want {
+ t.Errorf("got auth code URL = %q; want %q", got, want)
+ }
+}
+
+func TestAuthCodeURL_CustomParam(t *testing.T) {
+ conf := newConf("server")
+ param := SetAuthURLParam("foo", "bar")
+ url := conf.AuthCodeURL("baz", param)
+ const want = "server/auth?client_id=CLIENT_ID&foo=bar&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=baz"
+ if got := url; got != want {
+ t.Errorf("got auth code = %q; want %q", got, want)
+ }
+}
+
+func TestAuthCodeURL_Optional(t *testing.T) {
+ conf := &Config{
+ ClientID: "CLIENT_ID",
+ Endpoint: Endpoint{
+ AuthURL: "/auth-url",
+ TokenURL: "/token-url",
+ },
+ }
+ url := conf.AuthCodeURL("")
+ const want = "/auth-url?client_id=CLIENT_ID&response_type=code"
+ if got := url; got != want {
+ t.Fatalf("got auth code = %q; want %q", got, want)
+ }
+}
+
+func TestURLUnsafeClientConfig(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if got, want := r.Header.Get("Authorization"), "Basic Q0xJRU5UX0lEJTNGJTNGOkNMSUVOVF9TRUNSRVQlM0YlM0Y="; got != want {
+ t.Errorf("Authorization header = %q; want %q", got, want)
+ }
+
+ w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
+ w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer"))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ conf.ClientID = "CLIENT_ID??"
+ conf.ClientSecret = "CLIENT_SECRET??"
+ _, err := conf.Exchange(context.Background(), "exchange-code")
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestExchangeRequest(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() != "/token" {
+ t.Errorf("Unexpected exchange request URL, %v is found.", r.URL)
+ }
+ headerAuth := r.Header.Get("Authorization")
+ if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" {
+ t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ if headerContentType != "application/x-www-form-urlencoded" {
+ t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
+ }
+ body, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Errorf("Failed reading request body: %s.", err)
+ }
+ if string(body) != "code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL" {
+ t.Errorf("Unexpected exchange payload, %v is found.", string(body))
+ }
+ w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
+ w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer"))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ tok, err := conf.Exchange(context.Background(), "exchange-code")
+ if err != nil {
+ t.Error(err)
+ }
+ if !tok.Valid() {
+ t.Fatalf("Token invalid. Got: %#v", tok)
+ }
+ if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
+ t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
+ }
+ if tok.TokenType != "bearer" {
+ t.Errorf("Unexpected token type, %#v.", tok.TokenType)
+ }
+ scope := tok.Extra("scope")
+ if scope != "user" {
+ t.Errorf("Unexpected value for scope: %v", scope)
+ }
+}
+
+func TestExchangeRequest_JSONResponse(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() != "/token" {
+ t.Errorf("Unexpected exchange request URL, %v is found.", r.URL)
+ }
+ headerAuth := r.Header.Get("Authorization")
+ if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" {
+ t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ if headerContentType != "application/x-www-form-urlencoded" {
+ t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
+ }
+ body, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Errorf("Failed reading request body: %s.", err)
+ }
+ if string(body) != "code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL" {
+ t.Errorf("Unexpected exchange payload, %v is found.", string(body))
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"access_token": "90d64460d14870c08c81352a05dedd3465940a7c", "scope": "user", "token_type": "bearer", "expires_in": 86400}`))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ tok, err := conf.Exchange(context.Background(), "exchange-code")
+ if err != nil {
+ t.Error(err)
+ }
+ if !tok.Valid() {
+ t.Fatalf("Token invalid. Got: %#v", tok)
+ }
+ if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
+ t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
+ }
+ if tok.TokenType != "bearer" {
+ t.Errorf("Unexpected token type, %#v.", tok.TokenType)
+ }
+ scope := tok.Extra("scope")
+ if scope != "user" {
+ t.Errorf("Unexpected value for scope: %v", scope)
+ }
+ expiresIn := tok.Extra("expires_in")
+ if expiresIn != float64(86400) {
+ t.Errorf("Unexpected non-numeric value for expires_in: %v", expiresIn)
+ }
+}
+
+func TestExtraValueRetrieval(t *testing.T) {
+ values := url.Values{}
+ kvmap := map[string]string{
+ "scope": "user", "token_type": "bearer", "expires_in": "86400.92",
+ "server_time": "1443571905.5606415", "referer_ip": "10.0.0.1",
+ "etag": "\"afZYj912P4alikMz_P11982\"", "request_id": "86400",
+ "untrimmed": " untrimmed ",
+ }
+ for key, value := range kvmap {
+ values.Set(key, value)
+ }
+
+ tok := Token{raw: values}
+ scope := tok.Extra("scope")
+ if got, want := scope, "user"; got != want {
+ t.Errorf("got scope = %q; want %q", got, want)
+ }
+ serverTime := tok.Extra("server_time")
+ if got, want := serverTime, 1443571905.5606415; got != want {
+ t.Errorf("got server_time value = %v; want %v", got, want)
+ }
+ refererIP := tok.Extra("referer_ip")
+ if got, want := refererIP, "10.0.0.1"; got != want {
+ t.Errorf("got referer_ip value = %v, want %v", got, want)
+ }
+ expiresIn := tok.Extra("expires_in")
+ if got, want := expiresIn, 86400.92; got != want {
+ t.Errorf("got expires_in value = %v, want %v", got, want)
+ }
+ requestID := tok.Extra("request_id")
+ if got, want := requestID, int64(86400); got != want {
+ t.Errorf("got request_id value = %v, want %v", got, want)
+ }
+ untrimmed := tok.Extra("untrimmed")
+ if got, want := untrimmed, " untrimmed "; got != want {
+ t.Errorf("got untrimmed = %q; want %q", got, want)
+ }
+}
+
+const day = 24 * time.Hour
+
+func TestExchangeRequest_JSONResponse_Expiry(t *testing.T) {
+ seconds := int32(day.Seconds())
+ for _, c := range []struct {
+ expires string
+ want bool
+ }{
+ {fmt.Sprintf(`"expires_in": %d`, seconds), true},
+ {fmt.Sprintf(`"expires_in": "%d"`, seconds), true}, // PayPal case
+ {fmt.Sprintf(`"expires": %d`, seconds), true}, // Facebook case
+ {`"expires": false`, false}, // wrong type
+ {`"expires": {}`, false}, // wrong type
+ {`"expires": "zzz"`, false}, // wrong value
+ } {
+ testExchangeRequest_JSONResponse_expiry(t, c.expires, c.want)
+ }
+}
+
+func testExchangeRequest_JSONResponse_expiry(t *testing.T, exp string, want bool) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(fmt.Sprintf(`{"access_token": "90d", "scope": "user", "token_type": "bearer", %s}`, exp)))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ t1 := time.Now().Add(day)
+ tok, err := conf.Exchange(context.Background(), "exchange-code")
+ t2 := time.Now().Add(day)
+
+ if got := (err == nil); got != want {
+ if want {
+ t.Errorf("unexpected error: got %v", err)
+ } else {
+ t.Errorf("unexpected success")
+ }
+ }
+ if !want {
+ return
+ }
+ if !tok.Valid() {
+ t.Fatalf("Token invalid. Got: %#v", tok)
+ }
+ expiry := tok.Expiry
+ if expiry.Before(t1) || expiry.After(t2) {
+ t.Errorf("Unexpected value for Expiry: %v (shold be between %v and %v)", expiry, t1, t2)
+ }
+}
+
+func TestExchangeRequest_BadResponse(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ tok, err := conf.Exchange(context.Background(), "code")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if tok.AccessToken != "" {
+ t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
+ }
+}
+
+func TestExchangeRequest_BadResponseType(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ _, err := conf.Exchange(context.Background(), "exchange-code")
+ if err == nil {
+ t.Error("expected error from invalid access_token type")
+ }
+}
+
+func TestExchangeRequest_NonBasicAuth(t *testing.T) {
+ tr := &mockTransport{
+ rt: func(r *http.Request) (w *http.Response, err error) {
+ headerAuth := r.Header.Get("Authorization")
+ if headerAuth != "" {
+ t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
+ }
+ return nil, errors.New("no response")
+ },
+ }
+ c := &http.Client{Transport: tr}
+ conf := &Config{
+ ClientID: "CLIENT_ID",
+ Endpoint: Endpoint{
+ AuthURL: "https://accounts.google.com/auth",
+ TokenURL: "https://accounts.google.com/token",
+ },
+ }
+
+ ctx := context.WithValue(context.Background(), HTTPClient, c)
+ conf.Exchange(ctx, "code")
+}
+
+func TestPasswordCredentialsTokenRequest(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer r.Body.Close()
+ expected := "/token"
+ if r.URL.String() != expected {
+ t.Errorf("URL = %q; want %q", r.URL, expected)
+ }
+ headerAuth := r.Header.Get("Authorization")
+ expected = "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ="
+ if headerAuth != expected {
+ t.Errorf("Authorization header = %q; want %q", headerAuth, expected)
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ expected = "application/x-www-form-urlencoded"
+ if headerContentType != expected {
+ t.Errorf("Content-Type header = %q; want %q", headerContentType, expected)
+ }
+ body, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Errorf("Failed reading request body: %s.", err)
+ }
+ expected = "grant_type=password&password=password1&scope=scope1+scope2&username=user1"
+ if string(body) != expected {
+ t.Errorf("res.Body = %q; want %q", string(body), expected)
+ }
+ w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
+ w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer"))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ tok, err := conf.PasswordCredentialsToken(context.Background(), "user1", "password1")
+ if err != nil {
+ t.Error(err)
+ }
+ if !tok.Valid() {
+ t.Fatalf("Token invalid. Got: %#v", tok)
+ }
+ expected := "90d64460d14870c08c81352a05dedd3465940a7c"
+ if tok.AccessToken != expected {
+ t.Errorf("AccessToken = %q; want %q", tok.AccessToken, expected)
+ }
+ expected = "bearer"
+ if tok.TokenType != expected {
+ t.Errorf("TokenType = %q; want %q", tok.TokenType, expected)
+ }
+}
+
+func TestTokenRefreshRequest(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() == "/somethingelse" {
+ return
+ }
+ if r.URL.String() != "/token" {
+ t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL)
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ if headerContentType != "application/x-www-form-urlencoded" {
+ t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
+ }
+ body, _ := ioutil.ReadAll(r.Body)
+ if string(body) != "grant_type=refresh_token&refresh_token=REFRESH_TOKEN" {
+ t.Errorf("Unexpected refresh token payload, %v is found.", string(body))
+ }
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ c := conf.Client(context.Background(), &Token{RefreshToken: "REFRESH_TOKEN"})
+ c.Get(ts.URL + "/somethingelse")
+}
+
+func TestFetchWithNoRefreshToken(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() == "/somethingelse" {
+ return
+ }
+ if r.URL.String() != "/token" {
+ t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL)
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ if headerContentType != "application/x-www-form-urlencoded" {
+ t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
+ }
+ body, _ := ioutil.ReadAll(r.Body)
+ if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" {
+ t.Errorf("Unexpected refresh token payload, %v is found.", string(body))
+ }
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ c := conf.Client(context.Background(), nil)
+ _, err := c.Get(ts.URL + "/somethingelse")
+ if err == nil {
+ t.Errorf("Fetch should return an error if no refresh token is set")
+ }
+}
+
+func TestRefreshToken_RefreshTokenReplacement(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"access_token":"ACCESS_TOKEN", "scope": "user", "token_type": "bearer", "refresh_token": "NEW_REFRESH_TOKEN"}`))
+ return
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ tkr := &tokenRefresher{
+ conf: conf,
+ ctx: context.Background(),
+ refreshToken: "OLD_REFRESH_TOKEN",
+ }
+ tk, err := tkr.Token()
+ if err != nil {
+ t.Errorf("got err = %v; want none", err)
+ return
+ }
+ if tk.RefreshToken != tkr.refreshToken {
+ t.Errorf("tokenRefresher.refresh_token = %q; want %q", tkr.refreshToken, tk.RefreshToken)
+ }
+}
+
+func TestRefreshToken_RefreshTokenPreservation(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"access_token":"ACCESS_TOKEN", "scope": "user", "token_type": "bearer"}`))
+ return
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ const oldRefreshToken = "OLD_REFRESH_TOKEN"
+ tkr := &tokenRefresher{
+ conf: conf,
+ ctx: context.Background(),
+ refreshToken: oldRefreshToken,
+ }
+ _, err := tkr.Token()
+ if err != nil {
+ t.Fatalf("got err = %v; want none", err)
+ }
+ if tkr.refreshToken != oldRefreshToken {
+ t.Errorf("tokenRefresher.refreshToken = %q; want %q", tkr.refreshToken, oldRefreshToken)
+ }
+}
+
+func TestConfigClientWithToken(t *testing.T) {
+ tok := &Token{
+ AccessToken: "abc123",
+ }
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if got, want := r.Header.Get("Authorization"), fmt.Sprintf("Bearer %s", tok.AccessToken); got != want {
+ t.Errorf("Authorization header = %q; want %q", got, want)
+ }
+ return
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+
+ c := conf.Client(context.Background(), tok)
+ req, err := http.NewRequest("GET", ts.URL, nil)
+ if err != nil {
+ t.Error(err)
+ }
+ _, err = c.Do(req)
+ if err != nil {
+ t.Error(err)
+ }
+}
diff --git a/vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go b/vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go
new file mode 100644
index 0000000..c0d093c
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package odnoklassniki provides constants for using OAuth2 to access Odnoklassniki.
+package odnoklassniki // import "golang.org/x/oauth2/odnoklassniki"
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is Odnoklassniki's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://www.odnoklassniki.ru/oauth/authorize",
+ TokenURL: "https://api.odnoklassniki.ru/oauth/token.do",
+}
diff --git a/vendor/golang.org/x/oauth2/paypal/paypal.go b/vendor/golang.org/x/oauth2/paypal/paypal.go
new file mode 100644
index 0000000..2e713c5
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/paypal/paypal.go
@@ -0,0 +1,22 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package paypal provides constants for using OAuth2 to access PayPal.
+package paypal // import "golang.org/x/oauth2/paypal"
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is PayPal's OAuth 2.0 endpoint in the live (production) environment.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://www.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
+ TokenURL: "https://api.paypal.com/v1/identity/openidconnect/tokenservice",
+}
+
+// SandboxEndpoint is PayPal's OAuth 2.0 endpoint in the sandbox (testing) environment.
+var SandboxEndpoint = oauth2.Endpoint{
+ AuthURL: "https://www.sandbox.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
+ TokenURL: "https://api.sandbox.paypal.com/v1/identity/openidconnect/tokenservice",
+}
diff --git a/vendor/golang.org/x/oauth2/slack/slack.go b/vendor/golang.org/x/oauth2/slack/slack.go
new file mode 100644
index 0000000..593d2f6
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/slack/slack.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slack provides constants for using OAuth2 to access Slack.
+package slack // import "golang.org/x/oauth2/slack"
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is Slack's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://slack.com/oauth/authorize",
+ TokenURL: "https://slack.com/api/oauth.access",
+}
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
new file mode 100644
index 0000000..bdac1de
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -0,0 +1,158 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2/internal"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+ // AccessToken is the token that authorizes and authenticates
+ // the requests.
+ AccessToken string `json:"access_token"`
+
+ // TokenType is the type of token.
+ // The Type method returns either this or "Bearer", the default.
+ TokenType string `json:"token_type,omitempty"`
+
+ // RefreshToken is a token that's used by the application
+ // (as opposed to the user) to refresh the access token
+ // if it expires.
+ RefreshToken string `json:"refresh_token,omitempty"`
+
+ // Expiry is the optional expiration time of the access token.
+ //
+ // If zero, TokenSource implementations will reuse the same
+ // token forever and RefreshToken or equivalent
+ // mechanisms for that TokenSource will not be used.
+ Expiry time.Time `json:"expiry,omitempty"`
+
+ // raw optionally contains extra metadata from the server
+ // when updating a token.
+ raw interface{}
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
+func (t *Token) Type() string {
+ if strings.EqualFold(t.TokenType, "bearer") {
+ return "Bearer"
+ }
+ if strings.EqualFold(t.TokenType, "mac") {
+ return "MAC"
+ }
+ if strings.EqualFold(t.TokenType, "basic") {
+ return "Basic"
+ }
+ if t.TokenType != "" {
+ return t.TokenType
+ }
+ return "Bearer"
+}
+
+// SetAuthHeader sets the Authorization header to r using the access
+// token in t.
+//
+// This method is unnecessary when using Transport or an HTTP Client
+// returned by this package.
+func (t *Token) SetAuthHeader(r *http.Request) {
+ r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
+}
+
+// WithExtra returns a new Token that's a clone of t, but using the
+// provided raw extra map. This is only intended for use by packages
+// implementing derivative OAuth2 flows.
+func (t *Token) WithExtra(extra interface{}) *Token {
+ t2 := new(Token)
+ *t2 = *t
+ t2.raw = extra
+ return t2
+}
+
+// Extra returns an extra field.
+// Extra fields are key-value pairs returned by the server as a
+// part of the token retrieval response.
+func (t *Token) Extra(key string) interface{} {
+ if raw, ok := t.raw.(map[string]interface{}); ok {
+ return raw[key]
+ }
+
+ vals, ok := t.raw.(url.Values)
+ if !ok {
+ return nil
+ }
+
+ v := vals.Get(key)
+ switch s := strings.TrimSpace(v); strings.Count(s, ".") {
+ case 0: // Contains no "."; try to parse as int
+ if i, err := strconv.ParseInt(s, 10, 64); err == nil {
+ return i
+ }
+ case 1: // Contains a single "."; try to parse as float
+ if f, err := strconv.ParseFloat(s, 64); err == nil {
+ return f
+ }
+ }
+
+ return v
+}
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *Token) expired() bool {
+ if t.Expiry.IsZero() {
+ return false
+ }
+ return t.Expiry.Add(-expiryDelta).Before(time.Now())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *Token) Valid() bool {
+ return t != nil && t.AccessToken != "" && !t.expired()
+}
+
+// tokenFromInternal maps an *internal.Token struct into
+// a *Token struct.
+func tokenFromInternal(t *internal.Token) *Token {
+ if t == nil {
+ return nil
+ }
+ return &Token{
+ AccessToken: t.AccessToken,
+ TokenType: t.TokenType,
+ RefreshToken: t.RefreshToken,
+ Expiry: t.Expiry,
+ raw: t.Raw,
+ }
+}
+
+// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
+// This token is then mapped from *internal.Token into an *oauth2.Token, which is returned
+// along with any error encountered.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+ tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
+ if err != nil {
+ return nil, err
+ }
+ return tokenFromInternal(tk), nil
+}
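
For pre-issued credentials that never expire (personal access tokens and similar), a Token can be constructed directly and served through StaticTokenSource; Valid then only requires a non-empty AccessToken, since a zero Expiry means the token never expires. A short sketch with a placeholder token and URL:

    package main

    import (
        "context"
        "fmt"
        "log"

        "golang.org/x/oauth2"
    )

    func main() {
        tok := &oauth2.Token{AccessToken: "pre-issued-access-token"} // placeholder
        // Expiry is zero, so the token is treated as never expiring.
        fmt.Println("valid:", tok.Valid())

        client := oauth2.NewClient(context.Background(), oauth2.StaticTokenSource(tok))
        resp, err := client.Get("https://api.example.com/resource") // placeholder URL
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status)
    }
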
diff --git a/vendor/golang.org/x/oauth2/token_test.go b/vendor/golang.org/x/oauth2/token_test.go
new file mode 100644
index 0000000..80db83c
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/token_test.go
@@ -0,0 +1,72 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "testing"
+ "time"
+)
+
+func TestTokenExtra(t *testing.T) {
+ type testCase struct {
+ key string
+ val interface{}
+ want interface{}
+ }
+ const key = "extra-key"
+ cases := []testCase{
+ {key: key, val: "abc", want: "abc"},
+ {key: key, val: 123, want: 123},
+ {key: key, val: "", want: ""},
+ {key: "other-key", val: "def", want: nil},
+ }
+ for _, tc := range cases {
+ extra := make(map[string]interface{})
+ extra[tc.key] = tc.val
+ tok := &Token{raw: extra}
+ if got, want := tok.Extra(key), tc.want; got != want {
+ t.Errorf("Extra(%q) = %q; want %q", key, got, want)
+ }
+ }
+}
+
+func TestTokenExpiry(t *testing.T) {
+ now := time.Now()
+ cases := []struct {
+ name string
+ tok *Token
+ want bool
+ }{
+ {name: "12 seconds", tok: &Token{Expiry: now.Add(12 * time.Second)}, want: false},
+ {name: "10 seconds", tok: &Token{Expiry: now.Add(expiryDelta)}, want: true},
+ {name: "-1 hour", tok: &Token{Expiry: now.Add(-1 * time.Hour)}, want: true},
+ }
+ for _, tc := range cases {
+ if got, want := tc.tok.expired(), tc.want; got != want {
+ t.Errorf("expired (%q) = %v; want %v", tc.name, got, want)
+ }
+ }
+}
+
+func TestTokenTypeMethod(t *testing.T) {
+ cases := []struct {
+ name string
+ tok *Token
+ want string
+ }{
+ {name: "bearer-mixed_case", tok: &Token{TokenType: "beAREr"}, want: "Bearer"},
+ {name: "default-bearer", tok: &Token{}, want: "Bearer"},
+ {name: "basic", tok: &Token{TokenType: "basic"}, want: "Basic"},
+ {name: "basic-capitalized", tok: &Token{TokenType: "Basic"}, want: "Basic"},
+ {name: "mac", tok: &Token{TokenType: "mac"}, want: "MAC"},
+ {name: "mac-caps", tok: &Token{TokenType: "MAC"}, want: "MAC"},
+ {name: "mac-mixed_case", tok: &Token{TokenType: "mAc"}, want: "MAC"},
+ }
+ for _, tc := range cases {
+ if got, want := tc.tok.Type(), tc.want; got != want {
+ t.Errorf("TokenType(%q) = %v; want %v", tc.name, got, want)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
new file mode 100644
index 0000000..92ac7e2
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/transport.go
@@ -0,0 +1,132 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Sources.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+ // Source supplies the token to add to outgoing requests'
+ // Authorization headers.
+ Source TokenSource
+
+ // Base is the base RoundTripper used to make HTTP requests.
+ // If nil, http.DefaultTransport is used.
+ Base http.RoundTripper
+
+ mu sync.Mutex // guards modReq
+ modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token. If no token exists or the token is expired,
+// it tries to refresh or fetch a new token.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ if t.Source == nil {
+ return nil, errors.New("oauth2: Transport's Source is nil")
+ }
+ token, err := t.Source.Token()
+ if err != nil {
+ return nil, err
+ }
+
+ req2 := cloneRequest(req) // per RoundTripper contract
+ token.SetAuthHeader(req2)
+ t.setModReq(req, req2)
+ res, err := t.base().RoundTrip(req2)
+ if err != nil {
+ t.setModReq(req, nil)
+ return nil, err
+ }
+ res.Body = &onEOFReader{
+ rc: res.Body,
+ fn: func() { t.setModReq(req, nil) },
+ }
+ return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *Transport) CancelRequest(req *http.Request) {
+ type canceler interface {
+ CancelRequest(*http.Request)
+ }
+ if cr, ok := t.base().(canceler); ok {
+ t.mu.Lock()
+ modReq := t.modReq[req]
+ delete(t.modReq, req)
+ t.mu.Unlock()
+ cr.CancelRequest(modReq)
+ }
+}
+
+func (t *Transport) base() http.RoundTripper {
+ if t.Base != nil {
+ return t.Base
+ }
+ return http.DefaultTransport
+}
+
+func (t *Transport) setModReq(orig, mod *http.Request) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.modReq == nil {
+ t.modReq = make(map[*http.Request]*http.Request)
+ }
+ if mod == nil {
+ delete(t.modReq, orig)
+ } else {
+ t.modReq[orig] = mod
+ }
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header, len(r.Header))
+ for k, s := range r.Header {
+ r2.Header[k] = append([]string(nil), s...)
+ }
+ return r2
+}
+
+type onEOFReader struct {
+ rc io.ReadCloser
+ fn func()
+}
+
+func (r *onEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+func (r *onEOFReader) Close() error {
+ err := r.rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *onEOFReader) runFunc() {
+ if fn := r.fn; fn != nil {
+ fn()
+ r.fn = nil
+ }
+}
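
Transport can also be assembled by hand when a custom base RoundTripper is needed, for example to tune timeouts or add logging underneath the Authorization header injection. A brief sketch, again with a placeholder token and URL:

    package main

    import (
        "fmt"
        "log"
        "net/http"
        "time"

        "golang.org/x/oauth2"
    )

    func main() {
        client := &http.Client{
            Transport: &oauth2.Transport{
                // Source supplies the token; StaticTokenSource never refreshes it.
                Source: oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "pre-issued-token"}), // placeholder
                // Base handles the request after the Authorization header is set.
                Base: &http.Transport{ResponseHeaderTimeout: 10 * time.Second},
            },
        }
        resp, err := client.Get("https://api.example.com/resource") // placeholder URL
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status)
    }
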
diff --git a/vendor/golang.org/x/oauth2/transport_test.go b/vendor/golang.org/x/oauth2/transport_test.go
new file mode 100644
index 0000000..d6e8087
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/transport_test.go
@@ -0,0 +1,108 @@
+package oauth2
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+)
+
+type tokenSource struct{ token *Token }
+
+func (t *tokenSource) Token() (*Token, error) {
+ return t.token, nil
+}
+
+func TestTransportNilTokenSource(t *testing.T) {
+ tr := &Transport{}
+ server := newMockServer(func(w http.ResponseWriter, r *http.Request) {})
+ defer server.Close()
+ client := &http.Client{Transport: tr}
+ resp, err := client.Get(server.URL)
+ if err == nil {
+ t.Errorf("got no errors, want an error with nil token source")
+ }
+ if resp != nil {
+ t.Errorf("Response = %v; want nil", resp)
+ }
+}
+
+func TestTransportTokenSource(t *testing.T) {
+ ts := &tokenSource{
+ token: &Token{
+ AccessToken: "abc",
+ },
+ }
+ tr := &Transport{
+ Source: ts,
+ }
+ server := newMockServer(func(w http.ResponseWriter, r *http.Request) {
+ if got, want := r.Header.Get("Authorization"), "Bearer abc"; got != want {
+ t.Errorf("Authorization header = %q; want %q", got, want)
+ }
+ })
+ defer server.Close()
+ client := &http.Client{Transport: tr}
+ res, err := client.Get(server.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+}
+
+// Test that token types are canonicalized regardless of case, per https://github.com/golang/oauth2/issues/113
+func TestTransportTokenSourceTypes(t *testing.T) {
+ const val = "abc"
+ tests := []struct {
+ key string
+ val string
+ want string
+ }{
+ {key: "bearer", val: val, want: "Bearer abc"},
+ {key: "mac", val: val, want: "MAC abc"},
+ {key: "basic", val: val, want: "Basic abc"},
+ }
+ for _, tc := range tests {
+ ts := &tokenSource{
+ token: &Token{
+ AccessToken: tc.val,
+ TokenType: tc.key,
+ },
+ }
+ tr := &Transport{
+ Source: ts,
+ }
+ server := newMockServer(func(w http.ResponseWriter, r *http.Request) {
+ if got, want := r.Header.Get("Authorization"), tc.want; got != want {
+ t.Errorf("Authorization header (%q) = %q; want %q", val, got, want)
+ }
+ })
+ defer server.Close()
+ client := &http.Client{Transport: tr}
+ res, err := client.Get(server.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+ }
+}
+
+func TestTokenValidNoAccessToken(t *testing.T) {
+ token := &Token{}
+ if token.Valid() {
+ t.Errorf("got valid with no access token; want invalid")
+ }
+}
+
+func TestExpiredWithExpiry(t *testing.T) {
+ token := &Token{
+ Expiry: time.Now().Add(-5 * time.Hour),
+ }
+ if token.Valid() {
+ t.Errorf("got valid with expired token; want invalid")
+ }
+}
+
+func newMockServer(handler func(w http.ResponseWriter, r *http.Request)) *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(handler))
+}
diff --git a/vendor/golang.org/x/oauth2/uber/uber.go b/vendor/golang.org/x/oauth2/uber/uber.go
new file mode 100644
index 0000000..5520a64
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/uber/uber.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uber provides constants for using OAuth2 to access Uber.
+package uber // import "golang.org/x/oauth2/uber"
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is Uber's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://login.uber.com/oauth/v2/authorize",
+ TokenURL: "https://login.uber.com/oauth/v2/token",
+}
diff --git a/vendor/golang.org/x/oauth2/vk/vk.go b/vendor/golang.org/x/oauth2/vk/vk.go
new file mode 100644
index 0000000..bd8e159
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/vk/vk.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package vk provides constants for using OAuth2 to access VK.com.
+package vk // import "golang.org/x/oauth2/vk"
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is VK's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://oauth.vk.com/authorize",
+ TokenURL: "https://oauth.vk.com/access_token",
+}
diff --git a/vendor/golang.org/x/oauth2/yandex/yandex.go b/vendor/golang.org/x/oauth2/yandex/yandex.go
new file mode 100644
index 0000000..5ebf666
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/yandex/yandex.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package yandex provides constants for using OAuth2 to access Yandex APIs.
+package yandex // import "golang.org/x/oauth2/yandex"
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is the Yandex OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://oauth.yandex.com/authorize",
+ TokenURL: "https://oauth.yandex.com/token",
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_darwin_test.go b/vendor/golang.org/x/sys/unix/dev_darwin_test.go
index 48d0448..bf1adf3 100644
--- a/vendor/golang.org/x/sys/unix/dev_darwin_test.go
+++ b/vendor/golang.org/x/sys/unix/dev_darwin_test.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build go1.7
+
package unix_test
import (
diff --git a/vendor/golang.org/x/sys/unix/dev_dragonfly_test.go b/vendor/golang.org/x/sys/unix/dev_dragonfly_test.go
index 2caba08..9add376 100644
--- a/vendor/golang.org/x/sys/unix/dev_dragonfly_test.go
+++ b/vendor/golang.org/x/sys/unix/dev_dragonfly_test.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build go1.7
+
package unix_test
import (
diff --git a/vendor/golang.org/x/sys/unix/dev_linux_test.go b/vendor/golang.org/x/sys/unix/dev_linux_test.go
index 6e001f3..2fd3ead 100644
--- a/vendor/golang.org/x/sys/unix/dev_linux_test.go
+++ b/vendor/golang.org/x/sys/unix/dev_linux_test.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build go1.7
+
package unix_test
import (
diff --git a/vendor/golang.org/x/sys/unix/dev_netbsd_test.go b/vendor/golang.org/x/sys/unix/dev_netbsd_test.go
index c39a80a..24a7e17 100644
--- a/vendor/golang.org/x/sys/unix/dev_netbsd_test.go
+++ b/vendor/golang.org/x/sys/unix/dev_netbsd_test.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build go1.7
+
package unix_test
import (
diff --git a/vendor/golang.org/x/sys/unix/dev_openbsd_test.go b/vendor/golang.org/x/sys/unix/dev_openbsd_test.go
index 5635d27..e6cb64f 100644
--- a/vendor/golang.org/x/sys/unix/dev_openbsd_test.go
+++ b/vendor/golang.org/x/sys/unix/dev_openbsd_test.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build go1.7
+
package unix_test
import (
diff --git a/vendor/golang.org/x/sys/unix/dev_solaris_test.go b/vendor/golang.org/x/sys/unix/dev_solaris_test.go
index db58c0d..656508c 100644
--- a/vendor/golang.org/x/sys/unix/dev_solaris_test.go
+++ b/vendor/golang.org/x/sys/unix/dev_solaris_test.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build go1.7
+
package unix_test
import (
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
index 47ab664..faae207 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
@@ -60,3 +60,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
}
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic
+
+// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
+// of darwin/arm the syscall is called sysctl instead of __sysctl.
+const SYS___SYSCTL = SYS_SYSCTL
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 78b714c..52c2037 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -159,9 +159,6 @@ const (
WAIT_OBJECT_0 = 0x00000000
WAIT_FAILED = 0xFFFFFFFF
- CREATE_NEW_PROCESS_GROUP = 0x00000200
- CREATE_UNICODE_ENVIRONMENT = 0x00000400
-
PROCESS_TERMINATE = 1
PROCESS_QUERY_INFORMATION = 0x00000400
SYNCHRONIZE = 0x00100000
@@ -179,6 +176,26 @@ const (
)
const (
+ // Process creation flags.
+ CREATE_BREAKAWAY_FROM_JOB = 0x01000000
+ CREATE_DEFAULT_ERROR_MODE = 0x04000000
+ CREATE_NEW_CONSOLE = 0x00000010
+ CREATE_NEW_PROCESS_GROUP = 0x00000200
+ CREATE_NO_WINDOW = 0x08000000
+ CREATE_PROTECTED_PROCESS = 0x00040000
+ CREATE_PRESERVE_CODE_AUTHZ_LEVEL = 0x02000000
+ CREATE_SEPARATE_WOW_VDM = 0x00000800
+ CREATE_SHARED_WOW_VDM = 0x00001000
+ CREATE_SUSPENDED = 0x00000004
+ CREATE_UNICODE_ENVIRONMENT = 0x00000400
+ DEBUG_ONLY_THIS_PROCESS = 0x00000002
+ DEBUG_PROCESS = 0x00000001
+ DETACHED_PROCESS = 0x00000008
+ EXTENDED_STARTUPINFO_PRESENT = 0x00080000
+ INHERIT_PARENT_AFFINITY = 0x00010000
+)
+
+const (
// flags for CreateToolhelp32Snapshot
TH32CS_SNAPHEAPLIST = 0x01
TH32CS_SNAPPROCESS = 0x02
diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml
new file mode 100644
index 0000000..0762cb9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/.travis.yml
@@ -0,0 +1,18 @@
+language: go
+
+go:
+ - 1.6.3
+ - 1.7.1
+
+install:
+ - go get -v -t -d google.golang.org/appengine/...
+ - mkdir sdk
+ - curl -o sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.40.zip"
+ - unzip -q sdk.zip -d sdk
+ - export APPENGINE_DEV_APPSERVER=$(pwd)/sdk/go_appengine/dev_appserver.py
+
+script:
+ - go version
+ - go test -v google.golang.org/appengine/...
+ - go test -v -race google.golang.org/appengine/...
+ - sdk/go_appengine/goapp test -v google.golang.org/appengine/...
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/google.golang.org/appengine/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md
new file mode 100644
index 0000000..b6b11d9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/README.md
@@ -0,0 +1,73 @@
+# Go App Engine packages
+
+[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine)
+
+This repository supports the Go runtime on App Engine,
+including both the standard App Engine and the
+"App Engine flexible environment" (formerly known as "Managed VMs").
+It provides APIs for interacting with App Engine services.
+Its canonical import path is `google.golang.org/appengine`.
+
+See https://cloud.google.com/appengine/docs/go/
+for more information.
+
+File issue reports and feature requests on the [Google App Engine issue
+tracker](https://code.google.com/p/googleappengine/issues/entry?template=Go%20defect).
+
+## Directory structure
+The top-level directory of this repository is the `appengine` package. It
+contains the basic APIs (e.g. `appengine.NewContext`) that apply across APIs.
+Specific API packages are in subdirectories (e.g. `datastore`).
+
+There is an `internal` subdirectory that contains service protocol buffers,
+plus packages required for connectivity to make API calls. App Engine apps
+should not directly import any package under `internal`.
+
+## Updating a Go App Engine app
+
+This section describes how to update an older Go App Engine app to use
+these packages. A provided tool, `aefix`, can help automate steps 2 and 3
+(run `go get google.golang.org/appengine/cmd/aefix` to install it), but
+read the details below since `aefix` can't perform all the changes.
+
+### 1. Update YAML files (App Engine flexible environment / Managed VMs only)
+
+The `app.yaml` file (and YAML files for modules) should have this new line added:
+```
+vm: true
+```
+See https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details.
+
+### 2. Update import paths
+
+The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
+You will need to update your code to use import paths starting with that; for instance,
+code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
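+
+In code, that change looks like this (an illustrative sketch; the `guestbook`
+package name is hypothetical):
+
+```
+package guestbook
+
+import (
+	// old: "appengine/datastore"
+	"google.golang.org/appengine/datastore"
+)
+
+var _ = datastore.ErrNoSuchEntity // keeps the import in use for this sketch
+```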
+
+### 3. Update code using deprecated, removed or modified APIs
+
+Most App Engine services are available with exactly the same API.
+A few APIs were cleaned up, and some are not available yet.
+This list summarises the differences:
+
+* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`.
+* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log` (see the sketch after this list).
+* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead.
+* `appengine.Datacenter` now takes a `context.Context` argument.
+* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
+* `delay.Call` now returns an error.
+* `search.FieldLoadSaver` now handles document metadata.
+* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the
+ `context.Context` instead.
+* `aetest` no longer declares its own Context type, and uses the standard one instead.
+* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
+ deprecated and unused for a long time.
+* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
+  Use `appengine.ModuleHostname` and `appengine.ModuleName` instead.
+* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated.
+ Use [Google Cloud Storage](https://godoc.org/cloud.google.com/go/storage) if the
+ feature you require is not present in the new
+ [blobstore package](https://google.golang.org/appengine/blobstore).
+* `appengine/socket` is not required on App Engine flexible environment / Managed VMs.
+ Use the standard `net` package instead.
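+
+As an illustration of the logging change noted above, a handler that previously
+called `c.Infof` on an `appengine.Context` might now look like this (a sketch
+only; the package name, handler, and message are hypothetical):
+
+```
+package app
+
+import (
+	"net/http"
+
+	"google.golang.org/appengine"
+	"google.golang.org/appengine/log"
+)
+
+func handle(w http.ResponseWriter, r *http.Request) {
+	ctx := appengine.NewContext(r)
+	// Previously: c.Infof("serving %s", r.URL.Path)
+	log.Infof(ctx, "serving %s", r.URL.Path)
+}
+```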
diff --git a/vendor/google.golang.org/appengine/aetest/doc.go b/vendor/google.golang.org/appengine/aetest/doc.go
new file mode 100644
index 0000000..86ce8c2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/doc.go
@@ -0,0 +1,42 @@
+/*
+Package aetest provides an API for running dev_appserver for use in tests.
+
+An example test file:
+
+ package foo_test
+
+ import (
+ "testing"
+
+ "google.golang.org/appengine/memcache"
+ "google.golang.org/appengine/aetest"
+ )
+
+ func TestFoo(t *testing.T) {
+ ctx, done, err := aetest.NewContext()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer done()
+
+ it := &memcache.Item{
+ Key: "some-key",
+ Value: []byte("some-value"),
+ }
+ err = memcache.Set(ctx, it)
+ if err != nil {
+ t.Fatalf("Set err: %v", err)
+ }
+ it, err = memcache.Get(ctx, "some-key")
+ if err != nil {
+ t.Fatalf("Get err: %v; want no error", err)
+ }
+ if g, w := string(it.Value), "some-value" ; g != w {
+ t.Errorf("retrieved Item.Value = %q, want %q", g, w)
+ }
+ }
+
+The environment variable APPENGINE_DEV_APPSERVER specifies the location of the
+dev_appserver.py executable to use. If unset, the system PATH is consulted.
+*/
+package aetest
diff --git a/vendor/google.golang.org/appengine/aetest/instance.go b/vendor/google.golang.org/appengine/aetest/instance.go
new file mode 100644
index 0000000..a8f99d8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/instance.go
@@ -0,0 +1,51 @@
+package aetest
+
+import (
+ "io"
+ "net/http"
+
+ "golang.org/x/net/context"
+ "google.golang.org/appengine"
+)
+
+// Instance represents a running instance of the development API Server.
+type Instance interface {
+ // Close kills the child api_server.py process, releasing its resources.
+ io.Closer
+ // NewRequest returns an *http.Request associated with this instance.
+ NewRequest(method, urlStr string, body io.Reader) (*http.Request, error)
+}
+
+// Options is used to specify options when creating an Instance.
+type Options struct {
+ // AppID specifies the App ID to use during tests.
+ // By default, "testapp".
+ AppID string
+ // StronglyConsistentDatastore is whether the local datastore should be
+ // strongly consistent. This will diverge from production behaviour.
+ StronglyConsistentDatastore bool
+}
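+
+// For example, a test that needs a strongly consistent datastore might create
+// its instance like this (an illustrative sketch; t is the surrounding *testing.T):
+//
+//	inst, err := aetest.NewInstance(&aetest.Options{StronglyConsistentDatastore: true})
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	defer inst.Close()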
+
+// NewContext starts an instance of the development API server, and returns
+// a context that will route all API calls to that server, as well as a
+// closure that must be called when the Context is no longer required.
+func NewContext() (context.Context, func(), error) {
+ inst, err := NewInstance(nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ req, err := inst.NewRequest("GET", "/", nil)
+ if err != nil {
+ inst.Close()
+ return nil, nil, err
+ }
+ ctx := appengine.NewContext(req)
+ return ctx, func() {
+ inst.Close()
+ }, nil
+}
+
+// PrepareDevAppserver is a hook which, if set, is called before
+// dev_appserver.py is started, each time it is started. If aetest.NewContext
+// is invoked from the goapp test tool, this hook is unnecessary.
+var PrepareDevAppserver func() error
diff --git a/vendor/google.golang.org/appengine/aetest/instance_classic.go b/vendor/google.golang.org/appengine/aetest/instance_classic.go
new file mode 100644
index 0000000..fbceaa5
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/instance_classic.go
@@ -0,0 +1,21 @@
+// +build appengine
+
+package aetest
+
+import "appengine/aetest"
+
+// NewInstance launches a running instance of api_server.py which can be used
+// for multiple test Contexts that delegate all App Engine API calls to that
+// instance.
+// If opts is nil the default values are used.
+func NewInstance(opts *Options) (Instance, error) {
+ aetest.PrepareDevAppserver = PrepareDevAppserver
+ var aeOpts *aetest.Options
+ if opts != nil {
+ aeOpts = &aetest.Options{
+ AppID: opts.AppID,
+ StronglyConsistentDatastore: opts.StronglyConsistentDatastore,
+ }
+ }
+ return aetest.NewInstance(aeOpts)
+}
diff --git a/vendor/google.golang.org/appengine/aetest/instance_test.go b/vendor/google.golang.org/appengine/aetest/instance_test.go
new file mode 100644
index 0000000..edc3ecd
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/instance_test.go
@@ -0,0 +1,116 @@
+package aetest
+
+import (
+ "os"
+ "testing"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/datastore"
+ "google.golang.org/appengine/memcache"
+ "google.golang.org/appengine/user"
+)
+
+func TestBasicAPICalls(t *testing.T) {
+ // Only run the test if APPENGINE_DEV_APPSERVER is explicitly set.
+ if os.Getenv("APPENGINE_DEV_APPSERVER") == "" {
+ t.Skip("APPENGINE_DEV_APPSERVER not set")
+ }
+
+ inst, err := NewInstance(nil)
+ if err != nil {
+ t.Fatalf("NewInstance: %v", err)
+ }
+ defer inst.Close()
+
+ req, err := inst.NewRequest("GET", "http://example.com/page", nil)
+ if err != nil {
+ t.Fatalf("NewRequest: %v", err)
+ }
+ ctx := appengine.NewContext(req)
+
+ it := &memcache.Item{
+ Key: "some-key",
+ Value: []byte("some-value"),
+ }
+ err = memcache.Set(ctx, it)
+ if err != nil {
+ t.Fatalf("Set err: %v", err)
+ }
+ it, err = memcache.Get(ctx, "some-key")
+ if err != nil {
+ t.Fatalf("Get err: %v; want no error", err)
+ }
+ if g, w := string(it.Value), "some-value"; g != w {
+ t.Errorf("retrieved Item.Value = %q, want %q", g, w)
+ }
+
+ type Entity struct{ Value string }
+ e := &Entity{Value: "foo"}
+ k := datastore.NewIncompleteKey(ctx, "Entity", nil)
+ k, err = datastore.Put(ctx, k, e)
+ if err != nil {
+ t.Fatalf("datastore.Put: %v", err)
+ }
+ e = new(Entity)
+ if err := datastore.Get(ctx, k, e); err != nil {
+ t.Fatalf("datastore.Get: %v", err)
+ }
+ if g, w := e.Value, "foo"; g != w {
+ t.Errorf("retrieved Entity.Value = %q, want %q", g, w)
+ }
+}
+
+func TestContext(t *testing.T) {
+ // Only run the test if APPENGINE_DEV_APPSERVER is explicitly set.
+ if os.Getenv("APPENGINE_DEV_APPSERVER") == "" {
+ t.Skip("APPENGINE_DEV_APPSERVER not set")
+ }
+
+ // Check that the context methods work.
+ _, done, err := NewContext()
+ if err != nil {
+ t.Fatalf("NewContext: %v", err)
+ }
+ done()
+}
+
+func TestUsers(t *testing.T) {
+ // Only run the test if APPENGINE_DEV_APPSERVER is explicitly set.
+ if os.Getenv("APPENGINE_DEV_APPSERVER") == "" {
+ t.Skip("APPENGINE_DEV_APPSERVER not set")
+ }
+
+ inst, err := NewInstance(nil)
+ if err != nil {
+ t.Fatalf("NewInstance: %v", err)
+ }
+ defer inst.Close()
+
+ req, err := inst.NewRequest("GET", "http://example.com/page", nil)
+ if err != nil {
+ t.Fatalf("NewRequest: %v", err)
+ }
+ ctx := appengine.NewContext(req)
+
+ if user := user.Current(ctx); user != nil {
+ t.Errorf("user.Current initially %v, want nil", user)
+ }
+
+ u := &user.User{
+ Email: "gopher@example.com",
+ Admin: true,
+ }
+ Login(u, req)
+
+ if got := user.Current(ctx); got.Email != u.Email {
+ t.Errorf("user.Current: %v, want %v", got, u)
+ }
+ if admin := user.IsAdmin(ctx); !admin {
+ t.Errorf("user.IsAdmin: %t, want true", admin)
+ }
+
+ Logout(req)
+ if user := user.Current(ctx); user != nil {
+ t.Errorf("user.Current after logout %v, want nil", user)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/aetest/instance_vm.go b/vendor/google.golang.org/appengine/aetest/instance_vm.go
new file mode 100644
index 0000000..ee81480
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/instance_vm.go
@@ -0,0 +1,276 @@
+// +build !appengine
+
+package aetest
+
+import (
+ "bufio"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/internal"
+)
+
+// NewInstance launches a running instance of api_server.py which can be used
+// for multiple test Contexts that delegate all App Engine API calls to that
+// instance.
+// If opts is nil the default values are used.
+func NewInstance(opts *Options) (Instance, error) {
+ i := &instance{
+ opts: opts,
+ appID: "testapp",
+ }
+ if opts != nil && opts.AppID != "" {
+ i.appID = opts.AppID
+ }
+ if err := i.startChild(); err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+func newSessionID() string {
+ var buf [16]byte
+ io.ReadFull(rand.Reader, buf[:])
+ return fmt.Sprintf("%x", buf[:])
+}
+
+// instance implements the Instance interface.
+type instance struct {
+ opts *Options
+ child *exec.Cmd
+ apiURL *url.URL // base URL of API HTTP server
+ adminURL string // base URL of admin HTTP server
+ appDir string
+ appID string
+ relFuncs []func() // funcs to release any associated contexts
+}
+
+// NewRequest returns an *http.Request associated with this instance.
+func (i *instance) NewRequest(method, urlStr string, body io.Reader) (*http.Request, error) {
+ req, err := http.NewRequest(method, urlStr, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Associate this request.
+ release := internal.RegisterTestRequest(req, i.apiURL, func(ctx context.Context) context.Context {
+ ctx = internal.WithAppIDOverride(ctx, "dev~"+i.appID)
+ return ctx
+ })
+ i.relFuncs = append(i.relFuncs, release)
+
+ return req, nil
+}
+
+// Close kills the child api_server.py process, releasing its resources.
+func (i *instance) Close() (err error) {
+ for _, rel := range i.relFuncs {
+ rel()
+ }
+ i.relFuncs = nil
+ if i.child == nil {
+ return nil
+ }
+ defer func() {
+ i.child = nil
+ err1 := os.RemoveAll(i.appDir)
+ if err == nil {
+ err = err1
+ }
+ }()
+
+ if p := i.child.Process; p != nil {
+ errc := make(chan error, 1)
+ go func() {
+ errc <- i.child.Wait()
+ }()
+
+ // Call the quit handler on the admin server.
+ res, err := http.Get(i.adminURL + "/quit")
+ if err != nil {
+ p.Kill()
+ return fmt.Errorf("unable to call /quit handler: %v", err)
+ }
+ res.Body.Close()
+
+ select {
+ case <-time.After(15 * time.Second):
+ p.Kill()
+ return errors.New("timeout killing child process")
+ case err = <-errc:
+ // Do nothing.
+ }
+ }
+ return
+}
+
+func fileExists(path string) bool {
+ _, err := os.Stat(path)
+ return err == nil
+}
+
+func findPython() (path string, err error) {
+ for _, name := range []string{"python2.7", "python"} {
+ path, err = exec.LookPath(name)
+ if err == nil {
+ return
+ }
+ }
+ return
+}
+
+func findDevAppserver() (string, error) {
+ if p := os.Getenv("APPENGINE_DEV_APPSERVER"); p != "" {
+ if fileExists(p) {
+ return p, nil
+ }
+ return "", fmt.Errorf("invalid APPENGINE_DEV_APPSERVER environment variable; path %q doesn't exist", p)
+ }
+ return exec.LookPath("dev_appserver.py")
+}
+
+var apiServerAddrRE = regexp.MustCompile(`Starting API server at: (\S+)`)
+var adminServerAddrRE = regexp.MustCompile(`Starting admin server at: (\S+)`)
+
+func (i *instance) startChild() (err error) {
+ if PrepareDevAppserver != nil {
+ if err := PrepareDevAppserver(); err != nil {
+ return err
+ }
+ }
+ python, err := findPython()
+ if err != nil {
+ return fmt.Errorf("Could not find python interpreter: %v", err)
+ }
+ devAppserver, err := findDevAppserver()
+ if err != nil {
+ return fmt.Errorf("Could not find dev_appserver.py: %v", err)
+ }
+
+ i.appDir, err = ioutil.TempDir("", "appengine-aetest")
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ os.RemoveAll(i.appDir)
+ }
+ }()
+ err = os.Mkdir(filepath.Join(i.appDir, "app"), 0755)
+ if err != nil {
+ return err
+ }
+ err = ioutil.WriteFile(filepath.Join(i.appDir, "app", "app.yaml"), []byte(i.appYAML()), 0644)
+ if err != nil {
+ return err
+ }
+ err = ioutil.WriteFile(filepath.Join(i.appDir, "app", "stubapp.go"), []byte(appSource), 0644)
+ if err != nil {
+ return err
+ }
+
+ appserverArgs := []string{
+ devAppserver,
+ "--port=0",
+ "--api_port=0",
+ "--admin_port=0",
+ "--automatic_restart=false",
+ "--skip_sdk_update_check=true",
+ "--clear_datastore=true",
+ "--clear_search_indexes=true",
+ "--datastore_path", filepath.Join(i.appDir, "datastore"),
+ }
+ if i.opts != nil && i.opts.StronglyConsistentDatastore {
+ appserverArgs = append(appserverArgs, "--datastore_consistency_policy=consistent")
+ }
+ appserverArgs = append(appserverArgs, filepath.Join(i.appDir, "app"))
+
+ i.child = exec.Command(python,
+ appserverArgs...,
+ )
+ i.child.Stdout = os.Stdout
+ var stderr io.Reader
+ stderr, err = i.child.StderrPipe()
+ if err != nil {
+ return err
+ }
+ stderr = io.TeeReader(stderr, os.Stderr)
+ if err = i.child.Start(); err != nil {
+ return err
+ }
+
+ // Read stderr until we have read the URLs of the API server and admin interface.
+ errc := make(chan error, 1)
+ go func() {
+ s := bufio.NewScanner(stderr)
+ for s.Scan() {
+ if match := apiServerAddrRE.FindStringSubmatch(s.Text()); match != nil {
+ u, err := url.Parse(match[1])
+ if err != nil {
+ errc <- fmt.Errorf("failed to parse API URL %q: %v", match[1], err)
+ return
+ }
+ i.apiURL = u
+ }
+ if match := adminServerAddrRE.FindStringSubmatch(s.Text()); match != nil {
+ i.adminURL = match[1]
+ }
+ if i.adminURL != "" && i.apiURL != nil {
+ break
+ }
+ }
+ errc <- s.Err()
+ }()
+
+ select {
+ case <-time.After(15 * time.Second):
+ if p := i.child.Process; p != nil {
+ p.Kill()
+ }
+ return errors.New("timeout starting child process")
+ case err := <-errc:
+ if err != nil {
+ return fmt.Errorf("error reading child process stderr: %v", err)
+ }
+ }
+ if i.adminURL == "" {
+ return errors.New("unable to find admin server URL")
+ }
+ if i.apiURL == nil {
+ return errors.New("unable to find API server URL")
+ }
+ return nil
+}
+
+func (i *instance) appYAML() string {
+ return fmt.Sprintf(appYAMLTemplate, i.appID)
+}
+
+const appYAMLTemplate = `
+application: %s
+version: 1
+runtime: go
+api_version: go1
+vm: true
+
+handlers:
+- url: /.*
+ script: _go_app
+`
+
+const appSource = `
+package main
+import "google.golang.org/appengine"
+func main() { appengine.Main() }
+`
diff --git a/vendor/google.golang.org/appengine/aetest/user.go b/vendor/google.golang.org/appengine/aetest/user.go
new file mode 100644
index 0000000..bf9266f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/user.go
@@ -0,0 +1,36 @@
+package aetest
+
+import (
+ "hash/crc32"
+ "net/http"
+ "strconv"
+
+ "google.golang.org/appengine/user"
+)
+
+// Login causes the provided Request to act as though issued by the given user.
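+//
+// A test will typically construct a user and apply it to a request created via
+// an aetest Instance, for example (a sketch; the email address is a placeholder):
+//
+//	u := &user.User{Email: "gopher@example.com", Admin: true}
+//	aetest.Login(u, req)
+//	// req now carries the headers identifying u as the current user.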
+func Login(u *user.User, req *http.Request) {
+ req.Header.Set("X-AppEngine-User-Email", u.Email)
+ id := u.ID
+ if id == "" {
+ id = strconv.Itoa(int(crc32.Checksum([]byte(u.Email), crc32.IEEETable)))
+ }
+ req.Header.Set("X-AppEngine-User-Id", id)
+ req.Header.Set("X-AppEngine-User-Federated-Identity", u.Email)
+ req.Header.Set("X-AppEngine-User-Federated-Provider", u.FederatedProvider)
+ if u.Admin {
+ req.Header.Set("X-AppEngine-User-Is-Admin", "1")
+ } else {
+ req.Header.Set("X-AppEngine-User-Is-Admin", "0")
+ }
+}
+
+// Logout causes the provided Request to act as though issued by a logged-out
+// user.
+func Logout(req *http.Request) {
+ req.Header.Del("X-AppEngine-User-Email")
+ req.Header.Del("X-AppEngine-User-Id")
+ req.Header.Del("X-AppEngine-User-Is-Admin")
+ req.Header.Del("X-AppEngine-User-Federated-Identity")
+ req.Header.Del("X-AppEngine-User-Federated-Provider")
+}
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go
new file mode 100644
index 0000000..475cf2e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine.go
@@ -0,0 +1,112 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package appengine provides basic functionality for Google App Engine.
+//
+// For more information on how to write Go apps for Google App Engine, see:
+// https://cloud.google.com/appengine/docs/go/
+package appengine // import "google.golang.org/appengine"
+
+import (
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// The gophers party all night; the rabbits provide the beats.
+
+// Main is the principal entry point for an app running in App Engine.
+//
+// On App Engine Flexible it installs a trivial health checker if one isn't
+// already registered, and starts listening on port 8080 (overridden by the
+// $PORT environment variable).
+//
+// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests
+// for details on how to do your own health checking.
+//
+// Main is not yet supported on App Engine Standard.
+//
+// Main never returns.
+//
+// Main is designed so that the app's main package looks like this:
+//
+// package main
+//
+// import (
+// "google.golang.org/appengine"
+//
+// _ "myapp/package0"
+// _ "myapp/package1"
+// )
+//
+// func main() {
+// appengine.Main()
+// }
+//
+// The "myapp/packageX" packages are expected to register HTTP handlers
+// in their init functions.
+func Main() {
+ internal.Main()
+}
+
+// IsDevAppServer reports whether the App Engine app is running in the
+// development App Server.
+func IsDevAppServer() bool {
+ return internal.IsDevAppServer()
+}
+
+// NewContext returns a context for an in-flight HTTP request.
+// This function is cheap.
+func NewContext(req *http.Request) context.Context {
+ return WithContext(context.Background(), req)
+}
+
+// WithContext returns a copy of the parent context
+// and associates it with an in-flight HTTP request.
+// This function is cheap.
+func WithContext(parent context.Context, req *http.Request) context.Context {
+ return internal.WithContext(parent, req)
+}
+
+// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call.
+
+// BlobKey is a key for a blobstore blob.
+//
+// Conceptually, this type belongs in the blobstore package, but it lives in
+// the appengine package to avoid a circular dependency: blobstore depends on
+// datastore, and datastore needs to refer to the BlobKey type.
+type BlobKey string
+
+// GeoPoint represents a location as latitude/longitude in degrees.
+type GeoPoint struct {
+ Lat, Lng float64
+}
+
+// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
+func (g GeoPoint) Valid() bool {
+ return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
+}
+
+// APICallFunc defines a function type for handling an API call.
+// See WithCallOverride.
+type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error
+
+// WithAPICallFunc returns a copy of the parent context
+// that will cause API calls to invoke f instead of their normal operation.
+//
+// This is intended for advanced users only.
+func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context {
+ return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f))
+}
+
+// APICall performs an API call.
+//
+// This is not intended for general use; it is exported for use in conjunction
+// with WithAPICallFunc.
+func APICall(ctx context.Context, service, method string, in, out proto.Message) error {
+ return internal.Call(ctx, service, method, in, out)
+}
diff --git a/vendor/google.golang.org/appengine/appengine_test.go b/vendor/google.golang.org/appengine/appengine_test.go
new file mode 100644
index 0000000..f1cf0a1
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine_test.go
@@ -0,0 +1,49 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "testing"
+)
+
+func TestValidGeoPoint(t *testing.T) {
+ testCases := []struct {
+ desc string
+ pt GeoPoint
+ want bool
+ }{
+ {
+ "valid",
+ GeoPoint{67.21, 13.37},
+ true,
+ },
+ {
+ "high lat",
+ GeoPoint{-90.01, 13.37},
+ false,
+ },
+ {
+ "low lat",
+ GeoPoint{90.01, 13.37},
+ false,
+ },
+ {
+ "high lng",
+ GeoPoint{67.21, 182},
+ false,
+ },
+ {
+ "low lng",
+ GeoPoint{67.21, -181},
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ if got := tc.pt.Valid(); got != tc.want {
+ t.Errorf("%s: got %v, want %v", tc.desc, got, tc.want)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go
new file mode 100644
index 0000000..f4b645a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine_vm.go
@@ -0,0 +1,20 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package appengine
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// BackgroundContext returns a context not associated with a request.
+// This should only be used when not servicing a request.
+// This only works in App Engine "flexible environment".
+func BackgroundContext() context.Context {
+ return internal.BackgroundContext()
+}
diff --git a/vendor/google.golang.org/appengine/blobstore/blobstore.go b/vendor/google.golang.org/appengine/blobstore/blobstore.go
new file mode 100644
index 0000000..1c8087b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/blobstore/blobstore.go
@@ -0,0 +1,276 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package blobstore provides a client for App Engine's persistent blob
+// storage service.
+package blobstore // import "google.golang.org/appengine/blobstore"
+
+import (
+ "bufio"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "mime/multipart"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/datastore"
+ "google.golang.org/appengine/internal"
+
+ basepb "google.golang.org/appengine/internal/base"
+ blobpb "google.golang.org/appengine/internal/blobstore"
+)
+
+const (
+ blobInfoKind = "__BlobInfo__"
+ blobFileIndexKind = "__BlobFileIndex__"
+ zeroKey = appengine.BlobKey("")
+)
+
+// BlobInfo is the blob metadata that is stored in the datastore.
+// Filename may be empty.
+type BlobInfo struct {
+ BlobKey appengine.BlobKey
+ ContentType string `datastore:"content_type"`
+ CreationTime time.Time `datastore:"creation"`
+ Filename string `datastore:"filename"`
+ Size int64 `datastore:"size"`
+ MD5 string `datastore:"md5_hash"`
+
+ // ObjectName is the Google Cloud Storage name for this blob.
+ ObjectName string `datastore:"gs_object_name"`
+}
+
+// isErrFieldMismatch returns whether err is a datastore.ErrFieldMismatch.
+//
+// The blobstore stores blob metadata in the datastore. When loading that
+// metadata, it may contain fields that we don't care about. datastore.Get will
+// return datastore.ErrFieldMismatch in that case, so we ignore that specific
+// error.
+func isErrFieldMismatch(err error) bool {
+ _, ok := err.(*datastore.ErrFieldMismatch)
+ return ok
+}
+
+// Stat returns the BlobInfo for a provided blobKey. If no blob was found for
+// that key, Stat returns datastore.ErrNoSuchEntity.
+func Stat(c context.Context, blobKey appengine.BlobKey) (*BlobInfo, error) {
+ c, _ = appengine.Namespace(c, "") // Blobstore is always in the empty string namespace
+ dskey := datastore.NewKey(c, blobInfoKind, string(blobKey), 0, nil)
+ bi := &BlobInfo{
+ BlobKey: blobKey,
+ }
+ if err := datastore.Get(c, dskey, bi); err != nil && !isErrFieldMismatch(err) {
+ return nil, err
+ }
+ return bi, nil
+}
+
+// Send sets the headers on response to instruct App Engine to send a blob as
+// the response body. This is more efficient than reading and writing it out
+// manually and isn't subject to normal response size limits.
+func Send(response http.ResponseWriter, blobKey appengine.BlobKey) {
+ hdr := response.Header()
+ hdr.Set("X-AppEngine-BlobKey", string(blobKey))
+
+ if hdr.Get("Content-Type") == "" {
+ // This value is known to dev_appserver to mean automatic.
+ // In production this is remapped to the empty value which
+ // means automatic.
+ hdr.Set("Content-Type", "application/vnd.google.appengine.auto")
+ }
+}
+
+// UploadURL creates an upload URL for the form that the user will
+// fill out, passing the application path to load when the POST of the
+// form is completed. These URLs expire and should not be reused. The
+// opts parameter may be nil.
+func UploadURL(c context.Context, successPath string, opts *UploadURLOptions) (*url.URL, error) {
+ req := &blobpb.CreateUploadURLRequest{
+ SuccessPath: proto.String(successPath),
+ }
+ if opts != nil {
+ if n := opts.MaxUploadBytes; n != 0 {
+ req.MaxUploadSizeBytes = &n
+ }
+ if n := opts.MaxUploadBytesPerBlob; n != 0 {
+ req.MaxUploadSizePerBlobBytes = &n
+ }
+ if s := opts.StorageBucket; s != "" {
+ req.GsBucketName = &s
+ }
+ }
+ res := &blobpb.CreateUploadURLResponse{}
+ if err := internal.Call(c, "blobstore", "CreateUploadURL", req, res); err != nil {
+ return nil, err
+ }
+ return url.Parse(*res.Url)
+}
+
+// UploadURLOptions are the options to create an upload URL.
+type UploadURLOptions struct {
+ MaxUploadBytes int64 // optional
+ MaxUploadBytesPerBlob int64 // optional
+
+ // StorageBucket specifies the Google Cloud Storage bucket in which
+ // to store the blob.
+ // This is required if you use Cloud Storage instead of Blobstore.
+ // Your application must have permission to write to the bucket.
+ // You may optionally specify a bucket name and path in the format
+ // "bucket_name/path", in which case the included path will be the
+ // prefix of the uploaded object's name.
+ StorageBucket string
+}
+
+// Delete deletes a blob.
+func Delete(c context.Context, blobKey appengine.BlobKey) error {
+ return DeleteMulti(c, []appengine.BlobKey{blobKey})
+}
+
+// DeleteMulti deletes multiple blobs.
+func DeleteMulti(c context.Context, blobKey []appengine.BlobKey) error {
+ s := make([]string, len(blobKey))
+ for i, b := range blobKey {
+ s[i] = string(b)
+ }
+ req := &blobpb.DeleteBlobRequest{
+ BlobKey: s,
+ }
+ res := &basepb.VoidProto{}
+ if err := internal.Call(c, "blobstore", "DeleteBlob", req, res); err != nil {
+ return err
+ }
+ return nil
+}
+
+func errorf(format string, args ...interface{}) error {
+ return fmt.Errorf("blobstore: "+format, args...)
+}
+
+// ParseUpload parses the synthetic POST request that your app gets from
+// App Engine after a user's successful upload of blobs. Given the request,
+// ParseUpload returns a map of the blobs received (keyed by HTML form
+// element name) and other non-blob POST parameters.
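+//
+// A typical upload handler might use it like this (an illustrative sketch; the
+// handler and the "file" form field name are hypothetical):
+//
+//	func handleUpload(w http.ResponseWriter, r *http.Request) {
+//		blobs, _, err := blobstore.ParseUpload(r)
+//		if err != nil {
+//			http.Error(w, err.Error(), http.StatusBadRequest)
+//			return
+//		}
+//		file := blobs["file"]
+//		if len(file) == 0 {
+//			http.Error(w, "no file uploaded", http.StatusBadRequest)
+//			return
+//		}
+//		blobstore.Send(w, file[0].BlobKey)
+//	}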
+func ParseUpload(req *http.Request) (blobs map[string][]*BlobInfo, other url.Values, err error) {
+ _, params, err := mime.ParseMediaType(req.Header.Get("Content-Type"))
+ if err != nil {
+ return nil, nil, err
+ }
+ boundary := params["boundary"]
+ if boundary == "" {
+ return nil, nil, errorf("did not find MIME multipart boundary")
+ }
+
+ blobs = make(map[string][]*BlobInfo)
+ other = make(url.Values)
+
+ mreader := multipart.NewReader(io.MultiReader(req.Body, strings.NewReader("\r\n\r\n")), boundary)
+ for {
+ part, perr := mreader.NextPart()
+ if perr == io.EOF {
+ break
+ }
+ if perr != nil {
+ return nil, nil, errorf("error reading next mime part with boundary %q (len=%d): %v",
+ boundary, len(boundary), perr)
+ }
+
+ bi := &BlobInfo{}
+ ctype, params, err := mime.ParseMediaType(part.Header.Get("Content-Disposition"))
+ if err != nil {
+ return nil, nil, err
+ }
+ bi.Filename = params["filename"]
+ formKey := params["name"]
+
+ ctype, params, err = mime.ParseMediaType(part.Header.Get("Content-Type"))
+ if err != nil {
+ return nil, nil, err
+ }
+ bi.BlobKey = appengine.BlobKey(params["blob-key"])
+ if ctype != "message/external-body" || bi.BlobKey == "" {
+ if formKey != "" {
+ slurp, serr := ioutil.ReadAll(part)
+ if serr != nil {
+ return nil, nil, errorf("error reading %q MIME part", formKey)
+ }
+ other[formKey] = append(other[formKey], string(slurp))
+ }
+ continue
+ }
+
+ // App Engine sends a MIME header as the body of each MIME part.
+ tp := textproto.NewReader(bufio.NewReader(part))
+ header, mimeerr := tp.ReadMIMEHeader()
+ if mimeerr != nil {
+ return nil, nil, mimeerr
+ }
+ bi.Size, err = strconv.ParseInt(header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ return nil, nil, err
+ }
+ bi.ContentType = header.Get("Content-Type")
+
+ // Parse the time from the MIME header like:
+ // X-AppEngine-Upload-Creation: 2011-03-15 21:38:34.712136
+ createDate := header.Get("X-AppEngine-Upload-Creation")
+ if createDate == "" {
+ return nil, nil, errorf("expected to find an X-AppEngine-Upload-Creation header")
+ }
+ bi.CreationTime, err = time.Parse("2006-01-02 15:04:05.000000", createDate)
+ if err != nil {
+ return nil, nil, errorf("error parsing X-AppEngine-Upload-Creation: %s", err)
+ }
+
+ if hdr := header.Get("Content-MD5"); hdr != "" {
+ md5, err := base64.URLEncoding.DecodeString(hdr)
+ if err != nil {
+ return nil, nil, errorf("bad Content-MD5 %q: %v", hdr, err)
+ }
+ bi.MD5 = string(md5)
+ }
+
+ // If the GCS object name was provided, record it.
+ bi.ObjectName = header.Get("X-AppEngine-Cloud-Storage-Object")
+
+ blobs[formKey] = append(blobs[formKey], bi)
+ }
+ return
+}
+
+// Reader is a blob reader.
+type Reader interface {
+ io.Reader
+ io.ReaderAt
+ io.Seeker
+}
+
+// NewReader returns a reader for a blob. It always succeeds; if the blob does
+// not exist then an error will be reported upon first read.
+func NewReader(c context.Context, blobKey appengine.BlobKey) Reader {
+ return openBlob(c, blobKey)
+}
+
+// BlobKeyForFile returns a BlobKey for a Google Storage file.
+// The filename should be of the form "/gs/bucket_name/object_name".
+func BlobKeyForFile(c context.Context, filename string) (appengine.BlobKey, error) {
+ req := &blobpb.CreateEncodedGoogleStorageKeyRequest{
+ Filename: &filename,
+ }
+ res := &blobpb.CreateEncodedGoogleStorageKeyResponse{}
+ if err := internal.Call(c, "blobstore", "CreateEncodedGoogleStorageKey", req, res); err != nil {
+ return "", err
+ }
+ return appengine.BlobKey(*res.BlobKey), nil
+}
diff --git a/vendor/google.golang.org/appengine/blobstore/blobstore_test.go b/vendor/google.golang.org/appengine/blobstore/blobstore_test.go
new file mode 100644
index 0000000..c2be7ef
--- /dev/null
+++ b/vendor/google.golang.org/appengine/blobstore/blobstore_test.go
@@ -0,0 +1,183 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package blobstore
+
+import (
+ "io"
+ "os"
+ "strconv"
+ "strings"
+ "testing"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+
+ pb "google.golang.org/appengine/internal/blobstore"
+)
+
+const rbs = readBufferSize
+
+func min(x, y int) int {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func fakeFetchData(req *pb.FetchDataRequest, res *pb.FetchDataResponse) error {
+ i0 := int(*req.StartIndex)
+ i1 := int(*req.EndIndex + 1) // Blobstore's end-indices are inclusive; Go's are exclusive.
+ bk := *req.BlobKey
+ if i := strings.Index(bk, "."); i != -1 {
+ // Strip everything past the ".".
+ bk = bk[:i]
+ }
+ switch bk {
+ case "a14p":
+ const s = "abcdefghijklmnop"
+ i0 := min(len(s), i0)
+ i1 := min(len(s), i1)
+ res.Data = []byte(s[i0:i1])
+ case "longBlob":
+ res.Data = make([]byte, i1-i0)
+ for i := range res.Data {
+ res.Data[i] = 'A' + uint8(i0/rbs)
+ i0++
+ }
+ }
+ return nil
+}
+
+// step is one step of a readerTest.
+// It consists of a Reader method to call, the method arguments
+// (lenp, offset, whence) and the expected results.
+type step struct {
+ method string
+ lenp int
+ offset int64
+ whence int
+ want string
+ wantErr error
+}
+
+var readerTest = []struct {
+ blobKey string
+ step []step
+}{
+ {"noSuchBlobKey", []step{
+ {"Read", 8, 0, 0, "", io.EOF},
+ }},
+ {"a14p.0", []step{
+ // Test basic reads.
+ {"Read", 1, 0, 0, "a", nil},
+ {"Read", 3, 0, 0, "bcd", nil},
+ {"Read", 1, 0, 0, "e", nil},
+ {"Read", 2, 0, 0, "fg", nil},
+ // Test Seek.
+ {"Seek", 0, 2, os.SEEK_SET, "2", nil},
+ {"Read", 5, 0, 0, "cdefg", nil},
+ {"Seek", 0, 2, os.SEEK_CUR, "9", nil},
+ {"Read", 1, 0, 0, "j", nil},
+ // Test reads up to and past EOF.
+ {"Read", 5, 0, 0, "klmno", nil},
+ {"Read", 5, 0, 0, "p", nil},
+ {"Read", 5, 0, 0, "", io.EOF},
+ // Test ReadAt.
+ {"ReadAt", 4, 0, 0, "abcd", nil},
+ {"ReadAt", 4, 3, 0, "defg", nil},
+ {"ReadAt", 4, 12, 0, "mnop", nil},
+ {"ReadAt", 4, 13, 0, "nop", io.EOF},
+ {"ReadAt", 4, 99, 0, "", io.EOF},
+ }},
+ {"a14p.1", []step{
+ // Test Seek before any reads.
+ {"Seek", 0, 2, os.SEEK_SET, "2", nil},
+ {"Read", 1, 0, 0, "c", nil},
+ // Test that ReadAt doesn't affect the Read offset.
+ {"ReadAt", 3, 9, 0, "jkl", nil},
+ {"Read", 3, 0, 0, "def", nil},
+ }},
+ {"a14p.2", []step{
+ // Test ReadAt before any reads or seeks.
+ {"ReadAt", 2, 14, 0, "op", nil},
+ }},
+ {"longBlob.0", []step{
+ // Test basic read.
+ {"Read", 1, 0, 0, "A", nil},
+ // Test that Read returns early when the buffer is exhausted.
+ {"Seek", 0, rbs - 2, os.SEEK_SET, strconv.Itoa(rbs - 2), nil},
+ {"Read", 5, 0, 0, "AA", nil},
+ {"Read", 3, 0, 0, "BBB", nil},
+ // Test that what we just read is still in the buffer.
+ {"Seek", 0, rbs - 2, os.SEEK_SET, strconv.Itoa(rbs - 2), nil},
+ {"Read", 5, 0, 0, "AABBB", nil},
+ // Test ReadAt.
+ {"ReadAt", 3, rbs - 4, 0, "AAA", nil},
+ {"ReadAt", 6, rbs - 4, 0, "AAAABB", nil},
+ {"ReadAt", 8, rbs - 4, 0, "AAAABBBB", nil},
+ {"ReadAt", 5, rbs - 4, 0, "AAAAB", nil},
+ {"ReadAt", 2, rbs - 4, 0, "AA", nil},
+ // Test seeking backwards from the Read offset.
+ {"Seek", 0, 2*rbs - 8, os.SEEK_SET, strconv.Itoa(2*rbs - 8), nil},
+ {"Read", 1, 0, 0, "B", nil},
+ {"Read", 1, 0, 0, "B", nil},
+ {"Read", 1, 0, 0, "B", nil},
+ {"Read", 1, 0, 0, "B", nil},
+ {"Read", 8, 0, 0, "BBBBCCCC", nil},
+ }},
+ {"longBlob.1", []step{
+ // Test ReadAt with a slice larger than the buffer size.
+ {"LargeReadAt", 2*rbs - 2, 0, 0, strconv.Itoa(2*rbs - 2), nil},
+ {"LargeReadAt", 2*rbs - 1, 0, 0, strconv.Itoa(2*rbs - 1), nil},
+ {"LargeReadAt", 2*rbs + 0, 0, 0, strconv.Itoa(2*rbs + 0), nil},
+ {"LargeReadAt", 2*rbs + 1, 0, 0, strconv.Itoa(2*rbs + 1), nil},
+ {"LargeReadAt", 2*rbs + 2, 0, 0, strconv.Itoa(2*rbs + 2), nil},
+ {"LargeReadAt", 2*rbs - 2, 1, 0, strconv.Itoa(2*rbs - 2), nil},
+ {"LargeReadAt", 2*rbs - 1, 1, 0, strconv.Itoa(2*rbs - 1), nil},
+ {"LargeReadAt", 2*rbs + 0, 1, 0, strconv.Itoa(2*rbs + 0), nil},
+ {"LargeReadAt", 2*rbs + 1, 1, 0, strconv.Itoa(2*rbs + 1), nil},
+ {"LargeReadAt", 2*rbs + 2, 1, 0, strconv.Itoa(2*rbs + 2), nil},
+ }},
+}
+
+func TestReader(t *testing.T) {
+ for _, rt := range readerTest {
+ c := aetesting.FakeSingleContext(t, "blobstore", "FetchData", fakeFetchData)
+ r := NewReader(c, appengine.BlobKey(rt.blobKey))
+ for i, step := range rt.step {
+ var (
+ got string
+ gotErr error
+ n int
+ offset int64
+ )
+ switch step.method {
+ case "LargeReadAt":
+ p := make([]byte, step.lenp)
+ n, gotErr = r.ReadAt(p, step.offset)
+ got = strconv.Itoa(n)
+ case "Read":
+ p := make([]byte, step.lenp)
+ n, gotErr = r.Read(p)
+ got = string(p[:n])
+ case "ReadAt":
+ p := make([]byte, step.lenp)
+ n, gotErr = r.ReadAt(p, step.offset)
+ got = string(p[:n])
+ case "Seek":
+ offset, gotErr = r.Seek(step.offset, step.whence)
+ got = strconv.FormatInt(offset, 10)
+ default:
+ t.Fatalf("unknown method: %s", step.method)
+ }
+ if gotErr != step.wantErr {
+ t.Fatalf("%s step %d: got error %v want %v", rt.blobKey, i, gotErr, step.wantErr)
+ }
+ if got != step.want {
+ t.Fatalf("%s step %d: got %q want %q", rt.blobKey, i, got, step.want)
+ }
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/blobstore/read.go b/vendor/google.golang.org/appengine/blobstore/read.go
new file mode 100644
index 0000000..578b1f5
--- /dev/null
+++ b/vendor/google.golang.org/appengine/blobstore/read.go
@@ -0,0 +1,160 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package blobstore
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+
+ blobpb "google.golang.org/appengine/internal/blobstore"
+)
+
+// openBlob returns a reader for a blob. It always succeeds; if the blob does
+// not exist then an error will be reported upon first read.
+func openBlob(c context.Context, blobKey appengine.BlobKey) Reader {
+ return &reader{
+ c: c,
+ blobKey: blobKey,
+ }
+}
+
+const readBufferSize = 256 * 1024
+
+// reader is a blob reader. It implements the Reader interface.
+type reader struct {
+ c context.Context
+
+ // Either blobKey or filename is set:
+ blobKey appengine.BlobKey
+ filename string
+
+ closeFunc func() // is nil if unavailable or already closed.
+
+ // buf is the read buffer. r is how much of buf has been read.
+ // off is the offset of buf[0] relative to the start of the blob.
+ // An invariant is 0 <= r && r <= len(buf).
+ // Reads that don't require an RPC call will increment r but not off.
+ // Seeks may modify r without discarding the buffer, but only if the
+ // invariant can be maintained.
+ mu sync.Mutex
+ buf []byte
+ r int
+ off int64
+}
+
+func (r *reader) Close() error {
+ if f := r.closeFunc; f != nil {
+ f()
+ }
+ r.closeFunc = nil
+ return nil
+}
+
+func (r *reader) Read(p []byte) (int, error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.r == len(r.buf) {
+ if err := r.fetch(r.off + int64(r.r)); err != nil {
+ return 0, err
+ }
+ }
+ n := copy(p, r.buf[r.r:])
+ r.r += n
+ return n, nil
+}
+
+func (r *reader) ReadAt(p []byte, off int64) (int, error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ // Convert relative offsets to absolute offsets.
+ ab0 := r.off + int64(r.r)
+ ab1 := r.off + int64(len(r.buf))
+ ap0 := off
+ ap1 := off + int64(len(p))
+ // Check if we can satisfy the read entirely out of the existing buffer.
+ if r.off <= ap0 && ap1 <= ab1 {
+ // Convert off from an absolute offset to a relative offset.
+ rp0 := int(ap0 - r.off)
+ return copy(p, r.buf[rp0:]), nil
+ }
+ // Restore the original Read/Seek offset after ReadAt completes.
+ defer r.seek(ab0)
+ // Repeatedly fetch and copy until we have filled p.
+ n := 0
+ for len(p) > 0 {
+ if err := r.fetch(off + int64(n)); err != nil {
+ return n, err
+ }
+ r.r = copy(p, r.buf)
+ n += r.r
+ p = p[r.r:]
+ }
+ return n, nil
+}
+
+func (r *reader) Seek(offset int64, whence int) (ret int64, err error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ switch whence {
+ case os.SEEK_SET:
+ ret = offset
+ case os.SEEK_CUR:
+ ret = r.off + int64(r.r) + offset
+ case os.SEEK_END:
+ return 0, errors.New("seeking relative to the end of a blob isn't supported")
+ default:
+ return 0, fmt.Errorf("invalid Seek whence value: %d", whence)
+ }
+ if ret < 0 {
+ return 0, errors.New("negative Seek offset")
+ }
+ return r.seek(ret)
+}
+
+// fetch fetches readBufferSize bytes starting at the given offset. On success,
+// the data is saved as r.buf.
+func (r *reader) fetch(off int64) error {
+ req := &blobpb.FetchDataRequest{
+ BlobKey: proto.String(string(r.blobKey)),
+ StartIndex: proto.Int64(off),
+ EndIndex: proto.Int64(off + readBufferSize - 1), // EndIndex is inclusive.
+ }
+ res := &blobpb.FetchDataResponse{}
+ if err := internal.Call(r.c, "blobstore", "FetchData", req, res); err != nil {
+ return err
+ }
+ if len(res.Data) == 0 {
+ return io.EOF
+ }
+ r.buf, r.r, r.off = res.Data, 0, off
+ return nil
+}
+
+// seek seeks to the given offset with an effective whence equal to SEEK_SET.
+// It discards the read buffer if the invariant cannot be maintained.
+func (r *reader) seek(off int64) (int64, error) {
+ delta := off - r.off
+ if delta >= 0 && delta < int64(len(r.buf)) {
+ r.r = int(delta)
+ return off, nil
+ }
+ r.buf, r.r, r.off = nil, 0, off
+ return off, nil
+}
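+
+// Illustrative sketch (not part of the upstream file) of the behaviour the
+// buffer invariant above preserves: ReadAt serves random access without
+// disturbing the sequential Read/Seek offset. c and blobKey are assumed to
+// come from the surrounding request handler.
+//
+//    r := NewReader(c, blobKey)
+//    buf := make([]byte, 4)
+//    r.ReadAt(buf, 100) // random access at offset 100
+//    r.Read(buf)        // still reads sequentially from offset 0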
diff --git a/vendor/google.golang.org/appengine/capability/capability.go b/vendor/google.golang.org/appengine/capability/capability.go
new file mode 100644
index 0000000..3a60bd5
--- /dev/null
+++ b/vendor/google.golang.org/appengine/capability/capability.go
@@ -0,0 +1,52 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package capability exposes information about outages and scheduled downtime
+for specific API capabilities.
+
+This package does not work in App Engine "flexible environment".
+
+Example:
+ if !capability.Enabled(c, "datastore_v3", "write") {
+ // show user a different page
+ }
+*/
+package capability // import "google.golang.org/appengine/capability"
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/log"
+
+ pb "google.golang.org/appengine/internal/capability"
+)
+
+// Enabled returns whether an API's capabilities are enabled.
+// The wildcard "*" capability matches every capability of an API.
+// If the underlying RPC fails (if the package is unknown, for example),
+// false is returned and information is written to the application log.
+func Enabled(ctx context.Context, api, capability string) bool {
+ req := &pb.IsEnabledRequest{
+ Package: &api,
+ Capability: []string{capability},
+ }
+ res := &pb.IsEnabledResponse{}
+ if err := internal.Call(ctx, "capability_service", "IsEnabled", req, res); err != nil {
+ log.Warningf(ctx, "capability.Enabled: RPC failed: %v", err)
+ return false
+ }
+ switch *res.SummaryStatus {
+ case pb.IsEnabledResponse_ENABLED,
+ pb.IsEnabledResponse_SCHEDULED_FUTURE,
+ pb.IsEnabledResponse_SCHEDULED_NOW:
+ return true
+ case pb.IsEnabledResponse_UNKNOWN:
+ log.Errorf(ctx, "capability.Enabled: unknown API capability %s/%s", api, capability)
+ return false
+ default:
+ return false
+ }
+}
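+
+// Illustrative sketch (not part of the upstream file): the wildcard "*"
+// capability matches every capability of an API, so an API-wide outage can
+// be checked like this (ctx is assumed to be a request context).
+//
+//    if !Enabled(ctx, "datastore_v3", "*") {
+//        // the datastore API is unavailable; degrade gracefully
+//    }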
diff --git a/vendor/google.golang.org/appengine/channel/channel.go b/vendor/google.golang.org/appengine/channel/channel.go
new file mode 100644
index 0000000..dfe0a3f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/channel/channel.go
@@ -0,0 +1,83 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package channel implements the server side of App Engine's Channel API.
+
+Create creates a new channel associated with the given clientID,
+which must be unique to the client that will use the returned token.
+
+ token, err := channel.Create(c, "player1")
+ if err != nil {
+ // handle error
+ }
+ // return token to the client in an HTTP response
+
+Send sends a message to the client over the channel identified by clientID.
+
+ channel.Send(c, "player1", "Game over!")
+*/
+package channel // import "google.golang.org/appengine/channel"
+
+import (
+ "encoding/json"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ basepb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/channel"
+)
+
+// Create creates a channel and returns a token for use by the client.
+// The clientID is an application-provided string used to identify the client.
+func Create(c context.Context, clientID string) (token string, err error) {
+ req := &pb.CreateChannelRequest{
+ ApplicationKey: &clientID,
+ }
+ resp := &pb.CreateChannelResponse{}
+ err = internal.Call(c, service, "CreateChannel", req, resp)
+ token = resp.GetToken()
+ return token, remapError(err)
+}
+
+// Send sends a message on the channel associated with clientID.
+func Send(c context.Context, clientID, message string) error {
+ req := &pb.SendMessageRequest{
+ ApplicationKey: &clientID,
+ Message: &message,
+ }
+ resp := &basepb.VoidProto{}
+ return remapError(internal.Call(c, service, "SendChannelMessage", req, resp))
+}
+
+// SendJSON is a helper function that sends a JSON-encoded value
+// on the channel associated with clientID.
+func SendJSON(c context.Context, clientID string, value interface{}) error {
+ m, err := json.Marshal(value)
+ if err != nil {
+ return err
+ }
+ return Send(c, clientID, string(m))
+}
+
+// remapError rewrites any APIError that references the "xmpp" service so that it references "channel" instead.
+func remapError(err error) error {
+ if e, ok := err.(*internal.APIError); ok {
+ if e.Service == "xmpp" {
+ e.Service = "channel"
+ }
+ }
+ return err
+}
+
+var service = "xmpp" // prod
+
+func init() {
+ if appengine.IsDevAppServer() {
+ service = "channel" // dev
+ }
+ internal.RegisterErrorCodeMap("channel", pb.ChannelServiceError_ErrorCode_name)
+}
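+
+// Illustrative sketch (not part of the upstream file): SendJSON marshals an
+// arbitrary value and sends it over the client's channel. The clientID
+// "player1" mirrors the package example above; the payload is a placeholder.
+//
+//    update := map[string]int{"score": 42}
+//    if err := SendJSON(c, "player1", update); err != nil {
+//        // handle error
+//    }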
diff --git a/vendor/google.golang.org/appengine/channel/channel_test.go b/vendor/google.golang.org/appengine/channel/channel_test.go
new file mode 100644
index 0000000..c7498eb
--- /dev/null
+++ b/vendor/google.golang.org/appengine/channel/channel_test.go
@@ -0,0 +1,21 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package channel
+
+import (
+ "testing"
+
+ "google.golang.org/appengine/internal"
+)
+
+func TestRemapError(t *testing.T) {
+ err := &internal.APIError{
+ Service: "xmpp",
+ }
+ err = remapError(err).(*internal.APIError)
+ if err.Service != "channel" {
+ t.Errorf("err.Service = %q, want %q", err.Service, "channel")
+ }
+}
diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql.go
new file mode 100644
index 0000000..7b27e6b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql.go
@@ -0,0 +1,62 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package cloudsql exposes access to Google Cloud SQL databases.
+
+This package does not work in App Engine "flexible environment".
+
+This package is intended for MySQL drivers to make App Engine-specific
+connections. Applications should use this package through database/sql:
+Select a pure Go MySQL driver that supports this package, and use sql.Open
+with protocol "cloudsql" and an address of the Cloud SQL instance.
+
+A Go MySQL driver that has been tested to work well with Cloud SQL
+is the go-sql-driver:
+ import "database/sql"
+ import _ "github.com/go-sql-driver/mysql"
+
+ db, err := sql.Open("mysql", "user@cloudsql(project-id:instance-name)/dbname")
+
+
+Another driver that works well with Cloud SQL is the mymysql driver:
+ import "database/sql"
+ import _ "github.com/ziutek/mymysql/godrv"
+
+ db, err := sql.Open("mymysql", "cloudsql:instance-name*dbname/user/password")
+
+
+Using either of these drivers, you can perform a standard SQL query.
+This example assumes there is a table named 'users' with
+columns 'first_name' and 'last_name':
+
+ rows, err := db.Query("SELECT first_name, last_name FROM users")
+ if err != nil {
+ log.Errorf(ctx, "db.Query: %v", err)
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var firstName string
+ var lastName string
+ if err := rows.Scan(&firstName, &lastName); err != nil {
+ log.Errorf(ctx, "rows.Scan: %v", err)
+ continue
+ }
+ log.Infof(ctx, "First: %v - Last: %v", firstName, lastName)
+ }
+ if err := rows.Err(); err != nil {
+ log.Errorf(ctx, "Row error: %v", err)
+ }
+*/
+package cloudsql
+
+import (
+ "net"
+)
+
+// Dial connects to the named Cloud SQL instance.
+func Dial(instance string) (net.Conn, error) {
+ return connect(instance)
+}
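+
+// Illustrative sketch (not part of the upstream file): Dial is normally used
+// indirectly through a database/sql driver, but it can also open a raw
+// connection. The instance name is a placeholder in the
+// "project-id:instance-name" form shown in the package comment.
+//
+//    conn, err := Dial("project-id:instance-name")
+//    if err != nil {
+//        // handle error
+//    }
+//    defer conn.Close()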
diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go
new file mode 100644
index 0000000..af62dba
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go
@@ -0,0 +1,17 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package cloudsql
+
+import (
+ "net"
+
+ "appengine/cloudsql"
+)
+
+func connect(instance string) (net.Conn, error) {
+ return cloudsql.Dial(instance)
+}
diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go
new file mode 100644
index 0000000..90fa7b3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package cloudsql
+
+import (
+ "errors"
+ "net"
+)
+
+func connect(instance string) (net.Conn, error) {
+ return nil, errors.New(`cloudsql: not supported in App Engine "flexible environment"`)
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go b/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go
new file mode 100644
index 0000000..e317cdd
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go
@@ -0,0 +1,342 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Program aebundler turns a Go app into a fully self-contained tar file.
+// The app and its subdirectories (if any) are placed under "."
+// and the dependencies from $GOPATH are placed under ./_gopath/src.
+// A main func is synthesized if one does not exist.
+//
+// A sample Dockerfile to be used with this bundler could look like this:
+// FROM gcr.io/google_appengine/go-compat
+// ADD . /app
+// RUN GOPATH=/app/_gopath go build -tags appenginevm -o /app/_ah/exe
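+//
+// An illustrative invocation of the bundler itself (file and directory names
+// are placeholders):
+//    aebundler -root ./myapp -o app.tar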
+package main
+
+import (
+ "archive/tar"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+var (
+ output = flag.String("o", "", "name of output tar file or '-' for stdout")
+ rootDir = flag.String("root", ".", "directory name of application root")
+ vm = flag.Bool("vm", true, `bundle an app for App Engine "flexible environment"`)
+
+ skipFiles = map[string]bool{
+ ".git": true,
+ ".gitconfig": true,
+ ".hg": true,
+ ".travis.yml": true,
+ }
+)
+
+const (
+ newMain = `package main
+import "google.golang.org/appengine"
+func main() {
+ appengine.Main()
+}
+`
+)
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ fmt.Fprintf(os.Stderr, "\t%s -o <file.tar|->\tBundle app to named tar file or stdout\n", os.Args[0])
+ fmt.Fprintf(os.Stderr, "\noptional arguments:\n")
+ flag.PrintDefaults()
+}
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+
+ var tags []string
+ if *vm {
+ tags = append(tags, "appenginevm")
+ } else {
+ tags = append(tags, "appengine")
+ }
+
+ tarFile := *output
+ if tarFile == "" {
+ usage()
+ errorf("Required -o flag not specified.")
+ }
+
+ app, err := analyze(tags)
+ if err != nil {
+ errorf("Error analyzing app: %v", err)
+ }
+ if err := app.bundle(tarFile); err != nil {
+ errorf("Unable to bundle app: %v", err)
+ }
+}
+
+// errorf prints the error message and exits.
+func errorf(format string, a ...interface{}) {
+ fmt.Fprintf(os.Stderr, "aebundler: "+format+"\n", a...)
+ os.Exit(1)
+}
+
+type app struct {
+ hasMain bool
+ appFiles []string
+ imports map[string]string
+}
+
+// analyze checks the app for building with the given build tags and returns hasMain,
+// app files, and a map of full directory import names to original import names.
+func analyze(tags []string) (*app, error) {
+ ctxt := buildContext(tags)
+ hasMain, appFiles, err := checkMain(ctxt)
+ if err != nil {
+ return nil, err
+ }
+ gopath := filepath.SplitList(ctxt.GOPATH)
+ im, err := imports(ctxt, *rootDir, gopath)
+ return &app{
+ hasMain: hasMain,
+ appFiles: appFiles,
+ imports: im,
+ }, err
+}
+
+// buildContext returns the context for building the source.
+func buildContext(tags []string) *build.Context {
+ return &build.Context{
+ GOARCH: build.Default.GOARCH,
+ GOOS: build.Default.GOOS,
+ GOROOT: build.Default.GOROOT,
+ GOPATH: build.Default.GOPATH,
+ Compiler: build.Default.Compiler,
+ BuildTags: append(build.Default.BuildTags, tags...),
+ }
+}
+
+// bundle bundles the app into the named tarFile ("-"==stdout).
+func (s *app) bundle(tarFile string) (err error) {
+ var out io.Writer
+ if tarFile == "-" {
+ out = os.Stdout
+ } else {
+ f, err := os.Create(tarFile)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := f.Close(); err == nil {
+ err = cerr
+ }
+ }()
+ out = f
+ }
+ tw := tar.NewWriter(out)
+
+ for srcDir, importName := range s.imports {
+ dstDir := "_gopath/src/" + importName
+ if err = copyTree(tw, dstDir, srcDir); err != nil {
+ return fmt.Errorf("unable to copy directory %v to %v: %v", srcDir, dstDir, err)
+ }
+ }
+ if err := copyTree(tw, ".", *rootDir); err != nil {
+ return fmt.Errorf("unable to copy root directory to /app: %v", err)
+ }
+ if !s.hasMain {
+ if err := synthesizeMain(tw, s.appFiles); err != nil {
+ return fmt.Errorf("unable to synthesize new main func: %v", err)
+ }
+ }
+
+ if err := tw.Close(); err != nil {
+ return fmt.Errorf("unable to close tar file %v: %v", tarFile, err)
+ }
+ return nil
+}
+
+// synthesizeMain generates a new main func and writes it to the tarball.
+func synthesizeMain(tw *tar.Writer, appFiles []string) error {
+ appMap := make(map[string]bool)
+ for _, f := range appFiles {
+ appMap[f] = true
+ }
+ var f string
+ for i := 0; i < 100; i++ {
+ f = fmt.Sprintf("app_main%d.go", i)
+ if !appMap[filepath.Join(*rootDir, f)] {
+ break
+ }
+ }
+ if appMap[filepath.Join(*rootDir, f)] {
+ return fmt.Errorf("unable to find unique name for %v", f)
+ }
+ hdr := &tar.Header{
+ Name: f,
+ Mode: 0644,
+ Size: int64(len(newMain)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return fmt.Errorf("unable to write header for %v: %v", f, err)
+ }
+ if _, err := tw.Write([]byte(newMain)); err != nil {
+ return fmt.Errorf("unable to write %v to tar file: %v", f, err)
+ }
+ return nil
+}
+
+// imports returns a map of all import directories (recursively) used by the app.
+// The return value maps full directory names to original import names.
+func imports(ctxt *build.Context, srcDir string, gopath []string) (map[string]string, error) {
+ pkg, err := ctxt.ImportDir(srcDir, 0)
+ if err != nil {
+ return nil, fmt.Errorf("unable to analyze source: %v", err)
+ }
+
+ // Resolve all non-standard-library imports
+ result := make(map[string]string)
+ for _, v := range pkg.Imports {
+ if !strings.Contains(v, ".") {
+ continue
+ }
+ src, err := findInGopath(v, gopath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to find import %v in gopath %v: %v", v, gopath, err)
+ }
+ result[src] = v
+ im, err := imports(ctxt, src, gopath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse package %v: %v", src, err)
+ }
+ for k, v := range im {
+ result[k] = v
+ }
+ }
+ return result, nil
+}
+
+// findInGopath searches the gopath for the named import directory.
+func findInGopath(dir string, gopath []string) (string, error) {
+ for _, v := range gopath {
+ dst := filepath.Join(v, "src", dir)
+ if _, err := os.Stat(dst); err == nil {
+ return dst, nil
+ }
+ }
+ return "", fmt.Errorf("unable to find package %v in gopath %v", dir, gopath)
+}
+
+// copyTree copies the tree rooted at srcDir into the tar file under dstDir, ignoring skipFiles.
+func copyTree(tw *tar.Writer, dstDir, srcDir string) error {
+ entries, err := ioutil.ReadDir(srcDir)
+ if err != nil {
+ return fmt.Errorf("unable to read dir %v: %v", srcDir, err)
+ }
+ for _, entry := range entries {
+ n := entry.Name()
+ if skipFiles[n] {
+ continue
+ }
+ s := filepath.Join(srcDir, n)
+ d := filepath.Join(dstDir, n)
+ if entry.IsDir() {
+ if err := copyTree(tw, d, s); err != nil {
+ return fmt.Errorf("unable to copy dir %v to %v: %v", s, d, err)
+ }
+ continue
+ }
+ if err := copyFile(tw, d, s); err != nil {
+ return fmt.Errorf("unable to copy dir %v to %v: %v", s, d, err)
+ }
+ }
+ return nil
+}
+
+// copyFile copies src to tar file dst.
+func copyFile(tw *tar.Writer, dst, src string) error {
+ s, err := os.Open(src)
+ if err != nil {
+ return fmt.Errorf("unable to open %v: %v", src, err)
+ }
+ defer s.Close()
+ fi, err := s.Stat()
+ if err != nil {
+ return fmt.Errorf("unable to stat %v: %v", src, err)
+ }
+
+ hdr, err := tar.FileInfoHeader(fi, dst)
+ if err != nil {
+ return fmt.Errorf("unable to create tar header for %v: %v", dst, err)
+ }
+ hdr.Name = dst
+ if err := tw.WriteHeader(hdr); err != nil {
+ return fmt.Errorf("unable to write header for %v: %v", dst, err)
+ }
+ _, err = io.Copy(tw, s)
+ if err != nil {
+ return fmt.Errorf("unable to copy %v to %v: %v", src, dst, err)
+ }
+ return nil
+}
+
+// checkMain verifies that there is a single "main" function.
+// It also returns a list of all Go source files in the app.
+func checkMain(ctxt *build.Context) (bool, []string, error) {
+ pkg, err := ctxt.ImportDir(*rootDir, 0)
+ if err != nil {
+ return false, nil, fmt.Errorf("unable to analyze source: %v", err)
+ }
+ if !pkg.IsCommand() {
+ errorf("Your app's package needs to be changed from %q to \"main\".\n", pkg.Name)
+ }
+ // Search for a "func main"
+ var hasMain bool
+ var appFiles []string
+ for _, f := range pkg.GoFiles {
+ n := filepath.Join(*rootDir, f)
+ appFiles = append(appFiles, n)
+ if hasMain, err = readFile(n); err != nil {
+ return false, nil, fmt.Errorf("error parsing %q: %v", n, err)
+ }
+ }
+ return hasMain, appFiles, nil
+}
+
+// isMain returns whether the given function declaration is a main function.
+// Such a function must be called "main", not have a receiver, and have no arguments or return types.
+func isMain(f *ast.FuncDecl) bool {
+ ft := f.Type
+ return f.Name.Name == "main" && f.Recv == nil && ft.Params.NumFields() == 0 && ft.Results.NumFields() == 0
+}
+
+// readFile reads and parses the Go source code file and returns whether it has a main function.
+func readFile(filename string) (hasMain bool, err error) {
+ var src []byte
+ src, err = ioutil.ReadFile(filename)
+ if err != nil {
+ return
+ }
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, filename, src, 0)
+ if err != nil {
+ return
+ }
+ for _, decl := range file.Decls {
+ funcDecl, ok := decl.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+ if !isMain(funcDecl) {
+ continue
+ }
+ hasMain = true
+ break
+ }
+ return
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go b/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go
new file mode 100644
index 0000000..155fc1c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go
@@ -0,0 +1,268 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Program aedeploy assists with deploying App Engine "flexible environment" Go apps to production.
+// A temporary directory is created; the app, its subdirectories, and all its
+// dependencies from $GOPATH are copied into the directory; then the app
+// is deployed to production with the provided command.
+//
+// The app must be in "package main".
+//
+// This command must be issued from within the root directory of the app
+// (where the app.yaml file is located).
+package main
+
+import (
+ "flag"
+ "fmt"
+ "go/build"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+)
+
+var (
+ skipFiles = map[string]bool{
+ ".git": true,
+ ".gitconfig": true,
+ ".hg": true,
+ ".travis.yml": true,
+ }
+
+ gopathCache = map[string]string{}
+)
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ fmt.Fprintf(os.Stderr, "\t%s gcloud --verbosity debug preview app deploy --version myversion ./app.yaml\tDeploy app to production\n", os.Args[0])
+}
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+ if flag.NArg() < 1 {
+ usage()
+ os.Exit(1)
+ }
+
+ if err := aedeploy(); err != nil {
+ fmt.Fprintf(os.Stderr, os.Args[0]+": Error: %v\n", err)
+ os.Exit(1)
+ }
+}
+
+func aedeploy() error {
+ tags := []string{"appenginevm"}
+ app, err := analyze(tags)
+ if err != nil {
+ return err
+ }
+
+ tmpDir, err := app.bundle()
+ if tmpDir != "" {
+ defer os.RemoveAll(tmpDir)
+ }
+ if err != nil {
+ return err
+ }
+
+ if err := os.Chdir(tmpDir); err != nil {
+ return fmt.Errorf("unable to chdir to %v: %v", tmpDir, err)
+ }
+ return deploy()
+}
+
+// deploy calls the provided command to deploy the app from the temporary directory.
+func deploy() error {
+ cmd := exec.Command(flag.Arg(0), flag.Args()[1:]...)
+ cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
+ if err := cmd.Run(); err != nil {
+ return fmt.Errorf("unable to run %q: %v", strings.Join(flag.Args(), " "), err)
+ }
+ return nil
+}
+
+type app struct {
+ appFiles []string
+ imports map[string]string
+}
+
+// analyze checks the app for building with the given build tags and returns
+// app files, and a map of full directory import names to original import names.
+func analyze(tags []string) (*app, error) {
+ ctxt := buildContext(tags)
+ appFiles, err := appFiles(ctxt)
+ if err != nil {
+ return nil, err
+ }
+ gopath := filepath.SplitList(ctxt.GOPATH)
+ im, err := imports(ctxt, ".", gopath)
+ return &app{
+ appFiles: appFiles,
+ imports: im,
+ }, err
+}
+
+// buildContext returns the context for building the source.
+func buildContext(tags []string) *build.Context {
+ return &build.Context{
+ GOARCH: "amd64",
+ GOOS: "linux",
+ GOROOT: build.Default.GOROOT,
+ GOPATH: build.Default.GOPATH,
+ Compiler: build.Default.Compiler,
+ BuildTags: append(defaultBuildTags, tags...),
+ }
+}
+
+// All build tags except go1.7, since Go 1.6 is the runtime version.
+var defaultBuildTags = []string{
+ "go1.1", "go1.2", "go1.3", "go1.4", "go1.5", "go1.6"}
+
+// bundle bundles the app into a temporary directory.
+func (s *app) bundle() (tmpdir string, err error) {
+ workDir, err := ioutil.TempDir("", "aedeploy")
+ if err != nil {
+ return "", fmt.Errorf("unable to create tmpdir: %v", err)
+ }
+
+ for srcDir, importName := range s.imports {
+ dstDir := "_gopath/src/" + importName
+ if err := copyTree(workDir, dstDir, srcDir); err != nil {
+ return workDir, fmt.Errorf("unable to copy directory %v to %v: %v", srcDir, dstDir, err)
+ }
+ }
+ if err := copyTree(workDir, ".", "."); err != nil {
+ return workDir, fmt.Errorf("unable to copy root directory to /app: %v", err)
+ }
+ return workDir, nil
+}
+
+// imports returns a map of all import directories (recursively) used by the app.
+// The return value maps full directory names to original import names.
+func imports(ctxt *build.Context, srcDir string, gopath []string) (map[string]string, error) {
+ pkg, err := ctxt.ImportDir(srcDir, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ // Resolve all non-standard-library imports
+ result := make(map[string]string)
+ for _, v := range pkg.Imports {
+ if !strings.Contains(v, ".") {
+ continue
+ }
+ src, err := findInGopath(v, gopath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to find import %v in gopath %v: %v", v, gopath, err)
+ }
+ if _, ok := result[src]; ok { // Already processed
+ continue
+ }
+ result[src] = v
+ im, err := imports(ctxt, src, gopath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse package %v: %v", src, err)
+ }
+ for k, v := range im {
+ result[k] = v
+ }
+ }
+ return result, nil
+}
+
+// findInGopath searches the gopath for the named import directory.
+func findInGopath(dir string, gopath []string) (string, error) {
+ if v, ok := gopathCache[dir]; ok {
+ return v, nil
+ }
+ for _, v := range gopath {
+ dst := filepath.Join(v, "src", dir)
+ if _, err := os.Stat(dst); err == nil {
+ gopathCache[dir] = dst
+ return dst, nil
+ }
+ }
+ return "", fmt.Errorf("unable to find package %v in gopath %v", dir, gopath)
+}
+
+// copyTree copies srcDir to dstDir relative to dstRoot, ignoring skipFiles.
+func copyTree(dstRoot, dstDir, srcDir string) error {
+ d := filepath.Join(dstRoot, dstDir)
+ if err := os.MkdirAll(d, 0755); err != nil {
+ return fmt.Errorf("unable to create directory %q: %v", d, err)
+ }
+
+ entries, err := ioutil.ReadDir(srcDir)
+ if err != nil {
+ return fmt.Errorf("unable to read dir %q: %v", srcDir, err)
+ }
+ for _, entry := range entries {
+ n := entry.Name()
+ if skipFiles[n] {
+ continue
+ }
+ s := filepath.Join(srcDir, n)
+ if entry.Mode()&os.ModeSymlink == os.ModeSymlink {
+ if entry, err = os.Stat(s); err != nil {
+ return fmt.Errorf("unable to stat %v: %v", s, err)
+ }
+ }
+ d := filepath.Join(dstDir, n)
+ if entry.IsDir() {
+ if err := copyTree(dstRoot, d, s); err != nil {
+ return fmt.Errorf("unable to copy dir %q to %q: %v", s, d, err)
+ }
+ continue
+ }
+ if err := copyFile(dstRoot, d, s); err != nil {
+ return fmt.Errorf("unable to copy dir %q to %q: %v", s, d, err)
+ }
+ }
+ return nil
+}
+
+// copyFile copies src to dst relative to dstRoot.
+func copyFile(dstRoot, dst, src string) error {
+ s, err := os.Open(src)
+ if err != nil {
+ return fmt.Errorf("unable to open %q: %v", src, err)
+ }
+ defer s.Close()
+
+ dst = filepath.Join(dstRoot, dst)
+ d, err := os.Create(dst)
+ if err != nil {
+ return fmt.Errorf("unable to create %q: %v", dst, err)
+ }
+ _, err = io.Copy(d, s)
+ if err != nil {
+ d.Close() // ignore error, copy already failed.
+ return fmt.Errorf("unable to copy %q to %q: %v", src, dst, err)
+ }
+ if err := d.Close(); err != nil {
+ return fmt.Errorf("unable to close %q: %v", dst, err)
+ }
+ return nil
+}
+
+// appFiles returns a list of all Go source files in the app.
+func appFiles(ctxt *build.Context) ([]string, error) {
+ pkg, err := ctxt.ImportDir(".", 0)
+ if err != nil {
+ return nil, err
+ }
+ if !pkg.IsCommand() {
+ return nil, fmt.Errorf(`the root of your app needs to be package "main" (currently %q). Please see https://cloud.google.com/appengine/docs/flexible/go/ for more details on structuring your app.`, pkg.Name)
+ }
+ var appFiles []string
+ for _, f := range pkg.GoFiles {
+ n := filepath.Join(".", f)
+ appFiles = append(appFiles, n)
+ }
+ return appFiles, nil
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/ae.go b/vendor/google.golang.org/appengine/cmd/aefix/ae.go
new file mode 100644
index 0000000..0fe2d4a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/ae.go
@@ -0,0 +1,185 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "go/ast"
+ "path"
+ "strconv"
+ "strings"
+)
+
+const (
+ ctxPackage = "golang.org/x/net/context"
+
+ newPackageBase = "google.golang.org/"
+ stutterPackage = false
+)
+
+func init() {
+ register(fix{
+ "ae",
+ "2016-04-15",
+ aeFn,
+ `Update old App Engine APIs to new App Engine APIs`,
+ })
+}
+
+// logMethod is the set of methods on appengine.Context used for logging.
+var logMethod = map[string]bool{
+ "Debugf": true,
+ "Infof": true,
+ "Warningf": true,
+ "Errorf": true,
+ "Criticalf": true,
+}
+
+// mapPackage turns "appengine" into "google.golang.org/appengine", etc.
+func mapPackage(s string) string {
+ if stutterPackage {
+ s += "/" + path.Base(s)
+ }
+ return newPackageBase + s
+}
+
+func aeFn(f *ast.File) bool {
+ // During the walk, we track the last thing seen that looks like
+ // an appengine.Context, and reset it once the walk leaves a func.
+ var lastContext *ast.Ident
+
+ fixed := false
+
+ // Update imports.
+ mainImp := "appengine"
+ for _, imp := range f.Imports {
+ pth, _ := strconv.Unquote(imp.Path.Value)
+ if pth == "appengine" || strings.HasPrefix(pth, "appengine/") {
+ newPth := mapPackage(pth)
+ imp.Path.Value = strconv.Quote(newPth)
+ fixed = true
+
+ if pth == "appengine" {
+ mainImp = newPth
+ }
+ }
+ }
+
+ // Update any API changes.
+ walk(f, func(n interface{}) {
+ if ft, ok := n.(*ast.FuncType); ok && ft.Params != nil {
+ // See if this func has an `appengine.Context` arg.
+ // If so, remember its identifier.
+ for _, param := range ft.Params.List {
+ if !isPkgDot(param.Type, "appengine", "Context") {
+ continue
+ }
+ if len(param.Names) == 1 {
+ lastContext = param.Names[0]
+ break
+ }
+ }
+ return
+ }
+
+ if as, ok := n.(*ast.AssignStmt); ok {
+ if len(as.Lhs) == 1 && len(as.Rhs) == 1 {
+ // If this node is an assignment from an appengine.NewContext invocation,
+ // remember the identifier on the LHS.
+ if isCall(as.Rhs[0], "appengine", "NewContext") {
+ if ident, ok := as.Lhs[0].(*ast.Ident); ok {
+ lastContext = ident
+ return
+ }
+ }
+ // x (=|:=) appengine.Timeout(y, z)
+ // should become
+ // x, _ (=|:=) context.WithTimeout(y, z)
+ if isCall(as.Rhs[0], "appengine", "Timeout") {
+ addImport(f, ctxPackage)
+ as.Lhs = append(as.Lhs, ast.NewIdent("_"))
+ // isCall already did the type checking.
+ sel := as.Rhs[0].(*ast.CallExpr).Fun.(*ast.SelectorExpr)
+ sel.X = ast.NewIdent("context")
+ sel.Sel = ast.NewIdent("WithTimeout")
+ fixed = true
+ return
+ }
+ }
+ return
+ }
+
+ // If this node is a FuncDecl, we've finished the function, so reset lastContext.
+ if _, ok := n.(*ast.FuncDecl); ok {
+ lastContext = nil
+ return
+ }
+
+ if call, ok := n.(*ast.CallExpr); ok {
+ if isPkgDot(call.Fun, "appengine", "Datacenter") && len(call.Args) == 0 {
+ insertContext(f, call, lastContext)
+ fixed = true
+ return
+ }
+ if isPkgDot(call.Fun, "taskqueue", "QueueStats") && len(call.Args) == 3 {
+ call.Args = call.Args[:2] // drop last arg
+ fixed = true
+ return
+ }
+
+ sel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return
+ }
+ if lastContext != nil && refersTo(sel.X, lastContext) && logMethod[sel.Sel.Name] {
+ // c.Errorf(...)
+ // should become
+ // log.Errorf(c, ...)
+ addImport(f, mapPackage("appengine/log"))
+ sel.X = &ast.Ident{ // ast.NewIdent doesn't preserve the position.
+ NamePos: sel.X.Pos(),
+ Name: "log",
+ }
+ insertContext(f, call, lastContext)
+ fixed = true
+ return
+ }
+ }
+ })
+
+ // Change any `appengine.Context` to `context.Context`.
+ // Do this in a separate walk because the previous walk
+ // wants to identify "appengine.Context".
+ walk(f, func(n interface{}) {
+ expr, ok := n.(ast.Expr)
+ if ok && isPkgDot(expr, "appengine", "Context") {
+ addImport(f, ctxPackage)
+ // isPkgDot did the type checking.
+ n.(*ast.SelectorExpr).X.(*ast.Ident).Name = "context"
+ fixed = true
+ return
+ }
+ })
+
+ // The changes above might remove the need to import "appengine".
+ // Check if it's used, and drop it if it isn't.
+ if fixed && !usesImport(f, mainImp) {
+ deleteImport(f, mainImp)
+ }
+
+ return fixed
+}
+
+// insertContext prepends ctx as the first argument of call.
+// ctx may be nil, in which case a plain "ctx" identifier is inserted.
+func insertContext(f *ast.File, call *ast.CallExpr, ctx *ast.Ident) {
+ if ctx == nil {
+ // context is unknown, so use a plain "ctx".
+ ctx = ast.NewIdent("ctx")
+ } else {
+ // Create a fresh *ast.Ident so we drop the position information.
+ ctx = ast.NewIdent(ctx.Name)
+ }
+
+ call.Args = append([]ast.Expr{ctx}, call.Args...)
+}
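+
+// Illustrative sketch (not part of the upstream file) of what insertContext
+// does to a call expression, mirroring the ae.0 test case: with lastContext
+// named "c",
+//
+//    appengine.Datacenter()
+//
+// becomes
+//
+//    appengine.Datacenter(c)
+//
+// and when no context identifier is known, a plain "ctx" is inserted instead.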
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/ae_test.go b/vendor/google.golang.org/appengine/cmd/aefix/ae_test.go
new file mode 100644
index 0000000..21f5695
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/ae_test.go
@@ -0,0 +1,144 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package main
+
+func init() {
+ addTestCases(aeTests, nil)
+}
+
+var aeTests = []testCase{
+ // Collection of fixes:
+ // - imports
+ // - appengine.Timeout -> context.WithTimeout
+ // - add ctx arg to appengine.Datacenter
+ // - logging API
+ {
+ Name: "ae.0",
+ In: `package foo
+
+import (
+ "net/http"
+ "time"
+
+ "appengine"
+ "appengine/datastore"
+)
+
+func f(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+
+ c = appengine.Timeout(c, 5*time.Second)
+ err := datastore.ErrNoSuchEntity
+ c.Errorf("Something interesting happened: %v", err)
+ _ = appengine.Datacenter()
+}
+`,
+ Out: `package foo
+
+import (
+ "net/http"
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/datastore"
+ "google.golang.org/appengine/log"
+)
+
+func f(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+
+ c, _ = context.WithTimeout(c, 5*time.Second)
+ err := datastore.ErrNoSuchEntity
+ log.Errorf(c, "Something interesting happened: %v", err)
+ _ = appengine.Datacenter(c)
+}
+`,
+ },
+
+ // Updating a function that takes an appengine.Context arg.
+ {
+ Name: "ae.1",
+ In: `package foo
+
+import (
+ "appengine"
+)
+
+func LogSomething(c2 appengine.Context) {
+ c2.Warningf("Stand back! I'm going to try science!")
+}
+`,
+ Out: `package foo
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/log"
+)
+
+func LogSomething(c2 context.Context) {
+ log.Warningf(c2, "Stand back! I'm going to try science!")
+}
+`,
+ },
+
+ // Less widely used API changes:
+ // - drop maxTasks arg to taskqueue.QueueStats
+ {
+ Name: "ae.2",
+ In: `package foo
+
+import (
+ "appengine"
+ "appengine/taskqueue"
+)
+
+func f(ctx appengine.Context) {
+ stats, err := taskqueue.QueueStats(ctx, []string{"one", "two"}, 0)
+}
+`,
+ Out: `package foo
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/taskqueue"
+)
+
+func f(ctx context.Context) {
+ stats, err := taskqueue.QueueStats(ctx, []string{"one", "two"})
+}
+`,
+ },
+
+ // Check that the main "appengine" import will not be dropped
+ // if an appengine.Context -> context.Context change happens
+ // but the appengine package is still referenced.
+ {
+ Name: "ae.3",
+ In: `package foo
+
+import (
+ "appengine"
+ "io"
+)
+
+func f(ctx appengine.Context, w io.Writer) {
+ _ = appengine.IsDevAppServer()
+}
+`,
+ Out: `package foo
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/appengine"
+ "io"
+)
+
+func f(ctx context.Context, w io.Writer) {
+ _ = appengine.IsDevAppServer()
+}
+`,
+ },
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/fix.go b/vendor/google.golang.org/appengine/cmd/aefix/fix.go
new file mode 100644
index 0000000..a100be7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/fix.go
@@ -0,0 +1,848 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "os"
+ "path"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+type fix struct {
+ name string
+ date string // date that fix was introduced, in YYYY-MM-DD format
+ f func(*ast.File) bool
+ desc string
+}
+
+// main runs sort.Sort(byName(fixes)) before printing the list of fixes.
+type byName []fix
+
+func (f byName) Len() int { return len(f) }
+func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+func (f byName) Less(i, j int) bool { return f[i].name < f[j].name }
+
+// main runs sort.Sort(byDate(fixes)) before applying fixes.
+type byDate []fix
+
+func (f byDate) Len() int { return len(f) }
+func (f byDate) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+func (f byDate) Less(i, j int) bool { return f[i].date < f[j].date }
+
+var fixes []fix
+
+func register(f fix) {
+ fixes = append(fixes, f)
+}
+
+// walk traverses the AST x, calling visit(y) for each node y in the tree but
+// also with a pointer to each ast.Expr, ast.Stmt, and *ast.BlockStmt,
+// in a bottom-up traversal.
+func walk(x interface{}, visit func(interface{})) {
+ walkBeforeAfter(x, nop, visit)
+}
+
+func nop(interface{}) {}
+
+// walkBeforeAfter is like walk but calls before(x) before traversing
+// x's children and after(x) afterward.
+func walkBeforeAfter(x interface{}, before, after func(interface{})) {
+ before(x)
+
+ switch n := x.(type) {
+ default:
+ panic(fmt.Errorf("unexpected type %T in walkBeforeAfter", x))
+
+ case nil:
+
+ // pointers to interfaces
+ case *ast.Decl:
+ walkBeforeAfter(*n, before, after)
+ case *ast.Expr:
+ walkBeforeAfter(*n, before, after)
+ case *ast.Spec:
+ walkBeforeAfter(*n, before, after)
+ case *ast.Stmt:
+ walkBeforeAfter(*n, before, after)
+
+ // pointers to struct pointers
+ case **ast.BlockStmt:
+ walkBeforeAfter(*n, before, after)
+ case **ast.CallExpr:
+ walkBeforeAfter(*n, before, after)
+ case **ast.FieldList:
+ walkBeforeAfter(*n, before, after)
+ case **ast.FuncType:
+ walkBeforeAfter(*n, before, after)
+ case **ast.Ident:
+ walkBeforeAfter(*n, before, after)
+ case **ast.BasicLit:
+ walkBeforeAfter(*n, before, after)
+
+ // pointers to slices
+ case *[]ast.Decl:
+ walkBeforeAfter(*n, before, after)
+ case *[]ast.Expr:
+ walkBeforeAfter(*n, before, after)
+ case *[]*ast.File:
+ walkBeforeAfter(*n, before, after)
+ case *[]*ast.Ident:
+ walkBeforeAfter(*n, before, after)
+ case *[]ast.Spec:
+ walkBeforeAfter(*n, before, after)
+ case *[]ast.Stmt:
+ walkBeforeAfter(*n, before, after)
+
+ // These are ordered and grouped to match ../../pkg/go/ast/ast.go
+ case *ast.Field:
+ walkBeforeAfter(&n.Names, before, after)
+ walkBeforeAfter(&n.Type, before, after)
+ walkBeforeAfter(&n.Tag, before, after)
+ case *ast.FieldList:
+ for _, field := range n.List {
+ walkBeforeAfter(field, before, after)
+ }
+ case *ast.BadExpr:
+ case *ast.Ident:
+ case *ast.Ellipsis:
+ walkBeforeAfter(&n.Elt, before, after)
+ case *ast.BasicLit:
+ case *ast.FuncLit:
+ walkBeforeAfter(&n.Type, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.CompositeLit:
+ walkBeforeAfter(&n.Type, before, after)
+ walkBeforeAfter(&n.Elts, before, after)
+ case *ast.ParenExpr:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.SelectorExpr:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.IndexExpr:
+ walkBeforeAfter(&n.X, before, after)
+ walkBeforeAfter(&n.Index, before, after)
+ case *ast.SliceExpr:
+ walkBeforeAfter(&n.X, before, after)
+ if n.Low != nil {
+ walkBeforeAfter(&n.Low, before, after)
+ }
+ if n.High != nil {
+ walkBeforeAfter(&n.High, before, after)
+ }
+ case *ast.TypeAssertExpr:
+ walkBeforeAfter(&n.X, before, after)
+ walkBeforeAfter(&n.Type, before, after)
+ case *ast.CallExpr:
+ walkBeforeAfter(&n.Fun, before, after)
+ walkBeforeAfter(&n.Args, before, after)
+ case *ast.StarExpr:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.UnaryExpr:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.BinaryExpr:
+ walkBeforeAfter(&n.X, before, after)
+ walkBeforeAfter(&n.Y, before, after)
+ case *ast.KeyValueExpr:
+ walkBeforeAfter(&n.Key, before, after)
+ walkBeforeAfter(&n.Value, before, after)
+
+ case *ast.ArrayType:
+ walkBeforeAfter(&n.Len, before, after)
+ walkBeforeAfter(&n.Elt, before, after)
+ case *ast.StructType:
+ walkBeforeAfter(&n.Fields, before, after)
+ case *ast.FuncType:
+ walkBeforeAfter(&n.Params, before, after)
+ if n.Results != nil {
+ walkBeforeAfter(&n.Results, before, after)
+ }
+ case *ast.InterfaceType:
+ walkBeforeAfter(&n.Methods, before, after)
+ case *ast.MapType:
+ walkBeforeAfter(&n.Key, before, after)
+ walkBeforeAfter(&n.Value, before, after)
+ case *ast.ChanType:
+ walkBeforeAfter(&n.Value, before, after)
+
+ case *ast.BadStmt:
+ case *ast.DeclStmt:
+ walkBeforeAfter(&n.Decl, before, after)
+ case *ast.EmptyStmt:
+ case *ast.LabeledStmt:
+ walkBeforeAfter(&n.Stmt, before, after)
+ case *ast.ExprStmt:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.SendStmt:
+ walkBeforeAfter(&n.Chan, before, after)
+ walkBeforeAfter(&n.Value, before, after)
+ case *ast.IncDecStmt:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.AssignStmt:
+ walkBeforeAfter(&n.Lhs, before, after)
+ walkBeforeAfter(&n.Rhs, before, after)
+ case *ast.GoStmt:
+ walkBeforeAfter(&n.Call, before, after)
+ case *ast.DeferStmt:
+ walkBeforeAfter(&n.Call, before, after)
+ case *ast.ReturnStmt:
+ walkBeforeAfter(&n.Results, before, after)
+ case *ast.BranchStmt:
+ case *ast.BlockStmt:
+ walkBeforeAfter(&n.List, before, after)
+ case *ast.IfStmt:
+ walkBeforeAfter(&n.Init, before, after)
+ walkBeforeAfter(&n.Cond, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ walkBeforeAfter(&n.Else, before, after)
+ case *ast.CaseClause:
+ walkBeforeAfter(&n.List, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.SwitchStmt:
+ walkBeforeAfter(&n.Init, before, after)
+ walkBeforeAfter(&n.Tag, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.TypeSwitchStmt:
+ walkBeforeAfter(&n.Init, before, after)
+ walkBeforeAfter(&n.Assign, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.CommClause:
+ walkBeforeAfter(&n.Comm, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.SelectStmt:
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.ForStmt:
+ walkBeforeAfter(&n.Init, before, after)
+ walkBeforeAfter(&n.Cond, before, after)
+ walkBeforeAfter(&n.Post, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.RangeStmt:
+ walkBeforeAfter(&n.Key, before, after)
+ walkBeforeAfter(&n.Value, before, after)
+ walkBeforeAfter(&n.X, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+
+ case *ast.ImportSpec:
+ case *ast.ValueSpec:
+ walkBeforeAfter(&n.Type, before, after)
+ walkBeforeAfter(&n.Values, before, after)
+ walkBeforeAfter(&n.Names, before, after)
+ case *ast.TypeSpec:
+ walkBeforeAfter(&n.Type, before, after)
+
+ case *ast.BadDecl:
+ case *ast.GenDecl:
+ walkBeforeAfter(&n.Specs, before, after)
+ case *ast.FuncDecl:
+ if n.Recv != nil {
+ walkBeforeAfter(&n.Recv, before, after)
+ }
+ walkBeforeAfter(&n.Type, before, after)
+ if n.Body != nil {
+ walkBeforeAfter(&n.Body, before, after)
+ }
+
+ case *ast.File:
+ walkBeforeAfter(&n.Decls, before, after)
+
+ case *ast.Package:
+ walkBeforeAfter(&n.Files, before, after)
+
+ case []*ast.File:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ case []ast.Decl:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ case []ast.Expr:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ case []*ast.Ident:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ case []ast.Stmt:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ case []ast.Spec:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ }
+ after(x)
+}
+
+// imports returns true if f imports path.
+func imports(f *ast.File, path string) bool {
+ return importSpec(f, path) != nil
+}
+
+// importSpec returns the import spec if f imports path,
+// or nil otherwise.
+func importSpec(f *ast.File, path string) *ast.ImportSpec {
+ for _, s := range f.Imports {
+ if importPath(s) == path {
+ return s
+ }
+ }
+ return nil
+}
+
+// importPath returns the unquoted import path of s,
+// or "" if the path is not properly quoted.
+func importPath(s *ast.ImportSpec) string {
+ t, err := strconv.Unquote(s.Path.Value)
+ if err == nil {
+ return t
+ }
+ return ""
+}
+
+// declImports reports whether gen contains an import of path.
+func declImports(gen *ast.GenDecl, path string) bool {
+ if gen.Tok != token.IMPORT {
+ return false
+ }
+ for _, spec := range gen.Specs {
+ impspec := spec.(*ast.ImportSpec)
+ if importPath(impspec) == path {
+ return true
+ }
+ }
+ return false
+}
+
+// isPkgDot returns true if t is the expression "pkg.name"
+// where pkg is an imported identifier.
+func isPkgDot(t ast.Expr, pkg, name string) bool {
+ sel, ok := t.(*ast.SelectorExpr)
+ return ok && isTopName(sel.X, pkg) && sel.Sel.String() == name
+}
+
+// isPtrPkgDot returns true if f is the expression "*pkg.name"
+// where pkg is an imported identifier.
+func isPtrPkgDot(t ast.Expr, pkg, name string) bool {
+ ptr, ok := t.(*ast.StarExpr)
+ return ok && isPkgDot(ptr.X, pkg, name)
+}
+
+// isTopName returns true if n is a top-level unresolved identifier with the given name.
+func isTopName(n ast.Expr, name string) bool {
+ id, ok := n.(*ast.Ident)
+ return ok && id.Name == name && id.Obj == nil
+}
+
+// isName returns true if n is an identifier with the given name.
+func isName(n ast.Expr, name string) bool {
+ id, ok := n.(*ast.Ident)
+ return ok && id.String() == name
+}
+
+// isCall returns true if t is a call to pkg.name.
+func isCall(t ast.Expr, pkg, name string) bool {
+ call, ok := t.(*ast.CallExpr)
+ return ok && isPkgDot(call.Fun, pkg, name)
+}
+
+// If n is an *ast.Ident, isIdent returns it; otherwise isIdent returns nil.
+func isIdent(n interface{}) *ast.Ident {
+ id, _ := n.(*ast.Ident)
+ return id
+}
+
+// refersTo returns true if n is a reference to the same object as x.
+func refersTo(n ast.Node, x *ast.Ident) bool {
+ id, ok := n.(*ast.Ident)
+ // The test of id.Name == x.Name handles top-level unresolved
+ // identifiers, which all have Obj == nil.
+ return ok && id.Obj == x.Obj && id.Name == x.Name
+}
+
+// isBlank returns true if n is the blank identifier.
+func isBlank(n ast.Expr) bool {
+ return isName(n, "_")
+}
+
+// isEmptyString returns true if n is an empty string literal.
+func isEmptyString(n ast.Expr) bool {
+ lit, ok := n.(*ast.BasicLit)
+ return ok && lit.Kind == token.STRING && len(lit.Value) == 2
+}
+
+func warn(pos token.Pos, msg string, args ...interface{}) {
+ if pos.IsValid() {
+ msg = "%s: " + msg
+ arg1 := []interface{}{fset.Position(pos).String()}
+ args = append(arg1, args...)
+ }
+ fmt.Fprintf(os.Stderr, msg+"\n", args...)
+}
+
+// countUses returns the number of uses of the identifier x in scope.
+func countUses(x *ast.Ident, scope []ast.Stmt) int {
+ count := 0
+ ff := func(n interface{}) {
+ if n, ok := n.(ast.Node); ok && refersTo(n, x) {
+ count++
+ }
+ }
+ for _, n := range scope {
+ walk(n, ff)
+ }
+ return count
+}
+
+// rewriteUses replaces all uses of the identifier x and !x in scope
+// with f(x.Pos()) and fnot(x.Pos()).
+func rewriteUses(x *ast.Ident, f, fnot func(token.Pos) ast.Expr, scope []ast.Stmt) {
+ var lastF ast.Expr
+ ff := func(n interface{}) {
+ ptr, ok := n.(*ast.Expr)
+ if !ok {
+ return
+ }
+ nn := *ptr
+
+ // The child node was just walked and possibly replaced.
+ // If it was replaced and this is a negation, replace with fnot(p).
+ not, ok := nn.(*ast.UnaryExpr)
+ if ok && not.Op == token.NOT && not.X == lastF {
+ *ptr = fnot(nn.Pos())
+ return
+ }
+ if refersTo(nn, x) {
+ lastF = f(nn.Pos())
+ *ptr = lastF
+ }
+ }
+ for _, n := range scope {
+ walk(n, ff)
+ }
+}
+
+// assignsTo returns true if any of the code in scope assigns to or takes the address of x.
+func assignsTo(x *ast.Ident, scope []ast.Stmt) bool {
+ assigned := false
+ ff := func(n interface{}) {
+ if assigned {
+ return
+ }
+ switch n := n.(type) {
+ case *ast.UnaryExpr:
+ // use of &x
+ if n.Op == token.AND && refersTo(n.X, x) {
+ assigned = true
+ return
+ }
+ case *ast.AssignStmt:
+ for _, l := range n.Lhs {
+ if refersTo(l, x) {
+ assigned = true
+ return
+ }
+ }
+ }
+ }
+ for _, n := range scope {
+ if assigned {
+ break
+ }
+ walk(n, ff)
+ }
+ return assigned
+}
+
+// newPkgDot returns an ast.Expr referring to "pkg.name" at position pos.
+func newPkgDot(pos token.Pos, pkg, name string) ast.Expr {
+ return &ast.SelectorExpr{
+ X: &ast.Ident{
+ NamePos: pos,
+ Name: pkg,
+ },
+ Sel: &ast.Ident{
+ NamePos: pos,
+ Name: name,
+ },
+ }
+}
+
+// renameTop renames all references to the top-level name old.
+// It returns true if it makes any changes.
+func renameTop(f *ast.File, old, new string) bool {
+ var fixed bool
+
+ // Rename any conflicting imports
+ // (assuming package name is last element of path).
+ for _, s := range f.Imports {
+ if s.Name != nil {
+ if s.Name.Name == old {
+ s.Name.Name = new
+ fixed = true
+ }
+ } else {
+ _, thisName := path.Split(importPath(s))
+ if thisName == old {
+ s.Name = ast.NewIdent(new)
+ fixed = true
+ }
+ }
+ }
+
+ // Rename any top-level declarations.
+ for _, d := range f.Decls {
+ switch d := d.(type) {
+ case *ast.FuncDecl:
+ if d.Recv == nil && d.Name.Name == old {
+ d.Name.Name = new
+ d.Name.Obj.Name = new
+ fixed = true
+ }
+ case *ast.GenDecl:
+ for _, s := range d.Specs {
+ switch s := s.(type) {
+ case *ast.TypeSpec:
+ if s.Name.Name == old {
+ s.Name.Name = new
+ s.Name.Obj.Name = new
+ fixed = true
+ }
+ case *ast.ValueSpec:
+ for _, n := range s.Names {
+ if n.Name == old {
+ n.Name = new
+ n.Obj.Name = new
+ fixed = true
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Rename top-level old to new, both unresolved names
+ // (probably defined in another file) and names that resolve
+ // to a declaration we renamed.
+ walk(f, func(n interface{}) {
+ id, ok := n.(*ast.Ident)
+ if ok && isTopName(id, old) {
+ id.Name = new
+ fixed = true
+ }
+ if ok && id.Obj != nil && id.Name == old && id.Obj.Name == new {
+ id.Name = id.Obj.Name
+ fixed = true
+ }
+ })
+
+ return fixed
+}
+
+// matchLen returns the length of the longest prefix shared by x and y.
+func matchLen(x, y string) int {
+ i := 0
+ for i < len(x) && i < len(y) && x[i] == y[i] {
+ i++
+ }
+ return i
+}
+
+// addImport adds the import path to the file f, if absent.
+func addImport(f *ast.File, ipath string) (added bool) {
+ if imports(f, ipath) {
+ return false
+ }
+
+ // Determine name of import.
+ // Assume added imports follow convention of using last element.
+ _, name := path.Split(ipath)
+
+ // Rename any conflicting top-level references from name to name_.
+ renameTop(f, name, name+"_")
+
+ newImport := &ast.ImportSpec{
+ Path: &ast.BasicLit{
+ Kind: token.STRING,
+ Value: strconv.Quote(ipath),
+ },
+ }
+
+ // Find an import decl to add to.
+ var (
+ bestMatch = -1
+ lastImport = -1
+ impDecl *ast.GenDecl
+ impIndex = -1
+ )
+ for i, decl := range f.Decls {
+ gen, ok := decl.(*ast.GenDecl)
+ if ok && gen.Tok == token.IMPORT {
+ lastImport = i
+ // Do not add to import "C", to avoid disrupting the
+ // association with its doc comment, breaking cgo.
+ if declImports(gen, "C") {
+ continue
+ }
+
+ // Compute longest shared prefix with imports in this block.
+ for j, spec := range gen.Specs {
+ impspec := spec.(*ast.ImportSpec)
+ n := matchLen(importPath(impspec), ipath)
+ if n > bestMatch {
+ bestMatch = n
+ impDecl = gen
+ impIndex = j
+ }
+ }
+ }
+ }
+
+ // If no import decl found, add one after the last import.
+ if impDecl == nil {
+ impDecl = &ast.GenDecl{
+ Tok: token.IMPORT,
+ }
+ f.Decls = append(f.Decls, nil)
+ copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
+ f.Decls[lastImport+1] = impDecl
+ }
+
+ // Ensure the import decl has parentheses, if needed.
+ if len(impDecl.Specs) > 0 && !impDecl.Lparen.IsValid() {
+ impDecl.Lparen = impDecl.Pos()
+ }
+
+ insertAt := impIndex + 1
+ if insertAt == 0 {
+ insertAt = len(impDecl.Specs)
+ }
+ impDecl.Specs = append(impDecl.Specs, nil)
+ copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
+ impDecl.Specs[insertAt] = newImport
+ if insertAt > 0 {
+ // Assign same position as the previous import,
+ // so that the sorter sees it as being in the same block.
+ prev := impDecl.Specs[insertAt-1]
+ newImport.Path.ValuePos = prev.Pos()
+ newImport.EndPos = prev.Pos()
+ }
+
+ f.Imports = append(f.Imports, newImport)
+ return true
+}
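+
+// exampleImportFixup is an illustrative sketch (not part of the upstream
+// tool) of how a fix typically combines these helpers: add the import that
+// rewritten code needs, then drop any import that is no longer referenced.
+// The import paths are chosen purely for demonstration.
+func exampleImportFixup(f *ast.File) bool {
+ changed := addImport(f, "google.golang.org/appengine")
+ if !usesImport(f, "appengine") {
+ changed = deleteImport(f, "appengine") || changed
+ }
+ return changed
+}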
+
+// deleteImport deletes the import path from the file f, if present.
+func deleteImport(f *ast.File, path string) (deleted bool) {
+ oldImport := importSpec(f, path)
+
+ // Find the import node that imports path, if any.
+ for i, decl := range f.Decls {
+ gen, ok := decl.(*ast.GenDecl)
+ if !ok || gen.Tok != token.IMPORT {
+ continue
+ }
+ for j, spec := range gen.Specs {
+ impspec := spec.(*ast.ImportSpec)
+ if oldImport != impspec {
+ continue
+ }
+
+ // We found an import spec that imports path.
+ // Delete it.
+ deleted = true
+ copy(gen.Specs[j:], gen.Specs[j+1:])
+ gen.Specs = gen.Specs[:len(gen.Specs)-1]
+
+ // If this was the last import spec in this decl,
+ // delete the decl, too.
+ if len(gen.Specs) == 0 {
+ copy(f.Decls[i:], f.Decls[i+1:])
+ f.Decls = f.Decls[:len(f.Decls)-1]
+ } else if len(gen.Specs) == 1 {
+ gen.Lparen = token.NoPos // drop parens
+ }
+ if j > 0 {
+ // We deleted an entry but now there will be
+ // a blank line-sized hole where the import was.
+ // Close the hole by making the previous
+ // import appear to "end" where this one did.
+ gen.Specs[j-1].(*ast.ImportSpec).EndPos = impspec.End()
+ }
+ break
+ }
+ }
+
+ // Delete it from f.Imports.
+ for i, imp := range f.Imports {
+ if imp == oldImport {
+ copy(f.Imports[i:], f.Imports[i+1:])
+ f.Imports = f.Imports[:len(f.Imports)-1]
+ break
+ }
+ }
+
+ return
+}
+
+// rewriteImport rewrites any import of path oldPath to path newPath.
+func rewriteImport(f *ast.File, oldPath, newPath string) (rewrote bool) {
+ for _, imp := range f.Imports {
+ if importPath(imp) == oldPath {
+ rewrote = true
+ // record old End, because the default is to compute
+ // it using the length of imp.Path.Value.
+ imp.EndPos = imp.End()
+ imp.Path.Value = strconv.Quote(newPath)
+ }
+ }
+ return
+}
+
+// usesImport reports whether the import with the given path is used
+// anywhere in f.
+func usesImport(f *ast.File, path string) (used bool) {
+ spec := importSpec(f, path)
+ if spec == nil {
+ return
+ }
+
+ name := spec.Name.String()
+ switch name {
+ case "<nil>":
+ // If the package name is not explicitly specified,
+ // make an educated guess. This is not guaranteed to be correct.
+ lastSlash := strings.LastIndex(path, "/")
+ if lastSlash == -1 {
+ name = path
+ } else {
+ name = path[lastSlash+1:]
+ }
+ case "_", ".":
+ // Not sure if this import is used - err on the side of caution.
+ return true
+ }
+
+ walk(f, func(n interface{}) {
+ sel, ok := n.(*ast.SelectorExpr)
+ if ok && isTopName(sel.X, name) {
+ used = true
+ }
+ })
+
+ return
+}
+
+func expr(s string) ast.Expr {
+ x, err := parser.ParseExpr(s)
+ if err != nil {
+ panic("parsing " + s + ": " + err.Error())
+ }
+ // Remove position information to avoid spurious newlines.
+ killPos(reflect.ValueOf(x))
+ return x
+}
+
+var posType = reflect.TypeOf(token.Pos(0))
+
+func killPos(v reflect.Value) {
+ switch v.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ if !v.IsNil() {
+ killPos(v.Elem())
+ }
+ case reflect.Slice:
+ n := v.Len()
+ for i := 0; i < n; i++ {
+ killPos(v.Index(i))
+ }
+ case reflect.Struct:
+ n := v.NumField()
+ for i := 0; i < n; i++ {
+ f := v.Field(i)
+ if f.Type() == posType {
+ f.SetInt(0)
+ continue
+ }
+ killPos(f)
+ }
+ }
+}
+
+// A Rename describes a single renaming.
+type rename struct {
+ OldImport string // only apply rename if this import is present
+ NewImport string // add this import during rewrite
+ Old string // old name: p.T or *p.T
+ New string // new name: p.T or *p.T
+}
+
+func renameFix(tab []rename) func(*ast.File) bool {
+ return func(f *ast.File) bool {
+ return renameFixTab(f, tab)
+ }
+}
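+
+// exampleRenameTab is a hypothetical rename table, shown only to illustrate
+// the p.T / *p.T form that renameFix expects; it is not registered as a real
+// fix, and the import paths and names are invented for demonstration.
+var exampleRenameTab = []rename{
+ {
+ OldImport: "appengine",
+ NewImport: "golang.org/x/net/context",
+ Old: "appengine.Context",
+ New: "context.Context",
+ },
+}
+
+// A table like this is typically turned into a fix function with renameFix:
+var _ = renameFix(exampleRenameTab)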
+
+func parseName(s string) (ptr bool, pkg, nam string) {
+ i := strings.Index(s, ".")
+ if i < 0 {
+ panic("parseName: invalid name " + s)
+ }
+ if strings.HasPrefix(s, "*") {
+ ptr = true
+ s = s[1:]
+ i--
+ }
+ pkg = s[:i]
+ nam = s[i+1:]
+ return
+}
+
+func renameFixTab(f *ast.File, tab []rename) bool {
+ fixed := false
+ added := map[string]bool{}
+ check := map[string]bool{}
+ for _, t := range tab {
+ if !imports(f, t.OldImport) {
+ continue
+ }
+ optr, opkg, onam := parseName(t.Old)
+ walk(f, func(n interface{}) {
+ np, ok := n.(*ast.Expr)
+ if !ok {
+ return
+ }
+ x := *np
+ if optr {
+ p, ok := x.(*ast.StarExpr)
+ if !ok {
+ return
+ }
+ x = p.X
+ }
+ if !isPkgDot(x, opkg, onam) {
+ return
+ }
+ if t.NewImport != "" && !added[t.NewImport] {
+ addImport(f, t.NewImport)
+ added[t.NewImport] = true
+ }
+ *np = expr(t.New)
+ check[t.OldImport] = true
+ fixed = true
+ })
+ }
+
+ for ipath := range check {
+ if !usesImport(f, ipath) {
+ deleteImport(f, ipath)
+ }
+ }
+ return fixed
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/main.go b/vendor/google.golang.org/appengine/cmd/aefix/main.go
new file mode 100644
index 0000000..8e193a6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/main.go
@@ -0,0 +1,258 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+var (
+ fset = token.NewFileSet()
+ exitCode = 0
+)
+
+var allowedRewrites = flag.String("r", "",
+ "restrict the rewrites to this comma-separated list")
+
+var forceRewrites = flag.String("force", "",
+ "force these fixes to run even if the code looks updated")
+
+var allowed, force map[string]bool
+
+var doDiff = flag.Bool("diff", false, "display diffs instead of rewriting files")
+
+// enable for debugging fix failures
+const debug = false // display incorrectly reformatted source and exit
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: aefix [-diff] [-r fixname,...] [-force fixname,...] [path ...]\n")
+ flag.PrintDefaults()
+ fmt.Fprintf(os.Stderr, "\nAvailable rewrites are:\n")
+ sort.Sort(byName(fixes))
+ for _, f := range fixes {
+ fmt.Fprintf(os.Stderr, "\n%s\n", f.name)
+ desc := strings.TrimSpace(f.desc)
+ desc = strings.Replace(desc, "\n", "\n\t", -1)
+ fmt.Fprintf(os.Stderr, "\t%s\n", desc)
+ }
+ os.Exit(2)
+}
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+
+ sort.Sort(byDate(fixes))
+
+ if *allowedRewrites != "" {
+ allowed = make(map[string]bool)
+ for _, f := range strings.Split(*allowedRewrites, ",") {
+ allowed[f] = true
+ }
+ }
+
+ if *forceRewrites != "" {
+ force = make(map[string]bool)
+ for _, f := range strings.Split(*forceRewrites, ",") {
+ force[f] = true
+ }
+ }
+
+ if flag.NArg() == 0 {
+ if err := processFile("standard input", true); err != nil {
+ report(err)
+ }
+ os.Exit(exitCode)
+ }
+
+ for i := 0; i < flag.NArg(); i++ {
+ path := flag.Arg(i)
+ switch dir, err := os.Stat(path); {
+ case err != nil:
+ report(err)
+ case dir.IsDir():
+ walkDir(path)
+ default:
+ if err := processFile(path, false); err != nil {
+ report(err)
+ }
+ }
+ }
+
+ os.Exit(exitCode)
+}
+
+const parserMode = parser.ParseComments
+
+func gofmtFile(f *ast.File) ([]byte, error) {
+ var buf bytes.Buffer
+ if err := format.Node(&buf, fset, f); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func processFile(filename string, useStdin bool) error {
+ var f *os.File
+ var err error
+ var fixlog bytes.Buffer
+
+ if useStdin {
+ f = os.Stdin
+ } else {
+ f, err = os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ }
+
+ src, err := ioutil.ReadAll(f)
+ if err != nil {
+ return err
+ }
+
+ file, err := parser.ParseFile(fset, filename, src, parserMode)
+ if err != nil {
+ return err
+ }
+
+ // Apply all fixes to file.
+ newFile := file
+ fixed := false
+ for _, fix := range fixes {
+ if allowed != nil && !allowed[fix.name] {
+ continue
+ }
+ if fix.f(newFile) {
+ fixed = true
+ fmt.Fprintf(&fixlog, " %s", fix.name)
+
+ // AST changed.
+ // Print and parse, to update any missing scoping
+ // or position information for subsequent fixers.
+ newSrc, err := gofmtFile(newFile)
+ if err != nil {
+ return err
+ }
+ newFile, err = parser.ParseFile(fset, filename, newSrc, parserMode)
+ if err != nil {
+ if debug {
+ fmt.Printf("%s", newSrc)
+ report(err)
+ os.Exit(exitCode)
+ }
+ return err
+ }
+ }
+ }
+ if !fixed {
+ return nil
+ }
+ fmt.Fprintf(os.Stderr, "%s: fixed %s\n", filename, fixlog.String()[1:])
+
+ // Print AST. We did that after each fix, so this appears
+ // redundant, but it is necessary to generate gofmt-compatible
+ // source code in a few cases. The official gofmt style is the
+ // output of the printer run on a standard AST generated by the parser,
+ // but the source we generated inside the loop above is the
+ // output of the printer run on a mangled AST generated by a fixer.
+ newSrc, err := gofmtFile(newFile)
+ if err != nil {
+ return err
+ }
+
+ if *doDiff {
+ data, err := diff(src, newSrc)
+ if err != nil {
+ return fmt.Errorf("computing diff: %s", err)
+ }
+ fmt.Printf("diff %s fixed/%s\n", filename, filename)
+ os.Stdout.Write(data)
+ return nil
+ }
+
+ if useStdin {
+ os.Stdout.Write(newSrc)
+ return nil
+ }
+
+ return ioutil.WriteFile(f.Name(), newSrc, 0)
+}
+
+var gofmtBuf bytes.Buffer
+
+func gofmt(n interface{}) string {
+ gofmtBuf.Reset()
+ if err := format.Node(&gofmtBuf, fset, n); err != nil {
+ return "<" + err.Error() + ">"
+ }
+ return gofmtBuf.String()
+}
+
+func report(err error) {
+ scanner.PrintError(os.Stderr, err)
+ exitCode = 2
+}
+
+func walkDir(path string) {
+ filepath.Walk(path, visitFile)
+}
+
+func visitFile(path string, f os.FileInfo, err error) error {
+ if err == nil && isGoFile(f) {
+ err = processFile(path, false)
+ }
+ if err != nil {
+ report(err)
+ }
+ return nil
+}
+
+func isGoFile(f os.FileInfo) bool {
+ // ignore non-Go files
+ name := f.Name()
+ return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go")
+}
+
+// diff returns a unified diff of b1 and b2, computed by shelling out to the
+// system diff command.
+func diff(b1, b2 []byte) (data []byte, err error) {
+ f1, err := ioutil.TempFile("", "go-fix")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(f1.Name())
+ defer f1.Close()
+
+ f2, err := ioutil.TempFile("", "go-fix")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(f2.Name())
+ defer f2.Close()
+
+ f1.Write(b1)
+ f2.Write(b2)
+
+ data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
+ if len(data) > 0 {
+ // diff exits with a non-zero status when the files don't match.
+ // Ignore that failure as long as we get output.
+ err = nil
+ }
+ return
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/main_test.go b/vendor/google.golang.org/appengine/cmd/aefix/main_test.go
new file mode 100644
index 0000000..2151bf2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/main_test.go
@@ -0,0 +1,129 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "go/ast"
+ "go/parser"
+ "strings"
+ "testing"
+)
+
+type testCase struct {
+ Name string
+ Fn func(*ast.File) bool
+ In string
+ Out string
+}
+
+var testCases []testCase
+
+func addTestCases(t []testCase, fn func(*ast.File) bool) {
+ // If fn is non-nil, use it for any test case that does not set its own
+ // Fn, to avoid repetition in the case definitions.
+ if fn != nil {
+ for i := range t {
+ if t[i].Fn == nil {
+ t[i].Fn = fn
+ }
+ }
+ }
+ testCases = append(testCases, t...)
+}
+
+func fnop(*ast.File) bool { return false }
+
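+// exampleRegisterTests is an illustrative sketch (not used by the real
+// tests) of how a fix's test file would typically feed addTestCases; the
+// fix name and the In/Out sources are invented for demonstration.
+func exampleRegisterTests() {
+ addTestCases([]testCase{
+ {
+ Name: "examplefix.0",
+ In: "package main\n",
+ Out: "package main\n",
+ },
+ }, fnop)
+}
+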
+func parseFixPrint(t *testing.T, fn func(*ast.File) bool, desc, in string, mustBeGofmt bool) (out string, fixed, ok bool) {
+ file, err := parser.ParseFile(fset, desc, in, parserMode)
+ if err != nil {
+ t.Errorf("%s: parsing: %v", desc, err)
+ return
+ }
+
+ outb, err := gofmtFile(file)
+ if err != nil {
+ t.Errorf("%s: printing: %v", desc, err)
+ return
+ }
+ if s := string(outb); in != s && mustBeGofmt {
+ t.Errorf("%s: not gofmt-formatted.\n--- %s\n%s\n--- %s | gofmt\n%s",
+ desc, desc, in, desc, s)
+ tdiff(t, in, s)
+ return
+ }
+
+ if fn == nil {
+ for _, fix := range fixes {
+ if fix.f(file) {
+ fixed = true
+ }
+ }
+ } else {
+ fixed = fn(file)
+ }
+
+ outb, err = gofmtFile(file)
+ if err != nil {
+ t.Errorf("%s: printing: %v", desc, err)
+ return
+ }
+
+ return string(outb), fixed, true
+}
+
+func TestRewrite(t *testing.T) {
+ for _, tt := range testCases {
+ // Apply fix: should get tt.Out.
+ out, fixed, ok := parseFixPrint(t, tt.Fn, tt.Name, tt.In, true)
+ if !ok {
+ continue
+ }
+
+ // reformat to get printing right
+ out, _, ok = parseFixPrint(t, fnop, tt.Name, out, false)
+ if !ok {
+ continue
+ }
+
+ if out != tt.Out {
+ t.Errorf("%s: incorrect output.\n", tt.Name)
+ if !strings.HasPrefix(tt.Name, "testdata/") {
+ t.Errorf("--- have\n%s\n--- want\n%s", out, tt.Out)
+ }
+ tdiff(t, out, tt.Out)
+ continue
+ }
+
+ if changed := out != tt.In; changed != fixed {
+ t.Errorf("%s: changed=%v != fixed=%v", tt.Name, changed, fixed)
+ continue
+ }
+
+ // Should not change if run again.
+ out2, fixed2, ok := parseFixPrint(t, tt.Fn, tt.Name+" output", out, true)
+ if !ok {
+ continue
+ }
+
+ if fixed2 {
+ t.Errorf("%s: applied fixes during second round", tt.Name)
+ continue
+ }
+
+ if out2 != out {
+ t.Errorf("%s: changed output after second round of fixes.\n--- output after first round\n%s\n--- output after second round\n%s",
+ tt.Name, out, out2)
+ tdiff(t, out, out2)
+ }
+ }
+}
+
+func tdiff(t *testing.T, a, b string) {
+ data, err := diff([]byte(a), []byte(b))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ t.Error(string(data))
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/typecheck.go b/vendor/google.golang.org/appengine/cmd/aefix/typecheck.go
new file mode 100644
index 0000000..d54d375
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/typecheck.go
@@ -0,0 +1,673 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "os"
+ "reflect"
+ "strings"
+)
+
+// Partial type checker.
+//
+// The fact that it is partial is very important: the input is
+// an AST and a description of some type information to
+// assume about one or more packages, but not all the
+// packages that the program imports. The checker is
+// expected to do as much as it can with what it has been
+// given. There is not enough information supplied to do
+// a full type check, but the type checker is expected to
+// apply information that can be derived from variable
+// declarations, function and method returns, and type switches
+// as far as it can, so that the caller can still tell the types
+// of expressions relevant to a particular fix.
+//
+// TODO(rsc,gri): Replace with go/typechecker.
+// Doing that could be an interesting test case for go/typechecker:
+// the constraints about working with partial information will
+// likely exercise it in interesting ways. The ideal interface would
+// be to pass typecheck a map from importpath to package API text
+// (Go source code), but for now we use data structures (TypeConfig, Type).
+//
+// The strings mostly use gofmt form.
+//
+// A Field or FieldList has as its type a comma-separated list
+// of the types of the fields. For example, the field list
+// x, y, z int
+// has type "int, int, int".
+
+// The prefix "type " is the type of a type.
+// For example, given
+// var x int
+// type T int
+// x's type is "int" but T's type is "type int".
+// mkType inserts the "type " prefix.
+// getType removes it.
+// isType tests for it.
+
+func mkType(t string) string {
+ return "type " + t
+}
+
+func getType(t string) string {
+ if !isType(t) {
+ return ""
+ }
+ return t[len("type "):]
+}
+
+func isType(t string) bool {
+ return strings.HasPrefix(t, "type ")
+}
+
+// TypeConfig describes the universe of relevant types.
+// For ease of creation, the types are all referred to by string
+// name (e.g., "reflect.Value"). TypeByName is the only place
+// where the strings are resolved.
+
+type TypeConfig struct {
+ Type map[string]*Type
+ Var map[string]string
+ Func map[string]string
+}
+
+// typeof returns the type of the given name, which may be of
+// the form "x" or "p.X".
+func (cfg *TypeConfig) typeof(name string) string {
+ if cfg.Var != nil {
+ if t := cfg.Var[name]; t != "" {
+ return t
+ }
+ }
+ if cfg.Func != nil {
+ if t := cfg.Func[name]; t != "" {
+ return "func()" + t
+ }
+ }
+ return ""
+}
+
+// Type describes the Fields and Methods of a type.
+// If the field or method cannot be found there, it is next
+// looked for in the Embed list.
+type Type struct {
+ Field map[string]string // map field name to type
+ Method map[string]string // map method name to comma-separated return types (should start with "func ")
+ Embed []string // list of types this type embeds (for extra methods)
+ Def string // definition of named type
+}
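+
+// exampleTypeConfig is a hypothetical configuration, shown only to
+// illustrate the string-keyed shape of TypeConfig and Type; the package,
+// type, and method names are invented for demonstration.
+var exampleTypeConfig = &TypeConfig{
+ Type: map[string]*Type{
+ "example.Conn": {
+ Field: map[string]string{"Addr": "string"},
+ Method: map[string]string{"Read": "func([]byte) (int, error)"},
+ },
+ },
+ Var: map[string]string{"example.Default": "example.Conn"},
+ Func: map[string]string{"example.Dial": "*example.Conn"},
+}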
+
+// dot returns the type of "typ.name", making its decision
+// using the type information in cfg.
+func (typ *Type) dot(cfg *TypeConfig, name string) string {
+ if typ.Field != nil {
+ if t := typ.Field[name]; t != "" {
+ return t
+ }
+ }
+ if typ.Method != nil {
+ if t := typ.Method[name]; t != "" {
+ return t
+ }
+ }
+
+ for _, e := range typ.Embed {
+ etyp := cfg.Type[e]
+ if etyp != nil {
+ if t := etyp.dot(cfg, name); t != "" {
+ return t
+ }
+ }
+ }
+
+ return ""
+}
+
+// typecheck type checks the AST f assuming the information in cfg.
+// It returns two maps with type information:
+// typeof maps AST nodes to type information in gofmt string form.
+// assign maps each type string to the list of expressions of some other
+// (or unknown) recorded type that were assigned to a value of that type.
+func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[interface{}]string, assign map[string][]interface{}) {
+ typeof = make(map[interface{}]string)
+ assign = make(map[string][]interface{})
+ cfg1 := &TypeConfig{}
+ *cfg1 = *cfg // make copy so we can add locally
+ copied := false
+
+ // gather function declarations
+ for _, decl := range f.Decls {
+ fn, ok := decl.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+ typecheck1(cfg, fn.Type, typeof, assign)
+ t := typeof[fn.Type]
+ if fn.Recv != nil {
+ // The receiver must be a type.
+ rcvr := typeof[fn.Recv]
+ if !isType(rcvr) {
+ if len(fn.Recv.List) != 1 {
+ continue
+ }
+ rcvr = mkType(gofmt(fn.Recv.List[0].Type))
+ typeof[fn.Recv.List[0].Type] = rcvr
+ }
+ rcvr = getType(rcvr)
+ if rcvr != "" && rcvr[0] == '*' {
+ rcvr = rcvr[1:]
+ }
+ typeof[rcvr+"."+fn.Name.Name] = t
+ } else {
+ if isType(t) {
+ t = getType(t)
+ } else {
+ t = gofmt(fn.Type)
+ }
+ typeof[fn.Name] = t
+
+ // Record typeof[fn.Name.Obj] for future references to fn.Name.
+ typeof[fn.Name.Obj] = t
+ }
+ }
+
+ // gather struct declarations
+ for _, decl := range f.Decls {
+ d, ok := decl.(*ast.GenDecl)
+ if ok {
+ for _, s := range d.Specs {
+ switch s := s.(type) {
+ case *ast.TypeSpec:
+ if cfg1.Type[s.Name.Name] != nil {
+ break
+ }
+ if !copied {
+ copied = true
+ // Copy the map lazily, now that we need to add to it.
+ cfg1.Type = make(map[string]*Type)
+ for k, v := range cfg.Type {
+ cfg1.Type[k] = v
+ }
+ }
+ t := &Type{Field: map[string]string{}}
+ cfg1.Type[s.Name.Name] = t
+ switch st := s.Type.(type) {
+ case *ast.StructType:
+ for _, f := range st.Fields.List {
+ for _, n := range f.Names {
+ t.Field[n.Name] = gofmt(f.Type)
+ }
+ }
+ case *ast.ArrayType, *ast.StarExpr, *ast.MapType:
+ t.Def = gofmt(st)
+ }
+ }
+ }
+ }
+ }
+
+ typecheck1(cfg1, f, typeof, assign)
+ return typeof, assign
+}
+
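+// exampleTypecheck is an illustrative sketch (not part of the upstream tool)
+// of how a fix would typically run the partial checker and then consult the
+// resulting typeof map while walking the file.
+func exampleTypecheck(f *ast.File, n ast.Expr) string {
+ // A real fix would pass its own package description (for instance a
+ // TypeConfig like the hypothetical one above) instead of an empty one.
+ typeof, _ := typecheck(&TypeConfig{}, f)
+ return typeof[n] // "" when the partial checker could not infer a type
+}
+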
+// makeExprList converts a list of identifiers into a list of expressions.
+func makeExprList(a []*ast.Ident) []ast.Expr {
+ var b []ast.Expr
+ for _, x := range a {
+ b = append(b, x)
+ }
+ return b
+}
+
+// typecheck1 is the recursive form of typecheck.
+// It is like typecheck but adds to the information in typeof
+// instead of allocating a new map.
+func typecheck1(cfg *TypeConfig, f interface{}, typeof map[interface{}]string, assign map[string][]interface{}) {
+ // set sets the type of n to typ.
+ // If isDecl is true, n is being declared.
+ set := func(n ast.Expr, typ string, isDecl bool) {
+ if typeof[n] != "" || typ == "" {
+ if typeof[n] != typ {
+ assign[typ] = append(assign[typ], n)
+ }
+ return
+ }
+ typeof[n] = typ
+
+ // If we obtained typ from the declaration of x,
+ // propagate the type to all of its uses.
+ // The !isDecl case is a cheat here, but it makes
+ // up in some cases for not paying attention to
+ // struct fields. The real type checker will be
+ // more accurate so we won't need the cheat.
+ if id, ok := n.(*ast.Ident); ok && id.Obj != nil && (isDecl || typeof[id.Obj] == "") {
+ typeof[id.Obj] = typ
+ }
+ }
+
+ // Type-check an assignment lhs = rhs.
+ // If isDecl is true, this is := so we can update
+ // the types of the objects that lhs refers to.
+ typecheckAssign := func(lhs, rhs []ast.Expr, isDecl bool) {
+ if len(lhs) > 1 && len(rhs) == 1 {
+ if _, ok := rhs[0].(*ast.CallExpr); ok {
+ t := split(typeof[rhs[0]])
+ // Lists should have same length but may not; pair what can be paired.
+ for i := 0; i < len(lhs) && i < len(t); i++ {
+ set(lhs[i], t[i], isDecl)
+ }
+ return
+ }
+ }
+ if len(lhs) == 1 && len(rhs) == 2 {
+ // x = y, ok
+ rhs = rhs[:1]
+ } else if len(lhs) == 2 && len(rhs) == 1 {
+ // x, ok = y
+ lhs = lhs[:1]
+ }
+
+ // Match as much as we can.
+ for i := 0; i < len(lhs) && i < len(rhs); i++ {
+ x, y := lhs[i], rhs[i]
+ if typeof[y] != "" {
+ set(x, typeof[y], isDecl)
+ } else {
+ set(y, typeof[x], false)
+ }
+ }
+ }
+
+ expand := func(s string) string {
+ typ := cfg.Type[s]
+ if typ != nil && typ.Def != "" {
+ return typ.Def
+ }
+ return s
+ }
+
+ // The main type check is a recursive algorithm implemented
+ // by walkBeforeAfter(n, before, after).
+ // Most of it is bottom-up, but in a few places we need
+ // to know the type of the function we are checking.
+ // The before function records that information on
+ // the curfn stack.
+ var curfn []*ast.FuncType
+
+ before := func(n interface{}) {
+ // push function type on stack
+ switch n := n.(type) {
+ case *ast.FuncDecl:
+ curfn = append(curfn, n.Type)
+ case *ast.FuncLit:
+ curfn = append(curfn, n.Type)
+ }
+ }
+
+ // After is the real type checker.
+ after := func(n interface{}) {
+ if n == nil {
+ return
+ }
+ if false && reflect.TypeOf(n).Kind() == reflect.Ptr { // debugging trace
+ defer func() {
+ if t := typeof[n]; t != "" {
+ pos := fset.Position(n.(ast.Node).Pos())
+ fmt.Fprintf(os.Stderr, "%s: typeof[%s] = %s\n", pos, gofmt(n), t)
+ }
+ }()
+ }
+
+ switch n := n.(type) {
+ case *ast.FuncDecl, *ast.FuncLit:
+ // pop function type off stack
+ curfn = curfn[:len(curfn)-1]
+
+ case *ast.FuncType:
+ typeof[n] = mkType(joinFunc(split(typeof[n.Params]), split(typeof[n.Results])))
+
+ case *ast.FieldList:
+ // Field list is concatenation of sub-lists.
+ t := ""
+ for _, field := range n.List {
+ if t != "" {
+ t += ", "
+ }
+ t += typeof[field]
+ }
+ typeof[n] = t
+
+ case *ast.Field:
+ // Field is one instance of the type per name.
+ all := ""
+ t := typeof[n.Type]
+ if !isType(t) {
+ // Create a type, because it is typically *T or *p.T
+ // and we might care about that type.
+ t = mkType(gofmt(n.Type))
+ typeof[n.Type] = t
+ }
+ t = getType(t)
+ if len(n.Names) == 0 {
+ all = t
+ } else {
+ for _, id := range n.Names {
+ if all != "" {
+ all += ", "
+ }
+ all += t
+ typeof[id.Obj] = t
+ typeof[id] = t
+ }
+ }
+ typeof[n] = all
+
+ case *ast.ValueSpec:
+ // var declaration. Use type if present.
+ if n.Type != nil {
+ t := typeof[n.Type]
+ if !isType(t) {
+ t = mkType(gofmt(n.Type))
+ typeof[n.Type] = t
+ }
+ t = getType(t)
+ for _, id := range n.Names {
+ set(id, t, true)
+ }
+ }
+ // Now treat same as assignment.
+ typecheckAssign(makeExprList(n.Names), n.Values, true)
+
+ case *ast.AssignStmt:
+ typecheckAssign(n.Lhs, n.Rhs, n.Tok == token.DEFINE)
+
+ case *ast.Ident:
+ // Identifier can take its type from underlying object.
+ if t := typeof[n.Obj]; t != "" {
+ typeof[n] = t
+ }
+
+ case *ast.SelectorExpr:
+ // Field or method.
+ name := n.Sel.Name
+ if t := typeof[n.X]; t != "" {
+ if strings.HasPrefix(t, "*") {
+ t = t[1:] // implicit *
+ }
+ if typ := cfg.Type[t]; typ != nil {
+ if t := typ.dot(cfg, name); t != "" {
+ typeof[n] = t
+ return
+ }
+ }
+ tt := typeof[t+"."+name]
+ if isType(tt) {
+ typeof[n] = getType(tt)
+ return
+ }
+ }
+ // Package selector.
+ if x, ok := n.X.(*ast.Ident); ok && x.Obj == nil {
+ str := x.Name + "." + name
+ if cfg.Type[str] != nil {
+ typeof[n] = mkType(str)
+ return
+ }
+ if t := cfg.typeof(x.Name + "." + name); t != "" {
+ typeof[n] = t
+ return
+ }
+ }
+
+ case *ast.CallExpr:
+ // make(T) has type T.
+ if isTopName(n.Fun, "make") && len(n.Args) >= 1 {
+ typeof[n] = gofmt(n.Args[0])
+ return
+ }
+ // new(T) has type *T
+ if isTopName(n.Fun, "new") && len(n.Args) == 1 {
+ typeof[n] = "*" + gofmt(n.Args[0])
+ return
+ }
+ // Otherwise, use the function's type to determine the result type
+ // and the argument types.
+ t := typeof[n.Fun]
+ in, out := splitFunc(t)
+ if in == nil && out == nil {
+ return
+ }
+ typeof[n] = join(out)
+ for i, arg := range n.Args {
+ if i >= len(in) {
+ break
+ }
+ if typeof[arg] == "" {
+ typeof[arg] = in[i]
+ }
+ }
+
+ case *ast.TypeAssertExpr:
+ // x.(type) has type of x.
+ if n.Type == nil {
+ typeof[n] = typeof[n.X]
+ return
+ }
+ // x.(T) has type T.
+ if t := typeof[n.Type]; isType(t) {
+ typeof[n] = getType(t)
+ } else {
+ typeof[n] = gofmt(n.Type)
+ }
+
+ case *ast.SliceExpr:
+ // x[i:j] has type of x.
+ typeof[n] = typeof[n.X]
+
+ case *ast.IndexExpr:
+ // x[i] has the element type of x's type (the value type, for a map).
+ t := expand(typeof[n.X])
+ if strings.HasPrefix(t, "[") || strings.HasPrefix(t, "map[") {
+ // Lazy: assume there are no nested [] in the array
+ // length or map key type.
+ if i := strings.Index(t, "]"); i >= 0 {
+ typeof[n] = t[i+1:]
+ }
+ }
+
+ case *ast.StarExpr:
+ // *x for x of type *T has type T when x is an expr.
+ // We don't use the result when *x is a type, but
+ // compute it anyway.
+ t := expand(typeof[n.X])
+ if isType(t) {
+ typeof[n] = "type *" + getType(t)
+ } else if strings.HasPrefix(t, "*") {
+ typeof[n] = t[len("*"):]
+ }
+
+ case *ast.UnaryExpr:
+ // &x for x of type T has type *T.
+ t := typeof[n.X]
+ if t != "" && n.Op == token.AND {
+ typeof[n] = "*" + t
+ }
+
+ case *ast.CompositeLit:
+ // T{...} has type T.
+ typeof[n] = gofmt(n.Type)
+
+ case *ast.ParenExpr:
+ // (x) has type of x.
+ typeof[n] = typeof[n.X]
+
+ case *ast.RangeStmt:
+ t := expand(typeof[n.X])
+ if t == "" {
+ return
+ }
+ var key, value string
+ if t == "string" {
+ key, value = "int", "rune"
+ } else if strings.HasPrefix(t, "[") {
+ key = "int"
+ if i := strings.Index(t, "]"); i >= 0 {
+ value = t[i+1:]
+ }
+ } else if strings.HasPrefix(t, "map[") {
+ if i := strings.Index(t, "]"); i >= 0 {
+ key, value = t[4:i], t[i+1:]
+ }
+ }
+ changed := false
+ if n.Key != nil && key != "" {
+ changed = true
+ set(n.Key, key, n.Tok == token.DEFINE)
+ }
+ if n.Value != nil && value != "" {
+ changed = true
+ set(n.Value, value, n.Tok == token.DEFINE)
+ }
+ // Unfortunately the body has already been type-checked.
+ // Do it again now that we have this type info for the range variables.
+ if changed {
+ typecheck1(cfg, n.Body, typeof, assign)
+ }
+
+ case *ast.TypeSwitchStmt:
+ // Type of variable changes for each case in type switch,
+ // but go/parser generates just one variable.
+ // Repeat type check for each case with more precise
+ // type information.
+ as, ok := n.Assign.(*ast.AssignStmt)
+ if !ok {
+ return
+ }
+ varx, ok := as.Lhs[0].(*ast.Ident)
+ if !ok {
+ return
+ }
+ t := typeof[varx]
+ for _, cas := range n.Body.List {
+ cas := cas.(*ast.CaseClause)
+ if len(cas.List) == 1 {
+ // Variable has specific type only when there is
+ // exactly one type in the case list.
+ if tt := typeof[cas.List[0]]; isType(tt) {
+ tt = getType(tt)
+ typeof[varx] = tt
+ typeof[varx.Obj] = tt
+ typecheck1(cfg, cas.Body, typeof, assign)
+ }
+ }
+ }
+ // Restore t.
+ typeof[varx] = t
+ typeof[varx.Obj] = t
+
+ case *ast.ReturnStmt:
+ if len(curfn) == 0 {
+ // Probably can't happen.
+ return
+ }
+ f := curfn[len(curfn)-1]
+ res := n.Results
+ if f.Results != nil {
+ t := split(typeof[f.Results])
+ for i := 0; i < len(res) && i < len(t); i++ {
+ set(res[i], t[i], false)
+ }
+ }
+ }
+ }
+ walkBeforeAfter(f, before, after)
+}
+
+// Convert between function type strings and lists of types.
+// Using strings makes this a little harder, but it makes
+// a lot of the rest of the code easier. This will all go away
+// when we can use go/typechecker directly.
+
+// splitFunc splits "func(x,y,z) (a,b,c)" into ["x", "y", "z"] and ["a", "b", "c"].
+func splitFunc(s string) (in, out []string) {
+ if !strings.HasPrefix(s, "func(") {
+ return nil, nil
+ }
+
+ i := len("func(") // index of beginning of 'in' arguments
+ nparen := 0
+ for j := i; j < len(s); j++ {
+ switch s[j] {
+ case '(':
+ nparen++
+ case ')':
+ nparen--
+ if nparen < 0 {
+ // found end of parameter list
+ out := strings.TrimSpace(s[j+1:])
+ if len(out) >= 2 && out[0] == '(' && out[len(out)-1] == ')' {
+ out = out[1 : len(out)-1]
+ }
+ return split(s[i:j]), split(out)
+ }
+ }
+ }
+ return nil, nil
+}
+
+// joinFunc is the inverse of splitFunc.
+func joinFunc(in, out []string) string {
+ outs := ""
+ if len(out) == 1 {
+ outs = " " + out[0]
+ } else if len(out) > 1 {
+ outs = " (" + join(out) + ")"
+ }
+ return "func(" + join(in) + ")" + outs
+}
+
+// split splits "int, float" into ["int", "float"] and splits "" into [].
+func split(s string) []string {
+ out := []string{}
+ i := 0 // current type being scanned is s[i:j].
+ nparen := 0
+ for j := 0; j < len(s); j++ {
+ switch s[j] {
+ case ' ':
+ if i == j {
+ i++
+ }
+ case '(':
+ nparen++
+ case ')':
+ nparen--
+ if nparen < 0 {
+ // probably can't happen
+ return nil
+ }
+ case ',':
+ if nparen == 0 {
+ if i < j {
+ out = append(out, s[i:j])
+ }
+ i = j + 1
+ }
+ }
+ }
+ if nparen != 0 {
+ // probably can't happen
+ return nil
+ }
+ if i < len(s) {
+ out = append(out, s[i:])
+ }
+ return out
+}
+
+// join is the inverse of split.
+func join(x []string) string {
+ return strings.Join(x, ", ")
+}
diff --git a/vendor/google.golang.org/appengine/datastore/datastore.go b/vendor/google.golang.org/appengine/datastore/datastore.go
new file mode 100644
index 0000000..9422e41
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/datastore.go
@@ -0,0 +1,406 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var (
+ // ErrInvalidEntityType is returned when functions like Get or Next are
+ // passed a dst or src argument of invalid type.
+ ErrInvalidEntityType = errors.New("datastore: invalid entity type")
+ // ErrInvalidKey is returned when an invalid key is presented.
+ ErrInvalidKey = errors.New("datastore: invalid key")
+ // ErrNoSuchEntity is returned when no entity was found for a given key.
+ ErrNoSuchEntity = errors.New("datastore: no such entity")
+)
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+// StructType is the type of the struct pointed to by the destination argument
+// passed to Get or to Iterator.Next.
+type ErrFieldMismatch struct {
+ StructType reflect.Type
+ FieldName string
+ Reason string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+ return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
+ e.FieldName, e.StructType, e.Reason)
+}
+
+// protoToKey converts a Reference proto to a *Key.
+func protoToKey(r *pb.Reference) (k *Key, err error) {
+ appID := r.GetApp()
+ namespace := r.GetNameSpace()
+ for _, e := range r.Path.Element {
+ k = &Key{
+ kind: e.GetType(),
+ stringID: e.GetName(),
+ intID: e.GetId(),
+ parent: k,
+ appID: appID,
+ namespace: namespace,
+ }
+ if !k.valid() {
+ return nil, ErrInvalidKey
+ }
+ }
+ return
+}
+
+// keyToProto converts a *Key to a Reference proto.
+func keyToProto(defaultAppID string, k *Key) *pb.Reference {
+ appID := k.appID
+ if appID == "" {
+ appID = defaultAppID
+ }
+ n := 0
+ for i := k; i != nil; i = i.parent {
+ n++
+ }
+ e := make([]*pb.Path_Element, n)
+ for i := k; i != nil; i = i.parent {
+ n--
+ e[n] = &pb.Path_Element{
+ Type: &i.kind,
+ }
+ // At most one of {Name,Id} should be set.
+ // Neither will be set for incomplete keys.
+ if i.stringID != "" {
+ e[n].Name = &i.stringID
+ } else if i.intID != 0 {
+ e[n].Id = &i.intID
+ }
+ }
+ var namespace *string
+ if k.namespace != "" {
+ namespace = proto.String(k.namespace)
+ }
+ return &pb.Reference{
+ App: proto.String(appID),
+ NameSpace: namespace,
+ Path: &pb.Path{
+ Element: e,
+ },
+ }
+}
+
+// multiKeyToProto is a batch version of keyToProto.
+func multiKeyToProto(appID string, key []*Key) []*pb.Reference {
+ ret := make([]*pb.Reference, len(key))
+ for i, k := range key {
+ ret[i] = keyToProto(appID, k)
+ }
+ return ret
+}
+
+// multiValid is a batch version of Key.valid. It returns an error, not a
+// []bool.
+func multiValid(key []*Key) error {
+ invalid := false
+ for _, k := range key {
+ if !k.valid() {
+ invalid = true
+ break
+ }
+ }
+ if !invalid {
+ return nil
+ }
+ err := make(appengine.MultiError, len(key))
+ for i, k := range key {
+ if !k.valid() {
+ err[i] = ErrInvalidKey
+ }
+ }
+ return err
+}
+
+// It's unfortunate that the two semantically equivalent concepts pb.Reference
+// and pb.PropertyValue_ReferenceValue aren't the same type. For example, the
+// two have different protobuf field numbers.
+
+// referenceValueToKey is the same as protoToKey except the input is a
+// PropertyValue_ReferenceValue instead of a Reference.
+func referenceValueToKey(r *pb.PropertyValue_ReferenceValue) (k *Key, err error) {
+ appID := r.GetApp()
+ namespace := r.GetNameSpace()
+ for _, e := range r.Pathelement {
+ k = &Key{
+ kind: e.GetType(),
+ stringID: e.GetName(),
+ intID: e.GetId(),
+ parent: k,
+ appID: appID,
+ namespace: namespace,
+ }
+ if !k.valid() {
+ return nil, ErrInvalidKey
+ }
+ }
+ return
+}
+
+// keyToReferenceValue is the same as keyToProto except the output is a
+// PropertyValue_ReferenceValue instead of a Reference.
+func keyToReferenceValue(defaultAppID string, k *Key) *pb.PropertyValue_ReferenceValue {
+ ref := keyToProto(defaultAppID, k)
+ pe := make([]*pb.PropertyValue_ReferenceValue_PathElement, len(ref.Path.Element))
+ for i, e := range ref.Path.Element {
+ pe[i] = &pb.PropertyValue_ReferenceValue_PathElement{
+ Type: e.Type,
+ Id: e.Id,
+ Name: e.Name,
+ }
+ }
+ return &pb.PropertyValue_ReferenceValue{
+ App: ref.App,
+ NameSpace: ref.NameSpace,
+ Pathelement: pe,
+ }
+}
+
+type multiArgType int
+
+const (
+ multiArgTypeInvalid multiArgType = iota
+ multiArgTypePropertyLoadSaver
+ multiArgTypeStruct
+ multiArgTypeStructPtr
+ multiArgTypeInterface
+)
+
+// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
+// type S, for some interface type I, or some non-interface non-pointer type P
+// such that P or *P implements PropertyLoadSaver.
+//
+// It returns what category the slice's elements are, and the reflect.Type
+// that represents S, I or P.
+//
+// As a special case, PropertyList is an invalid type for v.
+func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
+ if v.Kind() != reflect.Slice {
+ return multiArgTypeInvalid, nil
+ }
+ if v.Type() == typeOfPropertyList {
+ return multiArgTypeInvalid, nil
+ }
+ elemType = v.Type().Elem()
+ if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
+ return multiArgTypePropertyLoadSaver, elemType
+ }
+ switch elemType.Kind() {
+ case reflect.Struct:
+ return multiArgTypeStruct, elemType
+ case reflect.Interface:
+ return multiArgTypeInterface, elemType
+ case reflect.Ptr:
+ elemType = elemType.Elem()
+ if elemType.Kind() == reflect.Struct {
+ return multiArgTypeStructPtr, elemType
+ }
+ }
+ return multiArgTypeInvalid, nil
+}
+
+// Get loads the entity stored for k into dst, which must be a struct pointer
+// or implement PropertyLoadSaver. If there is no such entity for the key, Get
+// returns ErrNoSuchEntity.
+//
+// The values of dst's unmatched struct fields are not modified, and matching
+// slice-typed fields are not reset before appending to them. In particular, it
+// is recommended to pass a pointer to a zero valued struct on each Get call.
+//
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. ErrFieldMismatch is only returned if
+// dst is a struct pointer.
+func Get(c context.Context, key *Key, dst interface{}) error {
+ if dst == nil { // GetMulti catches nil interface; we need to catch nil ptr here
+ return ErrInvalidEntityType
+ }
+ err := GetMulti(c, []*Key{key}, []interface{}{dst})
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
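+// exampleArticle and exampleGet are an illustrative sketch (not part of the
+// package API) of the struct-pointer form of Get described above; the
+// "Article" kind, the field set, and the key layout are invented for
+// demonstration.
+type exampleArticle struct {
+ Title string
+ Read bool `datastore:",noindex"`
+}
+
+func exampleGet(c context.Context, id int64) (*exampleArticle, error) {
+ k := NewKey(c, "Article", "", id, nil)
+ dst := new(exampleArticle)
+ if err := Get(c, k, dst); err != nil {
+ return nil, err
+ }
+ return dst, nil
+}
+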
+// GetMulti is a batch version of Get.
+//
+// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
+// type I, or some non-interface non-pointer type P such that P or *P
+// implements PropertyLoadSaver. If an []I, each element must be a valid dst
+// for Get: it must be a struct pointer or implement PropertyLoadSaver.
+//
+// As a special case, PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when []PropertyList was intended.
+func GetMulti(c context.Context, key []*Key, dst interface{}) error {
+ v := reflect.ValueOf(dst)
+ multiArgType, _ := checkMultiArg(v)
+ if multiArgType == multiArgTypeInvalid {
+ return errors.New("datastore: dst has invalid type")
+ }
+ if len(key) != v.Len() {
+ return errors.New("datastore: key and dst slices have different length")
+ }
+ if len(key) == 0 {
+ return nil
+ }
+ if err := multiValid(key); err != nil {
+ return err
+ }
+ req := &pb.GetRequest{
+ Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key),
+ }
+ res := &pb.GetResponse{}
+ if err := internal.Call(c, "datastore_v3", "Get", req, res); err != nil {
+ return err
+ }
+ if len(key) != len(res.Entity) {
+ return errors.New("datastore: internal error: server returned the wrong number of entities")
+ }
+ multiErr, any := make(appengine.MultiError, len(key)), false
+ for i, e := range res.Entity {
+ if e.Entity == nil {
+ multiErr[i] = ErrNoSuchEntity
+ } else {
+ elem := v.Index(i)
+ if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
+ elem = elem.Addr()
+ }
+ if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
+ elem.Set(reflect.New(elem.Type().Elem()))
+ }
+ multiErr[i] = loadEntity(elem.Interface(), e.Entity)
+ }
+ if multiErr[i] != nil {
+ any = true
+ }
+ }
+ if any {
+ return multiErr
+ }
+ return nil
+}
+
+// Put saves the entity src into the datastore with key k. src must be a struct
+// pointer or implement PropertyLoadSaver; if a struct pointer then any
+// unexported fields of that struct will be skipped. If k is an incomplete key,
+// the returned key will be a unique key generated by the datastore.
+func Put(c context.Context, key *Key, src interface{}) (*Key, error) {
+ k, err := PutMulti(c, []*Key{key}, []interface{}{src})
+ if err != nil {
+ if me, ok := err.(appengine.MultiError); ok {
+ return nil, me[0]
+ }
+ return nil, err
+ }
+ return k[0], nil
+}
+
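+// examplePut is an illustrative sketch (not part of the package API) of
+// saving a new entity under an incomplete key, as described above; the
+// "Article" kind is invented for demonstration, and src must satisfy the
+// same conditions as the src argument to Put.
+func examplePut(c context.Context, src interface{}) (*Key, error) {
+ k := NewIncompleteKey(c, "Article", nil)
+ return Put(c, k, src)
+}
+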
+// PutMulti is a batch version of Put.
+//
+// src must satisfy the same conditions as the dst argument to GetMulti.
+func PutMulti(c context.Context, key []*Key, src interface{}) ([]*Key, error) {
+ v := reflect.ValueOf(src)
+ multiArgType, _ := checkMultiArg(v)
+ if multiArgType == multiArgTypeInvalid {
+ return nil, errors.New("datastore: src has invalid type")
+ }
+ if len(key) != v.Len() {
+ return nil, errors.New("datastore: key and src slices have different length")
+ }
+ if len(key) == 0 {
+ return nil, nil
+ }
+ appID := internal.FullyQualifiedAppID(c)
+ if err := multiValid(key); err != nil {
+ return nil, err
+ }
+ req := &pb.PutRequest{}
+ for i := range key {
+ elem := v.Index(i)
+ if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
+ elem = elem.Addr()
+ }
+ sProto, err := saveEntity(appID, key[i], elem.Interface())
+ if err != nil {
+ return nil, err
+ }
+ req.Entity = append(req.Entity, sProto)
+ }
+ res := &pb.PutResponse{}
+ if err := internal.Call(c, "datastore_v3", "Put", req, res); err != nil {
+ return nil, err
+ }
+ if len(key) != len(res.Key) {
+ return nil, errors.New("datastore: internal error: server returned the wrong number of keys")
+ }
+ ret := make([]*Key, len(key))
+ for i := range ret {
+ var err error
+ ret[i], err = protoToKey(res.Key[i])
+ if err != nil || ret[i].Incomplete() {
+ return nil, errors.New("datastore: internal error: server returned an invalid key")
+ }
+ }
+ return ret, nil
+}
+
+// Delete deletes the entity for the given key.
+func Delete(c context.Context, key *Key) error {
+ err := DeleteMulti(c, []*Key{key})
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
+// DeleteMulti is a batch version of Delete.
+func DeleteMulti(c context.Context, key []*Key) error {
+ if len(key) == 0 {
+ return nil
+ }
+ if err := multiValid(key); err != nil {
+ return err
+ }
+ req := &pb.DeleteRequest{
+ Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key),
+ }
+ res := &pb.DeleteResponse{}
+ return internal.Call(c, "datastore_v3", "Delete", req, res)
+}
+
+func namespaceMod(m proto.Message, namespace string) {
+ // pb.Query is the only type that has a name_space field.
+ // All other namespace support in datastore is in the keys.
+ switch m := m.(type) {
+ case *pb.Query:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ }
+}
+
+func init() {
+ internal.NamespaceMods["datastore_v3"] = namespaceMod
+ internal.RegisterErrorCodeMap("datastore_v3", pb.Error_ErrorCode_name)
+ internal.RegisterTimeoutErrorCode("datastore_v3", int32(pb.Error_TIMEOUT))
+}
diff --git a/vendor/google.golang.org/appengine/datastore/datastore_test.go b/vendor/google.golang.org/appengine/datastore/datastore_test.go
new file mode 100644
index 0000000..b2856a9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/datastore_test.go
@@ -0,0 +1,1567 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+const testAppID = "testApp"
+
+type (
+ myBlob []byte
+ myByte byte
+ myString string
+)
+
+func makeMyByteSlice(n int) []myByte {
+ b := make([]myByte, n)
+ for i := range b {
+ b[i] = myByte(i)
+ }
+ return b
+}
+
+func makeInt8Slice(n int) []int8 {
+ b := make([]int8, n)
+ for i := range b {
+ b[i] = int8(i)
+ }
+ return b
+}
+
+func makeUint8Slice(n int) []uint8 {
+ b := make([]uint8, n)
+ for i := range b {
+ b[i] = uint8(i)
+ }
+ return b
+}
+
+func newKey(stringID string, parent *Key) *Key {
+ return &Key{
+ kind: "kind",
+ stringID: stringID,
+ intID: 0,
+ parent: parent,
+ appID: testAppID,
+ }
+}
+
+var (
+ testKey0 = newKey("name0", nil)
+ testKey1a = newKey("name1", nil)
+ testKey1b = newKey("name1", nil)
+ testKey2a = newKey("name2", testKey0)
+ testKey2b = newKey("name2", testKey0)
+ testGeoPt0 = appengine.GeoPoint{Lat: 1.2, Lng: 3.4}
+ testGeoPt1 = appengine.GeoPoint{Lat: 5, Lng: 10}
+ testBadGeoPt = appengine.GeoPoint{Lat: 1000, Lng: 34}
+)
+
+type B0 struct {
+ B []byte
+}
+
+type B1 struct {
+ B []int8
+}
+
+type B2 struct {
+ B myBlob
+}
+
+type B3 struct {
+ B []myByte
+}
+
+type B4 struct {
+ B [][]byte
+}
+
+type B5 struct {
+ B ByteString
+}
+
+type C0 struct {
+ I int
+ C chan int
+}
+
+type C1 struct {
+ I int
+ C *chan int
+}
+
+type C2 struct {
+ I int
+ C []chan int
+}
+
+type C3 struct {
+ C string
+}
+
+type E struct{}
+
+type G0 struct {
+ G appengine.GeoPoint
+}
+
+type G1 struct {
+ G []appengine.GeoPoint
+}
+
+type K0 struct {
+ K *Key
+}
+
+type K1 struct {
+ K []*Key
+}
+
+type N0 struct {
+ X0
+ Nonymous X0
+ Ignore string `datastore:"-"`
+ Other string
+}
+
+type N1 struct {
+ X0
+ Nonymous []X0
+ Ignore string `datastore:"-"`
+ Other string
+}
+
+type N2 struct {
+ N1 `datastore:"red"`
+ Green N1 `datastore:"green"`
+ Blue N1
+ White N1 `datastore:"-"`
+}
+
+type O0 struct {
+ I int64
+}
+
+type O1 struct {
+ I int32
+}
+
+type U0 struct {
+ U uint
+}
+
+type U1 struct {
+ U string
+}
+
+type T struct {
+ T time.Time
+}
+
+type X0 struct {
+ S string
+ I int
+ i int
+}
+
+type X1 struct {
+ S myString
+ I int32
+ J int64
+}
+
+type X2 struct {
+ Z string
+ i int
+}
+
+type X3 struct {
+ S bool
+ I int
+}
+
+type Y0 struct {
+ B bool
+ F []float64
+ G []float64
+}
+
+type Y1 struct {
+ B bool
+ F float64
+}
+
+type Y2 struct {
+ B bool
+ F []int64
+}
+
+type Tagged struct {
+ A int `datastore:"a,noindex"`
+ B []int `datastore:"b"`
+ C int `datastore:",noindex"`
+ D int `datastore:""`
+ E int
+ // The "flatten" option is parsed but ignored for now.
+ F int `datastore:",noindex,flatten"`
+ G int `datastore:",flatten"`
+ I int `datastore:"-"`
+ J int `datastore:",noindex" json:"j"`
+
+ Y0 `datastore:"-"`
+ Z chan int `datastore:"-,"`
+}
+
+type InvalidTagged1 struct {
+ I int `datastore:"\t"`
+}
+
+type InvalidTagged2 struct {
+ I int
+ J int `datastore:"I"`
+}
+
+type Inner1 struct {
+ W int32
+ X string
+}
+
+type Inner2 struct {
+ Y float64
+}
+
+type Inner3 struct {
+ Z bool
+}
+
+type Outer struct {
+ A int16
+ I []Inner1
+ J Inner2
+ Inner3
+}
+
+type OuterEquivalent struct {
+ A int16
+ IDotW []int32 `datastore:"I.W"`
+ IDotX []string `datastore:"I.X"`
+ JDotY float64 `datastore:"J.Y"`
+ Z bool
+}
+
+type Dotted struct {
+ A DottedA `datastore:"A0.A1.A2"`
+}
+
+type DottedA struct {
+ B DottedB `datastore:"B3"`
+}
+
+type DottedB struct {
+ C int `datastore:"C4.C5"`
+}
+
+type SliceOfSlices struct {
+ I int
+ S []struct {
+ J int
+ F []float64
+ }
+}
+
+type Recursive struct {
+ I int
+ R []Recursive
+}
+
+type MutuallyRecursive0 struct {
+ I int
+ R []MutuallyRecursive1
+}
+
+type MutuallyRecursive1 struct {
+ I int
+ R []MutuallyRecursive0
+}
+
+type Doubler struct {
+ S string
+ I int64
+ B bool
+}
+
+func (d *Doubler) Load(props []Property) error {
+ return LoadStruct(d, props)
+}
+
+func (d *Doubler) Save() ([]Property, error) {
+ // Save the default Property slice to an in-memory buffer (a PropertyList).
+ props, err := SaveStruct(d)
+ if err != nil {
+ return nil, err
+ }
+ var list PropertyList
+ if err := list.Load(props); err != nil {
+ return nil, err
+ }
+
+ // Edit that PropertyList, and send it on.
+ for i := range list {
+ switch v := list[i].Value.(type) {
+ case string:
+ // + means string concatenation.
+ list[i].Value = v + v
+ case int64:
+ // + means integer addition.
+ list[i].Value = v + v
+ }
+ }
+ return list.Save()
+}
+
+var _ PropertyLoadSaver = (*Doubler)(nil)
+
+type Deriver struct {
+ S, Derived, Ignored string
+}
+
+func (e *Deriver) Load(props []Property) error {
+ for _, p := range props {
+ if p.Name != "S" {
+ continue
+ }
+ e.S = p.Value.(string)
+ e.Derived = "derived+" + e.S
+ }
+ return nil
+}
+
+func (e *Deriver) Save() ([]Property, error) {
+ return []Property{
+ {
+ Name: "S",
+ Value: e.S,
+ },
+ }, nil
+}
+
+var _ PropertyLoadSaver = (*Deriver)(nil)
+
+type BadMultiPropEntity struct{}
+
+func (e *BadMultiPropEntity) Load(props []Property) error {
+ return errors.New("unimplemented")
+}
+
+func (e *BadMultiPropEntity) Save() ([]Property, error) {
+ // Write multiple properties with the same name "I", but Multiple is false.
+ var props []Property
+ for i := 0; i < 3; i++ {
+ props = append(props, Property{
+ Name: "I",
+ Value: int64(i),
+ })
+ }
+ return props, nil
+}
+
+var _ PropertyLoadSaver = (*BadMultiPropEntity)(nil)
+
+type BK struct {
+ Key appengine.BlobKey
+}
+
+type testCase struct {
+ desc string
+ src interface{}
+ want interface{}
+ putErr string
+ getErr string
+}
+
+var testCases = []testCase{
+ {
+ "chan save fails",
+ &C0{I: -1},
+ &E{},
+ "unsupported struct field",
+ "",
+ },
+ {
+ "*chan save fails",
+ &C1{I: -1},
+ &E{},
+ "unsupported struct field",
+ "",
+ },
+ {
+ "[]chan save fails",
+ &C2{I: -1, C: make([]chan int, 8)},
+ &E{},
+ "unsupported struct field",
+ "",
+ },
+ {
+ "chan load fails",
+ &C3{C: "not a chan"},
+ &C0{},
+ "",
+ "type mismatch",
+ },
+ {
+ "*chan load fails",
+ &C3{C: "not a *chan"},
+ &C1{},
+ "",
+ "type mismatch",
+ },
+ {
+ "[]chan load fails",
+ &C3{C: "not a []chan"},
+ &C2{},
+ "",
+ "type mismatch",
+ },
+ {
+ "empty struct",
+ &E{},
+ &E{},
+ "",
+ "",
+ },
+ {
+ "geopoint",
+ &G0{G: testGeoPt0},
+ &G0{G: testGeoPt0},
+ "",
+ "",
+ },
+ {
+ "geopoint invalid",
+ &G0{G: testBadGeoPt},
+ &G0{},
+ "invalid GeoPoint value",
+ "",
+ },
+ {
+ "geopoint as props",
+ &G0{G: testGeoPt0},
+ &PropertyList{
+ Property{Name: "G", Value: testGeoPt0, NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "geopoint slice",
+ &G1{G: []appengine.GeoPoint{testGeoPt0, testGeoPt1}},
+ &G1{G: []appengine.GeoPoint{testGeoPt0, testGeoPt1}},
+ "",
+ "",
+ },
+ {
+ "key",
+ &K0{K: testKey1a},
+ &K0{K: testKey1b},
+ "",
+ "",
+ },
+ {
+ "key with parent",
+ &K0{K: testKey2a},
+ &K0{K: testKey2b},
+ "",
+ "",
+ },
+ {
+ "nil key",
+ &K0{},
+ &K0{},
+ "",
+ "",
+ },
+ {
+ "all nil keys in slice",
+ &K1{[]*Key{nil, nil}},
+ &K1{[]*Key{nil, nil}},
+ "",
+ "",
+ },
+ {
+ "some nil keys in slice",
+ &K1{[]*Key{testKey1a, nil, testKey2a}},
+ &K1{[]*Key{testKey1b, nil, testKey2b}},
+ "",
+ "",
+ },
+ {
+ "overflow",
+ &O0{I: 1 << 48},
+ &O1{},
+ "",
+ "overflow",
+ },
+ {
+ "time",
+ &T{T: time.Unix(1e9, 0)},
+ &T{T: time.Unix(1e9, 0)},
+ "",
+ "",
+ },
+ {
+ "time as props",
+ &T{T: time.Unix(1e9, 0)},
+ &PropertyList{
+ Property{Name: "T", Value: time.Unix(1e9, 0).UTC(), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "uint save",
+ &U0{U: 1},
+ &U0{},
+ "unsupported struct field",
+ "",
+ },
+ {
+ "uint load",
+ &U1{U: "not a uint"},
+ &U0{},
+ "",
+ "type mismatch",
+ },
+ {
+ "zero",
+ &X0{},
+ &X0{},
+ "",
+ "",
+ },
+ {
+ "basic",
+ &X0{S: "one", I: 2, i: 3},
+ &X0{S: "one", I: 2},
+ "",
+ "",
+ },
+ {
+ "save string/int load myString/int32",
+ &X0{S: "one", I: 2, i: 3},
+ &X1{S: "one", I: 2},
+ "",
+ "",
+ },
+ {
+ "missing fields",
+ &X0{S: "one", I: 2, i: 3},
+ &X2{},
+ "",
+ "no such struct field",
+ },
+ {
+ "save string load bool",
+ &X0{S: "one", I: 2, i: 3},
+ &X3{I: 2},
+ "",
+ "type mismatch",
+ },
+ {
+ "basic slice",
+ &Y0{B: true, F: []float64{7, 8, 9}},
+ &Y0{B: true, F: []float64{7, 8, 9}},
+ "",
+ "",
+ },
+ {
+ "save []float64 load float64",
+ &Y0{B: true, F: []float64{7, 8, 9}},
+ &Y1{B: true},
+ "",
+ "requires a slice",
+ },
+ {
+ "save []float64 load []int64",
+ &Y0{B: true, F: []float64{7, 8, 9}},
+ &Y2{B: true},
+ "",
+ "type mismatch",
+ },
+ {
+ "single slice is too long",
+ &Y0{F: make([]float64, maxIndexedProperties+1)},
+ &Y0{},
+ "too many indexed properties",
+ "",
+ },
+ {
+ "two slices are too long",
+ &Y0{F: make([]float64, maxIndexedProperties), G: make([]float64, maxIndexedProperties)},
+ &Y0{},
+ "too many indexed properties",
+ "",
+ },
+ {
+ "one slice and one scalar are too long",
+ &Y0{F: make([]float64, maxIndexedProperties), B: true},
+ &Y0{},
+ "too many indexed properties",
+ "",
+ },
+ {
+ "long blob",
+ &B0{B: makeUint8Slice(maxIndexedProperties + 1)},
+ &B0{B: makeUint8Slice(maxIndexedProperties + 1)},
+ "",
+ "",
+ },
+ {
+ "long []int8 is too long",
+ &B1{B: makeInt8Slice(maxIndexedProperties + 1)},
+ &B1{},
+ "too many indexed properties",
+ "",
+ },
+ {
+ "short []int8",
+ &B1{B: makeInt8Slice(3)},
+ &B1{B: makeInt8Slice(3)},
+ "",
+ "",
+ },
+ {
+ "long myBlob",
+ &B2{B: makeUint8Slice(maxIndexedProperties + 1)},
+ &B2{B: makeUint8Slice(maxIndexedProperties + 1)},
+ "",
+ "",
+ },
+ {
+ "short myBlob",
+ &B2{B: makeUint8Slice(3)},
+ &B2{B: makeUint8Slice(3)},
+ "",
+ "",
+ },
+ {
+ "long []myByte",
+ &B3{B: makeMyByteSlice(maxIndexedProperties + 1)},
+ &B3{B: makeMyByteSlice(maxIndexedProperties + 1)},
+ "",
+ "",
+ },
+ {
+ "short []myByte",
+ &B3{B: makeMyByteSlice(3)},
+ &B3{B: makeMyByteSlice(3)},
+ "",
+ "",
+ },
+ {
+ "slice of blobs",
+ &B4{B: [][]byte{
+ makeUint8Slice(3),
+ makeUint8Slice(4),
+ makeUint8Slice(5),
+ }},
+ &B4{B: [][]byte{
+ makeUint8Slice(3),
+ makeUint8Slice(4),
+ makeUint8Slice(5),
+ }},
+ "",
+ "",
+ },
+ {
+ "short ByteString",
+ &B5{B: ByteString(makeUint8Slice(3))},
+ &B5{B: ByteString(makeUint8Slice(3))},
+ "",
+ "",
+ },
+ {
+ "short ByteString as props",
+ &B5{B: ByteString(makeUint8Slice(3))},
+ &PropertyList{
+ Property{Name: "B", Value: ByteString(makeUint8Slice(3)), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "short ByteString into string",
+ &B5{B: ByteString("legacy")},
+ &struct{ B string }{"legacy"},
+ "",
+ "",
+ },
+ {
+ "[]byte must be noindex",
+ &PropertyList{
+ Property{Name: "B", Value: makeUint8Slice(3), NoIndex: false},
+ },
+ nil,
+ "cannot index a []byte valued Property",
+ "",
+ },
+ {
+ "save tagged load props",
+ &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, F: 6, G: 7, I: 8, J: 9},
+ &PropertyList{
+ // A and B are renamed to a and b; A and C are noindex, I is ignored.
+ // Indexed properties are loaded before raw properties. Thus, the
+ // result is: b, b, b, D, E, a, c.
+ Property{Name: "b", Value: int64(21), NoIndex: false, Multiple: true},
+ Property{Name: "b", Value: int64(22), NoIndex: false, Multiple: true},
+ Property{Name: "b", Value: int64(23), NoIndex: false, Multiple: true},
+ Property{Name: "D", Value: int64(4), NoIndex: false, Multiple: false},
+ Property{Name: "E", Value: int64(5), NoIndex: false, Multiple: false},
+ Property{Name: "G", Value: int64(7), NoIndex: false, Multiple: false},
+ Property{Name: "a", Value: int64(1), NoIndex: true, Multiple: false},
+ Property{Name: "C", Value: int64(3), NoIndex: true, Multiple: false},
+ Property{Name: "F", Value: int64(6), NoIndex: true, Multiple: false},
+ Property{Name: "J", Value: int64(9), NoIndex: true, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "save tagged load tagged",
+ &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7},
+ &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, J: 7},
+ "",
+ "",
+ },
+ {
+ "save props load tagged",
+ &PropertyList{
+ Property{Name: "A", Value: int64(11), NoIndex: true, Multiple: false},
+ Property{Name: "a", Value: int64(12), NoIndex: true, Multiple: false},
+ },
+ &Tagged{A: 12},
+ "",
+ `cannot load field "A"`,
+ },
+ {
+ "invalid tagged1",
+ &InvalidTagged1{I: 1},
+ &InvalidTagged1{},
+ "struct tag has invalid property name",
+ "",
+ },
+ {
+ "invalid tagged2",
+ &InvalidTagged2{I: 1, J: 2},
+ &InvalidTagged2{},
+ "struct tag has repeated property name",
+ "",
+ },
+ {
+ "doubler",
+ &Doubler{S: "s", I: 1, B: true},
+ &Doubler{S: "ss", I: 2, B: true},
+ "",
+ "",
+ },
+ {
+ "save struct load props",
+ &X0{S: "s", I: 1},
+ &PropertyList{
+ Property{Name: "S", Value: "s", NoIndex: false, Multiple: false},
+ Property{Name: "I", Value: int64(1), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "save props load struct",
+ &PropertyList{
+ Property{Name: "S", Value: "s", NoIndex: false, Multiple: false},
+ Property{Name: "I", Value: int64(1), NoIndex: false, Multiple: false},
+ },
+ &X0{S: "s", I: 1},
+ "",
+ "",
+ },
+ {
+ "nil-value props",
+ &PropertyList{
+ Property{Name: "I", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "B", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "S", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "F", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "K", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "T", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "J", Value: nil, NoIndex: false, Multiple: true},
+ Property{Name: "J", Value: int64(7), NoIndex: false, Multiple: true},
+ Property{Name: "J", Value: nil, NoIndex: false, Multiple: true},
+ },
+ &struct {
+ I int64
+ B bool
+ S string
+ F float64
+ K *Key
+ T time.Time
+ J []int64
+ }{
+ J: []int64{0, 7, 0},
+ },
+ "",
+ "",
+ },
+ {
+ "save outer load props",
+ &Outer{
+ A: 1,
+ I: []Inner1{
+ {10, "ten"},
+ {20, "twenty"},
+ {30, "thirty"},
+ },
+ J: Inner2{
+ Y: 3.14,
+ },
+ Inner3: Inner3{
+ Z: true,
+ },
+ },
+ &PropertyList{
+ Property{Name: "A", Value: int64(1), NoIndex: false, Multiple: false},
+ Property{Name: "I.W", Value: int64(10), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "ten", NoIndex: false, Multiple: true},
+ Property{Name: "I.W", Value: int64(20), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "twenty", NoIndex: false, Multiple: true},
+ Property{Name: "I.W", Value: int64(30), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "thirty", NoIndex: false, Multiple: true},
+ Property{Name: "J.Y", Value: float64(3.14), NoIndex: false, Multiple: false},
+ Property{Name: "Z", Value: true, NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "save props load outer-equivalent",
+ &PropertyList{
+ Property{Name: "A", Value: int64(1), NoIndex: false, Multiple: false},
+ Property{Name: "I.W", Value: int64(10), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "ten", NoIndex: false, Multiple: true},
+ Property{Name: "I.W", Value: int64(20), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "twenty", NoIndex: false, Multiple: true},
+ Property{Name: "I.W", Value: int64(30), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "thirty", NoIndex: false, Multiple: true},
+ Property{Name: "J.Y", Value: float64(3.14), NoIndex: false, Multiple: false},
+ Property{Name: "Z", Value: true, NoIndex: false, Multiple: false},
+ },
+ &OuterEquivalent{
+ A: 1,
+ IDotW: []int32{10, 20, 30},
+ IDotX: []string{"ten", "twenty", "thirty"},
+ JDotY: 3.14,
+ Z: true,
+ },
+ "",
+ "",
+ },
+ {
+ "save outer-equivalent load outer",
+ &OuterEquivalent{
+ A: 1,
+ IDotW: []int32{10, 20, 30},
+ IDotX: []string{"ten", "twenty", "thirty"},
+ JDotY: 3.14,
+ Z: true,
+ },
+ &Outer{
+ A: 1,
+ I: []Inner1{
+ {10, "ten"},
+ {20, "twenty"},
+ {30, "thirty"},
+ },
+ J: Inner2{
+ Y: 3.14,
+ },
+ Inner3: Inner3{
+ Z: true,
+ },
+ },
+ "",
+ "",
+ },
+ {
+ "dotted names save",
+ &Dotted{A: DottedA{B: DottedB{C: 88}}},
+ &PropertyList{
+ Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(88), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "dotted names load",
+ &PropertyList{
+ Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(99), NoIndex: false, Multiple: false},
+ },
+ &Dotted{A: DottedA{B: DottedB{C: 99}}},
+ "",
+ "",
+ },
+ {
+ "save struct load deriver",
+ &X0{S: "s", I: 1},
+ &Deriver{S: "s", Derived: "derived+s"},
+ "",
+ "",
+ },
+ {
+ "save deriver load struct",
+ &Deriver{S: "s", Derived: "derived+s", Ignored: "ignored"},
+ &X0{S: "s"},
+ "",
+ "",
+ },
+ {
+ "bad multi-prop entity",
+ &BadMultiPropEntity{},
+ &BadMultiPropEntity{},
+ "Multiple is false",
+ "",
+ },
+ // Regression: CL 25062824 broke handling of appengine.BlobKey fields.
+ {
+ "appengine.BlobKey",
+ &BK{Key: "blah"},
+ &BK{Key: "blah"},
+ "",
+ "",
+ },
+ {
+ "zero time.Time",
+ &T{T: time.Time{}},
+ &T{T: time.Time{}},
+ "",
+ "",
+ },
+ {
+ "time.Time near Unix zero time",
+ &T{T: time.Unix(0, 4e3)},
+ &T{T: time.Unix(0, 4e3)},
+ "",
+ "",
+ },
+ {
+ "time.Time, far in the future",
+ &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)},
+ &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)},
+ "",
+ "",
+ },
+ {
+ "time.Time, very far in the past",
+ &T{T: time.Date(-300000, 1, 1, 0, 0, 0, 0, time.UTC)},
+ &T{},
+ "time value out of range",
+ "",
+ },
+ {
+ "time.Time, very far in the future",
+ &T{T: time.Date(294248, 1, 1, 0, 0, 0, 0, time.UTC)},
+ &T{},
+ "time value out of range",
+ "",
+ },
+ {
+ "structs",
+ &N0{
+ X0: X0{S: "one", I: 2, i: 3},
+ Nonymous: X0{S: "four", I: 5, i: 6},
+ Ignore: "ignore",
+ Other: "other",
+ },
+ &N0{
+ X0: X0{S: "one", I: 2},
+ Nonymous: X0{S: "four", I: 5},
+ Other: "other",
+ },
+ "",
+ "",
+ },
+ {
+ "slice of structs",
+ &N1{
+ X0: X0{S: "one", I: 2, i: 3},
+ Nonymous: []X0{
+ {S: "four", I: 5, i: 6},
+ {S: "seven", I: 8, i: 9},
+ {S: "ten", I: 11, i: 12},
+ {S: "thirteen", I: 14, i: 15},
+ },
+ Ignore: "ignore",
+ Other: "other",
+ },
+ &N1{
+ X0: X0{S: "one", I: 2},
+ Nonymous: []X0{
+ {S: "four", I: 5},
+ {S: "seven", I: 8},
+ {S: "ten", I: 11},
+ {S: "thirteen", I: 14},
+ },
+ Other: "other",
+ },
+ "",
+ "",
+ },
+ {
+ "structs with slices of structs",
+ &N2{
+ N1: N1{
+ X0: X0{S: "rouge"},
+ Nonymous: []X0{
+ {S: "rosso0"},
+ {S: "rosso1"},
+ },
+ },
+ Green: N1{
+ X0: X0{S: "vert"},
+ Nonymous: []X0{
+ {S: "verde0"},
+ {S: "verde1"},
+ {S: "verde2"},
+ },
+ },
+ Blue: N1{
+ X0: X0{S: "bleu"},
+ Nonymous: []X0{
+ {S: "blu0"},
+ {S: "blu1"},
+ {S: "blu2"},
+ {S: "blu3"},
+ },
+ },
+ },
+ &N2{
+ N1: N1{
+ X0: X0{S: "rouge"},
+ Nonymous: []X0{
+ {S: "rosso0"},
+ {S: "rosso1"},
+ },
+ },
+ Green: N1{
+ X0: X0{S: "vert"},
+ Nonymous: []X0{
+ {S: "verde0"},
+ {S: "verde1"},
+ {S: "verde2"},
+ },
+ },
+ Blue: N1{
+ X0: X0{S: "bleu"},
+ Nonymous: []X0{
+ {S: "blu0"},
+ {S: "blu1"},
+ {S: "blu2"},
+ {S: "blu3"},
+ },
+ },
+ },
+ "",
+ "",
+ },
+ {
+ "save structs load props",
+ &N2{
+ N1: N1{
+ X0: X0{S: "rouge"},
+ Nonymous: []X0{
+ {S: "rosso0"},
+ {S: "rosso1"},
+ },
+ },
+ Green: N1{
+ X0: X0{S: "vert"},
+ Nonymous: []X0{
+ {S: "verde0"},
+ {S: "verde1"},
+ {S: "verde2"},
+ },
+ },
+ Blue: N1{
+ X0: X0{S: "bleu"},
+ Nonymous: []X0{
+ {S: "blu0"},
+ {S: "blu1"},
+ {S: "blu2"},
+ {S: "blu3"},
+ },
+ },
+ },
+ &PropertyList{
+ Property{Name: "red.S", Value: "rouge", NoIndex: false, Multiple: false},
+ Property{Name: "red.I", Value: int64(0), NoIndex: false, Multiple: false},
+ Property{Name: "red.Nonymous.S", Value: "rosso0", NoIndex: false, Multiple: true},
+ Property{Name: "red.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "red.Nonymous.S", Value: "rosso1", NoIndex: false, Multiple: true},
+ Property{Name: "red.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "red.Other", Value: "", NoIndex: false, Multiple: false},
+ Property{Name: "green.S", Value: "vert", NoIndex: false, Multiple: false},
+ Property{Name: "green.I", Value: int64(0), NoIndex: false, Multiple: false},
+ Property{Name: "green.Nonymous.S", Value: "verde0", NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.S", Value: "verde1", NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.S", Value: "verde2", NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "green.Other", Value: "", NoIndex: false, Multiple: false},
+ Property{Name: "Blue.S", Value: "bleu", NoIndex: false, Multiple: false},
+ Property{Name: "Blue.I", Value: int64(0), NoIndex: false, Multiple: false},
+ Property{Name: "Blue.Nonymous.S", Value: "blu0", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blu1", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blu2", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blu3", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Other", Value: "", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "save props load structs with ragged fields",
+ &PropertyList{
+ Property{Name: "red.S", Value: "rot", NoIndex: false, Multiple: false},
+ Property{Name: "green.Nonymous.I", Value: int64(10), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(11), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(12), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(13), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blau0", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(20), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blau1", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(21), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blau2", NoIndex: false, Multiple: true},
+ },
+ &N2{
+ N1: N1{
+ X0: X0{S: "rot"},
+ },
+ Green: N1{
+ Nonymous: []X0{
+ {I: 10},
+ {I: 11},
+ {I: 12},
+ {I: 13},
+ },
+ },
+ Blue: N1{
+ Nonymous: []X0{
+ {S: "blau0", I: 20},
+ {S: "blau1", I: 21},
+ {S: "blau2"},
+ },
+ },
+ },
+ "",
+ "",
+ },
+ {
+ "save structs with noindex tags",
+ &struct {
+ A struct {
+ X string `datastore:",noindex"`
+ Y string
+ } `datastore:",noindex"`
+ B struct {
+ X string `datastore:",noindex"`
+ Y string
+ }
+ }{},
+ &PropertyList{
+ Property{Name: "B.Y", Value: "", NoIndex: false, Multiple: false},
+ Property{Name: "A.X", Value: "", NoIndex: true, Multiple: false},
+ Property{Name: "A.Y", Value: "", NoIndex: true, Multiple: false},
+ Property{Name: "B.X", Value: "", NoIndex: true, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "embedded struct with name override",
+ &struct {
+ Inner1 `datastore:"foo"`
+ }{},
+ &PropertyList{
+ Property{Name: "foo.W", Value: int64(0), NoIndex: false, Multiple: false},
+ Property{Name: "foo.X", Value: "", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "slice of slices",
+ &SliceOfSlices{},
+ nil,
+ "flattening nested structs leads to a slice of slices",
+ "",
+ },
+ {
+ "recursive struct",
+ &Recursive{},
+ nil,
+ "recursive struct",
+ "",
+ },
+ {
+ "mutually recursive struct",
+ &MutuallyRecursive0{},
+ nil,
+ "recursive struct",
+ "",
+ },
+ {
+ "non-exported struct fields",
+ &struct {
+ i, J int64
+ }{i: 1, J: 2},
+ &PropertyList{
+ Property{Name: "J", Value: int64(2), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "json.RawMessage",
+ &struct {
+ J json.RawMessage
+ }{
+ J: json.RawMessage("rawr"),
+ },
+ &PropertyList{
+ Property{Name: "J", Value: []byte("rawr"), NoIndex: true, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "json.RawMessage to myBlob",
+ &struct {
+ B json.RawMessage
+ }{
+ B: json.RawMessage("rawr"),
+ },
+ &B2{B: myBlob("rawr")},
+ "",
+ "",
+ },
+}
+
+// checkErr returns the empty string if want and err are both zero, or if want
+// is a non-empty substring of err's string representation. Otherwise it
+// returns a non-empty description of the mismatch.
+func checkErr(want string, err error) string {
+ if err != nil {
+ got := err.Error()
+		if want == "" || !strings.Contains(got, want) {
+ return got
+ }
+ } else if want != "" {
+ return fmt.Sprintf("want error %q", want)
+ }
+ return ""
+}
+
+func TestRoundTrip(t *testing.T) {
+ for _, tc := range testCases {
+ p, err := saveEntity(testAppID, testKey0, tc.src)
+ if s := checkErr(tc.putErr, err); s != "" {
+ t.Errorf("%s: save: %s", tc.desc, s)
+ continue
+ }
+ if p == nil {
+ continue
+ }
+ var got interface{}
+ if _, ok := tc.want.(*PropertyList); ok {
+ got = new(PropertyList)
+ } else {
+ got = reflect.New(reflect.TypeOf(tc.want).Elem()).Interface()
+ }
+ err = loadEntity(got, p)
+ if s := checkErr(tc.getErr, err); s != "" {
+ t.Errorf("%s: load: %s", tc.desc, s)
+ continue
+ }
+ equal := false
+ if gotT, ok := got.(*T); ok {
+ // Round tripping a time.Time can result in a different time.Location: Local instead of UTC.
+ // We therefore test equality explicitly, instead of relying on reflect.DeepEqual.
+ equal = gotT.T.Equal(tc.want.(*T).T)
+ } else {
+ equal = reflect.DeepEqual(got, tc.want)
+ }
+ if !equal {
+ t.Errorf("%s: compare: got %v want %v", tc.desc, got, tc.want)
+ continue
+ }
+ }
+}
+
+func TestQueryConstruction(t *testing.T) {
+ tests := []struct {
+ q, exp *Query
+ err string
+ }{
+ {
+ q: NewQuery("Foo"),
+ exp: &Query{
+ kind: "Foo",
+ limit: -1,
+ },
+ },
+ {
+ // Regular filtered query with standard spacing.
+ q: NewQuery("Foo").Filter("foo >", 7),
+ exp: &Query{
+ kind: "Foo",
+ filter: []filter{
+ {
+ FieldName: "foo",
+ Op: greaterThan,
+ Value: 7,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Filtered query with no spacing.
+ q: NewQuery("Foo").Filter("foo=", 6),
+ exp: &Query{
+ kind: "Foo",
+ filter: []filter{
+ {
+ FieldName: "foo",
+ Op: equal,
+ Value: 6,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Filtered query with funky spacing.
+ q: NewQuery("Foo").Filter(" foo< ", 8),
+ exp: &Query{
+ kind: "Foo",
+ filter: []filter{
+ {
+ FieldName: "foo",
+ Op: lessThan,
+ Value: 8,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Filtered query with multicharacter op.
+ q: NewQuery("Foo").Filter("foo >=", 9),
+ exp: &Query{
+ kind: "Foo",
+ filter: []filter{
+ {
+ FieldName: "foo",
+ Op: greaterEq,
+ Value: 9,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Query with ordering.
+ q: NewQuery("Foo").Order("bar"),
+ exp: &Query{
+ kind: "Foo",
+ order: []order{
+ {
+ FieldName: "bar",
+ Direction: ascending,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Query with reverse ordering, and funky spacing.
+ q: NewQuery("Foo").Order(" - bar"),
+ exp: &Query{
+ kind: "Foo",
+ order: []order{
+ {
+ FieldName: "bar",
+ Direction: descending,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Query with an empty ordering.
+ q: NewQuery("Foo").Order(""),
+ err: "empty order",
+ },
+ {
+ // Query with a + ordering.
+ q: NewQuery("Foo").Order("+bar"),
+ err: "invalid order",
+ },
+ }
+ for i, test := range tests {
+ if test.q.err != nil {
+ got := test.q.err.Error()
+ if !strings.Contains(got, test.err) {
+ t.Errorf("%d: error mismatch: got %q want something containing %q", i, got, test.err)
+ }
+ continue
+ }
+ if !reflect.DeepEqual(test.q, test.exp) {
+ t.Errorf("%d: mismatch: got %v want %v", i, test.q, test.exp)
+ }
+ }
+}
+
+func TestStringMeaning(t *testing.T) {
+ var xx [4]interface{}
+ xx[0] = &struct {
+ X string
+ }{"xx0"}
+ xx[1] = &struct {
+ X string `datastore:",noindex"`
+ }{"xx1"}
+ xx[2] = &struct {
+ X []byte
+ }{[]byte("xx2")}
+ xx[3] = &struct {
+ X []byte `datastore:",noindex"`
+ }{[]byte("xx3")}
+
+ indexed := [4]bool{
+ true,
+ false,
+ false, // A []byte is always no-index.
+ false,
+ }
+ want := [4]pb.Property_Meaning{
+ pb.Property_NO_MEANING,
+ pb.Property_TEXT,
+ pb.Property_BLOB,
+ pb.Property_BLOB,
+ }
+
+ for i, x := range xx {
+ props, err := SaveStruct(x)
+ if err != nil {
+ t.Errorf("i=%d: SaveStruct: %v", i, err)
+ continue
+ }
+ e, err := propertiesToProto("appID", testKey0, props)
+ if err != nil {
+ t.Errorf("i=%d: propertiesToProto: %v", i, err)
+ continue
+ }
+ var p *pb.Property
+ switch {
+ case indexed[i] && len(e.Property) == 1:
+ p = e.Property[0]
+ case !indexed[i] && len(e.RawProperty) == 1:
+ p = e.RawProperty[0]
+ default:
+ t.Errorf("i=%d: EntityProto did not have expected property slice", i)
+ continue
+ }
+ if got := p.GetMeaning(); got != want[i] {
+ t.Errorf("i=%d: meaning: got %v, want %v", i, got, want[i])
+ continue
+ }
+ }
+}
+
+func TestNamespaceResetting(t *testing.T) {
+ // These environment variables are necessary because *Query.Run will
+ // call internal.FullyQualifiedAppID which checks these variables or falls
+ // back to the Metadata service that is not available in tests.
+ environ := []struct {
+ key, value string
+ }{
+ {"GAE_LONG_APP_ID", "my-app-id"},
+ {"GAE_PARTITION", "1"},
+ }
+	for i, v := range environ {
+		old := os.Getenv(v.key)
+		os.Setenv(v.key, v.value)
+		environ[i].value = old // Remember the original value so the deferred loop can restore it.
+	}
+ defer func() { // Restore old environment after the test completes.
+ for _, v := range environ {
+ if v.value == "" {
+ os.Unsetenv(v.key)
+ continue
+ }
+ os.Setenv(v.key, v.value)
+ }
+ }()
+
+ namec := make(chan *string, 1)
+ c0 := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(req *pb.Query, res *pb.QueryResult) error {
+ namec <- req.NameSpace
+ return fmt.Errorf("RPC error")
+ })
+
+ // Check that wrapping c0 in a namespace twice works correctly.
+ c1, err := appengine.Namespace(c0, "A")
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+ c2, err := appengine.Namespace(c1, "") // should act as the original context
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+
+ q := NewQuery("SomeKind")
+
+ q.Run(c0)
+ if ns := <-namec; ns != nil {
+ t.Errorf(`RunQuery with c0: ns = %q, want nil`, *ns)
+ }
+
+ q.Run(c1)
+ if ns := <-namec; ns == nil {
+ t.Error(`RunQuery with c1: ns = nil, want "A"`)
+ } else if *ns != "A" {
+ t.Errorf(`RunQuery with c1: ns = %q, want "A"`, *ns)
+ }
+
+ q.Run(c2)
+ if ns := <-namec; ns != nil {
+ t.Errorf(`RunQuery with c2: ns = %q, want nil`, *ns)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/datastore/doc.go b/vendor/google.golang.org/appengine/datastore/doc.go
new file mode 100644
index 0000000..92ffe6d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/doc.go
@@ -0,0 +1,351 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package datastore provides a client for App Engine's datastore service.
+
+
+Basic Operations
+
+Entities are the unit of storage and are associated with a key. A key
+consists of an optional parent key, a string application ID, a string kind
+(also known as an entity type), and either a StringID or an IntID. A
+StringID is also known as an entity name or key name.
+
+It is valid to create a key with a zero StringID and a zero IntID; this is
+called an incomplete key, and does not refer to any saved entity. Putting an
+entity into the datastore under an incomplete key will cause a unique key
+to be generated for that entity, with a non-zero IntID.
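+
+For example, a rough sketch (the Item type and kind name are illustrative, and
+ctx is an App Engine context as in the examples below):
+
+	type Item struct{ Name string }
+	k, err := datastore.Put(ctx, datastore.NewIncompleteKey(ctx, "Item", nil), &Item{Name: "x"})
+	if err == nil {
+		// k is now a complete key with a datastore-generated, non-zero IntID.
+		_ = k.IntID()
+	}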
+
+An entity's contents are a mapping from case-sensitive field names to values.
+Valid value types are:
+ - signed integers (int, int8, int16, int32 and int64),
+ - bool,
+ - string,
+ - float32 and float64,
+ - []byte (up to 1 megabyte in length),
+ - any type whose underlying type is one of the above predeclared types,
+ - ByteString,
+ - *Key,
+ - time.Time (stored with microsecond precision),
+ - appengine.BlobKey,
+ - appengine.GeoPoint,
+ - structs whose fields are all valid value types,
+ - slices of any of the above.
+
+Slices of structs are valid, as are structs that contain slices. However, if
+one struct contains another, then at most one of those can be repeated. This
+disqualifies recursively defined struct types: any struct T that (directly or
+indirectly) contains a []T.
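+
+For example, a type like the following would be rejected, because flattening
+it would produce a slice of slices (the type is purely illustrative):
+
+	type Lines struct {
+		Paragraphs []struct {
+			Words []string
+		}
+	}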
+
+The Get and Put functions load and save an entity's contents. An entity's
+contents are typically represented by a struct pointer.
+
+Example code:
+
+ type Entity struct {
+ Value string
+ }
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ ctx := appengine.NewContext(r)
+
+ k := datastore.NewKey(ctx, "Entity", "stringID", 0, nil)
+ e := new(Entity)
+ if err := datastore.Get(ctx, k, e); err != nil {
+ http.Error(w, err.Error(), 500)
+ return
+ }
+
+ old := e.Value
+ e.Value = r.URL.Path
+
+ if _, err := datastore.Put(ctx, k, e); err != nil {
+ http.Error(w, err.Error(), 500)
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ fmt.Fprintf(w, "old=%q\nnew=%q\n", old, e.Value)
+ }
+
+GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and
+Delete functions. They take a []*Key instead of a *Key, and may return an
+appengine.MultiError when encountering partial failure.
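+
+A rough sketch of batch loading with partial-failure handling, reusing the
+Entity type from the example above (k1 and k2 are assumed to be existing
+keys):
+
+	keys := []*datastore.Key{k1, k2}
+	entities := make([]Entity, len(keys))
+	if err := datastore.GetMulti(ctx, keys, entities); err != nil {
+		if me, ok := err.(appengine.MultiError); ok {
+			for i, e := range me {
+				if e != nil {
+					fmt.Printf("keys[%d]: %v\n", i, e)
+				}
+			}
+		}
+	}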
+
+
+Properties
+
+An entity's contents can be represented by a variety of types. These are
+typically struct pointers, but can also be any type that implements the
+PropertyLoadSaver interface. If using a struct pointer, you do not have to
+explicitly implement the PropertyLoadSaver interface; the datastore will
+automatically convert via reflection. If a struct pointer does implement that
+interface then those methods will be used in preference to the default
+behavior for struct pointers. Struct pointers are more strongly typed and are
+easier to use; PropertyLoadSavers are more flexible.
+
+The actual types passed do not have to match between Get and Put calls or even
+across different App Engine requests. It is valid to put a *PropertyList and
+get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1.
+Conceptually, any entity is saved as a sequence of properties, and is loaded
+into the destination value on a property-by-property basis. When loading into
+a struct pointer, an entity that cannot be completely represented (such as a
+missing field) will result in an ErrFieldMismatch error but it is up to the
+caller whether this error is fatal, recoverable or ignorable.
+
+By default, for struct pointers, all properties are potentially indexed, and
+the property name is the same as the field name (and hence must start with an
+upper case letter). Fields may have a `datastore:"name,options"` tag. The tag
+name is the property name, which must be one or more valid Go identifiers
+joined by ".", but may start with a lower case letter. An empty tag name means
+to just use the field name. A "-" tag name means that the datastore will
+ignore that field. If options is "noindex" then the field will not be indexed.
+If options is "" then the comma may be omitted. There are no other
+recognized options.
+
+Fields (except for []byte) are indexed by default. Strings longer than 1500
+bytes cannot be indexed; fields used to store long strings should be
+tagged with "noindex". Similarly, ByteStrings longer than 1500 bytes cannot be
+indexed.
+
+Example code:
+
+ // A and B are renamed to a and b.
+ // A, C and J are not indexed.
+ // D's tag is equivalent to having no tag at all (E).
+ // I is ignored entirely by the datastore.
+ // J has tag information for both the datastore and json packages.
+ type TaggedStruct struct {
+ A int `datastore:"a,noindex"`
+ B int `datastore:"b"`
+ C int `datastore:",noindex"`
+ D int `datastore:""`
+ E int
+ I int `datastore:"-"`
+ J int `datastore:",noindex" json:"j"`
+ }
+
+
+Structured Properties
+
+If the struct pointed to contains other structs, then the nested or embedded
+structs are flattened. For example, given these definitions:
+
+ type Inner1 struct {
+ W int32
+ X string
+ }
+
+ type Inner2 struct {
+ Y float64
+ }
+
+ type Inner3 struct {
+ Z bool
+ }
+
+ type Outer struct {
+ A int16
+ I []Inner1
+ J Inner2
+ Inner3
+ }
+
+then an Outer's properties would be equivalent to those of:
+
+ type OuterEquivalent struct {
+ A int16
+ IDotW []int32 `datastore:"I.W"`
+ IDotX []string `datastore:"I.X"`
+ JDotY float64 `datastore:"J.Y"`
+ Z bool
+ }
+
+If Outer's embedded Inner3 field was tagged as `datastore:"Foo"` then the
+equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`.
+
+If an outer struct is tagged "noindex" then all of its implicit flattened
+fields are effectively "noindex".
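+
+For example (a sketch; Archived is an illustrative type reusing Inner1 from
+above), both Hidden.W and Hidden.X below are effectively "noindex":
+
+	type Archived struct {
+		Hidden Inner1 `datastore:",noindex"`
+	}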
+
+
+The PropertyLoadSaver Interface
+
+An entity's contents can also be represented by any type that implements the
+PropertyLoadSaver interface. This type may be a struct pointer, but it does
+not have to be. The datastore package will call Load when getting the entity's
+contents, and Save when putting the entity's contents.
+Possible uses include deriving non-stored fields, verifying fields, or indexing
+a field only if its value is positive.
+
+Example code:
+
+ type CustomPropsExample struct {
+ I, J int
+ // Sum is not stored, but should always be equal to I + J.
+ Sum int `datastore:"-"`
+ }
+
+ func (x *CustomPropsExample) Load(ps []datastore.Property) error {
+ // Load I and J as usual.
+ if err := datastore.LoadStruct(x, ps); err != nil {
+ return err
+ }
+ // Derive the Sum field.
+ x.Sum = x.I + x.J
+ return nil
+ }
+
+ func (x *CustomPropsExample) Save() ([]datastore.Property, error) {
+ // Validate the Sum field.
+ if x.Sum != x.I + x.J {
+			return nil, errors.New("CustomPropsExample has inconsistent sum")
+ }
+ // Save I and J as usual. The code below is equivalent to calling
+ // "return datastore.SaveStruct(x)", but is done manually for
+ // demonstration purposes.
+ return []datastore.Property{
+ {
+ Name: "I",
+ Value: int64(x.I),
+ },
+ {
+ Name: "J",
+ Value: int64(x.J),
+ },
+ }
+ }
+
+The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
+arbitrary entity's contents.
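+
+For example, a rough sketch of inspecting an arbitrary entity (k and ctx as in
+the earlier examples):
+
+	var props datastore.PropertyList
+	if err := datastore.Get(ctx, k, &props); err == nil {
+		for _, p := range props {
+			fmt.Printf("%s = %v (noindex=%t)\n", p.Name, p.Value, p.NoIndex)
+		}
+	}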
+
+
+Queries
+
+Queries retrieve entities based on their properties or key's ancestry. Running
+a query yields an iterator of results: either keys or (key, entity) pairs.
+Queries are re-usable and it is safe to call Query.Run from concurrent
+goroutines. Iterators are not safe for concurrent use.
+
+Queries are immutable, and are either created by calling NewQuery, or derived
+from an existing query by calling a method like Filter or Order that returns a
+new query value. A query is typically constructed by calling NewQuery followed
+by a chain of zero or more such methods. These methods are:
+ - Ancestor and Filter constrain the entities returned by running a query.
+ - Order affects the order in which they are returned.
+ - Project constrains the fields returned.
+ - Distinct de-duplicates projected entities.
+ - KeysOnly makes the iterator return only keys, not (key, entity) pairs.
+ - Start, End, Offset and Limit define which sub-sequence of matching entities
+ to return. Start and End take cursors, Offset and Limit take integers. Start
+ and Offset affect the first result, End and Limit affect the last result.
+ If both Start and Offset are set, then the offset is relative to Start.
+ If both End and Limit are set, then the earliest constraint wins. Limit is
+ relative to Start+Offset, not relative to End. As a special case, a
+ negative limit means unlimited.
+
+Example code:
+
+ type Widget struct {
+ Description string
+ Price int
+ }
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ ctx := appengine.NewContext(r)
+ q := datastore.NewQuery("Widget").
+ Filter("Price <", 1000).
+ Order("-Price")
+ b := new(bytes.Buffer)
+ for t := q.Run(ctx); ; {
+ var x Widget
+ key, err := t.Next(&x)
+ if err == datastore.Done {
+ break
+ }
+ if err != nil {
+ serveError(ctx, w, err)
+ return
+ }
+ fmt.Fprintf(b, "Key=%v\nWidget=%#v\n\n", key, x)
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ io.Copy(w, b)
+ }
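+
+A keys-only query with an offset and a limit might look like the following
+sketch (the kind, offset and limit are illustrative; ctx, w and serveError are
+as in the handler above):
+
+	q := datastore.NewQuery("Widget").Order("Price").Offset(5).Limit(10).KeysOnly()
+	keys, err := q.GetAll(ctx, nil)
+	if err != nil {
+		serveError(ctx, w, err)
+		return
+	}
+	fmt.Fprintf(w, "found %d keys\n", len(keys))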
+
+
+Transactions
+
+RunInTransaction runs a function in a transaction.
+
+Example code:
+
+ type Counter struct {
+ Count int
+ }
+
+ func inc(ctx context.Context, key *datastore.Key) (int, error) {
+ var x Counter
+ if err := datastore.Get(ctx, key, &x); err != nil && err != datastore.ErrNoSuchEntity {
+ return 0, err
+ }
+ x.Count++
+ if _, err := datastore.Put(ctx, key, &x); err != nil {
+ return 0, err
+ }
+ return x.Count, nil
+ }
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ ctx := appengine.NewContext(r)
+ var count int
+ err := datastore.RunInTransaction(ctx, func(ctx context.Context) error {
+ var err1 error
+ count, err1 = inc(ctx, datastore.NewKey(ctx, "Counter", "singleton", 0, nil))
+ return err1
+ }, nil)
+ if err != nil {
+ serveError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ fmt.Fprintf(w, "Count=%d", count)
+ }
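+
+A transaction that touches entities in more than one entity group can request
+cross-group semantics through the options argument; a minimal sketch (the
+function body is elided):
+
+	err := datastore.RunInTransaction(ctx, func(ctx context.Context) error {
+		// ... read and write entities in several entity groups ...
+		return nil
+	}, &datastore.TransactionOptions{XG: true})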
+
+
+Metadata
+
+The datastore package provides access to some of App Engine's datastore
+metadata. This metadata includes information about the entity groups,
+namespaces, entity kinds, and properties in the datastore, as well as the
+property representations for each property.
+
+Example code:
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ // Print all the kinds in the datastore, with all the indexed
+ // properties (and their representations) for each.
+ ctx := appengine.NewContext(r)
+
+ kinds, err := datastore.Kinds(ctx)
+ if err != nil {
+ serveError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ for _, kind := range kinds {
+ fmt.Fprintf(w, "%s:\n", kind)
+ props, err := datastore.KindProperties(ctx, kind)
+ if err != nil {
+ fmt.Fprintln(w, "\t(unable to retrieve properties)")
+ continue
+ }
+ for p, rep := range props {
+			fmt.Fprintf(w, "\t-%s (%s)\n", p, strings.Join(rep, ", "))
+ }
+ }
+ }
+*/
+package datastore // import "google.golang.org/appengine/datastore"
diff --git a/vendor/google.golang.org/appengine/datastore/key.go b/vendor/google.golang.org/appengine/datastore/key.go
new file mode 100644
index 0000000..ac1f002
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/key.go
@@ -0,0 +1,309 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+// Key represents the datastore key for a stored entity, and is immutable.
+type Key struct {
+ kind string
+ stringID string
+ intID int64
+ parent *Key
+ appID string
+ namespace string
+}
+
+// Kind returns the key's kind (also known as entity type).
+func (k *Key) Kind() string {
+ return k.kind
+}
+
+// StringID returns the key's string ID (also known as an entity name or key
+// name), which may be "".
+func (k *Key) StringID() string {
+ return k.stringID
+}
+
+// IntID returns the key's integer ID, which may be 0.
+func (k *Key) IntID() int64 {
+ return k.intID
+}
+
+// Parent returns the key's parent key, which may be nil.
+func (k *Key) Parent() *Key {
+ return k.parent
+}
+
+// AppID returns the key's application ID.
+func (k *Key) AppID() string {
+ return k.appID
+}
+
+// Namespace returns the key's namespace.
+func (k *Key) Namespace() string {
+ return k.namespace
+}
+
+// Incomplete returns whether the key does not refer to a stored entity.
+// In particular, whether the key has a zero StringID and a zero IntID.
+func (k *Key) Incomplete() bool {
+ return k.stringID == "" && k.intID == 0
+}
+
+// valid returns whether the key is valid.
+func (k *Key) valid() bool {
+ if k == nil {
+ return false
+ }
+ for ; k != nil; k = k.parent {
+ if k.kind == "" || k.appID == "" {
+ return false
+ }
+ if k.stringID != "" && k.intID != 0 {
+ return false
+ }
+ if k.parent != nil {
+ if k.parent.Incomplete() {
+ return false
+ }
+ if k.parent.appID != k.appID || k.parent.namespace != k.namespace {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Equal returns whether two keys are equal.
+func (k *Key) Equal(o *Key) bool {
+ for k != nil && o != nil {
+ if k.kind != o.kind || k.stringID != o.stringID || k.intID != o.intID || k.appID != o.appID || k.namespace != o.namespace {
+ return false
+ }
+ k, o = k.parent, o.parent
+ }
+ return k == o
+}
+
+// root returns the furthest ancestor of a key, which may be itself.
+func (k *Key) root() *Key {
+ for k.parent != nil {
+ k = k.parent
+ }
+ return k
+}
+
+// marshal marshals the key's string representation to the buffer.
+func (k *Key) marshal(b *bytes.Buffer) {
+ if k.parent != nil {
+ k.parent.marshal(b)
+ }
+ b.WriteByte('/')
+ b.WriteString(k.kind)
+ b.WriteByte(',')
+ if k.stringID != "" {
+ b.WriteString(k.stringID)
+ } else {
+ b.WriteString(strconv.FormatInt(k.intID, 10))
+ }
+}
+
+// String returns a string representation of the key.
+func (k *Key) String() string {
+ if k == nil {
+ return ""
+ }
+ b := bytes.NewBuffer(make([]byte, 0, 512))
+ k.marshal(b)
+ return b.String()
+}
+
+type gobKey struct {
+ Kind string
+ StringID string
+ IntID int64
+ Parent *gobKey
+ AppID string
+ Namespace string
+}
+
+func keyToGobKey(k *Key) *gobKey {
+ if k == nil {
+ return nil
+ }
+ return &gobKey{
+ Kind: k.kind,
+ StringID: k.stringID,
+ IntID: k.intID,
+ Parent: keyToGobKey(k.parent),
+ AppID: k.appID,
+ Namespace: k.namespace,
+ }
+}
+
+func gobKeyToKey(gk *gobKey) *Key {
+ if gk == nil {
+ return nil
+ }
+ return &Key{
+ kind: gk.Kind,
+ stringID: gk.StringID,
+ intID: gk.IntID,
+ parent: gobKeyToKey(gk.Parent),
+ appID: gk.AppID,
+ namespace: gk.Namespace,
+ }
+}
+
+func (k *Key) GobEncode() ([]byte, error) {
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func (k *Key) GobDecode(buf []byte) error {
+ gk := new(gobKey)
+ if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {
+ return err
+ }
+ *k = *gobKeyToKey(gk)
+ return nil
+}
+
+func (k *Key) MarshalJSON() ([]byte, error) {
+ return []byte(`"` + k.Encode() + `"`), nil
+}
+
+func (k *Key) UnmarshalJSON(buf []byte) error {
+ if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' {
+ return errors.New("datastore: bad JSON key")
+ }
+ k2, err := DecodeKey(string(buf[1 : len(buf)-1]))
+ if err != nil {
+ return err
+ }
+ *k = *k2
+ return nil
+}
+
+// Encode returns an opaque representation of the key
+// suitable for use in HTML and URLs.
+// This is compatible with the Python and Java runtimes.
+func (k *Key) Encode() string {
+ ref := keyToProto("", k)
+
+ b, err := proto.Marshal(ref)
+ if err != nil {
+ panic(err)
+ }
+
+ // Trailing padding is stripped.
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// DecodeKey decodes a key from the opaque representation returned by Encode.
+func DecodeKey(encoded string) (*Key, error) {
+ // Re-add padding.
+ if m := len(encoded) % 4; m != 0 {
+ encoded += strings.Repeat("=", 4-m)
+ }
+
+ b, err := base64.URLEncoding.DecodeString(encoded)
+ if err != nil {
+ return nil, err
+ }
+
+ ref := new(pb.Reference)
+ if err := proto.Unmarshal(b, ref); err != nil {
+ return nil, err
+ }
+
+ return protoToKey(ref)
+}
+
+// NewIncompleteKey creates a new incomplete key.
+// kind cannot be empty.
+func NewIncompleteKey(c context.Context, kind string, parent *Key) *Key {
+ return NewKey(c, kind, "", 0, parent)
+}
+
+// NewKey creates a new key.
+// kind cannot be empty.
+// Either one or both of stringID and intID must be zero. If both are zero,
+// the key returned is incomplete.
+// parent must either be a complete key or nil.
+func NewKey(c context.Context, kind, stringID string, intID int64, parent *Key) *Key {
+ // If there's a parent key, use its namespace.
+ // Otherwise, use any namespace attached to the context.
+ var namespace string
+ if parent != nil {
+ namespace = parent.namespace
+ } else {
+ namespace = internal.NamespaceFromContext(c)
+ }
+
+ return &Key{
+ kind: kind,
+ stringID: stringID,
+ intID: intID,
+ parent: parent,
+ appID: internal.FullyQualifiedAppID(c),
+ namespace: namespace,
+ }
+}
+
+// AllocateIDs returns a range of n integer IDs with the given kind and parent
+// combination. kind cannot be empty; parent may be nil. The IDs in the range
+// returned will not be used by the datastore's automatic ID sequence generator
+// and may be used with NewKey without conflict.
+//
+// The range is inclusive at the low end and exclusive at the high end. In
+// other words, valid intIDs x satisfy low <= x && x < high.
+//
+// If no error is returned, low + n == high.
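+//
+// A rough usage sketch (the kind and count are illustrative):
+//
+//	low, high, err := AllocateIDs(c, "Entity", nil, 10)
+//	if err == nil {
+//		for id := low; id < high; id++ {
+//			_ = NewKey(c, "Entity", "", id, nil)
+//		}
+//	}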
+func AllocateIDs(c context.Context, kind string, parent *Key, n int) (low, high int64, err error) {
+ if kind == "" {
+ return 0, 0, errors.New("datastore: AllocateIDs given an empty kind")
+ }
+ if n < 0 {
+ return 0, 0, fmt.Errorf("datastore: AllocateIDs given a negative count: %d", n)
+ }
+ if n == 0 {
+ return 0, 0, nil
+ }
+ req := &pb.AllocateIdsRequest{
+ ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)),
+ Size: proto.Int64(int64(n)),
+ }
+ res := &pb.AllocateIdsResponse{}
+ if err := internal.Call(c, "datastore_v3", "AllocateIds", req, res); err != nil {
+ return 0, 0, err
+ }
+ // The protobuf is inclusive at both ends. Idiomatic Go (e.g. slices, for loops)
+ // is inclusive at the low end and exclusive at the high end, so we add 1.
+ low = res.GetStart()
+ high = res.GetEnd() + 1
+ if low+int64(n) != high {
+ return 0, 0, fmt.Errorf("datastore: internal error: could not allocate %d IDs", n)
+ }
+ return low, high, nil
+}
diff --git a/vendor/google.golang.org/appengine/datastore/key_test.go b/vendor/google.golang.org/appengine/datastore/key_test.go
new file mode 100644
index 0000000..1fb3e97
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/key_test.go
@@ -0,0 +1,204 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/json"
+ "testing"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+func TestKeyEncoding(t *testing.T) {
+ testCases := []struct {
+ desc string
+ key *Key
+ exp string
+ }{
+ {
+ desc: "A simple key with an int ID",
+ key: &Key{
+ kind: "Person",
+ intID: 1,
+ appID: "glibrary",
+ },
+ exp: "aghnbGlicmFyeXIMCxIGUGVyc29uGAEM",
+ },
+ {
+ desc: "A simple key with a string ID",
+ key: &Key{
+ kind: "Graph",
+ stringID: "graph:7-day-active",
+ appID: "glibrary",
+ },
+ exp: "aghnbGlicmFyeXIdCxIFR3JhcGgiEmdyYXBoOjctZGF5LWFjdGl2ZQw",
+ },
+ {
+ desc: "A key with a parent",
+ key: &Key{
+ kind: "WordIndex",
+ intID: 1033,
+ parent: &Key{
+ kind: "WordIndex",
+ intID: 1020032,
+ appID: "glibrary",
+ },
+ appID: "glibrary",
+ },
+ exp: "aghnbGlicmFyeXIhCxIJV29yZEluZGV4GIChPgwLEglXb3JkSW5kZXgYiQgM",
+ },
+ }
+ for _, tc := range testCases {
+ enc := tc.key.Encode()
+ if enc != tc.exp {
+ t.Errorf("%s: got %q, want %q", tc.desc, enc, tc.exp)
+ }
+
+ key, err := DecodeKey(tc.exp)
+ if err != nil {
+ t.Errorf("%s: failed decoding key: %v", tc.desc, err)
+ continue
+ }
+ if !key.Equal(tc.key) {
+ t.Errorf("%s: decoded key %v, want %v", tc.desc, key, tc.key)
+ }
+ }
+}
+
+func TestKeyGob(t *testing.T) {
+ k := &Key{
+ kind: "Gopher",
+ intID: 3,
+ parent: &Key{
+ kind: "Mom",
+ stringID: "narwhal",
+ appID: "gopher-con",
+ },
+ appID: "gopher-con",
+ }
+
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(k); err != nil {
+ t.Fatalf("gob encode failed: %v", err)
+ }
+
+ k2 := new(Key)
+ if err := gob.NewDecoder(buf).Decode(k2); err != nil {
+ t.Fatalf("gob decode failed: %v", err)
+ }
+ if !k2.Equal(k) {
+ t.Errorf("gob round trip of %v produced %v", k, k2)
+ }
+}
+
+func TestNilKeyGob(t *testing.T) {
+ type S struct {
+ Key *Key
+ }
+ s1 := new(S)
+
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(s1); err != nil {
+ t.Fatalf("gob encode failed: %v", err)
+ }
+
+ s2 := new(S)
+ if err := gob.NewDecoder(buf).Decode(s2); err != nil {
+ t.Fatalf("gob decode failed: %v", err)
+ }
+ if s2.Key != nil {
+ t.Errorf("gob round trip of nil key produced %v", s2.Key)
+ }
+}
+
+func TestKeyJSON(t *testing.T) {
+ k := &Key{
+ kind: "Gopher",
+ intID: 2,
+ parent: &Key{
+ kind: "Mom",
+ stringID: "narwhal",
+ appID: "gopher-con",
+ },
+ appID: "gopher-con",
+ }
+ exp := `"` + k.Encode() + `"`
+
+ buf, err := json.Marshal(k)
+ if err != nil {
+ t.Fatalf("json.Marshal failed: %v", err)
+ }
+ if s := string(buf); s != exp {
+ t.Errorf("JSON encoding of key %v: got %q, want %q", k, s, exp)
+ }
+
+ k2 := new(Key)
+ if err := json.Unmarshal(buf, k2); err != nil {
+ t.Fatalf("json.Unmarshal failed: %v", err)
+ }
+ if !k2.Equal(k) {
+ t.Errorf("JSON round trip of %v produced %v", k, k2)
+ }
+}
+
+func TestNilKeyJSON(t *testing.T) {
+ type S struct {
+ Key *Key
+ }
+ s1 := new(S)
+
+ buf, err := json.Marshal(s1)
+ if err != nil {
+ t.Fatalf("json.Marshal failed: %v", err)
+ }
+
+ s2 := new(S)
+ if err := json.Unmarshal(buf, s2); err != nil {
+ t.Fatalf("json.Unmarshal failed: %v", err)
+ }
+ if s2.Key != nil {
+ t.Errorf("JSON round trip of nil key produced %v", s2.Key)
+ }
+}
+
+func TestIncompleteKeyWithParent(t *testing.T) {
+ c := internal.WithAppIDOverride(context.Background(), "s~some-app")
+
+ // fadduh is a complete key.
+ fadduh := NewKey(c, "Person", "", 1, nil)
+ if fadduh.Incomplete() {
+ t.Fatalf("fadduh is incomplete")
+ }
+
+ // robert is an incomplete key with fadduh as a parent.
+ robert := NewIncompleteKey(c, "Person", fadduh)
+ if !robert.Incomplete() {
+ t.Fatalf("robert is complete")
+ }
+
+ // Both should be valid keys.
+ if !fadduh.valid() {
+ t.Errorf("fadduh is invalid: %v", fadduh)
+ }
+ if !robert.valid() {
+ t.Errorf("robert is invalid: %v", robert)
+ }
+}
+
+func TestNamespace(t *testing.T) {
+ key := &Key{
+ kind: "Person",
+ intID: 1,
+ appID: "s~some-app",
+ namespace: "mynamespace",
+ }
+ if g, w := key.Namespace(), "mynamespace"; g != w {
+ t.Errorf("key.Namespace() = %q, want %q", g, w)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/datastore/load.go b/vendor/google.golang.org/appengine/datastore/load.go
new file mode 100644
index 0000000..3f3c80c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/load.go
@@ -0,0 +1,334 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+
+ "google.golang.org/appengine"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var (
+ typeOfBlobKey = reflect.TypeOf(appengine.BlobKey(""))
+ typeOfByteSlice = reflect.TypeOf([]byte(nil))
+ typeOfByteString = reflect.TypeOf(ByteString(nil))
+ typeOfGeoPoint = reflect.TypeOf(appengine.GeoPoint{})
+ typeOfTime = reflect.TypeOf(time.Time{})
+)
+
+// typeMismatchReason returns a string explaining why the property p could not
+// be stored in an entity field of type v.Type().
+func typeMismatchReason(p Property, v reflect.Value) string {
+ entityType := "empty"
+ switch p.Value.(type) {
+ case int64:
+ entityType = "int"
+ case bool:
+ entityType = "bool"
+ case string:
+ entityType = "string"
+ case float64:
+ entityType = "float"
+ case *Key:
+ entityType = "*datastore.Key"
+ case time.Time:
+ entityType = "time.Time"
+ case appengine.BlobKey:
+ entityType = "appengine.BlobKey"
+ case appengine.GeoPoint:
+ entityType = "appengine.GeoPoint"
+ case ByteString:
+ entityType = "datastore.ByteString"
+ case []byte:
+ entityType = "[]byte"
+ }
+ return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type())
+}
+
+type propertyLoader struct {
+ // m holds the number of times a substruct field like "Foo.Bar.Baz" has
+ // been seen so far. The map is constructed lazily.
+ m map[string]int
+}
+
+func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, requireSlice bool) string {
+ var v reflect.Value
+ // Traverse a struct's struct-typed fields.
+ for name := p.Name; ; {
+ decoder, ok := codec.byName[name]
+ if !ok {
+ return "no such struct field"
+ }
+ v = structValue.Field(decoder.index)
+ if !v.IsValid() {
+ return "no such struct field"
+ }
+ if !v.CanSet() {
+ return "cannot set struct field"
+ }
+
+ if decoder.substructCodec == nil {
+ break
+ }
+
+ if v.Kind() == reflect.Slice {
+ if l.m == nil {
+ l.m = make(map[string]int)
+ }
+ index := l.m[p.Name]
+ l.m[p.Name] = index + 1
+ for v.Len() <= index {
+ v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))
+ }
+ structValue = v.Index(index)
+ requireSlice = false
+ } else {
+ structValue = v
+ }
+ // Strip the "I." from "I.X".
+ name = name[len(codec.byIndex[decoder.index].name):]
+ codec = decoder.substructCodec
+ }
+
+ var slice reflect.Value
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+ slice = v
+ v = reflect.New(v.Type().Elem()).Elem()
+ } else if requireSlice {
+ return "multiple-valued property requires a slice field type"
+ }
+
+ // Convert indexValues to a Go value with a meaning derived from the
+ // destination type.
+ pValue := p.Value
+ if iv, ok := pValue.(indexValue); ok {
+ meaning := pb.Property_NO_MEANING
+ switch v.Type() {
+ case typeOfBlobKey:
+ meaning = pb.Property_BLOBKEY
+ case typeOfByteSlice:
+ meaning = pb.Property_BLOB
+ case typeOfByteString:
+ meaning = pb.Property_BYTESTRING
+ case typeOfGeoPoint:
+ meaning = pb.Property_GEORSS_POINT
+ case typeOfTime:
+ meaning = pb.Property_GD_WHEN
+ }
+ var err error
+ pValue, err = propValue(iv.value, meaning)
+ if err != nil {
+ return err.Error()
+ }
+ }
+
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x, ok := pValue.(int64)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ if v.OverflowInt(x) {
+ return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
+ }
+ v.SetInt(x)
+ case reflect.Bool:
+ x, ok := pValue.(bool)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ v.SetBool(x)
+ case reflect.String:
+ switch x := pValue.(type) {
+ case appengine.BlobKey:
+ v.SetString(string(x))
+ case ByteString:
+ v.SetString(string(x))
+ case string:
+ v.SetString(x)
+ default:
+ if pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ }
+ case reflect.Float32, reflect.Float64:
+ x, ok := pValue.(float64)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ if v.OverflowFloat(x) {
+ return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
+ }
+ v.SetFloat(x)
+ case reflect.Ptr:
+ x, ok := pValue.(*Key)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ if _, ok := v.Interface().(*Key); !ok {
+ return typeMismatchReason(p, v)
+ }
+ v.Set(reflect.ValueOf(x))
+ case reflect.Struct:
+ switch v.Type() {
+ case typeOfTime:
+ x, ok := pValue.(time.Time)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ v.Set(reflect.ValueOf(x))
+ case typeOfGeoPoint:
+ x, ok := pValue.(appengine.GeoPoint)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ v.Set(reflect.ValueOf(x))
+ default:
+ return typeMismatchReason(p, v)
+ }
+ case reflect.Slice:
+ x, ok := pValue.([]byte)
+ if !ok {
+ if y, yok := pValue.(ByteString); yok {
+ x, ok = []byte(y), true
+ }
+ }
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ if v.Type().Elem().Kind() != reflect.Uint8 {
+ return typeMismatchReason(p, v)
+ }
+ v.SetBytes(x)
+ default:
+ return typeMismatchReason(p, v)
+ }
+ if slice.IsValid() {
+ slice.Set(reflect.Append(slice, v))
+ }
+ return ""
+}
+
+// loadEntity loads an EntityProto into PropertyLoadSaver or struct pointer.
+func loadEntity(dst interface{}, src *pb.EntityProto) (err error) {
+ props, err := protoToProperties(src)
+ if err != nil {
+ return err
+ }
+ if e, ok := dst.(PropertyLoadSaver); ok {
+ return e.Load(props)
+ }
+ return LoadStruct(dst, props)
+}
+
+func (s structPLS) Load(props []Property) error {
+ var fieldName, reason string
+ var l propertyLoader
+ for _, p := range props {
+ if errStr := l.load(s.codec, s.v, p, p.Multiple); errStr != "" {
+ // We don't return early, as we try to load as many properties as possible.
+ // It is valid to load an entity into a struct that cannot fully represent it.
+ // That case returns an error, but the caller is free to ignore it.
+ fieldName, reason = p.Name, errStr
+ }
+ }
+ if reason != "" {
+ return &ErrFieldMismatch{
+ StructType: s.v.Type(),
+ FieldName: fieldName,
+ Reason: reason,
+ }
+ }
+ return nil
+}
+
+func protoToProperties(src *pb.EntityProto) ([]Property, error) {
+ props, rawProps := src.Property, src.RawProperty
+ out := make([]Property, 0, len(props)+len(rawProps))
+ for {
+ var (
+ x *pb.Property
+ noIndex bool
+ )
+ if len(props) > 0 {
+ x, props = props[0], props[1:]
+ } else if len(rawProps) > 0 {
+ x, rawProps = rawProps[0], rawProps[1:]
+ noIndex = true
+ } else {
+ break
+ }
+
+ var value interface{}
+ if x.Meaning != nil && *x.Meaning == pb.Property_INDEX_VALUE {
+ value = indexValue{x.Value}
+ } else {
+ var err error
+ value, err = propValue(x.Value, x.GetMeaning())
+ if err != nil {
+ return nil, err
+ }
+ }
+ out = append(out, Property{
+ Name: x.GetName(),
+ Value: value,
+ NoIndex: noIndex,
+ Multiple: x.GetMultiple(),
+ })
+ }
+ return out, nil
+}
+
+// propValue returns a Go value that combines the raw PropertyValue with a
+// meaning. For example, an Int64Value with GD_WHEN becomes a time.Time.
+func propValue(v *pb.PropertyValue, m pb.Property_Meaning) (interface{}, error) {
+ switch {
+ case v.Int64Value != nil:
+ if m == pb.Property_GD_WHEN {
+ return fromUnixMicro(*v.Int64Value), nil
+ } else {
+ return *v.Int64Value, nil
+ }
+ case v.BooleanValue != nil:
+ return *v.BooleanValue, nil
+ case v.StringValue != nil:
+ if m == pb.Property_BLOB {
+ return []byte(*v.StringValue), nil
+ } else if m == pb.Property_BLOBKEY {
+ return appengine.BlobKey(*v.StringValue), nil
+ } else if m == pb.Property_BYTESTRING {
+ return ByteString(*v.StringValue), nil
+ } else {
+ return *v.StringValue, nil
+ }
+ case v.DoubleValue != nil:
+ return *v.DoubleValue, nil
+ case v.Referencevalue != nil:
+ key, err := referenceValueToKey(v.Referencevalue)
+ if err != nil {
+ return nil, err
+ }
+ return key, nil
+ case v.Pointvalue != nil:
+ // NOTE: Strangely, latitude maps to X, longitude to Y.
+ return appengine.GeoPoint{Lat: v.Pointvalue.GetX(), Lng: v.Pointvalue.GetY()}, nil
+ }
+ return nil, nil
+}
+
+// indexValue is a Property value that is created when entities are loaded from
+// an index, such as from a projection query.
+//
+// Such Property values do not contain all of the metadata required to be
+// faithfully represented as a Go value, and are instead represented as an
+// opaque indexValue. Load the properties into a concrete struct type (e.g. by
+// passing a struct pointer to Iterator.Next) to reconstruct actual Go values
+// of type int, string, time.Time, etc.
+type indexValue struct {
+ value *pb.PropertyValue
+}
diff --git a/vendor/google.golang.org/appengine/datastore/metadata.go b/vendor/google.golang.org/appengine/datastore/metadata.go
new file mode 100644
index 0000000..67995f9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/metadata.go
@@ -0,0 +1,78 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import "golang.org/x/net/context"
+
+// Datastore kinds for the metadata entities.
+const (
+ namespaceKind = "__namespace__"
+ kindKind = "__kind__"
+ propertyKind = "__property__"
+)
+
+// Namespaces returns all the datastore namespaces.
+func Namespaces(ctx context.Context) ([]string, error) {
+ // TODO(djd): Support range queries.
+ q := NewQuery(namespaceKind).KeysOnly()
+ keys, err := q.GetAll(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+ // The empty namespace key uses a numeric ID (==1), but luckily
+ // the string ID defaults to "" for numeric IDs anyway.
+ return keyNames(keys), nil
+}
+
+// Kinds returns the names of all the kinds in the current namespace.
+func Kinds(ctx context.Context) ([]string, error) {
+ // TODO(djd): Support range queries.
+ q := NewQuery(kindKind).KeysOnly()
+ keys, err := q.GetAll(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+ return keyNames(keys), nil
+}
+
+// keyNames returns a slice of the provided keys' names (string IDs).
+func keyNames(keys []*Key) []string {
+ n := make([]string, 0, len(keys))
+ for _, k := range keys {
+ n = append(n, k.StringID())
+ }
+ return n
+}
+
+// KindProperties returns all the indexed properties for the given kind.
+// The properties are returned as a map of property names to a slice of the
+// representation types. The representation types for the supported Go property
+// types are:
+// "INT64": signed integers and time.Time
+// "DOUBLE": float32 and float64
+// "BOOLEAN": bool
+// "STRING": string, []byte and ByteString
+// "POINT": appengine.GeoPoint
+// "REFERENCE": *Key
+// "USER": (not used in the Go runtime)
+func KindProperties(ctx context.Context, kind string) (map[string][]string, error) {
+ // TODO(djd): Support range queries.
+ kindKey := NewKey(ctx, kindKind, kind, 0, nil)
+ q := NewQuery(propertyKind).Ancestor(kindKey)
+
+ propMap := map[string][]string{}
+ props := []struct {
+		Repr []string `datastore:"property_representation"`
+ }{}
+
+ keys, err := q.GetAll(ctx, &props)
+ if err != nil {
+ return nil, err
+ }
+ for i, p := range props {
+ propMap[keys[i].StringID()] = p.Repr
+ }
+ return propMap, nil
+}
diff --git a/vendor/google.golang.org/appengine/datastore/prop.go b/vendor/google.golang.org/appengine/datastore/prop.go
new file mode 100644
index 0000000..1f50ac0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/prop.go
@@ -0,0 +1,296 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "unicode"
+)
+
+// Entities with more than this many indexed properties will not be saved.
+const maxIndexedProperties = 20000
+
+// []byte fields more than 1 megabyte long will not be loaded or saved.
+const maxBlobLen = 1 << 20
+
+// Property is a name/value pair plus some metadata. A datastore entity's
+// contents are loaded and saved as a sequence of Properties. An entity can
+// have multiple Properties with the same name, provided that p.Multiple is
+// true on all of that entity's Properties with that name.
+type Property struct {
+ // Name is the property name.
+ Name string
+ // Value is the property value. The valid types are:
+ // - int64
+ // - bool
+ // - string
+ // - float64
+ // - ByteString
+ // - *Key
+ // - time.Time
+ // - appengine.BlobKey
+ // - appengine.GeoPoint
+ // - []byte (up to 1 megabyte in length)
+ // This set is smaller than the set of valid struct field types that the
+ // datastore can load and save. A Property Value cannot be a slice (apart
+ // from []byte); use multiple Properties instead. Also, a Value's type
+ // must be explicitly on the list above; it is not sufficient for the
+ // underlying type to be on that list. For example, a Value of "type
+ // myInt64 int64" is invalid. Smaller-width integers and floats are also
+ // invalid. Again, this is more restrictive than the set of valid struct
+ // field types.
+ //
+ // A Value will have an opaque type when loading entities from an index,
+ // such as via a projection query. Load entities into a struct instead
+ // of a PropertyLoadSaver when using a projection query.
+ //
+ // A Value may also be the nil interface value; this is equivalent to
+ // Python's None but not directly representable by a Go struct. Loading
+ // a nil-valued property into a struct will set that field to the zero
+ // value.
+ Value interface{}
+ // NoIndex is whether the datastore cannot index this property.
+ NoIndex bool
+ // Multiple is whether the entity can have multiple properties with
+ // the same name. Even if a particular instance only has one property with
+ // a certain name, Multiple should be true if a struct would best represent
+ // it as a field of type []T instead of type T.
+ Multiple bool
+}
+
+// ByteString is a short byte slice (up to 1500 bytes) that can be indexed.
+type ByteString []byte
+
+// PropertyLoadSaver can be converted from and to a slice of Properties.
+type PropertyLoadSaver interface {
+ Load([]Property) error
+ Save() ([]Property, error)
+}
+
+// PropertyList converts a []Property to implement PropertyLoadSaver.
+type PropertyList []Property
+
+var (
+ typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()
+ typeOfPropertyList = reflect.TypeOf(PropertyList(nil))
+)
+
+// Load loads all of the provided properties into l.
+// It does not first reset *l to an empty slice.
+func (l *PropertyList) Load(p []Property) error {
+ *l = append(*l, p...)
+ return nil
+}
+
+// Save saves all of l's properties as a slice of Properties.
+func (l *PropertyList) Save() ([]Property, error) {
+ return *l, nil
+}
+
+// validPropertyName returns whether name consists of one or more valid Go
+// identifiers joined by ".".
+func validPropertyName(name string) bool {
+ if name == "" {
+ return false
+ }
+ for _, s := range strings.Split(name, ".") {
+ if s == "" {
+ return false
+ }
+ first := true
+ for _, c := range s {
+ if first {
+ first = false
+ if c != '_' && !unicode.IsLetter(c) {
+ return false
+ }
+ } else {
+ if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ }
+ return true
+}
+
+// structTag is the parsed `datastore:"name,options"` tag of a struct field.
+// If a field has no tag, or the tag has an empty name, then the structTag's
+// name is just the field name. A "-" name means that the datastore ignores
+// that field.
+type structTag struct {
+ name string
+ noIndex bool
+}
+
+// structCodec describes how to convert a struct to and from a sequence of
+// properties.
+type structCodec struct {
+ // byIndex gives the structTag for the i'th field.
+ byIndex []structTag
+ // byName gives the field codec for the structTag with the given name.
+ byName map[string]fieldCodec
+ // hasSlice is whether a struct or any of its nested or embedded structs
+ // has a slice-typed field (other than []byte).
+ hasSlice bool
+ // complete is whether the structCodec is complete. An incomplete
+ // structCodec may be encountered when walking a recursive struct.
+ complete bool
+}
+
+// fieldCodec is a struct field's index and, if that struct field's type is
+// itself a struct, that substruct's structCodec.
+type fieldCodec struct {
+ index int
+ substructCodec *structCodec
+}
+
+// structCodecs collects the structCodecs that have already been calculated.
+var (
+ structCodecsMutex sync.Mutex
+ structCodecs = make(map[reflect.Type]*structCodec)
+)
+
+// getStructCodec returns the structCodec for the given struct type.
+func getStructCodec(t reflect.Type) (*structCodec, error) {
+ structCodecsMutex.Lock()
+ defer structCodecsMutex.Unlock()
+ return getStructCodecLocked(t)
+}
+
+// getStructCodecLocked implements getStructCodec. The structCodecsMutex must
+// be held when calling this function.
+func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) {
+ c, ok := structCodecs[t]
+ if ok {
+ return c, nil
+ }
+ c = &structCodec{
+ byIndex: make([]structTag, t.NumField()),
+ byName: make(map[string]fieldCodec),
+ }
+
+ // Add c to the structCodecs map before we are sure it is good. If t is
+ // a recursive type, it needs to find the incomplete entry for itself in
+ // the map.
+ structCodecs[t] = c
+ defer func() {
+ if retErr != nil {
+ delete(structCodecs, t)
+ }
+ }()
+
+ for i := range c.byIndex {
+ f := t.Field(i)
+ tags := strings.Split(f.Tag.Get("datastore"), ",")
+ name := tags[0]
+ opts := make(map[string]bool)
+ for _, t := range tags[1:] {
+ opts[t] = true
+ }
+ if name == "" {
+ if !f.Anonymous {
+ name = f.Name
+ }
+ } else if name == "-" {
+ c.byIndex[i] = structTag{name: name}
+ continue
+ } else if !validPropertyName(name) {
+ return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name)
+ }
+
+ substructType, fIsSlice := reflect.Type(nil), false
+ switch f.Type.Kind() {
+ case reflect.Struct:
+ substructType = f.Type
+ case reflect.Slice:
+ if f.Type.Elem().Kind() == reflect.Struct {
+ substructType = f.Type.Elem()
+ }
+ fIsSlice = f.Type != typeOfByteSlice
+ c.hasSlice = c.hasSlice || fIsSlice
+ }
+
+ if substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint {
+ if name != "" {
+ name = name + "."
+ }
+ sub, err := getStructCodecLocked(substructType)
+ if err != nil {
+ return nil, err
+ }
+ if !sub.complete {
+ return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name)
+ }
+ if fIsSlice && sub.hasSlice {
+ return nil, fmt.Errorf(
+ "datastore: flattening nested structs leads to a slice of slices: field %q", f.Name)
+ }
+ c.hasSlice = c.hasSlice || sub.hasSlice
+ for relName := range sub.byName {
+ absName := name + relName
+ if _, ok := c.byName[absName]; ok {
+ return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", absName)
+ }
+ c.byName[absName] = fieldCodec{index: i, substructCodec: sub}
+ }
+ } else {
+ if _, ok := c.byName[name]; ok {
+ return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name)
+ }
+ c.byName[name] = fieldCodec{index: i}
+ }
+
+ c.byIndex[i] = structTag{
+ name: name,
+ noIndex: opts["noindex"],
+ }
+ }
+ c.complete = true
+ return c, nil
+}
+
+// structPLS adapts a struct to be a PropertyLoadSaver.
+type structPLS struct {
+ v reflect.Value
+ codec *structCodec
+}
+
+// newStructPLS returns a PropertyLoadSaver for the struct pointer p.
+func newStructPLS(p interface{}) (PropertyLoadSaver, error) {
+ v := reflect.ValueOf(p)
+ if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
+ return nil, ErrInvalidEntityType
+ }
+ v = v.Elem()
+ codec, err := getStructCodec(v.Type())
+ if err != nil {
+ return nil, err
+ }
+ return structPLS{v, codec}, nil
+}
+
+// LoadStruct loads the properties from p to dst.
+// dst must be a struct pointer.
+func LoadStruct(dst interface{}, p []Property) error {
+ x, err := newStructPLS(dst)
+ if err != nil {
+ return err
+ }
+ return x.Load(p)
+}
+
+// SaveStruct returns the properties from src as a slice of Properties.
+// src must be a struct pointer.
+func SaveStruct(src interface{}) ([]Property, error) {
+ x, err := newStructPLS(src)
+ if err != nil {
+ return nil, err
+ }
+ return x.Save()
+}
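Editorial sketch (not part of this diff): how SaveStruct, LoadStruct and PropertyList fit together, assuming a made-up widget struct and only the APIs defined in prop.go above.

// Hypothetical example.
type widget struct {
	Name  string
	Count int64 `datastore:",noindex"`
}

func examplePropertyRoundTrip() (widget, error) {
	src := widget{Name: "sprocket", Count: 3}
	props, err := SaveStruct(&src) // struct pointer -> []Property
	if err != nil {
		return widget{}, err
	}
	var pl PropertyList
	_ = pl.Load(props) // PropertyList implements PropertyLoadSaver by appending

	var dst widget
	err = LoadStruct(&dst, props) // []Property -> struct pointer
	return dst, err               // dst now mirrors src
}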
diff --git a/vendor/google.golang.org/appengine/datastore/prop_test.go b/vendor/google.golang.org/appengine/datastore/prop_test.go
new file mode 100644
index 0000000..6889521
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/prop_test.go
@@ -0,0 +1,604 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "google.golang.org/appengine"
+)
+
+func TestValidPropertyName(t *testing.T) {
+ testCases := []struct {
+ name string
+ want bool
+ }{
+ // Invalid names.
+ {"", false},
+ {"'", false},
+ {".", false},
+ {"..", false},
+ {".foo", false},
+ {"0", false},
+ {"00", false},
+ {"X.X.4.X.X", false},
+ {"\n", false},
+ {"\x00", false},
+ {"abc\xffz", false},
+ {"foo.", false},
+ {"foo..", false},
+ {"foo..bar", false},
+ {"☃", false},
+ {`"`, false},
+ // Valid names.
+ {"AB", true},
+ {"Abc", true},
+ {"X.X.X.X.X", true},
+ {"_", true},
+ {"_0", true},
+ {"a", true},
+ {"a_B", true},
+ {"f00", true},
+ {"f0o", true},
+ {"fo0", true},
+ {"foo", true},
+ {"foo.bar", true},
+ {"foo.bar.baz", true},
+ {"世界", true},
+ }
+ for _, tc := range testCases {
+ got := validPropertyName(tc.name)
+ if got != tc.want {
+ t.Errorf("%q: got %v, want %v", tc.name, got, tc.want)
+ }
+ }
+}
+
+func TestStructCodec(t *testing.T) {
+ type oStruct struct {
+ O int
+ }
+ type pStruct struct {
+ P int
+ Q int
+ }
+ type rStruct struct {
+ R int
+ S pStruct
+ T oStruct
+ oStruct
+ }
+ type uStruct struct {
+ U int
+ v int
+ }
+ type vStruct struct {
+ V string `datastore:",noindex"`
+ }
+ oStructCodec := &structCodec{
+ byIndex: []structTag{
+ {name: "O"},
+ },
+ byName: map[string]fieldCodec{
+ "O": {index: 0},
+ },
+ complete: true,
+ }
+ pStructCodec := &structCodec{
+ byIndex: []structTag{
+ {name: "P"},
+ {name: "Q"},
+ },
+ byName: map[string]fieldCodec{
+ "P": {index: 0},
+ "Q": {index: 1},
+ },
+ complete: true,
+ }
+ rStructCodec := &structCodec{
+ byIndex: []structTag{
+ {name: "R"},
+ {name: "S."},
+ {name: "T."},
+ {name: ""},
+ },
+ byName: map[string]fieldCodec{
+ "R": {index: 0},
+ "S.P": {index: 1, substructCodec: pStructCodec},
+ "S.Q": {index: 1, substructCodec: pStructCodec},
+ "T.O": {index: 2, substructCodec: oStructCodec},
+ "O": {index: 3, substructCodec: oStructCodec},
+ },
+ complete: true,
+ }
+ uStructCodec := &structCodec{
+ byIndex: []structTag{
+ {name: "U"},
+ {name: "v"},
+ },
+ byName: map[string]fieldCodec{
+ "U": {index: 0},
+ "v": {index: 1},
+ },
+ complete: true,
+ }
+ vStructCodec := &structCodec{
+ byIndex: []structTag{
+ {name: "V", noIndex: true},
+ },
+ byName: map[string]fieldCodec{
+ "V": {index: 0},
+ },
+ complete: true,
+ }
+
+ testCases := []struct {
+ desc string
+ structValue interface{}
+ want *structCodec
+ }{
+ {
+ "oStruct",
+ oStruct{},
+ oStructCodec,
+ },
+ {
+ "pStruct",
+ pStruct{},
+ pStructCodec,
+ },
+ {
+ "rStruct",
+ rStruct{},
+ rStructCodec,
+ },
+ {
+ "uStruct",
+ uStruct{},
+ uStructCodec,
+ },
+ {
+ "non-basic fields",
+ struct {
+ B appengine.BlobKey
+ K *Key
+ T time.Time
+ }{},
+ &structCodec{
+ byIndex: []structTag{
+ {name: "B"},
+ {name: "K"},
+ {name: "T"},
+ },
+ byName: map[string]fieldCodec{
+ "B": {index: 0},
+ "K": {index: 1},
+ "T": {index: 2},
+ },
+ complete: true,
+ },
+ },
+ {
+ "struct tags with ignored embed",
+ struct {
+ A int `datastore:"a,noindex"`
+ B int `datastore:"b"`
+ C int `datastore:",noindex"`
+ D int `datastore:""`
+ E int
+ I int `datastore:"-"`
+ J int `datastore:",noindex" json:"j"`
+ oStruct `datastore:"-"`
+ }{},
+ &structCodec{
+ byIndex: []structTag{
+ {name: "a", noIndex: true},
+ {name: "b", noIndex: false},
+ {name: "C", noIndex: true},
+ {name: "D", noIndex: false},
+ {name: "E", noIndex: false},
+ {name: "-", noIndex: false},
+ {name: "J", noIndex: true},
+ {name: "-", noIndex: false},
+ },
+ byName: map[string]fieldCodec{
+ "a": {index: 0},
+ "b": {index: 1},
+ "C": {index: 2},
+ "D": {index: 3},
+ "E": {index: 4},
+ "J": {index: 6},
+ },
+ complete: true,
+ },
+ },
+ {
+ "unexported fields",
+ struct {
+ A int
+ b int
+ C int `datastore:"x"`
+ d int `datastore:"Y"`
+ }{},
+ &structCodec{
+ byIndex: []structTag{
+ {name: "A"},
+ {name: "b"},
+ {name: "x"},
+ {name: "Y"},
+ },
+ byName: map[string]fieldCodec{
+ "A": {index: 0},
+ "b": {index: 1},
+ "x": {index: 2},
+ "Y": {index: 3},
+ },
+ complete: true,
+ },
+ },
+ {
+ "nested and embedded structs",
+ struct {
+ A int
+ B int
+ CC oStruct
+ DDD rStruct
+ oStruct
+ }{},
+ &structCodec{
+ byIndex: []structTag{
+ {name: "A"},
+ {name: "B"},
+ {name: "CC."},
+ {name: "DDD."},
+ {name: ""},
+ },
+ byName: map[string]fieldCodec{
+ "A": {index: 0},
+ "B": {index: 1},
+ "CC.O": {index: 2, substructCodec: oStructCodec},
+ "DDD.R": {index: 3, substructCodec: rStructCodec},
+ "DDD.S.P": {index: 3, substructCodec: rStructCodec},
+ "DDD.S.Q": {index: 3, substructCodec: rStructCodec},
+ "DDD.T.O": {index: 3, substructCodec: rStructCodec},
+ "DDD.O": {index: 3, substructCodec: rStructCodec},
+ "O": {index: 4, substructCodec: oStructCodec},
+ },
+ complete: true,
+ },
+ },
+ {
+ "struct tags with nested and embedded structs",
+ struct {
+ A int `datastore:"-"`
+ B int `datastore:"w"`
+ C oStruct `datastore:"xx"`
+ D rStruct `datastore:"y"`
+ oStruct `datastore:"z"`
+ }{},
+ &structCodec{
+ byIndex: []structTag{
+ {name: "-"},
+ {name: "w"},
+ {name: "xx."},
+ {name: "y."},
+ {name: "z."},
+ },
+ byName: map[string]fieldCodec{
+ "w": {index: 1},
+ "xx.O": {index: 2, substructCodec: oStructCodec},
+ "y.R": {index: 3, substructCodec: rStructCodec},
+ "y.S.P": {index: 3, substructCodec: rStructCodec},
+ "y.S.Q": {index: 3, substructCodec: rStructCodec},
+ "y.T.O": {index: 3, substructCodec: rStructCodec},
+ "y.O": {index: 3, substructCodec: rStructCodec},
+ "z.O": {index: 4, substructCodec: oStructCodec},
+ },
+ complete: true,
+ },
+ },
+ {
+ "unexported nested and embedded structs",
+ struct {
+ a int
+ B int
+ c uStruct
+ D uStruct
+ uStruct
+ }{},
+ &structCodec{
+ byIndex: []structTag{
+ {name: "a"},
+ {name: "B"},
+ {name: "c."},
+ {name: "D."},
+ {name: ""},
+ },
+ byName: map[string]fieldCodec{
+ "a": {index: 0},
+ "B": {index: 1},
+ "c.U": {index: 2, substructCodec: uStructCodec},
+ "c.v": {index: 2, substructCodec: uStructCodec},
+ "D.U": {index: 3, substructCodec: uStructCodec},
+ "D.v": {index: 3, substructCodec: uStructCodec},
+ "U": {index: 4, substructCodec: uStructCodec},
+ "v": {index: 4, substructCodec: uStructCodec},
+ },
+ complete: true,
+ },
+ },
+ {
+ "noindex nested struct",
+ struct {
+ A oStruct `datastore:",noindex"`
+ }{},
+ &structCodec{
+ byIndex: []structTag{
+ {name: "A.", noIndex: true},
+ },
+ byName: map[string]fieldCodec{
+ "A.O": {index: 0, substructCodec: oStructCodec},
+ },
+ complete: true,
+ },
+ },
+ {
+ "noindex slice",
+ struct {
+ A []string `datastore:",noindex"`
+ }{},
+ &structCodec{
+ byIndex: []structTag{
+ {name: "A", noIndex: true},
+ },
+ byName: map[string]fieldCodec{
+ "A": {index: 0},
+ },
+ hasSlice: true,
+ complete: true,
+ },
+ },
+ {
+ "noindex embedded struct slice",
+ struct {
+ // vStruct has a single field, V, also with noindex.
+ A []vStruct `datastore:",noindex"`
+ }{},
+ &structCodec{
+ byIndex: []structTag{
+ {name: "A.", noIndex: true},
+ },
+ byName: map[string]fieldCodec{
+ "A.V": {index: 0, substructCodec: vStructCodec},
+ },
+ hasSlice: true,
+ complete: true,
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ got, err := getStructCodec(reflect.TypeOf(tc.structValue))
+ if err != nil {
+ t.Errorf("%s: getStructCodec: %v", tc.desc, err)
+ continue
+ }
+ if !reflect.DeepEqual(got, tc.want) {
+ t.Errorf("%s\ngot %+v\nwant %+v\n", tc.desc, got, tc.want)
+ continue
+ }
+ }
+}
+
+func TestRepeatedPropertyName(t *testing.T) {
+ good := []interface{}{
+ struct {
+ A int `datastore:"-"`
+ }{},
+ struct {
+ A int `datastore:"b"`
+ B int
+ }{},
+ struct {
+ A int
+ B int `datastore:"B"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"-"`
+ }{},
+ struct {
+ A int `datastore:"-"`
+ B int `datastore:"A"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"A"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"C"`
+ C int `datastore:"A"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"C"`
+ C int `datastore:"D"`
+ }{},
+ }
+ bad := []interface{}{
+ struct {
+ A int `datastore:"B"`
+ B int
+ }{},
+ struct {
+ A int
+ B int `datastore:"A"`
+ }{},
+ struct {
+ A int `datastore:"C"`
+ B int `datastore:"C"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"C"`
+ C int `datastore:"B"`
+ }{},
+ }
+ testGetStructCodec(t, good, bad)
+}
+
+func TestFlatteningNestedStructs(t *testing.T) {
+ type deepGood struct {
+ A struct {
+ B []struct {
+ C struct {
+ D int
+ }
+ }
+ }
+ }
+ type deepBad struct {
+ A struct {
+ B []struct {
+ C struct {
+ D []int
+ }
+ }
+ }
+ }
+ type iSay struct {
+ Tomato int
+ }
+ type youSay struct {
+ Tomato int
+ }
+ type tweedledee struct {
+ Dee int `datastore:"D"`
+ }
+ type tweedledum struct {
+ Dum int `datastore:"D"`
+ }
+
+ good := []interface{}{
+ struct {
+ X []struct {
+ Y string
+ }
+ }{},
+ struct {
+ X []struct {
+ Y []byte
+ }
+ }{},
+ struct {
+ P []int
+ X struct {
+ Y []int
+ }
+ }{},
+ struct {
+ X struct {
+ Y []int
+ }
+ Q []int
+ }{},
+ struct {
+ P []int
+ X struct {
+ Y []int
+ }
+ Q []int
+ }{},
+ struct {
+ deepGood
+ }{},
+ struct {
+ DG deepGood
+ }{},
+ struct {
+ Foo struct {
+ Z int `datastore:"X"`
+ } `datastore:"A"`
+ Bar struct {
+ Z int `datastore:"Y"`
+ } `datastore:"A"`
+ }{},
+ }
+ bad := []interface{}{
+ struct {
+ X []struct {
+ Y []string
+ }
+ }{},
+ struct {
+ X []struct {
+ Y []int
+ }
+ }{},
+ struct {
+ deepBad
+ }{},
+ struct {
+ DB deepBad
+ }{},
+ struct {
+ iSay
+ youSay
+ }{},
+ struct {
+ tweedledee
+ tweedledum
+ }{},
+ struct {
+ Foo struct {
+ Z int
+ } `datastore:"A"`
+ Bar struct {
+ Z int
+ } `datastore:"A"`
+ }{},
+ }
+ testGetStructCodec(t, good, bad)
+}
+
+func testGetStructCodec(t *testing.T, good []interface{}, bad []interface{}) {
+ for _, x := range good {
+ if _, err := getStructCodec(reflect.TypeOf(x)); err != nil {
+ t.Errorf("type %T: got non-nil error (%s), want nil", x, err)
+ }
+ }
+ for _, x := range bad {
+ if _, err := getStructCodec(reflect.TypeOf(x)); err == nil {
+ t.Errorf("type %T: got nil error, want non-nil", x)
+ }
+ }
+}
+
+func TestNilKeyIsStored(t *testing.T) {
+ x := struct {
+ K *Key
+ I int
+ }{}
+ p := PropertyList{}
+ // Save x as properties.
+ p1, _ := SaveStruct(&x)
+ p.Load(p1)
+ // Set x's fields to non-zero.
+ x.K = &Key{}
+ x.I = 2
+ // Load x from properties.
+ p2, _ := p.Save()
+ LoadStruct(&x, p2)
+ // Check that x's fields were set to zero.
+ if x.K != nil {
+ t.Errorf("K field was not zero")
+ }
+ if x.I != 0 {
+ t.Errorf("I field was not zero")
+ }
+}
diff --git a/vendor/google.golang.org/appengine/datastore/query.go b/vendor/google.golang.org/appengine/datastore/query.go
new file mode 100644
index 0000000..3847b0f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/query.go
@@ -0,0 +1,724 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+type operator int
+
+const (
+ lessThan operator = iota
+ lessEq
+ equal
+ greaterEq
+ greaterThan
+)
+
+var operatorToProto = map[operator]*pb.Query_Filter_Operator{
+ lessThan: pb.Query_Filter_LESS_THAN.Enum(),
+ lessEq: pb.Query_Filter_LESS_THAN_OR_EQUAL.Enum(),
+ equal: pb.Query_Filter_EQUAL.Enum(),
+ greaterEq: pb.Query_Filter_GREATER_THAN_OR_EQUAL.Enum(),
+ greaterThan: pb.Query_Filter_GREATER_THAN.Enum(),
+}
+
+// filter is a conditional filter on query results.
+type filter struct {
+ FieldName string
+ Op operator
+ Value interface{}
+}
+
+type sortDirection int
+
+const (
+ ascending sortDirection = iota
+ descending
+)
+
+var sortDirectionToProto = map[sortDirection]*pb.Query_Order_Direction{
+ ascending: pb.Query_Order_ASCENDING.Enum(),
+ descending: pb.Query_Order_DESCENDING.Enum(),
+}
+
+// order is a sort order on query results.
+type order struct {
+ FieldName string
+ Direction sortDirection
+}
+
+// NewQuery creates a new Query for a specific entity kind.
+//
+// An empty kind means to return all entities, including entities created and
+// managed by other App Engine features, and is called a kindless query.
+// Kindless queries cannot include filters or sort orders on property values.
+func NewQuery(kind string) *Query {
+ return &Query{
+ kind: kind,
+ limit: -1,
+ }
+}
+
+// Query represents a datastore query.
+type Query struct {
+ kind string
+ ancestor *Key
+ filter []filter
+ order []order
+ projection []string
+
+ distinct bool
+ keysOnly bool
+ eventual bool
+ limit int32
+ offset int32
+ start *pb.CompiledCursor
+ end *pb.CompiledCursor
+
+ err error
+}
+
+func (q *Query) clone() *Query {
+ x := *q
+ // Copy the contents of the slice-typed fields to a new backing store.
+ if len(q.filter) > 0 {
+ x.filter = make([]filter, len(q.filter))
+ copy(x.filter, q.filter)
+ }
+ if len(q.order) > 0 {
+ x.order = make([]order, len(q.order))
+ copy(x.order, q.order)
+ }
+ return &x
+}
+
+// Ancestor returns a derivative query with an ancestor filter.
+// The ancestor should not be nil.
+func (q *Query) Ancestor(ancestor *Key) *Query {
+ q = q.clone()
+ if ancestor == nil {
+ q.err = errors.New("datastore: nil query ancestor")
+ return q
+ }
+ q.ancestor = ancestor
+ return q
+}
+
+// EventualConsistency returns a derivative query that returns eventually
+// consistent results.
+// It only has an effect on ancestor queries.
+func (q *Query) EventualConsistency() *Query {
+ q = q.clone()
+ q.eventual = true
+ return q
+}
+
+// Filter returns a derivative query with a field-based filter.
+// The filterStr argument must be a field name followed by optional space,
+// followed by an operator, one of ">", "<", ">=", "<=", or "=".
+// Fields are compared against the provided value using the operator.
+// Multiple filters are AND'ed together.
+func (q *Query) Filter(filterStr string, value interface{}) *Query {
+ q = q.clone()
+ filterStr = strings.TrimSpace(filterStr)
+ if len(filterStr) < 1 {
+ q.err = errors.New("datastore: invalid filter: " + filterStr)
+ return q
+ }
+ f := filter{
+ FieldName: strings.TrimRight(filterStr, " ><=!"),
+ Value: value,
+ }
+ switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op {
+ case "<=":
+ f.Op = lessEq
+ case ">=":
+ f.Op = greaterEq
+ case "<":
+ f.Op = lessThan
+ case ">":
+ f.Op = greaterThan
+ case "=":
+ f.Op = equal
+ default:
+ q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr)
+ return q
+ }
+ q.filter = append(q.filter, f)
+ return q
+}
+
+// Order returns a derivative query with a field-based sort order. Orders are
+// applied in the order they are added. The default order is ascending; to sort
+// in descending order prefix the fieldName with a minus sign (-).
+func (q *Query) Order(fieldName string) *Query {
+ q = q.clone()
+ fieldName = strings.TrimSpace(fieldName)
+ o := order{
+ Direction: ascending,
+ FieldName: fieldName,
+ }
+ if strings.HasPrefix(fieldName, "-") {
+ o.Direction = descending
+ o.FieldName = strings.TrimSpace(fieldName[1:])
+ } else if strings.HasPrefix(fieldName, "+") {
+ q.err = fmt.Errorf("datastore: invalid order: %q", fieldName)
+ return q
+ }
+ if len(o.FieldName) == 0 {
+ q.err = errors.New("datastore: empty order")
+ return q
+ }
+ q.order = append(q.order, o)
+ return q
+}
+
+// Project returns a derivative query that yields only the given fields. It
+// cannot be used with KeysOnly.
+func (q *Query) Project(fieldNames ...string) *Query {
+ q = q.clone()
+ q.projection = append([]string(nil), fieldNames...)
+ return q
+}
+
+// Distinct returns a derivative query that yields de-duplicated entities with
+// respect to the set of projected fields. It is only used for projection
+// queries.
+func (q *Query) Distinct() *Query {
+ q = q.clone()
+ q.distinct = true
+ return q
+}
+
+// KeysOnly returns a derivative query that yields only keys, not keys and
+// entities. It cannot be used with projection queries.
+func (q *Query) KeysOnly() *Query {
+ q = q.clone()
+ q.keysOnly = true
+ return q
+}
+
+// Limit returns a derivative query that has a limit on the number of results
+// returned. A negative value means unlimited.
+func (q *Query) Limit(limit int) *Query {
+ q = q.clone()
+ if limit < math.MinInt32 || limit > math.MaxInt32 {
+ q.err = errors.New("datastore: query limit overflow")
+ return q
+ }
+ q.limit = int32(limit)
+ return q
+}
+
+// Offset returns a derivative query that skips the given number of keys
+// before returning results. A negative value is invalid.
+func (q *Query) Offset(offset int) *Query {
+ q = q.clone()
+ if offset < 0 {
+ q.err = errors.New("datastore: negative query offset")
+ return q
+ }
+ if offset > math.MaxInt32 {
+ q.err = errors.New("datastore: query offset overflow")
+ return q
+ }
+ q.offset = int32(offset)
+ return q
+}
+
+// Start returns a derivative query with the given start point.
+func (q *Query) Start(c Cursor) *Query {
+ q = q.clone()
+ if c.cc == nil {
+ q.err = errors.New("datastore: invalid cursor")
+ return q
+ }
+ q.start = c.cc
+ return q
+}
+
+// End returns a derivative query with the given end point.
+func (q *Query) End(c Cursor) *Query {
+ q = q.clone()
+ if c.cc == nil {
+ q.err = errors.New("datastore: invalid cursor")
+ return q
+ }
+ q.end = c.cc
+ return q
+}
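Editorial sketch: the derivative methods above compose by chaining, and because each call works on a clone, a base query can safely be extended in different directions. The "Gopher" kind and field names mirror this package's tests.

// Hypothetical example; base, q1 and q2 share no mutable state.
func exampleQueryBuilding() (*Query, *Query) {
	base := NewQuery("Gopher").Filter("Height >=", 10).Order("-Height")
	q1 := base.Limit(5)              // the five tallest gophers
	q2 := base.KeysOnly().Limit(100) // keys only, first hundred matches
	return q1, q2
}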
+
+// toProto converts the query to a protocol buffer.
+func (q *Query) toProto(dst *pb.Query, appID string) error {
+ if len(q.projection) != 0 && q.keysOnly {
+ return errors.New("datastore: query cannot both project and be keys-only")
+ }
+ dst.Reset()
+ dst.App = proto.String(appID)
+ if q.kind != "" {
+ dst.Kind = proto.String(q.kind)
+ }
+ if q.ancestor != nil {
+ dst.Ancestor = keyToProto(appID, q.ancestor)
+ if q.eventual {
+ dst.Strong = proto.Bool(false)
+ }
+ }
+ if q.projection != nil {
+ dst.PropertyName = q.projection
+ if q.distinct {
+ dst.GroupByPropertyName = q.projection
+ }
+ }
+ if q.keysOnly {
+ dst.KeysOnly = proto.Bool(true)
+ dst.RequirePerfectPlan = proto.Bool(true)
+ }
+ for _, qf := range q.filter {
+ if qf.FieldName == "" {
+ return errors.New("datastore: empty query filter field name")
+ }
+ p, errStr := valueToProto(appID, qf.FieldName, reflect.ValueOf(qf.Value), false)
+ if errStr != "" {
+ return errors.New("datastore: bad query filter value type: " + errStr)
+ }
+ xf := &pb.Query_Filter{
+ Op: operatorToProto[qf.Op],
+ Property: []*pb.Property{p},
+ }
+ if xf.Op == nil {
+ return errors.New("datastore: unknown query filter operator")
+ }
+ dst.Filter = append(dst.Filter, xf)
+ }
+ for _, qo := range q.order {
+ if qo.FieldName == "" {
+ return errors.New("datastore: empty query order field name")
+ }
+ xo := &pb.Query_Order{
+ Property: proto.String(qo.FieldName),
+ Direction: sortDirectionToProto[qo.Direction],
+ }
+ if xo.Direction == nil {
+ return errors.New("datastore: unknown query order direction")
+ }
+ dst.Order = append(dst.Order, xo)
+ }
+ if q.limit >= 0 {
+ dst.Limit = proto.Int32(q.limit)
+ }
+ if q.offset != 0 {
+ dst.Offset = proto.Int32(q.offset)
+ }
+ dst.CompiledCursor = q.start
+ dst.EndCompiledCursor = q.end
+ dst.Compile = proto.Bool(true)
+ return nil
+}
+
+// Count returns the number of results for the query.
+//
+// The running time and number of API calls made by Count scale linearly with
+// the sum of the query's offset and limit. Unless the result count is
+// expected to be small, it is best to specify a limit; otherwise Count will
+// continue until it finishes counting or the provided context expires.
+func (q *Query) Count(c context.Context) (int, error) {
+ // Check that the query is well-formed.
+ if q.err != nil {
+ return 0, q.err
+ }
+
+ // Run a copy of the query, with keysOnly true (if we're not a projection,
+ // since the two are incompatible), and an adjusted offset. We also set the
+ // limit to zero, as we don't want any actual entity data, just the number
+ // of skipped results.
+ newQ := q.clone()
+ newQ.keysOnly = len(newQ.projection) == 0
+ newQ.limit = 0
+ if q.limit < 0 {
+ // If the original query was unlimited, set the new query's offset to maximum.
+ newQ.offset = math.MaxInt32
+ } else {
+ newQ.offset = q.offset + q.limit
+ if newQ.offset < 0 {
+ // Do the best we can, in the presence of overflow.
+ newQ.offset = math.MaxInt32
+ }
+ }
+ req := &pb.Query{}
+ if err := newQ.toProto(req, internal.FullyQualifiedAppID(c)); err != nil {
+ return 0, err
+ }
+ res := &pb.QueryResult{}
+ if err := internal.Call(c, "datastore_v3", "RunQuery", req, res); err != nil {
+ return 0, err
+ }
+
+ // n is the count we will return. For example, suppose that our original
+ // query had an offset of 4 and a limit of 2008: the count will be 2008,
+ // provided that there are at least 2012 matching entities. However, the
+ // RPCs will only skip 1000 results at a time. The RPC sequence is:
+ // call RunQuery with (offset, limit) = (2012, 0) // 2012 == newQ.offset
+ // response has (skippedResults, moreResults) = (1000, true)
+ // n += 1000 // n == 1000
+ // call Next with (offset, limit) = (1012, 0) // 1012 == newQ.offset - n
+ // response has (skippedResults, moreResults) = (1000, true)
+ // n += 1000 // n == 2000
+ // call Next with (offset, limit) = (12, 0) // 12 == newQ.offset - n
+ // response has (skippedResults, moreResults) = (12, false)
+ // n += 12 // n == 2012
+ // // exit the loop
+ // n -= 4 // n == 2008
+ var n int32
+ for {
+ // The QueryResult should have no actual entity data, just skipped results.
+ if len(res.Result) != 0 {
+ return 0, errors.New("datastore: internal error: Count request returned too much data")
+ }
+ n += res.GetSkippedResults()
+ if !res.GetMoreResults() {
+ break
+ }
+ if err := callNext(c, res, newQ.offset-n, 0); err != nil {
+ return 0, err
+ }
+ }
+ n -= q.offset
+ if n < 0 {
+ // If the offset was greater than the number of matching entities,
+ // return 0 instead of negative.
+ n = 0
+ }
+ return int(n), nil
+}
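A brief usage sketch (editorial): bounding the query keeps Count's offset-based scan cheap, as the doc comment above advises. The context is assumed to be supplied by the caller.

// Hypothetical example; c is assumed to be a valid App Engine context
// (golang.org/x/net/context, as imported by this file).
func exampleCount(c context.Context) (int, error) {
	return NewQuery("Gopher").Filter("Height >", 0).Limit(1000).Count(c)
}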
+
+// callNext issues a datastore_v3/Next RPC to advance a cursor, such as that
+// returned by a query with more results.
+func callNext(c context.Context, res *pb.QueryResult, offset, limit int32) error {
+ if res.Cursor == nil {
+ return errors.New("datastore: internal error: server did not return a cursor")
+ }
+ req := &pb.NextRequest{
+ Cursor: res.Cursor,
+ }
+ if limit >= 0 {
+ req.Count = proto.Int32(limit)
+ }
+ if offset != 0 {
+ req.Offset = proto.Int32(offset)
+ }
+ if res.CompiledCursor != nil {
+ req.Compile = proto.Bool(true)
+ }
+ res.Reset()
+ return internal.Call(c, "datastore_v3", "Next", req, res)
+}
+
+// GetAll runs the query in the given context and returns all keys that match
+// that query, as well as appending the values to dst.
+//
+// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-
+// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.
+//
+// As a special case, *PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when *[]PropertyList was intended.
+//
+// The keys returned by GetAll will be in a 1-1 correspondence with the entities
+// added to dst.
+//
+// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
+//
+// The running time and number of API calls made by GetAll scale linearly
+// with the sum of the query's offset and limit. Unless the result count is
+// expected to be small, it is best to specify a limit; otherwise GetAll will
+// continue until it finishes collecting results or the provided context
+// expires.
+func (q *Query) GetAll(c context.Context, dst interface{}) ([]*Key, error) {
+ var (
+ dv reflect.Value
+ mat multiArgType
+ elemType reflect.Type
+ errFieldMismatch error
+ )
+ if !q.keysOnly {
+ dv = reflect.ValueOf(dst)
+ if dv.Kind() != reflect.Ptr || dv.IsNil() {
+ return nil, ErrInvalidEntityType
+ }
+ dv = dv.Elem()
+ mat, elemType = checkMultiArg(dv)
+ if mat == multiArgTypeInvalid || mat == multiArgTypeInterface {
+ return nil, ErrInvalidEntityType
+ }
+ }
+
+ var keys []*Key
+ for t := q.Run(c); ; {
+ k, e, err := t.next()
+ if err == Done {
+ break
+ }
+ if err != nil {
+ return keys, err
+ }
+ if !q.keysOnly {
+ ev := reflect.New(elemType)
+ if elemType.Kind() == reflect.Map {
+ // This is a special case. The zero values of a map type are
+ // not immediately useful; they have to be make'd.
+ //
+ // Funcs and channels are similar, in that a zero value is not useful,
+ // but even a freshly make'd channel isn't useful: there's no fixed
+ // channel buffer size that is always going to be large enough, and
+ // there's no goroutine to drain the other end. Theoretically, these
+ // types could be supported, for example by sniffing for a constructor
+ // method or requiring prior registration, but for now it's not a
+ // frequent enough concern to be worth it. Programmers can work around
+ // it by explicitly using Iterator.Next instead of the Query.GetAll
+ // convenience method.
+ x := reflect.MakeMap(elemType)
+ ev.Elem().Set(x)
+ }
+ if err = loadEntity(ev.Interface(), e); err != nil {
+ if _, ok := err.(*ErrFieldMismatch); ok {
+ // We continue loading entities even in the face of field mismatch errors.
+ // If we encounter any other error, that other error is returned. Otherwise,
+ // an ErrFieldMismatch is returned.
+ errFieldMismatch = err
+ } else {
+ return keys, err
+ }
+ }
+ if mat != multiArgTypeStructPtr {
+ ev = ev.Elem()
+ }
+ dv.Set(reflect.Append(dv, ev))
+ }
+ keys = append(keys, k)
+ }
+ return keys, errFieldMismatch
+}
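Editorial sketch of GetAll from the caller's side; the gopher struct is invented and the context is assumed to be supplied by the caller.

// Hypothetical example. keys[i] corresponds to gophers[i], and a returned
// *ErrFieldMismatch still leaves the loaded fields and keys usable.
func exampleGetAll(c context.Context) ([]*Key, error) {
	type gopher struct {
		Name   string
		Height int64
	}
	var gophers []gopher
	keys, err := NewQuery("Gopher").Order("Name").Limit(20).GetAll(c, &gophers)
	return keys, err
}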
+
+// Run runs the query in the given context.
+func (q *Query) Run(c context.Context) *Iterator {
+ if q.err != nil {
+ return &Iterator{err: q.err}
+ }
+ t := &Iterator{
+ c: c,
+ limit: q.limit,
+ q: q,
+ prevCC: q.start,
+ }
+ var req pb.Query
+ if err := q.toProto(&req, internal.FullyQualifiedAppID(c)); err != nil {
+ t.err = err
+ return t
+ }
+ if err := internal.Call(c, "datastore_v3", "RunQuery", &req, &t.res); err != nil {
+ t.err = err
+ return t
+ }
+ offset := q.offset - t.res.GetSkippedResults()
+ for offset > 0 && t.res.GetMoreResults() {
+ t.prevCC = t.res.CompiledCursor
+ if err := callNext(t.c, &t.res, offset, t.limit); err != nil {
+ t.err = err
+ break
+ }
+ skip := t.res.GetSkippedResults()
+ if skip < 0 {
+ t.err = errors.New("datastore: internal error: negative number of skipped_results")
+ break
+ }
+ offset -= skip
+ }
+ if offset < 0 {
+ t.err = errors.New("datastore: internal error: query offset was overshot")
+ }
+ return t
+}
+
+// Iterator is the result of running a query.
+type Iterator struct {
+ c context.Context
+ err error
+ // res is the result of the most recent RunQuery or Next API call.
+ res pb.QueryResult
+ // i is how many elements of res.Result we have iterated over.
+ i int
+ // limit is the limit on the number of results this iterator should return.
+ // A negative value means unlimited.
+ limit int32
+ // q is the original query which yielded this iterator.
+ q *Query
+ // prevCC is the compiled cursor that marks the end of the previous batch
+ // of results.
+ prevCC *pb.CompiledCursor
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("datastore: query has no more results")
+
+// Next returns the key of the next result. When there are no more results,
+// Done is returned as the error.
+//
+// If the query is not keys only and dst is non-nil, it also loads the entity
+// stored for that key into the struct pointer or PropertyLoadSaver dst, with
+// the same semantics and possible errors as for the Get function.
+func (t *Iterator) Next(dst interface{}) (*Key, error) {
+ k, e, err := t.next()
+ if err != nil {
+ return nil, err
+ }
+ if dst != nil && !t.q.keysOnly {
+ err = loadEntity(dst, e)
+ }
+ return k, err
+}
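Editorial sketch of the Run/Next/Done iteration pattern that GetAll wraps; the struct and context here are assumptions for the example.

// Hypothetical example; c is assumed to be a valid App Engine context.
func exampleIterate(c context.Context) error {
	type gopher struct{ Name string }
	for t := NewQuery("Gopher").Run(c); ; {
		var g gopher
		key, err := t.Next(&g)
		if err == Done {
			return nil // iteration finished
		}
		if err != nil {
			return err
		}
		_ = key // use key and g here
	}
}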
+
+func (t *Iterator) next() (*Key, *pb.EntityProto, error) {
+ if t.err != nil {
+ return nil, nil, t.err
+ }
+
+ // Issue datastore_v3/Next RPCs as necessary.
+ for t.i == len(t.res.Result) {
+ if !t.res.GetMoreResults() {
+ t.err = Done
+ return nil, nil, t.err
+ }
+ t.prevCC = t.res.CompiledCursor
+ if err := callNext(t.c, &t.res, 0, t.limit); err != nil {
+ t.err = err
+ return nil, nil, t.err
+ }
+ if t.res.GetSkippedResults() != 0 {
+ t.err = errors.New("datastore: internal error: iterator has skipped results")
+ return nil, nil, t.err
+ }
+ t.i = 0
+ if t.limit >= 0 {
+ t.limit -= int32(len(t.res.Result))
+ if t.limit < 0 {
+ t.err = errors.New("datastore: internal error: query returned more results than the limit")
+ return nil, nil, t.err
+ }
+ }
+ }
+
+ // Extract the key from the t.i'th element of t.res.Result.
+ e := t.res.Result[t.i]
+ t.i++
+ if e.Key == nil {
+ return nil, nil, errors.New("datastore: internal error: server did not return a key")
+ }
+ k, err := protoToKey(e.Key)
+ if err != nil || k.Incomplete() {
+ return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
+ }
+ return k, e, nil
+}
+
+// Cursor returns a cursor for the iterator's current location.
+func (t *Iterator) Cursor() (Cursor, error) {
+ if t.err != nil && t.err != Done {
+ return Cursor{}, t.err
+ }
+ // If we are at either end of the current batch of results,
+ // return the compiled cursor at that end.
+ skipped := t.res.GetSkippedResults()
+ if t.i == 0 && skipped == 0 {
+ if t.prevCC == nil {
+ // A nil pointer (of type *pb.CompiledCursor) means no constraint:
+ // passing it as the end cursor of a new query means unlimited results
+ // (glossing over the integer limit parameter for now).
+ // A non-nil pointer to an empty pb.CompiledCursor means the start:
+ // passing it as the end cursor of a new query means 0 results.
+ // If prevCC was nil, then the original query had no start cursor, but
+ // Iterator.Cursor should return "the start" instead of unlimited.
+ return Cursor{&zeroCC}, nil
+ }
+ return Cursor{t.prevCC}, nil
+ }
+ if t.i == len(t.res.Result) {
+ return Cursor{t.res.CompiledCursor}, nil
+ }
+ // Otherwise, re-run the query, offset to this iterator's position, starting from
+ // the most recent compiled cursor. This is done on a best-effort basis, as it
+ // is racy; if a concurrent process has added or removed entities, then the
+ // cursor returned may be inconsistent.
+ q := t.q.clone()
+ q.start = t.prevCC
+ q.offset = skipped + int32(t.i)
+ q.limit = 0
+ q.keysOnly = len(q.projection) == 0
+ t1 := q.Run(t.c)
+ _, _, err := t1.next()
+ if err != Done {
+ if err == nil {
+ err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results")
+ }
+ return Cursor{}, err
+ }
+ return Cursor{t1.res.CompiledCursor}, nil
+}
+
+var zeroCC pb.CompiledCursor
+
+// Cursor is an iterator's position. It can be converted to and from an opaque
+// string. A cursor can be used from different HTTP requests, but only with a
+// query with the same kind, ancestor, filter and order constraints.
+type Cursor struct {
+ cc *pb.CompiledCursor
+}
+
+// String returns a base-64 string representation of a cursor.
+func (c Cursor) String() string {
+ if c.cc == nil {
+ return ""
+ }
+ b, err := proto.Marshal(c.cc)
+ if err != nil {
+ // The only way to construct a Cursor with a non-nil cc field is to
+ // unmarshal from the byte representation. We panic if the unmarshal
+ // succeeds but the marshaling of the unchanged protobuf value fails.
+ panic(fmt.Sprintf("datastore: internal error: malformed cursor: %v", err))
+ }
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// DecodeCursor decodes a cursor from its base-64 string representation.
+func DecodeCursor(s string) (Cursor, error) {
+ if s == "" {
+ return Cursor{&zeroCC}, nil
+ }
+ if n := len(s) % 4; n != 0 {
+ s += strings.Repeat("=", 4-n)
+ }
+ b, err := base64.URLEncoding.DecodeString(s)
+ if err != nil {
+ return Cursor{}, err
+ }
+ cc := &pb.CompiledCursor{}
+ if err := proto.Unmarshal(b, cc); err != nil {
+ return Cursor{}, err
+ }
+ return Cursor{cc}, nil
+}
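Editorial sketch of carrying a cursor between requests, as the Cursor doc comment describes; the parameter names are invented and the context is assumed to be supplied by the caller.

// Hypothetical example: resume iteration from an opaque cursor string
// (for instance one round-tripped through a URL parameter).
func exampleResumeFromCursor(c context.Context, cursorStr string) (string, error) {
	q := NewQuery("Gopher")
	if cursorStr != "" {
		cur, err := DecodeCursor(cursorStr)
		if err != nil {
			return "", err
		}
		q = q.Start(cur)
	}
	t := q.Run(c)
	for i := 0; i < 10; i++ {
		_, err := t.Next(nil) // nil dst: do not load entity data
		if err == Done {
			break
		}
		if err != nil {
			return "", err
		}
	}
	next, err := t.Cursor() // where a later request should resume
	if err != nil {
		return "", err
	}
	return next.String(), nil
}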
diff --git a/vendor/google.golang.org/appengine/datastore/query_test.go b/vendor/google.golang.org/appengine/datastore/query_test.go
new file mode 100644
index 0000000..f1b9de8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/query_test.go
@@ -0,0 +1,583 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var (
+ path1 = &pb.Path{
+ Element: []*pb.Path_Element{
+ {
+ Type: proto.String("Gopher"),
+ Id: proto.Int64(6),
+ },
+ },
+ }
+ path2 = &pb.Path{
+ Element: []*pb.Path_Element{
+ {
+ Type: proto.String("Gopher"),
+ Id: proto.Int64(6),
+ },
+ {
+ Type: proto.String("Gopher"),
+ Id: proto.Int64(8),
+ },
+ },
+ }
+)
+
+func fakeRunQuery(in *pb.Query, out *pb.QueryResult) error {
+ expectedIn := &pb.Query{
+ App: proto.String("dev~fake-app"),
+ Kind: proto.String("Gopher"),
+ Compile: proto.Bool(true),
+ }
+ if !proto.Equal(in, expectedIn) {
+ return fmt.Errorf("unsupported argument: got %v want %v", in, expectedIn)
+ }
+ *out = pb.QueryResult{
+ Result: []*pb.EntityProto{
+ {
+ Key: &pb.Reference{
+ App: proto.String("s~test-app"),
+ Path: path1,
+ },
+ EntityGroup: path1,
+ Property: []*pb.Property{
+ {
+ Meaning: pb.Property_TEXT.Enum(),
+ Name: proto.String("Name"),
+ Value: &pb.PropertyValue{
+ StringValue: proto.String("George"),
+ },
+ },
+ {
+ Name: proto.String("Height"),
+ Value: &pb.PropertyValue{
+ Int64Value: proto.Int64(32),
+ },
+ },
+ },
+ },
+ {
+ Key: &pb.Reference{
+ App: proto.String("s~test-app"),
+ Path: path2,
+ },
+ EntityGroup: path1, // ancestor is George
+ Property: []*pb.Property{
+ {
+ Meaning: pb.Property_TEXT.Enum(),
+ Name: proto.String("Name"),
+ Value: &pb.PropertyValue{
+ StringValue: proto.String("Rufus"),
+ },
+ },
+ // No height for Rufus.
+ },
+ },
+ },
+ MoreResults: proto.Bool(false),
+ }
+ return nil
+}
+
+type StructThatImplementsPLS struct{}
+
+func (StructThatImplementsPLS) Load(p []Property) error { return nil }
+func (StructThatImplementsPLS) Save() ([]Property, error) { return nil, nil }
+
+var _ PropertyLoadSaver = StructThatImplementsPLS{}
+
+type StructPtrThatImplementsPLS struct{}
+
+func (*StructPtrThatImplementsPLS) Load(p []Property) error { return nil }
+func (*StructPtrThatImplementsPLS) Save() ([]Property, error) { return nil, nil }
+
+var _ PropertyLoadSaver = &StructPtrThatImplementsPLS{}
+
+type PropertyMap map[string]Property
+
+func (m PropertyMap) Load(props []Property) error {
+ for _, p := range props {
+ if p.Multiple {
+ return errors.New("PropertyMap does not support multiple properties")
+ }
+ m[p.Name] = p
+ }
+ return nil
+}
+
+func (m PropertyMap) Save() ([]Property, error) {
+ props := make([]Property, 0, len(m))
+ for _, p := range m {
+ if p.Multiple {
+ return nil, errors.New("PropertyMap does not support multiple properties")
+ }
+ props = append(props, p)
+ }
+ return props, nil
+}
+
+var _ PropertyLoadSaver = PropertyMap{}
+
+type Gopher struct {
+ Name string
+ Height int
+}
+
+// typeOfEmptyInterface is the type of interface{}. We can't use
+// reflect.TypeOf((interface{})(nil)) directly because passing a nil
+// interface value to TypeOf returns nil rather than the interface type.
+var typeOfEmptyInterface = reflect.TypeOf((*interface{})(nil)).Elem()
+
+func TestCheckMultiArg(t *testing.T) {
+ testCases := []struct {
+ v interface{}
+ mat multiArgType
+ elemType reflect.Type
+ }{
+ // Invalid cases.
+ {nil, multiArgTypeInvalid, nil},
+ {Gopher{}, multiArgTypeInvalid, nil},
+ {&Gopher{}, multiArgTypeInvalid, nil},
+ {PropertyList{}, multiArgTypeInvalid, nil}, // This is a special case.
+ {PropertyMap{}, multiArgTypeInvalid, nil},
+ {[]*PropertyList(nil), multiArgTypeInvalid, nil},
+ {[]*PropertyMap(nil), multiArgTypeInvalid, nil},
+ {[]**Gopher(nil), multiArgTypeInvalid, nil},
+ {[]*interface{}(nil), multiArgTypeInvalid, nil},
+ // Valid cases.
+ {
+ []PropertyList(nil),
+ multiArgTypePropertyLoadSaver,
+ reflect.TypeOf(PropertyList{}),
+ },
+ {
+ []PropertyMap(nil),
+ multiArgTypePropertyLoadSaver,
+ reflect.TypeOf(PropertyMap{}),
+ },
+ {
+ []StructThatImplementsPLS(nil),
+ multiArgTypePropertyLoadSaver,
+ reflect.TypeOf(StructThatImplementsPLS{}),
+ },
+ {
+ []StructPtrThatImplementsPLS(nil),
+ multiArgTypePropertyLoadSaver,
+ reflect.TypeOf(StructPtrThatImplementsPLS{}),
+ },
+ {
+ []Gopher(nil),
+ multiArgTypeStruct,
+ reflect.TypeOf(Gopher{}),
+ },
+ {
+ []*Gopher(nil),
+ multiArgTypeStructPtr,
+ reflect.TypeOf(Gopher{}),
+ },
+ {
+ []interface{}(nil),
+ multiArgTypeInterface,
+ typeOfEmptyInterface,
+ },
+ }
+ for _, tc := range testCases {
+ mat, elemType := checkMultiArg(reflect.ValueOf(tc.v))
+ if mat != tc.mat || elemType != tc.elemType {
+ t.Errorf("checkMultiArg(%T): got %v, %v want %v, %v",
+ tc.v, mat, elemType, tc.mat, tc.elemType)
+ }
+ }
+}
+
+func TestSimpleQuery(t *testing.T) {
+ struct1 := Gopher{Name: "George", Height: 32}
+ struct2 := Gopher{Name: "Rufus"}
+ pList1 := PropertyList{
+ {
+ Name: "Name",
+ Value: "George",
+ },
+ {
+ Name: "Height",
+ Value: int64(32),
+ },
+ }
+ pList2 := PropertyList{
+ {
+ Name: "Name",
+ Value: "Rufus",
+ },
+ }
+ pMap1 := PropertyMap{
+ "Name": Property{
+ Name: "Name",
+ Value: "George",
+ },
+ "Height": Property{
+ Name: "Height",
+ Value: int64(32),
+ },
+ }
+ pMap2 := PropertyMap{
+ "Name": Property{
+ Name: "Name",
+ Value: "Rufus",
+ },
+ }
+
+ testCases := []struct {
+ dst interface{}
+ want interface{}
+ }{
+ // The destination must have type *[]P, *[]S or *[]*S, for some non-interface
+ // type P such that P or *P implements PropertyLoadSaver, or for some struct type S.
+ {new([]Gopher), &[]Gopher{struct1, struct2}},
+ {new([]*Gopher), &[]*Gopher{&struct1, &struct2}},
+ {new([]PropertyList), &[]PropertyList{pList1, pList2}},
+ {new([]PropertyMap), &[]PropertyMap{pMap1, pMap2}},
+
+ // Any other destination type is invalid.
+ {0, nil},
+ {Gopher{}, nil},
+ {PropertyList{}, nil},
+ {PropertyMap{}, nil},
+ {[]int{}, nil},
+ {[]Gopher{}, nil},
+ {[]PropertyList{}, nil},
+ {new(int), nil},
+ {new(Gopher), nil},
+ {new(PropertyList), nil}, // This is a special case.
+ {new(PropertyMap), nil},
+ {new([]int), nil},
+ {new([]map[int]int), nil},
+ {new([]map[string]Property), nil},
+ {new([]map[string]interface{}), nil},
+ {new([]*int), nil},
+ {new([]*map[int]int), nil},
+ {new([]*map[string]Property), nil},
+ {new([]*map[string]interface{}), nil},
+ {new([]**Gopher), nil},
+ {new([]*PropertyList), nil},
+ {new([]*PropertyMap), nil},
+ }
+ for _, tc := range testCases {
+ nCall := 0
+ c := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(in *pb.Query, out *pb.QueryResult) error {
+ nCall++
+ return fakeRunQuery(in, out)
+ })
+ c = internal.WithAppIDOverride(c, "dev~fake-app")
+
+ var (
+ expectedErr error
+ expectedNCall int
+ )
+ if tc.want == nil {
+ expectedErr = ErrInvalidEntityType
+ } else {
+ expectedNCall = 1
+ }
+ keys, err := NewQuery("Gopher").GetAll(c, tc.dst)
+ if err != expectedErr {
+ t.Errorf("dst type %T: got error [%v], want [%v]", tc.dst, err, expectedErr)
+ continue
+ }
+ if nCall != expectedNCall {
+ t.Errorf("dst type %T: Context.Call was called an incorrect number of times: got %d want %d", tc.dst, nCall, expectedNCall)
+ continue
+ }
+ if err != nil {
+ continue
+ }
+
+ key1 := NewKey(c, "Gopher", "", 6, nil)
+ expectedKeys := []*Key{
+ key1,
+ NewKey(c, "Gopher", "", 8, key1),
+ }
+ if l1, l2 := len(keys), len(expectedKeys); l1 != l2 {
+ t.Errorf("dst type %T: got %d keys, want %d keys", tc.dst, l1, l2)
+ continue
+ }
+ for i, key := range keys {
+ if key.AppID() != "s~test-app" {
+ t.Errorf(`dst type %T: Key #%d's AppID = %q, want "s~test-app"`, tc.dst, i, key.AppID())
+ continue
+ }
+ if !keysEqual(key, expectedKeys[i]) {
+ t.Errorf("dst type %T: got key #%d %v, want %v", tc.dst, i, key, expectedKeys[i])
+ continue
+ }
+ }
+
+ if !reflect.DeepEqual(tc.dst, tc.want) {
+ t.Errorf("dst type %T: Entities got %+v, want %+v", tc.dst, tc.dst, tc.want)
+ continue
+ }
+ }
+}
+
+// keysEqual is like (*Key).Equal, but ignores the App ID.
+func keysEqual(a, b *Key) bool {
+ for a != nil && b != nil {
+ if a.Kind() != b.Kind() || a.StringID() != b.StringID() || a.IntID() != b.IntID() {
+ return false
+ }
+ a, b = a.Parent(), b.Parent()
+ }
+ return a == b
+}
+
+func TestQueriesAreImmutable(t *testing.T) {
+ // Test that deriving q2 from q1 does not modify q1.
+ q0 := NewQuery("foo")
+ q1 := NewQuery("foo")
+ q2 := q1.Offset(2)
+ if !reflect.DeepEqual(q0, q1) {
+ t.Errorf("q0 and q1 were not equal")
+ }
+ if reflect.DeepEqual(q1, q2) {
+ t.Errorf("q1 and q2 were equal")
+ }
+
+ // Test that deriving from q4 twice does not conflict, even though
+ // q4 has a long list of order clauses. This tests that the arrays
+ // backing a query's slice of orders are not shared.
+ f := func() *Query {
+ q := NewQuery("bar")
+ // 47 is an ugly number that is unlikely to be near a re-allocation
+ // point in repeated append calls. For example, it's not near a power
+ // of 2 or a multiple of 10.
+ for i := 0; i < 47; i++ {
+ q = q.Order(fmt.Sprintf("x%d", i))
+ }
+ return q
+ }
+ q3 := f().Order("y")
+ q4 := f()
+ q5 := q4.Order("y")
+ q6 := q4.Order("z")
+ if !reflect.DeepEqual(q3, q5) {
+ t.Errorf("q3 and q5 were not equal")
+ }
+ if reflect.DeepEqual(q5, q6) {
+ t.Errorf("q5 and q6 were equal")
+ }
+}
+
+func TestFilterParser(t *testing.T) {
+ testCases := []struct {
+ filterStr string
+ wantOK bool
+ wantFieldName string
+ wantOp operator
+ }{
+ // Supported ops.
+ {"x<", true, "x", lessThan},
+ {"x <", true, "x", lessThan},
+ {"x <", true, "x", lessThan},
+ {" x < ", true, "x", lessThan},
+ {"x <=", true, "x", lessEq},
+ {"x =", true, "x", equal},
+ {"x >=", true, "x", greaterEq},
+ {"x >", true, "x", greaterThan},
+ {"in >", true, "in", greaterThan},
+ {"in>", true, "in", greaterThan},
+ // Valid but (currently) unsupported ops.
+ {"x!=", false, "", 0},
+ {"x !=", false, "", 0},
+ {" x != ", false, "", 0},
+ {"x IN", false, "", 0},
+ {"x in", false, "", 0},
+ // Invalid ops.
+ {"x EQ", false, "", 0},
+ {"x lt", false, "", 0},
+ {"x <>", false, "", 0},
+ {"x >>", false, "", 0},
+ {"x ==", false, "", 0},
+ {"x =<", false, "", 0},
+ {"x =>", false, "", 0},
+ {"x !", false, "", 0},
+ {"x ", false, "", 0},
+ {"x", false, "", 0},
+ }
+ for _, tc := range testCases {
+ q := NewQuery("foo").Filter(tc.filterStr, 42)
+ if ok := q.err == nil; ok != tc.wantOK {
+ t.Errorf("%q: ok=%t, want %t", tc.filterStr, ok, tc.wantOK)
+ continue
+ }
+ if !tc.wantOK {
+ continue
+ }
+ if len(q.filter) != 1 {
+ t.Errorf("%q: len=%d, want %d", tc.filterStr, len(q.filter), 1)
+ continue
+ }
+ got, want := q.filter[0], filter{tc.wantFieldName, tc.wantOp, 42}
+ if got != want {
+ t.Errorf("%q: got %v, want %v", tc.filterStr, got, want)
+ continue
+ }
+ }
+}
+
+func TestQueryToProto(t *testing.T) {
+ // The context is required to make Keys for the test cases.
+ var got *pb.Query
+ NoErr := errors.New("No error")
+ c := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(in *pb.Query, out *pb.QueryResult) error {
+ got = in
+ return NoErr // return a non-nil error so Run doesn't keep going.
+ })
+ c = internal.WithAppIDOverride(c, "dev~fake-app")
+
+ testCases := []struct {
+ desc string
+ query *Query
+ want *pb.Query
+ err string
+ }{
+ {
+ desc: "empty",
+ query: NewQuery(""),
+ want: &pb.Query{},
+ },
+ {
+ desc: "standard query",
+ query: NewQuery("kind").Order("-I").Filter("I >", 17).Filter("U =", "Dave").Limit(7).Offset(42),
+ want: &pb.Query{
+ Kind: proto.String("kind"),
+ Filter: []*pb.Query_Filter{
+ {
+ Op: pb.Query_Filter_GREATER_THAN.Enum(),
+ Property: []*pb.Property{
+ {
+ Name: proto.String("I"),
+ Value: &pb.PropertyValue{Int64Value: proto.Int64(17)},
+ Multiple: proto.Bool(false),
+ },
+ },
+ },
+ {
+ Op: pb.Query_Filter_EQUAL.Enum(),
+ Property: []*pb.Property{
+ {
+ Name: proto.String("U"),
+ Value: &pb.PropertyValue{StringValue: proto.String("Dave")},
+ Multiple: proto.Bool(false),
+ },
+ },
+ },
+ },
+ Order: []*pb.Query_Order{
+ {
+ Property: proto.String("I"),
+ Direction: pb.Query_Order_DESCENDING.Enum(),
+ },
+ },
+ Limit: proto.Int32(7),
+ Offset: proto.Int32(42),
+ },
+ },
+ {
+ desc: "ancestor",
+ query: NewQuery("").Ancestor(NewKey(c, "kind", "Mummy", 0, nil)),
+ want: &pb.Query{
+ Ancestor: &pb.Reference{
+ App: proto.String("dev~fake-app"),
+ Path: &pb.Path{
+ Element: []*pb.Path_Element{{Type: proto.String("kind"), Name: proto.String("Mummy")}},
+ },
+ },
+ },
+ },
+ {
+ desc: "projection",
+ query: NewQuery("").Project("A", "B"),
+ want: &pb.Query{
+ PropertyName: []string{"A", "B"},
+ },
+ },
+ {
+ desc: "projection with distinct",
+ query: NewQuery("").Project("A", "B").Distinct(),
+ want: &pb.Query{
+ PropertyName: []string{"A", "B"},
+ GroupByPropertyName: []string{"A", "B"},
+ },
+ },
+ {
+ desc: "keys only",
+ query: NewQuery("").KeysOnly(),
+ want: &pb.Query{
+ KeysOnly: proto.Bool(true),
+ RequirePerfectPlan: proto.Bool(true),
+ },
+ },
+ {
+ desc: "empty filter",
+ query: NewQuery("kind").Filter("=", 17),
+ err: "empty query filter field nam",
+ },
+ {
+ desc: "bad filter type",
+ query: NewQuery("kind").Filter("M =", map[string]bool{}),
+ err: "bad query filter value type",
+ },
+ {
+ desc: "bad filter operator",
+ query: NewQuery("kind").Filter("I <<=", 17),
+ err: `invalid operator "<<=" in filter "I <<="`,
+ },
+ {
+ desc: "empty order",
+ query: NewQuery("kind").Order(""),
+ err: "empty order",
+ },
+ {
+ desc: "bad order direction",
+ query: NewQuery("kind").Order("+I"),
+ err: `invalid order: "+I`,
+ },
+ }
+
+ for _, tt := range testCases {
+ got = nil
+ if _, err := tt.query.Run(c).Next(nil); err != NoErr {
+ if tt.err == "" || !strings.Contains(err.Error(), tt.err) {
+ t.Errorf("%s: error %v, want %q", tt.desc, err, tt.err)
+ }
+ continue
+ }
+ if tt.err != "" {
+ t.Errorf("%s: no error, want %q", tt.desc, tt.err)
+ continue
+ }
+ // Fields that are common to all protos.
+ tt.want.App = proto.String("dev~fake-app")
+ tt.want.Compile = proto.Bool(true)
+ if !proto.Equal(got, tt.want) {
+ t.Errorf("%s:\ngot %v\nwant %v", tt.desc, got, tt.want)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/datastore/save.go b/vendor/google.golang.org/appengine/datastore/save.go
new file mode 100644
index 0000000..b5f9592
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/save.go
@@ -0,0 +1,300 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+func toUnixMicro(t time.Time) int64 {
+ // We cannot use t.UnixNano() / 1e3 because we want to handle times more than
+ // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot
+ // be represented in the numerator of a single int64 divide.
+ return t.Unix()*1e6 + int64(t.Nanosecond()/1e3)
+}
+
+func fromUnixMicro(t int64) time.Time {
+ return time.Unix(t/1e6, (t%1e6)*1e3).UTC()
+}
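A small worked check of the conversion above (editorial), showing that sub-microsecond precision is truncated on the way in.

// Hypothetical example; "time" is already imported by this file.
func exampleUnixMicroRoundTrip() {
	t := time.Unix(1000, 2500) // 1000 s and 2500 ns after the Unix epoch
	micros := toUnixMicro(t)   // 1000*1e6 + 2500/1e3 = 1000000002
	back := fromUnixMicro(micros)
	_ = back // time.Unix(1000, 2000).UTC(): the 500 ns remainder is gone
}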
+
+var (
+ minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3)
+ maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3)
+)
+
+// valueToProto converts a named value to a newly allocated Property.
+// The returned error string is empty on success.
+func valueToProto(defaultAppID, name string, v reflect.Value, multiple bool) (p *pb.Property, errStr string) {
+ var (
+ pv pb.PropertyValue
+ unsupported bool
+ )
+ switch v.Kind() {
+ case reflect.Invalid:
+ // No-op.
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ pv.Int64Value = proto.Int64(v.Int())
+ case reflect.Bool:
+ pv.BooleanValue = proto.Bool(v.Bool())
+ case reflect.String:
+ pv.StringValue = proto.String(v.String())
+ case reflect.Float32, reflect.Float64:
+ pv.DoubleValue = proto.Float64(v.Float())
+ case reflect.Ptr:
+ if k, ok := v.Interface().(*Key); ok {
+ if k != nil {
+ pv.Referencevalue = keyToReferenceValue(defaultAppID, k)
+ }
+ } else {
+ unsupported = true
+ }
+ case reflect.Struct:
+ switch t := v.Interface().(type) {
+ case time.Time:
+ if t.Before(minTime) || t.After(maxTime) {
+ return nil, "time value out of range"
+ }
+ pv.Int64Value = proto.Int64(toUnixMicro(t))
+ case appengine.GeoPoint:
+ if !t.Valid() {
+ return nil, "invalid GeoPoint value"
+ }
+ // NOTE: Strangely, latitude maps to X, longitude to Y.
+ pv.Pointvalue = &pb.PropertyValue_PointValue{X: &t.Lat, Y: &t.Lng}
+ default:
+ unsupported = true
+ }
+ case reflect.Slice:
+ if b, ok := v.Interface().([]byte); ok {
+ pv.StringValue = proto.String(string(b))
+ } else {
+ // Callers flatten supported slice values before reaching this point
+ // (see saveStructProperty), so a non-[]byte slice here is unsupported;
+ // in particular, a slice of slice values ends up here.
+ unsupported = true
+ }
+ default:
+ unsupported = true
+ }
+ if unsupported {
+ return nil, "unsupported datastore value type: " + v.Type().String()
+ }
+ p = &pb.Property{
+ Name: proto.String(name),
+ Value: &pv,
+ Multiple: proto.Bool(multiple),
+ }
+ if v.IsValid() {
+ switch v.Interface().(type) {
+ case []byte:
+ p.Meaning = pb.Property_BLOB.Enum()
+ case ByteString:
+ p.Meaning = pb.Property_BYTESTRING.Enum()
+ case appengine.BlobKey:
+ p.Meaning = pb.Property_BLOBKEY.Enum()
+ case time.Time:
+ p.Meaning = pb.Property_GD_WHEN.Enum()
+ case appengine.GeoPoint:
+ p.Meaning = pb.Property_GEORSS_POINT.Enum()
+ }
+ }
+ return p, ""
+}
+
+// saveEntity saves a PropertyLoadSaver or struct pointer src as an EntityProto.
+func saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto, error) {
+ var err error
+ var props []Property
+ if e, ok := src.(PropertyLoadSaver); ok {
+ props, err = e.Save()
+ } else {
+ props, err = SaveStruct(src)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return propertiesToProto(defaultAppID, key, props)
+}
+
+func saveStructProperty(props *[]Property, name string, noIndex, multiple bool, v reflect.Value) error {
+ p := Property{
+ Name: name,
+ NoIndex: noIndex,
+ Multiple: multiple,
+ }
+ switch x := v.Interface().(type) {
+ case *Key:
+ p.Value = x
+ case time.Time:
+ p.Value = x
+ case appengine.BlobKey:
+ p.Value = x
+ case appengine.GeoPoint:
+ p.Value = x
+ case ByteString:
+ p.Value = x
+ default:
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p.Value = v.Int()
+ case reflect.Bool:
+ p.Value = v.Bool()
+ case reflect.String:
+ p.Value = v.String()
+ case reflect.Float32, reflect.Float64:
+ p.Value = v.Float()
+ case reflect.Slice:
+ if v.Type().Elem().Kind() == reflect.Uint8 {
+ p.NoIndex = true
+ p.Value = v.Bytes()
+ }
+ case reflect.Struct:
+ if !v.CanAddr() {
+ return fmt.Errorf("datastore: unsupported struct field: value is unaddressable")
+ }
+ sub, err := newStructPLS(v.Addr().Interface())
+ if err != nil {
+ return fmt.Errorf("datastore: unsupported struct field: %v", err)
+ }
+ return sub.(structPLS).save(props, name, noIndex, multiple)
+ }
+ }
+ if p.Value == nil {
+ return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type())
+ }
+ *props = append(*props, p)
+ return nil
+}
+
+func (s structPLS) Save() ([]Property, error) {
+ var props []Property
+ if err := s.save(&props, "", false, false); err != nil {
+ return nil, err
+ }
+ return props, nil
+}
+
+func (s structPLS) save(props *[]Property, prefix string, noIndex, multiple bool) error {
+ for i, t := range s.codec.byIndex {
+ if t.name == "-" {
+ continue
+ }
+ name := t.name
+ if prefix != "" {
+ name = prefix + name
+ }
+ v := s.v.Field(i)
+ if !v.IsValid() || !v.CanSet() {
+ continue
+ }
+ noIndex1 := noIndex || t.noIndex
+ // For slice fields that aren't []byte, save each element.
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+ for j := 0; j < v.Len(); j++ {
+ if err := saveStructProperty(props, name, noIndex1, true, v.Index(j)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ // Otherwise, save the field itself.
+ if err := saveStructProperty(props, name, noIndex1, multiple, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.EntityProto, error) {
+ e := &pb.EntityProto{
+ Key: keyToProto(defaultAppID, key),
+ }
+ if key.parent == nil {
+ e.EntityGroup = &pb.Path{}
+ } else {
+ e.EntityGroup = keyToProto(defaultAppID, key.root()).Path
+ }
+ prevMultiple := make(map[string]bool)
+
+ for _, p := range props {
+ if pm, ok := prevMultiple[p.Name]; ok {
+ if !pm || !p.Multiple {
+ return nil, fmt.Errorf("datastore: multiple Properties with Name %q, but Multiple is false", p.Name)
+ }
+ } else {
+ prevMultiple[p.Name] = p.Multiple
+ }
+
+ x := &pb.Property{
+ Name: proto.String(p.Name),
+ Value: new(pb.PropertyValue),
+ Multiple: proto.Bool(p.Multiple),
+ }
+ switch v := p.Value.(type) {
+ case int64:
+ x.Value.Int64Value = proto.Int64(v)
+ case bool:
+ x.Value.BooleanValue = proto.Bool(v)
+ case string:
+ x.Value.StringValue = proto.String(v)
+ if p.NoIndex {
+ x.Meaning = pb.Property_TEXT.Enum()
+ }
+ case float64:
+ x.Value.DoubleValue = proto.Float64(v)
+ case *Key:
+ if v != nil {
+ x.Value.Referencevalue = keyToReferenceValue(defaultAppID, v)
+ }
+ case time.Time:
+ if v.Before(minTime) || v.After(maxTime) {
+ return nil, fmt.Errorf("datastore: time value out of range")
+ }
+ x.Value.Int64Value = proto.Int64(toUnixMicro(v))
+ x.Meaning = pb.Property_GD_WHEN.Enum()
+ case appengine.BlobKey:
+ x.Value.StringValue = proto.String(string(v))
+ x.Meaning = pb.Property_BLOBKEY.Enum()
+ case appengine.GeoPoint:
+ if !v.Valid() {
+ return nil, fmt.Errorf("datastore: invalid GeoPoint value")
+ }
+ // NOTE: Strangely, latitude maps to X, longitude to Y.
+ x.Value.Pointvalue = &pb.PropertyValue_PointValue{X: &v.Lat, Y: &v.Lng}
+ x.Meaning = pb.Property_GEORSS_POINT.Enum()
+ case []byte:
+ x.Value.StringValue = proto.String(string(v))
+ x.Meaning = pb.Property_BLOB.Enum()
+ if !p.NoIndex {
+ return nil, fmt.Errorf("datastore: cannot index a []byte valued Property with Name %q", p.Name)
+ }
+ case ByteString:
+ x.Value.StringValue = proto.String(string(v))
+ x.Meaning = pb.Property_BYTESTRING.Enum()
+ default:
+ if p.Value != nil {
+ return nil, fmt.Errorf("datastore: invalid Value type for a Property with Name %q", p.Name)
+ }
+ }
+
+ if p.NoIndex {
+ e.RawProperty = append(e.RawProperty, x)
+ } else {
+ e.Property = append(e.Property, x)
+ if len(e.Property) > maxIndexedProperties {
+ return nil, errors.New("datastore: too many indexed properties")
+ }
+ }
+ }
+ return e, nil
+}
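+
+// Example (illustrative sketch; the Article type is hypothetical, not part of
+// this package): SaveStruct flattens a struct into []Property following the
+// rules above. A slice field other than []byte yields one Property per element
+// with Multiple set, and a []byte field is always saved with NoIndex set.
+//
+//    type Article struct {
+//        Title string
+//        Tags  []string
+//        Raw   []byte
+//    }
+//
+//    props, err := datastore.SaveStruct(&Article{
+//        Title: "hello",
+//        Tags:  []string{"go", "appengine"},
+//        Raw:   []byte{0x01, 0x02},
+//    })
+//    // On success, props holds four entries: Title, two Tags entries
+//    // (Multiple=true) and Raw (NoIndex=true).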
diff --git a/vendor/google.golang.org/appengine/datastore/time_test.go b/vendor/google.golang.org/appengine/datastore/time_test.go
new file mode 100644
index 0000000..ba74b44
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/time_test.go
@@ -0,0 +1,65 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "testing"
+ "time"
+)
+
+func TestUnixMicro(t *testing.T) {
+ // Test that all these time.Time values survive a round trip to unix micros.
+ testCases := []time.Time{
+ {},
+ time.Date(2, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(23, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(234, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1600, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1700, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Unix(-1e6, -1000),
+ time.Unix(-1e6, 0),
+ time.Unix(-1e6, +1000),
+ time.Unix(-60, -1000),
+ time.Unix(-60, 0),
+ time.Unix(-60, +1000),
+ time.Unix(-1, -1000),
+ time.Unix(-1, 0),
+ time.Unix(-1, +1000),
+ time.Unix(0, -3000),
+ time.Unix(0, -2000),
+ time.Unix(0, -1000),
+ time.Unix(0, 0),
+ time.Unix(0, +1000),
+ time.Unix(0, +2000),
+ time.Unix(+60, -1000),
+ time.Unix(+60, 0),
+ time.Unix(+60, +1000),
+ time.Unix(+1e6, -1000),
+ time.Unix(+1e6, 0),
+ time.Unix(+1e6, +1000),
+ time.Date(1999, 12, 31, 23, 59, 59, 999000, time.UTC),
+ time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(2006, 1, 2, 15, 4, 5, 678000, time.UTC),
+ time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC),
+ time.Date(3456, 1, 1, 0, 0, 0, 0, time.UTC),
+ }
+ for _, tc := range testCases {
+ got := fromUnixMicro(toUnixMicro(tc))
+ if !got.Equal(tc) {
+ t.Errorf("got %q, want %q", got, tc)
+ }
+ }
+
+ // Test that a time.Time that isn't an integral number of microseconds
+ // is not perfectly reconstructed after a round trip.
+ t0 := time.Unix(0, 123)
+ t1 := fromUnixMicro(toUnixMicro(t0))
+ if t1.Nanosecond()%1000 != 0 || t0.Nanosecond()%1000 == 0 {
+ t.Errorf("quantization to µs: got %q with %d ns, started with %d ns", t1, t1.Nanosecond(), t0.Nanosecond())
+ }
+}
diff --git a/vendor/google.golang.org/appengine/datastore/transaction.go b/vendor/google.golang.org/appengine/datastore/transaction.go
new file mode 100644
index 0000000..a7f3f2b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/transaction.go
@@ -0,0 +1,87 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+func init() {
+ internal.RegisterTransactionSetter(func(x *pb.Query, t *pb.Transaction) {
+ x.Transaction = t
+ })
+ internal.RegisterTransactionSetter(func(x *pb.GetRequest, t *pb.Transaction) {
+ x.Transaction = t
+ })
+ internal.RegisterTransactionSetter(func(x *pb.PutRequest, t *pb.Transaction) {
+ x.Transaction = t
+ })
+ internal.RegisterTransactionSetter(func(x *pb.DeleteRequest, t *pb.Transaction) {
+ x.Transaction = t
+ })
+}
+
+// ErrConcurrentTransaction is returned when a transaction is rolled back due
+// to a conflict with a concurrent transaction.
+var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction")
+
+// RunInTransaction runs f in a transaction. It calls f with a transaction
+// context tc that f should use for all App Engine operations.
+//
+// If f returns nil, RunInTransaction attempts to commit the transaction,
+// returning nil if it succeeds. If the commit fails due to a conflicting
+// transaction, RunInTransaction retries f, each time with a new transaction
+// context. It gives up and returns ErrConcurrentTransaction after three
+// failed attempts. The number of attempts can be configured by specifying
+// TransactionOptions.Attempts.
+//
+// If f returns non-nil, then any datastore changes will not be applied and
+// RunInTransaction returns that same error. The function f is not retried.
+//
+// Note that when f returns, the transaction is not yet committed. Calling code
+// must be careful not to assume that any of f's changes have been committed
+// until RunInTransaction returns nil.
+//
+// Since f may be called multiple times, f should usually be idempotent.
+// datastore.Get is not idempotent when unmarshaling slice fields.
+//
+// Nested transactions are not supported; c may not be a transaction context.
+func RunInTransaction(c context.Context, f func(tc context.Context) error, opts *TransactionOptions) error {
+ xg := false
+ if opts != nil {
+ xg = opts.XG
+ }
+ attempts := 3
+ if opts != nil && opts.Attempts > 0 {
+ attempts = opts.Attempts
+ }
+ for i := 0; i < attempts; i++ {
+ if err := internal.RunTransactionOnce(c, f, xg); err != internal.ErrConcurrentTransaction {
+ return err
+ }
+ }
+ return ErrConcurrentTransaction
+}
+
+// TransactionOptions are the options for running a transaction.
+type TransactionOptions struct {
+ // XG is whether the transaction can cross multiple entity groups. In
+ // comparison, a single group transaction is one where all datastore keys
+ // used have the same root key. Note that cross group transactions do not
+ // have the same behavior as single group transactions. In particular, a
+ // cross group transaction is much more likely to be seen partially applied
+ // across its entity groups, for example by global (non-ancestor) queries.
+ // It is valid to set XG to true even if the transaction is within a
+ // single entity group.
+ XG bool
+ // Attempts controls the number of retries to perform when commits fail
+ // due to a conflicting transaction. If omitted, it defaults to 3.
+ Attempts int
+}
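+
+// Example (illustrative sketch; the Counter type and key are assumed to be
+// defined by the caller): a read-modify-write that RunInTransaction retries
+// on commit conflicts, here with up to five attempts.
+//
+//    err := datastore.RunInTransaction(ctx, func(tc context.Context) error {
+//        var c Counter
+//        if err := datastore.Get(tc, key, &c); err != nil && err != datastore.ErrNoSuchEntity {
+//            return err
+//        }
+//        c.N++
+//        _, err := datastore.Put(tc, key, &c)
+//        return err
+//    }, &datastore.TransactionOptions{Attempts: 5})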
diff --git a/vendor/google.golang.org/appengine/delay/delay.go b/vendor/google.golang.org/appengine/delay/delay.go
new file mode 100644
index 0000000..9e517ca
--- /dev/null
+++ b/vendor/google.golang.org/appengine/delay/delay.go
@@ -0,0 +1,278 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package delay provides a way to execute code outside the scope of a
+user request by using the taskqueue API.
+
+To declare a function that may be executed later, call Func
+in a top-level assignment context, passing it an arbitrary string key
+and a function whose first argument is of type context.Context.
+ var laterFunc = delay.Func("key", myFunc)
+It is also possible to use a function literal.
+ var laterFunc = delay.Func("key", func(c context.Context, x string) {
+ // ...
+ })
+
+To call a function, invoke its Call method.
+ laterFunc.Call(c, "something")
+A function may be called any number of times. If the function has any
+return arguments, and the last one is of type error, the function may
+return a non-nil error to signal that the function should be retried.
+
+The arguments to functions may be of any type that is encodable by the gob
+package. If an argument is of interface type, it is the client's responsibility
+to register with the gob package whatever concrete type may be passed for that
+argument; see http://golang.org/pkg/gob/#Register for details.
+
+Any errors during initialization or execution of a function will be
+logged to the application logs. Error logs that occur during initialization will
+be associated with the request that invoked the Call method.
+
+The state of a function invocation that has not yet successfully
+executed is preserved by combining the file name in which it is declared
+with the string key that was passed to the Func function. Updating an app
+with pending function invocations is safe as long as the relevant
+functions have the (filename, key) combination preserved.
+
+The delay package uses the Task Queue API to create tasks that call the
+reserved application path "/_ah/queue/go/delay".
+This path must not be marked as "login: required" in app.yaml;
+it must be marked as "login: admin" or have no access restriction.
+*/
+package delay // import "google.golang.org/appengine/delay"
+
+import (
+ "bytes"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "net/http"
+ "reflect"
+ "runtime"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/log"
+ "google.golang.org/appengine/taskqueue"
+)
+
+// Function represents a function that may have a delayed invocation.
+type Function struct {
+ fv reflect.Value // Kind() == reflect.Func
+ key string
+ err error // any error during initialization
+}
+
+const (
+ // The HTTP path for invocations.
+ path = "/_ah/queue/go/delay"
+ // Use the default queue.
+ queue = ""
+)
+
+var (
+ // registry of all delayed functions
+ funcs = make(map[string]*Function)
+
+ // precomputed types
+ contextType = reflect.TypeOf((*context.Context)(nil)).Elem()
+ errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+ // errors
+ errFirstArg = errors.New("first argument must be context.Context")
+)
+
+// Func declares a new Function. The second argument must be a function with a
+// first argument of type context.Context.
+// This function must be called at program initialization time. That means it
+// must be called in a global variable declaration or from an init function.
+// This restriction is necessary because the instance that delays a function
+// call may not be the one that executes it. Only the code executed at program
+// initialization time is guaranteed to have been run by an instance before it
+// receives a request.
+func Func(key string, i interface{}) *Function {
+ f := &Function{fv: reflect.ValueOf(i)}
+
+ // Derive unique, somewhat stable key for this func.
+ _, file, _, _ := runtime.Caller(1)
+ f.key = file + ":" + key
+
+ t := f.fv.Type()
+ if t.Kind() != reflect.Func {
+ f.err = errors.New("not a function")
+ return f
+ }
+ if t.NumIn() == 0 || t.In(0) != contextType {
+ f.err = errFirstArg
+ return f
+ }
+
+ // Register the function's arguments with the gob package.
+ // This is required because they are marshaled inside a []interface{}.
+ // gob.Register only expects to be called during initialization;
+ // that's fine because this function expects the same.
+ for i := 0; i < t.NumIn(); i++ {
+ // Only concrete types may be registered. If the argument has
+ // interface type, the client is responsible for registering the
+ // concrete types it will hold.
+ if t.In(i).Kind() == reflect.Interface {
+ continue
+ }
+ gob.Register(reflect.Zero(t.In(i)).Interface())
+ }
+
+ if old := funcs[f.key]; old != nil {
+ old.err = fmt.Errorf("multiple functions registered for %s in %s", key, file)
+ }
+ funcs[f.key] = f
+ return f
+}
+
+type invocation struct {
+ Key string
+ Args []interface{}
+}
+
+// Call invokes a delayed function.
+// err := f.Call(c, ...)
+// is equivalent to
+// t, _ := f.Task(...)
+// _, err := taskqueue.Add(c, t, "")
+func (f *Function) Call(c context.Context, args ...interface{}) error {
+ t, err := f.Task(args...)
+ if err != nil {
+ return err
+ }
+ _, err = taskqueueAdder(c, t, queue)
+ return err
+}
+
+// Task creates a Task that will invoke the function.
+// Its parameters may be tweaked before adding it to a queue.
+// Users should not modify the Path or Payload fields of the returned Task.
+func (f *Function) Task(args ...interface{}) (*taskqueue.Task, error) {
+ if f.err != nil {
+ return nil, fmt.Errorf("delay: func is invalid: %v", f.err)
+ }
+
+ nArgs := len(args) + 1 // +1 for the context.Context
+ ft := f.fv.Type()
+ minArgs := ft.NumIn()
+ if ft.IsVariadic() {
+ minArgs--
+ }
+ if nArgs < minArgs {
+ return nil, fmt.Errorf("delay: too few arguments to func: %d < %d", nArgs, minArgs)
+ }
+ if !ft.IsVariadic() && nArgs > minArgs {
+ return nil, fmt.Errorf("delay: too many arguments to func: %d > %d", nArgs, minArgs)
+ }
+
+ // Check arg types.
+ for i := 1; i < nArgs; i++ {
+ at := reflect.TypeOf(args[i-1])
+ var dt reflect.Type
+ if i < minArgs {
+ // not a variadic arg
+ dt = ft.In(i)
+ } else {
+ // a variadic arg
+ dt = ft.In(minArgs).Elem()
+ }
+ // nil arguments won't have a type, so they need special handling.
+ if at == nil {
+ // nil interface
+ switch dt.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ continue // may be nil
+ }
+ return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not nilable", i, dt)
+ }
+ switch at.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ av := reflect.ValueOf(args[i-1])
+ if av.IsNil() {
+ // nil value in interface; not supported by gob, so we replace it
+ // with a nil interface value
+ args[i-1] = nil
+ }
+ }
+ if !at.AssignableTo(dt) {
+ return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not assignable to %v", i, at, dt)
+ }
+ }
+
+ inv := invocation{
+ Key: f.key,
+ Args: args,
+ }
+
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(inv); err != nil {
+ return nil, fmt.Errorf("delay: gob encoding failed: %v", err)
+ }
+
+ return &taskqueue.Task{
+ Path: path,
+ Payload: buf.Bytes(),
+ }, nil
+}
+
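+// Example (illustrative sketch, reusing laterFunc from the package comment):
+// the returned Task can have queue parameters such as Delay adjusted before
+// it is added explicitly with taskqueue.Add.
+//
+//    t, err := laterFunc.Task("something")
+//    if err != nil {
+//        return err
+//    }
+//    t.Delay = 10 * time.Minute
+//    _, err = taskqueue.Add(c, t, "")
+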
+var taskqueueAdder = taskqueue.Add // for testing
+
+func init() {
+ http.HandleFunc(path, func(w http.ResponseWriter, req *http.Request) {
+ runFunc(appengine.NewContext(req), w, req)
+ })
+}
+
+func runFunc(c context.Context, w http.ResponseWriter, req *http.Request) {
+ defer req.Body.Close()
+
+ var inv invocation
+ if err := gob.NewDecoder(req.Body).Decode(&inv); err != nil {
+ log.Errorf(c, "delay: failed decoding task payload: %v", err)
+ log.Warningf(c, "delay: dropping task")
+ return
+ }
+
+ f := funcs[inv.Key]
+ if f == nil {
+ log.Errorf(c, "delay: no func with key %q found", inv.Key)
+ log.Warningf(c, "delay: dropping task")
+ return
+ }
+
+ ft := f.fv.Type()
+ in := []reflect.Value{reflect.ValueOf(c)}
+ for _, arg := range inv.Args {
+ var v reflect.Value
+ if arg != nil {
+ v = reflect.ValueOf(arg)
+ } else {
+ // Task was passed a nil argument, so we must construct
+ // the zero value for the argument here.
+ n := len(in) // we're constructing the nth argument
+ var at reflect.Type
+ if !ft.IsVariadic() || n < ft.NumIn()-1 {
+ at = ft.In(n)
+ } else {
+ at = ft.In(ft.NumIn() - 1).Elem()
+ }
+ v = reflect.Zero(at)
+ }
+ in = append(in, v)
+ }
+ out := f.fv.Call(in)
+
+ if n := ft.NumOut(); n > 0 && ft.Out(n-1) == errorType {
+ if errv := out[n-1]; !errv.IsNil() {
+ log.Errorf(c, "delay: func failed (will retry): %v", errv.Interface())
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/delay/delay_test.go b/vendor/google.golang.org/appengine/delay/delay_test.go
new file mode 100644
index 0000000..1c37e79
--- /dev/null
+++ b/vendor/google.golang.org/appengine/delay/delay_test.go
@@ -0,0 +1,375 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package delay
+
+import (
+ "bytes"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/taskqueue"
+)
+
+type CustomType struct {
+ N int
+}
+
+type CustomInterface interface {
+ N() int
+}
+
+type CustomImpl int
+
+func (c CustomImpl) N() int { return int(c) }
+
+// CustomImpl needs to be registered with gob.
+func init() {
+ gob.Register(CustomImpl(0))
+}
+
+var (
+ invalidFunc = Func("invalid", func() {})
+
+ regFuncRuns = 0
+ regFuncMsg = ""
+ regFunc = Func("reg", func(c context.Context, arg string) {
+ regFuncRuns++
+ regFuncMsg = arg
+ })
+
+ custFuncTally = 0
+ custFunc = Func("cust", func(c context.Context, ct *CustomType, ci CustomInterface) {
+ a, b := 2, 3
+ if ct != nil {
+ a = ct.N
+ }
+ if ci != nil {
+ b = ci.N()
+ }
+ custFuncTally += a + b
+ })
+
+ anotherCustFunc = Func("cust2", func(c context.Context, n int, ct *CustomType, ci CustomInterface) {
+ })
+
+ varFuncMsg = ""
+ varFunc = Func("variadic", func(c context.Context, format string, args ...int) {
+ // convert []int to []interface{} for fmt.Sprintf.
+ as := make([]interface{}, len(args))
+ for i, a := range args {
+ as[i] = a
+ }
+ varFuncMsg = fmt.Sprintf(format, as...)
+ })
+
+ errFuncRuns = 0
+ errFuncErr = errors.New("error!")
+ errFunc = Func("err", func(c context.Context) error {
+ errFuncRuns++
+ if errFuncRuns == 1 {
+ return nil
+ }
+ return errFuncErr
+ })
+
+ dupeWhich = 0
+ dupe1Func = Func("dupe", func(c context.Context) {
+ if dupeWhich == 0 {
+ dupeWhich = 1
+ }
+ })
+ dupe2Func = Func("dupe", func(c context.Context) {
+ if dupeWhich == 0 {
+ dupeWhich = 2
+ }
+ })
+)
+
+type fakeContext struct {
+ ctx context.Context
+ logging [][]interface{}
+}
+
+func newFakeContext() *fakeContext {
+ f := new(fakeContext)
+ f.ctx = internal.WithCallOverride(context.Background(), f.call)
+ f.ctx = internal.WithLogOverride(f.ctx, f.logf)
+ return f
+}
+
+func (f *fakeContext) call(ctx context.Context, service, method string, in, out proto.Message) error {
+ panic("should never be called")
+}
+
+var logLevels = map[int64]string{1: "INFO", 3: "ERROR"}
+
+func (f *fakeContext) logf(level int64, format string, args ...interface{}) {
+ f.logging = append(f.logging, append([]interface{}{logLevels[level], format}, args...))
+}
+
+func TestInvalidFunction(t *testing.T) {
+ c := newFakeContext()
+
+ if got, want := invalidFunc.Call(c.ctx), fmt.Errorf("delay: func is invalid: %s", errFirstArg); got.Error() != want.Error() {
+ t.Errorf("Incorrect error: got %q, want %q", got, want)
+ }
+}
+
+func TestVariadicFunctionArguments(t *testing.T) {
+ // Check the argument type validation for variadic functions.
+
+ c := newFakeContext()
+
+ calls := 0
+ taskqueueAdder = func(c context.Context, t *taskqueue.Task, _ string) (*taskqueue.Task, error) {
+ calls++
+ return t, nil
+ }
+
+ varFunc.Call(c.ctx, "hi")
+ varFunc.Call(c.ctx, "%d", 12)
+ varFunc.Call(c.ctx, "%d %d %d", 3, 1, 4)
+ if calls != 3 {
+ t.Errorf("Got %d calls to taskqueueAdder, want 3", calls)
+ }
+
+ if got, want := varFunc.Call(c.ctx, "%d %s", 12, "a string is bad"), errors.New("delay: argument 3 has wrong type: string is not assignable to int"); got.Error() != want.Error() {
+ t.Errorf("Incorrect error: got %q, want %q", got, want)
+ }
+}
+
+func TestBadArguments(t *testing.T) {
+ // Try running regFunc with different sets of inappropriate arguments.
+
+ c := newFakeContext()
+
+ tests := []struct {
+ args []interface{} // all except context
+ wantErr string
+ }{
+ {
+ args: nil,
+ wantErr: "delay: too few arguments to func: 1 < 2",
+ },
+ {
+ args: []interface{}{"lala", 53},
+ wantErr: "delay: too many arguments to func: 3 > 2",
+ },
+ {
+ args: []interface{}{53},
+ wantErr: "delay: argument 1 has wrong type: int is not assignable to string",
+ },
+ }
+ for i, tc := range tests {
+ got := regFunc.Call(c.ctx, tc.args...)
+ if got.Error() != tc.wantErr {
+ t.Errorf("Call %v: got %q, want %q", i, got, tc.wantErr)
+ }
+ }
+}
+
+func TestRunningFunction(t *testing.T) {
+ c := newFakeContext()
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ regFuncRuns, regFuncMsg = 0, "" // reset state
+ const msg = "Why, hello!"
+ regFunc.Call(c.ctx, msg)
+
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ if regFuncRuns != 1 {
+ t.Errorf("regFuncRuns: got %d, want 1", regFuncRuns)
+ }
+ if regFuncMsg != msg {
+ t.Errorf("regFuncMsg: got %q, want %q", regFuncMsg, msg)
+ }
+}
+
+func TestCustomType(t *testing.T) {
+ c := newFakeContext()
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ custFuncTally = 0 // reset state
+ custFunc.Call(c.ctx, &CustomType{N: 11}, CustomImpl(13))
+
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ if custFuncTally != 24 {
+ t.Errorf("custFuncTally = %d, want 24", custFuncTally)
+ }
+
+ // Try the same, but with nil values; one is a nil pointer (and thus a non-nil interface value),
+ // and the other is a nil interface value.
+ custFuncTally = 0 // reset state
+ custFunc.Call(c.ctx, (*CustomType)(nil), nil)
+
+ // Simulate the Task Queue service.
+ req, err = http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw = httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ if custFuncTally != 5 {
+ t.Errorf("custFuncTally = %d, want 5", custFuncTally)
+ }
+}
+
+func TestRunningVariadic(t *testing.T) {
+ c := newFakeContext()
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ varFuncMsg = "" // reset state
+ varFunc.Call(c.ctx, "Amiga %d has %d KB RAM", 500, 512)
+
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ const expected = "Amiga 500 has 512 KB RAM"
+ if varFuncMsg != expected {
+ t.Errorf("varFuncMsg = %q, want %q", varFuncMsg, expected)
+ }
+}
+
+func TestErrorFunction(t *testing.T) {
+ c := newFakeContext()
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ errFunc.Call(c.ctx)
+
+ // Simulate the Task Queue service.
+ // The first call should succeed; the second call should fail.
+ {
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+ }
+ {
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+ if rw.Code != http.StatusInternalServerError {
+ t.Errorf("Got status code %d, want %d", rw.Code, http.StatusInternalServerError)
+ }
+
+ wantLogging := [][]interface{}{
+ {"ERROR", "delay: func failed (will retry): %v", errFuncErr},
+ }
+ if !reflect.DeepEqual(c.logging, wantLogging) {
+ t.Errorf("Incorrect logging: got %+v, want %+v", c.logging, wantLogging)
+ }
+ }
+}
+
+func TestDuplicateFunction(t *testing.T) {
+ c := newFakeContext()
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ if err := dupe1Func.Call(c.ctx); err == nil {
+ t.Error("dupe1Func.Call did not return error")
+ }
+ if task != nil {
+ t.Error("dupe1Func.Call posted a task")
+ }
+ if err := dupe2Func.Call(c.ctx); err != nil {
+ t.Errorf("dupe2Func.Call error: %v", err)
+ }
+ if task == nil {
+ t.Fatalf("dupe2Func.Call did not post a task")
+ }
+
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ if dupeWhich == 1 {
+ t.Error("dupe2Func.Call used old registered function")
+ } else if dupeWhich != 2 {
+ t.Errorf("dupeWhich = %d; want 2", dupeWhich)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/app.yaml b/vendor/google.golang.org/appengine/demos/guestbook/app.yaml
new file mode 100644
index 0000000..3342503
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/app.yaml
@@ -0,0 +1,14 @@
+# Demo application for App Engine "flexible environment".
+runtime: go
+vm: true
+api_version: go1
+
+handlers:
+# Favicon. Without this, the browser hits this once per page view.
+- url: /favicon.ico
+ static_files: favicon.ico
+ upload: favicon.ico
+
+# Main app. All the real work is here.
+- url: /.*
+ script: _go_app
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/favicon.ico b/vendor/google.golang.org/appengine/demos/guestbook/favicon.ico
new file mode 100644
index 0000000..1a71ea7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/favicon.ico
Binary files differ
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/guestbook.go b/vendor/google.golang.org/appengine/demos/guestbook/guestbook.go
new file mode 100644
index 0000000..04a0432
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/guestbook.go
@@ -0,0 +1,109 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// This example only works on App Engine "flexible environment".
+// +build !appengine
+
+package main
+
+import (
+ "html/template"
+ "net/http"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/datastore"
+ "google.golang.org/appengine/log"
+ "google.golang.org/appengine/user"
+)
+
+var initTime time.Time
+
+type Greeting struct {
+ Author string
+ Content string
+ Date time.Time
+}
+
+func main() {
+ http.HandleFunc("/", handleMainPage)
+ http.HandleFunc("/sign", handleSign)
+ appengine.Main()
+}
+
+// guestbookKey returns the key used for all guestbook entries.
+func guestbookKey(ctx context.Context) *datastore.Key {
+ // The string "default_guestbook" here could be varied to have multiple guestbooks.
+ return datastore.NewKey(ctx, "Guestbook", "default_guestbook", 0, nil)
+}
+
+var tpl = template.Must(template.ParseGlob("templates/*.html"))
+
+func handleMainPage(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "GET" {
+ http.Error(w, "GET requests only", http.StatusMethodNotAllowed)
+ return
+ }
+ if r.URL.Path != "/" {
+ http.NotFound(w, r)
+ return
+ }
+
+ ctx := appengine.NewContext(r)
+ tic := time.Now()
+ q := datastore.NewQuery("Greeting").Ancestor(guestbookKey(ctx)).Order("-Date").Limit(10)
+ var gg []*Greeting
+ if _, err := q.GetAll(ctx, &gg); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ log.Errorf(ctx, "GetAll: %v", err)
+ return
+ }
+ log.Infof(ctx, "Datastore lookup took %s", time.Since(tic).String())
+ log.Infof(ctx, "Rendering %d greetings", len(gg))
+
+ var email, logout, login string
+ if u := user.Current(ctx); u != nil {
+ logout, _ = user.LogoutURL(ctx, "/")
+ email = u.Email
+ } else {
+ login, _ = user.LoginURL(ctx, "/")
+ }
+ data := struct {
+ Greetings []*Greeting
+ Login, Logout, Email string
+ }{
+ Greetings: gg,
+ Login: login,
+ Logout: logout,
+ Email: email,
+ }
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ if err := tpl.ExecuteTemplate(w, "guestbook.html", data); err != nil {
+ log.Errorf(ctx, "%v", err)
+ }
+}
+
+func handleSign(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "POST" {
+ http.Error(w, "POST requests only", http.StatusMethodNotAllowed)
+ return
+ }
+ ctx := appengine.NewContext(r)
+ g := &Greeting{
+ Content: r.FormValue("content"),
+ Date: time.Now(),
+ }
+ if u := user.Current(ctx); u != nil {
+ g.Author = u.String()
+ }
+ key := datastore.NewIncompleteKey(ctx, "Greeting", guestbookKey(ctx))
+ if _, err := datastore.Put(ctx, key, g); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ // Redirect with 303 which causes the subsequent request to use GET.
+ http.Redirect(w, r, "/", http.StatusSeeOther)
+}
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/index.yaml b/vendor/google.golang.org/appengine/demos/guestbook/index.yaml
new file mode 100644
index 0000000..315ffeb
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/index.yaml
@@ -0,0 +1,7 @@
+indexes:
+
+- kind: Greeting
+ ancestor: yes
+ properties:
+ - name: Date
+ direction: desc
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html b/vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html
new file mode 100644
index 0000000..322b7cf
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html
@@ -0,0 +1,26 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>Guestbook Demo</title>
+ </head>
+ <body>
+ <p>
+ {{with .Email}}You are currently logged in as {{.}}.{{end}}
+ {{with .Login}}<a href="{{.}}">Sign in</a>{{end}}
+ {{with .Logout}}<a href="{{.}}">Sign out</a>{{end}}
+ </p>
+
+ {{range .Greetings }}
+ <p>
+ {{with .Author}}<b>{{.}}</b>{{else}}An anonymous person{{end}}
+ on <em>{{.Date.Format "3:04pm, Mon 2 Jan"}}</em>
+ wrote <blockquote>{{.Content}}</blockquote>
+ </p>
+ {{end}}
+
+ <form action="/sign" method="post">
+ <div><textarea name="content" rows="3" cols="60"></textarea></div>
+ <div><input type="submit" value="Sign Guestbook"></div>
+ </form>
+ </body>
+</html>
diff --git a/vendor/google.golang.org/appengine/demos/helloworld/app.yaml b/vendor/google.golang.org/appengine/demos/helloworld/app.yaml
new file mode 100644
index 0000000..1509119
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/helloworld/app.yaml
@@ -0,0 +1,10 @@
+runtime: go
+api_version: go1
+vm: true
+
+handlers:
+- url: /favicon.ico
+ static_files: favicon.ico
+ upload: favicon.ico
+- url: /.*
+ script: _go_app
diff --git a/vendor/google.golang.org/appengine/demos/helloworld/favicon.ico b/vendor/google.golang.org/appengine/demos/helloworld/favicon.ico
new file mode 100644
index 0000000..f19c04d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/helloworld/favicon.ico
Binary files differ
diff --git a/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go b/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go
new file mode 100644
index 0000000..fbe9f56
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go
@@ -0,0 +1,50 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// This example only works on App Engine "flexible environment".
+// +build !appengine
+
+package main
+
+import (
+ "html/template"
+ "net/http"
+ "time"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/log"
+)
+
+var initTime = time.Now()
+
+func main() {
+ http.HandleFunc("/", handle)
+ appengine.Main()
+}
+
+func handle(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path != "/" {
+ http.NotFound(w, r)
+ return
+ }
+
+ ctx := appengine.NewContext(r)
+ log.Infof(ctx, "Serving the front page.")
+
+ tmpl.Execute(w, time.Since(initTime))
+}
+
+var tmpl = template.Must(template.New("front").Parse(`
+<html><body>
+
+<p>
+Hello, World! 세상아 안녕!
+</p>
+
+<p>
+This instance has been running for <em>{{.}}</em>.
+</p>
+
+</body></html>
+`))
diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go
new file mode 100644
index 0000000..16d0772
--- /dev/null
+++ b/vendor/google.golang.org/appengine/errors.go
@@ -0,0 +1,46 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// This file provides error functions for common API failure modes.
+
+package appengine
+
+import (
+ "fmt"
+
+ "google.golang.org/appengine/internal"
+)
+
+// IsOverQuota reports whether err represents an API call failure
+// due to insufficient available quota.
+func IsOverQuota(err error) bool {
+ callErr, ok := err.(*internal.CallError)
+ return ok && callErr.Code == 4
+}
+
+// MultiError is returned by batch operations when there are errors with
+// particular elements. Errors will be in a one-to-one correspondence with
+// the input elements; successful elements will have a nil entry.
+type MultiError []error
+
+func (m MultiError) Error() string {
+ s, n := "", 0
+ for _, e := range m {
+ if e != nil {
+ if n == 0 {
+ s = e.Error()
+ }
+ n++
+ }
+ }
+ switch n {
+ case 0:
+ return "(0 errors)"
+ case 1:
+ return s
+ case 2:
+ return s + " (and 1 other error)"
+ }
+ return fmt.Sprintf("%s (and %d other errors)", s, n-1)
+}
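+
+// Example (illustrative sketch; keys and dst are assumed to be a key slice and
+// a matching destination slice): batch calls such as datastore.GetMulti report
+// per-element failures through MultiError.
+//
+//    err := datastore.GetMulti(ctx, keys, dst)
+//    if me, ok := err.(appengine.MultiError); ok {
+//        for i, e := range me {
+//            if e != nil {
+//                log.Errorf(ctx, "element %d failed: %v", i, e)
+//            }
+//        }
+//    }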
diff --git a/vendor/google.golang.org/appengine/file/file.go b/vendor/google.golang.org/appengine/file/file.go
new file mode 100644
index 0000000..c3cd58b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/file/file.go
@@ -0,0 +1,28 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package file provides helper functions for using Google Cloud Storage.
+package file
+
+import (
+ "fmt"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ aipb "google.golang.org/appengine/internal/app_identity"
+)
+
+// DefaultBucketName returns the name of this application's
+// default Google Cloud Storage bucket.
+func DefaultBucketName(c context.Context) (string, error) {
+ req := &aipb.GetDefaultGcsBucketNameRequest{}
+ res := &aipb.GetDefaultGcsBucketNameResponse{}
+
+ err := internal.Call(c, "app_identity_service", "GetDefaultGcsBucketName", req, res)
+ if err != nil {
+ return "", fmt.Errorf("file: no default bucket name returned in RPC response: %v", res)
+ }
+ return res.GetDefaultGcsBucketName(), nil
+}
diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go
new file mode 100644
index 0000000..b8dcf8f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/identity.go
@@ -0,0 +1,142 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "time"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/app_identity"
+ modpb "google.golang.org/appengine/internal/modules"
+)
+
+// AppID returns the application ID for the current application.
+// The string will be a plain application ID (e.g. "appid"), with a
+// domain prefix for custom domain deployments (e.g. "example.com:appid").
+func AppID(c context.Context) string { return internal.AppID(c) }
+
+// DefaultVersionHostname returns the standard hostname of the default version
+// of the current application (e.g. "my-app.appspot.com"). This is suitable for
+// use in constructing URLs.
+func DefaultVersionHostname(c context.Context) string {
+ return internal.DefaultVersionHostname(c)
+}
+
+// ModuleName returns the module name of the current instance.
+func ModuleName(c context.Context) string {
+ return internal.ModuleName(c)
+}
+
+// ModuleHostname returns a hostname of a module instance.
+// If module is the empty string, it refers to the module of the current instance.
+// If version is empty, it refers to the version of the current instance if valid,
+// or the default version of the module of the current instance.
+// If instance is empty, ModuleHostname returns the load-balancing hostname.
+func ModuleHostname(c context.Context, module, version, instance string) (string, error) {
+ req := &modpb.GetHostnameRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ if instance != "" {
+ req.Instance = &instance
+ }
+ res := &modpb.GetHostnameResponse{}
+ if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil {
+ return "", err
+ }
+ return *res.Hostname, nil
+}
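+
+// Example (illustrative sketch; "worker" is a hypothetical module name):
+//
+//    host, err := appengine.ModuleHostname(ctx, "worker", "", "")
+//    // host addresses the "worker" module (current or default version) and
+//    // typically looks like "worker-dot-my-app.appspot.com".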
+
+// VersionID returns the version ID for the current application.
+// It will be of the form "X.Y", where X is specified in app.yaml,
+// and Y is a number generated when each version of the app is uploaded.
+// It does not include a module name.
+func VersionID(c context.Context) string { return internal.VersionID(c) }
+
+// InstanceID returns a mostly-unique identifier for this instance.
+func InstanceID() string { return internal.InstanceID() }
+
+// Datacenter returns an identifier for the datacenter that the instance is running in.
+func Datacenter(c context.Context) string { return internal.Datacenter(c) }
+
+// ServerSoftware returns the App Engine release version.
+// In production, it looks like "Google App Engine/X.Y.Z".
+// In the development appserver, it looks like "Development/X.Y".
+func ServerSoftware() string { return internal.ServerSoftware() }
+
+// RequestID returns a string that uniquely identifies the request.
+func RequestID(c context.Context) string { return internal.RequestID(c) }
+
+// AccessToken generates an OAuth2 access token for the specified scopes on
+// behalf of the service account of this application. This token will expire after
+// the returned time.
+func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) {
+ req := &pb.GetAccessTokenRequest{Scope: scopes}
+ res := &pb.GetAccessTokenResponse{}
+
+ err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res)
+ if err != nil {
+ return "", time.Time{}, err
+ }
+ return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil
+}
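+
+// Example (illustrative sketch; the scope is only an example and req is an
+// outgoing *http.Request built by the caller): the token is used as an OAuth2
+// bearer token until expiry.
+//
+//    tok, expiry, err := appengine.AccessToken(ctx, "https://www.googleapis.com/auth/devstorage.read_only")
+//    if err != nil {
+//        return err
+//    }
+//    _ = expiry // callers commonly cache tok until shortly before this time
+//    req.Header.Set("Authorization", "Bearer "+tok)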
+
+// Certificate represents a public certificate for the app.
+type Certificate struct {
+ KeyName string
+ Data []byte // PEM-encoded X.509 certificate
+}
+
+// PublicCertificates retrieves the public certificates for the app.
+// They can be used to verify a signature returned by SignBytes.
+func PublicCertificates(c context.Context) ([]Certificate, error) {
+ req := &pb.GetPublicCertificateForAppRequest{}
+ res := &pb.GetPublicCertificateForAppResponse{}
+ if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil {
+ return nil, err
+ }
+ var cs []Certificate
+ for _, pc := range res.PublicCertificateList {
+ cs = append(cs, Certificate{
+ KeyName: pc.GetKeyName(),
+ Data: []byte(pc.GetX509CertificatePem()),
+ })
+ }
+ return cs, nil
+}
+
+// ServiceAccount returns a string representing the service account name, in
+// the form of an email address (typically app_id@appspot.gserviceaccount.com).
+func ServiceAccount(c context.Context) (string, error) {
+ req := &pb.GetServiceAccountNameRequest{}
+ res := &pb.GetServiceAccountNameResponse{}
+
+ err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res)
+ if err != nil {
+ return "", err
+ }
+ return res.GetServiceAccountName(), err
+}
+
+// SignBytes signs bytes using a private key unique to your application.
+func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) {
+ req := &pb.SignForAppRequest{BytesToSign: bytes}
+ res := &pb.SignForAppResponse{}
+
+ if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil {
+ return "", nil, err
+ }
+ return res.GetKeyName(), res.GetSignatureBytes(), nil
+}
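+
+// Example (illustrative sketch; data is the signed payload, and the signature
+// scheme is assumed to be RSA over a SHA-256 digest, which is what
+// x509.SHA256WithRSA verifies): checking a SignBytes signature against the
+// app's public certificates.
+//
+//    keyName, sig, err := appengine.SignBytes(ctx, data)
+//    if err != nil {
+//        return err
+//    }
+//    certs, err := appengine.PublicCertificates(ctx)
+//    if err != nil {
+//        return err
+//    }
+//    for _, cert := range certs {
+//        if cert.KeyName != keyName {
+//            continue
+//        }
+//        block, _ := pem.Decode(cert.Data)
+//        if block == nil {
+//            continue
+//        }
+//        parsed, err := x509.ParseCertificate(block.Bytes)
+//        if err != nil {
+//            continue
+//        }
+//        if parsed.CheckSignature(x509.SHA256WithRSA, data, sig) == nil {
+//            // signature verified
+//        }
+//    }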
+
+func init() {
+ internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name)
+ internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/image/image.go b/vendor/google.golang.org/appengine/image/image.go
new file mode 100644
index 0000000..027a41b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/image/image.go
@@ -0,0 +1,67 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package image provides image services.
+package image // import "google.golang.org/appengine/image"
+
+import (
+ "fmt"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/image"
+)
+
+type ServingURLOptions struct {
+ Secure bool // whether the URL should use HTTPS
+
+ // Size must be between zero and 1600.
+ // If Size is non-zero, a resized version of the image is served,
+ // and Size is the served image's longest dimension. The aspect ratio is preserved.
+ // If Crop is true the image is cropped from the center instead of being resized.
+ Size int
+ Crop bool
+}
+
+// ServingURL returns a URL that will serve an image from Blobstore.
+func ServingURL(c context.Context, key appengine.BlobKey, opts *ServingURLOptions) (*url.URL, error) {
+ req := &pb.ImagesGetUrlBaseRequest{
+ BlobKey: (*string)(&key),
+ }
+ if opts != nil && opts.Secure {
+ req.CreateSecureUrl = &opts.Secure
+ }
+ res := &pb.ImagesGetUrlBaseResponse{}
+ if err := internal.Call(c, "images", "GetUrlBase", req, res); err != nil {
+ return nil, err
+ }
+
+ // The URL may have suffixes added to dynamically resize or crop:
+ // - adding "=s32" will serve the image resized to 32 pixels, preserving the aspect ratio.
+ // - adding "=s32-c" is the same as "=s32" except it will be cropped.
+ u := *res.Url
+ if opts != nil && opts.Size > 0 {
+ u += fmt.Sprintf("=s%d", opts.Size)
+ if opts.Crop {
+ u += "-c"
+ }
+ }
+ return url.Parse(u)
+}
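+
+// Example (illustrative sketch; blobKey is assumed to reference an uploaded
+// image): request a cropped, 400-pixel, HTTPS serving URL.
+//
+//    u, err := image.ServingURL(ctx, blobKey, &image.ServingURLOptions{
+//        Secure: true,
+//        Size:   400,
+//        Crop:   true,
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    imgSrc := u.String() // e.g. used as an <img src="..."> value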
+
+// DeleteServingURL deletes the serving URL for an image.
+func DeleteServingURL(c context.Context, key appengine.BlobKey) error {
+ req := &pb.ImagesDeleteUrlBaseRequest{
+ BlobKey: (*string)(&key),
+ }
+ res := &pb.ImagesDeleteUrlBaseResponse{}
+ return internal.Call(c, "images", "DeleteUrlBase", req, res)
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("images", pb.ImagesServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/internal/aetesting/fake.go b/vendor/google.golang.org/appengine/internal/aetesting/fake.go
new file mode 100644
index 0000000..eb5b2c6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/aetesting/fake.go
@@ -0,0 +1,81 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package aetesting provides utilities for testing App Engine packages.
+// This is not for testing user applications.
+package aetesting
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// FakeSingleContext returns a context whose Call invocations will be serviced
+// by f, which should be a function taking two arguments, the input and output
+// protocol buffer types, and returning a single error.
+func FakeSingleContext(t *testing.T, service, method string, f interface{}) context.Context {
+ fv := reflect.ValueOf(f)
+ if fv.Kind() != reflect.Func {
+ t.Fatal("not a function")
+ }
+ ft := fv.Type()
+ if ft.NumIn() != 2 || ft.NumOut() != 1 {
+ t.Fatalf("f has %d in and %d out, want 2 in and 1 out", ft.NumIn(), ft.NumOut())
+ }
+ for i := 0; i < 2; i++ {
+ at := ft.In(i)
+ if !at.Implements(protoMessageType) {
+ t.Fatalf("arg %d does not implement proto.Message", i)
+ }
+ }
+ if ft.Out(0) != errorType {
+ t.Fatalf("f's return is %v, want error", ft.Out(0))
+ }
+ s := &single{
+ t: t,
+ service: service,
+ method: method,
+ f: fv,
+ }
+ return internal.WithCallOverride(internal.ContextForTesting(&http.Request{}), s.call)
+}
+
+var (
+ protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+ errorType = reflect.TypeOf((*error)(nil)).Elem()
+)
+
+type single struct {
+ t *testing.T
+ service, method string
+ f reflect.Value
+}
+
+func (s *single) call(ctx context.Context, service, method string, in, out proto.Message) error {
+ if service == "__go__" {
+ if method == "GetNamespace" {
+ return nil // always yield an empty namespace
+ }
+ return fmt.Errorf("Unknown API call /%s.%s", service, method)
+ }
+ if service != s.service || method != s.method {
+ s.t.Fatalf("Unexpected call to /%s.%s", service, method)
+ }
+ ins := []reflect.Value{
+ reflect.ValueOf(in),
+ reflect.ValueOf(out),
+ }
+ outs := s.f.Call(ins)
+ if outs[0].IsNil() {
+ return nil
+ }
+ return outs[0].Interface().(error)
+}
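+
+// Example (illustrative sketch; pb aliases
+// "google.golang.org/appengine/internal/app_identity"): faking the SignForApp
+// call so appengine.SignBytes can be exercised in a test.
+//
+//    ctx := aetesting.FakeSingleContext(t, "app_identity_service", "SignForApp",
+//        func(req *pb.SignForAppRequest, res *pb.SignForAppResponse) error {
+//            res.SignatureBytes = []byte("fake-signature")
+//            return nil
+//        })
+//    _, sig, err := appengine.SignBytes(ctx, []byte("payload"))
+//    // sig is now []byte("fake-signature") and err is nil.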
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
new file mode 100644
index 0000000..ec5aa59
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api.go
@@ -0,0 +1,646 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ logpb "google.golang.org/appengine/internal/log"
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const (
+ apiPath = "/rpc_http"
+)
+
+var (
+ // Incoming headers.
+ ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
+ dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
+ traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
+ curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+ userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
+ remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
+
+ // Outgoing headers.
+ apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
+ apiEndpointHeaderValue = []string{"app-engine-apis"}
+ apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
+ apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
+ apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
+ apiContentType = http.CanonicalHeaderKey("Content-Type")
+ apiContentTypeValue = []string{"application/octet-stream"}
+ logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
+
+ apiHTTPClient = &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: limitDial,
+ },
+ }
+)
+
+func apiURL() *url.URL {
+ host, port := "appengine.googleapis.internal", "10001"
+ if h := os.Getenv("API_HOST"); h != "" {
+ host = h
+ }
+ if p := os.Getenv("API_PORT"); p != "" {
+ port = p
+ }
+ return &url.URL{
+ Scheme: "http",
+ Host: host + ":" + port,
+ Path: apiPath,
+ }
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ c := &context{
+ req: r,
+ outHeader: w.Header(),
+ apiURL: apiURL(),
+ }
+ stopFlushing := make(chan int)
+
+ ctxs.Lock()
+ ctxs.m[r] = c
+ ctxs.Unlock()
+ defer func() {
+ ctxs.Lock()
+ delete(ctxs.m, r)
+ ctxs.Unlock()
+ }()
+
+ // Patch up RemoteAddr so it looks reasonable.
+ if addr := r.Header.Get(userIPHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else {
+ // Should not normally reach here, but pick a sensible default anyway.
+ r.RemoteAddr = "127.0.0.1"
+ }
+ // The address in the headers will most likely be of these forms:
+ // 123.123.123.123
+ // 2001:db8::1
+ // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
+ if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
+ // Assume the remote address is only a host; add a default port.
+ r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
+ }
+
+ // Start goroutine responsible for flushing app logs.
+ // This is done after adding c to ctxs.m (and stopped before removing it)
+ // because flushing logs requires making an API call.
+ go c.logFlusher(stopFlushing)
+
+ executeRequestSafely(c, r)
+ c.outHeader = nil // make sure header changes aren't respected any more
+
+ stopFlushing <- 1 // any logging beyond this point will be dropped
+
+ // Flush any pending logs asynchronously.
+ c.pendingLogs.Lock()
+ flushes := c.pendingLogs.flushes
+ if len(c.pendingLogs.lines) > 0 {
+ flushes++
+ }
+ c.pendingLogs.Unlock()
+ go c.flushLog(false)
+ w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
+
+ // Avoid nil Write call if c.Write is never called.
+ if c.outCode != 0 {
+ w.WriteHeader(c.outCode)
+ }
+ if c.outBody != nil {
+ w.Write(c.outBody)
+ }
+}
+
+func executeRequestSafely(c *context, r *http.Request) {
+ defer func() {
+ if x := recover(); x != nil {
+ logf(c, 4, "%s", renderPanic(x)) // 4 == critical
+ c.outCode = 500
+ }
+ }()
+
+ http.DefaultServeMux.ServeHTTP(c, r)
+}
+
+func renderPanic(x interface{}) string {
+ buf := make([]byte, 16<<10) // 16 KB should be plenty
+ buf = buf[:runtime.Stack(buf, false)]
+
+ // Remove the first few stack frames:
+ // this func
+ // the recover closure in the caller
+ // That will root the stack trace at the site of the panic.
+ const (
+ skipStart = "internal.renderPanic"
+ skipFrames = 2
+ )
+ start := bytes.Index(buf, []byte(skipStart))
+ p := start
+ for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
+ p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
+ if p < 0 {
+ break
+ }
+ }
+ if p >= 0 {
+ // buf[start:p+1] is the block to remove.
+ // Copy buf[p+1:] over buf[start:] and shrink buf.
+ copy(buf[start:], buf[p+1:])
+ buf = buf[:len(buf)-(p+1-start)]
+ }
+
+ // Add panic heading.
+ head := fmt.Sprintf("panic: %v\n\n", x)
+ if len(head) > len(buf) {
+ // Extremely unlikely to happen.
+ return head
+ }
+ copy(buf[len(head):], buf)
+ copy(buf, head)
+
+ return string(buf)
+}
+
+var ctxs = struct {
+ sync.Mutex
+ m map[*http.Request]*context
+ bg *context // background context, lazily initialized
+ // dec is used by tests to decorate the netcontext.Context returned
+ // for a given request. This allows tests to add overrides (such as
+ // WithAppIDOverride) to the context. The map is nil outside tests.
+ dec map[*http.Request]func(netcontext.Context) netcontext.Context
+}{
+ m: make(map[*http.Request]*context),
+}
+
+// context represents the context of an in-flight HTTP request.
+// It implements the appengine.Context and http.ResponseWriter interfaces.
+type context struct {
+ req *http.Request
+
+ outCode int
+ outHeader http.Header
+ outBody []byte
+
+ pendingLogs struct {
+ sync.Mutex
+ lines []*logpb.UserAppLogLine
+ flushes int
+ }
+
+ apiURL *url.URL
+}
+
+var contextKey = "holds a *context"
+
+func fromContext(ctx netcontext.Context) *context {
+ c, _ := ctx.Value(&contextKey).(*context)
+ return c
+}
+
+func withContext(parent netcontext.Context, c *context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
+ if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
+ ctx = withNamespace(ctx, ns)
+ }
+ return ctx
+}
+
+func toContext(c *context) netcontext.Context {
+ return withContext(netcontext.Background(), c)
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+ if c := fromContext(ctx); c != nil {
+ return c.req.Header
+ }
+ return nil
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+ ctxs.Lock()
+ c := ctxs.m[req]
+ d := ctxs.dec[req]
+ ctxs.Unlock()
+
+ if d != nil {
+ parent = d(parent)
+ }
+
+ if c == nil {
+ // Someone passed in an http.Request that is not in-flight.
+ // We panic here rather than panicking at a later point
+ // so that stack traces will be more sensible.
+ log.Panic("appengine: NewContext passed an unknown http.Request")
+ }
+ return withContext(parent, c)
+}
+
+func BackgroundContext() netcontext.Context {
+ ctxs.Lock()
+ defer ctxs.Unlock()
+
+ if ctxs.bg != nil {
+ return toContext(ctxs.bg)
+ }
+
+ // Compute background security ticket.
+ appID := partitionlessAppID()
+ escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
+ majVersion := VersionID(nil)
+ if i := strings.Index(majVersion, "."); i > 0 {
+ majVersion = majVersion[:i]
+ }
+ ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
+
+ ctxs.bg = &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{ticket},
+ },
+ },
+ apiURL: apiURL(),
+ }
+
+ // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
+ go ctxs.bg.logFlusher(make(chan int))
+
+ return toContext(ctxs.bg)
+}
+
+// RegisterTestRequest registers the HTTP request req for testing, such that
+// any API calls are sent to the provided URL. It returns a closure to delete
+// the registration.
+// It should only be used by the aetest package.
+func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() {
+ c := &context{
+ req: req,
+ apiURL: apiURL,
+ }
+ ctxs.Lock()
+ defer ctxs.Unlock()
+ if _, ok := ctxs.m[req]; ok {
+ log.Panic("req already associated with context")
+ }
+ if _, ok := ctxs.dec[req]; ok {
+ log.Panic("req already associated with context")
+ }
+ if ctxs.dec == nil {
+ ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context)
+ }
+ ctxs.m[req] = c
+ ctxs.dec[req] = decorate
+
+ return func() {
+ ctxs.Lock()
+ delete(ctxs.m, req)
+ delete(ctxs.dec, req)
+ ctxs.Unlock()
+ }
+}
+
+var errTimeout = &CallError{
+ Detail: "Deadline exceeded",
+ Code: int32(remotepb.RpcError_CANCELLED),
+ Timeout: true,
+}
+
+func (c *context) Header() http.Header { return c.outHeader }
+
+// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
+// codes do not permit a response body (nor response entity headers such as
+// Content-Length, Content-Type, etc).
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+func (c *context) Write(b []byte) (int, error) {
+ if c.outCode == 0 {
+ c.WriteHeader(http.StatusOK)
+ }
+ if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
+ return 0, http.ErrBodyNotAllowed
+ }
+ c.outBody = append(c.outBody, b...)
+ return len(b), nil
+}
+
+func (c *context) WriteHeader(code int) {
+ if c.outCode != 0 {
+ logf(c, 3, "WriteHeader called multiple times on request.") // error level
+ return
+ }
+ c.outCode = code
+}
+
+func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
+ hreq := &http.Request{
+ Method: "POST",
+ URL: c.apiURL,
+ Header: http.Header{
+ apiEndpointHeader: apiEndpointHeaderValue,
+ apiMethodHeader: apiMethodHeaderValue,
+ apiContentType: apiContentTypeValue,
+ apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
+ },
+ Body: ioutil.NopCloser(bytes.NewReader(body)),
+ ContentLength: int64(len(body)),
+ Host: c.apiURL.Host,
+ }
+ if info := c.req.Header.Get(dapperHeader); info != "" {
+ hreq.Header.Set(dapperHeader, info)
+ }
+ if info := c.req.Header.Get(traceHeader); info != "" {
+ hreq.Header.Set(traceHeader, info)
+ }
+
+ tr := apiHTTPClient.Transport.(*http.Transport)
+
+ var timedOut int32 // atomic; set to 1 if timed out
+ t := time.AfterFunc(timeout, func() {
+ atomic.StoreInt32(&timedOut, 1)
+ tr.CancelRequest(hreq)
+ })
+ defer t.Stop()
+ defer func() {
+ // Check if timeout was exceeded.
+ if atomic.LoadInt32(&timedOut) != 0 {
+ err = errTimeout
+ }
+ }()
+
+ hresp, err := apiHTTPClient.Do(hreq)
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ defer hresp.Body.Close()
+ hrespBody, err := ioutil.ReadAll(hresp.Body)
+ if hresp.StatusCode != 200 {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge response bad: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return hrespBody, nil
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+ if ns := NamespaceFromContext(ctx); ns != "" {
+ if fn, ok := NamespaceMods[service]; ok {
+ fn(in, ns)
+ }
+ }
+
+ if f, ctx, ok := callOverrideFromContext(ctx); ok {
+ return f(ctx, service, method, in, out)
+ }
+
+ // Handle already-done contexts quickly.
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errors.New("not an App Engine context")
+ }
+
+ // Apply transaction modifications if we're in a transaction.
+ if t := transactionFromContext(ctx); t != nil {
+ if t.finished {
+ return errors.New("transaction context has expired")
+ }
+ applyTransaction(in, &t.transaction)
+ }
+
+ // Default RPC timeout is 60s.
+ timeout := 60 * time.Second
+ if deadline, ok := ctx.Deadline(); ok {
+ timeout = deadline.Sub(time.Now())
+ }
+
+ data, err := proto.Marshal(in)
+ if err != nil {
+ return err
+ }
+
+ ticket := c.req.Header.Get(ticketHeader)
+ req := &remotepb.Request{
+ ServiceName: &service,
+ Method: &method,
+ Request: data,
+ RequestId: &ticket,
+ }
+ hreqBody, err := proto.Marshal(req)
+ if err != nil {
+ return err
+ }
+
+ hrespBody, err := c.post(hreqBody, timeout)
+ if err != nil {
+ return err
+ }
+
+ res := &remotepb.Response{}
+ if err := proto.Unmarshal(hrespBody, res); err != nil {
+ return err
+ }
+ if res.RpcError != nil {
+ ce := &CallError{
+ Detail: res.RpcError.GetDetail(),
+ Code: *res.RpcError.Code,
+ }
+ switch remotepb.RpcError_ErrorCode(ce.Code) {
+ case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
+ ce.Timeout = true
+ }
+ return ce
+ }
+ if res.ApplicationError != nil {
+ return &APIError{
+ Service: *req.ServiceName,
+ Detail: res.ApplicationError.GetDetail(),
+ Code: *res.ApplicationError.Code,
+ }
+ }
+ if res.Exception != nil || res.JavaException != nil {
+ // This shouldn't happen, but let's be defensive.
+ return &CallError{
+ Detail: "service bridge returned exception",
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return proto.Unmarshal(res.Response, out)
+}
+
+func (c *context) Request() *http.Request {
+ return c.req
+}
+
+func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
+ // Truncate long log lines.
+ // TODO(dsymonds): Check if this is still necessary.
+ const lim = 8 << 10
+ if len(*ll.Message) > lim {
+ suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
+ ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
+ }
+
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
+ c.pendingLogs.Unlock()
+}
+
+var logLevelName = map[int64]string{
+ 0: "DEBUG",
+ 1: "INFO",
+ 2: "WARNING",
+ 3: "ERROR",
+ 4: "CRITICAL",
+}
+
+func logf(c *context, level int64, format string, args ...interface{}) {
+ s := fmt.Sprintf(format, args...)
+ s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
+ c.addLogLine(&logpb.UserAppLogLine{
+ TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
+ Level: &level,
+ Message: &s,
+ })
+ log.Print(logLevelName[level] + ": " + s)
+}
+
+// flushLog attempts to flush any pending logs to the appserver.
+// It should not be called concurrently.
+func (c *context) flushLog(force bool) (flushed bool) {
+ c.pendingLogs.Lock()
+ // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
+ n, rem := 0, 30<<20
+ for ; n < len(c.pendingLogs.lines); n++ {
+ ll := c.pendingLogs.lines[n]
+ // Each log line will require about 3 bytes of overhead.
+ nb := proto.Size(ll) + 3
+ if nb > rem {
+ break
+ }
+ rem -= nb
+ }
+ lines := c.pendingLogs.lines[:n]
+ c.pendingLogs.lines = c.pendingLogs.lines[n:]
+ c.pendingLogs.Unlock()
+
+ if len(lines) == 0 && !force {
+ // Nothing to flush.
+ return false
+ }
+
+ rescueLogs := false
+ defer func() {
+ if rescueLogs {
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
+ c.pendingLogs.Unlock()
+ }
+ }()
+
+ buf, err := proto.Marshal(&logpb.UserAppLogGroup{
+ LogLine: lines,
+ })
+ if err != nil {
+ log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
+ rescueLogs = true
+ return false
+ }
+
+ req := &logpb.FlushRequest{
+ Logs: buf,
+ }
+ res := &basepb.VoidProto{}
+ c.pendingLogs.Lock()
+ c.pendingLogs.flushes++
+ c.pendingLogs.Unlock()
+ if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
+ log.Printf("internal.flushLog: Flush RPC: %v", err)
+ rescueLogs = true
+ return false
+ }
+ return true
+}
+
+const (
+ // Log flushing parameters.
+ flushInterval = 1 * time.Second
+ forceFlushInterval = 60 * time.Second
+)
+
+func (c *context) logFlusher(stop <-chan int) {
+ lastFlush := time.Now()
+ tick := time.NewTicker(flushInterval)
+ for {
+ select {
+ case <-stop:
+ // Request finished.
+ tick.Stop()
+ return
+ case <-tick.C:
+ force := time.Now().Sub(lastFlush) > forceFlushInterval
+ if c.flushLog(force) {
+ lastFlush = time.Now()
+ }
+ }
+ }
+}
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return toContext(&context{req: req})
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go
new file mode 100644
index 0000000..597f66e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_classic.go
@@ -0,0 +1,159 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "time"
+
+ "appengine"
+ "appengine_internal"
+ basepb "appengine_internal/base"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+)
+
+var contextKey = "holds an appengine.Context"
+
+func fromContext(ctx netcontext.Context) appengine.Context {
+ c, _ := ctx.Value(&contextKey).(appengine.Context)
+ return c
+}
+
+// This is only for classic App Engine adapters.
+func ClassicContextFromContext(ctx netcontext.Context) appengine.Context {
+ return fromContext(ctx)
+}
+
+func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
+
+ s := &basepb.StringProto{}
+ c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
+ if ns := s.GetValue(); ns != "" {
+ ctx = NamespacedContext(ctx, ns)
+ }
+
+ return ctx
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+ if c := fromContext(ctx); c != nil {
+ if req, ok := c.Request().(*http.Request); ok {
+ return req.Header
+ }
+ }
+ return nil
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+ c := appengine.NewContext(req)
+ return withContext(parent, c)
+}
+
+type testingContext struct {
+ appengine.Context
+
+ req *http.Request
+}
+
+func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
+func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
+ if service == "__go__" && method == "GetNamespace" {
+ return nil
+ }
+ return fmt.Errorf("testingContext: unsupported Call")
+}
+func (t *testingContext) Request() interface{} { return t.req }
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return withContext(netcontext.Background(), &testingContext{req: req})
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+ if ns := NamespaceFromContext(ctx); ns != "" {
+ if fn, ok := NamespaceMods[service]; ok {
+ fn(in, ns)
+ }
+ }
+
+ if f, ctx, ok := callOverrideFromContext(ctx); ok {
+ return f(ctx, service, method, in, out)
+ }
+
+ // Handle already-done contexts quickly.
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errors.New("not an App Engine context")
+ }
+
+ // Apply transaction modifications if we're in a transaction.
+ if t := transactionFromContext(ctx); t != nil {
+ if t.finished {
+ return errors.New("transaction context has expired")
+ }
+ applyTransaction(in, &t.transaction)
+ }
+
+ var opts *appengine_internal.CallOptions
+ if d, ok := ctx.Deadline(); ok {
+ opts = &appengine_internal.CallOptions{
+ Timeout: d.Sub(time.Now()),
+ }
+ }
+
+ err := c.Call(service, method, in, out, opts)
+ switch v := err.(type) {
+ case *appengine_internal.APIError:
+ return &APIError{
+ Service: v.Service,
+ Detail: v.Detail,
+ Code: v.Code,
+ }
+ case *appengine_internal.CallError:
+ return &CallError{
+ Detail: v.Detail,
+ Code: v.Code,
+ Timeout: v.Timeout,
+ }
+ }
+ return err
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ panic("handleHTTP called; this should be impossible")
+}
+
+func logf(c appengine.Context, level int64, format string, args ...interface{}) {
+ var fn func(format string, args ...interface{})
+ switch level {
+ case 0:
+ fn = c.Debugf
+ case 1:
+ fn = c.Infof
+ case 2:
+ fn = c.Warningf
+ case 3:
+ fn = c.Errorf
+ case 4:
+ fn = c.Criticalf
+ default:
+ // This shouldn't happen.
+ fn = c.Criticalf
+ }
+ fn(format, args...)
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
new file mode 100644
index 0000000..2db33a7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_common.go
@@ -0,0 +1,86 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+)
+
+type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
+
+var callOverrideKey = "holds []CallOverrideFunc"
+
+func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
+ // We avoid appending to any existing call override
+ // so we don't risk overwriting a popped stack below.
+ var cofs []CallOverrideFunc
+ if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
+ cofs = append(cofs, uf...)
+ }
+ cofs = append(cofs, f)
+ return netcontext.WithValue(ctx, &callOverrideKey, cofs)
+}
+
+func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
+ cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
+ if len(cofs) == 0 {
+ return nil, nil, false
+ }
+ // We found a list of overrides; grab the last, and reconstitute a
+ // context that will hide it.
+ f := cofs[len(cofs)-1]
+ ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
+ return f, ctx, true
+}
+
+type logOverrideFunc func(level int64, format string, args ...interface{})
+
+var logOverrideKey = "holds a logOverrideFunc"
+
+func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
+ return netcontext.WithValue(ctx, &logOverrideKey, f)
+}
+
+var appIDOverrideKey = "holds a string, being the full app ID"
+
+func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
+ return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
+}
+
+var namespaceKey = "holds the namespace string"
+
+func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
+ return netcontext.WithValue(ctx, &namespaceKey, ns)
+}
+
+func NamespaceFromContext(ctx netcontext.Context) string {
+ // If there's no namespace, return the empty string.
+ ns, _ := ctx.Value(&namespaceKey).(string)
+ return ns
+}
+
+// FullyQualifiedAppID returns the fully-qualified application ID.
+// This may contain a partition prefix (e.g. "s~" for High Replication apps),
+// or a domain prefix (e.g. "example.com:").
+func FullyQualifiedAppID(ctx netcontext.Context) string {
+ if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
+ return id
+ }
+ return fullyQualifiedAppID(ctx)
+}
+
+func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
+ if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
+ f(level, format, args...)
+ return
+ }
+ logf(fromContext(ctx), level, format, args...)
+}
+
+// NamespacedContext wraps a Context to support namespaces.
+func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
+ return withNamespace(ctx, namespace)
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_race_test.go b/vendor/google.golang.org/appengine/internal/api_race_test.go
new file mode 100644
index 0000000..6cfe906
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_race_test.go
@@ -0,0 +1,9 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build race
+
+package internal
+
+func init() { raceDetector = true }
diff --git a/vendor/google.golang.org/appengine/internal/api_test.go b/vendor/google.golang.org/appengine/internal/api_test.go
new file mode 100644
index 0000000..386d7f6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_test.go
@@ -0,0 +1,467 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "os/exec"
+ "strings"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const testTicketHeader = "X-Magic-Ticket-Header"
+
+func init() {
+ ticketHeader = testTicketHeader
+}
+
+type fakeAPIHandler struct {
+ hang chan int // used for RunSlowly RPC
+
+ LogFlushes int32 // atomic
+}
+
+func (f *fakeAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ writeResponse := func(res *remotepb.Response) {
+ hresBody, err := proto.Marshal(res)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Failed encoding API response: %v", err), 500)
+ return
+ }
+ w.Write(hresBody)
+ }
+
+ if r.URL.Path != "/rpc_http" {
+ http.NotFound(w, r)
+ return
+ }
+ hreqBody, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Bad body: %v", err), 500)
+ return
+ }
+ apiReq := &remotepb.Request{}
+ if err := proto.Unmarshal(hreqBody, apiReq); err != nil {
+ http.Error(w, fmt.Sprintf("Bad encoded API request: %v", err), 500)
+ return
+ }
+ if *apiReq.RequestId != "s3cr3t" {
+ writeResponse(&remotepb.Response{
+ RpcError: &remotepb.RpcError{
+ Code: proto.Int32(int32(remotepb.RpcError_SECURITY_VIOLATION)),
+ Detail: proto.String("bad security ticket"),
+ },
+ })
+ return
+ }
+ if got, want := r.Header.Get(dapperHeader), "trace-001"; got != want {
+ writeResponse(&remotepb.Response{
+ RpcError: &remotepb.RpcError{
+ Code: proto.Int32(int32(remotepb.RpcError_BAD_REQUEST)),
+ Detail: proto.String(fmt.Sprintf("trace info = %q, want %q", got, want)),
+ },
+ })
+ return
+ }
+
+ service, method := *apiReq.ServiceName, *apiReq.Method
+ var resOut proto.Message
+ if service == "actordb" && method == "LookupActor" {
+ req := &basepb.StringProto{}
+ res := &basepb.StringProto{}
+ if err := proto.Unmarshal(apiReq.Request, req); err != nil {
+ http.Error(w, fmt.Sprintf("Bad encoded request: %v", err), 500)
+ return
+ }
+ if *req.Value == "Doctor Who" {
+ res.Value = proto.String("David Tennant")
+ }
+ resOut = res
+ }
+ if service == "errors" {
+ switch method {
+ case "Non200":
+ http.Error(w, "I'm a little teapot.", 418)
+ return
+ case "ShortResponse":
+ w.Header().Set("Content-Length", "100")
+ w.Write([]byte("way too short"))
+ return
+ case "OverQuota":
+ writeResponse(&remotepb.Response{
+ RpcError: &remotepb.RpcError{
+ Code: proto.Int32(int32(remotepb.RpcError_OVER_QUOTA)),
+ Detail: proto.String("you are hogging the resources!"),
+ },
+ })
+ return
+ case "RunSlowly":
+ // TestAPICallRPCFailure creates f.hang, but does not strobe it
+ // until Call returns with remotepb.RpcError_CANCELLED.
+ // This is here to force a happens-before relationship between
+ // the httptest server handler and shutdown.
+ <-f.hang
+ resOut = &basepb.VoidProto{}
+ }
+ }
+ if service == "logservice" && method == "Flush" {
+ // Pretend log flushing is slow.
+ time.Sleep(50 * time.Millisecond)
+ atomic.AddInt32(&f.LogFlushes, 1)
+ resOut = &basepb.VoidProto{}
+ }
+
+ encOut, err := proto.Marshal(resOut)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Failed encoding response: %v", err), 500)
+ return
+ }
+ writeResponse(&remotepb.Response{
+ Response: encOut,
+ })
+}
+
+func setup() (f *fakeAPIHandler, c *context, cleanup func()) {
+ f = &fakeAPIHandler{}
+ srv := httptest.NewServer(f)
+ u, err := url.Parse(srv.URL + apiPath)
+ if err != nil {
+ panic(fmt.Sprintf("url.Parse(%q): %v", srv.URL+apiPath, err))
+ }
+ return f, &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{"s3cr3t"},
+ dapperHeader: []string{"trace-001"},
+ },
+ },
+ apiURL: u,
+ }, srv.Close
+}
+
+func TestAPICall(t *testing.T) {
+ _, c, cleanup := setup()
+ defer cleanup()
+
+ req := &basepb.StringProto{
+ Value: proto.String("Doctor Who"),
+ }
+ res := &basepb.StringProto{}
+ err := Call(toContext(c), "actordb", "LookupActor", req, res)
+ if err != nil {
+ t.Fatalf("API call failed: %v", err)
+ }
+ if got, want := *res.Value, "David Tennant"; got != want {
+ t.Errorf("Response is %q, want %q", got, want)
+ }
+}
+
+func TestAPICallRPCFailure(t *testing.T) {
+ f, c, cleanup := setup()
+ defer cleanup()
+
+ testCases := []struct {
+ method string
+ code remotepb.RpcError_ErrorCode
+ }{
+ {"Non200", remotepb.RpcError_UNKNOWN},
+ {"ShortResponse", remotepb.RpcError_UNKNOWN},
+ {"OverQuota", remotepb.RpcError_OVER_QUOTA},
+ {"RunSlowly", remotepb.RpcError_CANCELLED},
+ }
+ f.hang = make(chan int) // only for RunSlowly
+ for _, tc := range testCases {
+ ctx, _ := netcontext.WithTimeout(toContext(c), 100*time.Millisecond)
+ err := Call(ctx, "errors", tc.method, &basepb.VoidProto{}, &basepb.VoidProto{})
+ ce, ok := err.(*CallError)
+ if !ok {
+ t.Errorf("%s: API call error is %T (%v), want *CallError", tc.method, err, err)
+ continue
+ }
+ if ce.Code != int32(tc.code) {
+ t.Errorf("%s: ce.Code = %d, want %d", tc.method, ce.Code, tc.code)
+ }
+ if tc.method == "RunSlowly" {
+ f.hang <- 1 // release the HTTP handler
+ }
+ }
+}
+
+func TestAPICallDialFailure(t *testing.T) {
+ // See what happens if the API host is unresponsive.
+ // This should time out quickly, not hang forever.
+ _, c, cleanup := setup()
+ defer cleanup()
+ // Reset the URL to the production address so that dialing fails.
+ c.apiURL = apiURL()
+
+ start := time.Now()
+ err := Call(toContext(c), "foo", "bar", &basepb.VoidProto{}, &basepb.VoidProto{})
+ const max = 1 * time.Second
+ if taken := time.Since(start); taken > max {
+ t.Errorf("Dial hang took too long: %v > %v", taken, max)
+ }
+ if err == nil {
+ t.Error("Call did not fail")
+ }
+}
+
+func TestDelayedLogFlushing(t *testing.T) {
+ f, c, cleanup := setup()
+ defer cleanup()
+
+ http.HandleFunc("/quick_log", func(w http.ResponseWriter, r *http.Request) {
+ logC := WithContext(netcontext.Background(), r)
+ fromContext(logC).apiURL = c.apiURL // Otherwise it will try to use the default URL.
+ Logf(logC, 1, "It's a lovely day.")
+ w.WriteHeader(200)
+ w.Write(make([]byte, 100<<10)) // write 100 KB to force HTTP flush
+ })
+
+ r := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Scheme: "http",
+ Path: "/quick_log",
+ },
+ Header: c.req.Header,
+ Body: ioutil.NopCloser(bytes.NewReader(nil)),
+ }
+ w := httptest.NewRecorder()
+
+ // Check that log flushing does not hold up the HTTP response.
+ start := time.Now()
+ handleHTTP(w, r)
+ if d := time.Since(start); d > 10*time.Millisecond {
+ t.Errorf("handleHTTP took %v, want under 10ms", d)
+ }
+ const hdr = "X-AppEngine-Log-Flush-Count"
+ if h := w.HeaderMap.Get(hdr); h != "1" {
+ t.Errorf("%s header = %q, want %q", hdr, h, "1")
+ }
+ if f := atomic.LoadInt32(&f.LogFlushes); f != 0 {
+ t.Errorf("After HTTP response: f.LogFlushes = %d, want 0", f)
+ }
+
+ // Check that the log flush eventually comes in.
+ time.Sleep(100 * time.Millisecond)
+ if f := atomic.LoadInt32(&f.LogFlushes); f != 1 {
+ t.Errorf("After 100ms: f.LogFlushes = %d, want 1", f)
+ }
+}
+
+func TestRemoteAddr(t *testing.T) {
+ var addr string
+ http.HandleFunc("/remote_addr", func(w http.ResponseWriter, r *http.Request) {
+ addr = r.RemoteAddr
+ })
+
+ testCases := []struct {
+ headers http.Header
+ addr string
+ }{
+ {http.Header{"X-Appengine-User-Ip": []string{"10.5.2.1"}}, "10.5.2.1:80"},
+ {http.Header{"X-Appengine-Remote-Addr": []string{"1.2.3.4"}}, "1.2.3.4:80"},
+ {http.Header{"X-Appengine-Remote-Addr": []string{"1.2.3.4:8080"}}, "1.2.3.4:8080"},
+ {
+ http.Header{"X-Appengine-Remote-Addr": []string{"2401:fa00:9:1:7646:a0ff:fe90:ca66"}},
+ "[2401:fa00:9:1:7646:a0ff:fe90:ca66]:80",
+ },
+ {
+ http.Header{"X-Appengine-Remote-Addr": []string{"[::1]:http"}},
+ "[::1]:http",
+ },
+ {http.Header{}, "127.0.0.1:80"},
+ }
+
+ for _, tc := range testCases {
+ r := &http.Request{
+ Method: "GET",
+ URL: &url.URL{Scheme: "http", Path: "/remote_addr"},
+ Header: tc.headers,
+ Body: ioutil.NopCloser(bytes.NewReader(nil)),
+ }
+ handleHTTP(httptest.NewRecorder(), r)
+ if addr != tc.addr {
+ t.Errorf("Header %v, got %q, want %q", tc.headers, addr, tc.addr)
+ }
+ }
+}
+
+func TestPanickingHandler(t *testing.T) {
+ http.HandleFunc("/panic", func(http.ResponseWriter, *http.Request) {
+ panic("whoops!")
+ })
+ r := &http.Request{
+ Method: "GET",
+ URL: &url.URL{Scheme: "http", Path: "/panic"},
+ Body: ioutil.NopCloser(bytes.NewReader(nil)),
+ }
+ rec := httptest.NewRecorder()
+ handleHTTP(rec, r)
+ if rec.Code != 500 {
+ t.Errorf("Panicking handler returned HTTP %d, want HTTP %d", rec.Code, 500)
+ }
+}
+
+var raceDetector = false
+
+func TestAPICallAllocations(t *testing.T) {
+ if raceDetector {
+ t.Skip("not running under race detector")
+ }
+
+ // Run the test API server in a subprocess so we aren't counting its allocations.
+ u, cleanup := launchHelperProcess(t)
+ defer cleanup()
+ c := &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{"s3cr3t"},
+ dapperHeader: []string{"trace-001"},
+ },
+ },
+ apiURL: u,
+ }
+
+ req := &basepb.StringProto{
+ Value: proto.String("Doctor Who"),
+ }
+ res := &basepb.StringProto{}
+ var apiErr error
+ avg := testing.AllocsPerRun(100, func() {
+ ctx, _ := netcontext.WithTimeout(toContext(c), 100*time.Millisecond)
+ if err := Call(ctx, "actordb", "LookupActor", req, res); err != nil && apiErr == nil {
+ apiErr = err // get the first error only
+ }
+ })
+ if apiErr != nil {
+ t.Errorf("API call failed: %v", apiErr)
+ }
+
+ // Lots of room for improvement...
+ // TODO(djd): Reduce maximum to 85 once the App Engine SDK is based on 1.6.
+ const min, max float64 = 70, 90
+ if avg < min || max < avg {
+ t.Errorf("Allocations per API call = %g, want in [%g,%g]", avg, min, max)
+ }
+}
+
+func launchHelperProcess(t *testing.T) (apiURL *url.URL, cleanup func()) {
+ cmd := exec.Command(os.Args[0], "-test.run=TestHelperProcess")
+ cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ t.Fatalf("StdinPipe: %v", err)
+ }
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ t.Fatalf("StdoutPipe: %v", err)
+ }
+ if err := cmd.Start(); err != nil {
+ t.Fatalf("Starting helper process: %v", err)
+ }
+
+ scan := bufio.NewScanner(stdout)
+ var u *url.URL
+ for scan.Scan() {
+ line := scan.Text()
+ if hp := strings.TrimPrefix(line, helperProcessMagic); hp != line {
+ var err error
+ u, err = url.Parse(hp)
+ if err != nil {
+ t.Fatalf("Failed to parse %q: %v", hp, err)
+ }
+ break
+ }
+ }
+ if err := scan.Err(); err != nil {
+ t.Fatalf("Scanning helper process stdout: %v", err)
+ }
+ if u == nil {
+ t.Fatal("Helper process never reported")
+ }
+
+ return u, func() {
+ stdin.Close()
+ if err := cmd.Wait(); err != nil {
+ t.Errorf("Helper process did not exit cleanly: %v", err)
+ }
+ }
+}
+
+const helperProcessMagic = "A lovely helper process is listening at "
+
+// This isn't a real test. It's used as a helper process.
+func TestHelperProcess(*testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ return
+ }
+ defer os.Exit(0)
+
+ f := &fakeAPIHandler{}
+ srv := httptest.NewServer(f)
+ defer srv.Close()
+ fmt.Println(helperProcessMagic + srv.URL + apiPath)
+
+ // Wait for stdin to be closed.
+ io.Copy(ioutil.Discard, os.Stdin)
+}
+
+func TestBackgroundContext(t *testing.T) {
+ environ := []struct {
+ key, value string
+ }{
+ {"GAE_LONG_APP_ID", "my-app-id"},
+ {"GAE_MINOR_VERSION", "067924799508853122"},
+ {"GAE_MODULE_INSTANCE", "0"},
+ {"GAE_MODULE_NAME", "default"},
+ {"GAE_MODULE_VERSION", "20150612t184001"},
+ }
+ for _, v := range environ {
+ old := os.Getenv(v.key)
+ os.Setenv(v.key, v.value)
+ v.value = old
+ }
+ defer func() { // Restore old environment after the test completes.
+ for _, v := range environ {
+ if v.value == "" {
+ os.Unsetenv(v.key)
+ continue
+ }
+ os.Setenv(v.key, v.value)
+ }
+ }()
+
+ ctx, key := fromContext(BackgroundContext()), "X-Magic-Ticket-Header"
+ if g, w := ctx.req.Header.Get(key), "my-app-id/default.20150612t184001.0"; g != w {
+ t.Errorf("%v = %q, want %q", key, g, w)
+ }
+
+ // Check that using the background context doesn't panic.
+ req := &basepb.StringProto{
+ Value: proto.String("Doctor Who"),
+ }
+ res := &basepb.StringProto{}
+ Call(BackgroundContext(), "actordb", "LookupActor", req, res) // expected to fail
+}
diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go
new file mode 100644
index 0000000..11df8c0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_id.go
@@ -0,0 +1,28 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "strings"
+)
+
+func parseFullAppID(appid string) (partition, domain, displayID string) {
+ if i := strings.Index(appid, "~"); i != -1 {
+ partition, appid = appid[:i], appid[i+1:]
+ }
+ if i := strings.Index(appid, ":"); i != -1 {
+ domain, appid = appid[:i], appid[i+1:]
+ }
+ return partition, domain, appid
+}
+
+// appID returns "appid" or "domain.com:appid".
+func appID(fullAppID string) string {
+ _, dom, dis := parseFullAppID(fullAppID)
+ if dom != "" {
+ return dom + ":" + dis
+ }
+ return dis
+}
diff --git a/vendor/google.golang.org/appengine/internal/app_id_test.go b/vendor/google.golang.org/appengine/internal/app_id_test.go
new file mode 100644
index 0000000..e69195c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_id_test.go
@@ -0,0 +1,34 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "testing"
+)
+
+func TestAppIDParsing(t *testing.T) {
+ testCases := []struct {
+ in string
+ partition, domain, displayID string
+ }{
+ {"simple-app-id", "", "", "simple-app-id"},
+ {"domain.com:domain-app-id", "", "domain.com", "domain-app-id"},
+ {"part~partition-app-id", "part", "", "partition-app-id"},
+ {"part~domain.com:display", "part", "domain.com", "display"},
+ }
+
+ for _, tc := range testCases {
+ part, dom, dis := parseFullAppID(tc.in)
+ if part != tc.partition {
+ t.Errorf("partition of %q: got %q, want %q", tc.in, part, tc.partition)
+ }
+ if dom != tc.domain {
+ t.Errorf("domain of %q: got %q, want %q", tc.in, dom, tc.domain)
+ }
+ if dis != tc.displayID {
+ t.Errorf("displayID of %q: got %q, want %q", tc.in, dis, tc.displayID)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
new file mode 100644
index 0000000..87d9701
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
@@ -0,0 +1,296 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+// DO NOT EDIT!
+
+/*
+Package app_identity is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+
+It has these top-level messages:
+ AppIdentityServiceError
+ SignForAppRequest
+ SignForAppResponse
+ GetPublicCertificateForAppRequest
+ PublicCertificate
+ GetPublicCertificateForAppResponse
+ GetServiceAccountNameRequest
+ GetServiceAccountNameResponse
+ GetAccessTokenRequest
+ GetAccessTokenResponse
+ GetDefaultGcsBucketNameRequest
+ GetDefaultGcsBucketNameResponse
+*/
+package app_identity
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type AppIdentityServiceError_ErrorCode int32
+
+const (
+ AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0
+ AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9
+ AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000
+ AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001
+ AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002
+ AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003
+ AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005
+ AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006
+)
+
+var AppIdentityServiceError_ErrorCode_name = map[int32]string{
+ 0: "SUCCESS",
+ 9: "UNKNOWN_SCOPE",
+ 1000: "BLOB_TOO_LARGE",
+ 1001: "DEADLINE_EXCEEDED",
+ 1002: "NOT_A_VALID_APP",
+ 1003: "UNKNOWN_ERROR",
+ 1005: "NOT_ALLOWED",
+ 1006: "NOT_IMPLEMENTED",
+}
+var AppIdentityServiceError_ErrorCode_value = map[string]int32{
+ "SUCCESS": 0,
+ "UNKNOWN_SCOPE": 9,
+ "BLOB_TOO_LARGE": 1000,
+ "DEADLINE_EXCEEDED": 1001,
+ "NOT_A_VALID_APP": 1002,
+ "UNKNOWN_ERROR": 1003,
+ "NOT_ALLOWED": 1005,
+ "NOT_IMPLEMENTED": 1006,
+}
+
+func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode {
+ p := new(AppIdentityServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x AppIdentityServiceError_ErrorCode) String() string {
+ return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x))
+}
+func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = AppIdentityServiceError_ErrorCode(value)
+ return nil
+}
+
+type AppIdentityServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} }
+func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) }
+func (*AppIdentityServiceError) ProtoMessage() {}
+
+type SignForAppRequest struct {
+ BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} }
+func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) }
+func (*SignForAppRequest) ProtoMessage() {}
+
+func (m *SignForAppRequest) GetBytesToSign() []byte {
+ if m != nil {
+ return m.BytesToSign
+ }
+ return nil
+}
+
+type SignForAppResponse struct {
+ KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
+ SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} }
+func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) }
+func (*SignForAppResponse) ProtoMessage() {}
+
+func (m *SignForAppResponse) GetKeyName() string {
+ if m != nil && m.KeyName != nil {
+ return *m.KeyName
+ }
+ return ""
+}
+
+func (m *SignForAppResponse) GetSignatureBytes() []byte {
+ if m != nil {
+ return m.SignatureBytes
+ }
+ return nil
+}
+
+type GetPublicCertificateForAppRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} }
+func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) }
+func (*GetPublicCertificateForAppRequest) ProtoMessage() {}
+
+type PublicCertificate struct {
+ KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
+ X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PublicCertificate) Reset() { *m = PublicCertificate{} }
+func (m *PublicCertificate) String() string { return proto.CompactTextString(m) }
+func (*PublicCertificate) ProtoMessage() {}
+
+func (m *PublicCertificate) GetKeyName() string {
+ if m != nil && m.KeyName != nil {
+ return *m.KeyName
+ }
+ return ""
+}
+
+func (m *PublicCertificate) GetX509CertificatePem() string {
+ if m != nil && m.X509CertificatePem != nil {
+ return *m.X509CertificatePem
+ }
+ return ""
+}
+
+type GetPublicCertificateForAppResponse struct {
+ PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"`
+ MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} }
+func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) }
+func (*GetPublicCertificateForAppResponse) ProtoMessage() {}
+
+func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate {
+ if m != nil {
+ return m.PublicCertificateList
+ }
+ return nil
+}
+
+func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 {
+ if m != nil && m.MaxClientCacheTimeInSecond != nil {
+ return *m.MaxClientCacheTimeInSecond
+ }
+ return 0
+}
+
+type GetServiceAccountNameRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} }
+func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameRequest) ProtoMessage() {}
+
+type GetServiceAccountNameResponse struct {
+ ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} }
+func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameResponse) ProtoMessage() {}
+
+func (m *GetServiceAccountNameResponse) GetServiceAccountName() string {
+ if m != nil && m.ServiceAccountName != nil {
+ return *m.ServiceAccountName
+ }
+ return ""
+}
+
+type GetAccessTokenRequest struct {
+ Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"`
+ ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"`
+ ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} }
+func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenRequest) ProtoMessage() {}
+
+func (m *GetAccessTokenRequest) GetScope() []string {
+ if m != nil {
+ return m.Scope
+ }
+ return nil
+}
+
+func (m *GetAccessTokenRequest) GetServiceAccountId() int64 {
+ if m != nil && m.ServiceAccountId != nil {
+ return *m.ServiceAccountId
+ }
+ return 0
+}
+
+func (m *GetAccessTokenRequest) GetServiceAccountName() string {
+ if m != nil && m.ServiceAccountName != nil {
+ return *m.ServiceAccountName
+ }
+ return ""
+}
+
+type GetAccessTokenResponse struct {
+ AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"`
+ ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} }
+func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenResponse) ProtoMessage() {}
+
+func (m *GetAccessTokenResponse) GetAccessToken() string {
+ if m != nil && m.AccessToken != nil {
+ return *m.AccessToken
+ }
+ return ""
+}
+
+func (m *GetAccessTokenResponse) GetExpirationTime() int64 {
+ if m != nil && m.ExpirationTime != nil {
+ return *m.ExpirationTime
+ }
+ return 0
+}
+
+type GetDefaultGcsBucketNameRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} }
+func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {}
+
+type GetDefaultGcsBucketNameResponse struct {
+ DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} }
+func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {}
+
+func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string {
+ if m != nil && m.DefaultGcsBucketName != nil {
+ return *m.DefaultGcsBucketName
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
new file mode 100644
index 0000000..19610ca
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "app_identity";
+
+package appengine;
+
+message AppIdentityServiceError {
+ enum ErrorCode {
+ SUCCESS = 0;
+ UNKNOWN_SCOPE = 9;
+ BLOB_TOO_LARGE = 1000;
+ DEADLINE_EXCEEDED = 1001;
+ NOT_A_VALID_APP = 1002;
+ UNKNOWN_ERROR = 1003;
+ NOT_ALLOWED = 1005;
+ NOT_IMPLEMENTED = 1006;
+ }
+}
+
+message SignForAppRequest {
+ optional bytes bytes_to_sign = 1;
+}
+
+message SignForAppResponse {
+ optional string key_name = 1;
+ optional bytes signature_bytes = 2;
+}
+
+message GetPublicCertificateForAppRequest {
+}
+
+message PublicCertificate {
+ optional string key_name = 1;
+ optional string x509_certificate_pem = 2;
+}
+
+message GetPublicCertificateForAppResponse {
+ repeated PublicCertificate public_certificate_list = 1;
+ optional int64 max_client_cache_time_in_second = 2;
+}
+
+message GetServiceAccountNameRequest {
+}
+
+message GetServiceAccountNameResponse {
+ optional string service_account_name = 1;
+}
+
+message GetAccessTokenRequest {
+ repeated string scope = 1;
+ optional int64 service_account_id = 2;
+ optional string service_account_name = 3;
+}
+
+message GetAccessTokenResponse {
+ optional string access_token = 1;
+ optional int64 expiration_time = 2;
+}
+
+message GetDefaultGcsBucketNameRequest {
+}
+
+message GetDefaultGcsBucketNameResponse {
+ optional string default_gcs_bucket_name = 1;
+}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
new file mode 100644
index 0000000..36a1956
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
@@ -0,0 +1,133 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/base/api_base.proto
+// DO NOT EDIT!
+
+/*
+Package base is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/base/api_base.proto
+
+It has these top-level messages:
+ StringProto
+ Integer32Proto
+ Integer64Proto
+ BoolProto
+ DoubleProto
+ BytesProto
+ VoidProto
+*/
+package base
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type StringProto struct {
+ Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StringProto) Reset() { *m = StringProto{} }
+func (m *StringProto) String() string { return proto.CompactTextString(m) }
+func (*StringProto) ProtoMessage() {}
+
+func (m *StringProto) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Integer32Proto struct {
+ Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Integer32Proto) Reset() { *m = Integer32Proto{} }
+func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer32Proto) ProtoMessage() {}
+
+func (m *Integer32Proto) GetValue() int32 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Integer64Proto struct {
+ Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Integer64Proto) Reset() { *m = Integer64Proto{} }
+func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer64Proto) ProtoMessage() {}
+
+func (m *Integer64Proto) GetValue() int64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type BoolProto struct {
+ Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BoolProto) Reset() { *m = BoolProto{} }
+func (m *BoolProto) String() string { return proto.CompactTextString(m) }
+func (*BoolProto) ProtoMessage() {}
+
+func (m *BoolProto) GetValue() bool {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return false
+}
+
+type DoubleProto struct {
+ Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DoubleProto) Reset() { *m = DoubleProto{} }
+func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
+func (*DoubleProto) ProtoMessage() {}
+
+func (m *DoubleProto) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type BytesProto struct {
+ Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BytesProto) Reset() { *m = BytesProto{} }
+func (m *BytesProto) String() string { return proto.CompactTextString(m) }
+func (*BytesProto) ProtoMessage() {}
+
+func (m *BytesProto) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type VoidProto struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *VoidProto) Reset() { *m = VoidProto{} }
+func (m *VoidProto) String() string { return proto.CompactTextString(m) }
+func (*VoidProto) ProtoMessage() {}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto
new file mode 100644
index 0000000..56cd7a3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.proto
@@ -0,0 +1,33 @@
+// Built-in base types for API calls. Primarily useful as return types.
+
+syntax = "proto2";
+option go_package = "base";
+
+package appengine.base;
+
+message StringProto {
+ required string value = 1;
+}
+
+message Integer32Proto {
+ required int32 value = 1;
+}
+
+message Integer64Proto {
+ required int64 value = 1;
+}
+
+message BoolProto {
+ required bool value = 1;
+}
+
+message DoubleProto {
+ required double value = 1;
+}
+
+message BytesProto {
+ required bytes value = 1 [ctype=CORD];
+}
+
+message VoidProto {
+}
diff --git a/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go
new file mode 100644
index 0000000..8705ec3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go
@@ -0,0 +1,347 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/blobstore/blobstore_service.proto
+// DO NOT EDIT!
+
+/*
+Package blobstore is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/blobstore/blobstore_service.proto
+
+It has these top-level messages:
+ BlobstoreServiceError
+ CreateUploadURLRequest
+ CreateUploadURLResponse
+ DeleteBlobRequest
+ FetchDataRequest
+ FetchDataResponse
+ CloneBlobRequest
+ CloneBlobResponse
+ DecodeBlobKeyRequest
+ DecodeBlobKeyResponse
+ CreateEncodedGoogleStorageKeyRequest
+ CreateEncodedGoogleStorageKeyResponse
+*/
+package blobstore
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type BlobstoreServiceError_ErrorCode int32
+
+const (
+ BlobstoreServiceError_OK BlobstoreServiceError_ErrorCode = 0
+ BlobstoreServiceError_INTERNAL_ERROR BlobstoreServiceError_ErrorCode = 1
+ BlobstoreServiceError_URL_TOO_LONG BlobstoreServiceError_ErrorCode = 2
+ BlobstoreServiceError_PERMISSION_DENIED BlobstoreServiceError_ErrorCode = 3
+ BlobstoreServiceError_BLOB_NOT_FOUND BlobstoreServiceError_ErrorCode = 4
+ BlobstoreServiceError_DATA_INDEX_OUT_OF_RANGE BlobstoreServiceError_ErrorCode = 5
+ BlobstoreServiceError_BLOB_FETCH_SIZE_TOO_LARGE BlobstoreServiceError_ErrorCode = 6
+ BlobstoreServiceError_ARGUMENT_OUT_OF_RANGE BlobstoreServiceError_ErrorCode = 8
+ BlobstoreServiceError_INVALID_BLOB_KEY BlobstoreServiceError_ErrorCode = 9
+)
+
+var BlobstoreServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INTERNAL_ERROR",
+ 2: "URL_TOO_LONG",
+ 3: "PERMISSION_DENIED",
+ 4: "BLOB_NOT_FOUND",
+ 5: "DATA_INDEX_OUT_OF_RANGE",
+ 6: "BLOB_FETCH_SIZE_TOO_LARGE",
+ 8: "ARGUMENT_OUT_OF_RANGE",
+ 9: "INVALID_BLOB_KEY",
+}
+var BlobstoreServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INTERNAL_ERROR": 1,
+ "URL_TOO_LONG": 2,
+ "PERMISSION_DENIED": 3,
+ "BLOB_NOT_FOUND": 4,
+ "DATA_INDEX_OUT_OF_RANGE": 5,
+ "BLOB_FETCH_SIZE_TOO_LARGE": 6,
+ "ARGUMENT_OUT_OF_RANGE": 8,
+ "INVALID_BLOB_KEY": 9,
+}
+
+func (x BlobstoreServiceError_ErrorCode) Enum() *BlobstoreServiceError_ErrorCode {
+ p := new(BlobstoreServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x BlobstoreServiceError_ErrorCode) String() string {
+ return proto.EnumName(BlobstoreServiceError_ErrorCode_name, int32(x))
+}
+func (x *BlobstoreServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(BlobstoreServiceError_ErrorCode_value, data, "BlobstoreServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = BlobstoreServiceError_ErrorCode(value)
+ return nil
+}
+
+type BlobstoreServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BlobstoreServiceError) Reset() { *m = BlobstoreServiceError{} }
+func (m *BlobstoreServiceError) String() string { return proto.CompactTextString(m) }
+func (*BlobstoreServiceError) ProtoMessage() {}
+
+type CreateUploadURLRequest struct {
+ SuccessPath *string `protobuf:"bytes,1,req,name=success_path" json:"success_path,omitempty"`
+ MaxUploadSizeBytes *int64 `protobuf:"varint,2,opt,name=max_upload_size_bytes" json:"max_upload_size_bytes,omitempty"`
+ MaxUploadSizePerBlobBytes *int64 `protobuf:"varint,3,opt,name=max_upload_size_per_blob_bytes" json:"max_upload_size_per_blob_bytes,omitempty"`
+ GsBucketName *string `protobuf:"bytes,4,opt,name=gs_bucket_name" json:"gs_bucket_name,omitempty"`
+ UrlExpiryTimeSeconds *int32 `protobuf:"varint,5,opt,name=url_expiry_time_seconds" json:"url_expiry_time_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateUploadURLRequest) Reset() { *m = CreateUploadURLRequest{} }
+func (m *CreateUploadURLRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateUploadURLRequest) ProtoMessage() {}
+
+func (m *CreateUploadURLRequest) GetSuccessPath() string {
+ if m != nil && m.SuccessPath != nil {
+ return *m.SuccessPath
+ }
+ return ""
+}
+
+func (m *CreateUploadURLRequest) GetMaxUploadSizeBytes() int64 {
+ if m != nil && m.MaxUploadSizeBytes != nil {
+ return *m.MaxUploadSizeBytes
+ }
+ return 0
+}
+
+func (m *CreateUploadURLRequest) GetMaxUploadSizePerBlobBytes() int64 {
+ if m != nil && m.MaxUploadSizePerBlobBytes != nil {
+ return *m.MaxUploadSizePerBlobBytes
+ }
+ return 0
+}
+
+func (m *CreateUploadURLRequest) GetGsBucketName() string {
+ if m != nil && m.GsBucketName != nil {
+ return *m.GsBucketName
+ }
+ return ""
+}
+
+func (m *CreateUploadURLRequest) GetUrlExpiryTimeSeconds() int32 {
+ if m != nil && m.UrlExpiryTimeSeconds != nil {
+ return *m.UrlExpiryTimeSeconds
+ }
+ return 0
+}
+
+type CreateUploadURLResponse struct {
+ Url *string `protobuf:"bytes,1,req,name=url" json:"url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateUploadURLResponse) Reset() { *m = CreateUploadURLResponse{} }
+func (m *CreateUploadURLResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateUploadURLResponse) ProtoMessage() {}
+
+func (m *CreateUploadURLResponse) GetUrl() string {
+ if m != nil && m.Url != nil {
+ return *m.Url
+ }
+ return ""
+}
+
+type DeleteBlobRequest struct {
+ BlobKey []string `protobuf:"bytes,1,rep,name=blob_key" json:"blob_key,omitempty"`
+ Token *string `protobuf:"bytes,2,opt,name=token" json:"token,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteBlobRequest) Reset() { *m = DeleteBlobRequest{} }
+func (m *DeleteBlobRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteBlobRequest) ProtoMessage() {}
+
+func (m *DeleteBlobRequest) GetBlobKey() []string {
+ if m != nil {
+ return m.BlobKey
+ }
+ return nil
+}
+
+func (m *DeleteBlobRequest) GetToken() string {
+ if m != nil && m.Token != nil {
+ return *m.Token
+ }
+ return ""
+}
+
+type FetchDataRequest struct {
+ BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ StartIndex *int64 `protobuf:"varint,2,req,name=start_index" json:"start_index,omitempty"`
+ EndIndex *int64 `protobuf:"varint,3,req,name=end_index" json:"end_index,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FetchDataRequest) Reset() { *m = FetchDataRequest{} }
+func (m *FetchDataRequest) String() string { return proto.CompactTextString(m) }
+func (*FetchDataRequest) ProtoMessage() {}
+
+func (m *FetchDataRequest) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+func (m *FetchDataRequest) GetStartIndex() int64 {
+ if m != nil && m.StartIndex != nil {
+ return *m.StartIndex
+ }
+ return 0
+}
+
+func (m *FetchDataRequest) GetEndIndex() int64 {
+ if m != nil && m.EndIndex != nil {
+ return *m.EndIndex
+ }
+ return 0
+}
+
+type FetchDataResponse struct {
+ Data []byte `protobuf:"bytes,1000,req,name=data" json:"data,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FetchDataResponse) Reset() { *m = FetchDataResponse{} }
+func (m *FetchDataResponse) String() string { return proto.CompactTextString(m) }
+func (*FetchDataResponse) ProtoMessage() {}
+
+func (m *FetchDataResponse) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+type CloneBlobRequest struct {
+ BlobKey []byte `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ MimeType []byte `protobuf:"bytes,2,req,name=mime_type" json:"mime_type,omitempty"`
+ TargetAppId []byte `protobuf:"bytes,3,req,name=target_app_id" json:"target_app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CloneBlobRequest) Reset() { *m = CloneBlobRequest{} }
+func (m *CloneBlobRequest) String() string { return proto.CompactTextString(m) }
+func (*CloneBlobRequest) ProtoMessage() {}
+
+func (m *CloneBlobRequest) GetBlobKey() []byte {
+ if m != nil {
+ return m.BlobKey
+ }
+ return nil
+}
+
+func (m *CloneBlobRequest) GetMimeType() []byte {
+ if m != nil {
+ return m.MimeType
+ }
+ return nil
+}
+
+func (m *CloneBlobRequest) GetTargetAppId() []byte {
+ if m != nil {
+ return m.TargetAppId
+ }
+ return nil
+}
+
+type CloneBlobResponse struct {
+ BlobKey []byte `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CloneBlobResponse) Reset() { *m = CloneBlobResponse{} }
+func (m *CloneBlobResponse) String() string { return proto.CompactTextString(m) }
+func (*CloneBlobResponse) ProtoMessage() {}
+
+func (m *CloneBlobResponse) GetBlobKey() []byte {
+ if m != nil {
+ return m.BlobKey
+ }
+ return nil
+}
+
+type DecodeBlobKeyRequest struct {
+ BlobKey []string `protobuf:"bytes,1,rep,name=blob_key" json:"blob_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DecodeBlobKeyRequest) Reset() { *m = DecodeBlobKeyRequest{} }
+func (m *DecodeBlobKeyRequest) String() string { return proto.CompactTextString(m) }
+func (*DecodeBlobKeyRequest) ProtoMessage() {}
+
+func (m *DecodeBlobKeyRequest) GetBlobKey() []string {
+ if m != nil {
+ return m.BlobKey
+ }
+ return nil
+}
+
+type DecodeBlobKeyResponse struct {
+ Decoded []string `protobuf:"bytes,1,rep,name=decoded" json:"decoded,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DecodeBlobKeyResponse) Reset() { *m = DecodeBlobKeyResponse{} }
+func (m *DecodeBlobKeyResponse) String() string { return proto.CompactTextString(m) }
+func (*DecodeBlobKeyResponse) ProtoMessage() {}
+
+func (m *DecodeBlobKeyResponse) GetDecoded() []string {
+ if m != nil {
+ return m.Decoded
+ }
+ return nil
+}
+
+type CreateEncodedGoogleStorageKeyRequest struct {
+ Filename *string `protobuf:"bytes,1,req,name=filename" json:"filename,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateEncodedGoogleStorageKeyRequest) Reset() { *m = CreateEncodedGoogleStorageKeyRequest{} }
+func (m *CreateEncodedGoogleStorageKeyRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateEncodedGoogleStorageKeyRequest) ProtoMessage() {}
+
+func (m *CreateEncodedGoogleStorageKeyRequest) GetFilename() string {
+ if m != nil && m.Filename != nil {
+ return *m.Filename
+ }
+ return ""
+}
+
+type CreateEncodedGoogleStorageKeyResponse struct {
+ BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateEncodedGoogleStorageKeyResponse) Reset() { *m = CreateEncodedGoogleStorageKeyResponse{} }
+func (m *CreateEncodedGoogleStorageKeyResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateEncodedGoogleStorageKeyResponse) ProtoMessage() {}
+
+func (m *CreateEncodedGoogleStorageKeyResponse) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+func init() {
+}
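
Illustration (a sketch, not part of the vendored generated file above): the generated getters all guard against a nil receiver and a nil field before dereferencing, which is why callers can read through them without explicit nil checks. The example below assumes it is compiled from somewhere inside the google.golang.org/appengine tree, since the blobstore package sits under internal/ and cannot be imported from outside that tree.

package main

import (
	"fmt"

	"google.golang.org/appengine/internal/blobstore"
)

func main() {
	// A nil message is safe to read through the generated getters:
	// GetUrl checks m != nil && m.Url != nil and falls back to "".
	var resp *blobstore.CreateUploadURLResponse
	fmt.Println(resp.GetUrl() == "") // true, no panic

	// The same pattern holds for scalar getters such as
	// GetUrlExpiryTimeSeconds, which returns 0 when the field is unset.
	var req *blobstore.CreateUploadURLRequest
	fmt.Println(req.GetUrlExpiryTimeSeconds()) // 0
}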
diff --git a/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto
new file mode 100644
index 0000000..33b2650
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto
@@ -0,0 +1,71 @@
+syntax = "proto2";
+option go_package = "blobstore";
+
+package appengine;
+
+message BlobstoreServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INTERNAL_ERROR = 1;
+ URL_TOO_LONG = 2;
+ PERMISSION_DENIED = 3;
+ BLOB_NOT_FOUND = 4;
+ DATA_INDEX_OUT_OF_RANGE = 5;
+ BLOB_FETCH_SIZE_TOO_LARGE = 6;
+ ARGUMENT_OUT_OF_RANGE = 8;
+ INVALID_BLOB_KEY = 9;
+ }
+}
+
+message CreateUploadURLRequest {
+ required string success_path = 1;
+ optional int64 max_upload_size_bytes = 2;
+ optional int64 max_upload_size_per_blob_bytes = 3;
+ optional string gs_bucket_name = 4;
+ optional int32 url_expiry_time_seconds = 5;
+}
+
+message CreateUploadURLResponse {
+ required string url = 1;
+}
+
+message DeleteBlobRequest {
+ repeated string blob_key = 1;
+ optional string token = 2;
+}
+
+message FetchDataRequest {
+ required string blob_key = 1;
+ required int64 start_index = 2;
+ required int64 end_index = 3;
+}
+
+message FetchDataResponse {
+ required bytes data = 1000 [ctype = CORD];
+}
+
+message CloneBlobRequest {
+ required bytes blob_key = 1;
+ required bytes mime_type = 2;
+ required bytes target_app_id = 3;
+}
+
+message CloneBlobResponse {
+ required bytes blob_key = 1;
+}
+
+message DecodeBlobKeyRequest {
+ repeated string blob_key = 1;
+}
+
+message DecodeBlobKeyResponse {
+ repeated string decoded = 1;
+}
+
+message CreateEncodedGoogleStorageKeyRequest {
+ required string filename = 1;
+}
+
+message CreateEncodedGoogleStorageKeyResponse {
+ required string blob_key = 1;
+}
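
Illustration (a sketch, not part of the vendored .proto above): because this file is proto2, every required/optional scalar becomes a pointer field in the generated Go struct, so literal values are wrapped with the helper constructors from the proto package. The Go field names below follow the usual protoc-gen-go camel-casing of the proto field names (only GsBucketName and UrlExpiryTimeSeconds are confirmed by the getters visible in this diff; SuccessPath and MaxUploadSizeBytes are the expected names and are assumptions here), and the snippet again assumes it lives inside the google.golang.org/appengine tree.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/appengine/internal/blobstore"
)

func main() {
	// proto.String / proto.Int64 / proto.Int32 return pointers to copies,
	// matching the *string / *int64 / *int32 fields generated for proto2.
	req := &blobstore.CreateUploadURLRequest{
		SuccessPath:          proto.String("/upload-done"),
		MaxUploadSizeBytes:   proto.Int64(1 << 20),
		GsBucketName:         proto.String("example-bucket"),
		UrlExpiryTimeSeconds: proto.Int32(300),
	}
	fmt.Println(req.GetGsBucketName(), req.GetUrlExpiryTimeSeconds())
}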
diff --git a/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go b/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go
new file mode 100644
index 0000000..1736364
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go
@@ -0,0 +1,125 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/capability/capability_service.proto
+// DO NOT EDIT!
+
+/*
+Package capability is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/capability/capability_service.proto
+
+It has these top-level messages:
+ IsEnabledRequest
+ IsEnabledResponse
+*/
+package capability
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type IsEnabledResponse_SummaryStatus int32
+
+const (
+ IsEnabledResponse_DEFAULT IsEnabledResponse_SummaryStatus = 0
+ IsEnabledResponse_ENABLED IsEnabledResponse_SummaryStatus = 1
+ IsEnabledResponse_SCHEDULED_FUTURE IsEnabledResponse_SummaryStatus = 2
+ IsEnabledResponse_SCHEDULED_NOW IsEnabledResponse_SummaryStatus = 3
+ IsEnabledResponse_DISABLED IsEnabledResponse_SummaryStatus = 4
+ IsEnabledResponse_UNKNOWN IsEnabledResponse_SummaryStatus = 5
+)
+
+var IsEnabledResponse_SummaryStatus_name = map[int32]string{
+ 0: "DEFAULT",
+ 1: "ENABLED",
+ 2: "SCHEDULED_FUTURE",
+ 3: "SCHEDULED_NOW",
+ 4: "DISABLED",
+ 5: "UNKNOWN",
+}
+var IsEnabledResponse_SummaryStatus_value = map[string]int32{
+ "DEFAULT": 0,
+ "ENABLED": 1,
+ "SCHEDULED_FUTURE": 2,
+ "SCHEDULED_NOW": 3,
+ "DISABLED": 4,
+ "UNKNOWN": 5,
+}
+
+func (x IsEnabledResponse_SummaryStatus) Enum() *IsEnabledResponse_SummaryStatus {
+ p := new(IsEnabledResponse_SummaryStatus)
+ *p = x
+ return p
+}
+func (x IsEnabledResponse_SummaryStatus) String() string {
+ return proto.EnumName(IsEnabledResponse_SummaryStatus_name, int32(x))
+}
+func (x *IsEnabledResponse_SummaryStatus) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IsEnabledResponse_SummaryStatus_value, data, "IsEnabledResponse_SummaryStatus")
+ if err != nil {
+ return err
+ }
+ *x = IsEnabledResponse_SummaryStatus(value)
+ return nil
+}
+
+type IsEnabledRequest struct {
+ Package *string `protobuf:"bytes,1,req,name=package" json:"package,omitempty"`
+ Capability []string `protobuf:"bytes,2,rep,name=capability" json:"capability,omitempty"`
+ Call []string `protobuf:"bytes,3,rep,name=call" json:"call,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IsEnabledRequest) Reset() { *m = IsEnabledRequest{} }
+func (m *IsEnabledRequest) String() string { return proto.CompactTextString(m) }
+func (*IsEnabledRequest) ProtoMessage() {}
+
+func (m *IsEnabledRequest) GetPackage() string {
+ if m != nil && m.Package != nil {
+ return *m.Package
+ }
+ return ""
+}
+
+func (m *IsEnabledRequest) GetCapability() []string {
+ if m != nil {
+ return m.Capability
+ }
+ return nil
+}
+
+func (m *IsEnabledRequest) GetCall() []string {
+ if m != nil {
+ return m.Call
+ }
+ return nil
+}
+
+type IsEnabledResponse struct {
+ SummaryStatus *IsEnabledResponse_SummaryStatus `protobuf:"varint,1,opt,name=summary_status,enum=appengine.IsEnabledResponse_SummaryStatus" json:"summary_status,omitempty"`
+ TimeUntilScheduled *int64 `protobuf:"varint,2,opt,name=time_until_scheduled" json:"time_until_scheduled,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IsEnabledResponse) Reset() { *m = IsEnabledResponse{} }
+func (m *IsEnabledResponse) String() string { return proto.CompactTextString(m) }
+func (*IsEnabledResponse) ProtoMessage() {}
+
+func (m *IsEnabledResponse) GetSummaryStatus() IsEnabledResponse_SummaryStatus {
+ if m != nil && m.SummaryStatus != nil {
+ return *m.SummaryStatus
+ }
+ return IsEnabledResponse_DEFAULT
+}
+
+func (m *IsEnabledResponse) GetTimeUntilScheduled() int64 {
+ if m != nil && m.TimeUntilScheduled != nil {
+ return *m.TimeUntilScheduled
+ }
+ return 0
+}
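
Illustration (a sketch, not part of the vendored generated file above): the SummaryStatus enum is generated with an Enum() helper that returns a pointer, which is what the optional, pointer-typed SummaryStatus field expects, and GetSummaryStatus falls back to IsEnabledResponse_DEFAULT when the field is unset. As before, the snippet assumes it compiles from inside the google.golang.org/appengine tree.

package main

import (
	"fmt"

	"google.golang.org/appengine/internal/capability"
)

func main() {
	// Enum() copies the value and returns a pointer, as required by the
	// optional SummaryStatus field.
	resp := &capability.IsEnabledResponse{
		SummaryStatus: capability.IsEnabledResponse_ENABLED.Enum(),
	}
	fmt.Println(resp.GetSummaryStatus()) // ENABLED (String() uses proto.EnumName)

	// An empty response reports the zero enum value, DEFAULT.
	fmt.Println(new(capability.IsEnabledResponse).GetSummaryStatus()) // DEFAULT
}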
diff --git a/vendor/google.golang.org/appengine/internal/capability/capability_service.proto b/vendor/google.golang.org/appengine/internal/capability/capability_service.proto
new file mode 100644
index 0000000..5660ab6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/capability/capability_service.proto
@@ -0,0 +1,28 @@
+syntax = "proto2";
+option go_package = "capability";
+
+package appengine;
+
+message IsEnabledRequest {
+ required string package = 1;
+ repeated string capability = 2;
+ repeated string call = 3;
+}
+
+message IsEnabledResponse {
+ enum SummaryStatus {
+ DEFAULT = 0;
+ ENABLED = 1;
+ SCHEDULED_FUTURE = 2;
+ SCHEDULED_NOW = 3;
+ DISABLED = 4;
+ UNKNOWN = 5;
+ }
+ optional SummaryStatus summary_status = 1;
+
+ optional int64 time_until_scheduled = 2;
+}
+
+service CapabilityService {
+ rpc IsEnabled(IsEnabledRequest) returns (IsEnabledResponse) {};
+}
diff --git a/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go b/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go
new file mode 100644
index 0000000..7b8d00c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go
@@ -0,0 +1,154 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/channel/channel_service.proto
+// DO NOT EDIT!
+
+/*
+Package channel is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/channel/channel_service.proto
+
+It has these top-level messages:
+ ChannelServiceError
+ CreateChannelRequest
+ CreateChannelResponse
+ SendMessageRequest
+*/
+package channel
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type ChannelServiceError_ErrorCode int32
+
+const (
+ ChannelServiceError_OK ChannelServiceError_ErrorCode = 0
+ ChannelServiceError_INTERNAL_ERROR ChannelServiceError_ErrorCode = 1
+ ChannelServiceError_INVALID_CHANNEL_KEY ChannelServiceError_ErrorCode = 2
+ ChannelServiceError_BAD_MESSAGE ChannelServiceError_ErrorCode = 3
+ ChannelServiceError_INVALID_CHANNEL_TOKEN_DURATION ChannelServiceError_ErrorCode = 4
+ ChannelServiceError_APPID_ALIAS_REQUIRED ChannelServiceError_ErrorCode = 5
+)
+
+var ChannelServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INTERNAL_ERROR",
+ 2: "INVALID_CHANNEL_KEY",
+ 3: "BAD_MESSAGE",
+ 4: "INVALID_CHANNEL_TOKEN_DURATION",
+ 5: "APPID_ALIAS_REQUIRED",
+}
+var ChannelServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INTERNAL_ERROR": 1,
+ "INVALID_CHANNEL_KEY": 2,
+ "BAD_MESSAGE": 3,
+ "INVALID_CHANNEL_TOKEN_DURATION": 4,
+ "APPID_ALIAS_REQUIRED": 5,
+}
+
+func (x ChannelServiceError_ErrorCode) Enum() *ChannelServiceError_ErrorCode {
+ p := new(ChannelServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x ChannelServiceError_ErrorCode) String() string {
+ return proto.EnumName(ChannelServiceError_ErrorCode_name, int32(x))
+}
+func (x *ChannelServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ChannelServiceError_ErrorCode_value, data, "ChannelServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ChannelServiceError_ErrorCode(value)
+ return nil
+}
+
+type ChannelServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ChannelServiceError) Reset() { *m = ChannelServiceError{} }
+func (m *ChannelServiceError) String() string { return proto.CompactTextString(m) }
+func (*ChannelServiceError) ProtoMessage() {}
+
+type CreateChannelRequest struct {
+ ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"`
+ DurationMinutes *int32 `protobuf:"varint,2,opt,name=duration_minutes" json:"duration_minutes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateChannelRequest) Reset() { *m = CreateChannelRequest{} }
+func (m *CreateChannelRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateChannelRequest) ProtoMessage() {}
+
+func (m *CreateChannelRequest) GetApplicationKey() string {
+ if m != nil && m.ApplicationKey != nil {
+ return *m.ApplicationKey
+ }
+ return ""
+}
+
+func (m *CreateChannelRequest) GetDurationMinutes() int32 {
+ if m != nil && m.DurationMinutes != nil {
+ return *m.DurationMinutes
+ }
+ return 0
+}
+
+type CreateChannelResponse struct {
+ Token *string `protobuf:"bytes,2,opt,name=token" json:"token,omitempty"`
+ DurationMinutes *int32 `protobuf:"varint,3,opt,name=duration_minutes" json:"duration_minutes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateChannelResponse) Reset() { *m = CreateChannelResponse{} }
+func (m *CreateChannelResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateChannelResponse) ProtoMessage() {}
+
+func (m *CreateChannelResponse) GetToken() string {
+ if m != nil && m.Token != nil {
+ return *m.Token
+ }
+ return ""
+}
+
+func (m *CreateChannelResponse) GetDurationMinutes() int32 {
+ if m != nil && m.DurationMinutes != nil {
+ return *m.DurationMinutes
+ }
+ return 0
+}
+
+type SendMessageRequest struct {
+ ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"`
+ Message *string `protobuf:"bytes,2,req,name=message" json:"message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SendMessageRequest) Reset() { *m = SendMessageRequest{} }
+func (m *SendMessageRequest) String() string { return proto.CompactTextString(m) }
+func (*SendMessageRequest) ProtoMessage() {}
+
+func (m *SendMessageRequest) GetApplicationKey() string {
+ if m != nil && m.ApplicationKey != nil {
+ return *m.ApplicationKey
+ }
+ return ""
+}
+
+func (m *SendMessageRequest) GetMessage() string {
+ if m != nil && m.Message != nil {
+ return *m.Message
+ }
+ return ""
+}
+
+func init() {
+}
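
Illustration (a sketch, not part of the vendored generated file above): the generated messages implement proto.Message via Reset, String, and ProtoMessage, so they round-trip through proto.Marshal and proto.Unmarshal from the vendored github.com/golang/protobuf/proto package. The snippet assumes it compiles from inside the google.golang.org/appengine tree.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"google.golang.org/appengine/internal/channel"
)

func main() {
	in := &channel.SendMessageRequest{
		ApplicationKey: proto.String("client-123"),
		Message:        proto.String("hello"),
	}

	// Serialize to the binary wire format and back.
	b, err := proto.Marshal(in)
	if err != nil {
		log.Fatal(err)
	}
	out := new(channel.SendMessageRequest)
	if err := proto.Unmarshal(b, out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetApplicationKey(), out.GetMessage()) // client-123 hello
}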
diff --git a/vendor/google.golang.org/appengine/internal/channel/channel_service.proto b/vendor/google.golang.org/appengine/internal/channel/channel_service.proto
new file mode 100644
index 0000000..2b5a918
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/channel/channel_service.proto
@@ -0,0 +1,30 @@
+syntax = "proto2";
+option go_package = "channel";
+
+package appengine;
+
+message ChannelServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INTERNAL_ERROR = 1;
+ INVALID_CHANNEL_KEY = 2;
+ BAD_MESSAGE = 3;
+ INVALID_CHANNEL_TOKEN_DURATION = 4;
+ APPID_ALIAS_REQUIRED = 5;
+ }
+}
+
+message CreateChannelRequest {
+ required string application_key = 1;
+ optional int32 duration_minutes = 2;
+}
+
+message CreateChannelResponse {
+ optional string token = 2;
+ optional int32 duration_minutes = 3;
+}
+
+message SendMessageRequest {
+ required string application_key = 1;
+ required string message = 2;
+}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
new file mode 100644
index 0000000..8613cb7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
@@ -0,0 +1,2778 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
+// DO NOT EDIT!
+
+/*
+Package datastore is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/datastore/datastore_v3.proto
+
+It has these top-level messages:
+ Action
+ PropertyValue
+ Property
+ Path
+ Reference
+ User
+ EntityProto
+ CompositeProperty
+ Index
+ CompositeIndex
+ IndexPostfix
+ IndexPosition
+ Snapshot
+ InternalHeader
+ Transaction
+ Query
+ CompiledQuery
+ CompiledCursor
+ Cursor
+ Error
+ Cost
+ GetRequest
+ GetResponse
+ PutRequest
+ PutResponse
+ TouchRequest
+ TouchResponse
+ DeleteRequest
+ DeleteResponse
+ NextRequest
+ QueryResult
+ AllocateIdsRequest
+ AllocateIdsResponse
+ CompositeIndices
+ AddActionsRequest
+ AddActionsResponse
+ BeginTransactionRequest
+ CommitResponse
+*/
+package datastore
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type Property_Meaning int32
+
+const (
+ Property_NO_MEANING Property_Meaning = 0
+ Property_BLOB Property_Meaning = 14
+ Property_TEXT Property_Meaning = 15
+ Property_BYTESTRING Property_Meaning = 16
+ Property_ATOM_CATEGORY Property_Meaning = 1
+ Property_ATOM_LINK Property_Meaning = 2
+ Property_ATOM_TITLE Property_Meaning = 3
+ Property_ATOM_CONTENT Property_Meaning = 4
+ Property_ATOM_SUMMARY Property_Meaning = 5
+ Property_ATOM_AUTHOR Property_Meaning = 6
+ Property_GD_WHEN Property_Meaning = 7
+ Property_GD_EMAIL Property_Meaning = 8
+ Property_GEORSS_POINT Property_Meaning = 9
+ Property_GD_IM Property_Meaning = 10
+ Property_GD_PHONENUMBER Property_Meaning = 11
+ Property_GD_POSTALADDRESS Property_Meaning = 12
+ Property_GD_RATING Property_Meaning = 13
+ Property_BLOBKEY Property_Meaning = 17
+ Property_ENTITY_PROTO Property_Meaning = 19
+ Property_INDEX_VALUE Property_Meaning = 18
+)
+
+var Property_Meaning_name = map[int32]string{
+ 0: "NO_MEANING",
+ 14: "BLOB",
+ 15: "TEXT",
+ 16: "BYTESTRING",
+ 1: "ATOM_CATEGORY",
+ 2: "ATOM_LINK",
+ 3: "ATOM_TITLE",
+ 4: "ATOM_CONTENT",
+ 5: "ATOM_SUMMARY",
+ 6: "ATOM_AUTHOR",
+ 7: "GD_WHEN",
+ 8: "GD_EMAIL",
+ 9: "GEORSS_POINT",
+ 10: "GD_IM",
+ 11: "GD_PHONENUMBER",
+ 12: "GD_POSTALADDRESS",
+ 13: "GD_RATING",
+ 17: "BLOBKEY",
+ 19: "ENTITY_PROTO",
+ 18: "INDEX_VALUE",
+}
+var Property_Meaning_value = map[string]int32{
+ "NO_MEANING": 0,
+ "BLOB": 14,
+ "TEXT": 15,
+ "BYTESTRING": 16,
+ "ATOM_CATEGORY": 1,
+ "ATOM_LINK": 2,
+ "ATOM_TITLE": 3,
+ "ATOM_CONTENT": 4,
+ "ATOM_SUMMARY": 5,
+ "ATOM_AUTHOR": 6,
+ "GD_WHEN": 7,
+ "GD_EMAIL": 8,
+ "GEORSS_POINT": 9,
+ "GD_IM": 10,
+ "GD_PHONENUMBER": 11,
+ "GD_POSTALADDRESS": 12,
+ "GD_RATING": 13,
+ "BLOBKEY": 17,
+ "ENTITY_PROTO": 19,
+ "INDEX_VALUE": 18,
+}
+
+func (x Property_Meaning) Enum() *Property_Meaning {
+ p := new(Property_Meaning)
+ *p = x
+ return p
+}
+func (x Property_Meaning) String() string {
+ return proto.EnumName(Property_Meaning_name, int32(x))
+}
+func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning")
+ if err != nil {
+ return err
+ }
+ *x = Property_Meaning(value)
+ return nil
+}
+
+type Property_FtsTokenizationOption int32
+
+const (
+ Property_HTML Property_FtsTokenizationOption = 1
+ Property_ATOM Property_FtsTokenizationOption = 2
+)
+
+var Property_FtsTokenizationOption_name = map[int32]string{
+ 1: "HTML",
+ 2: "ATOM",
+}
+var Property_FtsTokenizationOption_value = map[string]int32{
+ "HTML": 1,
+ "ATOM": 2,
+}
+
+func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {
+ p := new(Property_FtsTokenizationOption)
+ *p = x
+ return p
+}
+func (x Property_FtsTokenizationOption) String() string {
+ return proto.EnumName(Property_FtsTokenizationOption_name, int32(x))
+}
+func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption")
+ if err != nil {
+ return err
+ }
+ *x = Property_FtsTokenizationOption(value)
+ return nil
+}
+
+type EntityProto_Kind int32
+
+const (
+ EntityProto_GD_CONTACT EntityProto_Kind = 1
+ EntityProto_GD_EVENT EntityProto_Kind = 2
+ EntityProto_GD_MESSAGE EntityProto_Kind = 3
+)
+
+var EntityProto_Kind_name = map[int32]string{
+ 1: "GD_CONTACT",
+ 2: "GD_EVENT",
+ 3: "GD_MESSAGE",
+}
+var EntityProto_Kind_value = map[string]int32{
+ "GD_CONTACT": 1,
+ "GD_EVENT": 2,
+ "GD_MESSAGE": 3,
+}
+
+func (x EntityProto_Kind) Enum() *EntityProto_Kind {
+ p := new(EntityProto_Kind)
+ *p = x
+ return p
+}
+func (x EntityProto_Kind) String() string {
+ return proto.EnumName(EntityProto_Kind_name, int32(x))
+}
+func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind")
+ if err != nil {
+ return err
+ }
+ *x = EntityProto_Kind(value)
+ return nil
+}
+
+type Index_Property_Direction int32
+
+const (
+ Index_Property_ASCENDING Index_Property_Direction = 1
+ Index_Property_DESCENDING Index_Property_Direction = 2
+)
+
+var Index_Property_Direction_name = map[int32]string{
+ 1: "ASCENDING",
+ 2: "DESCENDING",
+}
+var Index_Property_Direction_value = map[string]int32{
+ "ASCENDING": 1,
+ "DESCENDING": 2,
+}
+
+func (x Index_Property_Direction) Enum() *Index_Property_Direction {
+ p := new(Index_Property_Direction)
+ *p = x
+ return p
+}
+func (x Index_Property_Direction) String() string {
+ return proto.EnumName(Index_Property_Direction_name, int32(x))
+}
+func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction")
+ if err != nil {
+ return err
+ }
+ *x = Index_Property_Direction(value)
+ return nil
+}
+
+type CompositeIndex_State int32
+
+const (
+ CompositeIndex_WRITE_ONLY CompositeIndex_State = 1
+ CompositeIndex_READ_WRITE CompositeIndex_State = 2
+ CompositeIndex_DELETED CompositeIndex_State = 3
+ CompositeIndex_ERROR CompositeIndex_State = 4
+)
+
+var CompositeIndex_State_name = map[int32]string{
+ 1: "WRITE_ONLY",
+ 2: "READ_WRITE",
+ 3: "DELETED",
+ 4: "ERROR",
+}
+var CompositeIndex_State_value = map[string]int32{
+ "WRITE_ONLY": 1,
+ "READ_WRITE": 2,
+ "DELETED": 3,
+ "ERROR": 4,
+}
+
+func (x CompositeIndex_State) Enum() *CompositeIndex_State {
+ p := new(CompositeIndex_State)
+ *p = x
+ return p
+}
+func (x CompositeIndex_State) String() string {
+ return proto.EnumName(CompositeIndex_State_name, int32(x))
+}
+func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State")
+ if err != nil {
+ return err
+ }
+ *x = CompositeIndex_State(value)
+ return nil
+}
+
+type Snapshot_Status int32
+
+const (
+ Snapshot_INACTIVE Snapshot_Status = 0
+ Snapshot_ACTIVE Snapshot_Status = 1
+)
+
+var Snapshot_Status_name = map[int32]string{
+ 0: "INACTIVE",
+ 1: "ACTIVE",
+}
+var Snapshot_Status_value = map[string]int32{
+ "INACTIVE": 0,
+ "ACTIVE": 1,
+}
+
+func (x Snapshot_Status) Enum() *Snapshot_Status {
+ p := new(Snapshot_Status)
+ *p = x
+ return p
+}
+func (x Snapshot_Status) String() string {
+ return proto.EnumName(Snapshot_Status_name, int32(x))
+}
+func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status")
+ if err != nil {
+ return err
+ }
+ *x = Snapshot_Status(value)
+ return nil
+}
+
+type Query_Hint int32
+
+const (
+ Query_ORDER_FIRST Query_Hint = 1
+ Query_ANCESTOR_FIRST Query_Hint = 2
+ Query_FILTER_FIRST Query_Hint = 3
+)
+
+var Query_Hint_name = map[int32]string{
+ 1: "ORDER_FIRST",
+ 2: "ANCESTOR_FIRST",
+ 3: "FILTER_FIRST",
+}
+var Query_Hint_value = map[string]int32{
+ "ORDER_FIRST": 1,
+ "ANCESTOR_FIRST": 2,
+ "FILTER_FIRST": 3,
+}
+
+func (x Query_Hint) Enum() *Query_Hint {
+ p := new(Query_Hint)
+ *p = x
+ return p
+}
+func (x Query_Hint) String() string {
+ return proto.EnumName(Query_Hint_name, int32(x))
+}
+func (x *Query_Hint) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint")
+ if err != nil {
+ return err
+ }
+ *x = Query_Hint(value)
+ return nil
+}
+
+type Query_Filter_Operator int32
+
+const (
+ Query_Filter_LESS_THAN Query_Filter_Operator = 1
+ Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2
+ Query_Filter_GREATER_THAN Query_Filter_Operator = 3
+ Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4
+ Query_Filter_EQUAL Query_Filter_Operator = 5
+ Query_Filter_IN Query_Filter_Operator = 6
+ Query_Filter_EXISTS Query_Filter_Operator = 7
+)
+
+var Query_Filter_Operator_name = map[int32]string{
+ 1: "LESS_THAN",
+ 2: "LESS_THAN_OR_EQUAL",
+ 3: "GREATER_THAN",
+ 4: "GREATER_THAN_OR_EQUAL",
+ 5: "EQUAL",
+ 6: "IN",
+ 7: "EXISTS",
+}
+var Query_Filter_Operator_value = map[string]int32{
+ "LESS_THAN": 1,
+ "LESS_THAN_OR_EQUAL": 2,
+ "GREATER_THAN": 3,
+ "GREATER_THAN_OR_EQUAL": 4,
+ "EQUAL": 5,
+ "IN": 6,
+ "EXISTS": 7,
+}
+
+func (x Query_Filter_Operator) Enum() *Query_Filter_Operator {
+ p := new(Query_Filter_Operator)
+ *p = x
+ return p
+}
+func (x Query_Filter_Operator) String() string {
+ return proto.EnumName(Query_Filter_Operator_name, int32(x))
+}
+func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator")
+ if err != nil {
+ return err
+ }
+ *x = Query_Filter_Operator(value)
+ return nil
+}
+
+type Query_Order_Direction int32
+
+const (
+ Query_Order_ASCENDING Query_Order_Direction = 1
+ Query_Order_DESCENDING Query_Order_Direction = 2
+)
+
+var Query_Order_Direction_name = map[int32]string{
+ 1: "ASCENDING",
+ 2: "DESCENDING",
+}
+var Query_Order_Direction_value = map[string]int32{
+ "ASCENDING": 1,
+ "DESCENDING": 2,
+}
+
+func (x Query_Order_Direction) Enum() *Query_Order_Direction {
+ p := new(Query_Order_Direction)
+ *p = x
+ return p
+}
+func (x Query_Order_Direction) String() string {
+ return proto.EnumName(Query_Order_Direction_name, int32(x))
+}
+func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction")
+ if err != nil {
+ return err
+ }
+ *x = Query_Order_Direction(value)
+ return nil
+}
+
+type Error_ErrorCode int32
+
+const (
+ Error_BAD_REQUEST Error_ErrorCode = 1
+ Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2
+ Error_INTERNAL_ERROR Error_ErrorCode = 3
+ Error_NEED_INDEX Error_ErrorCode = 4
+ Error_TIMEOUT Error_ErrorCode = 5
+ Error_PERMISSION_DENIED Error_ErrorCode = 6
+ Error_BIGTABLE_ERROR Error_ErrorCode = 7
+ Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8
+ Error_CAPABILITY_DISABLED Error_ErrorCode = 9
+ Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10
+ Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11
+)
+
+var Error_ErrorCode_name = map[int32]string{
+ 1: "BAD_REQUEST",
+ 2: "CONCURRENT_TRANSACTION",
+ 3: "INTERNAL_ERROR",
+ 4: "NEED_INDEX",
+ 5: "TIMEOUT",
+ 6: "PERMISSION_DENIED",
+ 7: "BIGTABLE_ERROR",
+ 8: "COMMITTED_BUT_STILL_APPLYING",
+ 9: "CAPABILITY_DISABLED",
+ 10: "TRY_ALTERNATE_BACKEND",
+ 11: "SAFE_TIME_TOO_OLD",
+}
+var Error_ErrorCode_value = map[string]int32{
+ "BAD_REQUEST": 1,
+ "CONCURRENT_TRANSACTION": 2,
+ "INTERNAL_ERROR": 3,
+ "NEED_INDEX": 4,
+ "TIMEOUT": 5,
+ "PERMISSION_DENIED": 6,
+ "BIGTABLE_ERROR": 7,
+ "COMMITTED_BUT_STILL_APPLYING": 8,
+ "CAPABILITY_DISABLED": 9,
+ "TRY_ALTERNATE_BACKEND": 10,
+ "SAFE_TIME_TOO_OLD": 11,
+}
+
+func (x Error_ErrorCode) Enum() *Error_ErrorCode {
+ p := new(Error_ErrorCode)
+ *p = x
+ return p
+}
+func (x Error_ErrorCode) String() string {
+ return proto.EnumName(Error_ErrorCode_name, int32(x))
+}
+func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = Error_ErrorCode(value)
+ return nil
+}
+
+type PutRequest_AutoIdPolicy int32
+
+const (
+ PutRequest_CURRENT PutRequest_AutoIdPolicy = 0
+ PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1
+)
+
+var PutRequest_AutoIdPolicy_name = map[int32]string{
+ 0: "CURRENT",
+ 1: "SEQUENTIAL",
+}
+var PutRequest_AutoIdPolicy_value = map[string]int32{
+ "CURRENT": 0,
+ "SEQUENTIAL": 1,
+}
+
+func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {
+ p := new(PutRequest_AutoIdPolicy)
+ *p = x
+ return p
+}
+func (x PutRequest_AutoIdPolicy) String() string {
+ return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))
+}
+func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy")
+ if err != nil {
+ return err
+ }
+ *x = PutRequest_AutoIdPolicy(value)
+ return nil
+}
+
+type Action struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Action) Reset() { *m = Action{} }
+func (m *Action) String() string { return proto.CompactTextString(m) }
+func (*Action) ProtoMessage() {}
+
+type PropertyValue struct {
+ Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
+ BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
+ DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
+ Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"`
+ Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"`
+ Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue) Reset() { *m = PropertyValue{} }
+func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue) ProtoMessage() {}
+
+func (m *PropertyValue) GetInt64Value() int64 {
+ if m != nil && m.Int64Value != nil {
+ return *m.Int64Value
+ }
+ return 0
+}
+
+func (m *PropertyValue) GetBooleanValue() bool {
+ if m != nil && m.BooleanValue != nil {
+ return *m.BooleanValue
+ }
+ return false
+}
+
+func (m *PropertyValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+func (m *PropertyValue) GetDoubleValue() float64 {
+ if m != nil && m.DoubleValue != nil {
+ return *m.DoubleValue
+ }
+ return 0
+}
+
+func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {
+ if m != nil {
+ return m.Pointvalue
+ }
+ return nil
+}
+
+func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {
+ if m != nil {
+ return m.Uservalue
+ }
+ return nil
+}
+
+func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
+ if m != nil {
+ return m.Referencevalue
+ }
+ return nil
+}
+
+type PropertyValue_PointValue struct {
+ X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
+ Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} }
+func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_PointValue) ProtoMessage() {}
+
+func (m *PropertyValue_PointValue) GetX() float64 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+func (m *PropertyValue_PointValue) GetY() float64 {
+ if m != nil && m.Y != nil {
+ return *m.Y
+ }
+ return 0
+}
+
+type PropertyValue_UserValue struct {
+ Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} }
+func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_UserValue) ProtoMessage() {}
+
+func (m *PropertyValue_UserValue) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedProvider() string {
+ if m != nil && m.FederatedProvider != nil {
+ return *m.FederatedProvider
+ }
+ return ""
+}
+
+type PropertyValue_ReferenceValue struct {
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
+ Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} }
+func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue) ProtoMessage() {}
+
+func (m *PropertyValue_ReferenceValue) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {
+ if m != nil {
+ return m.Pathelement
+ }
+ return nil
+}
+
+type PropertyValue_ReferenceValue_PathElement struct {
+ Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
+ *m = PropertyValue_ReferenceValue_PathElement{}
+}
+func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type Property struct {
+ Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"`
+ MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"`
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"`
+ Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"`
+ Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
+ FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
+ Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Property) Reset() { *m = Property{} }
+func (m *Property) String() string { return proto.CompactTextString(m) }
+func (*Property) ProtoMessage() {}
+
+const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
+const Default_Property_Searchable bool = false
+const Default_Property_Locale string = "en"
+
+func (m *Property) GetMeaning() Property_Meaning {
+ if m != nil && m.Meaning != nil {
+ return *m.Meaning
+ }
+ return Default_Property_Meaning
+}
+
+func (m *Property) GetMeaningUri() string {
+ if m != nil && m.MeaningUri != nil {
+ return *m.MeaningUri
+ }
+ return ""
+}
+
+func (m *Property) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Property) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *Property) GetMultiple() bool {
+ if m != nil && m.Multiple != nil {
+ return *m.Multiple
+ }
+ return false
+}
+
+func (m *Property) GetSearchable() bool {
+ if m != nil && m.Searchable != nil {
+ return *m.Searchable
+ }
+ return Default_Property_Searchable
+}
+
+func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {
+ if m != nil && m.FtsTokenizationOption != nil {
+ return *m.FtsTokenizationOption
+ }
+ return Property_HTML
+}
+
+func (m *Property) GetLocale() string {
+ if m != nil && m.Locale != nil {
+ return *m.Locale
+ }
+ return Default_Property_Locale
+}
+
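Illustration (a sketch, not part of the vendored generated file): unlike the plain getters elsewhere in this file, GetSearchable and GetLocale above return the proto2 defaults encoded in their field tags (def=0 and def=en) via the generated Default_Property_* constants, rather than Go zero values, when the field is unset. The snippet assumes it compiles from inside the google.golang.org/appengine tree.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/appengine/internal/datastore"
)

func main() {
	p := new(datastore.Property)

	// Unset fields with explicit proto2 defaults come back as those defaults...
	fmt.Println(p.GetLocale())     // "en"  (Default_Property_Locale)
	fmt.Println(p.GetSearchable()) // false (Default_Property_Searchable)

	// ...while explicitly set values override them.
	p.Locale = proto.String("fr")
	fmt.Println(p.GetLocale()) // "fr"
}
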
+type Path struct {
+ Element []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Path) Reset() { *m = Path{} }
+func (m *Path) String() string { return proto.CompactTextString(m) }
+func (*Path) ProtoMessage() {}
+
+func (m *Path) GetElement() []*Path_Element {
+ if m != nil {
+ return m.Element
+ }
+ return nil
+}
+
+type Path_Element struct {
+ Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Path_Element) Reset() { *m = Path_Element{} }
+func (m *Path_Element) String() string { return proto.CompactTextString(m) }
+func (*Path_Element) ProtoMessage() {}
+
+func (m *Path_Element) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *Path_Element) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *Path_Element) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type Reference struct {
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
+ Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Reference) Reset() { *m = Reference{} }
+func (m *Reference) String() string { return proto.CompactTextString(m) }
+func (*Reference) ProtoMessage() {}
+
+func (m *Reference) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Reference) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *Reference) GetPath() *Path {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+type User struct {
+ Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *User) Reset() { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage() {}
+
+func (m *User) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *User) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *User) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *User) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+func (m *User) GetFederatedProvider() string {
+ if m != nil && m.FederatedProvider != nil {
+ return *m.FederatedProvider
+ }
+ return ""
+}
+
+type EntityProto struct {
+ Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
+ EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"`
+ Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
+ Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
+ KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"`
+ Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EntityProto) Reset() { *m = EntityProto{} }
+func (m *EntityProto) String() string { return proto.CompactTextString(m) }
+func (*EntityProto) ProtoMessage() {}
+
+func (m *EntityProto) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *EntityProto) GetEntityGroup() *Path {
+ if m != nil {
+ return m.EntityGroup
+ }
+ return nil
+}
+
+func (m *EntityProto) GetOwner() *User {
+ if m != nil {
+ return m.Owner
+ }
+ return nil
+}
+
+func (m *EntityProto) GetKind() EntityProto_Kind {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return EntityProto_GD_CONTACT
+}
+
+func (m *EntityProto) GetKindUri() string {
+ if m != nil && m.KindUri != nil {
+ return *m.KindUri
+ }
+ return ""
+}
+
+func (m *EntityProto) GetProperty() []*Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+func (m *EntityProto) GetRawProperty() []*Property {
+ if m != nil {
+ return m.RawProperty
+ }
+ return nil
+}
+
+func (m *EntityProto) GetRank() int32 {
+ if m != nil && m.Rank != nil {
+ return *m.Rank
+ }
+ return 0
+}
+
+type CompositeProperty struct {
+ IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"`
+ Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeProperty) Reset() { *m = CompositeProperty{} }
+func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
+func (*CompositeProperty) ProtoMessage() {}
+
+func (m *CompositeProperty) GetIndexId() int64 {
+ if m != nil && m.IndexId != nil {
+ return *m.IndexId
+ }
+ return 0
+}
+
+func (m *CompositeProperty) GetValue() []string {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Index struct {
+ EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"`
+ Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
+ Property []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Index) Reset() { *m = Index{} }
+func (m *Index) String() string { return proto.CompactTextString(m) }
+func (*Index) ProtoMessage() {}
+
+func (m *Index) GetEntityType() string {
+ if m != nil && m.EntityType != nil {
+ return *m.EntityType
+ }
+ return ""
+}
+
+func (m *Index) GetAncestor() bool {
+ if m != nil && m.Ancestor != nil {
+ return *m.Ancestor
+ }
+ return false
+}
+
+func (m *Index) GetProperty() []*Index_Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+type Index_Property struct {
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Index_Property) Reset() { *m = Index_Property{} }
+func (m *Index_Property) String() string { return proto.CompactTextString(m) }
+func (*Index_Property) ProtoMessage() {}
+
+const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING
+
+func (m *Index_Property) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Index_Property) GetDirection() Index_Property_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_Index_Property_Direction
+}
+
+type CompositeIndex struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
+ Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
+ State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
+ OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeIndex) Reset() { *m = CompositeIndex{} }
+func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndex) ProtoMessage() {}
+
+const Default_CompositeIndex_OnlyUseIfRequired bool = false
+
+func (m *CompositeIndex) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *CompositeIndex) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *CompositeIndex) GetDefinition() *Index {
+ if m != nil {
+ return m.Definition
+ }
+ return nil
+}
+
+func (m *CompositeIndex) GetState() CompositeIndex_State {
+ if m != nil && m.State != nil {
+ return *m.State
+ }
+ return CompositeIndex_WRITE_ONLY
+}
+
+func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
+ if m != nil && m.OnlyUseIfRequired != nil {
+ return *m.OnlyUseIfRequired
+ }
+ return Default_CompositeIndex_OnlyUseIfRequired
+}
+
+type IndexPostfix struct {
+ IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"`
+ Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPostfix) Reset() { *m = IndexPostfix{} }
+func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix) ProtoMessage() {}
+
+const Default_IndexPostfix_Before bool = true
+
+func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {
+ if m != nil {
+ return m.IndexValue
+ }
+ return nil
+}
+
+func (m *IndexPostfix) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *IndexPostfix) GetBefore() bool {
+ if m != nil && m.Before != nil {
+ return *m.Before
+ }
+ return Default_IndexPostfix_Before
+}
+
+type IndexPostfix_IndexValue struct {
+ PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} }
+func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix_IndexValue) ProtoMessage() {}
+
+func (m *IndexPostfix_IndexValue) GetPropertyName() string {
+ if m != nil && m.PropertyName != nil {
+ return *m.PropertyName
+ }
+ return ""
+}
+
+func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type IndexPosition struct {
+ Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPosition) Reset() { *m = IndexPosition{} }
+func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
+func (*IndexPosition) ProtoMessage() {}
+
+const Default_IndexPosition_Before bool = true
+
+func (m *IndexPosition) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *IndexPosition) GetBefore() bool {
+ if m != nil && m.Before != nil {
+ return *m.Before
+ }
+ return Default_IndexPosition_Before
+}
+
+type Snapshot struct {
+ Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+
+func (m *Snapshot) GetTs() int64 {
+ if m != nil && m.Ts != nil {
+ return *m.Ts
+ }
+ return 0
+}
+
+type InternalHeader struct {
+ Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InternalHeader) Reset() { *m = InternalHeader{} }
+func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
+func (*InternalHeader) ProtoMessage() {}
+
+func (m *InternalHeader) GetQos() string {
+ if m != nil && m.Qos != nil {
+ return *m.Qos
+ }
+ return ""
+}
+
+type Transaction struct {
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
+ App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
+ MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Transaction) Reset() { *m = Transaction{} }
+func (m *Transaction) String() string { return proto.CompactTextString(m) }
+func (*Transaction) ProtoMessage() {}
+
+const Default_Transaction_MarkChanges bool = false
+
+func (m *Transaction) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *Transaction) GetHandle() uint64 {
+ if m != nil && m.Handle != nil {
+ return *m.Handle
+ }
+ return 0
+}
+
+func (m *Transaction) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Transaction) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_Transaction_MarkChanges
+}
+
+type Query struct {
+ Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"`
+ Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
+ Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"`
+ SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"`
+ Order []*Query_Order `protobuf:"group,9,rep,name=Order" json:"order,omitempty"`
+ Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
+ Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
+ EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"`
+ RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"`
+ KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
+ Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
+ PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"`
+ GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"`
+ Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
+ MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"`
+ SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"`
+ PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query) Reset() { *m = Query{} }
+func (m *Query) String() string { return proto.CompactTextString(m) }
+func (*Query) ProtoMessage() {}
+
+const Default_Query_Offset int32 = 0
+const Default_Query_RequirePerfectPlan bool = false
+const Default_Query_KeysOnly bool = false
+const Default_Query_Compile bool = false
+const Default_Query_PersistOffset bool = false
+
+func (m *Query) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *Query) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Query) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *Query) GetKind() string {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return ""
+}
+
+func (m *Query) GetAncestor() *Reference {
+ if m != nil {
+ return m.Ancestor
+ }
+ return nil
+}
+
+func (m *Query) GetFilter() []*Query_Filter {
+ if m != nil {
+ return m.Filter
+ }
+ return nil
+}
+
+func (m *Query) GetSearchQuery() string {
+ if m != nil && m.SearchQuery != nil {
+ return *m.SearchQuery
+ }
+ return ""
+}
+
+func (m *Query) GetOrder() []*Query_Order {
+ if m != nil {
+ return m.Order
+ }
+ return nil
+}
+
+func (m *Query) GetHint() Query_Hint {
+ if m != nil && m.Hint != nil {
+ return *m.Hint
+ }
+ return Query_ORDER_FIRST
+}
+
+func (m *Query) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *Query) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_Query_Offset
+}
+
+func (m *Query) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+func (m *Query) GetCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.CompiledCursor
+ }
+ return nil
+}
+
+func (m *Query) GetEndCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.EndCompiledCursor
+ }
+ return nil
+}
+
+func (m *Query) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *Query) GetRequirePerfectPlan() bool {
+ if m != nil && m.RequirePerfectPlan != nil {
+ return *m.RequirePerfectPlan
+ }
+ return Default_Query_RequirePerfectPlan
+}
+
+func (m *Query) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return Default_Query_KeysOnly
+}
+
+func (m *Query) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *Query) GetCompile() bool {
+ if m != nil && m.Compile != nil {
+ return *m.Compile
+ }
+ return Default_Query_Compile
+}
+
+func (m *Query) GetFailoverMs() int64 {
+ if m != nil && m.FailoverMs != nil {
+ return *m.FailoverMs
+ }
+ return 0
+}
+
+func (m *Query) GetStrong() bool {
+ if m != nil && m.Strong != nil {
+ return *m.Strong
+ }
+ return false
+}
+
+func (m *Query) GetPropertyName() []string {
+ if m != nil {
+ return m.PropertyName
+ }
+ return nil
+}
+
+func (m *Query) GetGroupByPropertyName() []string {
+ if m != nil {
+ return m.GroupByPropertyName
+ }
+ return nil
+}
+
+func (m *Query) GetDistinct() bool {
+ if m != nil && m.Distinct != nil {
+ return *m.Distinct
+ }
+ return false
+}
+
+func (m *Query) GetMinSafeTimeSeconds() int64 {
+ if m != nil && m.MinSafeTimeSeconds != nil {
+ return *m.MinSafeTimeSeconds
+ }
+ return 0
+}
+
+func (m *Query) GetSafeReplicaName() []string {
+ if m != nil {
+ return m.SafeReplicaName
+ }
+ return nil
+}
+
+func (m *Query) GetPersistOffset() bool {
+ if m != nil && m.PersistOffset != nil {
+ return *m.PersistOffset
+ }
+ return Default_Query_PersistOffset
+}
+
+type Query_Filter struct {
+ Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query_Filter) Reset() { *m = Query_Filter{} }
+func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
+func (*Query_Filter) ProtoMessage() {}
+
+func (m *Query_Filter) GetOp() Query_Filter_Operator {
+ if m != nil && m.Op != nil {
+ return *m.Op
+ }
+ return Query_Filter_LESS_THAN
+}
+
+func (m *Query_Filter) GetProperty() []*Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+type Query_Order struct {
+ Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
+ Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query_Order) Reset() { *m = Query_Order{} }
+func (m *Query_Order) String() string { return proto.CompactTextString(m) }
+func (*Query_Order) ProtoMessage() {}
+
+const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING
+
+func (m *Query_Order) GetProperty() string {
+ if m != nil && m.Property != nil {
+ return *m.Property
+ }
+ return ""
+}
+
+func (m *Query_Order) GetDirection() Query_Order_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_Query_Order_Direction
+}
+
+type CompiledQuery struct {
+ Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"`
+ Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"`
+ IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"`
+ Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
+ KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"`
+ PropertyName []string `protobuf:"bytes,24,rep,name=property_name" json:"property_name,omitempty"`
+ DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"`
+ Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery) Reset() { *m = CompiledQuery{} }
+func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery) ProtoMessage() {}
+
+const Default_CompiledQuery_Offset int32 = 0
+
+func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {
+ if m != nil {
+ return m.Primaryscan
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {
+ if m != nil {
+ return m.Mergejoinscan
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetIndexDef() *Index {
+ if m != nil {
+ return m.IndexDef
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_CompiledQuery_Offset
+}
+
+func (m *CompiledQuery) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+func (m *CompiledQuery) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *CompiledQuery) GetPropertyName() []string {
+ if m != nil {
+ return m.PropertyName
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetDistinctInfixSize() int32 {
+ if m != nil && m.DistinctInfixSize != nil {
+ return *m.DistinctInfixSize
+ }
+ return 0
+}
+
+func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {
+ if m != nil {
+ return m.Entityfilter
+ }
+ return nil
+}
+
+type CompiledQuery_PrimaryScan struct {
+ IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"`
+ StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"`
+ StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"`
+ EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"`
+ EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"`
+ StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"`
+ EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"`
+ EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} }
+func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_PrimaryScan) ProtoMessage() {}
+
+func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
+ if m != nil && m.IndexName != nil {
+ return *m.IndexName
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartKey() string {
+ if m != nil && m.StartKey != nil {
+ return *m.StartKey
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {
+ if m != nil && m.StartInclusive != nil {
+ return *m.StartInclusive
+ }
+ return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndKey() string {
+ if m != nil && m.EndKey != nil {
+ return *m.EndKey
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {
+ if m != nil && m.EndInclusive != nil {
+ return *m.EndInclusive
+ }
+ return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {
+ if m != nil {
+ return m.StartPostfixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {
+ if m != nil {
+ return m.EndPostfixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
+ if m != nil && m.EndUnappliedLogTimestampUs != nil {
+ return *m.EndUnappliedLogTimestampUs
+ }
+ return 0
+}
+
+type CompiledQuery_MergeJoinScan struct {
+ IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"`
+ PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"`
+ ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" json:"value_prefix,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} }
+func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_MergeJoinScan) ProtoMessage() {}
+
+const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false
+
+func (m *CompiledQuery_MergeJoinScan) GetIndexName() string {
+ if m != nil && m.IndexName != nil {
+ return *m.IndexName
+ }
+ return ""
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {
+ if m != nil {
+ return m.PrefixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
+ if m != nil && m.ValuePrefix != nil {
+ return *m.ValuePrefix
+ }
+ return Default_CompiledQuery_MergeJoinScan_ValuePrefix
+}
+
+type CompiledQuery_EntityFilter struct {
+ Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
+ Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} }
+func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_EntityFilter) ProtoMessage() {}
+
+const Default_CompiledQuery_EntityFilter_Distinct bool = false
+
+func (m *CompiledQuery_EntityFilter) GetDistinct() bool {
+ if m != nil && m.Distinct != nil {
+ return *m.Distinct
+ }
+ return Default_CompiledQuery_EntityFilter_Distinct
+}
+
+func (m *CompiledQuery_EntityFilter) GetKind() string {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return ""
+}
+
+func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
+ if m != nil {
+ return m.Ancestor
+ }
+ return nil
+}
+
+type CompiledCursor struct {
+ Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position" json:"position,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor) Reset() { *m = CompiledCursor{} }
+func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor) ProtoMessage() {}
+
+func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
+ if m != nil {
+ return m.Position
+ }
+ return nil
+}
+
+type CompiledCursor_Position struct {
+ StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"`
+ Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"`
+ Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
+ StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} }
+func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position) ProtoMessage() {}
+
+const Default_CompiledCursor_Position_StartInclusive bool = true
+
+func (m *CompiledCursor_Position) GetStartKey() string {
+ if m != nil && m.StartKey != nil {
+ return *m.StartKey
+ }
+ return ""
+}
+
+func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {
+ if m != nil {
+ return m.Indexvalue
+ }
+ return nil
+}
+
+func (m *CompiledCursor_Position) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *CompiledCursor_Position) GetStartInclusive() bool {
+ if m != nil && m.StartInclusive != nil {
+ return *m.StartInclusive
+ }
+ return Default_CompiledCursor_Position_StartInclusive
+}
+
+type CompiledCursor_Position_IndexValue struct {
+ Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} }
+func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position_IndexValue) ProtoMessage() {}
+
+func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
+ if m != nil && m.Property != nil {
+ return *m.Property
+ }
+ return ""
+}
+
+func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Cursor struct {
+ Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
+ App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cursor) Reset() { *m = Cursor{} }
+func (m *Cursor) String() string { return proto.CompactTextString(m) }
+func (*Cursor) ProtoMessage() {}
+
+func (m *Cursor) GetCursor() uint64 {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return 0
+}
+
+func (m *Cursor) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+type Error struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Error) Reset() { *m = Error{} }
+func (m *Error) String() string { return proto.CompactTextString(m) }
+func (*Error) ProtoMessage() {}
+
+type Cost struct {
+ IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"`
+ IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"`
+ EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"`
+ EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"`
+ Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"`
+ ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" json:"approximate_storage_delta,omitempty"`
+ IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cost) Reset() { *m = Cost{} }
+func (m *Cost) String() string { return proto.CompactTextString(m) }
+func (*Cost) ProtoMessage() {}
+
+func (m *Cost) GetIndexWrites() int32 {
+ if m != nil && m.IndexWrites != nil {
+ return *m.IndexWrites
+ }
+ return 0
+}
+
+func (m *Cost) GetIndexWriteBytes() int32 {
+ if m != nil && m.IndexWriteBytes != nil {
+ return *m.IndexWriteBytes
+ }
+ return 0
+}
+
+func (m *Cost) GetEntityWrites() int32 {
+ if m != nil && m.EntityWrites != nil {
+ return *m.EntityWrites
+ }
+ return 0
+}
+
+func (m *Cost) GetEntityWriteBytes() int32 {
+ if m != nil && m.EntityWriteBytes != nil {
+ return *m.EntityWriteBytes
+ }
+ return 0
+}
+
+func (m *Cost) GetCommitcost() *Cost_CommitCost {
+ if m != nil {
+ return m.Commitcost
+ }
+ return nil
+}
+
+func (m *Cost) GetApproximateStorageDelta() int32 {
+ if m != nil && m.ApproximateStorageDelta != nil {
+ return *m.ApproximateStorageDelta
+ }
+ return 0
+}
+
+func (m *Cost) GetIdSequenceUpdates() int32 {
+ if m != nil && m.IdSequenceUpdates != nil {
+ return *m.IdSequenceUpdates
+ }
+ return 0
+}
+
+type Cost_CommitCost struct {
+ RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"`
+ RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} }
+func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
+func (*Cost_CommitCost) ProtoMessage() {}
+
+func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
+ if m != nil && m.RequestedEntityPuts != nil {
+ return *m.RequestedEntityPuts
+ }
+ return 0
+}
+
+func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
+ if m != nil && m.RequestedEntityDeletes != nil {
+ return *m.RequestedEntityDeletes
+ }
+ return 0
+}
+
+type GetRequest struct {
+ Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
+ AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetRequest) Reset() { *m = GetRequest{} }
+func (m *GetRequest) String() string { return proto.CompactTextString(m) }
+func (*GetRequest) ProtoMessage() {}
+
+const Default_GetRequest_AllowDeferred bool = false
+
+func (m *GetRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *GetRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *GetRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *GetRequest) GetFailoverMs() int64 {
+ if m != nil && m.FailoverMs != nil {
+ return *m.FailoverMs
+ }
+ return 0
+}
+
+func (m *GetRequest) GetStrong() bool {
+ if m != nil && m.Strong != nil {
+ return *m.Strong
+ }
+ return false
+}
+
+func (m *GetRequest) GetAllowDeferred() bool {
+ if m != nil && m.AllowDeferred != nil {
+ return *m.AllowDeferred
+ }
+ return Default_GetRequest_AllowDeferred
+}
+
+type GetResponse struct {
+ Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"`
+ Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
+ InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetResponse) Reset() { *m = GetResponse{} }
+func (m *GetResponse) String() string { return proto.CompactTextString(m) }
+func (*GetResponse) ProtoMessage() {}
+
+const Default_GetResponse_InOrder bool = true
+
+func (m *GetResponse) GetEntity() []*GetResponse_Entity {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *GetResponse) GetDeferred() []*Reference {
+ if m != nil {
+ return m.Deferred
+ }
+ return nil
+}
+
+func (m *GetResponse) GetInOrder() bool {
+ if m != nil && m.InOrder != nil {
+ return *m.InOrder
+ }
+ return Default_GetResponse_InOrder
+}
+
+type GetResponse_Entity struct {
+ Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
+ Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
+ Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} }
+func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
+func (*GetResponse_Entity) ProtoMessage() {}
+
+func (m *GetResponse_Entity) GetEntity() *EntityProto {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *GetResponse_Entity) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *GetResponse_Entity) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+type PutRequest struct {
+ Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
+ Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PutRequest) Reset() { *m = PutRequest{} }
+func (m *PutRequest) String() string { return proto.CompactTextString(m) }
+func (*PutRequest) ProtoMessage() {}
+
+const Default_PutRequest_Trusted bool = false
+const Default_PutRequest_Force bool = false
+const Default_PutRequest_MarkChanges bool = false
+const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT
+
+func (m *PutRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *PutRequest) GetEntity() []*EntityProto {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *PutRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *PutRequest) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *PutRequest) GetTrusted() bool {
+ if m != nil && m.Trusted != nil {
+ return *m.Trusted
+ }
+ return Default_PutRequest_Trusted
+}
+
+func (m *PutRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_PutRequest_Force
+}
+
+func (m *PutRequest) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_PutRequest_MarkChanges
+}
+
+func (m *PutRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
+ if m != nil && m.AutoIdPolicy != nil {
+ return *m.AutoIdPolicy
+ }
+ return Default_PutRequest_AutoIdPolicy
+}
+
+type PutResponse struct {
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PutResponse) Reset() { *m = PutResponse{} }
+func (m *PutResponse) String() string { return proto.CompactTextString(m) }
+func (*PutResponse) ProtoMessage() {}
+
+func (m *PutResponse) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *PutResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *PutResponse) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type TouchRequest struct {
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"`
+ Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TouchRequest) Reset() { *m = TouchRequest{} }
+func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
+func (*TouchRequest) ProtoMessage() {}
+
+const Default_TouchRequest_Force bool = false
+
+func (m *TouchRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_TouchRequest_Force
+}
+
+func (m *TouchRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+type TouchResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TouchResponse) Reset() { *m = TouchResponse{} }
+func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
+func (*TouchResponse) ProtoMessage() {}
+
+func (m *TouchResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+type DeleteRequest struct {
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
+func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteRequest) ProtoMessage() {}
+
+const Default_DeleteRequest_Trusted bool = false
+const Default_DeleteRequest_Force bool = false
+const Default_DeleteRequest_MarkChanges bool = false
+
+func (m *DeleteRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetTrusted() bool {
+ if m != nil && m.Trusted != nil {
+ return *m.Trusted
+ }
+ return Default_DeleteRequest_Trusted
+}
+
+func (m *DeleteRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_DeleteRequest_Force
+}
+
+func (m *DeleteRequest) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_DeleteRequest_MarkChanges
+}
+
+func (m *DeleteRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+type DeleteResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
+func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteResponse) ProtoMessage() {}
+
+func (m *DeleteResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *DeleteResponse) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type NextRequest struct {
+ Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
+ Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
+ Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
+ Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NextRequest) Reset() { *m = NextRequest{} }
+func (m *NextRequest) String() string { return proto.CompactTextString(m) }
+func (*NextRequest) ProtoMessage() {}
+
+const Default_NextRequest_Offset int32 = 0
+const Default_NextRequest_Compile bool = false
+
+func (m *NextRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *NextRequest) GetCursor() *Cursor {
+ if m != nil {
+ return m.Cursor
+ }
+ return nil
+}
+
+func (m *NextRequest) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *NextRequest) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_NextRequest_Offset
+}
+
+func (m *NextRequest) GetCompile() bool {
+ if m != nil && m.Compile != nil {
+ return *m.Compile
+ }
+ return Default_NextRequest_Compile
+}
+
+type QueryResult struct {
+ Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
+ Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
+ SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"`
+ MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"`
+ KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"`
+ IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"`
+ SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"`
+ CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
+ Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
+ Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *QueryResult) Reset() { *m = QueryResult{} }
+func (m *QueryResult) String() string { return proto.CompactTextString(m) }
+func (*QueryResult) ProtoMessage() {}
+
+func (m *QueryResult) GetCursor() *Cursor {
+ if m != nil {
+ return m.Cursor
+ }
+ return nil
+}
+
+func (m *QueryResult) GetResult() []*EntityProto {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *QueryResult) GetSkippedResults() int32 {
+ if m != nil && m.SkippedResults != nil {
+ return *m.SkippedResults
+ }
+ return 0
+}
+
+func (m *QueryResult) GetMoreResults() bool {
+ if m != nil && m.MoreResults != nil {
+ return *m.MoreResults
+ }
+ return false
+}
+
+func (m *QueryResult) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *QueryResult) GetIndexOnly() bool {
+ if m != nil && m.IndexOnly != nil {
+ return *m.IndexOnly
+ }
+ return false
+}
+
+func (m *QueryResult) GetSmallOps() bool {
+ if m != nil && m.SmallOps != nil {
+ return *m.SmallOps
+ }
+ return false
+}
+
+func (m *QueryResult) GetCompiledQuery() *CompiledQuery {
+ if m != nil {
+ return m.CompiledQuery
+ }
+ return nil
+}
+
+func (m *QueryResult) GetCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.CompiledCursor
+ }
+ return nil
+}
+
+func (m *QueryResult) GetIndex() []*CompositeIndex {
+ if m != nil {
+ return m.Index
+ }
+ return nil
+}
+
+func (m *QueryResult) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type AllocateIdsRequest struct {
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"`
+ Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
+ Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
+ Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} }
+func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsRequest) ProtoMessage() {}
+
+func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AllocateIdsRequest) GetModelKey() *Reference {
+ if m != nil {
+ return m.ModelKey
+ }
+ return nil
+}
+
+func (m *AllocateIdsRequest) GetSize() int64 {
+ if m != nil && m.Size != nil {
+ return *m.Size
+ }
+ return 0
+}
+
+func (m *AllocateIdsRequest) GetMax() int64 {
+ if m != nil && m.Max != nil {
+ return *m.Max
+ }
+ return 0
+}
+
+func (m *AllocateIdsRequest) GetReserve() []*Reference {
+ if m != nil {
+ return m.Reserve
+ }
+ return nil
+}
+
+type AllocateIdsResponse struct {
+ Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
+ End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
+ Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} }
+func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsResponse) ProtoMessage() {}
+
+func (m *AllocateIdsResponse) GetStart() int64 {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return 0
+}
+
+func (m *AllocateIdsResponse) GetEnd() int64 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
+
+func (m *AllocateIdsResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+type CompositeIndices struct {
+ Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeIndices) Reset() { *m = CompositeIndices{} }
+func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndices) ProtoMessage() {}
+
+func (m *CompositeIndices) GetIndex() []*CompositeIndex {
+ if m != nil {
+ return m.Index
+ }
+ return nil
+}
+
+type AddActionsRequest struct {
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
+ Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} }
+func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
+func (*AddActionsRequest) ProtoMessage() {}
+
+func (m *AddActionsRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AddActionsRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *AddActionsRequest) GetAction() []*Action {
+ if m != nil {
+ return m.Action
+ }
+ return nil
+}
+
+type AddActionsResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} }
+func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
+func (*AddActionsResponse) ProtoMessage() {}
+
+type BeginTransactionRequest struct {
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} }
+func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
+func (*BeginTransactionRequest) ProtoMessage() {}
+
+const Default_BeginTransactionRequest_AllowMultipleEg bool = false
+
+func (m *BeginTransactionRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *BeginTransactionRequest) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *BeginTransactionRequest) GetAllowMultipleEg() bool {
+ if m != nil && m.AllowMultipleEg != nil {
+ return *m.AllowMultipleEg
+ }
+ return Default_BeginTransactionRequest_AllowMultipleEg
+}
+
+type CommitResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CommitResponse) Reset() { *m = CommitResponse{} }
+func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse) ProtoMessage() {}
+
+func (m *CommitResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type CommitResponse_Version struct {
+ RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"`
+ Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} }
+func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse_Version) ProtoMessage() {}
+
+func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
+ if m != nil {
+ return m.RootEntityKey
+ }
+ return nil
+}
+
+func (m *CommitResponse_Version) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func init() {
+}
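A minimal usage sketch for the generated datastore types above (illustrative only, not part of the vendored file; it assumes code living in, or importing, the generated package): optional fields are pointers, so they are set with the proto.String/proto.Int32/proto.Bool helpers from github.com/golang/protobuf/proto and read back through the nil-safe getters, which fall back to the Default_* constants when a field is unset.

	q := &Query{
		App:      proto.String("myapp"),     // field names taken from the struct above
		Kind:     proto.String("Article"),
		Limit:    proto.Int32(10),
		KeysOnly: proto.Bool(true),
	}
	// Offset was never set, so the getter returns Default_Query_Offset (0).
	_ = q.GetOffset()
	// A nil receiver is also safe: every getter checks m != nil and returns the zero/default value.
	var unset *Query
	_ = unset.GetKind() // ""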
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
new file mode 100755
index 0000000..e76f126
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
@@ -0,0 +1,541 @@
+syntax = "proto2";
+option go_package = "datastore";
+
+package appengine;
+
+message Action{}
+
+message PropertyValue {
+ optional int64 int64Value = 1;
+ optional bool booleanValue = 2;
+ optional string stringValue = 3;
+ optional double doubleValue = 4;
+
+ optional group PointValue = 5 {
+ required double x = 6;
+ required double y = 7;
+ }
+
+ optional group UserValue = 8 {
+ required string email = 9;
+ required string auth_domain = 10;
+ optional string nickname = 11;
+ optional string federated_identity = 21;
+ optional string federated_provider = 22;
+ }
+
+ optional group ReferenceValue = 12 {
+ required string app = 13;
+ optional string name_space = 20;
+ repeated group PathElement = 14 {
+ required string type = 15;
+ optional int64 id = 16;
+ optional string name = 17;
+ }
+ }
+}
+
+message Property {
+ enum Meaning {
+ NO_MEANING = 0;
+ BLOB = 14;
+ TEXT = 15;
+ BYTESTRING = 16;
+
+ ATOM_CATEGORY = 1;
+ ATOM_LINK = 2;
+ ATOM_TITLE = 3;
+ ATOM_CONTENT = 4;
+ ATOM_SUMMARY = 5;
+ ATOM_AUTHOR = 6;
+
+ GD_WHEN = 7;
+ GD_EMAIL = 8;
+ GEORSS_POINT = 9;
+ GD_IM = 10;
+
+ GD_PHONENUMBER = 11;
+ GD_POSTALADDRESS = 12;
+
+ GD_RATING = 13;
+
+ BLOBKEY = 17;
+ ENTITY_PROTO = 19;
+
+ INDEX_VALUE = 18;
+ };
+
+ optional Meaning meaning = 1 [default = NO_MEANING];
+ optional string meaning_uri = 2;
+
+ required string name = 3;
+
+ required PropertyValue value = 5;
+
+ required bool multiple = 4;
+
+ optional bool searchable = 6 [default=false];
+
+ enum FtsTokenizationOption {
+ HTML = 1;
+ ATOM = 2;
+ }
+
+ optional FtsTokenizationOption fts_tokenization_option = 8;
+
+ optional string locale = 9 [default = "en"];
+}
+
+message Path {
+ repeated group Element = 1 {
+ required string type = 2;
+ optional int64 id = 3;
+ optional string name = 4;
+ }
+}
+
+message Reference {
+ required string app = 13;
+ optional string name_space = 20;
+ required Path path = 14;
+}
+
+message User {
+ required string email = 1;
+ required string auth_domain = 2;
+ optional string nickname = 3;
+ optional string federated_identity = 6;
+ optional string federated_provider = 7;
+}
+
+message EntityProto {
+ required Reference key = 13;
+ required Path entity_group = 16;
+ optional User owner = 17;
+
+ enum Kind {
+ GD_CONTACT = 1;
+ GD_EVENT = 2;
+ GD_MESSAGE = 3;
+ }
+ optional Kind kind = 4;
+ optional string kind_uri = 5;
+
+ repeated Property property = 14;
+ repeated Property raw_property = 15;
+
+ optional int32 rank = 18;
+}
+
+message CompositeProperty {
+ required int64 index_id = 1;
+ repeated string value = 2;
+}
+
+message Index {
+ required string entity_type = 1;
+ required bool ancestor = 5;
+ repeated group Property = 2 {
+ required string name = 3;
+ enum Direction {
+ ASCENDING = 1;
+ DESCENDING = 2;
+ }
+ optional Direction direction = 4 [default = ASCENDING];
+ }
+}
+
+message CompositeIndex {
+ required string app_id = 1;
+ required int64 id = 2;
+ required Index definition = 3;
+
+ enum State {
+ WRITE_ONLY = 1;
+ READ_WRITE = 2;
+ DELETED = 3;
+ ERROR = 4;
+ }
+ required State state = 4;
+
+ optional bool only_use_if_required = 6 [default = false];
+}
+
+message IndexPostfix {
+ message IndexValue {
+ required string property_name = 1;
+ required PropertyValue value = 2;
+ }
+
+ repeated IndexValue index_value = 1;
+
+ optional Reference key = 2;
+
+ optional bool before = 3 [default=true];
+}
+
+message IndexPosition {
+ optional string key = 1;
+
+ optional bool before = 2 [default=true];
+}
+
+message Snapshot {
+ enum Status {
+ INACTIVE = 0;
+ ACTIVE = 1;
+ }
+
+ required int64 ts = 1;
+}
+
+message InternalHeader {
+ optional string qos = 1;
+}
+
+message Transaction {
+ optional InternalHeader header = 4;
+ required fixed64 handle = 1;
+ required string app = 2;
+ optional bool mark_changes = 3 [default = false];
+}
+
+message Query {
+ optional InternalHeader header = 39;
+
+ required string app = 1;
+ optional string name_space = 29;
+
+ optional string kind = 3;
+ optional Reference ancestor = 17;
+
+ repeated group Filter = 4 {
+ enum Operator {
+ LESS_THAN = 1;
+ LESS_THAN_OR_EQUAL = 2;
+ GREATER_THAN = 3;
+ GREATER_THAN_OR_EQUAL = 4;
+ EQUAL = 5;
+ IN = 6;
+ EXISTS = 7;
+ }
+
+ required Operator op = 6;
+ repeated Property property = 14;
+ }
+
+ optional string search_query = 8;
+
+ repeated group Order = 9 {
+ enum Direction {
+ ASCENDING = 1;
+ DESCENDING = 2;
+ }
+
+ required string property = 10;
+ optional Direction direction = 11 [default = ASCENDING];
+ }
+
+ enum Hint {
+ ORDER_FIRST = 1;
+ ANCESTOR_FIRST = 2;
+ FILTER_FIRST = 3;
+ }
+ optional Hint hint = 18;
+
+ optional int32 count = 23;
+
+ optional int32 offset = 12 [default = 0];
+
+ optional int32 limit = 16;
+
+ optional CompiledCursor compiled_cursor = 30;
+ optional CompiledCursor end_compiled_cursor = 31;
+
+ repeated CompositeIndex composite_index = 19;
+
+ optional bool require_perfect_plan = 20 [default = false];
+
+ optional bool keys_only = 21 [default = false];
+
+ optional Transaction transaction = 22;
+
+ optional bool compile = 25 [default = false];
+
+ optional int64 failover_ms = 26;
+
+ optional bool strong = 32;
+
+ repeated string property_name = 33;
+
+ repeated string group_by_property_name = 34;
+
+ optional bool distinct = 24;
+
+ optional int64 min_safe_time_seconds = 35;
+
+ repeated string safe_replica_name = 36;
+
+ optional bool persist_offset = 37 [default=false];
+}
+
+message CompiledQuery {
+ required group PrimaryScan = 1 {
+ optional string index_name = 2;
+
+ optional string start_key = 3;
+ optional bool start_inclusive = 4;
+ optional string end_key = 5;
+ optional bool end_inclusive = 6;
+
+ repeated string start_postfix_value = 22;
+ repeated string end_postfix_value = 23;
+
+ optional int64 end_unapplied_log_timestamp_us = 19;
+ }
+
+ repeated group MergeJoinScan = 7 {
+ required string index_name = 8;
+
+ repeated string prefix_value = 9;
+
+ optional bool value_prefix = 20 [default=false];
+ }
+
+ optional Index index_def = 21;
+
+ optional int32 offset = 10 [default = 0];
+
+ optional int32 limit = 11;
+
+ required bool keys_only = 12;
+
+ repeated string property_name = 24;
+
+ optional int32 distinct_infix_size = 25;
+
+ optional group EntityFilter = 13 {
+ optional bool distinct = 14 [default=false];
+
+ optional string kind = 17;
+ optional Reference ancestor = 18;
+ }
+}
+
+message CompiledCursor {
+ optional group Position = 2 {
+ optional string start_key = 27;
+
+ repeated group IndexValue = 29 {
+ optional string property = 30;
+ required PropertyValue value = 31;
+ }
+
+ optional Reference key = 32;
+
+ optional bool start_inclusive = 28 [default=true];
+ }
+}
+
+message Cursor {
+ required fixed64 cursor = 1;
+
+ optional string app = 2;
+}
+
+message Error {
+ enum ErrorCode {
+ BAD_REQUEST = 1;
+ CONCURRENT_TRANSACTION = 2;
+ INTERNAL_ERROR = 3;
+ NEED_INDEX = 4;
+ TIMEOUT = 5;
+ PERMISSION_DENIED = 6;
+ BIGTABLE_ERROR = 7;
+ COMMITTED_BUT_STILL_APPLYING = 8;
+ CAPABILITY_DISABLED = 9;
+ TRY_ALTERNATE_BACKEND = 10;
+ SAFE_TIME_TOO_OLD = 11;
+ }
+}
+
+message Cost {
+ optional int32 index_writes = 1;
+ optional int32 index_write_bytes = 2;
+ optional int32 entity_writes = 3;
+ optional int32 entity_write_bytes = 4;
+ optional group CommitCost = 5 {
+ optional int32 requested_entity_puts = 6;
+ optional int32 requested_entity_deletes = 7;
+ };
+ optional int32 approximate_storage_delta = 8;
+ optional int32 id_sequence_updates = 9;
+}
+
+message GetRequest {
+ optional InternalHeader header = 6;
+
+ repeated Reference key = 1;
+ optional Transaction transaction = 2;
+
+ optional int64 failover_ms = 3;
+
+ optional bool strong = 4;
+
+ optional bool allow_deferred = 5 [default=false];
+}
+
+message GetResponse {
+ repeated group Entity = 1 {
+ optional EntityProto entity = 2;
+ optional Reference key = 4;
+
+ optional int64 version = 3;
+ }
+
+ repeated Reference deferred = 5;
+
+ optional bool in_order = 6 [default=true];
+}
+
+message PutRequest {
+ optional InternalHeader header = 11;
+
+ repeated EntityProto entity = 1;
+ optional Transaction transaction = 2;
+ repeated CompositeIndex composite_index = 3;
+
+ optional bool trusted = 4 [default = false];
+
+ optional bool force = 7 [default = false];
+
+ optional bool mark_changes = 8 [default = false];
+ repeated Snapshot snapshot = 9;
+
+ enum AutoIdPolicy {
+ CURRENT = 0;
+ SEQUENTIAL = 1;
+ }
+ optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
+}
+
+message PutResponse {
+ repeated Reference key = 1;
+ optional Cost cost = 2;
+ repeated int64 version = 3;
+}
+
+message TouchRequest {
+ optional InternalHeader header = 10;
+
+ repeated Reference key = 1;
+ repeated CompositeIndex composite_index = 2;
+ optional bool force = 3 [default = false];
+ repeated Snapshot snapshot = 9;
+}
+
+message TouchResponse {
+ optional Cost cost = 1;
+}
+
+message DeleteRequest {
+ optional InternalHeader header = 10;
+
+ repeated Reference key = 6;
+ optional Transaction transaction = 5;
+
+ optional bool trusted = 4 [default = false];
+
+ optional bool force = 7 [default = false];
+
+ optional bool mark_changes = 8 [default = false];
+ repeated Snapshot snapshot = 9;
+}
+
+message DeleteResponse {
+ optional Cost cost = 1;
+ repeated int64 version = 3;
+}
+
+message NextRequest {
+ optional InternalHeader header = 5;
+
+ required Cursor cursor = 1;
+ optional int32 count = 2;
+
+ optional int32 offset = 4 [default = 0];
+
+ optional bool compile = 3 [default = false];
+}
+
+message QueryResult {
+ optional Cursor cursor = 1;
+
+ repeated EntityProto result = 2;
+
+ optional int32 skipped_results = 7;
+
+ required bool more_results = 3;
+
+ optional bool keys_only = 4;
+
+ optional bool index_only = 9;
+
+ optional bool small_ops = 10;
+
+ optional CompiledQuery compiled_query = 5;
+
+ optional CompiledCursor compiled_cursor = 6;
+
+ repeated CompositeIndex index = 8;
+
+ repeated int64 version = 11;
+}
+
+message AllocateIdsRequest {
+ optional InternalHeader header = 4;
+
+ optional Reference model_key = 1;
+
+ optional int64 size = 2;
+
+ optional int64 max = 3;
+
+ repeated Reference reserve = 5;
+}
+
+message AllocateIdsResponse {
+ required int64 start = 1;
+ required int64 end = 2;
+ optional Cost cost = 3;
+}
+
+message CompositeIndices {
+ repeated CompositeIndex index = 1;
+}
+
+message AddActionsRequest {
+ optional InternalHeader header = 3;
+
+ required Transaction transaction = 1;
+ repeated Action action = 2;
+}
+
+message AddActionsResponse {
+}
+
+message BeginTransactionRequest {
+ optional InternalHeader header = 3;
+
+ required string app = 1;
+ optional bool allow_multiple_eg = 2 [default = false];
+}
+
+message CommitResponse {
+ optional Cost cost = 1;
+
+ repeated group Version = 3 {
+ required Reference root_entity_key = 4;
+ required int64 version = 5;
+ }
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
new file mode 100644
index 0000000..d538701
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity.go
@@ -0,0 +1,14 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import netcontext "golang.org/x/net/context"
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+func AppID(c netcontext.Context) string {
+ return appID(FullyQualifiedAppID(c))
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
new file mode 100644
index 0000000..e6b9227
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_classic.go
@@ -0,0 +1,27 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+ "appengine"
+
+ netcontext "golang.org/x/net/context"
+)
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+ return appengine.DefaultVersionHostname(fromContext(ctx))
+}
+
+func RequestID(ctx netcontext.Context) string { return appengine.RequestID(fromContext(ctx)) }
+func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
+func ServerSoftware() string { return appengine.ServerSoftware() }
+func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) }
+func VersionID(ctx netcontext.Context) string { return appengine.VersionID(fromContext(ctx)) }
+func InstanceID() string { return appengine.InstanceID() }
+func IsDevAppServer() bool { return appengine.IsDevAppServer() }
+
+func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() }
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
new file mode 100644
index 0000000..ebe68b7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_vm.go
@@ -0,0 +1,97 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "net/http"
+ "os"
+
+ netcontext "golang.org/x/net/context"
+)
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+const (
+ hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
+ hRequestLogId = "X-AppEngine-Request-Log-Id"
+ hDatacenter = "X-AppEngine-Datacenter"
+)
+
+func ctxHeaders(ctx netcontext.Context) http.Header {
+ return fromContext(ctx).Request().Header
+}
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hDefaultVersionHostname)
+}
+
+func RequestID(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hRequestLogId)
+}
+
+func Datacenter(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hDatacenter)
+}
+
+func ServerSoftware() string {
+ // TODO(dsymonds): Remove fallback when we've verified this.
+ if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
+ return s
+ }
+ return "Google App Engine/1.x.x"
+}
+
+// TODO(dsymonds): Remove the metadata fetches.
+
+func ModuleName(_ netcontext.Context) string {
+ if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
+ return s
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_name"))
+}
+
+func VersionID(_ netcontext.Context) string {
+ if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
+ return s1 + "." + s2
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
+}
+
+func InstanceID() string {
+ if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
+ return s
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
+}
+
+func partitionlessAppID() string {
+ // gae_project has everything except the partition prefix.
+ appID := os.Getenv("GAE_LONG_APP_ID")
+ if appID == "" {
+ appID = string(mustGetMetadata("instance/attributes/gae_project"))
+ }
+ return appID
+}
+
+func fullyQualifiedAppID(_ netcontext.Context) string {
+ appID := partitionlessAppID()
+
+ part := os.Getenv("GAE_PARTITION")
+ if part == "" {
+ part = string(mustGetMetadata("instance/attributes/gae_partition"))
+ }
+
+ if part != "" {
+ appID = part + "~" + appID
+ }
+ return appID
+}
+
+func IsDevAppServer() bool {
+ return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
+}
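Most of the identity lookups above share one shape: prefer an environment variable set by the runtime and consult the GCE metadata server only when it is empty. A hedged sketch of that pattern, using a helper name of my own choosing (envOrMetadata) and the package's existing mustGetMetadata:

	func envOrMetadata(envKey, metadataPath string) string {
		// Environment variables win; the metadata server is only a fallback.
		if s := os.Getenv(envKey); s != "" {
			return s
		}
		return string(mustGetMetadata(metadataPath))
	}

	// e.g. envOrMetadata("GAE_MODULE_NAME", "instance/attributes/gae_backend_name")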
diff --git a/vendor/google.golang.org/appengine/internal/image/images_service.pb.go b/vendor/google.golang.org/appengine/internal/image/images_service.pb.go
new file mode 100644
index 0000000..ba7c722
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/image/images_service.pb.go
@@ -0,0 +1,845 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/image/images_service.proto
+// DO NOT EDIT!
+
+/*
+Package image is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/image/images_service.proto
+
+It has these top-level messages:
+ ImagesServiceError
+ ImagesServiceTransform
+ Transform
+ ImageData
+ InputSettings
+ OutputSettings
+ ImagesTransformRequest
+ ImagesTransformResponse
+ CompositeImageOptions
+ ImagesCanvas
+ ImagesCompositeRequest
+ ImagesCompositeResponse
+ ImagesHistogramRequest
+ ImagesHistogram
+ ImagesHistogramResponse
+ ImagesGetUrlBaseRequest
+ ImagesGetUrlBaseResponse
+ ImagesDeleteUrlBaseRequest
+ ImagesDeleteUrlBaseResponse
+*/
+package image
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type ImagesServiceError_ErrorCode int32
+
+const (
+ ImagesServiceError_UNSPECIFIED_ERROR ImagesServiceError_ErrorCode = 1
+ ImagesServiceError_BAD_TRANSFORM_DATA ImagesServiceError_ErrorCode = 2
+ ImagesServiceError_NOT_IMAGE ImagesServiceError_ErrorCode = 3
+ ImagesServiceError_BAD_IMAGE_DATA ImagesServiceError_ErrorCode = 4
+ ImagesServiceError_IMAGE_TOO_LARGE ImagesServiceError_ErrorCode = 5
+ ImagesServiceError_INVALID_BLOB_KEY ImagesServiceError_ErrorCode = 6
+ ImagesServiceError_ACCESS_DENIED ImagesServiceError_ErrorCode = 7
+ ImagesServiceError_OBJECT_NOT_FOUND ImagesServiceError_ErrorCode = 8
+)
+
+var ImagesServiceError_ErrorCode_name = map[int32]string{
+ 1: "UNSPECIFIED_ERROR",
+ 2: "BAD_TRANSFORM_DATA",
+ 3: "NOT_IMAGE",
+ 4: "BAD_IMAGE_DATA",
+ 5: "IMAGE_TOO_LARGE",
+ 6: "INVALID_BLOB_KEY",
+ 7: "ACCESS_DENIED",
+ 8: "OBJECT_NOT_FOUND",
+}
+var ImagesServiceError_ErrorCode_value = map[string]int32{
+ "UNSPECIFIED_ERROR": 1,
+ "BAD_TRANSFORM_DATA": 2,
+ "NOT_IMAGE": 3,
+ "BAD_IMAGE_DATA": 4,
+ "IMAGE_TOO_LARGE": 5,
+ "INVALID_BLOB_KEY": 6,
+ "ACCESS_DENIED": 7,
+ "OBJECT_NOT_FOUND": 8,
+}
+
+func (x ImagesServiceError_ErrorCode) Enum() *ImagesServiceError_ErrorCode {
+ p := new(ImagesServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x ImagesServiceError_ErrorCode) String() string {
+ return proto.EnumName(ImagesServiceError_ErrorCode_name, int32(x))
+}
+func (x *ImagesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ImagesServiceError_ErrorCode_value, data, "ImagesServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ImagesServiceError_ErrorCode(value)
+ return nil
+}
+
+type ImagesServiceTransform_Type int32
+
+const (
+ ImagesServiceTransform_RESIZE ImagesServiceTransform_Type = 1
+ ImagesServiceTransform_ROTATE ImagesServiceTransform_Type = 2
+ ImagesServiceTransform_HORIZONTAL_FLIP ImagesServiceTransform_Type = 3
+ ImagesServiceTransform_VERTICAL_FLIP ImagesServiceTransform_Type = 4
+ ImagesServiceTransform_CROP ImagesServiceTransform_Type = 5
+ ImagesServiceTransform_IM_FEELING_LUCKY ImagesServiceTransform_Type = 6
+)
+
+var ImagesServiceTransform_Type_name = map[int32]string{
+ 1: "RESIZE",
+ 2: "ROTATE",
+ 3: "HORIZONTAL_FLIP",
+ 4: "VERTICAL_FLIP",
+ 5: "CROP",
+ 6: "IM_FEELING_LUCKY",
+}
+var ImagesServiceTransform_Type_value = map[string]int32{
+ "RESIZE": 1,
+ "ROTATE": 2,
+ "HORIZONTAL_FLIP": 3,
+ "VERTICAL_FLIP": 4,
+ "CROP": 5,
+ "IM_FEELING_LUCKY": 6,
+}
+
+func (x ImagesServiceTransform_Type) Enum() *ImagesServiceTransform_Type {
+ p := new(ImagesServiceTransform_Type)
+ *p = x
+ return p
+}
+func (x ImagesServiceTransform_Type) String() string {
+ return proto.EnumName(ImagesServiceTransform_Type_name, int32(x))
+}
+func (x *ImagesServiceTransform_Type) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ImagesServiceTransform_Type_value, data, "ImagesServiceTransform_Type")
+ if err != nil {
+ return err
+ }
+ *x = ImagesServiceTransform_Type(value)
+ return nil
+}
+
+type InputSettings_ORIENTATION_CORRECTION_TYPE int32
+
+const (
+ InputSettings_UNCHANGED_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 0
+ InputSettings_CORRECT_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 1
+)
+
+var InputSettings_ORIENTATION_CORRECTION_TYPE_name = map[int32]string{
+ 0: "UNCHANGED_ORIENTATION",
+ 1: "CORRECT_ORIENTATION",
+}
+var InputSettings_ORIENTATION_CORRECTION_TYPE_value = map[string]int32{
+ "UNCHANGED_ORIENTATION": 0,
+ "CORRECT_ORIENTATION": 1,
+}
+
+func (x InputSettings_ORIENTATION_CORRECTION_TYPE) Enum() *InputSettings_ORIENTATION_CORRECTION_TYPE {
+ p := new(InputSettings_ORIENTATION_CORRECTION_TYPE)
+ *p = x
+ return p
+}
+func (x InputSettings_ORIENTATION_CORRECTION_TYPE) String() string {
+ return proto.EnumName(InputSettings_ORIENTATION_CORRECTION_TYPE_name, int32(x))
+}
+func (x *InputSettings_ORIENTATION_CORRECTION_TYPE) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(InputSettings_ORIENTATION_CORRECTION_TYPE_value, data, "InputSettings_ORIENTATION_CORRECTION_TYPE")
+ if err != nil {
+ return err
+ }
+ *x = InputSettings_ORIENTATION_CORRECTION_TYPE(value)
+ return nil
+}
+
+type OutputSettings_MIME_TYPE int32
+
+const (
+ OutputSettings_PNG OutputSettings_MIME_TYPE = 0
+ OutputSettings_JPEG OutputSettings_MIME_TYPE = 1
+ OutputSettings_WEBP OutputSettings_MIME_TYPE = 2
+)
+
+var OutputSettings_MIME_TYPE_name = map[int32]string{
+ 0: "PNG",
+ 1: "JPEG",
+ 2: "WEBP",
+}
+var OutputSettings_MIME_TYPE_value = map[string]int32{
+ "PNG": 0,
+ "JPEG": 1,
+ "WEBP": 2,
+}
+
+func (x OutputSettings_MIME_TYPE) Enum() *OutputSettings_MIME_TYPE {
+ p := new(OutputSettings_MIME_TYPE)
+ *p = x
+ return p
+}
+func (x OutputSettings_MIME_TYPE) String() string {
+ return proto.EnumName(OutputSettings_MIME_TYPE_name, int32(x))
+}
+func (x *OutputSettings_MIME_TYPE) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(OutputSettings_MIME_TYPE_value, data, "OutputSettings_MIME_TYPE")
+ if err != nil {
+ return err
+ }
+ *x = OutputSettings_MIME_TYPE(value)
+ return nil
+}
+
+type CompositeImageOptions_ANCHOR int32
+
+const (
+ CompositeImageOptions_TOP_LEFT CompositeImageOptions_ANCHOR = 0
+ CompositeImageOptions_TOP CompositeImageOptions_ANCHOR = 1
+ CompositeImageOptions_TOP_RIGHT CompositeImageOptions_ANCHOR = 2
+ CompositeImageOptions_LEFT CompositeImageOptions_ANCHOR = 3
+ CompositeImageOptions_CENTER CompositeImageOptions_ANCHOR = 4
+ CompositeImageOptions_RIGHT CompositeImageOptions_ANCHOR = 5
+ CompositeImageOptions_BOTTOM_LEFT CompositeImageOptions_ANCHOR = 6
+ CompositeImageOptions_BOTTOM CompositeImageOptions_ANCHOR = 7
+ CompositeImageOptions_BOTTOM_RIGHT CompositeImageOptions_ANCHOR = 8
+)
+
+var CompositeImageOptions_ANCHOR_name = map[int32]string{
+ 0: "TOP_LEFT",
+ 1: "TOP",
+ 2: "TOP_RIGHT",
+ 3: "LEFT",
+ 4: "CENTER",
+ 5: "RIGHT",
+ 6: "BOTTOM_LEFT",
+ 7: "BOTTOM",
+ 8: "BOTTOM_RIGHT",
+}
+var CompositeImageOptions_ANCHOR_value = map[string]int32{
+ "TOP_LEFT": 0,
+ "TOP": 1,
+ "TOP_RIGHT": 2,
+ "LEFT": 3,
+ "CENTER": 4,
+ "RIGHT": 5,
+ "BOTTOM_LEFT": 6,
+ "BOTTOM": 7,
+ "BOTTOM_RIGHT": 8,
+}
+
+func (x CompositeImageOptions_ANCHOR) Enum() *CompositeImageOptions_ANCHOR {
+ p := new(CompositeImageOptions_ANCHOR)
+ *p = x
+ return p
+}
+func (x CompositeImageOptions_ANCHOR) String() string {
+ return proto.EnumName(CompositeImageOptions_ANCHOR_name, int32(x))
+}
+func (x *CompositeImageOptions_ANCHOR) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CompositeImageOptions_ANCHOR_value, data, "CompositeImageOptions_ANCHOR")
+ if err != nil {
+ return err
+ }
+ *x = CompositeImageOptions_ANCHOR(value)
+ return nil
+}
+
+type ImagesServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesServiceError) Reset() { *m = ImagesServiceError{} }
+func (m *ImagesServiceError) String() string { return proto.CompactTextString(m) }
+func (*ImagesServiceError) ProtoMessage() {}
+
+type ImagesServiceTransform struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesServiceTransform) Reset() { *m = ImagesServiceTransform{} }
+func (m *ImagesServiceTransform) String() string { return proto.CompactTextString(m) }
+func (*ImagesServiceTransform) ProtoMessage() {}
+
+type Transform struct {
+ Width *int32 `protobuf:"varint,1,opt,name=width" json:"width,omitempty"`
+ Height *int32 `protobuf:"varint,2,opt,name=height" json:"height,omitempty"`
+ CropToFit *bool `protobuf:"varint,11,opt,name=crop_to_fit,def=0" json:"crop_to_fit,omitempty"`
+ CropOffsetX *float32 `protobuf:"fixed32,12,opt,name=crop_offset_x,def=0.5" json:"crop_offset_x,omitempty"`
+ CropOffsetY *float32 `protobuf:"fixed32,13,opt,name=crop_offset_y,def=0.5" json:"crop_offset_y,omitempty"`
+ Rotate *int32 `protobuf:"varint,3,opt,name=rotate,def=0" json:"rotate,omitempty"`
+ HorizontalFlip *bool `protobuf:"varint,4,opt,name=horizontal_flip,def=0" json:"horizontal_flip,omitempty"`
+ VerticalFlip *bool `protobuf:"varint,5,opt,name=vertical_flip,def=0" json:"vertical_flip,omitempty"`
+ CropLeftX *float32 `protobuf:"fixed32,6,opt,name=crop_left_x,def=0" json:"crop_left_x,omitempty"`
+ CropTopY *float32 `protobuf:"fixed32,7,opt,name=crop_top_y,def=0" json:"crop_top_y,omitempty"`
+ CropRightX *float32 `protobuf:"fixed32,8,opt,name=crop_right_x,def=1" json:"crop_right_x,omitempty"`
+ CropBottomY *float32 `protobuf:"fixed32,9,opt,name=crop_bottom_y,def=1" json:"crop_bottom_y,omitempty"`
+ Autolevels *bool `protobuf:"varint,10,opt,name=autolevels,def=0" json:"autolevels,omitempty"`
+ AllowStretch *bool `protobuf:"varint,14,opt,name=allow_stretch,def=0" json:"allow_stretch,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Transform) Reset() { *m = Transform{} }
+func (m *Transform) String() string { return proto.CompactTextString(m) }
+func (*Transform) ProtoMessage() {}
+
+const Default_Transform_CropToFit bool = false
+const Default_Transform_CropOffsetX float32 = 0.5
+const Default_Transform_CropOffsetY float32 = 0.5
+const Default_Transform_Rotate int32 = 0
+const Default_Transform_HorizontalFlip bool = false
+const Default_Transform_VerticalFlip bool = false
+const Default_Transform_CropLeftX float32 = 0
+const Default_Transform_CropTopY float32 = 0
+const Default_Transform_CropRightX float32 = 1
+const Default_Transform_CropBottomY float32 = 1
+const Default_Transform_Autolevels bool = false
+const Default_Transform_AllowStretch bool = false
+
+func (m *Transform) GetWidth() int32 {
+ if m != nil && m.Width != nil {
+ return *m.Width
+ }
+ return 0
+}
+
+func (m *Transform) GetHeight() int32 {
+ if m != nil && m.Height != nil {
+ return *m.Height
+ }
+ return 0
+}
+
+func (m *Transform) GetCropToFit() bool {
+ if m != nil && m.CropToFit != nil {
+ return *m.CropToFit
+ }
+ return Default_Transform_CropToFit
+}
+
+func (m *Transform) GetCropOffsetX() float32 {
+ if m != nil && m.CropOffsetX != nil {
+ return *m.CropOffsetX
+ }
+ return Default_Transform_CropOffsetX
+}
+
+func (m *Transform) GetCropOffsetY() float32 {
+ if m != nil && m.CropOffsetY != nil {
+ return *m.CropOffsetY
+ }
+ return Default_Transform_CropOffsetY
+}
+
+func (m *Transform) GetRotate() int32 {
+ if m != nil && m.Rotate != nil {
+ return *m.Rotate
+ }
+ return Default_Transform_Rotate
+}
+
+func (m *Transform) GetHorizontalFlip() bool {
+ if m != nil && m.HorizontalFlip != nil {
+ return *m.HorizontalFlip
+ }
+ return Default_Transform_HorizontalFlip
+}
+
+func (m *Transform) GetVerticalFlip() bool {
+ if m != nil && m.VerticalFlip != nil {
+ return *m.VerticalFlip
+ }
+ return Default_Transform_VerticalFlip
+}
+
+func (m *Transform) GetCropLeftX() float32 {
+ if m != nil && m.CropLeftX != nil {
+ return *m.CropLeftX
+ }
+ return Default_Transform_CropLeftX
+}
+
+func (m *Transform) GetCropTopY() float32 {
+ if m != nil && m.CropTopY != nil {
+ return *m.CropTopY
+ }
+ return Default_Transform_CropTopY
+}
+
+func (m *Transform) GetCropRightX() float32 {
+ if m != nil && m.CropRightX != nil {
+ return *m.CropRightX
+ }
+ return Default_Transform_CropRightX
+}
+
+func (m *Transform) GetCropBottomY() float32 {
+ if m != nil && m.CropBottomY != nil {
+ return *m.CropBottomY
+ }
+ return Default_Transform_CropBottomY
+}
+
+func (m *Transform) GetAutolevels() bool {
+ if m != nil && m.Autolevels != nil {
+ return *m.Autolevels
+ }
+ return Default_Transform_Autolevels
+}
+
+func (m *Transform) GetAllowStretch() bool {
+ if m != nil && m.AllowStretch != nil {
+ return *m.AllowStretch
+ }
+ return Default_Transform_AllowStretch
+}
+
+type ImageData struct {
+ Content []byte `protobuf:"bytes,1,req,name=content" json:"content,omitempty"`
+ BlobKey *string `protobuf:"bytes,2,opt,name=blob_key" json:"blob_key,omitempty"`
+ Width *int32 `protobuf:"varint,3,opt,name=width" json:"width,omitempty"`
+ Height *int32 `protobuf:"varint,4,opt,name=height" json:"height,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImageData) Reset() { *m = ImageData{} }
+func (m *ImageData) String() string { return proto.CompactTextString(m) }
+func (*ImageData) ProtoMessage() {}
+
+func (m *ImageData) GetContent() []byte {
+ if m != nil {
+ return m.Content
+ }
+ return nil
+}
+
+func (m *ImageData) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+func (m *ImageData) GetWidth() int32 {
+ if m != nil && m.Width != nil {
+ return *m.Width
+ }
+ return 0
+}
+
+func (m *ImageData) GetHeight() int32 {
+ if m != nil && m.Height != nil {
+ return *m.Height
+ }
+ return 0
+}
+
+type InputSettings struct {
+ CorrectExifOrientation *InputSettings_ORIENTATION_CORRECTION_TYPE `protobuf:"varint,1,opt,name=correct_exif_orientation,enum=appengine.InputSettings_ORIENTATION_CORRECTION_TYPE,def=0" json:"correct_exif_orientation,omitempty"`
+ ParseMetadata *bool `protobuf:"varint,2,opt,name=parse_metadata,def=0" json:"parse_metadata,omitempty"`
+ TransparentSubstitutionRgb *int32 `protobuf:"varint,3,opt,name=transparent_substitution_rgb" json:"transparent_substitution_rgb,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InputSettings) Reset() { *m = InputSettings{} }
+func (m *InputSettings) String() string { return proto.CompactTextString(m) }
+func (*InputSettings) ProtoMessage() {}
+
+const Default_InputSettings_CorrectExifOrientation InputSettings_ORIENTATION_CORRECTION_TYPE = InputSettings_UNCHANGED_ORIENTATION
+const Default_InputSettings_ParseMetadata bool = false
+
+func (m *InputSettings) GetCorrectExifOrientation() InputSettings_ORIENTATION_CORRECTION_TYPE {
+ if m != nil && m.CorrectExifOrientation != nil {
+ return *m.CorrectExifOrientation
+ }
+ return Default_InputSettings_CorrectExifOrientation
+}
+
+func (m *InputSettings) GetParseMetadata() bool {
+ if m != nil && m.ParseMetadata != nil {
+ return *m.ParseMetadata
+ }
+ return Default_InputSettings_ParseMetadata
+}
+
+func (m *InputSettings) GetTransparentSubstitutionRgb() int32 {
+ if m != nil && m.TransparentSubstitutionRgb != nil {
+ return *m.TransparentSubstitutionRgb
+ }
+ return 0
+}
+
+type OutputSettings struct {
+ MimeType *OutputSettings_MIME_TYPE `protobuf:"varint,1,opt,name=mime_type,enum=appengine.OutputSettings_MIME_TYPE,def=0" json:"mime_type,omitempty"`
+ Quality *int32 `protobuf:"varint,2,opt,name=quality" json:"quality,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OutputSettings) Reset() { *m = OutputSettings{} }
+func (m *OutputSettings) String() string { return proto.CompactTextString(m) }
+func (*OutputSettings) ProtoMessage() {}
+
+const Default_OutputSettings_MimeType OutputSettings_MIME_TYPE = OutputSettings_PNG
+
+func (m *OutputSettings) GetMimeType() OutputSettings_MIME_TYPE {
+ if m != nil && m.MimeType != nil {
+ return *m.MimeType
+ }
+ return Default_OutputSettings_MimeType
+}
+
+func (m *OutputSettings) GetQuality() int32 {
+ if m != nil && m.Quality != nil {
+ return *m.Quality
+ }
+ return 0
+}
+
+type ImagesTransformRequest struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ Transform []*Transform `protobuf:"bytes,2,rep,name=transform" json:"transform,omitempty"`
+ Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"`
+ Input *InputSettings `protobuf:"bytes,4,opt,name=input" json:"input,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesTransformRequest) Reset() { *m = ImagesTransformRequest{} }
+func (m *ImagesTransformRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesTransformRequest) ProtoMessage() {}
+
+func (m *ImagesTransformRequest) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+func (m *ImagesTransformRequest) GetTransform() []*Transform {
+ if m != nil {
+ return m.Transform
+ }
+ return nil
+}
+
+func (m *ImagesTransformRequest) GetOutput() *OutputSettings {
+ if m != nil {
+ return m.Output
+ }
+ return nil
+}
+
+func (m *ImagesTransformRequest) GetInput() *InputSettings {
+ if m != nil {
+ return m.Input
+ }
+ return nil
+}
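+
+// Illustrative sketch only: a single resize-to-JPEG request might be assembled
+// from the types above roughly as follows, assuming `data` holds the raw image
+// bytes:
+//
+//	req := &ImagesTransformRequest{
+//		Image:     &ImageData{Content: data},
+//		Transform: []*Transform{{Width: proto.Int32(800), Height: proto.Int32(600)}},
+//		Output:    &OutputSettings{MimeType: OutputSettings_JPEG.Enum()},
+//	}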
+
+type ImagesTransformResponse struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ SourceMetadata *string `protobuf:"bytes,2,opt,name=source_metadata" json:"source_metadata,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesTransformResponse) Reset() { *m = ImagesTransformResponse{} }
+func (m *ImagesTransformResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesTransformResponse) ProtoMessage() {}
+
+func (m *ImagesTransformResponse) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+func (m *ImagesTransformResponse) GetSourceMetadata() string {
+ if m != nil && m.SourceMetadata != nil {
+ return *m.SourceMetadata
+ }
+ return ""
+}
+
+type CompositeImageOptions struct {
+ SourceIndex *int32 `protobuf:"varint,1,req,name=source_index" json:"source_index,omitempty"`
+ XOffset *int32 `protobuf:"varint,2,req,name=x_offset" json:"x_offset,omitempty"`
+ YOffset *int32 `protobuf:"varint,3,req,name=y_offset" json:"y_offset,omitempty"`
+ Opacity *float32 `protobuf:"fixed32,4,req,name=opacity" json:"opacity,omitempty"`
+ Anchor *CompositeImageOptions_ANCHOR `protobuf:"varint,5,req,name=anchor,enum=appengine.CompositeImageOptions_ANCHOR" json:"anchor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeImageOptions) Reset() { *m = CompositeImageOptions{} }
+func (m *CompositeImageOptions) String() string { return proto.CompactTextString(m) }
+func (*CompositeImageOptions) ProtoMessage() {}
+
+func (m *CompositeImageOptions) GetSourceIndex() int32 {
+ if m != nil && m.SourceIndex != nil {
+ return *m.SourceIndex
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetXOffset() int32 {
+ if m != nil && m.XOffset != nil {
+ return *m.XOffset
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetYOffset() int32 {
+ if m != nil && m.YOffset != nil {
+ return *m.YOffset
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetOpacity() float32 {
+ if m != nil && m.Opacity != nil {
+ return *m.Opacity
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetAnchor() CompositeImageOptions_ANCHOR {
+ if m != nil && m.Anchor != nil {
+ return *m.Anchor
+ }
+ return CompositeImageOptions_TOP_LEFT
+}
+
+type ImagesCanvas struct {
+ Width *int32 `protobuf:"varint,1,req,name=width" json:"width,omitempty"`
+ Height *int32 `protobuf:"varint,2,req,name=height" json:"height,omitempty"`
+ Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"`
+ Color *int32 `protobuf:"varint,4,opt,name=color,def=-1" json:"color,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesCanvas) Reset() { *m = ImagesCanvas{} }
+func (m *ImagesCanvas) String() string { return proto.CompactTextString(m) }
+func (*ImagesCanvas) ProtoMessage() {}
+
+const Default_ImagesCanvas_Color int32 = -1
+
+func (m *ImagesCanvas) GetWidth() int32 {
+ if m != nil && m.Width != nil {
+ return *m.Width
+ }
+ return 0
+}
+
+func (m *ImagesCanvas) GetHeight() int32 {
+ if m != nil && m.Height != nil {
+ return *m.Height
+ }
+ return 0
+}
+
+func (m *ImagesCanvas) GetOutput() *OutputSettings {
+ if m != nil {
+ return m.Output
+ }
+ return nil
+}
+
+func (m *ImagesCanvas) GetColor() int32 {
+ if m != nil && m.Color != nil {
+ return *m.Color
+ }
+ return Default_ImagesCanvas_Color
+}
+
+type ImagesCompositeRequest struct {
+ Image []*ImageData `protobuf:"bytes,1,rep,name=image" json:"image,omitempty"`
+ Options []*CompositeImageOptions `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"`
+ Canvas *ImagesCanvas `protobuf:"bytes,3,req,name=canvas" json:"canvas,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesCompositeRequest) Reset() { *m = ImagesCompositeRequest{} }
+func (m *ImagesCompositeRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesCompositeRequest) ProtoMessage() {}
+
+func (m *ImagesCompositeRequest) GetImage() []*ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+func (m *ImagesCompositeRequest) GetOptions() []*CompositeImageOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *ImagesCompositeRequest) GetCanvas() *ImagesCanvas {
+ if m != nil {
+ return m.Canvas
+ }
+ return nil
+}
+
+type ImagesCompositeResponse struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesCompositeResponse) Reset() { *m = ImagesCompositeResponse{} }
+func (m *ImagesCompositeResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesCompositeResponse) ProtoMessage() {}
+
+func (m *ImagesCompositeResponse) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+type ImagesHistogramRequest struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesHistogramRequest) Reset() { *m = ImagesHistogramRequest{} }
+func (m *ImagesHistogramRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesHistogramRequest) ProtoMessage() {}
+
+func (m *ImagesHistogramRequest) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+type ImagesHistogram struct {
+ Red []int32 `protobuf:"varint,1,rep,name=red" json:"red,omitempty"`
+ Green []int32 `protobuf:"varint,2,rep,name=green" json:"green,omitempty"`
+ Blue []int32 `protobuf:"varint,3,rep,name=blue" json:"blue,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesHistogram) Reset() { *m = ImagesHistogram{} }
+func (m *ImagesHistogram) String() string { return proto.CompactTextString(m) }
+func (*ImagesHistogram) ProtoMessage() {}
+
+func (m *ImagesHistogram) GetRed() []int32 {
+ if m != nil {
+ return m.Red
+ }
+ return nil
+}
+
+func (m *ImagesHistogram) GetGreen() []int32 {
+ if m != nil {
+ return m.Green
+ }
+ return nil
+}
+
+func (m *ImagesHistogram) GetBlue() []int32 {
+ if m != nil {
+ return m.Blue
+ }
+ return nil
+}
+
+type ImagesHistogramResponse struct {
+ Histogram *ImagesHistogram `protobuf:"bytes,1,req,name=histogram" json:"histogram,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesHistogramResponse) Reset() { *m = ImagesHistogramResponse{} }
+func (m *ImagesHistogramResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesHistogramResponse) ProtoMessage() {}
+
+func (m *ImagesHistogramResponse) GetHistogram() *ImagesHistogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+type ImagesGetUrlBaseRequest struct {
+ BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ CreateSecureUrl *bool `protobuf:"varint,2,opt,name=create_secure_url,def=0" json:"create_secure_url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesGetUrlBaseRequest) Reset() { *m = ImagesGetUrlBaseRequest{} }
+func (m *ImagesGetUrlBaseRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesGetUrlBaseRequest) ProtoMessage() {}
+
+const Default_ImagesGetUrlBaseRequest_CreateSecureUrl bool = false
+
+func (m *ImagesGetUrlBaseRequest) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+func (m *ImagesGetUrlBaseRequest) GetCreateSecureUrl() bool {
+ if m != nil && m.CreateSecureUrl != nil {
+ return *m.CreateSecureUrl
+ }
+ return Default_ImagesGetUrlBaseRequest_CreateSecureUrl
+}
+
+type ImagesGetUrlBaseResponse struct {
+ Url *string `protobuf:"bytes,1,req,name=url" json:"url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesGetUrlBaseResponse) Reset() { *m = ImagesGetUrlBaseResponse{} }
+func (m *ImagesGetUrlBaseResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesGetUrlBaseResponse) ProtoMessage() {}
+
+func (m *ImagesGetUrlBaseResponse) GetUrl() string {
+ if m != nil && m.Url != nil {
+ return *m.Url
+ }
+ return ""
+}
+
+type ImagesDeleteUrlBaseRequest struct {
+ BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesDeleteUrlBaseRequest) Reset() { *m = ImagesDeleteUrlBaseRequest{} }
+func (m *ImagesDeleteUrlBaseRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesDeleteUrlBaseRequest) ProtoMessage() {}
+
+func (m *ImagesDeleteUrlBaseRequest) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+type ImagesDeleteUrlBaseResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesDeleteUrlBaseResponse) Reset() { *m = ImagesDeleteUrlBaseResponse{} }
+func (m *ImagesDeleteUrlBaseResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesDeleteUrlBaseResponse) ProtoMessage() {}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/image/images_service.proto b/vendor/google.golang.org/appengine/internal/image/images_service.proto
new file mode 100644
index 0000000..f0d2ed5
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/image/images_service.proto
@@ -0,0 +1,162 @@
+syntax = "proto2";
+option go_package = "image";
+
+package appengine;
+
+message ImagesServiceError {
+ enum ErrorCode {
+ UNSPECIFIED_ERROR = 1;
+ BAD_TRANSFORM_DATA = 2;
+ NOT_IMAGE = 3;
+ BAD_IMAGE_DATA = 4;
+ IMAGE_TOO_LARGE = 5;
+ INVALID_BLOB_KEY = 6;
+ ACCESS_DENIED = 7;
+ OBJECT_NOT_FOUND = 8;
+ }
+}
+
+message ImagesServiceTransform {
+ enum Type {
+ RESIZE = 1;
+ ROTATE = 2;
+ HORIZONTAL_FLIP = 3;
+ VERTICAL_FLIP = 4;
+ CROP = 5;
+ IM_FEELING_LUCKY = 6;
+ }
+}
+
+message Transform {
+ optional int32 width = 1;
+ optional int32 height = 2;
+ optional bool crop_to_fit = 11 [default = false];
+ optional float crop_offset_x = 12 [default = 0.5];
+ optional float crop_offset_y = 13 [default = 0.5];
+
+ optional int32 rotate = 3 [default = 0];
+
+ optional bool horizontal_flip = 4 [default = false];
+
+ optional bool vertical_flip = 5 [default = false];
+
+ optional float crop_left_x = 6 [default = 0.0];
+ optional float crop_top_y = 7 [default = 0.0];
+ optional float crop_right_x = 8 [default = 1.0];
+ optional float crop_bottom_y = 9 [default = 1.0];
+
+ optional bool autolevels = 10 [default = false];
+
+ optional bool allow_stretch = 14 [default = false];
+}
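+
+// Illustrative sketch only: the 0.0-1.0 defaults suggest the crop bounds are
+// fractions of the source image, so a transform entry that crops the left half
+// of an image could look like this in text format:
+//
+//   transform { crop_left_x: 0.0 crop_top_y: 0.0 crop_right_x: 0.5 crop_bottom_y: 1.0 }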
+
+message ImageData {
+ required bytes content = 1 [ctype=CORD];
+ optional string blob_key = 2;
+
+ optional int32 width = 3;
+ optional int32 height = 4;
+}
+
+message InputSettings {
+ enum ORIENTATION_CORRECTION_TYPE {
+ UNCHANGED_ORIENTATION = 0;
+ CORRECT_ORIENTATION = 1;
+ }
+ optional ORIENTATION_CORRECTION_TYPE correct_exif_orientation = 1
+ [default=UNCHANGED_ORIENTATION];
+ optional bool parse_metadata = 2 [default=false];
+ optional int32 transparent_substitution_rgb = 3;
+}
+
+message OutputSettings {
+ enum MIME_TYPE {
+ PNG = 0;
+ JPEG = 1;
+ WEBP = 2;
+ }
+
+ optional MIME_TYPE mime_type = 1 [default=PNG];
+ optional int32 quality = 2;
+}
+
+message ImagesTransformRequest {
+ required ImageData image = 1;
+ repeated Transform transform = 2;
+ required OutputSettings output = 3;
+ optional InputSettings input = 4;
+}
+
+message ImagesTransformResponse {
+ required ImageData image = 1;
+ optional string source_metadata = 2;
+}
+
+message CompositeImageOptions {
+ required int32 source_index = 1;
+ required int32 x_offset = 2;
+ required int32 y_offset = 3;
+ required float opacity = 4;
+
+ enum ANCHOR {
+ TOP_LEFT = 0;
+ TOP = 1;
+ TOP_RIGHT = 2;
+ LEFT = 3;
+ CENTER = 4;
+ RIGHT = 5;
+ BOTTOM_LEFT = 6;
+ BOTTOM = 7;
+ BOTTOM_RIGHT = 8;
+ }
+
+ required ANCHOR anchor = 5;
+}
+
+message ImagesCanvas {
+ required int32 width = 1;
+ required int32 height = 2;
+ required OutputSettings output = 3;
+ optional int32 color = 4 [default=-1];
+}
+
+message ImagesCompositeRequest {
+ repeated ImageData image = 1;
+ repeated CompositeImageOptions options = 2;
+ required ImagesCanvas canvas = 3;
+}
+
+message ImagesCompositeResponse {
+ required ImageData image = 1;
+}
+
+message ImagesHistogramRequest {
+ required ImageData image = 1;
+}
+
+message ImagesHistogram {
+ repeated int32 red = 1;
+ repeated int32 green = 2;
+ repeated int32 blue = 3;
+}
+
+message ImagesHistogramResponse {
+ required ImagesHistogram histogram = 1;
+}
+
+message ImagesGetUrlBaseRequest {
+ required string blob_key = 1;
+
+ optional bool create_secure_url = 2 [default = false];
+}
+
+message ImagesGetUrlBaseResponse {
+ required string url = 1;
+}
+
+message ImagesDeleteUrlBaseRequest {
+ required string blob_key = 1;
+}
+
+message ImagesDeleteUrlBaseResponse {
+}
diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go
new file mode 100644
index 0000000..051ea39
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/internal.go
@@ -0,0 +1,110 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package internal provides support for package appengine.
+//
+// Programs should not use this package directly. Its API is not stable.
+// Use packages appengine and appengine/* instead.
+package internal
+
+import (
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+// errorCodeMaps is a map of service name to the error code map for the service.
+var errorCodeMaps = make(map[string]map[int32]string)
+
+// RegisterErrorCodeMap is called from API implementations to register their
+// error code map. This should only be called from init functions.
+func RegisterErrorCodeMap(service string, m map[int32]string) {
+ errorCodeMaps[service] = m
+}
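+
+// Illustrative sketch only: a service package would typically call this from
+// an init function with its generated error-code map, e.g. (the "images"
+// service name and the pb import alias are assumptions):
+//
+//	func init() {
+//		internal.RegisterErrorCodeMap("images", pb.ImagesServiceError_ErrorCode_name)
+//	}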
+
+type timeoutCodeKey struct {
+ service string
+ code int32
+}
+
+// timeoutCodes is the set of service+code pairs that represent timeouts.
+var timeoutCodes = make(map[timeoutCodeKey]bool)
+
+func RegisterTimeoutErrorCode(service string, code int32) {
+ timeoutCodes[timeoutCodeKey{service, code}] = true
+}
+
+// APIError is the type returned by appengine.Context's Call method
+// when an API call fails in an API-specific way. This may be, for instance,
+// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
+type APIError struct {
+ Service string
+ Detail string
+ Code int32 // API-specific error code
+}
+
+func (e *APIError) Error() string {
+ if e.Code == 0 {
+ if e.Detail == "" {
+ return "APIError <empty>"
+ }
+ return e.Detail
+ }
+ s := fmt.Sprintf("API error %d", e.Code)
+ if m, ok := errorCodeMaps[e.Service]; ok {
+ s += " (" + e.Service + ": " + m[e.Code] + ")"
+ } else {
+ // Shouldn't happen, but provide a bit more detail if it does.
+ s = e.Service + " " + s
+ }
+ if e.Detail != "" {
+ s += ": " + e.Detail
+ }
+ return s
+}
+
+func (e *APIError) IsTimeout() bool {
+ return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
+}
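+
+// Illustrative sketch only: callers that need API-specific handling can
+// inspect the returned error with a type assertion, e.g.:
+//
+//	if apiErr, ok := err.(*APIError); ok && apiErr.IsTimeout() {
+//		// back off and retry the call
+//	}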
+
+// CallError is the type returned by appengine.Context's Call method when an
+// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
+type CallError struct {
+ Detail string
+ Code int32
+ // TODO: Remove this if we get a distinguishable error code.
+ Timeout bool
+}
+
+func (e *CallError) Error() string {
+ var msg string
+ switch remotepb.RpcError_ErrorCode(e.Code) {
+ case remotepb.RpcError_UNKNOWN:
+ return e.Detail
+ case remotepb.RpcError_OVER_QUOTA:
+ msg = "Over quota"
+ case remotepb.RpcError_CAPABILITY_DISABLED:
+ msg = "Capability disabled"
+ case remotepb.RpcError_CANCELLED:
+ msg = "Canceled"
+ default:
+ msg = fmt.Sprintf("Call error %d", e.Code)
+ }
+ s := msg + ": " + e.Detail
+ if e.Timeout {
+ s += " (timeout)"
+ }
+ return s
+}
+
+func (e *CallError) IsTimeout() bool {
+ return e.Timeout
+}
+
+// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
+// The function should be prepared to be called on the same message more than once; it should only modify the
+// RPC request the first time.
+var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
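+
+// Illustrative sketch only: a service package could populate this map from an
+// init function; the "example_service" key and request type below are purely
+// hypothetical:
+//
+//	NamespaceMods["example_service"] = func(m proto.Message, namespace string) {
+//		req := m.(*servicepb.Request) // hypothetical request type with a NameSpace field
+//		if req.NameSpace == nil {
+//			req.NameSpace = &namespace
+//		}
+//	}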
diff --git a/vendor/google.golang.org/appengine/internal/internal_vm_test.go b/vendor/google.golang.org/appengine/internal/internal_vm_test.go
new file mode 100644
index 0000000..f809761
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/internal_vm_test.go
@@ -0,0 +1,60 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+func TestInstallingHealthChecker(t *testing.T) {
+ try := func(desc string, mux *http.ServeMux, wantCode int, wantBody string) {
+ installHealthChecker(mux)
+ srv := httptest.NewServer(mux)
+ defer srv.Close()
+
+ resp, err := http.Get(srv.URL + "/_ah/health")
+ if err != nil {
+ t.Errorf("%s: http.Get: %v", desc, err)
+ return
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Errorf("%s: reading body: %v", desc, err)
+ return
+ }
+
+ if resp.StatusCode != wantCode {
+ t.Errorf("%s: got HTTP %d, want %d", desc, resp.StatusCode, wantCode)
+ return
+ }
+ if wantBody != "" && string(body) != wantBody {
+ t.Errorf("%s: got HTTP body %q, want %q", desc, body, wantBody)
+ return
+ }
+ }
+
+ // If there are no handlers, or only a root handler, a health checker should be installed.
+ try("empty mux", http.NewServeMux(), 200, "ok")
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, "root handler")
+ })
+ try("mux with root handler", mux, 200, "ok")
+
+ // If there's a custom health check handler, one should not be installed.
+ mux = http.NewServeMux()
+ mux.HandleFunc("/_ah/health", func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(418)
+ io.WriteString(w, "I'm short and stout!")
+ })
+ try("mux with custom health checker", mux, 418, "I'm short and stout!")
+}
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
new file mode 100644
index 0000000..20c595b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
@@ -0,0 +1,899 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/log/log_service.proto
+// DO NOT EDIT!
+
+/*
+Package log is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/log/log_service.proto
+
+It has these top-level messages:
+ LogServiceError
+ UserAppLogLine
+ UserAppLogGroup
+ FlushRequest
+ SetStatusRequest
+ LogOffset
+ LogLine
+ RequestLog
+ LogModuleVersion
+ LogReadRequest
+ LogReadResponse
+ LogUsageRecord
+ LogUsageRequest
+ LogUsageResponse
+*/
+package log
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type LogServiceError_ErrorCode int32
+
+const (
+ LogServiceError_OK LogServiceError_ErrorCode = 0
+ LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1
+ LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2
+)
+
+var LogServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_REQUEST",
+ 2: "STORAGE_ERROR",
+}
+var LogServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_REQUEST": 1,
+ "STORAGE_ERROR": 2,
+}
+
+func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {
+ p := new(LogServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x LogServiceError_ErrorCode) String() string {
+ return proto.EnumName(LogServiceError_ErrorCode_name, int32(x))
+}
+func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = LogServiceError_ErrorCode(value)
+ return nil
+}
+
+type LogServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogServiceError) Reset() { *m = LogServiceError{} }
+func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
+func (*LogServiceError) ProtoMessage() {}
+
+type UserAppLogLine struct {
+ TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"`
+ Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+ Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} }
+func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogLine) ProtoMessage() {}
+
+func (m *UserAppLogLine) GetTimestampUsec() int64 {
+ if m != nil && m.TimestampUsec != nil {
+ return *m.TimestampUsec
+ }
+ return 0
+}
+
+func (m *UserAppLogLine) GetLevel() int64 {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return 0
+}
+
+func (m *UserAppLogLine) GetMessage() string {
+ if m != nil && m.Message != nil {
+ return *m.Message
+ }
+ return ""
+}
+
+type UserAppLogGroup struct {
+ LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} }
+func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogGroup) ProtoMessage() {}
+
+func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
+ if m != nil {
+ return m.LogLine
+ }
+ return nil
+}
+
+type FlushRequest struct {
+ Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FlushRequest) Reset() { *m = FlushRequest{} }
+func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
+func (*FlushRequest) ProtoMessage() {}
+
+func (m *FlushRequest) GetLogs() []byte {
+ if m != nil {
+ return m.Logs
+ }
+ return nil
+}
+
+type SetStatusRequest struct {
+ Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} }
+func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
+func (*SetStatusRequest) ProtoMessage() {}
+
+func (m *SetStatusRequest) GetStatus() string {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return ""
+}
+
+type LogOffset struct {
+ RequestId []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogOffset) Reset() { *m = LogOffset{} }
+func (m *LogOffset) String() string { return proto.CompactTextString(m) }
+func (*LogOffset) ProtoMessage() {}
+
+func (m *LogOffset) GetRequestId() []byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+type LogLine struct {
+ Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
+ Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+ LogMessage *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogLine) Reset() { *m = LogLine{} }
+func (m *LogLine) String() string { return proto.CompactTextString(m) }
+func (*LogLine) ProtoMessage() {}
+
+func (m *LogLine) GetTime() int64 {
+ if m != nil && m.Time != nil {
+ return *m.Time
+ }
+ return 0
+}
+
+func (m *LogLine) GetLevel() int32 {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return 0
+}
+
+func (m *LogLine) GetLogMessage() string {
+ if m != nil && m.LogMessage != nil {
+ return *m.LogMessage
+ }
+ return ""
+}
+
+type RequestLog struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ ModuleId *string `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"`
+ VersionId *string `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"`
+ RequestId []byte `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"`
+ Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"`
+ Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"`
+ StartTime *int64 `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"`
+ EndTime *int64 `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"`
+ Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"`
+ Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"`
+ Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"`
+ Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"`
+ HttpVersion *string `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"`
+ Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"`
+ ResponseSize *int64 `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"`
+ Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"`
+ UserAgent *string `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"`
+ UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"`
+ Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"`
+ ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"`
+ Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"`
+ Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"`
+ TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"`
+ TaskName *string `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"`
+ WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"`
+ PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"`
+ ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"`
+ Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"`
+ CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"`
+ Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"`
+ LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"`
+ AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"`
+ ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"`
+ WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"`
+ WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"`
+ ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"`
+ ServerName []byte `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RequestLog) Reset() { *m = RequestLog{} }
+func (m *RequestLog) String() string { return proto.CompactTextString(m) }
+func (*RequestLog) ProtoMessage() {}
+
+const Default_RequestLog_ModuleId string = "default"
+const Default_RequestLog_ReplicaIndex int32 = -1
+const Default_RequestLog_Finished bool = true
+
+func (m *RequestLog) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *RequestLog) GetModuleId() string {
+ if m != nil && m.ModuleId != nil {
+ return *m.ModuleId
+ }
+ return Default_RequestLog_ModuleId
+}
+
+func (m *RequestLog) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+func (m *RequestLog) GetRequestId() []byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+func (m *RequestLog) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *RequestLog) GetIp() string {
+ if m != nil && m.Ip != nil {
+ return *m.Ip
+ }
+ return ""
+}
+
+func (m *RequestLog) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *RequestLog) GetStartTime() int64 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetEndTime() int64 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetLatency() int64 {
+ if m != nil && m.Latency != nil {
+ return *m.Latency
+ }
+ return 0
+}
+
+func (m *RequestLog) GetMcycles() int64 {
+ if m != nil && m.Mcycles != nil {
+ return *m.Mcycles
+ }
+ return 0
+}
+
+func (m *RequestLog) GetMethod() string {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return ""
+}
+
+func (m *RequestLog) GetResource() string {
+ if m != nil && m.Resource != nil {
+ return *m.Resource
+ }
+ return ""
+}
+
+func (m *RequestLog) GetHttpVersion() string {
+ if m != nil && m.HttpVersion != nil {
+ return *m.HttpVersion
+ }
+ return ""
+}
+
+func (m *RequestLog) GetStatus() int32 {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return 0
+}
+
+func (m *RequestLog) GetResponseSize() int64 {
+ if m != nil && m.ResponseSize != nil {
+ return *m.ResponseSize
+ }
+ return 0
+}
+
+func (m *RequestLog) GetReferrer() string {
+ if m != nil && m.Referrer != nil {
+ return *m.Referrer
+ }
+ return ""
+}
+
+func (m *RequestLog) GetUserAgent() string {
+ if m != nil && m.UserAgent != nil {
+ return *m.UserAgent
+ }
+ return ""
+}
+
+func (m *RequestLog) GetUrlMapEntry() string {
+ if m != nil && m.UrlMapEntry != nil {
+ return *m.UrlMapEntry
+ }
+ return ""
+}
+
+func (m *RequestLog) GetCombined() string {
+ if m != nil && m.Combined != nil {
+ return *m.Combined
+ }
+ return ""
+}
+
+func (m *RequestLog) GetApiMcycles() int64 {
+ if m != nil && m.ApiMcycles != nil {
+ return *m.ApiMcycles
+ }
+ return 0
+}
+
+func (m *RequestLog) GetHost() string {
+ if m != nil && m.Host != nil {
+ return *m.Host
+ }
+ return ""
+}
+
+func (m *RequestLog) GetCost() float64 {
+ if m != nil && m.Cost != nil {
+ return *m.Cost
+ }
+ return 0
+}
+
+func (m *RequestLog) GetTaskQueueName() string {
+ if m != nil && m.TaskQueueName != nil {
+ return *m.TaskQueueName
+ }
+ return ""
+}
+
+func (m *RequestLog) GetTaskName() string {
+ if m != nil && m.TaskName != nil {
+ return *m.TaskName
+ }
+ return ""
+}
+
+func (m *RequestLog) GetWasLoadingRequest() bool {
+ if m != nil && m.WasLoadingRequest != nil {
+ return *m.WasLoadingRequest
+ }
+ return false
+}
+
+func (m *RequestLog) GetPendingTime() int64 {
+ if m != nil && m.PendingTime != nil {
+ return *m.PendingTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetReplicaIndex() int32 {
+ if m != nil && m.ReplicaIndex != nil {
+ return *m.ReplicaIndex
+ }
+ return Default_RequestLog_ReplicaIndex
+}
+
+func (m *RequestLog) GetFinished() bool {
+ if m != nil && m.Finished != nil {
+ return *m.Finished
+ }
+ return Default_RequestLog_Finished
+}
+
+func (m *RequestLog) GetCloneKey() []byte {
+ if m != nil {
+ return m.CloneKey
+ }
+ return nil
+}
+
+func (m *RequestLog) GetLine() []*LogLine {
+ if m != nil {
+ return m.Line
+ }
+ return nil
+}
+
+func (m *RequestLog) GetLinesIncomplete() bool {
+ if m != nil && m.LinesIncomplete != nil {
+ return *m.LinesIncomplete
+ }
+ return false
+}
+
+func (m *RequestLog) GetAppEngineRelease() []byte {
+ if m != nil {
+ return m.AppEngineRelease
+ }
+ return nil
+}
+
+func (m *RequestLog) GetExitReason() int32 {
+ if m != nil && m.ExitReason != nil {
+ return *m.ExitReason
+ }
+ return 0
+}
+
+func (m *RequestLog) GetWasThrottledForTime() bool {
+ if m != nil && m.WasThrottledForTime != nil {
+ return *m.WasThrottledForTime
+ }
+ return false
+}
+
+func (m *RequestLog) GetWasThrottledForRequests() bool {
+ if m != nil && m.WasThrottledForRequests != nil {
+ return *m.WasThrottledForRequests
+ }
+ return false
+}
+
+func (m *RequestLog) GetThrottledTime() int64 {
+ if m != nil && m.ThrottledTime != nil {
+ return *m.ThrottledTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetServerName() []byte {
+ if m != nil {
+ return m.ServerName
+ }
+ return nil
+}
+
+type LogModuleVersion struct {
+ ModuleId *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"`
+ VersionId *string `protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} }
+func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
+func (*LogModuleVersion) ProtoMessage() {}
+
+const Default_LogModuleVersion_ModuleId string = "default"
+
+func (m *LogModuleVersion) GetModuleId() string {
+ if m != nil && m.ModuleId != nil {
+ return *m.ModuleId
+ }
+ return Default_LogModuleVersion_ModuleId
+}
+
+func (m *LogModuleVersion) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+type LogReadRequest struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
+ ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"`
+ StartTime *int64 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
+ EndTime *int64 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
+ RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"`
+ MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"`
+ IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"`
+ Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
+ CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"`
+ HostRegex *string `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"`
+ ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"`
+ IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"`
+ AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"`
+ IncludeHost *bool `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"`
+ IncludeAll *bool `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"`
+ CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"`
+ NumShards *int32 `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogReadRequest) Reset() { *m = LogReadRequest{} }
+func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
+func (*LogReadRequest) ProtoMessage() {}
+
+func (m *LogReadRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetVersionId() []string {
+ if m != nil {
+ return m.VersionId
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {
+ if m != nil {
+ return m.ModuleVersion
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetStartTime() int64 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetEndTime() int64 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetRequestId() [][]byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetMinimumLogLevel() int32 {
+ if m != nil && m.MinimumLogLevel != nil {
+ return *m.MinimumLogLevel
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeIncomplete() bool {
+ if m != nil && m.IncludeIncomplete != nil {
+ return *m.IncludeIncomplete
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetCount() int64 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetCombinedLogRegex() string {
+ if m != nil && m.CombinedLogRegex != nil {
+ return *m.CombinedLogRegex
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetHostRegex() string {
+ if m != nil && m.HostRegex != nil {
+ return *m.HostRegex
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetReplicaIndex() int32 {
+ if m != nil && m.ReplicaIndex != nil {
+ return *m.ReplicaIndex
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeAppLogs() bool {
+ if m != nil && m.IncludeAppLogs != nil {
+ return *m.IncludeAppLogs
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetAppLogsPerRequest() int32 {
+ if m != nil && m.AppLogsPerRequest != nil {
+ return *m.AppLogsPerRequest
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeHost() bool {
+ if m != nil && m.IncludeHost != nil {
+ return *m.IncludeHost
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetIncludeAll() bool {
+ if m != nil && m.IncludeAll != nil {
+ return *m.IncludeAll
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetCacheIterator() bool {
+ if m != nil && m.CacheIterator != nil {
+ return *m.CacheIterator
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetNumShards() int32 {
+ if m != nil && m.NumShards != nil {
+ return *m.NumShards
+ }
+ return 0
+}
+
+type LogReadResponse struct {
+ Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
+ LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogReadResponse) Reset() { *m = LogReadResponse{} }
+func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
+func (*LogReadResponse) ProtoMessage() {}
+
+func (m *LogReadResponse) GetLog() []*RequestLog {
+ if m != nil {
+ return m.Log
+ }
+ return nil
+}
+
+func (m *LogReadResponse) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *LogReadResponse) GetLastEndTime() int64 {
+ if m != nil && m.LastEndTime != nil {
+ return *m.LastEndTime
+ }
+ return 0
+}
+
+type LogUsageRecord struct {
+ VersionId *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"`
+ StartTime *int32 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"`
+ EndTime *int32 `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"`
+ Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
+ TotalSize *int64 `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"`
+ Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} }
+func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRecord) ProtoMessage() {}
+
+func (m *LogUsageRecord) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+func (m *LogUsageRecord) GetStartTime() int32 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetEndTime() int32 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetCount() int64 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetTotalSize() int64 {
+ if m != nil && m.TotalSize != nil {
+ return *m.TotalSize
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetRecords() int32 {
+ if m != nil && m.Records != nil {
+ return *m.Records
+ }
+ return 0
+}
+
+type LogUsageRequest struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
+ StartTime *int32 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
+ EndTime *int32 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
+ ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"`
+ CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"`
+ UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"`
+ VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} }
+func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRequest) ProtoMessage() {}
+
+const Default_LogUsageRequest_ResolutionHours uint32 = 1
+
+func (m *LogUsageRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *LogUsageRequest) GetVersionId() []string {
+ if m != nil {
+ return m.VersionId
+ }
+ return nil
+}
+
+func (m *LogUsageRequest) GetStartTime() int32 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetEndTime() int32 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetResolutionHours() uint32 {
+ if m != nil && m.ResolutionHours != nil {
+ return *m.ResolutionHours
+ }
+ return Default_LogUsageRequest_ResolutionHours
+}
+
+func (m *LogUsageRequest) GetCombineVersions() bool {
+ if m != nil && m.CombineVersions != nil {
+ return *m.CombineVersions
+ }
+ return false
+}
+
+func (m *LogUsageRequest) GetUsageVersion() int32 {
+ if m != nil && m.UsageVersion != nil {
+ return *m.UsageVersion
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetVersionsOnly() bool {
+ if m != nil && m.VersionsOnly != nil {
+ return *m.VersionsOnly
+ }
+ return false
+}
+
+type LogUsageResponse struct {
+ Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
+ Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} }
+func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
+func (*LogUsageResponse) ProtoMessage() {}
+
+func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
+ if m != nil {
+ return m.Usage
+ }
+ return nil
+}
+
+func (m *LogUsageResponse) GetSummary() *LogUsageRecord {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto
new file mode 100644
index 0000000..8981dc4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.proto
@@ -0,0 +1,150 @@
+syntax = "proto2";
+option go_package = "log";
+
+package appengine;
+
+message LogServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_REQUEST = 1;
+ STORAGE_ERROR = 2;
+ }
+}
+
+message UserAppLogLine {
+ required int64 timestamp_usec = 1;
+ required int64 level = 2;
+ required string message = 3;
+}
+
+message UserAppLogGroup {
+ repeated UserAppLogLine log_line = 2;
+}
+
+message FlushRequest {
+ optional bytes logs = 1;
+}
+
+message SetStatusRequest {
+ required string status = 1;
+}
+
+
+message LogOffset {
+ optional bytes request_id = 1;
+}
+
+message LogLine {
+ required int64 time = 1;
+ required int32 level = 2;
+ required string log_message = 3;
+}
+
+message RequestLog {
+ required string app_id = 1;
+ optional string module_id = 37 [default="default"];
+ required string version_id = 2;
+ required bytes request_id = 3;
+ optional LogOffset offset = 35;
+ required string ip = 4;
+ optional string nickname = 5;
+ required int64 start_time = 6;
+ required int64 end_time = 7;
+ required int64 latency = 8;
+ required int64 mcycles = 9;
+ required string method = 10;
+ required string resource = 11;
+ required string http_version = 12;
+ required int32 status = 13;
+ required int64 response_size = 14;
+ optional string referrer = 15;
+ optional string user_agent = 16;
+ required string url_map_entry = 17;
+ required string combined = 18;
+ optional int64 api_mcycles = 19;
+ optional string host = 20;
+ optional double cost = 21;
+
+ optional string task_queue_name = 22;
+ optional string task_name = 23;
+
+ optional bool was_loading_request = 24;
+ optional int64 pending_time = 25;
+ optional int32 replica_index = 26 [default = -1];
+ optional bool finished = 27 [default = true];
+ optional bytes clone_key = 28;
+
+ repeated LogLine line = 29;
+
+ optional bool lines_incomplete = 36;
+ optional bytes app_engine_release = 38;
+
+ optional int32 exit_reason = 30;
+ optional bool was_throttled_for_time = 31;
+ optional bool was_throttled_for_requests = 32;
+ optional int64 throttled_time = 33;
+
+ optional bytes server_name = 34;
+}
+
+message LogModuleVersion {
+ optional string module_id = 1 [default="default"];
+ optional string version_id = 2;
+}
+
+message LogReadRequest {
+ required string app_id = 1;
+ repeated string version_id = 2;
+ repeated LogModuleVersion module_version = 19;
+
+ optional int64 start_time = 3;
+ optional int64 end_time = 4;
+ optional LogOffset offset = 5;
+ repeated bytes request_id = 6;
+
+ optional int32 minimum_log_level = 7;
+ optional bool include_incomplete = 8;
+ optional int64 count = 9;
+
+ optional string combined_log_regex = 14;
+ optional string host_regex = 15;
+ optional int32 replica_index = 16;
+
+ optional bool include_app_logs = 10;
+ optional int32 app_logs_per_request = 17;
+ optional bool include_host = 11;
+ optional bool include_all = 12;
+ optional bool cache_iterator = 13;
+ optional int32 num_shards = 18;
+}
+
+message LogReadResponse {
+ repeated RequestLog log = 1;
+ optional LogOffset offset = 2;
+ optional int64 last_end_time = 3;
+}
+
+message LogUsageRecord {
+ optional string version_id = 1;
+ optional int32 start_time = 2;
+ optional int32 end_time = 3;
+ optional int64 count = 4;
+ optional int64 total_size = 5;
+ optional int32 records = 6;
+}
+
+message LogUsageRequest {
+ required string app_id = 1;
+ repeated string version_id = 2;
+ optional int32 start_time = 3;
+ optional int32 end_time = 4;
+ optional uint32 resolution_hours = 5 [default = 1];
+ optional bool combine_versions = 6;
+ optional int32 usage_version = 7;
+ optional bool versions_only = 8;
+}
+
+message LogUsageResponse {
+ repeated LogUsageRecord usage = 1;
+ optional LogUsageRecord summary = 2;
+}
diff --git a/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go b/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go
new file mode 100644
index 0000000..b8d5f03
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go
@@ -0,0 +1,229 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/mail/mail_service.proto
+// DO NOT EDIT!
+
+/*
+Package mail is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/mail/mail_service.proto
+
+It has these top-level messages:
+ MailServiceError
+ MailAttachment
+ MailHeader
+ MailMessage
+*/
+package mail
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type MailServiceError_ErrorCode int32
+
+const (
+ MailServiceError_OK MailServiceError_ErrorCode = 0
+ MailServiceError_INTERNAL_ERROR MailServiceError_ErrorCode = 1
+ MailServiceError_BAD_REQUEST MailServiceError_ErrorCode = 2
+ MailServiceError_UNAUTHORIZED_SENDER MailServiceError_ErrorCode = 3
+ MailServiceError_INVALID_ATTACHMENT_TYPE MailServiceError_ErrorCode = 4
+ MailServiceError_INVALID_HEADER_NAME MailServiceError_ErrorCode = 5
+ MailServiceError_INVALID_CONTENT_ID MailServiceError_ErrorCode = 6
+)
+
+var MailServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INTERNAL_ERROR",
+ 2: "BAD_REQUEST",
+ 3: "UNAUTHORIZED_SENDER",
+ 4: "INVALID_ATTACHMENT_TYPE",
+ 5: "INVALID_HEADER_NAME",
+ 6: "INVALID_CONTENT_ID",
+}
+var MailServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INTERNAL_ERROR": 1,
+ "BAD_REQUEST": 2,
+ "UNAUTHORIZED_SENDER": 3,
+ "INVALID_ATTACHMENT_TYPE": 4,
+ "INVALID_HEADER_NAME": 5,
+ "INVALID_CONTENT_ID": 6,
+}
+
+func (x MailServiceError_ErrorCode) Enum() *MailServiceError_ErrorCode {
+ p := new(MailServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x MailServiceError_ErrorCode) String() string {
+ return proto.EnumName(MailServiceError_ErrorCode_name, int32(x))
+}
+func (x *MailServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MailServiceError_ErrorCode_value, data, "MailServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = MailServiceError_ErrorCode(value)
+ return nil
+}
+
+type MailServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailServiceError) Reset() { *m = MailServiceError{} }
+func (m *MailServiceError) String() string { return proto.CompactTextString(m) }
+func (*MailServiceError) ProtoMessage() {}
+
+type MailAttachment struct {
+ FileName *string `protobuf:"bytes,1,req,name=FileName" json:"FileName,omitempty"`
+ Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data,omitempty"`
+ ContentID *string `protobuf:"bytes,3,opt,name=ContentID" json:"ContentID,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailAttachment) Reset() { *m = MailAttachment{} }
+func (m *MailAttachment) String() string { return proto.CompactTextString(m) }
+func (*MailAttachment) ProtoMessage() {}
+
+func (m *MailAttachment) GetFileName() string {
+ if m != nil && m.FileName != nil {
+ return *m.FileName
+ }
+ return ""
+}
+
+func (m *MailAttachment) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *MailAttachment) GetContentID() string {
+ if m != nil && m.ContentID != nil {
+ return *m.ContentID
+ }
+ return ""
+}
+
+type MailHeader struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailHeader) Reset() { *m = MailHeader{} }
+func (m *MailHeader) String() string { return proto.CompactTextString(m) }
+func (*MailHeader) ProtoMessage() {}
+
+func (m *MailHeader) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MailHeader) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type MailMessage struct {
+ Sender *string `protobuf:"bytes,1,req,name=Sender" json:"Sender,omitempty"`
+ ReplyTo *string `protobuf:"bytes,2,opt,name=ReplyTo" json:"ReplyTo,omitempty"`
+ To []string `protobuf:"bytes,3,rep,name=To" json:"To,omitempty"`
+ Cc []string `protobuf:"bytes,4,rep,name=Cc" json:"Cc,omitempty"`
+ Bcc []string `protobuf:"bytes,5,rep,name=Bcc" json:"Bcc,omitempty"`
+ Subject *string `protobuf:"bytes,6,req,name=Subject" json:"Subject,omitempty"`
+ TextBody *string `protobuf:"bytes,7,opt,name=TextBody" json:"TextBody,omitempty"`
+ HtmlBody *string `protobuf:"bytes,8,opt,name=HtmlBody" json:"HtmlBody,omitempty"`
+ Attachment []*MailAttachment `protobuf:"bytes,9,rep,name=Attachment" json:"Attachment,omitempty"`
+ Header []*MailHeader `protobuf:"bytes,10,rep,name=Header" json:"Header,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailMessage) Reset() { *m = MailMessage{} }
+func (m *MailMessage) String() string { return proto.CompactTextString(m) }
+func (*MailMessage) ProtoMessage() {}
+
+func (m *MailMessage) GetSender() string {
+ if m != nil && m.Sender != nil {
+ return *m.Sender
+ }
+ return ""
+}
+
+func (m *MailMessage) GetReplyTo() string {
+ if m != nil && m.ReplyTo != nil {
+ return *m.ReplyTo
+ }
+ return ""
+}
+
+func (m *MailMessage) GetTo() []string {
+ if m != nil {
+ return m.To
+ }
+ return nil
+}
+
+func (m *MailMessage) GetCc() []string {
+ if m != nil {
+ return m.Cc
+ }
+ return nil
+}
+
+func (m *MailMessage) GetBcc() []string {
+ if m != nil {
+ return m.Bcc
+ }
+ return nil
+}
+
+func (m *MailMessage) GetSubject() string {
+ if m != nil && m.Subject != nil {
+ return *m.Subject
+ }
+ return ""
+}
+
+func (m *MailMessage) GetTextBody() string {
+ if m != nil && m.TextBody != nil {
+ return *m.TextBody
+ }
+ return ""
+}
+
+func (m *MailMessage) GetHtmlBody() string {
+ if m != nil && m.HtmlBody != nil {
+ return *m.HtmlBody
+ }
+ return ""
+}
+
+func (m *MailMessage) GetAttachment() []*MailAttachment {
+ if m != nil {
+ return m.Attachment
+ }
+ return nil
+}
+
+func (m *MailMessage) GetHeader() []*MailHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func init() {
+}
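
A similarly hedged sketch of building a MailMessage with the generated types — illustrative only; the addresses and subject are assumptions, and the same internal-import caveat applies.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	mailpb "google.golang.org/appengine/internal/mail"
)

func main() {
	msg := &mailpb.MailMessage{
		Sender:   proto.String("sender@example.com"), // hypothetical addresses
		To:       []string{"user@example.com"},
		Subject:  proto.String("Hello"),
		TextBody: proto.String("Plain-text body"),
	}
	fmt.Println(msg.GetSubject())  // "Hello"
	fmt.Println(msg.GetHtmlBody()) // "" — optional field left unset
}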
diff --git a/vendor/google.golang.org/appengine/internal/mail/mail_service.proto b/vendor/google.golang.org/appengine/internal/mail/mail_service.proto
new file mode 100644
index 0000000..4e57b7a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/mail/mail_service.proto
@@ -0,0 +1,45 @@
+syntax = "proto2";
+option go_package = "mail";
+
+package appengine;
+
+message MailServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INTERNAL_ERROR = 1;
+ BAD_REQUEST = 2;
+ UNAUTHORIZED_SENDER = 3;
+ INVALID_ATTACHMENT_TYPE = 4;
+ INVALID_HEADER_NAME = 5;
+ INVALID_CONTENT_ID = 6;
+ }
+}
+
+message MailAttachment {
+ required string FileName = 1;
+ required bytes Data = 2;
+ optional string ContentID = 3;
+}
+
+message MailHeader {
+ required string name = 1;
+ required string value = 2;
+}
+
+message MailMessage {
+ required string Sender = 1;
+ optional string ReplyTo = 2;
+
+ repeated string To = 3;
+ repeated string Cc = 4;
+ repeated string Bcc = 5;
+
+ required string Subject = 6;
+
+ optional string TextBody = 7;
+ optional string HtmlBody = 8;
+
+ repeated MailAttachment Attachment = 9;
+
+ repeated MailHeader Header = 10;
+}
diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go
new file mode 100644
index 0000000..4903616
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main.go
@@ -0,0 +1,15 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+ "appengine_internal"
+)
+
+func Main() {
+ appengine_internal.Main()
+}
diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go
new file mode 100644
index 0000000..57331ad
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main_vm.go
@@ -0,0 +1,44 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "io"
+ "log"
+ "net/http"
+ "net/url"
+ "os"
+)
+
+func Main() {
+ installHealthChecker(http.DefaultServeMux)
+
+ port := "8080"
+ if s := os.Getenv("PORT"); s != "" {
+ port = s
+ }
+
+ if err := http.ListenAndServe(":"+port, http.HandlerFunc(handleHTTP)); err != nil {
+ log.Fatalf("http.ListenAndServe: %v", err)
+ }
+}
+
+func installHealthChecker(mux *http.ServeMux) {
+ // If no health check handler has been installed by this point, add a trivial one.
+ const healthPath = "/_ah/health"
+ hreq := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Path: healthPath,
+ },
+ }
+ if _, pat := mux.Handler(hreq); pat != healthPath {
+ mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, "ok")
+ })
+ }
+}
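
A sketch of how the health-check installation above interacts with application code under the !appengine build — a guess at typical usage, assuming appengine.Main (which delegates to internal.Main at this revision) is available; the handler body is a placeholder.

package main

import (
	"io"
	"net/http"

	"google.golang.org/appengine"
)

func main() {
	// Registering "/_ah/health" before Main runs means installHealthChecker
	// finds this exact pattern via mux.Handler and does not override it.
	http.HandleFunc("/_ah/health", func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "ok") // custom readiness logic would go here
	})
	appengine.Main() // serves on $PORT, falling back to 8080 as shown above
}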
diff --git a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go
new file mode 100644
index 0000000..252fef8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go
@@ -0,0 +1,938 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/memcache/memcache_service.proto
+// DO NOT EDIT!
+
+/*
+Package memcache is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/memcache/memcache_service.proto
+
+It has these top-level messages:
+ MemcacheServiceError
+ AppOverride
+ MemcacheGetRequest
+ MemcacheGetResponse
+ MemcacheSetRequest
+ MemcacheSetResponse
+ MemcacheDeleteRequest
+ MemcacheDeleteResponse
+ MemcacheIncrementRequest
+ MemcacheIncrementResponse
+ MemcacheBatchIncrementRequest
+ MemcacheBatchIncrementResponse
+ MemcacheFlushRequest
+ MemcacheFlushResponse
+ MemcacheStatsRequest
+ MergedNamespaceStats
+ MemcacheStatsResponse
+ MemcacheGrabTailRequest
+ MemcacheGrabTailResponse
+*/
+package memcache
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type MemcacheServiceError_ErrorCode int32
+
+const (
+ MemcacheServiceError_OK MemcacheServiceError_ErrorCode = 0
+ MemcacheServiceError_UNSPECIFIED_ERROR MemcacheServiceError_ErrorCode = 1
+ MemcacheServiceError_NAMESPACE_NOT_SET MemcacheServiceError_ErrorCode = 2
+ MemcacheServiceError_PERMISSION_DENIED MemcacheServiceError_ErrorCode = 3
+ MemcacheServiceError_INVALID_VALUE MemcacheServiceError_ErrorCode = 6
+)
+
+var MemcacheServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "UNSPECIFIED_ERROR",
+ 2: "NAMESPACE_NOT_SET",
+ 3: "PERMISSION_DENIED",
+ 6: "INVALID_VALUE",
+}
+var MemcacheServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "UNSPECIFIED_ERROR": 1,
+ "NAMESPACE_NOT_SET": 2,
+ "PERMISSION_DENIED": 3,
+ "INVALID_VALUE": 6,
+}
+
+func (x MemcacheServiceError_ErrorCode) Enum() *MemcacheServiceError_ErrorCode {
+ p := new(MemcacheServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x MemcacheServiceError_ErrorCode) String() string {
+ return proto.EnumName(MemcacheServiceError_ErrorCode_name, int32(x))
+}
+func (x *MemcacheServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheServiceError_ErrorCode_value, data, "MemcacheServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheServiceError_ErrorCode(value)
+ return nil
+}
+
+type MemcacheSetRequest_SetPolicy int32
+
+const (
+ MemcacheSetRequest_SET MemcacheSetRequest_SetPolicy = 1
+ MemcacheSetRequest_ADD MemcacheSetRequest_SetPolicy = 2
+ MemcacheSetRequest_REPLACE MemcacheSetRequest_SetPolicy = 3
+ MemcacheSetRequest_CAS MemcacheSetRequest_SetPolicy = 4
+)
+
+var MemcacheSetRequest_SetPolicy_name = map[int32]string{
+ 1: "SET",
+ 2: "ADD",
+ 3: "REPLACE",
+ 4: "CAS",
+}
+var MemcacheSetRequest_SetPolicy_value = map[string]int32{
+ "SET": 1,
+ "ADD": 2,
+ "REPLACE": 3,
+ "CAS": 4,
+}
+
+func (x MemcacheSetRequest_SetPolicy) Enum() *MemcacheSetRequest_SetPolicy {
+ p := new(MemcacheSetRequest_SetPolicy)
+ *p = x
+ return p
+}
+func (x MemcacheSetRequest_SetPolicy) String() string {
+ return proto.EnumName(MemcacheSetRequest_SetPolicy_name, int32(x))
+}
+func (x *MemcacheSetRequest_SetPolicy) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheSetRequest_SetPolicy_value, data, "MemcacheSetRequest_SetPolicy")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheSetRequest_SetPolicy(value)
+ return nil
+}
+
+type MemcacheSetResponse_SetStatusCode int32
+
+const (
+ MemcacheSetResponse_STORED MemcacheSetResponse_SetStatusCode = 1
+ MemcacheSetResponse_NOT_STORED MemcacheSetResponse_SetStatusCode = 2
+ MemcacheSetResponse_ERROR MemcacheSetResponse_SetStatusCode = 3
+ MemcacheSetResponse_EXISTS MemcacheSetResponse_SetStatusCode = 4
+)
+
+var MemcacheSetResponse_SetStatusCode_name = map[int32]string{
+ 1: "STORED",
+ 2: "NOT_STORED",
+ 3: "ERROR",
+ 4: "EXISTS",
+}
+var MemcacheSetResponse_SetStatusCode_value = map[string]int32{
+ "STORED": 1,
+ "NOT_STORED": 2,
+ "ERROR": 3,
+ "EXISTS": 4,
+}
+
+func (x MemcacheSetResponse_SetStatusCode) Enum() *MemcacheSetResponse_SetStatusCode {
+ p := new(MemcacheSetResponse_SetStatusCode)
+ *p = x
+ return p
+}
+func (x MemcacheSetResponse_SetStatusCode) String() string {
+ return proto.EnumName(MemcacheSetResponse_SetStatusCode_name, int32(x))
+}
+func (x *MemcacheSetResponse_SetStatusCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheSetResponse_SetStatusCode_value, data, "MemcacheSetResponse_SetStatusCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheSetResponse_SetStatusCode(value)
+ return nil
+}
+
+type MemcacheDeleteResponse_DeleteStatusCode int32
+
+const (
+ MemcacheDeleteResponse_DELETED MemcacheDeleteResponse_DeleteStatusCode = 1
+ MemcacheDeleteResponse_NOT_FOUND MemcacheDeleteResponse_DeleteStatusCode = 2
+)
+
+var MemcacheDeleteResponse_DeleteStatusCode_name = map[int32]string{
+ 1: "DELETED",
+ 2: "NOT_FOUND",
+}
+var MemcacheDeleteResponse_DeleteStatusCode_value = map[string]int32{
+ "DELETED": 1,
+ "NOT_FOUND": 2,
+}
+
+func (x MemcacheDeleteResponse_DeleteStatusCode) Enum() *MemcacheDeleteResponse_DeleteStatusCode {
+ p := new(MemcacheDeleteResponse_DeleteStatusCode)
+ *p = x
+ return p
+}
+func (x MemcacheDeleteResponse_DeleteStatusCode) String() string {
+ return proto.EnumName(MemcacheDeleteResponse_DeleteStatusCode_name, int32(x))
+}
+func (x *MemcacheDeleteResponse_DeleteStatusCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheDeleteResponse_DeleteStatusCode_value, data, "MemcacheDeleteResponse_DeleteStatusCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheDeleteResponse_DeleteStatusCode(value)
+ return nil
+}
+
+type MemcacheIncrementRequest_Direction int32
+
+const (
+ MemcacheIncrementRequest_INCREMENT MemcacheIncrementRequest_Direction = 1
+ MemcacheIncrementRequest_DECREMENT MemcacheIncrementRequest_Direction = 2
+)
+
+var MemcacheIncrementRequest_Direction_name = map[int32]string{
+ 1: "INCREMENT",
+ 2: "DECREMENT",
+}
+var MemcacheIncrementRequest_Direction_value = map[string]int32{
+ "INCREMENT": 1,
+ "DECREMENT": 2,
+}
+
+func (x MemcacheIncrementRequest_Direction) Enum() *MemcacheIncrementRequest_Direction {
+ p := new(MemcacheIncrementRequest_Direction)
+ *p = x
+ return p
+}
+func (x MemcacheIncrementRequest_Direction) String() string {
+ return proto.EnumName(MemcacheIncrementRequest_Direction_name, int32(x))
+}
+func (x *MemcacheIncrementRequest_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheIncrementRequest_Direction_value, data, "MemcacheIncrementRequest_Direction")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheIncrementRequest_Direction(value)
+ return nil
+}
+
+type MemcacheIncrementResponse_IncrementStatusCode int32
+
+const (
+ MemcacheIncrementResponse_OK MemcacheIncrementResponse_IncrementStatusCode = 1
+ MemcacheIncrementResponse_NOT_CHANGED MemcacheIncrementResponse_IncrementStatusCode = 2
+ MemcacheIncrementResponse_ERROR MemcacheIncrementResponse_IncrementStatusCode = 3
+)
+
+var MemcacheIncrementResponse_IncrementStatusCode_name = map[int32]string{
+ 1: "OK",
+ 2: "NOT_CHANGED",
+ 3: "ERROR",
+}
+var MemcacheIncrementResponse_IncrementStatusCode_value = map[string]int32{
+ "OK": 1,
+ "NOT_CHANGED": 2,
+ "ERROR": 3,
+}
+
+func (x MemcacheIncrementResponse_IncrementStatusCode) Enum() *MemcacheIncrementResponse_IncrementStatusCode {
+ p := new(MemcacheIncrementResponse_IncrementStatusCode)
+ *p = x
+ return p
+}
+func (x MemcacheIncrementResponse_IncrementStatusCode) String() string {
+ return proto.EnumName(MemcacheIncrementResponse_IncrementStatusCode_name, int32(x))
+}
+func (x *MemcacheIncrementResponse_IncrementStatusCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheIncrementResponse_IncrementStatusCode_value, data, "MemcacheIncrementResponse_IncrementStatusCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheIncrementResponse_IncrementStatusCode(value)
+ return nil
+}
+
+type MemcacheServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheServiceError) Reset() { *m = MemcacheServiceError{} }
+func (m *MemcacheServiceError) String() string { return proto.CompactTextString(m) }
+func (*MemcacheServiceError) ProtoMessage() {}
+
+type AppOverride struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ NumMemcachegBackends *int32 `protobuf:"varint,2,opt,name=num_memcacheg_backends" json:"num_memcacheg_backends,omitempty"`
+ IgnoreShardlock *bool `protobuf:"varint,3,opt,name=ignore_shardlock" json:"ignore_shardlock,omitempty"`
+ MemcachePoolHint *string `protobuf:"bytes,4,opt,name=memcache_pool_hint" json:"memcache_pool_hint,omitempty"`
+ MemcacheShardingStrategy []byte `protobuf:"bytes,5,opt,name=memcache_sharding_strategy" json:"memcache_sharding_strategy,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AppOverride) Reset() { *m = AppOverride{} }
+func (m *AppOverride) String() string { return proto.CompactTextString(m) }
+func (*AppOverride) ProtoMessage() {}
+
+func (m *AppOverride) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *AppOverride) GetNumMemcachegBackends() int32 {
+ if m != nil && m.NumMemcachegBackends != nil {
+ return *m.NumMemcachegBackends
+ }
+ return 0
+}
+
+func (m *AppOverride) GetIgnoreShardlock() bool {
+ if m != nil && m.IgnoreShardlock != nil {
+ return *m.IgnoreShardlock
+ }
+ return false
+}
+
+func (m *AppOverride) GetMemcachePoolHint() string {
+ if m != nil && m.MemcachePoolHint != nil {
+ return *m.MemcachePoolHint
+ }
+ return ""
+}
+
+func (m *AppOverride) GetMemcacheShardingStrategy() []byte {
+ if m != nil {
+ return m.MemcacheShardingStrategy
+ }
+ return nil
+}
+
+type MemcacheGetRequest struct {
+ Key [][]byte `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"`
+ ForCas *bool `protobuf:"varint,4,opt,name=for_cas" json:"for_cas,omitempty"`
+ Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGetRequest) Reset() { *m = MemcacheGetRequest{} }
+func (m *MemcacheGetRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetRequest) ProtoMessage() {}
+
+func (m *MemcacheGetRequest) GetKey() [][]byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheGetRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheGetRequest) GetForCas() bool {
+ if m != nil && m.ForCas != nil {
+ return *m.ForCas
+ }
+ return false
+}
+
+func (m *MemcacheGetRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheGetResponse struct {
+ Item []*MemcacheGetResponse_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGetResponse) Reset() { *m = MemcacheGetResponse{} }
+func (m *MemcacheGetResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetResponse) ProtoMessage() {}
+
+func (m *MemcacheGetResponse) GetItem() []*MemcacheGetResponse_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+type MemcacheGetResponse_Item struct {
+ Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+ Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"`
+ CasId *uint64 `protobuf:"fixed64,5,opt,name=cas_id" json:"cas_id,omitempty"`
+ ExpiresInSeconds *int32 `protobuf:"varint,6,opt,name=expires_in_seconds" json:"expires_in_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGetResponse_Item) Reset() { *m = MemcacheGetResponse_Item{} }
+func (m *MemcacheGetResponse_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetResponse_Item) ProtoMessage() {}
+
+func (m *MemcacheGetResponse_Item) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheGetResponse_Item) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *MemcacheGetResponse_Item) GetFlags() uint32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return 0
+}
+
+func (m *MemcacheGetResponse_Item) GetCasId() uint64 {
+ if m != nil && m.CasId != nil {
+ return *m.CasId
+ }
+ return 0
+}
+
+func (m *MemcacheGetResponse_Item) GetExpiresInSeconds() int32 {
+ if m != nil && m.ExpiresInSeconds != nil {
+ return *m.ExpiresInSeconds
+ }
+ return 0
+}
+
+type MemcacheSetRequest struct {
+ Item []*MemcacheSetRequest_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"`
+ NameSpace *string `protobuf:"bytes,7,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Override *AppOverride `protobuf:"bytes,10,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheSetRequest) Reset() { *m = MemcacheSetRequest{} }
+func (m *MemcacheSetRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetRequest) ProtoMessage() {}
+
+func (m *MemcacheSetRequest) GetItem() []*MemcacheSetRequest_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+func (m *MemcacheSetRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheSetRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheSetRequest_Item struct {
+ Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+ Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"`
+ SetPolicy *MemcacheSetRequest_SetPolicy `protobuf:"varint,5,opt,name=set_policy,enum=appengine.MemcacheSetRequest_SetPolicy,def=1" json:"set_policy,omitempty"`
+ ExpirationTime *uint32 `protobuf:"fixed32,6,opt,name=expiration_time,def=0" json:"expiration_time,omitempty"`
+ CasId *uint64 `protobuf:"fixed64,8,opt,name=cas_id" json:"cas_id,omitempty"`
+ ForCas *bool `protobuf:"varint,9,opt,name=for_cas" json:"for_cas,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheSetRequest_Item) Reset() { *m = MemcacheSetRequest_Item{} }
+func (m *MemcacheSetRequest_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetRequest_Item) ProtoMessage() {}
+
+const Default_MemcacheSetRequest_Item_SetPolicy MemcacheSetRequest_SetPolicy = MemcacheSetRequest_SET
+const Default_MemcacheSetRequest_Item_ExpirationTime uint32 = 0
+
+func (m *MemcacheSetRequest_Item) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheSetRequest_Item) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *MemcacheSetRequest_Item) GetFlags() uint32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return 0
+}
+
+func (m *MemcacheSetRequest_Item) GetSetPolicy() MemcacheSetRequest_SetPolicy {
+ if m != nil && m.SetPolicy != nil {
+ return *m.SetPolicy
+ }
+ return Default_MemcacheSetRequest_Item_SetPolicy
+}
+
+func (m *MemcacheSetRequest_Item) GetExpirationTime() uint32 {
+ if m != nil && m.ExpirationTime != nil {
+ return *m.ExpirationTime
+ }
+ return Default_MemcacheSetRequest_Item_ExpirationTime
+}
+
+func (m *MemcacheSetRequest_Item) GetCasId() uint64 {
+ if m != nil && m.CasId != nil {
+ return *m.CasId
+ }
+ return 0
+}
+
+func (m *MemcacheSetRequest_Item) GetForCas() bool {
+ if m != nil && m.ForCas != nil {
+ return *m.ForCas
+ }
+ return false
+}
+
+type MemcacheSetResponse struct {
+ SetStatus []MemcacheSetResponse_SetStatusCode `protobuf:"varint,1,rep,name=set_status,enum=appengine.MemcacheSetResponse_SetStatusCode" json:"set_status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheSetResponse) Reset() { *m = MemcacheSetResponse{} }
+func (m *MemcacheSetResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetResponse) ProtoMessage() {}
+
+func (m *MemcacheSetResponse) GetSetStatus() []MemcacheSetResponse_SetStatusCode {
+ if m != nil {
+ return m.SetStatus
+ }
+ return nil
+}
+
+type MemcacheDeleteRequest struct {
+ Item []*MemcacheDeleteRequest_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"`
+ NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheDeleteRequest) Reset() { *m = MemcacheDeleteRequest{} }
+func (m *MemcacheDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteRequest) ProtoMessage() {}
+
+func (m *MemcacheDeleteRequest) GetItem() []*MemcacheDeleteRequest_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+func (m *MemcacheDeleteRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheDeleteRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheDeleteRequest_Item struct {
+ Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+ DeleteTime *uint32 `protobuf:"fixed32,3,opt,name=delete_time,def=0" json:"delete_time,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheDeleteRequest_Item) Reset() { *m = MemcacheDeleteRequest_Item{} }
+func (m *MemcacheDeleteRequest_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteRequest_Item) ProtoMessage() {}
+
+const Default_MemcacheDeleteRequest_Item_DeleteTime uint32 = 0
+
+func (m *MemcacheDeleteRequest_Item) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheDeleteRequest_Item) GetDeleteTime() uint32 {
+ if m != nil && m.DeleteTime != nil {
+ return *m.DeleteTime
+ }
+ return Default_MemcacheDeleteRequest_Item_DeleteTime
+}
+
+type MemcacheDeleteResponse struct {
+ DeleteStatus []MemcacheDeleteResponse_DeleteStatusCode `protobuf:"varint,1,rep,name=delete_status,enum=appengine.MemcacheDeleteResponse_DeleteStatusCode" json:"delete_status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheDeleteResponse) Reset() { *m = MemcacheDeleteResponse{} }
+func (m *MemcacheDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteResponse) ProtoMessage() {}
+
+func (m *MemcacheDeleteResponse) GetDeleteStatus() []MemcacheDeleteResponse_DeleteStatusCode {
+ if m != nil {
+ return m.DeleteStatus
+ }
+ return nil
+}
+
+type MemcacheIncrementRequest struct {
+ Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Delta *uint64 `protobuf:"varint,2,opt,name=delta,def=1" json:"delta,omitempty"`
+ Direction *MemcacheIncrementRequest_Direction `protobuf:"varint,3,opt,name=direction,enum=appengine.MemcacheIncrementRequest_Direction,def=1" json:"direction,omitempty"`
+ InitialValue *uint64 `protobuf:"varint,5,opt,name=initial_value" json:"initial_value,omitempty"`
+ InitialFlags *uint32 `protobuf:"fixed32,6,opt,name=initial_flags" json:"initial_flags,omitempty"`
+ Override *AppOverride `protobuf:"bytes,7,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheIncrementRequest) Reset() { *m = MemcacheIncrementRequest{} }
+func (m *MemcacheIncrementRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheIncrementRequest) ProtoMessage() {}
+
+const Default_MemcacheIncrementRequest_Delta uint64 = 1
+const Default_MemcacheIncrementRequest_Direction MemcacheIncrementRequest_Direction = MemcacheIncrementRequest_INCREMENT
+
+func (m *MemcacheIncrementRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheIncrementRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheIncrementRequest) GetDelta() uint64 {
+ if m != nil && m.Delta != nil {
+ return *m.Delta
+ }
+ return Default_MemcacheIncrementRequest_Delta
+}
+
+func (m *MemcacheIncrementRequest) GetDirection() MemcacheIncrementRequest_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_MemcacheIncrementRequest_Direction
+}
+
+func (m *MemcacheIncrementRequest) GetInitialValue() uint64 {
+ if m != nil && m.InitialValue != nil {
+ return *m.InitialValue
+ }
+ return 0
+}
+
+func (m *MemcacheIncrementRequest) GetInitialFlags() uint32 {
+ if m != nil && m.InitialFlags != nil {
+ return *m.InitialFlags
+ }
+ return 0
+}
+
+func (m *MemcacheIncrementRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheIncrementResponse struct {
+ NewValue *uint64 `protobuf:"varint,1,opt,name=new_value" json:"new_value,omitempty"`
+ IncrementStatus *MemcacheIncrementResponse_IncrementStatusCode `protobuf:"varint,2,opt,name=increment_status,enum=appengine.MemcacheIncrementResponse_IncrementStatusCode" json:"increment_status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheIncrementResponse) Reset() { *m = MemcacheIncrementResponse{} }
+func (m *MemcacheIncrementResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheIncrementResponse) ProtoMessage() {}
+
+func (m *MemcacheIncrementResponse) GetNewValue() uint64 {
+ if m != nil && m.NewValue != nil {
+ return *m.NewValue
+ }
+ return 0
+}
+
+func (m *MemcacheIncrementResponse) GetIncrementStatus() MemcacheIncrementResponse_IncrementStatusCode {
+ if m != nil && m.IncrementStatus != nil {
+ return *m.IncrementStatus
+ }
+ return MemcacheIncrementResponse_OK
+}
+
+type MemcacheBatchIncrementRequest struct {
+ NameSpace *string `protobuf:"bytes,1,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Item []*MemcacheIncrementRequest `protobuf:"bytes,2,rep,name=item" json:"item,omitempty"`
+ Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheBatchIncrementRequest) Reset() { *m = MemcacheBatchIncrementRequest{} }
+func (m *MemcacheBatchIncrementRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheBatchIncrementRequest) ProtoMessage() {}
+
+func (m *MemcacheBatchIncrementRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheBatchIncrementRequest) GetItem() []*MemcacheIncrementRequest {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+func (m *MemcacheBatchIncrementRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheBatchIncrementResponse struct {
+ Item []*MemcacheIncrementResponse `protobuf:"bytes,1,rep,name=item" json:"item,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheBatchIncrementResponse) Reset() { *m = MemcacheBatchIncrementResponse{} }
+func (m *MemcacheBatchIncrementResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheBatchIncrementResponse) ProtoMessage() {}
+
+func (m *MemcacheBatchIncrementResponse) GetItem() []*MemcacheIncrementResponse {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+type MemcacheFlushRequest struct {
+ Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheFlushRequest) Reset() { *m = MemcacheFlushRequest{} }
+func (m *MemcacheFlushRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheFlushRequest) ProtoMessage() {}
+
+func (m *MemcacheFlushRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheFlushResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheFlushResponse) Reset() { *m = MemcacheFlushResponse{} }
+func (m *MemcacheFlushResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheFlushResponse) ProtoMessage() {}
+
+type MemcacheStatsRequest struct {
+ Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheStatsRequest) Reset() { *m = MemcacheStatsRequest{} }
+func (m *MemcacheStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheStatsRequest) ProtoMessage() {}
+
+func (m *MemcacheStatsRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MergedNamespaceStats struct {
+ Hits *uint64 `protobuf:"varint,1,req,name=hits" json:"hits,omitempty"`
+ Misses *uint64 `protobuf:"varint,2,req,name=misses" json:"misses,omitempty"`
+ ByteHits *uint64 `protobuf:"varint,3,req,name=byte_hits" json:"byte_hits,omitempty"`
+ Items *uint64 `protobuf:"varint,4,req,name=items" json:"items,omitempty"`
+ Bytes *uint64 `protobuf:"varint,5,req,name=bytes" json:"bytes,omitempty"`
+ OldestItemAge *uint32 `protobuf:"fixed32,6,req,name=oldest_item_age" json:"oldest_item_age,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MergedNamespaceStats) Reset() { *m = MergedNamespaceStats{} }
+func (m *MergedNamespaceStats) String() string { return proto.CompactTextString(m) }
+func (*MergedNamespaceStats) ProtoMessage() {}
+
+func (m *MergedNamespaceStats) GetHits() uint64 {
+ if m != nil && m.Hits != nil {
+ return *m.Hits
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetMisses() uint64 {
+ if m != nil && m.Misses != nil {
+ return *m.Misses
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetByteHits() uint64 {
+ if m != nil && m.ByteHits != nil {
+ return *m.ByteHits
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetItems() uint64 {
+ if m != nil && m.Items != nil {
+ return *m.Items
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetBytes() uint64 {
+ if m != nil && m.Bytes != nil {
+ return *m.Bytes
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetOldestItemAge() uint32 {
+ if m != nil && m.OldestItemAge != nil {
+ return *m.OldestItemAge
+ }
+ return 0
+}
+
+type MemcacheStatsResponse struct {
+ Stats *MergedNamespaceStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheStatsResponse) Reset() { *m = MemcacheStatsResponse{} }
+func (m *MemcacheStatsResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheStatsResponse) ProtoMessage() {}
+
+func (m *MemcacheStatsResponse) GetStats() *MergedNamespaceStats {
+ if m != nil {
+ return m.Stats
+ }
+ return nil
+}
+
+type MemcacheGrabTailRequest struct {
+ ItemCount *int32 `protobuf:"varint,1,req,name=item_count" json:"item_count,omitempty"`
+ NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGrabTailRequest) Reset() { *m = MemcacheGrabTailRequest{} }
+func (m *MemcacheGrabTailRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailRequest) ProtoMessage() {}
+
+func (m *MemcacheGrabTailRequest) GetItemCount() int32 {
+ if m != nil && m.ItemCount != nil {
+ return *m.ItemCount
+ }
+ return 0
+}
+
+func (m *MemcacheGrabTailRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheGrabTailRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheGrabTailResponse struct {
+ Item []*MemcacheGrabTailResponse_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGrabTailResponse) Reset() { *m = MemcacheGrabTailResponse{} }
+func (m *MemcacheGrabTailResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailResponse) ProtoMessage() {}
+
+func (m *MemcacheGrabTailResponse) GetItem() []*MemcacheGrabTailResponse_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+type MemcacheGrabTailResponse_Item struct {
+ Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ Flags *uint32 `protobuf:"fixed32,3,opt,name=flags" json:"flags,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGrabTailResponse_Item) Reset() { *m = MemcacheGrabTailResponse_Item{} }
+func (m *MemcacheGrabTailResponse_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailResponse_Item) ProtoMessage() {}
+
+func (m *MemcacheGrabTailResponse_Item) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *MemcacheGrabTailResponse_Item) GetFlags() uint32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return 0
+}
+
+func init() {
+}
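
As with the log service, a brief illustrative sketch (not vendored code) of the proto2 defaults baked into MemcacheSetRequest items; the key and value are made up.

package main

import (
	"fmt"

	memcachepb "google.golang.org/appengine/internal/memcache"
)

func main() {
	req := &memcachepb.MemcacheSetRequest{
		Item: []*memcachepb.MemcacheSetRequest_Item{{
			Key:   []byte("greeting"), // hypothetical key/value
			Value: []byte("hello"),
		}},
	}
	it := req.Item[0]
	fmt.Println(it.GetSetPolicy())      // SET — enum default declared in the .proto
	fmt.Println(it.GetExpirationTime()) // 0  — fixed32 default
}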
diff --git a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto
new file mode 100644
index 0000000..5f0edcd
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto
@@ -0,0 +1,165 @@
+syntax = "proto2";
+option go_package = "memcache";
+
+package appengine;
+
+message MemcacheServiceError {
+ enum ErrorCode {
+ OK = 0;
+ UNSPECIFIED_ERROR = 1;
+ NAMESPACE_NOT_SET = 2;
+ PERMISSION_DENIED = 3;
+ INVALID_VALUE = 6;
+ }
+}
+
+message AppOverride {
+ required string app_id = 1;
+
+ optional int32 num_memcacheg_backends = 2 [deprecated=true];
+ optional bool ignore_shardlock = 3 [deprecated=true];
+ optional string memcache_pool_hint = 4 [deprecated=true];
+ optional bytes memcache_sharding_strategy = 5 [deprecated=true];
+}
+
+message MemcacheGetRequest {
+ repeated bytes key = 1;
+ optional string name_space = 2 [default = ""];
+ optional bool for_cas = 4;
+ optional AppOverride override = 5;
+}
+
+message MemcacheGetResponse {
+ repeated group Item = 1 {
+ required bytes key = 2;
+ required bytes value = 3;
+ optional fixed32 flags = 4;
+ optional fixed64 cas_id = 5;
+ optional int32 expires_in_seconds = 6;
+ }
+}
+
+message MemcacheSetRequest {
+ enum SetPolicy {
+ SET = 1;
+ ADD = 2;
+ REPLACE = 3;
+ CAS = 4;
+ }
+ repeated group Item = 1 {
+ required bytes key = 2;
+ required bytes value = 3;
+
+ optional fixed32 flags = 4;
+ optional SetPolicy set_policy = 5 [default = SET];
+ optional fixed32 expiration_time = 6 [default = 0];
+
+ optional fixed64 cas_id = 8;
+ optional bool for_cas = 9;
+ }
+ optional string name_space = 7 [default = ""];
+ optional AppOverride override = 10;
+}
+
+message MemcacheSetResponse {
+ enum SetStatusCode {
+ STORED = 1;
+ NOT_STORED = 2;
+ ERROR = 3;
+ EXISTS = 4;
+ }
+ repeated SetStatusCode set_status = 1;
+}
+
+message MemcacheDeleteRequest {
+ repeated group Item = 1 {
+ required bytes key = 2;
+ optional fixed32 delete_time = 3 [default = 0];
+ }
+ optional string name_space = 4 [default = ""];
+ optional AppOverride override = 5;
+}
+
+message MemcacheDeleteResponse {
+ enum DeleteStatusCode {
+ DELETED = 1;
+ NOT_FOUND = 2;
+ }
+ repeated DeleteStatusCode delete_status = 1;
+}
+
+message MemcacheIncrementRequest {
+ enum Direction {
+ INCREMENT = 1;
+ DECREMENT = 2;
+ }
+ required bytes key = 1;
+ optional string name_space = 4 [default = ""];
+
+ optional uint64 delta = 2 [default = 1];
+ optional Direction direction = 3 [default = INCREMENT];
+
+ optional uint64 initial_value = 5;
+ optional fixed32 initial_flags = 6;
+ optional AppOverride override = 7;
+}
+
+message MemcacheIncrementResponse {
+ enum IncrementStatusCode {
+ OK = 1;
+ NOT_CHANGED = 2;
+ ERROR = 3;
+ }
+
+ optional uint64 new_value = 1;
+ optional IncrementStatusCode increment_status = 2;
+}
+
+message MemcacheBatchIncrementRequest {
+ optional string name_space = 1 [default = ""];
+ repeated MemcacheIncrementRequest item = 2;
+ optional AppOverride override = 3;
+}
+
+message MemcacheBatchIncrementResponse {
+ repeated MemcacheIncrementResponse item = 1;
+}
+
+message MemcacheFlushRequest {
+ optional AppOverride override = 1;
+}
+
+message MemcacheFlushResponse {
+}
+
+message MemcacheStatsRequest {
+ optional AppOverride override = 1;
+}
+
+message MergedNamespaceStats {
+ required uint64 hits = 1;
+ required uint64 misses = 2;
+ required uint64 byte_hits = 3;
+
+ required uint64 items = 4;
+ required uint64 bytes = 5;
+
+ required fixed32 oldest_item_age = 6;
+}
+
+message MemcacheStatsResponse {
+ optional MergedNamespaceStats stats = 1;
+}
+
+message MemcacheGrabTailRequest {
+ required int32 item_count = 1;
+ optional string name_space = 2 [default = ""];
+ optional AppOverride override = 3;
+}
+
+message MemcacheGrabTailResponse {
+ repeated group Item = 1 {
+ required bytes value = 2;
+ optional fixed32 flags = 3;
+ }
+}
diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go
new file mode 100644
index 0000000..9cc1f71
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/metadata.go
@@ -0,0 +1,61 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file has code for accessing metadata.
+//
+// References:
+// https://cloud.google.com/compute/docs/metadata
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/url"
+)
+
+const (
+ metadataHost = "metadata"
+ metadataPath = "/computeMetadata/v1/"
+)
+
+var (
+ metadataRequestHeaders = http.Header{
+ "Metadata-Flavor": []string{"Google"},
+ }
+)
+
+// TODO(dsymonds): Do we need to support default values, like Python?
+func mustGetMetadata(key string) []byte {
+ b, err := getMetadata(key)
+ if err != nil {
+ log.Fatalf("Metadata fetch failed: %v", err)
+ }
+ return b
+}
+
+func getMetadata(key string) ([]byte, error) {
+ // TODO(dsymonds): May need to use url.Parse to support keys with query args.
+ req := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Scheme: "http",
+ Host: metadataHost,
+ Path: metadataPath + key,
+ },
+ Header: metadataRequestHeaders,
+ Host: metadataHost,
+ }
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
+ }
+ return ioutil.ReadAll(resp.Body)
+}
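
Since getMetadata and mustGetMetadata are unexported, usage stays inside package internal; the sketch below is illustrative and the metadata key is an assumption, not something taken from this diff.

// Hypothetical in-package helper showing the call pattern.
func exampleProjectID() string {
	// Performs GET http://metadata/computeMetadata/v1/instance/attributes/gae_project
	// with the mandatory "Metadata-Flavor: Google" header; mustGetMetadata exits the
	// process via log.Fatalf if the metadata server cannot be reached.
	return string(mustGetMetadata("instance/attributes/gae_project"))
}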
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
new file mode 100644
index 0000000..a0145ed
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
@@ -0,0 +1,375 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/modules/modules_service.proto
+// DO NOT EDIT!
+
+/*
+Package modules is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/modules/modules_service.proto
+
+It has these top-level messages:
+ ModulesServiceError
+ GetModulesRequest
+ GetModulesResponse
+ GetVersionsRequest
+ GetVersionsResponse
+ GetDefaultVersionRequest
+ GetDefaultVersionResponse
+ GetNumInstancesRequest
+ GetNumInstancesResponse
+ SetNumInstancesRequest
+ SetNumInstancesResponse
+ StartModuleRequest
+ StartModuleResponse
+ StopModuleRequest
+ StopModuleResponse
+ GetHostnameRequest
+ GetHostnameResponse
+*/
+package modules
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type ModulesServiceError_ErrorCode int32
+
+const (
+ ModulesServiceError_OK ModulesServiceError_ErrorCode = 0
+ ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1
+ ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2
+ ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3
+ ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4
+ ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5
+)
+
+var ModulesServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_MODULE",
+ 2: "INVALID_VERSION",
+ 3: "INVALID_INSTANCES",
+ 4: "TRANSIENT_ERROR",
+ 5: "UNEXPECTED_STATE",
+}
+var ModulesServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_MODULE": 1,
+ "INVALID_VERSION": 2,
+ "INVALID_INSTANCES": 3,
+ "TRANSIENT_ERROR": 4,
+ "UNEXPECTED_STATE": 5,
+}
+
+func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode {
+ p := new(ModulesServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x ModulesServiceError_ErrorCode) String() string {
+ return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x))
+}
+func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ModulesServiceError_ErrorCode(value)
+ return nil
+}
+
+type ModulesServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} }
+func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) }
+func (*ModulesServiceError) ProtoMessage() {}
+
+type GetModulesRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} }
+func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetModulesRequest) ProtoMessage() {}
+
+type GetModulesResponse struct {
+ Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} }
+func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetModulesResponse) ProtoMessage() {}
+
+func (m *GetModulesResponse) GetModule() []string {
+ if m != nil {
+ return m.Module
+ }
+ return nil
+}
+
+type GetVersionsRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} }
+func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsRequest) ProtoMessage() {}
+
+func (m *GetVersionsRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+type GetVersionsResponse struct {
+ Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} }
+func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsResponse) ProtoMessage() {}
+
+func (m *GetVersionsResponse) GetVersion() []string {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type GetDefaultVersionRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} }
+func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionRequest) ProtoMessage() {}
+
+func (m *GetDefaultVersionRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+type GetDefaultVersionResponse struct {
+ Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} }
+func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionResponse) ProtoMessage() {}
+
+func (m *GetDefaultVersionResponse) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type GetNumInstancesRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} }
+func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesRequest) ProtoMessage() {}
+
+func (m *GetNumInstancesRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *GetNumInstancesRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type GetNumInstancesResponse struct {
+ Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} }
+func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesResponse) ProtoMessage() {}
+
+func (m *GetNumInstancesResponse) GetInstances() int64 {
+ if m != nil && m.Instances != nil {
+ return *m.Instances
+ }
+ return 0
+}
+
+type SetNumInstancesRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} }
+func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesRequest) ProtoMessage() {}
+
+func (m *SetNumInstancesRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *SetNumInstancesRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+func (m *SetNumInstancesRequest) GetInstances() int64 {
+ if m != nil && m.Instances != nil {
+ return *m.Instances
+ }
+ return 0
+}
+
+type SetNumInstancesResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} }
+func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesResponse) ProtoMessage() {}
+
+type StartModuleRequest struct {
+ Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} }
+func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StartModuleRequest) ProtoMessage() {}
+
+func (m *StartModuleRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *StartModuleRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type StartModuleResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} }
+func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StartModuleResponse) ProtoMessage() {}
+
+type StopModuleRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} }
+func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StopModuleRequest) ProtoMessage() {}
+
+func (m *StopModuleRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *StopModuleRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type StopModuleResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} }
+func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StopModuleResponse) ProtoMessage() {}
+
+type GetHostnameRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} }
+func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameRequest) ProtoMessage() {}
+
+func (m *GetHostnameRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *GetHostnameRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+func (m *GetHostnameRequest) GetInstance() string {
+ if m != nil && m.Instance != nil {
+ return *m.Instance
+ }
+ return ""
+}
+
+type GetHostnameResponse struct {
+ Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} }
+func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameResponse) ProtoMessage() {}
+
+func (m *GetHostnameResponse) GetHostname() string {
+ if m != nil && m.Hostname != nil {
+ return *m.Hostname
+ }
+ return ""
+}
+
+func init() {
+}
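// Illustrative sketch (not part of this commit): the getters generated above
// follow the proto2 convention that optional scalar fields are pointers and
// unset fields fall back to their zero value. proto.String is the pointer
// helper from github.com/golang/protobuf/proto. Because the generated package
// lives under google.golang.org/appengine/internal, this snippet only compiles
// from within that tree; it is shown purely to illustrate the getter semantics.

package modules_example // hypothetical name, for illustration only

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/appengine/internal/modules"
)

func describeRequest() {
	// Only Module is set; Version stays nil.
	req := &pb.GetNumInstancesRequest{
		Module: proto.String("default"),
	}
	fmt.Println(req.GetModule())  // "default"
	fmt.Println(req.GetVersion()) // "" (nil field -> zero value)

	// The getters are also safe on a nil receiver.
	var nilReq *pb.GetNumInstancesRequest
	fmt.Println(nilReq.GetModule() == "") // true
}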
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
new file mode 100644
index 0000000..d29f006
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
@@ -0,0 +1,80 @@
+syntax = "proto2";
+option go_package = "modules";
+
+package appengine;
+
+message ModulesServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_MODULE = 1;
+ INVALID_VERSION = 2;
+ INVALID_INSTANCES = 3;
+ TRANSIENT_ERROR = 4;
+ UNEXPECTED_STATE = 5;
+ }
+}
+
+message GetModulesRequest {
+}
+
+message GetModulesResponse {
+ repeated string module = 1;
+}
+
+message GetVersionsRequest {
+ optional string module = 1;
+}
+
+message GetVersionsResponse {
+ repeated string version = 1;
+}
+
+message GetDefaultVersionRequest {
+ optional string module = 1;
+}
+
+message GetDefaultVersionResponse {
+ required string version = 1;
+}
+
+message GetNumInstancesRequest {
+ optional string module = 1;
+ optional string version = 2;
+}
+
+message GetNumInstancesResponse {
+ required int64 instances = 1;
+}
+
+message SetNumInstancesRequest {
+ optional string module = 1;
+ optional string version = 2;
+ required int64 instances = 3;
+}
+
+message SetNumInstancesResponse {}
+
+message StartModuleRequest {
+ required string module = 1;
+ required string version = 2;
+}
+
+message StartModuleResponse {}
+
+message StopModuleRequest {
+ optional string module = 1;
+ optional string version = 2;
+}
+
+message StopModuleResponse {}
+
+message GetHostnameRequest {
+ optional string module = 1;
+ optional string version = 2;
+ optional string instance = 3;
+}
+
+message GetHostnameResponse {
+ required string hostname = 1;
+}
+
diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go
new file mode 100644
index 0000000..3b94cf0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/net.go
@@ -0,0 +1,56 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements a network dialer that limits the number of concurrent connections.
+// It is only used for API calls.
+
+import (
+ "log"
+ "net"
+ "runtime"
+ "sync"
+ "time"
+)
+
+var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
+
+func limitRelease() {
+ // non-blocking
+ select {
+ case <-limitSem:
+ default:
+ // This should not normally happen.
+ log.Print("appengine: unbalanced limitSem release!")
+ }
+}
+
+func limitDial(network, addr string) (net.Conn, error) {
+ limitSem <- 1
+
+ // Dial with a timeout in case the API host is MIA.
+ // The connection should normally be very fast.
+ conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
+ if err != nil {
+ limitRelease()
+ return nil, err
+ }
+ lc := &limitConn{Conn: conn}
+ runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
+ return lc, nil
+}
+
+type limitConn struct {
+ close sync.Once
+ net.Conn
+}
+
+func (lc *limitConn) Close() error {
+ defer lc.close.Do(func() {
+ limitRelease()
+ runtime.SetFinalizer(lc, nil)
+ })
+ return lc.Conn.Close()
+}
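// Illustrative sketch (not part of this commit): limitDial has the same
// signature as http.Transport's legacy Dial hook, which is the usual way a
// capped dialer like the one above is consumed. The actual call site is
// elsewhere in the appengine internals; this wiring is an assumption for
// illustration, and because limitDial is unexported it would have to live in
// package internal.

package internal

import "net/http"

// newLimitedClient is a hypothetical helper: every outbound connection made
// through the returned client first takes a slot from limitSem, so at most
// cap(limitSem) connections are open at once, and limitConn.Close (or its
// finalizer) gives the slot back.
func newLimitedClient() *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			Dial: limitDial,
		},
	}
}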
diff --git a/vendor/google.golang.org/appengine/internal/net_test.go b/vendor/google.golang.org/appengine/internal/net_test.go
new file mode 100644
index 0000000..24da8bb
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/net_test.go
@@ -0,0 +1,58 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "sync"
+ "testing"
+ "time"
+
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+)
+
+func TestDialLimit(t *testing.T) {
+ // Fill up semaphore with false acquisitions to permit only two TCP connections at a time.
+ // We don't replace limitSem because that results in a data race when net/http lazily closes connections.
+ nFake := cap(limitSem) - 2
+ for i := 0; i < nFake; i++ {
+ limitSem <- 1
+ }
+ defer func() {
+ for i := 0; i < nFake; i++ {
+ <-limitSem
+ }
+ }()
+
+ f, c, cleanup := setup() // setup is in api_test.go
+ defer cleanup()
+ f.hang = make(chan int)
+
+ // If we make two RunSlowly RPCs (which will wait for f.hang to be strobed),
+ // then the simple Non200 RPC should hang.
+ var wg sync.WaitGroup
+ wg.Add(2)
+ for i := 0; i < 2; i++ {
+ go func() {
+ defer wg.Done()
+ Call(toContext(c), "errors", "RunSlowly", &basepb.VoidProto{}, &basepb.VoidProto{})
+ }()
+ }
+ time.Sleep(50 * time.Millisecond) // let those two RPCs start
+
+ ctx, _ := netcontext.WithTimeout(toContext(c), 50*time.Millisecond)
+ err := Call(ctx, "errors", "Non200", &basepb.VoidProto{}, &basepb.VoidProto{})
+ if err != errTimeout {
+ t.Errorf("Non200 RPC returned with err %v, want errTimeout", err)
+ }
+
+ // Drain the two RunSlowly calls.
+ f.hang <- 1
+ f.hang <- 1
+ wg.Wait()
+}
diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh
new file mode 100755
index 0000000..2fdb546
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/regen.sh
@@ -0,0 +1,40 @@
+#!/bin/bash -e
+#
+# This script rebuilds the generated code for the protocol buffers.
+# To run this you will need protoc and goprotobuf installed;
+# see https://github.com/golang/protobuf for instructions.
+
+PKG=google.golang.org/appengine
+
+function die() {
+ echo 1>&2 $*
+ exit 1
+}
+
+# Sanity check that the right tools are accessible.
+for tool in go protoc protoc-gen-go; do
+ q=$(which $tool) || die "didn't find $tool"
+ echo 1>&2 "$tool: $q"
+done
+
+echo -n 1>&2 "finding package dir... "
+pkgdir=$(go list -f '{{.Dir}}' $PKG)
+echo 1>&2 $pkgdir
+base=$(echo $pkgdir | sed "s,/$PKG\$,,")
+echo 1>&2 "base: $base"
+cd $base
+
+# Run protoc once per package.
+for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
+ echo 1>&2 "* $dir"
+ protoc --go_out=. $dir/*.proto
+done
+
+for f in $(find $PKG/internal -name '*.pb.go'); do
+ # Remove proto.RegisterEnum calls.
+ # These cause duplicate registration panics when these packages
+ # are used on classic App Engine. proto.RegisterEnum only affects
+ # parsing the text format; we don't care about that.
+ # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
+ sed -i '/proto.RegisterEnum/d' $f
+done
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
new file mode 100644
index 0000000..526bd39
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
@@ -0,0 +1,231 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
+// DO NOT EDIT!
+
+/*
+Package remote_api is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/remote_api/remote_api.proto
+
+It has these top-level messages:
+ Request
+ ApplicationError
+ RpcError
+ Response
+*/
+package remote_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type RpcError_ErrorCode int32
+
+const (
+ RpcError_UNKNOWN RpcError_ErrorCode = 0
+ RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1
+ RpcError_PARSE_ERROR RpcError_ErrorCode = 2
+ RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3
+ RpcError_OVER_QUOTA RpcError_ErrorCode = 4
+ RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5
+ RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
+ RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7
+ RpcError_BAD_REQUEST RpcError_ErrorCode = 8
+ RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9
+ RpcError_CANCELLED RpcError_ErrorCode = 10
+ RpcError_REPLAY_ERROR RpcError_ErrorCode = 11
+ RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12
+)
+
+var RpcError_ErrorCode_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "CALL_NOT_FOUND",
+ 2: "PARSE_ERROR",
+ 3: "SECURITY_VIOLATION",
+ 4: "OVER_QUOTA",
+ 5: "REQUEST_TOO_LARGE",
+ 6: "CAPABILITY_DISABLED",
+ 7: "FEATURE_DISABLED",
+ 8: "BAD_REQUEST",
+ 9: "RESPONSE_TOO_LARGE",
+ 10: "CANCELLED",
+ 11: "REPLAY_ERROR",
+ 12: "DEADLINE_EXCEEDED",
+}
+var RpcError_ErrorCode_value = map[string]int32{
+ "UNKNOWN": 0,
+ "CALL_NOT_FOUND": 1,
+ "PARSE_ERROR": 2,
+ "SECURITY_VIOLATION": 3,
+ "OVER_QUOTA": 4,
+ "REQUEST_TOO_LARGE": 5,
+ "CAPABILITY_DISABLED": 6,
+ "FEATURE_DISABLED": 7,
+ "BAD_REQUEST": 8,
+ "RESPONSE_TOO_LARGE": 9,
+ "CANCELLED": 10,
+ "REPLAY_ERROR": 11,
+ "DEADLINE_EXCEEDED": 12,
+}
+
+func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
+ p := new(RpcError_ErrorCode)
+ *p = x
+ return p
+}
+func (x RpcError_ErrorCode) String() string {
+ return proto.EnumName(RpcError_ErrorCode_name, int32(x))
+}
+func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = RpcError_ErrorCode(value)
+ return nil
+}
+
+type Request struct {
+ ServiceName *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"`
+ Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
+ Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
+ RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+
+func (m *Request) GetServiceName() string {
+ if m != nil && m.ServiceName != nil {
+ return *m.ServiceName
+ }
+ return ""
+}
+
+func (m *Request) GetMethod() string {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return ""
+}
+
+func (m *Request) GetRequest() []byte {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+func (m *Request) GetRequestId() string {
+ if m != nil && m.RequestId != nil {
+ return *m.RequestId
+ }
+ return ""
+}
+
+type ApplicationError struct {
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ApplicationError) Reset() { *m = ApplicationError{} }
+func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
+func (*ApplicationError) ProtoMessage() {}
+
+func (m *ApplicationError) GetCode() int32 {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return 0
+}
+
+func (m *ApplicationError) GetDetail() string {
+ if m != nil && m.Detail != nil {
+ return *m.Detail
+ }
+ return ""
+}
+
+type RpcError struct {
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RpcError) Reset() { *m = RpcError{} }
+func (m *RpcError) String() string { return proto.CompactTextString(m) }
+func (*RpcError) ProtoMessage() {}
+
+func (m *RpcError) GetCode() int32 {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return 0
+}
+
+func (m *RpcError) GetDetail() string {
+ if m != nil && m.Detail != nil {
+ return *m.Detail
+ }
+ return ""
+}
+
+type Response struct {
+ Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
+ Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
+ ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"`
+ JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"`
+ RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Response) Reset() { *m = Response{} }
+func (m *Response) String() string { return proto.CompactTextString(m) }
+func (*Response) ProtoMessage() {}
+
+func (m *Response) GetResponse() []byte {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (m *Response) GetException() []byte {
+ if m != nil {
+ return m.Exception
+ }
+ return nil
+}
+
+func (m *Response) GetApplicationError() *ApplicationError {
+ if m != nil {
+ return m.ApplicationError
+ }
+ return nil
+}
+
+func (m *Response) GetJavaException() []byte {
+ if m != nil {
+ return m.JavaException
+ }
+ return nil
+}
+
+func (m *Response) GetRpcError() *RpcError {
+ if m != nil {
+ return m.RpcError
+ }
+ return nil
+}
+
+func init() {
+}
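// Illustrative sketch (not part of this commit): building a remote_api.Request
// with the generated types above and decoding a Response, using
// proto.Marshal/proto.Unmarshal and the proto.String pointer helper from
// github.com/golang/protobuf/proto. The service and method names are example
// values, and since the package sits under google.golang.org/appengine/internal
// this only compiles from inside that tree.

package remoteapi_example // hypothetical name, for illustration only

import (
	"github.com/golang/protobuf/proto"
	pb "google.golang.org/appengine/internal/remote_api"
)

// encodeRequest wraps an already-serialized service payload in the remote API
// envelope. service_name, method and request are all `required` fields in the
// proto above, so they must be set before marshalling.
func encodeRequest(service, method string, payload []byte) ([]byte, error) {
	req := &pb.Request{
		ServiceName: proto.String(service),
		Method:      proto.String(method),
		Request:     payload,
	}
	return proto.Marshal(req)
}

// decodeResponse parses the reply envelope; RpcError and ApplicationError are
// both optional, so callers check the getters for nil.
func decodeResponse(b []byte) (*pb.Response, error) {
	res := &pb.Response{}
	if err := proto.Unmarshal(b, res); err != nil {
		return nil, err
	}
	return res, nil
}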
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
new file mode 100644
index 0000000..f21763a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
@@ -0,0 +1,44 @@
+syntax = "proto2";
+option go_package = "remote_api";
+
+package remote_api;
+
+message Request {
+ required string service_name = 2;
+ required string method = 3;
+ required bytes request = 4;
+ optional string request_id = 5;
+}
+
+message ApplicationError {
+ required int32 code = 1;
+ required string detail = 2;
+}
+
+message RpcError {
+ enum ErrorCode {
+ UNKNOWN = 0;
+ CALL_NOT_FOUND = 1;
+ PARSE_ERROR = 2;
+ SECURITY_VIOLATION = 3;
+ OVER_QUOTA = 4;
+ REQUEST_TOO_LARGE = 5;
+ CAPABILITY_DISABLED = 6;
+ FEATURE_DISABLED = 7;
+ BAD_REQUEST = 8;
+ RESPONSE_TOO_LARGE = 9;
+ CANCELLED = 10;
+ REPLAY_ERROR = 11;
+ DEADLINE_EXCEEDED = 12;
+ }
+ required int32 code = 1;
+ optional string detail = 2;
+}
+
+message Response {
+ optional bytes response = 1;
+ optional bytes exception = 2;
+ optional ApplicationError application_error = 3;
+ optional bytes java_exception = 4;
+ optional RpcError rpc_error = 5;
+}
diff --git a/vendor/google.golang.org/appengine/internal/search/search.pb.go b/vendor/google.golang.org/appengine/internal/search/search.pb.go
new file mode 100644
index 0000000..7d8d11d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/search/search.pb.go
@@ -0,0 +1,2127 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/search/search.proto
+// DO NOT EDIT!
+
+/*
+Package search is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/search/search.proto
+
+It has these top-level messages:
+ Scope
+ Entry
+ AccessControlList
+ FieldValue
+ Field
+ FieldTypes
+ IndexShardSettings
+ FacetValue
+ Facet
+ DocumentMetadata
+ Document
+ SearchServiceError
+ RequestStatus
+ IndexSpec
+ IndexMetadata
+ IndexDocumentParams
+ IndexDocumentRequest
+ IndexDocumentResponse
+ DeleteDocumentParams
+ DeleteDocumentRequest
+ DeleteDocumentResponse
+ ListDocumentsParams
+ ListDocumentsRequest
+ ListDocumentsResponse
+ ListIndexesParams
+ ListIndexesRequest
+ ListIndexesResponse
+ DeleteSchemaParams
+ DeleteSchemaRequest
+ DeleteSchemaResponse
+ SortSpec
+ ScorerSpec
+ FieldSpec
+ FacetRange
+ FacetRequestParam
+ FacetAutoDetectParam
+ FacetRequest
+ FacetRefinement
+ SearchParams
+ SearchRequest
+ FacetResultValue
+ FacetResult
+ SearchResult
+ SearchResponse
+*/
+package search
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type Scope_Type int32
+
+const (
+ Scope_USER_BY_CANONICAL_ID Scope_Type = 1
+ Scope_USER_BY_EMAIL Scope_Type = 2
+ Scope_GROUP_BY_CANONICAL_ID Scope_Type = 3
+ Scope_GROUP_BY_EMAIL Scope_Type = 4
+ Scope_GROUP_BY_DOMAIN Scope_Type = 5
+ Scope_ALL_USERS Scope_Type = 6
+ Scope_ALL_AUTHENTICATED_USERS Scope_Type = 7
+)
+
+var Scope_Type_name = map[int32]string{
+ 1: "USER_BY_CANONICAL_ID",
+ 2: "USER_BY_EMAIL",
+ 3: "GROUP_BY_CANONICAL_ID",
+ 4: "GROUP_BY_EMAIL",
+ 5: "GROUP_BY_DOMAIN",
+ 6: "ALL_USERS",
+ 7: "ALL_AUTHENTICATED_USERS",
+}
+var Scope_Type_value = map[string]int32{
+ "USER_BY_CANONICAL_ID": 1,
+ "USER_BY_EMAIL": 2,
+ "GROUP_BY_CANONICAL_ID": 3,
+ "GROUP_BY_EMAIL": 4,
+ "GROUP_BY_DOMAIN": 5,
+ "ALL_USERS": 6,
+ "ALL_AUTHENTICATED_USERS": 7,
+}
+
+func (x Scope_Type) Enum() *Scope_Type {
+ p := new(Scope_Type)
+ *p = x
+ return p
+}
+func (x Scope_Type) String() string {
+ return proto.EnumName(Scope_Type_name, int32(x))
+}
+func (x *Scope_Type) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Scope_Type_value, data, "Scope_Type")
+ if err != nil {
+ return err
+ }
+ *x = Scope_Type(value)
+ return nil
+}
+
+type Entry_Permission int32
+
+const (
+ Entry_READ Entry_Permission = 1
+ Entry_WRITE Entry_Permission = 2
+ Entry_FULL_CONTROL Entry_Permission = 3
+)
+
+var Entry_Permission_name = map[int32]string{
+ 1: "READ",
+ 2: "WRITE",
+ 3: "FULL_CONTROL",
+}
+var Entry_Permission_value = map[string]int32{
+ "READ": 1,
+ "WRITE": 2,
+ "FULL_CONTROL": 3,
+}
+
+func (x Entry_Permission) Enum() *Entry_Permission {
+ p := new(Entry_Permission)
+ *p = x
+ return p
+}
+func (x Entry_Permission) String() string {
+ return proto.EnumName(Entry_Permission_name, int32(x))
+}
+func (x *Entry_Permission) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Entry_Permission_value, data, "Entry_Permission")
+ if err != nil {
+ return err
+ }
+ *x = Entry_Permission(value)
+ return nil
+}
+
+type FieldValue_ContentType int32
+
+const (
+ FieldValue_TEXT FieldValue_ContentType = 0
+ FieldValue_HTML FieldValue_ContentType = 1
+ FieldValue_ATOM FieldValue_ContentType = 2
+ FieldValue_DATE FieldValue_ContentType = 3
+ FieldValue_NUMBER FieldValue_ContentType = 4
+ FieldValue_GEO FieldValue_ContentType = 5
+)
+
+var FieldValue_ContentType_name = map[int32]string{
+ 0: "TEXT",
+ 1: "HTML",
+ 2: "ATOM",
+ 3: "DATE",
+ 4: "NUMBER",
+ 5: "GEO",
+}
+var FieldValue_ContentType_value = map[string]int32{
+ "TEXT": 0,
+ "HTML": 1,
+ "ATOM": 2,
+ "DATE": 3,
+ "NUMBER": 4,
+ "GEO": 5,
+}
+
+func (x FieldValue_ContentType) Enum() *FieldValue_ContentType {
+ p := new(FieldValue_ContentType)
+ *p = x
+ return p
+}
+func (x FieldValue_ContentType) String() string {
+ return proto.EnumName(FieldValue_ContentType_name, int32(x))
+}
+func (x *FieldValue_ContentType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldValue_ContentType_value, data, "FieldValue_ContentType")
+ if err != nil {
+ return err
+ }
+ *x = FieldValue_ContentType(value)
+ return nil
+}
+
+type FacetValue_ContentType int32
+
+const (
+ FacetValue_ATOM FacetValue_ContentType = 2
+ FacetValue_NUMBER FacetValue_ContentType = 4
+)
+
+var FacetValue_ContentType_name = map[int32]string{
+ 2: "ATOM",
+ 4: "NUMBER",
+}
+var FacetValue_ContentType_value = map[string]int32{
+ "ATOM": 2,
+ "NUMBER": 4,
+}
+
+func (x FacetValue_ContentType) Enum() *FacetValue_ContentType {
+ p := new(FacetValue_ContentType)
+ *p = x
+ return p
+}
+func (x FacetValue_ContentType) String() string {
+ return proto.EnumName(FacetValue_ContentType_name, int32(x))
+}
+func (x *FacetValue_ContentType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FacetValue_ContentType_value, data, "FacetValue_ContentType")
+ if err != nil {
+ return err
+ }
+ *x = FacetValue_ContentType(value)
+ return nil
+}
+
+type Document_Storage int32
+
+const (
+ Document_DISK Document_Storage = 0
+)
+
+var Document_Storage_name = map[int32]string{
+ 0: "DISK",
+}
+var Document_Storage_value = map[string]int32{
+ "DISK": 0,
+}
+
+func (x Document_Storage) Enum() *Document_Storage {
+ p := new(Document_Storage)
+ *p = x
+ return p
+}
+func (x Document_Storage) String() string {
+ return proto.EnumName(Document_Storage_name, int32(x))
+}
+func (x *Document_Storage) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Document_Storage_value, data, "Document_Storage")
+ if err != nil {
+ return err
+ }
+ *x = Document_Storage(value)
+ return nil
+}
+
+type SearchServiceError_ErrorCode int32
+
+const (
+ SearchServiceError_OK SearchServiceError_ErrorCode = 0
+ SearchServiceError_INVALID_REQUEST SearchServiceError_ErrorCode = 1
+ SearchServiceError_TRANSIENT_ERROR SearchServiceError_ErrorCode = 2
+ SearchServiceError_INTERNAL_ERROR SearchServiceError_ErrorCode = 3
+ SearchServiceError_PERMISSION_DENIED SearchServiceError_ErrorCode = 4
+ SearchServiceError_TIMEOUT SearchServiceError_ErrorCode = 5
+ SearchServiceError_CONCURRENT_TRANSACTION SearchServiceError_ErrorCode = 6
+)
+
+var SearchServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_REQUEST",
+ 2: "TRANSIENT_ERROR",
+ 3: "INTERNAL_ERROR",
+ 4: "PERMISSION_DENIED",
+ 5: "TIMEOUT",
+ 6: "CONCURRENT_TRANSACTION",
+}
+var SearchServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_REQUEST": 1,
+ "TRANSIENT_ERROR": 2,
+ "INTERNAL_ERROR": 3,
+ "PERMISSION_DENIED": 4,
+ "TIMEOUT": 5,
+ "CONCURRENT_TRANSACTION": 6,
+}
+
+func (x SearchServiceError_ErrorCode) Enum() *SearchServiceError_ErrorCode {
+ p := new(SearchServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x SearchServiceError_ErrorCode) String() string {
+ return proto.EnumName(SearchServiceError_ErrorCode_name, int32(x))
+}
+func (x *SearchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SearchServiceError_ErrorCode_value, data, "SearchServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = SearchServiceError_ErrorCode(value)
+ return nil
+}
+
+type IndexSpec_Consistency int32
+
+const (
+ IndexSpec_GLOBAL IndexSpec_Consistency = 0
+ IndexSpec_PER_DOCUMENT IndexSpec_Consistency = 1
+)
+
+var IndexSpec_Consistency_name = map[int32]string{
+ 0: "GLOBAL",
+ 1: "PER_DOCUMENT",
+}
+var IndexSpec_Consistency_value = map[string]int32{
+ "GLOBAL": 0,
+ "PER_DOCUMENT": 1,
+}
+
+func (x IndexSpec_Consistency) Enum() *IndexSpec_Consistency {
+ p := new(IndexSpec_Consistency)
+ *p = x
+ return p
+}
+func (x IndexSpec_Consistency) String() string {
+ return proto.EnumName(IndexSpec_Consistency_name, int32(x))
+}
+func (x *IndexSpec_Consistency) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexSpec_Consistency_value, data, "IndexSpec_Consistency")
+ if err != nil {
+ return err
+ }
+ *x = IndexSpec_Consistency(value)
+ return nil
+}
+
+type IndexSpec_Source int32
+
+const (
+ IndexSpec_SEARCH IndexSpec_Source = 0
+ IndexSpec_DATASTORE IndexSpec_Source = 1
+ IndexSpec_CLOUD_STORAGE IndexSpec_Source = 2
+)
+
+var IndexSpec_Source_name = map[int32]string{
+ 0: "SEARCH",
+ 1: "DATASTORE",
+ 2: "CLOUD_STORAGE",
+}
+var IndexSpec_Source_value = map[string]int32{
+ "SEARCH": 0,
+ "DATASTORE": 1,
+ "CLOUD_STORAGE": 2,
+}
+
+func (x IndexSpec_Source) Enum() *IndexSpec_Source {
+ p := new(IndexSpec_Source)
+ *p = x
+ return p
+}
+func (x IndexSpec_Source) String() string {
+ return proto.EnumName(IndexSpec_Source_name, int32(x))
+}
+func (x *IndexSpec_Source) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexSpec_Source_value, data, "IndexSpec_Source")
+ if err != nil {
+ return err
+ }
+ *x = IndexSpec_Source(value)
+ return nil
+}
+
+type IndexSpec_Mode int32
+
+const (
+ IndexSpec_PRIORITY IndexSpec_Mode = 0
+ IndexSpec_BACKGROUND IndexSpec_Mode = 1
+)
+
+var IndexSpec_Mode_name = map[int32]string{
+ 0: "PRIORITY",
+ 1: "BACKGROUND",
+}
+var IndexSpec_Mode_value = map[string]int32{
+ "PRIORITY": 0,
+ "BACKGROUND": 1,
+}
+
+func (x IndexSpec_Mode) Enum() *IndexSpec_Mode {
+ p := new(IndexSpec_Mode)
+ *p = x
+ return p
+}
+func (x IndexSpec_Mode) String() string {
+ return proto.EnumName(IndexSpec_Mode_name, int32(x))
+}
+func (x *IndexSpec_Mode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexSpec_Mode_value, data, "IndexSpec_Mode")
+ if err != nil {
+ return err
+ }
+ *x = IndexSpec_Mode(value)
+ return nil
+}
+
+type IndexDocumentParams_Freshness int32
+
+const (
+ IndexDocumentParams_SYNCHRONOUSLY IndexDocumentParams_Freshness = 0
+ IndexDocumentParams_WHEN_CONVENIENT IndexDocumentParams_Freshness = 1
+)
+
+var IndexDocumentParams_Freshness_name = map[int32]string{
+ 0: "SYNCHRONOUSLY",
+ 1: "WHEN_CONVENIENT",
+}
+var IndexDocumentParams_Freshness_value = map[string]int32{
+ "SYNCHRONOUSLY": 0,
+ "WHEN_CONVENIENT": 1,
+}
+
+func (x IndexDocumentParams_Freshness) Enum() *IndexDocumentParams_Freshness {
+ p := new(IndexDocumentParams_Freshness)
+ *p = x
+ return p
+}
+func (x IndexDocumentParams_Freshness) String() string {
+ return proto.EnumName(IndexDocumentParams_Freshness_name, int32(x))
+}
+func (x *IndexDocumentParams_Freshness) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexDocumentParams_Freshness_value, data, "IndexDocumentParams_Freshness")
+ if err != nil {
+ return err
+ }
+ *x = IndexDocumentParams_Freshness(value)
+ return nil
+}
+
+type ScorerSpec_Scorer int32
+
+const (
+ ScorerSpec_RESCORING_MATCH_SCORER ScorerSpec_Scorer = 0
+ ScorerSpec_MATCH_SCORER ScorerSpec_Scorer = 2
+)
+
+var ScorerSpec_Scorer_name = map[int32]string{
+ 0: "RESCORING_MATCH_SCORER",
+ 2: "MATCH_SCORER",
+}
+var ScorerSpec_Scorer_value = map[string]int32{
+ "RESCORING_MATCH_SCORER": 0,
+ "MATCH_SCORER": 2,
+}
+
+func (x ScorerSpec_Scorer) Enum() *ScorerSpec_Scorer {
+ p := new(ScorerSpec_Scorer)
+ *p = x
+ return p
+}
+func (x ScorerSpec_Scorer) String() string {
+ return proto.EnumName(ScorerSpec_Scorer_name, int32(x))
+}
+func (x *ScorerSpec_Scorer) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ScorerSpec_Scorer_value, data, "ScorerSpec_Scorer")
+ if err != nil {
+ return err
+ }
+ *x = ScorerSpec_Scorer(value)
+ return nil
+}
+
+type SearchParams_CursorType int32
+
+const (
+ SearchParams_NONE SearchParams_CursorType = 0
+ SearchParams_SINGLE SearchParams_CursorType = 1
+ SearchParams_PER_RESULT SearchParams_CursorType = 2
+)
+
+var SearchParams_CursorType_name = map[int32]string{
+ 0: "NONE",
+ 1: "SINGLE",
+ 2: "PER_RESULT",
+}
+var SearchParams_CursorType_value = map[string]int32{
+ "NONE": 0,
+ "SINGLE": 1,
+ "PER_RESULT": 2,
+}
+
+func (x SearchParams_CursorType) Enum() *SearchParams_CursorType {
+ p := new(SearchParams_CursorType)
+ *p = x
+ return p
+}
+func (x SearchParams_CursorType) String() string {
+ return proto.EnumName(SearchParams_CursorType_name, int32(x))
+}
+func (x *SearchParams_CursorType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SearchParams_CursorType_value, data, "SearchParams_CursorType")
+ if err != nil {
+ return err
+ }
+ *x = SearchParams_CursorType(value)
+ return nil
+}
+
+type SearchParams_ParsingMode int32
+
+const (
+ SearchParams_STRICT SearchParams_ParsingMode = 0
+ SearchParams_RELAXED SearchParams_ParsingMode = 1
+)
+
+var SearchParams_ParsingMode_name = map[int32]string{
+ 0: "STRICT",
+ 1: "RELAXED",
+}
+var SearchParams_ParsingMode_value = map[string]int32{
+ "STRICT": 0,
+ "RELAXED": 1,
+}
+
+func (x SearchParams_ParsingMode) Enum() *SearchParams_ParsingMode {
+ p := new(SearchParams_ParsingMode)
+ *p = x
+ return p
+}
+func (x SearchParams_ParsingMode) String() string {
+ return proto.EnumName(SearchParams_ParsingMode_name, int32(x))
+}
+func (x *SearchParams_ParsingMode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SearchParams_ParsingMode_value, data, "SearchParams_ParsingMode")
+ if err != nil {
+ return err
+ }
+ *x = SearchParams_ParsingMode(value)
+ return nil
+}
+
+type Scope struct {
+ Type *Scope_Type `protobuf:"varint,1,opt,name=type,enum=search.Scope_Type" json:"type,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Scope) Reset() { *m = Scope{} }
+func (m *Scope) String() string { return proto.CompactTextString(m) }
+func (*Scope) ProtoMessage() {}
+
+func (m *Scope) GetType() Scope_Type {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Scope_USER_BY_CANONICAL_ID
+}
+
+func (m *Scope) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Entry struct {
+ Scope *Scope `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"`
+ Permission *Entry_Permission `protobuf:"varint,2,opt,name=permission,enum=search.Entry_Permission" json:"permission,omitempty"`
+ DisplayName *string `protobuf:"bytes,3,opt,name=display_name" json:"display_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Entry) Reset() { *m = Entry{} }
+func (m *Entry) String() string { return proto.CompactTextString(m) }
+func (*Entry) ProtoMessage() {}
+
+func (m *Entry) GetScope() *Scope {
+ if m != nil {
+ return m.Scope
+ }
+ return nil
+}
+
+func (m *Entry) GetPermission() Entry_Permission {
+ if m != nil && m.Permission != nil {
+ return *m.Permission
+ }
+ return Entry_READ
+}
+
+func (m *Entry) GetDisplayName() string {
+ if m != nil && m.DisplayName != nil {
+ return *m.DisplayName
+ }
+ return ""
+}
+
+type AccessControlList struct {
+ Owner *string `protobuf:"bytes,1,opt,name=owner" json:"owner,omitempty"`
+ Entries []*Entry `protobuf:"bytes,2,rep,name=entries" json:"entries,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AccessControlList) Reset() { *m = AccessControlList{} }
+func (m *AccessControlList) String() string { return proto.CompactTextString(m) }
+func (*AccessControlList) ProtoMessage() {}
+
+func (m *AccessControlList) GetOwner() string {
+ if m != nil && m.Owner != nil {
+ return *m.Owner
+ }
+ return ""
+}
+
+func (m *AccessControlList) GetEntries() []*Entry {
+ if m != nil {
+ return m.Entries
+ }
+ return nil
+}
+
+type FieldValue struct {
+ Type *FieldValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FieldValue_ContentType,def=0" json:"type,omitempty"`
+ Language *string `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=string_value" json:"string_value,omitempty"`
+ Geo *FieldValue_Geo `protobuf:"group,4,opt,name=Geo" json:"geo,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldValue) Reset() { *m = FieldValue{} }
+func (m *FieldValue) String() string { return proto.CompactTextString(m) }
+func (*FieldValue) ProtoMessage() {}
+
+const Default_FieldValue_Type FieldValue_ContentType = FieldValue_TEXT
+const Default_FieldValue_Language string = "en"
+
+func (m *FieldValue) GetType() FieldValue_ContentType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_FieldValue_Type
+}
+
+func (m *FieldValue) GetLanguage() string {
+ if m != nil && m.Language != nil {
+ return *m.Language
+ }
+ return Default_FieldValue_Language
+}
+
+func (m *FieldValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+func (m *FieldValue) GetGeo() *FieldValue_Geo {
+ if m != nil {
+ return m.Geo
+ }
+ return nil
+}
+
+type FieldValue_Geo struct {
+ Lat *float64 `protobuf:"fixed64,5,req,name=lat" json:"lat,omitempty"`
+ Lng *float64 `protobuf:"fixed64,6,req,name=lng" json:"lng,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldValue_Geo) Reset() { *m = FieldValue_Geo{} }
+func (m *FieldValue_Geo) String() string { return proto.CompactTextString(m) }
+func (*FieldValue_Geo) ProtoMessage() {}
+
+func (m *FieldValue_Geo) GetLat() float64 {
+ if m != nil && m.Lat != nil {
+ return *m.Lat
+ }
+ return 0
+}
+
+func (m *FieldValue_Geo) GetLng() float64 {
+ if m != nil && m.Lng != nil {
+ return *m.Lng
+ }
+ return 0
+}
+
+type Field struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *FieldValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Field) Reset() { *m = Field{} }
+func (m *Field) String() string { return proto.CompactTextString(m) }
+func (*Field) ProtoMessage() {}
+
+func (m *Field) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Field) GetValue() *FieldValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type FieldTypes struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Type []FieldValue_ContentType `protobuf:"varint,2,rep,name=type,enum=search.FieldValue_ContentType" json:"type,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldTypes) Reset() { *m = FieldTypes{} }
+func (m *FieldTypes) String() string { return proto.CompactTextString(m) }
+func (*FieldTypes) ProtoMessage() {}
+
+func (m *FieldTypes) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FieldTypes) GetType() []FieldValue_ContentType {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+type IndexShardSettings struct {
+ PrevNumShards []int32 `protobuf:"varint,1,rep,name=prev_num_shards" json:"prev_num_shards,omitempty"`
+ NumShards *int32 `protobuf:"varint,2,req,name=num_shards,def=1" json:"num_shards,omitempty"`
+ PrevNumShardsSearchFalse []int32 `protobuf:"varint,3,rep,name=prev_num_shards_search_false" json:"prev_num_shards_search_false,omitempty"`
+ LocalReplica *string `protobuf:"bytes,4,opt,name=local_replica,def=" json:"local_replica,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexShardSettings) Reset() { *m = IndexShardSettings{} }
+func (m *IndexShardSettings) String() string { return proto.CompactTextString(m) }
+func (*IndexShardSettings) ProtoMessage() {}
+
+const Default_IndexShardSettings_NumShards int32 = 1
+
+func (m *IndexShardSettings) GetPrevNumShards() []int32 {
+ if m != nil {
+ return m.PrevNumShards
+ }
+ return nil
+}
+
+func (m *IndexShardSettings) GetNumShards() int32 {
+ if m != nil && m.NumShards != nil {
+ return *m.NumShards
+ }
+ return Default_IndexShardSettings_NumShards
+}
+
+func (m *IndexShardSettings) GetPrevNumShardsSearchFalse() []int32 {
+ if m != nil {
+ return m.PrevNumShardsSearchFalse
+ }
+ return nil
+}
+
+func (m *IndexShardSettings) GetLocalReplica() string {
+ if m != nil && m.LocalReplica != nil {
+ return *m.LocalReplica
+ }
+ return ""
+}
+
+type FacetValue struct {
+ Type *FacetValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FacetValue_ContentType,def=2" json:"type,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=string_value" json:"string_value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetValue) Reset() { *m = FacetValue{} }
+func (m *FacetValue) String() string { return proto.CompactTextString(m) }
+func (*FacetValue) ProtoMessage() {}
+
+const Default_FacetValue_Type FacetValue_ContentType = FacetValue_ATOM
+
+func (m *FacetValue) GetType() FacetValue_ContentType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_FacetValue_Type
+}
+
+func (m *FacetValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+type Facet struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *FacetValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Facet) Reset() { *m = Facet{} }
+func (m *Facet) String() string { return proto.CompactTextString(m) }
+func (*Facet) ProtoMessage() {}
+
+func (m *Facet) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Facet) GetValue() *FacetValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type DocumentMetadata struct {
+ Version *int64 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
+ CommittedStVersion *int64 `protobuf:"varint,2,opt,name=committed_st_version" json:"committed_st_version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DocumentMetadata) Reset() { *m = DocumentMetadata{} }
+func (m *DocumentMetadata) String() string { return proto.CompactTextString(m) }
+func (*DocumentMetadata) ProtoMessage() {}
+
+func (m *DocumentMetadata) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func (m *DocumentMetadata) GetCommittedStVersion() int64 {
+ if m != nil && m.CommittedStVersion != nil {
+ return *m.CommittedStVersion
+ }
+ return 0
+}
+
+type Document struct {
+ Id *string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+ Language *string `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"`
+ Field []*Field `protobuf:"bytes,3,rep,name=field" json:"field,omitempty"`
+ OrderId *int32 `protobuf:"varint,4,opt,name=order_id" json:"order_id,omitempty"`
+ Storage *Document_Storage `protobuf:"varint,5,opt,name=storage,enum=search.Document_Storage,def=0" json:"storage,omitempty"`
+ Facet []*Facet `protobuf:"bytes,8,rep,name=facet" json:"facet,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Document) Reset() { *m = Document{} }
+func (m *Document) String() string { return proto.CompactTextString(m) }
+func (*Document) ProtoMessage() {}
+
+const Default_Document_Language string = "en"
+const Default_Document_Storage Document_Storage = Document_DISK
+
+func (m *Document) GetId() string {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return ""
+}
+
+func (m *Document) GetLanguage() string {
+ if m != nil && m.Language != nil {
+ return *m.Language
+ }
+ return Default_Document_Language
+}
+
+func (m *Document) GetField() []*Field {
+ if m != nil {
+ return m.Field
+ }
+ return nil
+}
+
+func (m *Document) GetOrderId() int32 {
+ if m != nil && m.OrderId != nil {
+ return *m.OrderId
+ }
+ return 0
+}
+
+func (m *Document) GetStorage() Document_Storage {
+ if m != nil && m.Storage != nil {
+ return *m.Storage
+ }
+ return Default_Document_Storage
+}
+
+func (m *Document) GetFacet() []*Facet {
+ if m != nil {
+ return m.Facet
+ }
+ return nil
+}
+
+type SearchServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchServiceError) Reset() { *m = SearchServiceError{} }
+func (m *SearchServiceError) String() string { return proto.CompactTextString(m) }
+func (*SearchServiceError) ProtoMessage() {}
+
+type RequestStatus struct {
+ Code *SearchServiceError_ErrorCode `protobuf:"varint,1,req,name=code,enum=search.SearchServiceError_ErrorCode" json:"code,omitempty"`
+ ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail" json:"error_detail,omitempty"`
+ CanonicalCode *int32 `protobuf:"varint,3,opt,name=canonical_code" json:"canonical_code,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RequestStatus) Reset() { *m = RequestStatus{} }
+func (m *RequestStatus) String() string { return proto.CompactTextString(m) }
+func (*RequestStatus) ProtoMessage() {}
+
+func (m *RequestStatus) GetCode() SearchServiceError_ErrorCode {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return SearchServiceError_OK
+}
+
+func (m *RequestStatus) GetErrorDetail() string {
+ if m != nil && m.ErrorDetail != nil {
+ return *m.ErrorDetail
+ }
+ return ""
+}
+
+func (m *RequestStatus) GetCanonicalCode() int32 {
+ if m != nil && m.CanonicalCode != nil {
+ return *m.CanonicalCode
+ }
+ return 0
+}
+
+type IndexSpec struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Consistency *IndexSpec_Consistency `protobuf:"varint,2,opt,name=consistency,enum=search.IndexSpec_Consistency,def=1" json:"consistency,omitempty"`
+ Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"`
+ Version *int32 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"`
+ Source *IndexSpec_Source `protobuf:"varint,5,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+ Mode *IndexSpec_Mode `protobuf:"varint,6,opt,name=mode,enum=search.IndexSpec_Mode,def=0" json:"mode,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexSpec) Reset() { *m = IndexSpec{} }
+func (m *IndexSpec) String() string { return proto.CompactTextString(m) }
+func (*IndexSpec) ProtoMessage() {}
+
+const Default_IndexSpec_Consistency IndexSpec_Consistency = IndexSpec_PER_DOCUMENT
+const Default_IndexSpec_Source IndexSpec_Source = IndexSpec_SEARCH
+const Default_IndexSpec_Mode IndexSpec_Mode = IndexSpec_PRIORITY
+
+func (m *IndexSpec) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *IndexSpec) GetConsistency() IndexSpec_Consistency {
+ if m != nil && m.Consistency != nil {
+ return *m.Consistency
+ }
+ return Default_IndexSpec_Consistency
+}
+
+func (m *IndexSpec) GetNamespace() string {
+ if m != nil && m.Namespace != nil {
+ return *m.Namespace
+ }
+ return ""
+}
+
+func (m *IndexSpec) GetVersion() int32 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func (m *IndexSpec) GetSource() IndexSpec_Source {
+ if m != nil && m.Source != nil {
+ return *m.Source
+ }
+ return Default_IndexSpec_Source
+}
+
+func (m *IndexSpec) GetMode() IndexSpec_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_IndexSpec_Mode
+}
+
+type IndexMetadata struct {
+ IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"`
+ Field []*FieldTypes `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
+ Storage *IndexMetadata_Storage `protobuf:"bytes,3,opt,name=storage" json:"storage,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexMetadata) Reset() { *m = IndexMetadata{} }
+func (m *IndexMetadata) String() string { return proto.CompactTextString(m) }
+func (*IndexMetadata) ProtoMessage() {}
+
+func (m *IndexMetadata) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+func (m *IndexMetadata) GetField() []*FieldTypes {
+ if m != nil {
+ return m.Field
+ }
+ return nil
+}
+
+func (m *IndexMetadata) GetStorage() *IndexMetadata_Storage {
+ if m != nil {
+ return m.Storage
+ }
+ return nil
+}
+
+type IndexMetadata_Storage struct {
+ AmountUsed *int64 `protobuf:"varint,1,opt,name=amount_used" json:"amount_used,omitempty"`
+ Limit *int64 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexMetadata_Storage) Reset() { *m = IndexMetadata_Storage{} }
+func (m *IndexMetadata_Storage) String() string { return proto.CompactTextString(m) }
+func (*IndexMetadata_Storage) ProtoMessage() {}
+
+func (m *IndexMetadata_Storage) GetAmountUsed() int64 {
+ if m != nil && m.AmountUsed != nil {
+ return *m.AmountUsed
+ }
+ return 0
+}
+
+func (m *IndexMetadata_Storage) GetLimit() int64 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+type IndexDocumentParams struct {
+ Document []*Document `protobuf:"bytes,1,rep,name=document" json:"document,omitempty"`
+ Freshness *IndexDocumentParams_Freshness `protobuf:"varint,2,opt,name=freshness,enum=search.IndexDocumentParams_Freshness,def=0" json:"freshness,omitempty"`
+ IndexSpec *IndexSpec `protobuf:"bytes,3,req,name=index_spec" json:"index_spec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexDocumentParams) Reset() { *m = IndexDocumentParams{} }
+func (m *IndexDocumentParams) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentParams) ProtoMessage() {}
+
+const Default_IndexDocumentParams_Freshness IndexDocumentParams_Freshness = IndexDocumentParams_SYNCHRONOUSLY
+
+func (m *IndexDocumentParams) GetDocument() []*Document {
+ if m != nil {
+ return m.Document
+ }
+ return nil
+}
+
+func (m *IndexDocumentParams) GetFreshness() IndexDocumentParams_Freshness {
+ if m != nil && m.Freshness != nil {
+ return *m.Freshness
+ }
+ return Default_IndexDocumentParams_Freshness
+}
+
+func (m *IndexDocumentParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+type IndexDocumentRequest struct {
+ Params *IndexDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} }
+func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentRequest) ProtoMessage() {}
+
+func (m *IndexDocumentRequest) GetParams() *IndexDocumentParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *IndexDocumentRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type IndexDocumentResponse struct {
+ Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+ DocId []string `protobuf:"bytes,2,rep,name=doc_id" json:"doc_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} }
+func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentResponse) ProtoMessage() {}
+
+func (m *IndexDocumentResponse) GetStatus() []*RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *IndexDocumentResponse) GetDocId() []string {
+ if m != nil {
+ return m.DocId
+ }
+ return nil
+}
+
+type DeleteDocumentParams struct {
+ DocId []string `protobuf:"bytes,1,rep,name=doc_id" json:"doc_id,omitempty"`
+ IndexSpec *IndexSpec `protobuf:"bytes,2,req,name=index_spec" json:"index_spec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteDocumentParams) Reset() { *m = DeleteDocumentParams{} }
+func (m *DeleteDocumentParams) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentParams) ProtoMessage() {}
+
+func (m *DeleteDocumentParams) GetDocId() []string {
+ if m != nil {
+ return m.DocId
+ }
+ return nil
+}
+
+func (m *DeleteDocumentParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+type DeleteDocumentRequest struct {
+ Params *DeleteDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} }
+func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentRequest) ProtoMessage() {}
+
+func (m *DeleteDocumentRequest) GetParams() *DeleteDocumentParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *DeleteDocumentRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type DeleteDocumentResponse struct {
+ Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteDocumentResponse) Reset() { *m = DeleteDocumentResponse{} }
+func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentResponse) ProtoMessage() {}
+
+func (m *DeleteDocumentResponse) GetStatus() []*RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+type ListDocumentsParams struct {
+ IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"`
+ StartDocId *string `protobuf:"bytes,2,opt,name=start_doc_id" json:"start_doc_id,omitempty"`
+ IncludeStartDoc *bool `protobuf:"varint,3,opt,name=include_start_doc,def=1" json:"include_start_doc,omitempty"`
+ Limit *int32 `protobuf:"varint,4,opt,name=limit,def=100" json:"limit,omitempty"`
+ KeysOnly *bool `protobuf:"varint,5,opt,name=keys_only" json:"keys_only,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListDocumentsParams) Reset() { *m = ListDocumentsParams{} }
+func (m *ListDocumentsParams) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsParams) ProtoMessage() {}
+
+const Default_ListDocumentsParams_IncludeStartDoc bool = true
+const Default_ListDocumentsParams_Limit int32 = 100
+
+func (m *ListDocumentsParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+func (m *ListDocumentsParams) GetStartDocId() string {
+ if m != nil && m.StartDocId != nil {
+ return *m.StartDocId
+ }
+ return ""
+}
+
+func (m *ListDocumentsParams) GetIncludeStartDoc() bool {
+ if m != nil && m.IncludeStartDoc != nil {
+ return *m.IncludeStartDoc
+ }
+ return Default_ListDocumentsParams_IncludeStartDoc
+}
+
+func (m *ListDocumentsParams) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_ListDocumentsParams_Limit
+}
+
+func (m *ListDocumentsParams) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+type ListDocumentsRequest struct {
+ Params *ListDocumentsParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,2,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListDocumentsRequest) Reset() { *m = ListDocumentsRequest{} }
+func (m *ListDocumentsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsRequest) ProtoMessage() {}
+
+func (m *ListDocumentsRequest) GetParams() *ListDocumentsParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *ListDocumentsRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type ListDocumentsResponse struct {
+ Status *RequestStatus `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ Document []*Document `protobuf:"bytes,2,rep,name=document" json:"document,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListDocumentsResponse) Reset() { *m = ListDocumentsResponse{} }
+func (m *ListDocumentsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsResponse) ProtoMessage() {}
+
+func (m *ListDocumentsResponse) GetStatus() *RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *ListDocumentsResponse) GetDocument() []*Document {
+ if m != nil {
+ return m.Document
+ }
+ return nil
+}
+
+type ListIndexesParams struct {
+ FetchSchema *bool `protobuf:"varint,1,opt,name=fetch_schema" json:"fetch_schema,omitempty"`
+ Limit *int32 `protobuf:"varint,2,opt,name=limit,def=20" json:"limit,omitempty"`
+ Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"`
+ StartIndexName *string `protobuf:"bytes,4,opt,name=start_index_name" json:"start_index_name,omitempty"`
+ IncludeStartIndex *bool `protobuf:"varint,5,opt,name=include_start_index,def=1" json:"include_start_index,omitempty"`
+ IndexNamePrefix *string `protobuf:"bytes,6,opt,name=index_name_prefix" json:"index_name_prefix,omitempty"`
+ Offset *int32 `protobuf:"varint,7,opt,name=offset" json:"offset,omitempty"`
+ Source *IndexSpec_Source `protobuf:"varint,8,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListIndexesParams) Reset() { *m = ListIndexesParams{} }
+func (m *ListIndexesParams) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesParams) ProtoMessage() {}
+
+const Default_ListIndexesParams_Limit int32 = 20
+const Default_ListIndexesParams_IncludeStartIndex bool = true
+const Default_ListIndexesParams_Source IndexSpec_Source = IndexSpec_SEARCH
+
+func (m *ListIndexesParams) GetFetchSchema() bool {
+ if m != nil && m.FetchSchema != nil {
+ return *m.FetchSchema
+ }
+ return false
+}
+
+func (m *ListIndexesParams) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_ListIndexesParams_Limit
+}
+
+func (m *ListIndexesParams) GetNamespace() string {
+ if m != nil && m.Namespace != nil {
+ return *m.Namespace
+ }
+ return ""
+}
+
+func (m *ListIndexesParams) GetStartIndexName() string {
+ if m != nil && m.StartIndexName != nil {
+ return *m.StartIndexName
+ }
+ return ""
+}
+
+func (m *ListIndexesParams) GetIncludeStartIndex() bool {
+ if m != nil && m.IncludeStartIndex != nil {
+ return *m.IncludeStartIndex
+ }
+ return Default_ListIndexesParams_IncludeStartIndex
+}
+
+func (m *ListIndexesParams) GetIndexNamePrefix() string {
+ if m != nil && m.IndexNamePrefix != nil {
+ return *m.IndexNamePrefix
+ }
+ return ""
+}
+
+func (m *ListIndexesParams) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return 0
+}
+
+func (m *ListIndexesParams) GetSource() IndexSpec_Source {
+ if m != nil && m.Source != nil {
+ return *m.Source
+ }
+ return Default_ListIndexesParams_Source
+}
+
+type ListIndexesRequest struct {
+ Params *ListIndexesParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListIndexesRequest) Reset() { *m = ListIndexesRequest{} }
+func (m *ListIndexesRequest) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesRequest) ProtoMessage() {}
+
+func (m *ListIndexesRequest) GetParams() *ListIndexesParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *ListIndexesRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type ListIndexesResponse struct {
+ Status *RequestStatus `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ IndexMetadata []*IndexMetadata `protobuf:"bytes,2,rep,name=index_metadata" json:"index_metadata,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListIndexesResponse) Reset() { *m = ListIndexesResponse{} }
+func (m *ListIndexesResponse) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesResponse) ProtoMessage() {}
+
+func (m *ListIndexesResponse) GetStatus() *RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *ListIndexesResponse) GetIndexMetadata() []*IndexMetadata {
+ if m != nil {
+ return m.IndexMetadata
+ }
+ return nil
+}
+
+type DeleteSchemaParams struct {
+ Source *IndexSpec_Source `protobuf:"varint,1,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+ IndexSpec []*IndexSpec `protobuf:"bytes,2,rep,name=index_spec" json:"index_spec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteSchemaParams) Reset() { *m = DeleteSchemaParams{} }
+func (m *DeleteSchemaParams) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaParams) ProtoMessage() {}
+
+const Default_DeleteSchemaParams_Source IndexSpec_Source = IndexSpec_SEARCH
+
+func (m *DeleteSchemaParams) GetSource() IndexSpec_Source {
+ if m != nil && m.Source != nil {
+ return *m.Source
+ }
+ return Default_DeleteSchemaParams_Source
+}
+
+func (m *DeleteSchemaParams) GetIndexSpec() []*IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+type DeleteSchemaRequest struct {
+ Params *DeleteSchemaParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteSchemaRequest) Reset() { *m = DeleteSchemaRequest{} }
+func (m *DeleteSchemaRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaRequest) ProtoMessage() {}
+
+func (m *DeleteSchemaRequest) GetParams() *DeleteSchemaParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *DeleteSchemaRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type DeleteSchemaResponse struct {
+ Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteSchemaResponse) Reset() { *m = DeleteSchemaResponse{} }
+func (m *DeleteSchemaResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaResponse) ProtoMessage() {}
+
+func (m *DeleteSchemaResponse) GetStatus() []*RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+type SortSpec struct {
+ SortExpression *string `protobuf:"bytes,1,req,name=sort_expression" json:"sort_expression,omitempty"`
+ SortDescending *bool `protobuf:"varint,2,opt,name=sort_descending,def=1" json:"sort_descending,omitempty"`
+ DefaultValueText *string `protobuf:"bytes,4,opt,name=default_value_text" json:"default_value_text,omitempty"`
+ DefaultValueNumeric *float64 `protobuf:"fixed64,5,opt,name=default_value_numeric" json:"default_value_numeric,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SortSpec) Reset() { *m = SortSpec{} }
+func (m *SortSpec) String() string { return proto.CompactTextString(m) }
+func (*SortSpec) ProtoMessage() {}
+
+const Default_SortSpec_SortDescending bool = true
+
+func (m *SortSpec) GetSortExpression() string {
+ if m != nil && m.SortExpression != nil {
+ return *m.SortExpression
+ }
+ return ""
+}
+
+func (m *SortSpec) GetSortDescending() bool {
+ if m != nil && m.SortDescending != nil {
+ return *m.SortDescending
+ }
+ return Default_SortSpec_SortDescending
+}
+
+func (m *SortSpec) GetDefaultValueText() string {
+ if m != nil && m.DefaultValueText != nil {
+ return *m.DefaultValueText
+ }
+ return ""
+}
+
+func (m *SortSpec) GetDefaultValueNumeric() float64 {
+ if m != nil && m.DefaultValueNumeric != nil {
+ return *m.DefaultValueNumeric
+ }
+ return 0
+}
+
+type ScorerSpec struct {
+ Scorer *ScorerSpec_Scorer `protobuf:"varint,1,opt,name=scorer,enum=search.ScorerSpec_Scorer,def=2" json:"scorer,omitempty"`
+ Limit *int32 `protobuf:"varint,2,opt,name=limit,def=1000" json:"limit,omitempty"`
+ MatchScorerParameters *string `protobuf:"bytes,9,opt,name=match_scorer_parameters" json:"match_scorer_parameters,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ScorerSpec) Reset() { *m = ScorerSpec{} }
+func (m *ScorerSpec) String() string { return proto.CompactTextString(m) }
+func (*ScorerSpec) ProtoMessage() {}
+
+const Default_ScorerSpec_Scorer ScorerSpec_Scorer = ScorerSpec_MATCH_SCORER
+const Default_ScorerSpec_Limit int32 = 1000
+
+func (m *ScorerSpec) GetScorer() ScorerSpec_Scorer {
+ if m != nil && m.Scorer != nil {
+ return *m.Scorer
+ }
+ return Default_ScorerSpec_Scorer
+}
+
+func (m *ScorerSpec) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_ScorerSpec_Limit
+}
+
+func (m *ScorerSpec) GetMatchScorerParameters() string {
+ if m != nil && m.MatchScorerParameters != nil {
+ return *m.MatchScorerParameters
+ }
+ return ""
+}
+
+type FieldSpec struct {
+ Name []string `protobuf:"bytes,1,rep,name=name" json:"name,omitempty"`
+ Expression []*FieldSpec_Expression `protobuf:"group,2,rep,name=Expression" json:"expression,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldSpec) Reset() { *m = FieldSpec{} }
+func (m *FieldSpec) String() string { return proto.CompactTextString(m) }
+func (*FieldSpec) ProtoMessage() {}
+
+func (m *FieldSpec) GetName() []string {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *FieldSpec) GetExpression() []*FieldSpec_Expression {
+ if m != nil {
+ return m.Expression
+ }
+ return nil
+}
+
+type FieldSpec_Expression struct {
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Expression *string `protobuf:"bytes,4,req,name=expression" json:"expression,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldSpec_Expression) Reset() { *m = FieldSpec_Expression{} }
+func (m *FieldSpec_Expression) String() string { return proto.CompactTextString(m) }
+func (*FieldSpec_Expression) ProtoMessage() {}
+
+func (m *FieldSpec_Expression) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FieldSpec_Expression) GetExpression() string {
+ if m != nil && m.Expression != nil {
+ return *m.Expression
+ }
+ return ""
+}
+
+type FacetRange struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Start *string `protobuf:"bytes,2,opt,name=start" json:"start,omitempty"`
+ End *string `protobuf:"bytes,3,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRange) Reset() { *m = FacetRange{} }
+func (m *FacetRange) String() string { return proto.CompactTextString(m) }
+func (*FacetRange) ProtoMessage() {}
+
+func (m *FacetRange) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetRange) GetStart() string {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return ""
+}
+
+func (m *FacetRange) GetEnd() string {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return ""
+}
+
+type FacetRequestParam struct {
+ ValueLimit *int32 `protobuf:"varint,1,opt,name=value_limit" json:"value_limit,omitempty"`
+ Range []*FacetRange `protobuf:"bytes,2,rep,name=range" json:"range,omitempty"`
+ ValueConstraint []string `protobuf:"bytes,3,rep,name=value_constraint" json:"value_constraint,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRequestParam) Reset() { *m = FacetRequestParam{} }
+func (m *FacetRequestParam) String() string { return proto.CompactTextString(m) }
+func (*FacetRequestParam) ProtoMessage() {}
+
+func (m *FacetRequestParam) GetValueLimit() int32 {
+ if m != nil && m.ValueLimit != nil {
+ return *m.ValueLimit
+ }
+ return 0
+}
+
+func (m *FacetRequestParam) GetRange() []*FacetRange {
+ if m != nil {
+ return m.Range
+ }
+ return nil
+}
+
+func (m *FacetRequestParam) GetValueConstraint() []string {
+ if m != nil {
+ return m.ValueConstraint
+ }
+ return nil
+}
+
+type FacetAutoDetectParam struct {
+ ValueLimit *int32 `protobuf:"varint,1,opt,name=value_limit,def=10" json:"value_limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetAutoDetectParam) Reset() { *m = FacetAutoDetectParam{} }
+func (m *FacetAutoDetectParam) String() string { return proto.CompactTextString(m) }
+func (*FacetAutoDetectParam) ProtoMessage() {}
+
+const Default_FacetAutoDetectParam_ValueLimit int32 = 10
+
+func (m *FacetAutoDetectParam) GetValueLimit() int32 {
+ if m != nil && m.ValueLimit != nil {
+ return *m.ValueLimit
+ }
+ return Default_FacetAutoDetectParam_ValueLimit
+}
+
+type FacetRequest struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Params *FacetRequestParam `protobuf:"bytes,2,opt,name=params" json:"params,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRequest) Reset() { *m = FacetRequest{} }
+func (m *FacetRequest) String() string { return proto.CompactTextString(m) }
+func (*FacetRequest) ProtoMessage() {}
+
+func (m *FacetRequest) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetRequest) GetParams() *FacetRequestParam {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+type FacetRefinement struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ Range *FacetRefinement_Range `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRefinement) Reset() { *m = FacetRefinement{} }
+func (m *FacetRefinement) String() string { return proto.CompactTextString(m) }
+func (*FacetRefinement) ProtoMessage() {}
+
+func (m *FacetRefinement) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetRefinement) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+func (m *FacetRefinement) GetRange() *FacetRefinement_Range {
+ if m != nil {
+ return m.Range
+ }
+ return nil
+}
+
+type FacetRefinement_Range struct {
+ Start *string `protobuf:"bytes,1,opt,name=start" json:"start,omitempty"`
+ End *string `protobuf:"bytes,2,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRefinement_Range) Reset() { *m = FacetRefinement_Range{} }
+func (m *FacetRefinement_Range) String() string { return proto.CompactTextString(m) }
+func (*FacetRefinement_Range) ProtoMessage() {}
+
+func (m *FacetRefinement_Range) GetStart() string {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return ""
+}
+
+func (m *FacetRefinement_Range) GetEnd() string {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return ""
+}
+
+type SearchParams struct {
+ IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"`
+ Query *string `protobuf:"bytes,2,req,name=query" json:"query,omitempty"`
+ Cursor *string `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"`
+ Offset *int32 `protobuf:"varint,11,opt,name=offset" json:"offset,omitempty"`
+ CursorType *SearchParams_CursorType `protobuf:"varint,5,opt,name=cursor_type,enum=search.SearchParams_CursorType,def=0" json:"cursor_type,omitempty"`
+ Limit *int32 `protobuf:"varint,6,opt,name=limit,def=20" json:"limit,omitempty"`
+ MatchedCountAccuracy *int32 `protobuf:"varint,7,opt,name=matched_count_accuracy" json:"matched_count_accuracy,omitempty"`
+ SortSpec []*SortSpec `protobuf:"bytes,8,rep,name=sort_spec" json:"sort_spec,omitempty"`
+ ScorerSpec *ScorerSpec `protobuf:"bytes,9,opt,name=scorer_spec" json:"scorer_spec,omitempty"`
+ FieldSpec *FieldSpec `protobuf:"bytes,10,opt,name=field_spec" json:"field_spec,omitempty"`
+ KeysOnly *bool `protobuf:"varint,12,opt,name=keys_only" json:"keys_only,omitempty"`
+ ParsingMode *SearchParams_ParsingMode `protobuf:"varint,13,opt,name=parsing_mode,enum=search.SearchParams_ParsingMode,def=0" json:"parsing_mode,omitempty"`
+ AutoDiscoverFacetCount *int32 `protobuf:"varint,15,opt,name=auto_discover_facet_count,def=0" json:"auto_discover_facet_count,omitempty"`
+ IncludeFacet []*FacetRequest `protobuf:"bytes,16,rep,name=include_facet" json:"include_facet,omitempty"`
+ FacetRefinement []*FacetRefinement `protobuf:"bytes,17,rep,name=facet_refinement" json:"facet_refinement,omitempty"`
+ FacetAutoDetectParam *FacetAutoDetectParam `protobuf:"bytes,18,opt,name=facet_auto_detect_param" json:"facet_auto_detect_param,omitempty"`
+ FacetDepth *int32 `protobuf:"varint,19,opt,name=facet_depth,def=1000" json:"facet_depth,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchParams) Reset() { *m = SearchParams{} }
+func (m *SearchParams) String() string { return proto.CompactTextString(m) }
+func (*SearchParams) ProtoMessage() {}
+
+const Default_SearchParams_CursorType SearchParams_CursorType = SearchParams_NONE
+const Default_SearchParams_Limit int32 = 20
+const Default_SearchParams_ParsingMode SearchParams_ParsingMode = SearchParams_STRICT
+const Default_SearchParams_AutoDiscoverFacetCount int32 = 0
+const Default_SearchParams_FacetDepth int32 = 1000
+
+func (m *SearchParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetQuery() string {
+ if m != nil && m.Query != nil {
+ return *m.Query
+ }
+ return ""
+}
+
+func (m *SearchParams) GetCursor() string {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return ""
+}
+
+func (m *SearchParams) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return 0
+}
+
+func (m *SearchParams) GetCursorType() SearchParams_CursorType {
+ if m != nil && m.CursorType != nil {
+ return *m.CursorType
+ }
+ return Default_SearchParams_CursorType
+}
+
+func (m *SearchParams) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_SearchParams_Limit
+}
+
+func (m *SearchParams) GetMatchedCountAccuracy() int32 {
+ if m != nil && m.MatchedCountAccuracy != nil {
+ return *m.MatchedCountAccuracy
+ }
+ return 0
+}
+
+func (m *SearchParams) GetSortSpec() []*SortSpec {
+ if m != nil {
+ return m.SortSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetScorerSpec() *ScorerSpec {
+ if m != nil {
+ return m.ScorerSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFieldSpec() *FieldSpec {
+ if m != nil {
+ return m.FieldSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *SearchParams) GetParsingMode() SearchParams_ParsingMode {
+ if m != nil && m.ParsingMode != nil {
+ return *m.ParsingMode
+ }
+ return Default_SearchParams_ParsingMode
+}
+
+func (m *SearchParams) GetAutoDiscoverFacetCount() int32 {
+ if m != nil && m.AutoDiscoverFacetCount != nil {
+ return *m.AutoDiscoverFacetCount
+ }
+ return Default_SearchParams_AutoDiscoverFacetCount
+}
+
+func (m *SearchParams) GetIncludeFacet() []*FacetRequest {
+ if m != nil {
+ return m.IncludeFacet
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFacetRefinement() []*FacetRefinement {
+ if m != nil {
+ return m.FacetRefinement
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFacetAutoDetectParam() *FacetAutoDetectParam {
+ if m != nil {
+ return m.FacetAutoDetectParam
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFacetDepth() int32 {
+ if m != nil && m.FacetDepth != nil {
+ return *m.FacetDepth
+ }
+ return Default_SearchParams_FacetDepth
+}
+
+type SearchRequest struct {
+ Params *SearchParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchRequest) Reset() { *m = SearchRequest{} }
+func (m *SearchRequest) String() string { return proto.CompactTextString(m) }
+func (*SearchRequest) ProtoMessage() {}
+
+func (m *SearchRequest) GetParams() *SearchParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *SearchRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type FacetResultValue struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Count *int32 `protobuf:"varint,2,req,name=count" json:"count,omitempty"`
+ Refinement *FacetRefinement `protobuf:"bytes,3,req,name=refinement" json:"refinement,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetResultValue) Reset() { *m = FacetResultValue{} }
+func (m *FacetResultValue) String() string { return proto.CompactTextString(m) }
+func (*FacetResultValue) ProtoMessage() {}
+
+func (m *FacetResultValue) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetResultValue) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *FacetResultValue) GetRefinement() *FacetRefinement {
+ if m != nil {
+ return m.Refinement
+ }
+ return nil
+}
+
+type FacetResult struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value []*FacetResultValue `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetResult) Reset() { *m = FacetResult{} }
+func (m *FacetResult) String() string { return proto.CompactTextString(m) }
+func (*FacetResult) ProtoMessage() {}
+
+func (m *FacetResult) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetResult) GetValue() []*FacetResultValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type SearchResult struct {
+ Document *Document `protobuf:"bytes,1,req,name=document" json:"document,omitempty"`
+ Expression []*Field `protobuf:"bytes,4,rep,name=expression" json:"expression,omitempty"`
+ Score []float64 `protobuf:"fixed64,2,rep,name=score" json:"score,omitempty"`
+ Cursor *string `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchResult) Reset() { *m = SearchResult{} }
+func (m *SearchResult) String() string { return proto.CompactTextString(m) }
+func (*SearchResult) ProtoMessage() {}
+
+func (m *SearchResult) GetDocument() *Document {
+ if m != nil {
+ return m.Document
+ }
+ return nil
+}
+
+func (m *SearchResult) GetExpression() []*Field {
+ if m != nil {
+ return m.Expression
+ }
+ return nil
+}
+
+func (m *SearchResult) GetScore() []float64 {
+ if m != nil {
+ return m.Score
+ }
+ return nil
+}
+
+func (m *SearchResult) GetCursor() string {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return ""
+}
+
+type SearchResponse struct {
+ Result []*SearchResult `protobuf:"bytes,1,rep,name=result" json:"result,omitempty"`
+ MatchedCount *int64 `protobuf:"varint,2,req,name=matched_count" json:"matched_count,omitempty"`
+ Status *RequestStatus `protobuf:"bytes,3,req,name=status" json:"status,omitempty"`
+ Cursor *string `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"`
+ FacetResult []*FacetResult `protobuf:"bytes,5,rep,name=facet_result" json:"facet_result,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchResponse) Reset() { *m = SearchResponse{} }
+func (m *SearchResponse) String() string { return proto.CompactTextString(m) }
+func (*SearchResponse) ProtoMessage() {}
+
+var extRange_SearchResponse = []proto.ExtensionRange{
+ {1000, 9999},
+}
+
+func (*SearchResponse) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_SearchResponse
+}
+func (m *SearchResponse) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+func (m *SearchResponse) GetResult() []*SearchResult {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *SearchResponse) GetMatchedCount() int64 {
+ if m != nil && m.MatchedCount != nil {
+ return *m.MatchedCount
+ }
+ return 0
+}
+
+func (m *SearchResponse) GetStatus() *RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *SearchResponse) GetCursor() string {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return ""
+}
+
+func (m *SearchResponse) GetFacetResult() []*FacetResult {
+ if m != nil {
+ return m.FacetResult
+ }
+ return nil
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/search/search.proto b/vendor/google.golang.org/appengine/internal/search/search.proto
new file mode 100644
index 0000000..219f4c3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/search/search.proto
@@ -0,0 +1,388 @@
+syntax = "proto2";
+option go_package = "search";
+
+package search;
+
+message Scope {
+ enum Type {
+ USER_BY_CANONICAL_ID = 1;
+ USER_BY_EMAIL = 2;
+ GROUP_BY_CANONICAL_ID = 3;
+ GROUP_BY_EMAIL = 4;
+ GROUP_BY_DOMAIN = 5;
+ ALL_USERS = 6;
+ ALL_AUTHENTICATED_USERS = 7;
+ }
+
+ optional Type type = 1;
+ optional string value = 2;
+}
+
+message Entry {
+ enum Permission {
+ READ = 1;
+ WRITE = 2;
+ FULL_CONTROL = 3;
+ }
+
+ optional Scope scope = 1;
+ optional Permission permission = 2;
+ optional string display_name = 3;
+}
+
+message AccessControlList {
+ optional string owner = 1;
+ repeated Entry entries = 2;
+}
+
+message FieldValue {
+ enum ContentType {
+ TEXT = 0;
+ HTML = 1;
+ ATOM = 2;
+ DATE = 3;
+ NUMBER = 4;
+ GEO = 5;
+ }
+
+ optional ContentType type = 1 [default = TEXT];
+
+ optional string language = 2 [default = "en"];
+
+ optional string string_value = 3;
+
+ optional group Geo = 4 {
+ required double lat = 5;
+ required double lng = 6;
+ }
+}
+
+message Field {
+ required string name = 1;
+ required FieldValue value = 2;
+}
+
+message FieldTypes {
+ required string name = 1;
+ repeated FieldValue.ContentType type = 2;
+}
+
+message IndexShardSettings {
+ repeated int32 prev_num_shards = 1;
+ required int32 num_shards = 2 [default=1];
+ repeated int32 prev_num_shards_search_false = 3;
+ optional string local_replica = 4 [default = ""];
+}
+
+message FacetValue {
+ enum ContentType {
+ ATOM = 2;
+ NUMBER = 4;
+ }
+
+ optional ContentType type = 1 [default = ATOM];
+ optional string string_value = 3;
+}
+
+message Facet {
+ required string name = 1;
+ required FacetValue value = 2;
+}
+
+message DocumentMetadata {
+ optional int64 version = 1;
+ optional int64 committed_st_version = 2;
+}
+
+message Document {
+ optional string id = 1;
+ optional string language = 2 [default = "en"];
+ repeated Field field = 3;
+ optional int32 order_id = 4;
+
+ enum Storage {
+ DISK = 0;
+ }
+
+ optional Storage storage = 5 [default = DISK];
+ repeated Facet facet = 8;
+}
+
+message SearchServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_REQUEST = 1;
+ TRANSIENT_ERROR = 2;
+ INTERNAL_ERROR = 3;
+ PERMISSION_DENIED = 4;
+ TIMEOUT = 5;
+ CONCURRENT_TRANSACTION = 6;
+ }
+}
+
+message RequestStatus {
+ required SearchServiceError.ErrorCode code = 1;
+ optional string error_detail = 2;
+ optional int32 canonical_code = 3;
+}
+
+message IndexSpec {
+ required string name = 1;
+
+ enum Consistency {
+ GLOBAL = 0;
+ PER_DOCUMENT = 1;
+ }
+ optional Consistency consistency = 2 [default = PER_DOCUMENT];
+
+ optional string namespace = 3;
+ optional int32 version = 4;
+
+ enum Source {
+ SEARCH = 0;
+ DATASTORE = 1;
+ CLOUD_STORAGE = 2;
+ }
+ optional Source source = 5 [default = SEARCH];
+
+ enum Mode {
+ PRIORITY = 0;
+ BACKGROUND = 1;
+ }
+ optional Mode mode = 6 [default = PRIORITY];
+}
+
+message IndexMetadata {
+ required IndexSpec index_spec = 1;
+
+ repeated FieldTypes field = 2;
+
+ message Storage {
+ optional int64 amount_used = 1;
+ optional int64 limit = 2;
+ }
+ optional Storage storage = 3;
+}
+
+message IndexDocumentParams {
+ repeated Document document = 1;
+
+ enum Freshness {
+ SYNCHRONOUSLY = 0;
+ WHEN_CONVENIENT = 1;
+ }
+ optional Freshness freshness = 2 [default = SYNCHRONOUSLY, deprecated=true];
+
+ required IndexSpec index_spec = 3;
+}
+
+message IndexDocumentRequest {
+ required IndexDocumentParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message IndexDocumentResponse {
+ repeated RequestStatus status = 1;
+
+ repeated string doc_id = 2;
+}
+
+message DeleteDocumentParams {
+ repeated string doc_id = 1;
+
+ required IndexSpec index_spec = 2;
+}
+
+message DeleteDocumentRequest {
+ required DeleteDocumentParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message DeleteDocumentResponse {
+ repeated RequestStatus status = 1;
+}
+
+message ListDocumentsParams {
+ required IndexSpec index_spec = 1;
+ optional string start_doc_id = 2;
+ optional bool include_start_doc = 3 [default = true];
+ optional int32 limit = 4 [default = 100];
+ optional bool keys_only = 5;
+}
+
+message ListDocumentsRequest {
+ required ListDocumentsParams params = 1;
+
+ optional bytes app_id = 2;
+}
+
+message ListDocumentsResponse {
+ required RequestStatus status = 1;
+
+ repeated Document document = 2;
+}
+
+message ListIndexesParams {
+ optional bool fetch_schema = 1;
+ optional int32 limit = 2 [default = 20];
+ optional string namespace = 3;
+ optional string start_index_name = 4;
+ optional bool include_start_index = 5 [default = true];
+ optional string index_name_prefix = 6;
+ optional int32 offset = 7;
+ optional IndexSpec.Source source = 8 [default = SEARCH];
+}
+
+message ListIndexesRequest {
+ required ListIndexesParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message ListIndexesResponse {
+ required RequestStatus status = 1;
+ repeated IndexMetadata index_metadata = 2;
+}
+
+message DeleteSchemaParams {
+ optional IndexSpec.Source source = 1 [default = SEARCH];
+ repeated IndexSpec index_spec = 2;
+}
+
+message DeleteSchemaRequest {
+ required DeleteSchemaParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message DeleteSchemaResponse {
+ repeated RequestStatus status = 1;
+}
+
+message SortSpec {
+ required string sort_expression = 1;
+ optional bool sort_descending = 2 [default = true];
+ optional string default_value_text = 4;
+ optional double default_value_numeric = 5;
+}
+
+message ScorerSpec {
+ enum Scorer {
+ RESCORING_MATCH_SCORER = 0;
+ MATCH_SCORER = 2;
+ }
+ optional Scorer scorer = 1 [default = MATCH_SCORER];
+
+ optional int32 limit = 2 [default = 1000];
+ optional string match_scorer_parameters = 9;
+}
+
+message FieldSpec {
+ repeated string name = 1;
+
+ repeated group Expression = 2 {
+ required string name = 3;
+ required string expression = 4;
+ }
+}
+
+message FacetRange {
+ optional string name = 1;
+ optional string start = 2;
+ optional string end = 3;
+}
+
+message FacetRequestParam {
+ optional int32 value_limit = 1;
+ repeated FacetRange range = 2;
+ repeated string value_constraint = 3;
+}
+
+message FacetAutoDetectParam {
+ optional int32 value_limit = 1 [default = 10];
+}
+
+message FacetRequest {
+ required string name = 1;
+ optional FacetRequestParam params = 2;
+}
+
+message FacetRefinement {
+ required string name = 1;
+ optional string value = 2;
+
+ message Range {
+ optional string start = 1;
+ optional string end = 2;
+ }
+ optional Range range = 3;
+}
+
+message SearchParams {
+ required IndexSpec index_spec = 1;
+ required string query = 2;
+ optional string cursor = 4;
+ optional int32 offset = 11;
+
+ enum CursorType {
+ NONE = 0;
+ SINGLE = 1;
+ PER_RESULT = 2;
+ }
+ optional CursorType cursor_type = 5 [default = NONE];
+
+ optional int32 limit = 6 [default = 20];
+ optional int32 matched_count_accuracy = 7;
+ repeated SortSpec sort_spec = 8;
+ optional ScorerSpec scorer_spec = 9;
+ optional FieldSpec field_spec = 10;
+ optional bool keys_only = 12;
+
+ enum ParsingMode {
+ STRICT = 0;
+ RELAXED = 1;
+ }
+ optional ParsingMode parsing_mode = 13 [default = STRICT];
+
+ optional int32 auto_discover_facet_count = 15 [default = 0];
+ repeated FacetRequest include_facet = 16;
+ repeated FacetRefinement facet_refinement = 17;
+ optional FacetAutoDetectParam facet_auto_detect_param = 18;
+ optional int32 facet_depth = 19 [default=1000];
+}
+
+message SearchRequest {
+ required SearchParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message FacetResultValue {
+ required string name = 1;
+ required int32 count = 2;
+ required FacetRefinement refinement = 3;
+}
+
+message FacetResult {
+ required string name = 1;
+ repeated FacetResultValue value = 2;
+}
+
+message SearchResult {
+ required Document document = 1;
+ repeated Field expression = 4;
+ repeated double score = 2;
+ optional string cursor = 3;
+}
+
+message SearchResponse {
+ repeated SearchResult result = 1;
+ required int64 matched_count = 2;
+ required RequestStatus status = 3;
+ optional string cursor = 4;
+ repeated FacetResult facet_result = 5;
+
+ extensions 1000 to 9999;
+}
diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go
new file mode 100644
index 0000000..60628ec
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go
@@ -0,0 +1,1858 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/socket/socket_service.proto
+// DO NOT EDIT!
+
+/*
+Package socket is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/socket/socket_service.proto
+
+It has these top-level messages:
+ RemoteSocketServiceError
+ AddressPort
+ CreateSocketRequest
+ CreateSocketReply
+ BindRequest
+ BindReply
+ GetSocketNameRequest
+ GetSocketNameReply
+ GetPeerNameRequest
+ GetPeerNameReply
+ SocketOption
+ SetSocketOptionsRequest
+ SetSocketOptionsReply
+ GetSocketOptionsRequest
+ GetSocketOptionsReply
+ ConnectRequest
+ ConnectReply
+ ListenRequest
+ ListenReply
+ AcceptRequest
+ AcceptReply
+ ShutDownRequest
+ ShutDownReply
+ CloseRequest
+ CloseReply
+ SendRequest
+ SendReply
+ ReceiveRequest
+ ReceiveReply
+ PollEvent
+ PollRequest
+ PollReply
+ ResolveRequest
+ ResolveReply
+*/
+package socket
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type RemoteSocketServiceError_ErrorCode int32
+
+const (
+ RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1
+ RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2
+ RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4
+ RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5
+ RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6
+ RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7
+)
+
+var RemoteSocketServiceError_ErrorCode_name = map[int32]string{
+ 1: "SYSTEM_ERROR",
+ 2: "GAI_ERROR",
+ 4: "FAILURE",
+ 5: "PERMISSION_DENIED",
+ 6: "INVALID_REQUEST",
+ 7: "SOCKET_CLOSED",
+}
+var RemoteSocketServiceError_ErrorCode_value = map[string]int32{
+ "SYSTEM_ERROR": 1,
+ "GAI_ERROR": 2,
+ "FAILURE": 4,
+ "PERMISSION_DENIED": 5,
+ "INVALID_REQUEST": 6,
+ "SOCKET_CLOSED": 7,
+}
+
+func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode {
+ p := new(RemoteSocketServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x RemoteSocketServiceError_ErrorCode) String() string {
+ return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x))
+}
+func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = RemoteSocketServiceError_ErrorCode(value)
+ return nil
+}
+
+type RemoteSocketServiceError_SystemError int32
+
+const (
+ RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0
+ RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1
+ RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2
+ RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3
+ RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4
+ RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5
+ RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6
+ RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7
+ RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8
+ RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9
+ RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10
+ RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11
+ RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11
+ RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12
+ RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13
+ RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14
+ RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15
+ RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16
+ RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17
+ RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18
+ RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19
+ RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20
+ RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21
+ RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22
+ RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23
+ RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24
+ RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25
+ RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26
+ RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27
+ RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28
+ RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29
+ RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30
+ RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31
+ RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32
+ RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33
+ RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34
+ RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35
+ RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35
+ RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36
+ RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37
+ RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38
+ RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39
+ RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40
+ RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42
+ RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43
+ RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44
+ RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45
+ RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46
+ RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47
+ RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48
+ RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49
+ RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50
+ RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51
+ RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52
+ RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53
+ RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54
+ RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55
+ RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56
+ RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57
+ RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59
+ RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60
+ RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61
+ RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62
+ RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63
+ RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64
+ RemoteSocketServiceError_SYS_ENOPKG RemoteSocketServiceError_SystemError = 65
+ RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66
+ RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67
+ RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68
+ RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69
+ RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70
+ RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71
+ RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72
+ RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73
+ RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74
+ RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75
+ RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76
+ RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77
+ RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78
+ RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79
+ RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80
+ RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81
+ RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82
+ RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83
+ RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84
+ RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85
+ RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86
+ RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87
+ RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88
+ RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89
+ RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90
+ RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91
+ RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 92
+ RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93
+ RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94
+ RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95
+ RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95
+ RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96
+ RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97
+ RemoteSocketServiceError_SYS_EADDRINUSE RemoteSocketServiceError_SystemError = 98
+ RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99
+ RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100
+ RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101
+ RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102
+ RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103
+ RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104
+ RemoteSocketServiceError_SYS_ENOBUFS RemoteSocketServiceError_SystemError = 105
+ RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106
+ RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107
+ RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108
+ RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109
+ RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110
+ RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111
+ RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112
+ RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113
+ RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114
+ RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115
+ RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116
+ RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117
+ RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118
+ RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119
+ RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120
+ RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121
+ RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122
+ RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123
+ RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124
+ RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125
+ RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126
+ RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127
+ RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128
+ RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129
+ RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130
+ RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131
+ RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132
+)
+
+var RemoteSocketServiceError_SystemError_name = map[int32]string{
+ 0: "SYS_SUCCESS",
+ 1: "SYS_EPERM",
+ 2: "SYS_ENOENT",
+ 3: "SYS_ESRCH",
+ 4: "SYS_EINTR",
+ 5: "SYS_EIO",
+ 6: "SYS_ENXIO",
+ 7: "SYS_E2BIG",
+ 8: "SYS_ENOEXEC",
+ 9: "SYS_EBADF",
+ 10: "SYS_ECHILD",
+ 11: "SYS_EAGAIN",
+ // Duplicate value: 11: "SYS_EWOULDBLOCK",
+ 12: "SYS_ENOMEM",
+ 13: "SYS_EACCES",
+ 14: "SYS_EFAULT",
+ 15: "SYS_ENOTBLK",
+ 16: "SYS_EBUSY",
+ 17: "SYS_EEXIST",
+ 18: "SYS_EXDEV",
+ 19: "SYS_ENODEV",
+ 20: "SYS_ENOTDIR",
+ 21: "SYS_EISDIR",
+ 22: "SYS_EINVAL",
+ 23: "SYS_ENFILE",
+ 24: "SYS_EMFILE",
+ 25: "SYS_ENOTTY",
+ 26: "SYS_ETXTBSY",
+ 27: "SYS_EFBIG",
+ 28: "SYS_ENOSPC",
+ 29: "SYS_ESPIPE",
+ 30: "SYS_EROFS",
+ 31: "SYS_EMLINK",
+ 32: "SYS_EPIPE",
+ 33: "SYS_EDOM",
+ 34: "SYS_ERANGE",
+ 35: "SYS_EDEADLK",
+ // Duplicate value: 35: "SYS_EDEADLOCK",
+ 36: "SYS_ENAMETOOLONG",
+ 37: "SYS_ENOLCK",
+ 38: "SYS_ENOSYS",
+ 39: "SYS_ENOTEMPTY",
+ 40: "SYS_ELOOP",
+ 42: "SYS_ENOMSG",
+ 43: "SYS_EIDRM",
+ 44: "SYS_ECHRNG",
+ 45: "SYS_EL2NSYNC",
+ 46: "SYS_EL3HLT",
+ 47: "SYS_EL3RST",
+ 48: "SYS_ELNRNG",
+ 49: "SYS_EUNATCH",
+ 50: "SYS_ENOCSI",
+ 51: "SYS_EL2HLT",
+ 52: "SYS_EBADE",
+ 53: "SYS_EBADR",
+ 54: "SYS_EXFULL",
+ 55: "SYS_ENOANO",
+ 56: "SYS_EBADRQC",
+ 57: "SYS_EBADSLT",
+ 59: "SYS_EBFONT",
+ 60: "SYS_ENOSTR",
+ 61: "SYS_ENODATA",
+ 62: "SYS_ETIME",
+ 63: "SYS_ENOSR",
+ 64: "SYS_ENONET",
+ 65: "SYS_ENOPKG",
+ 66: "SYS_EREMOTE",
+ 67: "SYS_ENOLINK",
+ 68: "SYS_EADV",
+ 69: "SYS_ESRMNT",
+ 70: "SYS_ECOMM",
+ 71: "SYS_EPROTO",
+ 72: "SYS_EMULTIHOP",
+ 73: "SYS_EDOTDOT",
+ 74: "SYS_EBADMSG",
+ 75: "SYS_EOVERFLOW",
+ 76: "SYS_ENOTUNIQ",
+ 77: "SYS_EBADFD",
+ 78: "SYS_EREMCHG",
+ 79: "SYS_ELIBACC",
+ 80: "SYS_ELIBBAD",
+ 81: "SYS_ELIBSCN",
+ 82: "SYS_ELIBMAX",
+ 83: "SYS_ELIBEXEC",
+ 84: "SYS_EILSEQ",
+ 85: "SYS_ERESTART",
+ 86: "SYS_ESTRPIPE",
+ 87: "SYS_EUSERS",
+ 88: "SYS_ENOTSOCK",
+ 89: "SYS_EDESTADDRREQ",
+ 90: "SYS_EMSGSIZE",
+ 91: "SYS_EPROTOTYPE",
+ 92: "SYS_ENOPROTOOPT",
+ 93: "SYS_EPROTONOSUPPORT",
+ 94: "SYS_ESOCKTNOSUPPORT",
+ 95: "SYS_EOPNOTSUPP",
+ // Duplicate value: 95: "SYS_ENOTSUP",
+ 96: "SYS_EPFNOSUPPORT",
+ 97: "SYS_EAFNOSUPPORT",
+ 98: "SYS_EADDRINUSE",
+ 99: "SYS_EADDRNOTAVAIL",
+ 100: "SYS_ENETDOWN",
+ 101: "SYS_ENETUNREACH",
+ 102: "SYS_ENETRESET",
+ 103: "SYS_ECONNABORTED",
+ 104: "SYS_ECONNRESET",
+ 105: "SYS_ENOBUFS",
+ 106: "SYS_EISCONN",
+ 107: "SYS_ENOTCONN",
+ 108: "SYS_ESHUTDOWN",
+ 109: "SYS_ETOOMANYREFS",
+ 110: "SYS_ETIMEDOUT",
+ 111: "SYS_ECONNREFUSED",
+ 112: "SYS_EHOSTDOWN",
+ 113: "SYS_EHOSTUNREACH",
+ 114: "SYS_EALREADY",
+ 115: "SYS_EINPROGRESS",
+ 116: "SYS_ESTALE",
+ 117: "SYS_EUCLEAN",
+ 118: "SYS_ENOTNAM",
+ 119: "SYS_ENAVAIL",
+ 120: "SYS_EISNAM",
+ 121: "SYS_EREMOTEIO",
+ 122: "SYS_EDQUOT",
+ 123: "SYS_ENOMEDIUM",
+ 124: "SYS_EMEDIUMTYPE",
+ 125: "SYS_ECANCELED",
+ 126: "SYS_ENOKEY",
+ 127: "SYS_EKEYEXPIRED",
+ 128: "SYS_EKEYREVOKED",
+ 129: "SYS_EKEYREJECTED",
+ 130: "SYS_EOWNERDEAD",
+ 131: "SYS_ENOTRECOVERABLE",
+ 132: "SYS_ERFKILL",
+}
+var RemoteSocketServiceError_SystemError_value = map[string]int32{
+ "SYS_SUCCESS": 0,
+ "SYS_EPERM": 1,
+ "SYS_ENOENT": 2,
+ "SYS_ESRCH": 3,
+ "SYS_EINTR": 4,
+ "SYS_EIO": 5,
+ "SYS_ENXIO": 6,
+ "SYS_E2BIG": 7,
+ "SYS_ENOEXEC": 8,
+ "SYS_EBADF": 9,
+ "SYS_ECHILD": 10,
+ "SYS_EAGAIN": 11,
+ "SYS_EWOULDBLOCK": 11,
+ "SYS_ENOMEM": 12,
+ "SYS_EACCES": 13,
+ "SYS_EFAULT": 14,
+ "SYS_ENOTBLK": 15,
+ "SYS_EBUSY": 16,
+ "SYS_EEXIST": 17,
+ "SYS_EXDEV": 18,
+ "SYS_ENODEV": 19,
+ "SYS_ENOTDIR": 20,
+ "SYS_EISDIR": 21,
+ "SYS_EINVAL": 22,
+ "SYS_ENFILE": 23,
+ "SYS_EMFILE": 24,
+ "SYS_ENOTTY": 25,
+ "SYS_ETXTBSY": 26,
+ "SYS_EFBIG": 27,
+ "SYS_ENOSPC": 28,
+ "SYS_ESPIPE": 29,
+ "SYS_EROFS": 30,
+ "SYS_EMLINK": 31,
+ "SYS_EPIPE": 32,
+ "SYS_EDOM": 33,
+ "SYS_ERANGE": 34,
+ "SYS_EDEADLK": 35,
+ "SYS_EDEADLOCK": 35,
+ "SYS_ENAMETOOLONG": 36,
+ "SYS_ENOLCK": 37,
+ "SYS_ENOSYS": 38,
+ "SYS_ENOTEMPTY": 39,
+ "SYS_ELOOP": 40,
+ "SYS_ENOMSG": 42,
+ "SYS_EIDRM": 43,
+ "SYS_ECHRNG": 44,
+ "SYS_EL2NSYNC": 45,
+ "SYS_EL3HLT": 46,
+ "SYS_EL3RST": 47,
+ "SYS_ELNRNG": 48,
+ "SYS_EUNATCH": 49,
+ "SYS_ENOCSI": 50,
+ "SYS_EL2HLT": 51,
+ "SYS_EBADE": 52,
+ "SYS_EBADR": 53,
+ "SYS_EXFULL": 54,
+ "SYS_ENOANO": 55,
+ "SYS_EBADRQC": 56,
+ "SYS_EBADSLT": 57,
+ "SYS_EBFONT": 59,
+ "SYS_ENOSTR": 60,
+ "SYS_ENODATA": 61,
+ "SYS_ETIME": 62,
+ "SYS_ENOSR": 63,
+ "SYS_ENONET": 64,
+ "SYS_ENOPKG": 65,
+ "SYS_EREMOTE": 66,
+ "SYS_ENOLINK": 67,
+ "SYS_EADV": 68,
+ "SYS_ESRMNT": 69,
+ "SYS_ECOMM": 70,
+ "SYS_EPROTO": 71,
+ "SYS_EMULTIHOP": 72,
+ "SYS_EDOTDOT": 73,
+ "SYS_EBADMSG": 74,
+ "SYS_EOVERFLOW": 75,
+ "SYS_ENOTUNIQ": 76,
+ "SYS_EBADFD": 77,
+ "SYS_EREMCHG": 78,
+ "SYS_ELIBACC": 79,
+ "SYS_ELIBBAD": 80,
+ "SYS_ELIBSCN": 81,
+ "SYS_ELIBMAX": 82,
+ "SYS_ELIBEXEC": 83,
+ "SYS_EILSEQ": 84,
+ "SYS_ERESTART": 85,
+ "SYS_ESTRPIPE": 86,
+ "SYS_EUSERS": 87,
+ "SYS_ENOTSOCK": 88,
+ "SYS_EDESTADDRREQ": 89,
+ "SYS_EMSGSIZE": 90,
+ "SYS_EPROTOTYPE": 91,
+ "SYS_ENOPROTOOPT": 92,
+ "SYS_EPROTONOSUPPORT": 93,
+ "SYS_ESOCKTNOSUPPORT": 94,
+ "SYS_EOPNOTSUPP": 95,
+ "SYS_ENOTSUP": 95,
+ "SYS_EPFNOSUPPORT": 96,
+ "SYS_EAFNOSUPPORT": 97,
+ "SYS_EADDRINUSE": 98,
+ "SYS_EADDRNOTAVAIL": 99,
+ "SYS_ENETDOWN": 100,
+ "SYS_ENETUNREACH": 101,
+ "SYS_ENETRESET": 102,
+ "SYS_ECONNABORTED": 103,
+ "SYS_ECONNRESET": 104,
+ "SYS_ENOBUFS": 105,
+ "SYS_EISCONN": 106,
+ "SYS_ENOTCONN": 107,
+ "SYS_ESHUTDOWN": 108,
+ "SYS_ETOOMANYREFS": 109,
+ "SYS_ETIMEDOUT": 110,
+ "SYS_ECONNREFUSED": 111,
+ "SYS_EHOSTDOWN": 112,
+ "SYS_EHOSTUNREACH": 113,
+ "SYS_EALREADY": 114,
+ "SYS_EINPROGRESS": 115,
+ "SYS_ESTALE": 116,
+ "SYS_EUCLEAN": 117,
+ "SYS_ENOTNAM": 118,
+ "SYS_ENAVAIL": 119,
+ "SYS_EISNAM": 120,
+ "SYS_EREMOTEIO": 121,
+ "SYS_EDQUOT": 122,
+ "SYS_ENOMEDIUM": 123,
+ "SYS_EMEDIUMTYPE": 124,
+ "SYS_ECANCELED": 125,
+ "SYS_ENOKEY": 126,
+ "SYS_EKEYEXPIRED": 127,
+ "SYS_EKEYREVOKED": 128,
+ "SYS_EKEYREJECTED": 129,
+ "SYS_EOWNERDEAD": 130,
+ "SYS_ENOTRECOVERABLE": 131,
+ "SYS_ERFKILL": 132,
+}
+
+func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError {
+ p := new(RemoteSocketServiceError_SystemError)
+ *p = x
+ return p
+}
+func (x RemoteSocketServiceError_SystemError) String() string {
+ return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x))
+}
+func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError")
+ if err != nil {
+ return err
+ }
+ *x = RemoteSocketServiceError_SystemError(value)
+ return nil
+}
+
+type CreateSocketRequest_SocketFamily int32
+
+const (
+ CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1
+ CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2
+)
+
+var CreateSocketRequest_SocketFamily_name = map[int32]string{
+ 1: "IPv4",
+ 2: "IPv6",
+}
+var CreateSocketRequest_SocketFamily_value = map[string]int32{
+ "IPv4": 1,
+ "IPv6": 2,
+}
+
+func (x CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily {
+ p := new(CreateSocketRequest_SocketFamily)
+ *p = x
+ return p
+}
+func (x CreateSocketRequest_SocketFamily) String() string {
+ return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x))
+}
+func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily")
+ if err != nil {
+ return err
+ }
+ *x = CreateSocketRequest_SocketFamily(value)
+ return nil
+}
+
+type CreateSocketRequest_SocketProtocol int32
+
+const (
+ CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1
+ CreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2
+)
+
+var CreateSocketRequest_SocketProtocol_name = map[int32]string{
+ 1: "TCP",
+ 2: "UDP",
+}
+var CreateSocketRequest_SocketProtocol_value = map[string]int32{
+ "TCP": 1,
+ "UDP": 2,
+}
+
+func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol {
+ p := new(CreateSocketRequest_SocketProtocol)
+ *p = x
+ return p
+}
+func (x CreateSocketRequest_SocketProtocol) String() string {
+ return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x))
+}
+func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol")
+ if err != nil {
+ return err
+ }
+ *x = CreateSocketRequest_SocketProtocol(value)
+ return nil
+}
+
+type SocketOption_SocketOptionLevel int32
+
+const (
+ SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0
+ SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1
+ SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6
+ SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17
+)
+
+var SocketOption_SocketOptionLevel_name = map[int32]string{
+ 0: "SOCKET_SOL_IP",
+ 1: "SOCKET_SOL_SOCKET",
+ 6: "SOCKET_SOL_TCP",
+ 17: "SOCKET_SOL_UDP",
+}
+var SocketOption_SocketOptionLevel_value = map[string]int32{
+ "SOCKET_SOL_IP": 0,
+ "SOCKET_SOL_SOCKET": 1,
+ "SOCKET_SOL_TCP": 6,
+ "SOCKET_SOL_UDP": 17,
+}
+
+func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel {
+ p := new(SocketOption_SocketOptionLevel)
+ *p = x
+ return p
+}
+func (x SocketOption_SocketOptionLevel) String() string {
+ return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x))
+}
+func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel")
+ if err != nil {
+ return err
+ }
+ *x = SocketOption_SocketOptionLevel(value)
+ return nil
+}
+
+type SocketOption_SocketOptionName int32
+
+const (
+ SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1
+ SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2
+ SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3
+ SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4
+ SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5
+ SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6
+ SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7
+ SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8
+ SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9
+ SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10
+ SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13
+ SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20
+ SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21
+ SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1
+ SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2
+ SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3
+ SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4
+ SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1
+ SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2
+ SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3
+ SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4
+ SocketOption_SOCKET_TCP_KEEPINTVL SocketOption_SocketOptionName = 5
+ SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6
+ SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7
+ SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8
+ SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9
+ SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10
+ SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11
+ SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12
+)
+
+var SocketOption_SocketOptionName_name = map[int32]string{
+ 1: "SOCKET_SO_DEBUG",
+ 2: "SOCKET_SO_REUSEADDR",
+ 3: "SOCKET_SO_TYPE",
+ 4: "SOCKET_SO_ERROR",
+ 5: "SOCKET_SO_DONTROUTE",
+ 6: "SOCKET_SO_BROADCAST",
+ 7: "SOCKET_SO_SNDBUF",
+ 8: "SOCKET_SO_RCVBUF",
+ 9: "SOCKET_SO_KEEPALIVE",
+ 10: "SOCKET_SO_OOBINLINE",
+ 13: "SOCKET_SO_LINGER",
+ 20: "SOCKET_SO_RCVTIMEO",
+ 21: "SOCKET_SO_SNDTIMEO",
+ // Duplicate value: 1: "SOCKET_IP_TOS",
+ // Duplicate value: 2: "SOCKET_IP_TTL",
+ // Duplicate value: 3: "SOCKET_IP_HDRINCL",
+ // Duplicate value: 4: "SOCKET_IP_OPTIONS",
+ // Duplicate value: 1: "SOCKET_TCP_NODELAY",
+ // Duplicate value: 2: "SOCKET_TCP_MAXSEG",
+ // Duplicate value: 3: "SOCKET_TCP_CORK",
+ // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE",
+ // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL",
+ // Duplicate value: 6: "SOCKET_TCP_KEEPCNT",
+ // Duplicate value: 7: "SOCKET_TCP_SYNCNT",
+ // Duplicate value: 8: "SOCKET_TCP_LINGER2",
+ // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT",
+ // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP",
+ 11: "SOCKET_TCP_INFO",
+ 12: "SOCKET_TCP_QUICKACK",
+}
+var SocketOption_SocketOptionName_value = map[string]int32{
+ "SOCKET_SO_DEBUG": 1,
+ "SOCKET_SO_REUSEADDR": 2,
+ "SOCKET_SO_TYPE": 3,
+ "SOCKET_SO_ERROR": 4,
+ "SOCKET_SO_DONTROUTE": 5,
+ "SOCKET_SO_BROADCAST": 6,
+ "SOCKET_SO_SNDBUF": 7,
+ "SOCKET_SO_RCVBUF": 8,
+ "SOCKET_SO_KEEPALIVE": 9,
+ "SOCKET_SO_OOBINLINE": 10,
+ "SOCKET_SO_LINGER": 13,
+ "SOCKET_SO_RCVTIMEO": 20,
+ "SOCKET_SO_SNDTIMEO": 21,
+ "SOCKET_IP_TOS": 1,
+ "SOCKET_IP_TTL": 2,
+ "SOCKET_IP_HDRINCL": 3,
+ "SOCKET_IP_OPTIONS": 4,
+ "SOCKET_TCP_NODELAY": 1,
+ "SOCKET_TCP_MAXSEG": 2,
+ "SOCKET_TCP_CORK": 3,
+ "SOCKET_TCP_KEEPIDLE": 4,
+ "SOCKET_TCP_KEEPINTVL": 5,
+ "SOCKET_TCP_KEEPCNT": 6,
+ "SOCKET_TCP_SYNCNT": 7,
+ "SOCKET_TCP_LINGER2": 8,
+ "SOCKET_TCP_DEFER_ACCEPT": 9,
+ "SOCKET_TCP_WINDOW_CLAMP": 10,
+ "SOCKET_TCP_INFO": 11,
+ "SOCKET_TCP_QUICKACK": 12,
+}
+
+func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName {
+ p := new(SocketOption_SocketOptionName)
+ *p = x
+ return p
+}
+func (x SocketOption_SocketOptionName) String() string {
+ return proto.EnumName(SocketOption_SocketOptionName_name, int32(x))
+}
+func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName")
+ if err != nil {
+ return err
+ }
+ *x = SocketOption_SocketOptionName(value)
+ return nil
+}
+
+type ShutDownRequest_How int32
+
+const (
+ ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1
+ ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2
+ ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3
+)
+
+var ShutDownRequest_How_name = map[int32]string{
+ 1: "SOCKET_SHUT_RD",
+ 2: "SOCKET_SHUT_WR",
+ 3: "SOCKET_SHUT_RDWR",
+}
+var ShutDownRequest_How_value = map[string]int32{
+ "SOCKET_SHUT_RD": 1,
+ "SOCKET_SHUT_WR": 2,
+ "SOCKET_SHUT_RDWR": 3,
+}
+
+func (x ShutDownRequest_How) Enum() *ShutDownRequest_How {
+ p := new(ShutDownRequest_How)
+ *p = x
+ return p
+}
+func (x ShutDownRequest_How) String() string {
+ return proto.EnumName(ShutDownRequest_How_name, int32(x))
+}
+func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How")
+ if err != nil {
+ return err
+ }
+ *x = ShutDownRequest_How(value)
+ return nil
+}
+
+type ReceiveRequest_Flags int32
+
+const (
+ ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1
+ ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2
+)
+
+var ReceiveRequest_Flags_name = map[int32]string{
+ 1: "MSG_OOB",
+ 2: "MSG_PEEK",
+}
+var ReceiveRequest_Flags_value = map[string]int32{
+ "MSG_OOB": 1,
+ "MSG_PEEK": 2,
+}
+
+func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags {
+ p := new(ReceiveRequest_Flags)
+ *p = x
+ return p
+}
+func (x ReceiveRequest_Flags) String() string {
+ return proto.EnumName(ReceiveRequest_Flags_name, int32(x))
+}
+func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags")
+ if err != nil {
+ return err
+ }
+ *x = ReceiveRequest_Flags(value)
+ return nil
+}
+
+type PollEvent_PollEventFlag int32
+
+const (
+ PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0
+ PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1
+ PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2
+ PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4
+ PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8
+ PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16
+ PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32
+ PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64
+ PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128
+ PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256
+ PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512
+ PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024
+ PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096
+ PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192
+)
+
+var PollEvent_PollEventFlag_name = map[int32]string{
+ 0: "SOCKET_POLLNONE",
+ 1: "SOCKET_POLLIN",
+ 2: "SOCKET_POLLPRI",
+ 4: "SOCKET_POLLOUT",
+ 8: "SOCKET_POLLERR",
+ 16: "SOCKET_POLLHUP",
+ 32: "SOCKET_POLLNVAL",
+ 64: "SOCKET_POLLRDNORM",
+ 128: "SOCKET_POLLRDBAND",
+ 256: "SOCKET_POLLWRNORM",
+ 512: "SOCKET_POLLWRBAND",
+ 1024: "SOCKET_POLLMSG",
+ 4096: "SOCKET_POLLREMOVE",
+ 8192: "SOCKET_POLLRDHUP",
+}
+var PollEvent_PollEventFlag_value = map[string]int32{
+ "SOCKET_POLLNONE": 0,
+ "SOCKET_POLLIN": 1,
+ "SOCKET_POLLPRI": 2,
+ "SOCKET_POLLOUT": 4,
+ "SOCKET_POLLERR": 8,
+ "SOCKET_POLLHUP": 16,
+ "SOCKET_POLLNVAL": 32,
+ "SOCKET_POLLRDNORM": 64,
+ "SOCKET_POLLRDBAND": 128,
+ "SOCKET_POLLWRNORM": 256,
+ "SOCKET_POLLWRBAND": 512,
+ "SOCKET_POLLMSG": 1024,
+ "SOCKET_POLLREMOVE": 4096,
+ "SOCKET_POLLRDHUP": 8192,
+}
+
+func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag {
+ p := new(PollEvent_PollEventFlag)
+ *p = x
+ return p
+}
+func (x PollEvent_PollEventFlag) String() string {
+ return proto.EnumName(PollEvent_PollEventFlag_name, int32(x))
+}
+func (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag")
+ if err != nil {
+ return err
+ }
+ *x = PollEvent_PollEventFlag(value)
+ return nil
+}
+
+type ResolveReply_ErrorCode int32
+
+const (
+ ResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1
+ ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2
+ ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3
+ ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4
+ ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5
+ ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6
+ ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7
+ ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8
+ ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9
+ ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10
+ ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11
+ ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12
+ ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13
+ ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14
+ ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15
+)
+
+var ResolveReply_ErrorCode_name = map[int32]string{
+ 1: "SOCKET_EAI_ADDRFAMILY",
+ 2: "SOCKET_EAI_AGAIN",
+ 3: "SOCKET_EAI_BADFLAGS",
+ 4: "SOCKET_EAI_FAIL",
+ 5: "SOCKET_EAI_FAMILY",
+ 6: "SOCKET_EAI_MEMORY",
+ 7: "SOCKET_EAI_NODATA",
+ 8: "SOCKET_EAI_NONAME",
+ 9: "SOCKET_EAI_SERVICE",
+ 10: "SOCKET_EAI_SOCKTYPE",
+ 11: "SOCKET_EAI_SYSTEM",
+ 12: "SOCKET_EAI_BADHINTS",
+ 13: "SOCKET_EAI_PROTOCOL",
+ 14: "SOCKET_EAI_OVERFLOW",
+ 15: "SOCKET_EAI_MAX",
+}
+var ResolveReply_ErrorCode_value = map[string]int32{
+ "SOCKET_EAI_ADDRFAMILY": 1,
+ "SOCKET_EAI_AGAIN": 2,
+ "SOCKET_EAI_BADFLAGS": 3,
+ "SOCKET_EAI_FAIL": 4,
+ "SOCKET_EAI_FAMILY": 5,
+ "SOCKET_EAI_MEMORY": 6,
+ "SOCKET_EAI_NODATA": 7,
+ "SOCKET_EAI_NONAME": 8,
+ "SOCKET_EAI_SERVICE": 9,
+ "SOCKET_EAI_SOCKTYPE": 10,
+ "SOCKET_EAI_SYSTEM": 11,
+ "SOCKET_EAI_BADHINTS": 12,
+ "SOCKET_EAI_PROTOCOL": 13,
+ "SOCKET_EAI_OVERFLOW": 14,
+ "SOCKET_EAI_MAX": 15,
+}
+
+func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode {
+ p := new(ResolveReply_ErrorCode)
+ *p = x
+ return p
+}
+func (x ResolveReply_ErrorCode) String() string {
+ return proto.EnumName(ResolveReply_ErrorCode_name, int32(x))
+}
+func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, "ResolveReply_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ResolveReply_ErrorCode(value)
+ return nil
+}
+
+type RemoteSocketServiceError struct {
+ SystemError *int32 `protobuf:"varint,1,opt,name=system_error,def=0" json:"system_error,omitempty"`
+ ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail" json:"error_detail,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} }
+func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) }
+func (*RemoteSocketServiceError) ProtoMessage() {}
+
+const Default_RemoteSocketServiceError_SystemError int32 = 0
+
+func (m *RemoteSocketServiceError) GetSystemError() int32 {
+ if m != nil && m.SystemError != nil {
+ return *m.SystemError
+ }
+ return Default_RemoteSocketServiceError_SystemError
+}
+
+func (m *RemoteSocketServiceError) GetErrorDetail() string {
+ if m != nil && m.ErrorDetail != nil {
+ return *m.ErrorDetail
+ }
+ return ""
+}
+
+type AddressPort struct {
+ Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"`
+ PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address" json:"packed_address,omitempty"`
+ HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint" json:"hostname_hint,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AddressPort) Reset() { *m = AddressPort{} }
+func (m *AddressPort) String() string { return proto.CompactTextString(m) }
+func (*AddressPort) ProtoMessage() {}
+
+func (m *AddressPort) GetPort() int32 {
+ if m != nil && m.Port != nil {
+ return *m.Port
+ }
+ return 0
+}
+
+func (m *AddressPort) GetPackedAddress() []byte {
+ if m != nil {
+ return m.PackedAddress
+ }
+ return nil
+}
+
+func (m *AddressPort) GetHostnameHint() string {
+ if m != nil && m.HostnameHint != nil {
+ return *m.HostnameHint
+ }
+ return ""
+}
+
+type CreateSocketRequest struct {
+ Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"`
+ Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"`
+ SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options" json:"socket_options,omitempty"`
+ ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,def=0" json:"listen_backlog,omitempty"`
+ RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip" json:"remote_ip,omitempty"`
+ AppId *string `protobuf:"bytes,9,opt,name=app_id" json:"app_id,omitempty"`
+ ProjectId *int64 `protobuf:"varint,10,opt,name=project_id" json:"project_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} }
+func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateSocketRequest) ProtoMessage() {}
+
+const Default_CreateSocketRequest_ListenBacklog int32 = 0
+
+func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily {
+ if m != nil && m.Family != nil {
+ return *m.Family
+ }
+ return CreateSocketRequest_IPv4
+}
+
+func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol {
+ if m != nil && m.Protocol != nil {
+ return *m.Protocol
+ }
+ return CreateSocketRequest_TCP
+}
+
+func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption {
+ if m != nil {
+ return m.SocketOptions
+ }
+ return nil
+}
+
+func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+func (m *CreateSocketRequest) GetListenBacklog() int32 {
+ if m != nil && m.ListenBacklog != nil {
+ return *m.ListenBacklog
+ }
+ return Default_CreateSocketRequest_ListenBacklog
+}
+
+func (m *CreateSocketRequest) GetRemoteIp() *AddressPort {
+ if m != nil {
+ return m.RemoteIp
+ }
+ return nil
+}
+
+func (m *CreateSocketRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *CreateSocketRequest) GetProjectId() int64 {
+ if m != nil && m.ProjectId != nil {
+ return *m.ProjectId
+ }
+ return 0
+}
+
+type CreateSocketReply struct {
+ SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address" json:"server_address,omitempty"`
+ ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} }
+func (m *CreateSocketReply) String() string { return proto.CompactTextString(m) }
+func (*CreateSocketReply) ProtoMessage() {}
+
+var extRange_CreateSocketReply = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_CreateSocketReply
+}
+func (m *CreateSocketReply) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+func (m *CreateSocketReply) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *CreateSocketReply) GetServerAddress() *AddressPort {
+ if m != nil {
+ return m.ServerAddress
+ }
+ return nil
+}
+
+func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type BindRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BindRequest) Reset() { *m = BindRequest{} }
+func (m *BindRequest) String() string { return proto.CompactTextString(m) }
+func (*BindRequest) ProtoMessage() {}
+
+func (m *BindRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *BindRequest) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type BindReply struct {
+ ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BindReply) Reset() { *m = BindReply{} }
+func (m *BindReply) String() string { return proto.CompactTextString(m) }
+func (*BindReply) ProtoMessage() {}
+
+func (m *BindReply) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type GetSocketNameRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} }
+func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetSocketNameRequest) ProtoMessage() {}
+
+func (m *GetSocketNameRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+type GetSocketNameReply struct {
+ ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} }
+func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) }
+func (*GetSocketNameReply) ProtoMessage() {}
+
+func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type GetPeerNameRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} }
+func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetPeerNameRequest) ProtoMessage() {}
+
+func (m *GetPeerNameRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+type GetPeerNameReply struct {
+ PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip" json:"peer_ip,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} }
+func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) }
+func (*GetPeerNameReply) ProtoMessage() {}
+
+func (m *GetPeerNameReply) GetPeerIp() *AddressPort {
+ if m != nil {
+ return m.PeerIp
+ }
+ return nil
+}
+
+type SocketOption struct {
+ Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"`
+ Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"`
+ Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SocketOption) Reset() { *m = SocketOption{} }
+func (m *SocketOption) String() string { return proto.CompactTextString(m) }
+func (*SocketOption) ProtoMessage() {}
+
+func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return SocketOption_SOCKET_SOL_IP
+}
+
+func (m *SocketOption) GetOption() SocketOption_SocketOptionName {
+ if m != nil && m.Option != nil {
+ return *m.Option
+ }
+ return SocketOption_SOCKET_SO_DEBUG
+}
+
+func (m *SocketOption) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type SetSocketOptionsRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} }
+func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) }
+func (*SetSocketOptionsRequest) ProtoMessage() {}
+
+func (m *SetSocketOptionsRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+type SetSocketOptionsReply struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} }
+func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) }
+func (*SetSocketOptionsReply) ProtoMessage() {}
+
+type GetSocketOptionsRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} }
+func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetSocketOptionsRequest) ProtoMessage() {}
+
+func (m *GetSocketOptionsRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+type GetSocketOptionsReply struct {
+ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} }
+func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) }
+func (*GetSocketOptionsReply) ProtoMessage() {}
+
+func (m *GetSocketOptionsReply) GetOptions() []*SocketOption {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+type ConnectRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip" json:"remote_ip,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ConnectRequest) Reset() { *m = ConnectRequest{} }
+func (m *ConnectRequest) String() string { return proto.CompactTextString(m) }
+func (*ConnectRequest) ProtoMessage() {}
+
+const Default_ConnectRequest_TimeoutSeconds float64 = -1
+
+func (m *ConnectRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *ConnectRequest) GetRemoteIp() *AddressPort {
+ if m != nil {
+ return m.RemoteIp
+ }
+ return nil
+}
+
+func (m *ConnectRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_ConnectRequest_TimeoutSeconds
+}
+
+type ConnectReply struct {
+ ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ConnectReply) Reset() { *m = ConnectReply{} }
+func (m *ConnectReply) String() string { return proto.CompactTextString(m) }
+func (*ConnectReply) ProtoMessage() {}
+
+var extRange_ConnectReply = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_ConnectReply
+}
+func (m *ConnectReply) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+func (m *ConnectReply) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type ListenRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListenRequest) Reset() { *m = ListenRequest{} }
+func (m *ListenRequest) String() string { return proto.CompactTextString(m) }
+func (*ListenRequest) ProtoMessage() {}
+
+func (m *ListenRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *ListenRequest) GetBacklog() int32 {
+ if m != nil && m.Backlog != nil {
+ return *m.Backlog
+ }
+ return 0
+}
+
+type ListenReply struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListenReply) Reset() { *m = ListenReply{} }
+func (m *ListenReply) String() string { return proto.CompactTextString(m) }
+func (*ListenReply) ProtoMessage() {}
+
+type AcceptRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AcceptRequest) Reset() { *m = AcceptRequest{} }
+func (m *AcceptRequest) String() string { return proto.CompactTextString(m) }
+func (*AcceptRequest) ProtoMessage() {}
+
+const Default_AcceptRequest_TimeoutSeconds float64 = -1
+
+func (m *AcceptRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *AcceptRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_AcceptRequest_TimeoutSeconds
+}
+
+type AcceptReply struct {
+ NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor" json:"new_socket_descriptor,omitempty"`
+ RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address" json:"remote_address,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AcceptReply) Reset() { *m = AcceptReply{} }
+func (m *AcceptReply) String() string { return proto.CompactTextString(m) }
+func (*AcceptReply) ProtoMessage() {}
+
+func (m *AcceptReply) GetNewSocketDescriptor() []byte {
+ if m != nil {
+ return m.NewSocketDescriptor
+ }
+ return nil
+}
+
+func (m *AcceptReply) GetRemoteAddress() *AddressPort {
+ if m != nil {
+ return m.RemoteAddress
+ }
+ return nil
+}
+
+type ShutDownRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"`
+ SendOffset *int64 `protobuf:"varint,3,req,name=send_offset" json:"send_offset,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} }
+func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) }
+func (*ShutDownRequest) ProtoMessage() {}
+
+func (m *ShutDownRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *ShutDownRequest) GetHow() ShutDownRequest_How {
+ if m != nil && m.How != nil {
+ return *m.How
+ }
+ return ShutDownRequest_SOCKET_SHUT_RD
+}
+
+func (m *ShutDownRequest) GetSendOffset() int64 {
+ if m != nil && m.SendOffset != nil {
+ return *m.SendOffset
+ }
+ return 0
+}
+
+type ShutDownReply struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ShutDownReply) Reset() { *m = ShutDownReply{} }
+func (m *ShutDownReply) String() string { return proto.CompactTextString(m) }
+func (*ShutDownReply) ProtoMessage() {}
+
+type CloseRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,def=-1" json:"send_offset,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CloseRequest) Reset() { *m = CloseRequest{} }
+func (m *CloseRequest) String() string { return proto.CompactTextString(m) }
+func (*CloseRequest) ProtoMessage() {}
+
+const Default_CloseRequest_SendOffset int64 = -1
+
+func (m *CloseRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *CloseRequest) GetSendOffset() int64 {
+ if m != nil && m.SendOffset != nil {
+ return *m.SendOffset
+ }
+ return Default_CloseRequest_SendOffset
+}
+
+type CloseReply struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CloseReply) Reset() { *m = CloseReply{} }
+func (m *CloseReply) String() string { return proto.CompactTextString(m) }
+func (*CloseReply) ProtoMessage() {}
+
+type SendRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"`
+ StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset" json:"stream_offset,omitempty"`
+ Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"`
+ SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to" json:"send_to,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SendRequest) Reset() { *m = SendRequest{} }
+func (m *SendRequest) String() string { return proto.CompactTextString(m) }
+func (*SendRequest) ProtoMessage() {}
+
+const Default_SendRequest_Flags int32 = 0
+const Default_SendRequest_TimeoutSeconds float64 = -1
+
+func (m *SendRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *SendRequest) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *SendRequest) GetStreamOffset() int64 {
+ if m != nil && m.StreamOffset != nil {
+ return *m.StreamOffset
+ }
+ return 0
+}
+
+func (m *SendRequest) GetFlags() int32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return Default_SendRequest_Flags
+}
+
+func (m *SendRequest) GetSendTo() *AddressPort {
+ if m != nil {
+ return m.SendTo
+ }
+ return nil
+}
+
+func (m *SendRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_SendRequest_TimeoutSeconds
+}
+
+type SendReply struct {
+ DataSent *int32 `protobuf:"varint,1,opt,name=data_sent" json:"data_sent,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SendReply) Reset() { *m = SendReply{} }
+func (m *SendReply) String() string { return proto.CompactTextString(m) }
+func (*SendReply) ProtoMessage() {}
+
+func (m *SendReply) GetDataSent() int32 {
+ if m != nil && m.DataSent != nil {
+ return *m.DataSent
+ }
+ return 0
+}
+
+type ReceiveRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ DataSize *int32 `protobuf:"varint,2,req,name=data_size" json:"data_size,omitempty"`
+ Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} }
+func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) }
+func (*ReceiveRequest) ProtoMessage() {}
+
+const Default_ReceiveRequest_Flags int32 = 0
+const Default_ReceiveRequest_TimeoutSeconds float64 = -1
+
+func (m *ReceiveRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *ReceiveRequest) GetDataSize() int32 {
+ if m != nil && m.DataSize != nil {
+ return *m.DataSize
+ }
+ return 0
+}
+
+func (m *ReceiveRequest) GetFlags() int32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return Default_ReceiveRequest_Flags
+}
+
+func (m *ReceiveRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_ReceiveRequest_TimeoutSeconds
+}
+
+type ReceiveReply struct {
+ StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset" json:"stream_offset,omitempty"`
+ Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
+ ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from" json:"received_from,omitempty"`
+ BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size" json:"buffer_size,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ReceiveReply) Reset() { *m = ReceiveReply{} }
+func (m *ReceiveReply) String() string { return proto.CompactTextString(m) }
+func (*ReceiveReply) ProtoMessage() {}
+
+func (m *ReceiveReply) GetStreamOffset() int64 {
+ if m != nil && m.StreamOffset != nil {
+ return *m.StreamOffset
+ }
+ return 0
+}
+
+func (m *ReceiveReply) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *ReceiveReply) GetReceivedFrom() *AddressPort {
+ if m != nil {
+ return m.ReceivedFrom
+ }
+ return nil
+}
+
+func (m *ReceiveReply) GetBufferSize() int32 {
+ if m != nil && m.BufferSize != nil {
+ return *m.BufferSize
+ }
+ return 0
+}
+
+type PollEvent struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events" json:"requested_events,omitempty"`
+ ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events" json:"observed_events,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PollEvent) Reset() { *m = PollEvent{} }
+func (m *PollEvent) String() string { return proto.CompactTextString(m) }
+func (*PollEvent) ProtoMessage() {}
+
+func (m *PollEvent) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *PollEvent) GetRequestedEvents() int32 {
+ if m != nil && m.RequestedEvents != nil {
+ return *m.RequestedEvents
+ }
+ return 0
+}
+
+func (m *PollEvent) GetObservedEvents() int32 {
+ if m != nil && m.ObservedEvents != nil {
+ return *m.ObservedEvents
+ }
+ return 0
+}
+
+type PollRequest struct {
+ Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PollRequest) Reset() { *m = PollRequest{} }
+func (m *PollRequest) String() string { return proto.CompactTextString(m) }
+func (*PollRequest) ProtoMessage() {}
+
+const Default_PollRequest_TimeoutSeconds float64 = -1
+
+func (m *PollRequest) GetEvents() []*PollEvent {
+ if m != nil {
+ return m.Events
+ }
+ return nil
+}
+
+func (m *PollRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_PollRequest_TimeoutSeconds
+}
+
+type PollReply struct {
+ Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PollReply) Reset() { *m = PollReply{} }
+func (m *PollReply) String() string { return proto.CompactTextString(m) }
+func (*PollReply) ProtoMessage() {}
+
+func (m *PollReply) GetEvents() []*PollEvent {
+ if m != nil {
+ return m.Events
+ }
+ return nil
+}
+
+type ResolveRequest struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ResolveRequest) Reset() { *m = ResolveRequest{} }
+func (m *ResolveRequest) String() string { return proto.CompactTextString(m) }
+func (*ResolveRequest) ProtoMessage() {}
+
+func (m *ResolveRequest) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily {
+ if m != nil {
+ return m.AddressFamilies
+ }
+ return nil
+}
+
+type ResolveReply struct {
+ PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address" json:"packed_address,omitempty"`
+ CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name" json:"canonical_name,omitempty"`
+ Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ResolveReply) Reset() { *m = ResolveReply{} }
+func (m *ResolveReply) String() string { return proto.CompactTextString(m) }
+func (*ResolveReply) ProtoMessage() {}
+
+func (m *ResolveReply) GetPackedAddress() [][]byte {
+ if m != nil {
+ return m.PackedAddress
+ }
+ return nil
+}
+
+func (m *ResolveReply) GetCanonicalName() string {
+ if m != nil && m.CanonicalName != nil {
+ return *m.CanonicalName
+ }
+ return ""
+}
+
+func (m *ResolveReply) GetAliases() []string {
+ if m != nil {
+ return m.Aliases
+ }
+ return nil
+}
+
+func init() {
+}
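The generated Go above follows the standard proto2 pattern: pointer-typed optional fields, nil-safe getters that fall back to the declared defaults, and per-enum Enum()/String()/UnmarshalJSON helpers. A minimal usage sketch, not part of the vendored files; the import path is inferred from the vendor layout, and the SocketOptionLevel constants are assumed to be generated earlier in this file, above the excerpt shown:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	socket "google.golang.org/appengine/internal/socket"
)

func main() {
	// Unset optional fields come back as their declared defaults.
	req := &socket.CreateSocketRequest{}
	fmt.Println(req.GetListenBacklog()) // 0 (Default_CreateSocketRequest_ListenBacklog)

	// SocketOption carries an enum level, an enum name and a raw byte value;
	// the generated Enum() helpers return pointers suitable for proto2 fields.
	opt := &socket.SocketOption{
		Level:  socket.SocketOption_SOCKET_SOL_TCP.Enum(), // constant assumed from the SocketOptionLevel enum
		Option: socket.SocketOption_SOCKET_TCP_QUICKACK.Enum(),
		Value:  []byte{1},
	}
	fmt.Println(opt.GetLevel(), opt.GetOption()) // SOCKET_SOL_TCP SOCKET_TCP_QUICKACK

	// All required fields are set, so the message marshals cleanly.
	if _, err := proto.Marshal(opt); err != nil {
		fmt.Println("marshal:", err)
	}
}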
diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto
new file mode 100644
index 0000000..2fcc795
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto
@@ -0,0 +1,460 @@
+syntax = "proto2";
+option go_package = "socket";
+
+package appengine;
+
+message RemoteSocketServiceError {
+ enum ErrorCode {
+ SYSTEM_ERROR = 1;
+ GAI_ERROR = 2;
+ FAILURE = 4;
+ PERMISSION_DENIED = 5;
+ INVALID_REQUEST = 6;
+ SOCKET_CLOSED = 7;
+ }
+
+ enum SystemError {
+ option allow_alias = true;
+
+ SYS_SUCCESS = 0;
+ SYS_EPERM = 1;
+ SYS_ENOENT = 2;
+ SYS_ESRCH = 3;
+ SYS_EINTR = 4;
+ SYS_EIO = 5;
+ SYS_ENXIO = 6;
+ SYS_E2BIG = 7;
+ SYS_ENOEXEC = 8;
+ SYS_EBADF = 9;
+ SYS_ECHILD = 10;
+ SYS_EAGAIN = 11;
+ SYS_EWOULDBLOCK = 11;
+ SYS_ENOMEM = 12;
+ SYS_EACCES = 13;
+ SYS_EFAULT = 14;
+ SYS_ENOTBLK = 15;
+ SYS_EBUSY = 16;
+ SYS_EEXIST = 17;
+ SYS_EXDEV = 18;
+ SYS_ENODEV = 19;
+ SYS_ENOTDIR = 20;
+ SYS_EISDIR = 21;
+ SYS_EINVAL = 22;
+ SYS_ENFILE = 23;
+ SYS_EMFILE = 24;
+ SYS_ENOTTY = 25;
+ SYS_ETXTBSY = 26;
+ SYS_EFBIG = 27;
+ SYS_ENOSPC = 28;
+ SYS_ESPIPE = 29;
+ SYS_EROFS = 30;
+ SYS_EMLINK = 31;
+ SYS_EPIPE = 32;
+ SYS_EDOM = 33;
+ SYS_ERANGE = 34;
+ SYS_EDEADLK = 35;
+ SYS_EDEADLOCK = 35;
+ SYS_ENAMETOOLONG = 36;
+ SYS_ENOLCK = 37;
+ SYS_ENOSYS = 38;
+ SYS_ENOTEMPTY = 39;
+ SYS_ELOOP = 40;
+ SYS_ENOMSG = 42;
+ SYS_EIDRM = 43;
+ SYS_ECHRNG = 44;
+ SYS_EL2NSYNC = 45;
+ SYS_EL3HLT = 46;
+ SYS_EL3RST = 47;
+ SYS_ELNRNG = 48;
+ SYS_EUNATCH = 49;
+ SYS_ENOCSI = 50;
+ SYS_EL2HLT = 51;
+ SYS_EBADE = 52;
+ SYS_EBADR = 53;
+ SYS_EXFULL = 54;
+ SYS_ENOANO = 55;
+ SYS_EBADRQC = 56;
+ SYS_EBADSLT = 57;
+ SYS_EBFONT = 59;
+ SYS_ENOSTR = 60;
+ SYS_ENODATA = 61;
+ SYS_ETIME = 62;
+ SYS_ENOSR = 63;
+ SYS_ENONET = 64;
+ SYS_ENOPKG = 65;
+ SYS_EREMOTE = 66;
+ SYS_ENOLINK = 67;
+ SYS_EADV = 68;
+ SYS_ESRMNT = 69;
+ SYS_ECOMM = 70;
+ SYS_EPROTO = 71;
+ SYS_EMULTIHOP = 72;
+ SYS_EDOTDOT = 73;
+ SYS_EBADMSG = 74;
+ SYS_EOVERFLOW = 75;
+ SYS_ENOTUNIQ = 76;
+ SYS_EBADFD = 77;
+ SYS_EREMCHG = 78;
+ SYS_ELIBACC = 79;
+ SYS_ELIBBAD = 80;
+ SYS_ELIBSCN = 81;
+ SYS_ELIBMAX = 82;
+ SYS_ELIBEXEC = 83;
+ SYS_EILSEQ = 84;
+ SYS_ERESTART = 85;
+ SYS_ESTRPIPE = 86;
+ SYS_EUSERS = 87;
+ SYS_ENOTSOCK = 88;
+ SYS_EDESTADDRREQ = 89;
+ SYS_EMSGSIZE = 90;
+ SYS_EPROTOTYPE = 91;
+ SYS_ENOPROTOOPT = 92;
+ SYS_EPROTONOSUPPORT = 93;
+ SYS_ESOCKTNOSUPPORT = 94;
+ SYS_EOPNOTSUPP = 95;
+ SYS_ENOTSUP = 95;
+ SYS_EPFNOSUPPORT = 96;
+ SYS_EAFNOSUPPORT = 97;
+ SYS_EADDRINUSE = 98;
+ SYS_EADDRNOTAVAIL = 99;
+ SYS_ENETDOWN = 100;
+ SYS_ENETUNREACH = 101;
+ SYS_ENETRESET = 102;
+ SYS_ECONNABORTED = 103;
+ SYS_ECONNRESET = 104;
+ SYS_ENOBUFS = 105;
+ SYS_EISCONN = 106;
+ SYS_ENOTCONN = 107;
+ SYS_ESHUTDOWN = 108;
+ SYS_ETOOMANYREFS = 109;
+ SYS_ETIMEDOUT = 110;
+ SYS_ECONNREFUSED = 111;
+ SYS_EHOSTDOWN = 112;
+ SYS_EHOSTUNREACH = 113;
+ SYS_EALREADY = 114;
+ SYS_EINPROGRESS = 115;
+ SYS_ESTALE = 116;
+ SYS_EUCLEAN = 117;
+ SYS_ENOTNAM = 118;
+ SYS_ENAVAIL = 119;
+ SYS_EISNAM = 120;
+ SYS_EREMOTEIO = 121;
+ SYS_EDQUOT = 122;
+ SYS_ENOMEDIUM = 123;
+ SYS_EMEDIUMTYPE = 124;
+ SYS_ECANCELED = 125;
+ SYS_ENOKEY = 126;
+ SYS_EKEYEXPIRED = 127;
+ SYS_EKEYREVOKED = 128;
+ SYS_EKEYREJECTED = 129;
+ SYS_EOWNERDEAD = 130;
+ SYS_ENOTRECOVERABLE = 131;
+ SYS_ERFKILL = 132;
+ }
+
+ optional int32 system_error = 1 [default=0];
+ optional string error_detail = 2;
+}
+
+message AddressPort {
+ required int32 port = 1;
+ optional bytes packed_address = 2;
+
+ optional string hostname_hint = 3;
+}
+
+
+
+message CreateSocketRequest {
+ enum SocketFamily {
+ IPv4 = 1;
+ IPv6 = 2;
+ }
+
+ enum SocketProtocol {
+ TCP = 1;
+ UDP = 2;
+ }
+
+ required SocketFamily family = 1;
+ required SocketProtocol protocol = 2;
+
+ repeated SocketOption socket_options = 3;
+
+ optional AddressPort proxy_external_ip = 4;
+
+ optional int32 listen_backlog = 5 [default=0];
+
+ optional AddressPort remote_ip = 6;
+
+ optional string app_id = 9;
+
+ optional int64 project_id = 10;
+}
+
+message CreateSocketReply {
+ optional string socket_descriptor = 1;
+
+ optional AddressPort server_address = 3;
+
+ optional AddressPort proxy_external_ip = 4;
+
+ extensions 1000 to max;
+}
+
+
+
+message BindRequest {
+ required string socket_descriptor = 1;
+ required AddressPort proxy_external_ip = 2;
+}
+
+message BindReply {
+ optional AddressPort proxy_external_ip = 1;
+}
+
+
+
+message GetSocketNameRequest {
+ required string socket_descriptor = 1;
+}
+
+message GetSocketNameReply {
+ optional AddressPort proxy_external_ip = 2;
+}
+
+
+
+message GetPeerNameRequest {
+ required string socket_descriptor = 1;
+}
+
+message GetPeerNameReply {
+ optional AddressPort peer_ip = 2;
+}
+
+
+message SocketOption {
+
+ enum SocketOptionLevel {
+ SOCKET_SOL_IP = 0;
+ SOCKET_SOL_SOCKET = 1;
+ SOCKET_SOL_TCP = 6;
+ SOCKET_SOL_UDP = 17;
+ }
+
+ enum SocketOptionName {
+ option allow_alias = true;
+
+ SOCKET_SO_DEBUG = 1;
+ SOCKET_SO_REUSEADDR = 2;
+ SOCKET_SO_TYPE = 3;
+ SOCKET_SO_ERROR = 4;
+ SOCKET_SO_DONTROUTE = 5;
+ SOCKET_SO_BROADCAST = 6;
+ SOCKET_SO_SNDBUF = 7;
+ SOCKET_SO_RCVBUF = 8;
+ SOCKET_SO_KEEPALIVE = 9;
+ SOCKET_SO_OOBINLINE = 10;
+ SOCKET_SO_LINGER = 13;
+ SOCKET_SO_RCVTIMEO = 20;
+ SOCKET_SO_SNDTIMEO = 21;
+
+ SOCKET_IP_TOS = 1;
+ SOCKET_IP_TTL = 2;
+ SOCKET_IP_HDRINCL = 3;
+ SOCKET_IP_OPTIONS = 4;
+
+ SOCKET_TCP_NODELAY = 1;
+ SOCKET_TCP_MAXSEG = 2;
+ SOCKET_TCP_CORK = 3;
+ SOCKET_TCP_KEEPIDLE = 4;
+ SOCKET_TCP_KEEPINTVL = 5;
+ SOCKET_TCP_KEEPCNT = 6;
+ SOCKET_TCP_SYNCNT = 7;
+ SOCKET_TCP_LINGER2 = 8;
+ SOCKET_TCP_DEFER_ACCEPT = 9;
+ SOCKET_TCP_WINDOW_CLAMP = 10;
+ SOCKET_TCP_INFO = 11;
+ SOCKET_TCP_QUICKACK = 12;
+ }
+
+ required SocketOptionLevel level = 1;
+ required SocketOptionName option = 2;
+ required bytes value = 3;
+}
+
+
+message SetSocketOptionsRequest {
+ required string socket_descriptor = 1;
+ repeated SocketOption options = 2;
+}
+
+message SetSocketOptionsReply {
+}
+
+message GetSocketOptionsRequest {
+ required string socket_descriptor = 1;
+ repeated SocketOption options = 2;
+}
+
+message GetSocketOptionsReply {
+ repeated SocketOption options = 2;
+}
+
+
+message ConnectRequest {
+ required string socket_descriptor = 1;
+ required AddressPort remote_ip = 2;
+ optional double timeout_seconds = 3 [default=-1];
+}
+
+message ConnectReply {
+ optional AddressPort proxy_external_ip = 1;
+
+ extensions 1000 to max;
+}
+
+
+message ListenRequest {
+ required string socket_descriptor = 1;
+ required int32 backlog = 2;
+}
+
+message ListenReply {
+}
+
+
+message AcceptRequest {
+ required string socket_descriptor = 1;
+ optional double timeout_seconds = 2 [default=-1];
+}
+
+message AcceptReply {
+ optional bytes new_socket_descriptor = 2;
+ optional AddressPort remote_address = 3;
+}
+
+
+
+message ShutDownRequest {
+ enum How {
+ SOCKET_SHUT_RD = 1;
+ SOCKET_SHUT_WR = 2;
+ SOCKET_SHUT_RDWR = 3;
+ }
+ required string socket_descriptor = 1;
+ required How how = 2;
+ required int64 send_offset = 3;
+}
+
+message ShutDownReply {
+}
+
+
+
+message CloseRequest {
+ required string socket_descriptor = 1;
+ optional int64 send_offset = 2 [default=-1];
+}
+
+message CloseReply {
+}
+
+
+
+message SendRequest {
+ required string socket_descriptor = 1;
+ required bytes data = 2 [ctype=CORD];
+ required int64 stream_offset = 3;
+ optional int32 flags = 4 [default=0];
+ optional AddressPort send_to = 5;
+ optional double timeout_seconds = 6 [default=-1];
+}
+
+message SendReply {
+ optional int32 data_sent = 1;
+}
+
+
+message ReceiveRequest {
+ enum Flags {
+ MSG_OOB = 1;
+ MSG_PEEK = 2;
+ }
+ required string socket_descriptor = 1;
+ required int32 data_size = 2;
+ optional int32 flags = 3 [default=0];
+ optional double timeout_seconds = 5 [default=-1];
+}
+
+message ReceiveReply {
+ optional int64 stream_offset = 2;
+ optional bytes data = 3 [ctype=CORD];
+ optional AddressPort received_from = 4;
+ optional int32 buffer_size = 5;
+}
+
+
+
+message PollEvent {
+
+ enum PollEventFlag {
+ SOCKET_POLLNONE = 0;
+ SOCKET_POLLIN = 1;
+ SOCKET_POLLPRI = 2;
+ SOCKET_POLLOUT = 4;
+ SOCKET_POLLERR = 8;
+ SOCKET_POLLHUP = 16;
+ SOCKET_POLLNVAL = 32;
+ SOCKET_POLLRDNORM = 64;
+ SOCKET_POLLRDBAND = 128;
+ SOCKET_POLLWRNORM = 256;
+ SOCKET_POLLWRBAND = 512;
+ SOCKET_POLLMSG = 1024;
+ SOCKET_POLLREMOVE = 4096;
+ SOCKET_POLLRDHUP = 8192;
+ };
+
+ required string socket_descriptor = 1;
+ required int32 requested_events = 2;
+ required int32 observed_events = 3;
+}
+
+message PollRequest {
+ repeated PollEvent events = 1;
+ optional double timeout_seconds = 2 [default=-1];
+}
+
+message PollReply {
+ repeated PollEvent events = 2;
+}
+
+message ResolveRequest {
+ required string name = 1;
+ repeated CreateSocketRequest.SocketFamily address_families = 2;
+}
+
+message ResolveReply {
+ enum ErrorCode {
+ SOCKET_EAI_ADDRFAMILY = 1;
+ SOCKET_EAI_AGAIN = 2;
+ SOCKET_EAI_BADFLAGS = 3;
+ SOCKET_EAI_FAIL = 4;
+ SOCKET_EAI_FAMILY = 5;
+ SOCKET_EAI_MEMORY = 6;
+ SOCKET_EAI_NODATA = 7;
+ SOCKET_EAI_NONAME = 8;
+ SOCKET_EAI_SERVICE = 9;
+ SOCKET_EAI_SOCKTYPE = 10;
+ SOCKET_EAI_SYSTEM = 11;
+ SOCKET_EAI_BADHINTS = 12;
+ SOCKET_EAI_PROTOCOL = 13;
+ SOCKET_EAI_OVERFLOW = 14;
+ SOCKET_EAI_MAX = 15;
+ };
+
+ repeated bytes packed_address = 2;
+ optional string canonical_name = 3;
+ repeated string aliases = 4;
+}
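SocketOptionName sets option allow_alias = true, which is why the generated name map above carries the "// Duplicate value:" comments: several constants share one wire number, and only the first registered name survives in the int32-to-string map. A small hypothetical check, not vendored code; the SOCKET_TCP_NODELAY constant is assumed from protoc-gen-go's usual naming and is generated above the excerpt shown:

package main

import (
	"fmt"

	socket "google.golang.org/appengine/internal/socket"
)

func main() {
	// SOCKET_TCP_NODELAY, SOCKET_IP_TOS and SOCKET_SO_DEBUG all alias the
	// numeric value 1, so String() resolves to the first name in the map.
	v := socket.SocketOption_SOCKET_TCP_NODELAY
	fmt.Println(int32(v), v) // 1 SOCKET_SO_DEBUG
}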
diff --git a/vendor/google.golang.org/appengine/internal/system/system_service.pb.go b/vendor/google.golang.org/appengine/internal/system/system_service.pb.go
new file mode 100644
index 0000000..56cc3f8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/system/system_service.pb.go
@@ -0,0 +1,198 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/system/system_service.proto
+// DO NOT EDIT!
+
+/*
+Package system is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/system/system_service.proto
+
+It has these top-level messages:
+ SystemServiceError
+ SystemStat
+ GetSystemStatsRequest
+ GetSystemStatsResponse
+ StartBackgroundRequestRequest
+ StartBackgroundRequestResponse
+*/
+package system
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type SystemServiceError_ErrorCode int32
+
+const (
+ SystemServiceError_OK SystemServiceError_ErrorCode = 0
+ SystemServiceError_INTERNAL_ERROR SystemServiceError_ErrorCode = 1
+ SystemServiceError_BACKEND_REQUIRED SystemServiceError_ErrorCode = 2
+ SystemServiceError_LIMIT_REACHED SystemServiceError_ErrorCode = 3
+)
+
+var SystemServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INTERNAL_ERROR",
+ 2: "BACKEND_REQUIRED",
+ 3: "LIMIT_REACHED",
+}
+var SystemServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INTERNAL_ERROR": 1,
+ "BACKEND_REQUIRED": 2,
+ "LIMIT_REACHED": 3,
+}
+
+func (x SystemServiceError_ErrorCode) Enum() *SystemServiceError_ErrorCode {
+ p := new(SystemServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x SystemServiceError_ErrorCode) String() string {
+ return proto.EnumName(SystemServiceError_ErrorCode_name, int32(x))
+}
+func (x *SystemServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SystemServiceError_ErrorCode_value, data, "SystemServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = SystemServiceError_ErrorCode(value)
+ return nil
+}
+
+type SystemServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SystemServiceError) Reset() { *m = SystemServiceError{} }
+func (m *SystemServiceError) String() string { return proto.CompactTextString(m) }
+func (*SystemServiceError) ProtoMessage() {}
+
+type SystemStat struct {
+	// Instantaneous value of this stat.
+ Current *float64 `protobuf:"fixed64,1,opt,name=current" json:"current,omitempty"`
+	// Average over time, if this stat has an instantaneous value.
+ Average1M *float64 `protobuf:"fixed64,3,opt,name=average1m" json:"average1m,omitempty"`
+ Average10M *float64 `protobuf:"fixed64,4,opt,name=average10m" json:"average10m,omitempty"`
+ // Total value, if the stat accumulates over time.
+ Total *float64 `protobuf:"fixed64,2,opt,name=total" json:"total,omitempty"`
+ // Rate over time, if this stat accumulates.
+ Rate1M *float64 `protobuf:"fixed64,5,opt,name=rate1m" json:"rate1m,omitempty"`
+ Rate10M *float64 `protobuf:"fixed64,6,opt,name=rate10m" json:"rate10m,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SystemStat) Reset() { *m = SystemStat{} }
+func (m *SystemStat) String() string { return proto.CompactTextString(m) }
+func (*SystemStat) ProtoMessage() {}
+
+func (m *SystemStat) GetCurrent() float64 {
+ if m != nil && m.Current != nil {
+ return *m.Current
+ }
+ return 0
+}
+
+func (m *SystemStat) GetAverage1M() float64 {
+ if m != nil && m.Average1M != nil {
+ return *m.Average1M
+ }
+ return 0
+}
+
+func (m *SystemStat) GetAverage10M() float64 {
+ if m != nil && m.Average10M != nil {
+ return *m.Average10M
+ }
+ return 0
+}
+
+func (m *SystemStat) GetTotal() float64 {
+ if m != nil && m.Total != nil {
+ return *m.Total
+ }
+ return 0
+}
+
+func (m *SystemStat) GetRate1M() float64 {
+ if m != nil && m.Rate1M != nil {
+ return *m.Rate1M
+ }
+ return 0
+}
+
+func (m *SystemStat) GetRate10M() float64 {
+ if m != nil && m.Rate10M != nil {
+ return *m.Rate10M
+ }
+ return 0
+}
+
+type GetSystemStatsRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSystemStatsRequest) Reset() { *m = GetSystemStatsRequest{} }
+func (m *GetSystemStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetSystemStatsRequest) ProtoMessage() {}
+
+type GetSystemStatsResponse struct {
+ // CPU used by this instance, in mcycles.
+ Cpu *SystemStat `protobuf:"bytes,1,opt,name=cpu" json:"cpu,omitempty"`
+ // Physical memory (RAM) used by this instance, in megabytes.
+ Memory *SystemStat `protobuf:"bytes,2,opt,name=memory" json:"memory,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSystemStatsResponse) Reset() { *m = GetSystemStatsResponse{} }
+func (m *GetSystemStatsResponse) String() string { return proto.CompactTextString(m) }
+func (*GetSystemStatsResponse) ProtoMessage() {}
+
+func (m *GetSystemStatsResponse) GetCpu() *SystemStat {
+ if m != nil {
+ return m.Cpu
+ }
+ return nil
+}
+
+func (m *GetSystemStatsResponse) GetMemory() *SystemStat {
+ if m != nil {
+ return m.Memory
+ }
+ return nil
+}
+
+type StartBackgroundRequestRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartBackgroundRequestRequest) Reset() { *m = StartBackgroundRequestRequest{} }
+func (m *StartBackgroundRequestRequest) String() string { return proto.CompactTextString(m) }
+func (*StartBackgroundRequestRequest) ProtoMessage() {}
+
+type StartBackgroundRequestResponse struct {
+ // Every /_ah/background request will have an X-AppEngine-BackgroundRequest
+ // header, whose value will be equal to this parameter, the request_id.
+ RequestId *string `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartBackgroundRequestResponse) Reset() { *m = StartBackgroundRequestResponse{} }
+func (m *StartBackgroundRequestResponse) String() string { return proto.CompactTextString(m) }
+func (*StartBackgroundRequestResponse) ProtoMessage() {}
+
+func (m *StartBackgroundRequestResponse) GetRequestId() string {
+ if m != nil && m.RequestId != nil {
+ return *m.RequestId
+ }
+ return ""
+}
+
+func init() {
+}
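SystemStat models every metric as an optional double, so the generated struct holds *float64 fields and all getters are nil-safe. A short sketch of how such a message is typically populated and read, not part of the vendored files; proto.Float64 is the stock pointer helper from github.com/golang/protobuf/proto and the import path is inferred from the vendor layout:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	system "google.golang.org/appengine/internal/system"
)

func main() {
	stat := &system.SystemStat{
		Current: proto.Float64(12.5),
		Total:   proto.Float64(420),
	}
	fmt.Println(stat.GetCurrent())   // 12.5
	fmt.Println(stat.GetAverage1M()) // 0, field unset

	resp := &system.GetSystemStatsResponse{Cpu: stat}
	fmt.Println(resp.GetCpu().GetTotal()) // 420
}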
diff --git a/vendor/google.golang.org/appengine/internal/system/system_service.proto b/vendor/google.golang.org/appengine/internal/system/system_service.proto
new file mode 100644
index 0000000..32c0bf8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/system/system_service.proto
@@ -0,0 +1,49 @@
+syntax = "proto2";
+option go_package = "system";
+
+package appengine;
+
+message SystemServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INTERNAL_ERROR = 1;
+ BACKEND_REQUIRED = 2;
+ LIMIT_REACHED = 3;
+ }
+}
+
+message SystemStat {
+  // Instantaneous value of this stat.
+ optional double current = 1;
+
+  // Average over time, if this stat has an instantaneous value.
+ optional double average1m = 3;
+ optional double average10m = 4;
+
+ // Total value, if the stat accumulates over time.
+ optional double total = 2;
+
+ // Rate over time, if this stat accumulates.
+ optional double rate1m = 5;
+ optional double rate10m = 6;
+}
+
+message GetSystemStatsRequest {
+}
+
+message GetSystemStatsResponse {
+ // CPU used by this instance, in mcycles.
+ optional SystemStat cpu = 1;
+
+ // Physical memory (RAM) used by this instance, in megabytes.
+ optional SystemStat memory = 2;
+}
+
+message StartBackgroundRequestRequest {
+}
+
+message StartBackgroundRequestResponse {
+ // Every /_ah/background request will have an X-AppEngine-BackgroundRequest
+ // header, whose value will be equal to this parameter, the request_id.
+ optional string request_id = 1;
+}
diff --git a/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go
new file mode 100644
index 0000000..c3d428e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go
@@ -0,0 +1,1888 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
+// DO NOT EDIT!
+
+/*
+Package taskqueue is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
+
+It has these top-level messages:
+ TaskQueueServiceError
+ TaskPayload
+ TaskQueueRetryParameters
+ TaskQueueAcl
+ TaskQueueHttpHeader
+ TaskQueueMode
+ TaskQueueAddRequest
+ TaskQueueAddResponse
+ TaskQueueBulkAddRequest
+ TaskQueueBulkAddResponse
+ TaskQueueDeleteRequest
+ TaskQueueDeleteResponse
+ TaskQueueForceRunRequest
+ TaskQueueForceRunResponse
+ TaskQueueUpdateQueueRequest
+ TaskQueueUpdateQueueResponse
+ TaskQueueFetchQueuesRequest
+ TaskQueueFetchQueuesResponse
+ TaskQueueFetchQueueStatsRequest
+ TaskQueueScannerQueueInfo
+ TaskQueueFetchQueueStatsResponse
+ TaskQueuePauseQueueRequest
+ TaskQueuePauseQueueResponse
+ TaskQueuePurgeQueueRequest
+ TaskQueuePurgeQueueResponse
+ TaskQueueDeleteQueueRequest
+ TaskQueueDeleteQueueResponse
+ TaskQueueDeleteGroupRequest
+ TaskQueueDeleteGroupResponse
+ TaskQueueQueryTasksRequest
+ TaskQueueQueryTasksResponse
+ TaskQueueFetchTaskRequest
+ TaskQueueFetchTaskResponse
+ TaskQueueUpdateStorageLimitRequest
+ TaskQueueUpdateStorageLimitResponse
+ TaskQueueQueryAndOwnTasksRequest
+ TaskQueueQueryAndOwnTasksResponse
+ TaskQueueModifyTaskLeaseRequest
+ TaskQueueModifyTaskLeaseResponse
+*/
+package taskqueue
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import appengine "google.golang.org/appengine/internal/datastore"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type TaskQueueServiceError_ErrorCode int32
+
+const (
+ TaskQueueServiceError_OK TaskQueueServiceError_ErrorCode = 0
+ TaskQueueServiceError_UNKNOWN_QUEUE TaskQueueServiceError_ErrorCode = 1
+ TaskQueueServiceError_TRANSIENT_ERROR TaskQueueServiceError_ErrorCode = 2
+ TaskQueueServiceError_INTERNAL_ERROR TaskQueueServiceError_ErrorCode = 3
+ TaskQueueServiceError_TASK_TOO_LARGE TaskQueueServiceError_ErrorCode = 4
+ TaskQueueServiceError_INVALID_TASK_NAME TaskQueueServiceError_ErrorCode = 5
+ TaskQueueServiceError_INVALID_QUEUE_NAME TaskQueueServiceError_ErrorCode = 6
+ TaskQueueServiceError_INVALID_URL TaskQueueServiceError_ErrorCode = 7
+ TaskQueueServiceError_INVALID_QUEUE_RATE TaskQueueServiceError_ErrorCode = 8
+ TaskQueueServiceError_PERMISSION_DENIED TaskQueueServiceError_ErrorCode = 9
+ TaskQueueServiceError_TASK_ALREADY_EXISTS TaskQueueServiceError_ErrorCode = 10
+ TaskQueueServiceError_TOMBSTONED_TASK TaskQueueServiceError_ErrorCode = 11
+ TaskQueueServiceError_INVALID_ETA TaskQueueServiceError_ErrorCode = 12
+ TaskQueueServiceError_INVALID_REQUEST TaskQueueServiceError_ErrorCode = 13
+ TaskQueueServiceError_UNKNOWN_TASK TaskQueueServiceError_ErrorCode = 14
+ TaskQueueServiceError_TOMBSTONED_QUEUE TaskQueueServiceError_ErrorCode = 15
+ TaskQueueServiceError_DUPLICATE_TASK_NAME TaskQueueServiceError_ErrorCode = 16
+ TaskQueueServiceError_SKIPPED TaskQueueServiceError_ErrorCode = 17
+ TaskQueueServiceError_TOO_MANY_TASKS TaskQueueServiceError_ErrorCode = 18
+ TaskQueueServiceError_INVALID_PAYLOAD TaskQueueServiceError_ErrorCode = 19
+ TaskQueueServiceError_INVALID_RETRY_PARAMETERS TaskQueueServiceError_ErrorCode = 20
+ TaskQueueServiceError_INVALID_QUEUE_MODE TaskQueueServiceError_ErrorCode = 21
+ TaskQueueServiceError_ACL_LOOKUP_ERROR TaskQueueServiceError_ErrorCode = 22
+ TaskQueueServiceError_TRANSACTIONAL_REQUEST_TOO_LARGE TaskQueueServiceError_ErrorCode = 23
+ TaskQueueServiceError_INCORRECT_CREATOR_NAME TaskQueueServiceError_ErrorCode = 24
+ TaskQueueServiceError_TASK_LEASE_EXPIRED TaskQueueServiceError_ErrorCode = 25
+ TaskQueueServiceError_QUEUE_PAUSED TaskQueueServiceError_ErrorCode = 26
+ TaskQueueServiceError_INVALID_TAG TaskQueueServiceError_ErrorCode = 27
+ // Reserved range for the Datastore error codes.
+ // Original Datastore error code is shifted by DATASTORE_ERROR offset.
+ TaskQueueServiceError_DATASTORE_ERROR TaskQueueServiceError_ErrorCode = 10000
+)
+
+var TaskQueueServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "UNKNOWN_QUEUE",
+ 2: "TRANSIENT_ERROR",
+ 3: "INTERNAL_ERROR",
+ 4: "TASK_TOO_LARGE",
+ 5: "INVALID_TASK_NAME",
+ 6: "INVALID_QUEUE_NAME",
+ 7: "INVALID_URL",
+ 8: "INVALID_QUEUE_RATE",
+ 9: "PERMISSION_DENIED",
+ 10: "TASK_ALREADY_EXISTS",
+ 11: "TOMBSTONED_TASK",
+ 12: "INVALID_ETA",
+ 13: "INVALID_REQUEST",
+ 14: "UNKNOWN_TASK",
+ 15: "TOMBSTONED_QUEUE",
+ 16: "DUPLICATE_TASK_NAME",
+ 17: "SKIPPED",
+ 18: "TOO_MANY_TASKS",
+ 19: "INVALID_PAYLOAD",
+ 20: "INVALID_RETRY_PARAMETERS",
+ 21: "INVALID_QUEUE_MODE",
+ 22: "ACL_LOOKUP_ERROR",
+ 23: "TRANSACTIONAL_REQUEST_TOO_LARGE",
+ 24: "INCORRECT_CREATOR_NAME",
+ 25: "TASK_LEASE_EXPIRED",
+ 26: "QUEUE_PAUSED",
+ 27: "INVALID_TAG",
+ 10000: "DATASTORE_ERROR",
+}
+var TaskQueueServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "UNKNOWN_QUEUE": 1,
+ "TRANSIENT_ERROR": 2,
+ "INTERNAL_ERROR": 3,
+ "TASK_TOO_LARGE": 4,
+ "INVALID_TASK_NAME": 5,
+ "INVALID_QUEUE_NAME": 6,
+ "INVALID_URL": 7,
+ "INVALID_QUEUE_RATE": 8,
+ "PERMISSION_DENIED": 9,
+ "TASK_ALREADY_EXISTS": 10,
+ "TOMBSTONED_TASK": 11,
+ "INVALID_ETA": 12,
+ "INVALID_REQUEST": 13,
+ "UNKNOWN_TASK": 14,
+ "TOMBSTONED_QUEUE": 15,
+ "DUPLICATE_TASK_NAME": 16,
+ "SKIPPED": 17,
+ "TOO_MANY_TASKS": 18,
+ "INVALID_PAYLOAD": 19,
+ "INVALID_RETRY_PARAMETERS": 20,
+ "INVALID_QUEUE_MODE": 21,
+ "ACL_LOOKUP_ERROR": 22,
+ "TRANSACTIONAL_REQUEST_TOO_LARGE": 23,
+ "INCORRECT_CREATOR_NAME": 24,
+ "TASK_LEASE_EXPIRED": 25,
+ "QUEUE_PAUSED": 26,
+ "INVALID_TAG": 27,
+ "DATASTORE_ERROR": 10000,
+}
+
+func (x TaskQueueServiceError_ErrorCode) Enum() *TaskQueueServiceError_ErrorCode {
+ p := new(TaskQueueServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x TaskQueueServiceError_ErrorCode) String() string {
+ return proto.EnumName(TaskQueueServiceError_ErrorCode_name, int32(x))
+}
+func (x *TaskQueueServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueServiceError_ErrorCode_value, data, "TaskQueueServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueServiceError_ErrorCode(value)
+ return nil
+}
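
The Enum/String/UnmarshalJSON helpers above are the standard proto2 enum accessors: String resolves through TaskQueueServiceError_ErrorCode_name, and UnmarshalJSON accepts either the symbolic name or the numeric value. A small illustrative sketch; the function name is made up, and "fmt" is assumed to be imported alongside the proto package:

// Illustrative only: round-trip an ErrorCode through its JSON name.
func exampleErrorCodeJSON() {
	code := TaskQueueServiceError_QUEUE_PAUSED
	fmt.Println(code.String()) // "QUEUE_PAUSED"

	var decoded TaskQueueServiceError_ErrorCode
	if err := decoded.UnmarshalJSON([]byte(`"QUEUE_PAUSED"`)); err == nil {
		fmt.Println(decoded == code) // true
	}
}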
+
+type TaskQueueMode_Mode int32
+
+const (
+ TaskQueueMode_PUSH TaskQueueMode_Mode = 0
+ TaskQueueMode_PULL TaskQueueMode_Mode = 1
+)
+
+var TaskQueueMode_Mode_name = map[int32]string{
+ 0: "PUSH",
+ 1: "PULL",
+}
+var TaskQueueMode_Mode_value = map[string]int32{
+ "PUSH": 0,
+ "PULL": 1,
+}
+
+func (x TaskQueueMode_Mode) Enum() *TaskQueueMode_Mode {
+ p := new(TaskQueueMode_Mode)
+ *p = x
+ return p
+}
+func (x TaskQueueMode_Mode) String() string {
+ return proto.EnumName(TaskQueueMode_Mode_name, int32(x))
+}
+func (x *TaskQueueMode_Mode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueMode_Mode_value, data, "TaskQueueMode_Mode")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueMode_Mode(value)
+ return nil
+}
+
+type TaskQueueAddRequest_RequestMethod int32
+
+const (
+ TaskQueueAddRequest_GET TaskQueueAddRequest_RequestMethod = 1
+ TaskQueueAddRequest_POST TaskQueueAddRequest_RequestMethod = 2
+ TaskQueueAddRequest_HEAD TaskQueueAddRequest_RequestMethod = 3
+ TaskQueueAddRequest_PUT TaskQueueAddRequest_RequestMethod = 4
+ TaskQueueAddRequest_DELETE TaskQueueAddRequest_RequestMethod = 5
+)
+
+var TaskQueueAddRequest_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+}
+var TaskQueueAddRequest_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+}
+
+func (x TaskQueueAddRequest_RequestMethod) Enum() *TaskQueueAddRequest_RequestMethod {
+ p := new(TaskQueueAddRequest_RequestMethod)
+ *p = x
+ return p
+}
+func (x TaskQueueAddRequest_RequestMethod) String() string {
+ return proto.EnumName(TaskQueueAddRequest_RequestMethod_name, int32(x))
+}
+func (x *TaskQueueAddRequest_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueAddRequest_RequestMethod_value, data, "TaskQueueAddRequest_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueAddRequest_RequestMethod(value)
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task_RequestMethod int32
+
+const (
+ TaskQueueQueryTasksResponse_Task_GET TaskQueueQueryTasksResponse_Task_RequestMethod = 1
+ TaskQueueQueryTasksResponse_Task_POST TaskQueueQueryTasksResponse_Task_RequestMethod = 2
+ TaskQueueQueryTasksResponse_Task_HEAD TaskQueueQueryTasksResponse_Task_RequestMethod = 3
+ TaskQueueQueryTasksResponse_Task_PUT TaskQueueQueryTasksResponse_Task_RequestMethod = 4
+ TaskQueueQueryTasksResponse_Task_DELETE TaskQueueQueryTasksResponse_Task_RequestMethod = 5
+)
+
+var TaskQueueQueryTasksResponse_Task_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+}
+var TaskQueueQueryTasksResponse_Task_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+}
+
+func (x TaskQueueQueryTasksResponse_Task_RequestMethod) Enum() *TaskQueueQueryTasksResponse_Task_RequestMethod {
+ p := new(TaskQueueQueryTasksResponse_Task_RequestMethod)
+ *p = x
+ return p
+}
+func (x TaskQueueQueryTasksResponse_Task_RequestMethod) String() string {
+ return proto.EnumName(TaskQueueQueryTasksResponse_Task_RequestMethod_name, int32(x))
+}
+func (x *TaskQueueQueryTasksResponse_Task_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueQueryTasksResponse_Task_RequestMethod_value, data, "TaskQueueQueryTasksResponse_Task_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueQueryTasksResponse_Task_RequestMethod(value)
+ return nil
+}
+
+type TaskQueueServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueServiceError) Reset() { *m = TaskQueueServiceError{} }
+func (m *TaskQueueServiceError) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueServiceError) ProtoMessage() {}
+
+type TaskPayload struct {
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskPayload) Reset() { *m = TaskPayload{} }
+func (m *TaskPayload) String() string { return proto.CompactTextString(m) }
+func (*TaskPayload) ProtoMessage() {}
+
+func (m *TaskPayload) Marshal() ([]byte, error) {
+ return proto.MarshalMessageSet(m.ExtensionMap())
+}
+func (m *TaskPayload) Unmarshal(buf []byte) error {
+ return proto.UnmarshalMessageSet(buf, m.ExtensionMap())
+}
+func (m *TaskPayload) MarshalJSON() ([]byte, error) {
+ return proto.MarshalMessageSetJSON(m.XXX_extensions)
+}
+func (m *TaskPayload) UnmarshalJSON(buf []byte) error {
+ return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions)
+}
+
+// ensure TaskPayload satisfies proto.Marshaler and proto.Unmarshaler
+var _ proto.Marshaler = (*TaskPayload)(nil)
+var _ proto.Unmarshaler = (*TaskPayload)(nil)
+
+var extRange_TaskPayload = []proto.ExtensionRange{
+ {10, 2147483646},
+}
+
+func (*TaskPayload) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_TaskPayload
+}
+func (m *TaskPayload) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
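
Because TaskPayload satisfies proto.Marshaler and proto.Unmarshaler (asserted above), proto.Marshal and proto.Unmarshal dispatch to the MessageSet encoding of its extension map. A minimal round-trip sketch with no extensions populated, for illustration only; the function name is hypothetical:

// Illustrative only: encode and decode an empty TaskPayload.
func exampleTaskPayloadRoundTrip() error {
	in := &TaskPayload{}
	data, err := proto.Marshal(in) // dispatches to (*TaskPayload).Marshal above
	if err != nil {
		return err
	}
	out := &TaskPayload{}
	return proto.Unmarshal(data, out) // dispatches to (*TaskPayload).Unmarshal above
}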
+
+type TaskQueueRetryParameters struct {
+ RetryLimit *int32 `protobuf:"varint,1,opt,name=retry_limit" json:"retry_limit,omitempty"`
+ AgeLimitSec *int64 `protobuf:"varint,2,opt,name=age_limit_sec" json:"age_limit_sec,omitempty"`
+ MinBackoffSec *float64 `protobuf:"fixed64,3,opt,name=min_backoff_sec,def=0.1" json:"min_backoff_sec,omitempty"`
+ MaxBackoffSec *float64 `protobuf:"fixed64,4,opt,name=max_backoff_sec,def=3600" json:"max_backoff_sec,omitempty"`
+ MaxDoublings *int32 `protobuf:"varint,5,opt,name=max_doublings,def=16" json:"max_doublings,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueRetryParameters) Reset() { *m = TaskQueueRetryParameters{} }
+func (m *TaskQueueRetryParameters) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueRetryParameters) ProtoMessage() {}
+
+const Default_TaskQueueRetryParameters_MinBackoffSec float64 = 0.1
+const Default_TaskQueueRetryParameters_MaxBackoffSec float64 = 3600
+const Default_TaskQueueRetryParameters_MaxDoublings int32 = 16
+
+func (m *TaskQueueRetryParameters) GetRetryLimit() int32 {
+ if m != nil && m.RetryLimit != nil {
+ return *m.RetryLimit
+ }
+ return 0
+}
+
+func (m *TaskQueueRetryParameters) GetAgeLimitSec() int64 {
+ if m != nil && m.AgeLimitSec != nil {
+ return *m.AgeLimitSec
+ }
+ return 0
+}
+
+func (m *TaskQueueRetryParameters) GetMinBackoffSec() float64 {
+ if m != nil && m.MinBackoffSec != nil {
+ return *m.MinBackoffSec
+ }
+ return Default_TaskQueueRetryParameters_MinBackoffSec
+}
+
+func (m *TaskQueueRetryParameters) GetMaxBackoffSec() float64 {
+ if m != nil && m.MaxBackoffSec != nil {
+ return *m.MaxBackoffSec
+ }
+ return Default_TaskQueueRetryParameters_MaxBackoffSec
+}
+
+func (m *TaskQueueRetryParameters) GetMaxDoublings() int32 {
+ if m != nil && m.MaxDoublings != nil {
+ return *m.MaxDoublings
+ }
+ return Default_TaskQueueRetryParameters_MaxDoublings
+}
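
The Default_TaskQueueRetryParameters_* constants mirror the [default = ...] options in the .proto definition later in this diff; each getter falls back to its constant when the optional field is nil. A short usage sketch (illustrative only; assumes "fmt" and the proto package are imported):

// Illustrative only: unset optional fields report their declared defaults.
func exampleRetryParameterDefaults() {
	p := &TaskQueueRetryParameters{}
	fmt.Println(p.GetMinBackoffSec()) // 0.1  (declared default)
	fmt.Println(p.GetMaxBackoffSec()) // 3600 (declared default)
	fmt.Println(p.GetMaxDoublings())  // 16   (declared default)

	p.MinBackoffSec = proto.Float64(2.5)
	fmt.Println(p.GetMinBackoffSec()) // 2.5  (explicitly set)
}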
+
+type TaskQueueAcl struct {
+ UserEmail [][]byte `protobuf:"bytes,1,rep,name=user_email" json:"user_email,omitempty"`
+ WriterEmail [][]byte `protobuf:"bytes,2,rep,name=writer_email" json:"writer_email,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAcl) Reset() { *m = TaskQueueAcl{} }
+func (m *TaskQueueAcl) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAcl) ProtoMessage() {}
+
+func (m *TaskQueueAcl) GetUserEmail() [][]byte {
+ if m != nil {
+ return m.UserEmail
+ }
+ return nil
+}
+
+func (m *TaskQueueAcl) GetWriterEmail() [][]byte {
+ if m != nil {
+ return m.WriterEmail
+ }
+ return nil
+}
+
+type TaskQueueHttpHeader struct {
+ Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueHttpHeader) Reset() { *m = TaskQueueHttpHeader{} }
+func (m *TaskQueueHttpHeader) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueHttpHeader) ProtoMessage() {}
+
+func (m *TaskQueueHttpHeader) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TaskQueueHttpHeader) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type TaskQueueMode struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueMode) Reset() { *m = TaskQueueMode{} }
+func (m *TaskQueueMode) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueMode) ProtoMessage() {}
+
+type TaskQueueAddRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ Method *TaskQueueAddRequest_RequestMethod `protobuf:"varint,5,opt,name=method,enum=appengine.TaskQueueAddRequest_RequestMethod,def=2" json:"method,omitempty"`
+ Url []byte `protobuf:"bytes,4,opt,name=url" json:"url,omitempty"`
+ Header []*TaskQueueAddRequest_Header `protobuf:"group,6,rep,name=Header" json:"header,omitempty"`
+ Body []byte `protobuf:"bytes,9,opt,name=body" json:"body,omitempty"`
+ Transaction *appengine.Transaction `protobuf:"bytes,10,opt,name=transaction" json:"transaction,omitempty"`
+ AppId []byte `protobuf:"bytes,11,opt,name=app_id" json:"app_id,omitempty"`
+ Crontimetable *TaskQueueAddRequest_CronTimetable `protobuf:"group,12,opt,name=CronTimetable" json:"crontimetable,omitempty"`
+ Description []byte `protobuf:"bytes,15,opt,name=description" json:"description,omitempty"`
+ Payload *TaskPayload `protobuf:"bytes,16,opt,name=payload" json:"payload,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,17,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ Mode *TaskQueueMode_Mode `protobuf:"varint,18,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"`
+ Tag []byte `protobuf:"bytes,19,opt,name=tag" json:"tag,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddRequest) Reset() { *m = TaskQueueAddRequest{} }
+func (m *TaskQueueAddRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddRequest) ProtoMessage() {}
+
+const Default_TaskQueueAddRequest_Method TaskQueueAddRequest_RequestMethod = TaskQueueAddRequest_POST
+const Default_TaskQueueAddRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH
+
+func (m *TaskQueueAddRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueAddRequest) GetMethod() TaskQueueAddRequest_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return Default_TaskQueueAddRequest_Method
+}
+
+func (m *TaskQueueAddRequest) GetUrl() []byte {
+ if m != nil {
+ return m.Url
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetHeader() []*TaskQueueAddRequest_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetBody() []byte {
+ if m != nil {
+ return m.Body
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetTransaction() *appengine.Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetCrontimetable() *TaskQueueAddRequest_CronTimetable {
+ if m != nil {
+ return m.Crontimetable
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetDescription() []byte {
+ if m != nil {
+ return m.Description
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetPayload() *TaskPayload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetMode() TaskQueueMode_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_TaskQueueAddRequest_Mode
+}
+
+func (m *TaskQueueAddRequest) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
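
TaskQueueAddRequest marks queue_name, task_name and eta_usec as required, while method and mode are optional and fall back to POST and PUSH through the getters above. A hedged sketch of how such a request might be populated using the generated types; the function name and all field values are made up, and "time" plus the proto package are assumed to be imported:

// Illustrative only: a minimally populated add request.
func exampleAddRequest(txn *appengine.Transaction) *TaskQueueAddRequest {
	return &TaskQueueAddRequest{
		QueueName:   []byte("default"),
		TaskName:    []byte("task-0001"),
		EtaUsec:     proto.Int64(time.Now().UnixNano() / int64(time.Microsecond)), // assumed: microseconds since the Unix epoch
		Url:         []byte("/worker"),
		Body:        []byte("payload"),
		Transaction: txn, // optional; nil outside a datastore transaction
		// Method and Mode are left nil: GetMethod() and GetMode() return POST and PUSH.
	}
}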
+
+type TaskQueueAddRequest_Header struct {
+ Key []byte `protobuf:"bytes,7,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,8,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddRequest_Header) Reset() { *m = TaskQueueAddRequest_Header{} }
+func (m *TaskQueueAddRequest_Header) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddRequest_Header) ProtoMessage() {}
+
+func (m *TaskQueueAddRequest_Header) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest_Header) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type TaskQueueAddRequest_CronTimetable struct {
+ Schedule []byte `protobuf:"bytes,13,req,name=schedule" json:"schedule,omitempty"`
+ Timezone []byte `protobuf:"bytes,14,req,name=timezone" json:"timezone,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddRequest_CronTimetable) Reset() { *m = TaskQueueAddRequest_CronTimetable{} }
+func (m *TaskQueueAddRequest_CronTimetable) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddRequest_CronTimetable) ProtoMessage() {}
+
+func (m *TaskQueueAddRequest_CronTimetable) GetSchedule() []byte {
+ if m != nil {
+ return m.Schedule
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest_CronTimetable) GetTimezone() []byte {
+ if m != nil {
+ return m.Timezone
+ }
+ return nil
+}
+
+type TaskQueueAddResponse struct {
+ ChosenTaskName []byte `protobuf:"bytes,1,opt,name=chosen_task_name" json:"chosen_task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddResponse) Reset() { *m = TaskQueueAddResponse{} }
+func (m *TaskQueueAddResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddResponse) ProtoMessage() {}
+
+func (m *TaskQueueAddResponse) GetChosenTaskName() []byte {
+ if m != nil {
+ return m.ChosenTaskName
+ }
+ return nil
+}
+
+type TaskQueueBulkAddRequest struct {
+ AddRequest []*TaskQueueAddRequest `protobuf:"bytes,1,rep,name=add_request" json:"add_request,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueBulkAddRequest) Reset() { *m = TaskQueueBulkAddRequest{} }
+func (m *TaskQueueBulkAddRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueBulkAddRequest) ProtoMessage() {}
+
+func (m *TaskQueueBulkAddRequest) GetAddRequest() []*TaskQueueAddRequest {
+ if m != nil {
+ return m.AddRequest
+ }
+ return nil
+}
+
+type TaskQueueBulkAddResponse struct {
+ Taskresult []*TaskQueueBulkAddResponse_TaskResult `protobuf:"group,1,rep,name=TaskResult" json:"taskresult,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueBulkAddResponse) Reset() { *m = TaskQueueBulkAddResponse{} }
+func (m *TaskQueueBulkAddResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueBulkAddResponse) ProtoMessage() {}
+
+func (m *TaskQueueBulkAddResponse) GetTaskresult() []*TaskQueueBulkAddResponse_TaskResult {
+ if m != nil {
+ return m.Taskresult
+ }
+ return nil
+}
+
+type TaskQueueBulkAddResponse_TaskResult struct {
+ Result *TaskQueueServiceError_ErrorCode `protobuf:"varint,2,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"`
+ ChosenTaskName []byte `protobuf:"bytes,3,opt,name=chosen_task_name" json:"chosen_task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueBulkAddResponse_TaskResult) Reset() { *m = TaskQueueBulkAddResponse_TaskResult{} }
+func (m *TaskQueueBulkAddResponse_TaskResult) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueBulkAddResponse_TaskResult) ProtoMessage() {}
+
+func (m *TaskQueueBulkAddResponse_TaskResult) GetResult() TaskQueueServiceError_ErrorCode {
+ if m != nil && m.Result != nil {
+ return *m.Result
+ }
+ return TaskQueueServiceError_OK
+}
+
+func (m *TaskQueueBulkAddResponse_TaskResult) GetChosenTaskName() []byte {
+ if m != nil {
+ return m.ChosenTaskName
+ }
+ return nil
+}
+
+type TaskQueueDeleteRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName [][]byte `protobuf:"bytes,2,rep,name=task_name" json:"task_name,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteRequest) Reset() { *m = TaskQueueDeleteRequest{} }
+func (m *TaskQueueDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteRequest) ProtoMessage() {}
+
+func (m *TaskQueueDeleteRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueDeleteRequest) GetTaskName() [][]byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueDeleteRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type TaskQueueDeleteResponse struct {
+ Result []TaskQueueServiceError_ErrorCode `protobuf:"varint,3,rep,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteResponse) Reset() { *m = TaskQueueDeleteResponse{} }
+func (m *TaskQueueDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteResponse) ProtoMessage() {}
+
+func (m *TaskQueueDeleteResponse) GetResult() []TaskQueueServiceError_ErrorCode {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+type TaskQueueForceRunRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueForceRunRequest) Reset() { *m = TaskQueueForceRunRequest{} }
+func (m *TaskQueueForceRunRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueForceRunRequest) ProtoMessage() {}
+
+func (m *TaskQueueForceRunRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueForceRunRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueForceRunRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+type TaskQueueForceRunResponse struct {
+ Result *TaskQueueServiceError_ErrorCode `protobuf:"varint,3,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueForceRunResponse) Reset() { *m = TaskQueueForceRunResponse{} }
+func (m *TaskQueueForceRunResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueForceRunResponse) ProtoMessage() {}
+
+func (m *TaskQueueForceRunResponse) GetResult() TaskQueueServiceError_ErrorCode {
+ if m != nil && m.Result != nil {
+ return *m.Result
+ }
+ return TaskQueueServiceError_OK
+}
+
+type TaskQueueUpdateQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ BucketRefillPerSecond *float64 `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"`
+ BucketCapacity *int32 `protobuf:"varint,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"`
+ UserSpecifiedRate *string `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,6,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ MaxConcurrentRequests *int32 `protobuf:"varint,7,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"`
+ Mode *TaskQueueMode_Mode `protobuf:"varint,8,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"`
+ Acl *TaskQueueAcl `protobuf:"bytes,9,opt,name=acl" json:"acl,omitempty"`
+ HeaderOverride []*TaskQueueHttpHeader `protobuf:"bytes,10,rep,name=header_override" json:"header_override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateQueueRequest) Reset() { *m = TaskQueueUpdateQueueRequest{} }
+func (m *TaskQueueUpdateQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateQueueRequest) ProtoMessage() {}
+
+const Default_TaskQueueUpdateQueueRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH
+
+func (m *TaskQueueUpdateQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetBucketRefillPerSecond() float64 {
+ if m != nil && m.BucketRefillPerSecond != nil {
+ return *m.BucketRefillPerSecond
+ }
+ return 0
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetBucketCapacity() int32 {
+ if m != nil && m.BucketCapacity != nil {
+ return *m.BucketCapacity
+ }
+ return 0
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetUserSpecifiedRate() string {
+ if m != nil && m.UserSpecifiedRate != nil {
+ return *m.UserSpecifiedRate
+ }
+ return ""
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetMaxConcurrentRequests() int32 {
+ if m != nil && m.MaxConcurrentRequests != nil {
+ return *m.MaxConcurrentRequests
+ }
+ return 0
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetMode() TaskQueueMode_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_TaskQueueUpdateQueueRequest_Mode
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetAcl() *TaskQueueAcl {
+ if m != nil {
+ return m.Acl
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetHeaderOverride() []*TaskQueueHttpHeader {
+ if m != nil {
+ return m.HeaderOverride
+ }
+ return nil
+}
+
+type TaskQueueUpdateQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateQueueResponse) Reset() { *m = TaskQueueUpdateQueueResponse{} }
+func (m *TaskQueueUpdateQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateQueueResponse) ProtoMessage() {}
+
+type TaskQueueFetchQueuesRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ MaxRows *int32 `protobuf:"varint,2,req,name=max_rows" json:"max_rows,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueuesRequest) Reset() { *m = TaskQueueFetchQueuesRequest{} }
+func (m *TaskQueueFetchQueuesRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueuesRequest) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueuesRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesRequest) GetMaxRows() int32 {
+ if m != nil && m.MaxRows != nil {
+ return *m.MaxRows
+ }
+ return 0
+}
+
+type TaskQueueFetchQueuesResponse struct {
+ Queue []*TaskQueueFetchQueuesResponse_Queue `protobuf:"group,1,rep,name=Queue" json:"queue,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueuesResponse) Reset() { *m = TaskQueueFetchQueuesResponse{} }
+func (m *TaskQueueFetchQueuesResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueuesResponse) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueuesResponse) GetQueue() []*TaskQueueFetchQueuesResponse_Queue {
+ if m != nil {
+ return m.Queue
+ }
+ return nil
+}
+
+type TaskQueueFetchQueuesResponse_Queue struct {
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ BucketRefillPerSecond *float64 `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"`
+ BucketCapacity *float64 `protobuf:"fixed64,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"`
+ UserSpecifiedRate *string `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"`
+ Paused *bool `protobuf:"varint,6,req,name=paused,def=0" json:"paused,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,7,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ MaxConcurrentRequests *int32 `protobuf:"varint,8,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"`
+ Mode *TaskQueueMode_Mode `protobuf:"varint,9,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"`
+ Acl *TaskQueueAcl `protobuf:"bytes,10,opt,name=acl" json:"acl,omitempty"`
+ HeaderOverride []*TaskQueueHttpHeader `protobuf:"bytes,11,rep,name=header_override" json:"header_override,omitempty"`
+ CreatorName *string `protobuf:"bytes,12,opt,name=creator_name,def=apphosting" json:"creator_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) Reset() { *m = TaskQueueFetchQueuesResponse_Queue{} }
+func (m *TaskQueueFetchQueuesResponse_Queue) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueuesResponse_Queue) ProtoMessage() {}
+
+const Default_TaskQueueFetchQueuesResponse_Queue_Paused bool = false
+const Default_TaskQueueFetchQueuesResponse_Queue_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH
+const Default_TaskQueueFetchQueuesResponse_Queue_CreatorName string = "apphosting"
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketRefillPerSecond() float64 {
+ if m != nil && m.BucketRefillPerSecond != nil {
+ return *m.BucketRefillPerSecond
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketCapacity() float64 {
+ if m != nil && m.BucketCapacity != nil {
+ return *m.BucketCapacity
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetUserSpecifiedRate() string {
+ if m != nil && m.UserSpecifiedRate != nil {
+ return *m.UserSpecifiedRate
+ }
+ return ""
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetPaused() bool {
+ if m != nil && m.Paused != nil {
+ return *m.Paused
+ }
+ return Default_TaskQueueFetchQueuesResponse_Queue_Paused
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetMaxConcurrentRequests() int32 {
+ if m != nil && m.MaxConcurrentRequests != nil {
+ return *m.MaxConcurrentRequests
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetMode() TaskQueueMode_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_TaskQueueFetchQueuesResponse_Queue_Mode
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetAcl() *TaskQueueAcl {
+ if m != nil {
+ return m.Acl
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetHeaderOverride() []*TaskQueueHttpHeader {
+ if m != nil {
+ return m.HeaderOverride
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetCreatorName() string {
+ if m != nil && m.CreatorName != nil {
+ return *m.CreatorName
+ }
+ return Default_TaskQueueFetchQueuesResponse_Queue_CreatorName
+}
+
+type TaskQueueFetchQueueStatsRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName [][]byte `protobuf:"bytes,2,rep,name=queue_name" json:"queue_name,omitempty"`
+ MaxNumTasks *int32 `protobuf:"varint,3,opt,name=max_num_tasks,def=0" json:"max_num_tasks,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueueStatsRequest) Reset() { *m = TaskQueueFetchQueueStatsRequest{} }
+func (m *TaskQueueFetchQueueStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueueStatsRequest) ProtoMessage() {}
+
+const Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks int32 = 0
+
+func (m *TaskQueueFetchQueueStatsRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueueStatsRequest) GetQueueName() [][]byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueueStatsRequest) GetMaxNumTasks() int32 {
+ if m != nil && m.MaxNumTasks != nil {
+ return *m.MaxNumTasks
+ }
+ return Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks
+}
+
+type TaskQueueScannerQueueInfo struct {
+ ExecutedLastMinute *int64 `protobuf:"varint,1,req,name=executed_last_minute" json:"executed_last_minute,omitempty"`
+ ExecutedLastHour *int64 `protobuf:"varint,2,req,name=executed_last_hour" json:"executed_last_hour,omitempty"`
+ SamplingDurationSeconds *float64 `protobuf:"fixed64,3,req,name=sampling_duration_seconds" json:"sampling_duration_seconds,omitempty"`
+ RequestsInFlight *int32 `protobuf:"varint,4,opt,name=requests_in_flight" json:"requests_in_flight,omitempty"`
+ EnforcedRate *float64 `protobuf:"fixed64,5,opt,name=enforced_rate" json:"enforced_rate,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueScannerQueueInfo) Reset() { *m = TaskQueueScannerQueueInfo{} }
+func (m *TaskQueueScannerQueueInfo) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueScannerQueueInfo) ProtoMessage() {}
+
+func (m *TaskQueueScannerQueueInfo) GetExecutedLastMinute() int64 {
+ if m != nil && m.ExecutedLastMinute != nil {
+ return *m.ExecutedLastMinute
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetExecutedLastHour() int64 {
+ if m != nil && m.ExecutedLastHour != nil {
+ return *m.ExecutedLastHour
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetSamplingDurationSeconds() float64 {
+ if m != nil && m.SamplingDurationSeconds != nil {
+ return *m.SamplingDurationSeconds
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetRequestsInFlight() int32 {
+ if m != nil && m.RequestsInFlight != nil {
+ return *m.RequestsInFlight
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetEnforcedRate() float64 {
+ if m != nil && m.EnforcedRate != nil {
+ return *m.EnforcedRate
+ }
+ return 0
+}
+
+type TaskQueueFetchQueueStatsResponse struct {
+ Queuestats []*TaskQueueFetchQueueStatsResponse_QueueStats `protobuf:"group,1,rep,name=QueueStats" json:"queuestats,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueueStatsResponse) Reset() { *m = TaskQueueFetchQueueStatsResponse{} }
+func (m *TaskQueueFetchQueueStatsResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueueStatsResponse) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueueStatsResponse) GetQueuestats() []*TaskQueueFetchQueueStatsResponse_QueueStats {
+ if m != nil {
+ return m.Queuestats
+ }
+ return nil
+}
+
+type TaskQueueFetchQueueStatsResponse_QueueStats struct {
+ NumTasks *int32 `protobuf:"varint,2,req,name=num_tasks" json:"num_tasks,omitempty"`
+ OldestEtaUsec *int64 `protobuf:"varint,3,req,name=oldest_eta_usec" json:"oldest_eta_usec,omitempty"`
+ ScannerInfo *TaskQueueScannerQueueInfo `protobuf:"bytes,4,opt,name=scanner_info" json:"scanner_info,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) Reset() {
+ *m = TaskQueueFetchQueueStatsResponse_QueueStats{}
+}
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) String() string {
+ return proto.CompactTextString(m)
+}
+func (*TaskQueueFetchQueueStatsResponse_QueueStats) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetNumTasks() int32 {
+ if m != nil && m.NumTasks != nil {
+ return *m.NumTasks
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetOldestEtaUsec() int64 {
+ if m != nil && m.OldestEtaUsec != nil {
+ return *m.OldestEtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetScannerInfo() *TaskQueueScannerQueueInfo {
+ if m != nil {
+ return m.ScannerInfo
+ }
+ return nil
+}
+
+type TaskQueuePauseQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ Pause *bool `protobuf:"varint,3,req,name=pause" json:"pause,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePauseQueueRequest) Reset() { *m = TaskQueuePauseQueueRequest{} }
+func (m *TaskQueuePauseQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePauseQueueRequest) ProtoMessage() {}
+
+func (m *TaskQueuePauseQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueuePauseQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueuePauseQueueRequest) GetPause() bool {
+ if m != nil && m.Pause != nil {
+ return *m.Pause
+ }
+ return false
+}
+
+type TaskQueuePauseQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePauseQueueResponse) Reset() { *m = TaskQueuePauseQueueResponse{} }
+func (m *TaskQueuePauseQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePauseQueueResponse) ProtoMessage() {}
+
+type TaskQueuePurgeQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePurgeQueueRequest) Reset() { *m = TaskQueuePurgeQueueRequest{} }
+func (m *TaskQueuePurgeQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePurgeQueueRequest) ProtoMessage() {}
+
+func (m *TaskQueuePurgeQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueuePurgeQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+type TaskQueuePurgeQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePurgeQueueResponse) Reset() { *m = TaskQueuePurgeQueueResponse{} }
+func (m *TaskQueuePurgeQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePurgeQueueResponse) ProtoMessage() {}
+
+type TaskQueueDeleteQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteQueueRequest) Reset() { *m = TaskQueueDeleteQueueRequest{} }
+func (m *TaskQueueDeleteQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteQueueRequest) ProtoMessage() {}
+
+func (m *TaskQueueDeleteQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueDeleteQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+type TaskQueueDeleteQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteQueueResponse) Reset() { *m = TaskQueueDeleteQueueResponse{} }
+func (m *TaskQueueDeleteQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteQueueResponse) ProtoMessage() {}
+
+type TaskQueueDeleteGroupRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteGroupRequest) Reset() { *m = TaskQueueDeleteGroupRequest{} }
+func (m *TaskQueueDeleteGroupRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteGroupRequest) ProtoMessage() {}
+
+func (m *TaskQueueDeleteGroupRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type TaskQueueDeleteGroupResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteGroupResponse) Reset() { *m = TaskQueueDeleteGroupResponse{} }
+func (m *TaskQueueDeleteGroupResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteGroupResponse) ProtoMessage() {}
+
+type TaskQueueQueryTasksRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ StartTaskName []byte `protobuf:"bytes,3,opt,name=start_task_name" json:"start_task_name,omitempty"`
+ StartEtaUsec *int64 `protobuf:"varint,4,opt,name=start_eta_usec" json:"start_eta_usec,omitempty"`
+ StartTag []byte `protobuf:"bytes,6,opt,name=start_tag" json:"start_tag,omitempty"`
+ MaxRows *int32 `protobuf:"varint,5,opt,name=max_rows,def=1" json:"max_rows,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksRequest) Reset() { *m = TaskQueueQueryTasksRequest{} }
+func (m *TaskQueueQueryTasksRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksRequest) ProtoMessage() {}
+
+const Default_TaskQueueQueryTasksRequest_MaxRows int32 = 1
+
+func (m *TaskQueueQueryTasksRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetStartTaskName() []byte {
+ if m != nil {
+ return m.StartTaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetStartEtaUsec() int64 {
+ if m != nil && m.StartEtaUsec != nil {
+ return *m.StartEtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksRequest) GetStartTag() []byte {
+ if m != nil {
+ return m.StartTag
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetMaxRows() int32 {
+ if m != nil && m.MaxRows != nil {
+ return *m.MaxRows
+ }
+ return Default_TaskQueueQueryTasksRequest_MaxRows
+}
+
+type TaskQueueQueryTasksResponse struct {
+ Task []*TaskQueueQueryTasksResponse_Task `protobuf:"group,1,rep,name=Task" json:"task,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse) Reset() { *m = TaskQueueQueryTasksResponse{} }
+func (m *TaskQueueQueryTasksResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse) GetTask() []*TaskQueueQueryTasksResponse_Task {
+ if m != nil {
+ return m.Task
+ }
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task struct {
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ Url []byte `protobuf:"bytes,4,opt,name=url" json:"url,omitempty"`
+ Method *TaskQueueQueryTasksResponse_Task_RequestMethod `protobuf:"varint,5,opt,name=method,enum=appengine.TaskQueueQueryTasksResponse_Task_RequestMethod" json:"method,omitempty"`
+ RetryCount *int32 `protobuf:"varint,6,opt,name=retry_count,def=0" json:"retry_count,omitempty"`
+ Header []*TaskQueueQueryTasksResponse_Task_Header `protobuf:"group,7,rep,name=Header" json:"header,omitempty"`
+ BodySize *int32 `protobuf:"varint,10,opt,name=body_size" json:"body_size,omitempty"`
+ Body []byte `protobuf:"bytes,11,opt,name=body" json:"body,omitempty"`
+ CreationTimeUsec *int64 `protobuf:"varint,12,req,name=creation_time_usec" json:"creation_time_usec,omitempty"`
+ Crontimetable *TaskQueueQueryTasksResponse_Task_CronTimetable `protobuf:"group,13,opt,name=CronTimetable" json:"crontimetable,omitempty"`
+ Runlog *TaskQueueQueryTasksResponse_Task_RunLog `protobuf:"group,16,opt,name=RunLog" json:"runlog,omitempty"`
+ Description []byte `protobuf:"bytes,21,opt,name=description" json:"description,omitempty"`
+ Payload *TaskPayload `protobuf:"bytes,22,opt,name=payload" json:"payload,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,23,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ FirstTryUsec *int64 `protobuf:"varint,24,opt,name=first_try_usec" json:"first_try_usec,omitempty"`
+ Tag []byte `protobuf:"bytes,25,opt,name=tag" json:"tag,omitempty"`
+ ExecutionCount *int32 `protobuf:"varint,26,opt,name=execution_count,def=0" json:"execution_count,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) Reset() { *m = TaskQueueQueryTasksResponse_Task{} }
+func (m *TaskQueueQueryTasksResponse_Task) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse_Task) ProtoMessage() {}
+
+const Default_TaskQueueQueryTasksResponse_Task_RetryCount int32 = 0
+const Default_TaskQueueQueryTasksResponse_Task_ExecutionCount int32 = 0
+
+func (m *TaskQueueQueryTasksResponse_Task) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetUrl() []byte {
+ if m != nil {
+ return m.Url
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetMethod() TaskQueueQueryTasksResponse_Task_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return TaskQueueQueryTasksResponse_Task_GET
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetRetryCount() int32 {
+ if m != nil && m.RetryCount != nil {
+ return *m.RetryCount
+ }
+ return Default_TaskQueueQueryTasksResponse_Task_RetryCount
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetHeader() []*TaskQueueQueryTasksResponse_Task_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetBodySize() int32 {
+ if m != nil && m.BodySize != nil {
+ return *m.BodySize
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetBody() []byte {
+ if m != nil {
+ return m.Body
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetCreationTimeUsec() int64 {
+ if m != nil && m.CreationTimeUsec != nil {
+ return *m.CreationTimeUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetCrontimetable() *TaskQueueQueryTasksResponse_Task_CronTimetable {
+ if m != nil {
+ return m.Crontimetable
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetRunlog() *TaskQueueQueryTasksResponse_Task_RunLog {
+ if m != nil {
+ return m.Runlog
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetDescription() []byte {
+ if m != nil {
+ return m.Description
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetPayload() *TaskPayload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetFirstTryUsec() int64 {
+ if m != nil && m.FirstTryUsec != nil {
+ return *m.FirstTryUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetExecutionCount() int32 {
+ if m != nil && m.ExecutionCount != nil {
+ return *m.ExecutionCount
+ }
+ return Default_TaskQueueQueryTasksResponse_Task_ExecutionCount
+}
+
+type TaskQueueQueryTasksResponse_Task_Header struct {
+ Key []byte `protobuf:"bytes,8,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,9,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_Header) Reset() {
+ *m = TaskQueueQueryTasksResponse_Task_Header{}
+}
+func (m *TaskQueueQueryTasksResponse_Task_Header) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse_Task_Header) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse_Task_Header) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_Header) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task_CronTimetable struct {
+ Schedule []byte `protobuf:"bytes,14,req,name=schedule" json:"schedule,omitempty"`
+ Timezone []byte `protobuf:"bytes,15,req,name=timezone" json:"timezone,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) Reset() {
+ *m = TaskQueueQueryTasksResponse_Task_CronTimetable{}
+}
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) String() string {
+ return proto.CompactTextString(m)
+}
+func (*TaskQueueQueryTasksResponse_Task_CronTimetable) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetSchedule() []byte {
+ if m != nil {
+ return m.Schedule
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetTimezone() []byte {
+ if m != nil {
+ return m.Timezone
+ }
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task_RunLog struct {
+ DispatchedUsec *int64 `protobuf:"varint,17,req,name=dispatched_usec" json:"dispatched_usec,omitempty"`
+ LagUsec *int64 `protobuf:"varint,18,req,name=lag_usec" json:"lag_usec,omitempty"`
+ ElapsedUsec *int64 `protobuf:"varint,19,req,name=elapsed_usec" json:"elapsed_usec,omitempty"`
+ ResponseCode *int64 `protobuf:"varint,20,opt,name=response_code" json:"response_code,omitempty"`
+ RetryReason *string `protobuf:"bytes,27,opt,name=retry_reason" json:"retry_reason,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) Reset() {
+ *m = TaskQueueQueryTasksResponse_Task_RunLog{}
+}
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse_Task_RunLog) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetDispatchedUsec() int64 {
+ if m != nil && m.DispatchedUsec != nil {
+ return *m.DispatchedUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetLagUsec() int64 {
+ if m != nil && m.LagUsec != nil {
+ return *m.LagUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetElapsedUsec() int64 {
+ if m != nil && m.ElapsedUsec != nil {
+ return *m.ElapsedUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetResponseCode() int64 {
+ if m != nil && m.ResponseCode != nil {
+ return *m.ResponseCode
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetRetryReason() string {
+ if m != nil && m.RetryReason != nil {
+ return *m.RetryReason
+ }
+ return ""
+}
+
+type TaskQueueFetchTaskRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchTaskRequest) Reset() { *m = TaskQueueFetchTaskRequest{} }
+func (m *TaskQueueFetchTaskRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchTaskRequest) ProtoMessage() {}
+
+func (m *TaskQueueFetchTaskRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchTaskRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchTaskRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+type TaskQueueFetchTaskResponse struct {
+ Task *TaskQueueQueryTasksResponse `protobuf:"bytes,1,req,name=task" json:"task,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchTaskResponse) Reset() { *m = TaskQueueFetchTaskResponse{} }
+func (m *TaskQueueFetchTaskResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchTaskResponse) ProtoMessage() {}
+
+func (m *TaskQueueFetchTaskResponse) GetTask() *TaskQueueQueryTasksResponse {
+ if m != nil {
+ return m.Task
+ }
+ return nil
+}
+
+type TaskQueueUpdateStorageLimitRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ Limit *int64 `protobuf:"varint,2,req,name=limit" json:"limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateStorageLimitRequest) Reset() { *m = TaskQueueUpdateStorageLimitRequest{} }
+func (m *TaskQueueUpdateStorageLimitRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateStorageLimitRequest) ProtoMessage() {}
+
+func (m *TaskQueueUpdateStorageLimitRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateStorageLimitRequest) GetLimit() int64 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+type TaskQueueUpdateStorageLimitResponse struct {
+ NewLimit *int64 `protobuf:"varint,1,req,name=new_limit" json:"new_limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateStorageLimitResponse) Reset() { *m = TaskQueueUpdateStorageLimitResponse{} }
+func (m *TaskQueueUpdateStorageLimitResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateStorageLimitResponse) ProtoMessage() {}
+
+func (m *TaskQueueUpdateStorageLimitResponse) GetNewLimit() int64 {
+ if m != nil && m.NewLimit != nil {
+ return *m.NewLimit
+ }
+ return 0
+}
+
+type TaskQueueQueryAndOwnTasksRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ LeaseSeconds *float64 `protobuf:"fixed64,2,req,name=lease_seconds" json:"lease_seconds,omitempty"`
+ MaxTasks *int64 `protobuf:"varint,3,req,name=max_tasks" json:"max_tasks,omitempty"`
+ GroupByTag *bool `protobuf:"varint,4,opt,name=group_by_tag,def=0" json:"group_by_tag,omitempty"`
+ Tag []byte `protobuf:"bytes,5,opt,name=tag" json:"tag,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) Reset() { *m = TaskQueueQueryAndOwnTasksRequest{} }
+func (m *TaskQueueQueryAndOwnTasksRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryAndOwnTasksRequest) ProtoMessage() {}
+
+const Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag bool = false
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetLeaseSeconds() float64 {
+ if m != nil && m.LeaseSeconds != nil {
+ return *m.LeaseSeconds
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetMaxTasks() int64 {
+ if m != nil && m.MaxTasks != nil {
+ return *m.MaxTasks
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetGroupByTag() bool {
+ if m != nil && m.GroupByTag != nil {
+ return *m.GroupByTag
+ }
+ return Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
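
TaskQueueQueryAndOwnTasksRequest is the pull-queue lease call: queue_name, lease_seconds and max_tasks are required, and group_by_tag/tag optionally restrict the lease to a single tag. A hedged sketch of building such a request with the generated types; the function name and values are illustrative only, and the proto package is assumed to be imported:

// Illustrative only: lease up to ten tasks for sixty seconds.
func exampleLeaseRequest() *TaskQueueQueryAndOwnTasksRequest {
	return &TaskQueueQueryAndOwnTasksRequest{
		QueueName:    []byte("pull-queue"),
		LeaseSeconds: proto.Float64(60),
		MaxTasks:     proto.Int64(10),
		GroupByTag:   proto.Bool(true), // optional; GetGroupByTag() defaults to false when unset
		Tag:          []byte("emails"), // optional tag filter
	}
}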
+
+type TaskQueueQueryAndOwnTasksResponse struct {
+ Task []*TaskQueueQueryAndOwnTasksResponse_Task `protobuf:"group,1,rep,name=Task" json:"task,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse) Reset() { *m = TaskQueueQueryAndOwnTasksResponse{} }
+func (m *TaskQueueQueryAndOwnTasksResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryAndOwnTasksResponse) ProtoMessage() {}
+
+func (m *TaskQueueQueryAndOwnTasksResponse) GetTask() []*TaskQueueQueryAndOwnTasksResponse_Task {
+ if m != nil {
+ return m.Task
+ }
+ return nil
+}
+
+type TaskQueueQueryAndOwnTasksResponse_Task struct {
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ RetryCount *int32 `protobuf:"varint,4,opt,name=retry_count,def=0" json:"retry_count,omitempty"`
+ Body []byte `protobuf:"bytes,5,opt,name=body" json:"body,omitempty"`
+ Tag []byte `protobuf:"bytes,6,opt,name=tag" json:"tag,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) Reset() {
+ *m = TaskQueueQueryAndOwnTasksResponse_Task{}
+}
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryAndOwnTasksResponse_Task) ProtoMessage() {}
+
+const Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount int32 = 0
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetRetryCount() int32 {
+ if m != nil && m.RetryCount != nil {
+ return *m.RetryCount
+ }
+ return Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetBody() []byte {
+ if m != nil {
+ return m.Body
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+type TaskQueueModifyTaskLeaseRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ LeaseSeconds *float64 `protobuf:"fixed64,4,req,name=lease_seconds" json:"lease_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) Reset() { *m = TaskQueueModifyTaskLeaseRequest{} }
+func (m *TaskQueueModifyTaskLeaseRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueModifyTaskLeaseRequest) ProtoMessage() {}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetLeaseSeconds() float64 {
+ if m != nil && m.LeaseSeconds != nil {
+ return *m.LeaseSeconds
+ }
+ return 0
+}
+
+type TaskQueueModifyTaskLeaseResponse struct {
+ UpdatedEtaUsec *int64 `protobuf:"varint,1,req,name=updated_eta_usec" json:"updated_eta_usec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueModifyTaskLeaseResponse) Reset() { *m = TaskQueueModifyTaskLeaseResponse{} }
+func (m *TaskQueueModifyTaskLeaseResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueModifyTaskLeaseResponse) ProtoMessage() {}
+
+func (m *TaskQueueModifyTaskLeaseResponse) GetUpdatedEtaUsec() int64 {
+ if m != nil && m.UpdatedEtaUsec != nil {
+ return *m.UpdatedEtaUsec
+ }
+ return 0
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
new file mode 100644
index 0000000..419aaf5
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
@@ -0,0 +1,342 @@
+syntax = "proto2";
+option go_package = "taskqueue";
+
+import "google.golang.org/appengine/internal/datastore/datastore_v3.proto";
+
+package appengine;
+
+message TaskQueueServiceError {
+ enum ErrorCode {
+ OK = 0;
+ UNKNOWN_QUEUE = 1;
+ TRANSIENT_ERROR = 2;
+ INTERNAL_ERROR = 3;
+ TASK_TOO_LARGE = 4;
+ INVALID_TASK_NAME = 5;
+ INVALID_QUEUE_NAME = 6;
+ INVALID_URL = 7;
+ INVALID_QUEUE_RATE = 8;
+ PERMISSION_DENIED = 9;
+ TASK_ALREADY_EXISTS = 10;
+ TOMBSTONED_TASK = 11;
+ INVALID_ETA = 12;
+ INVALID_REQUEST = 13;
+ UNKNOWN_TASK = 14;
+ TOMBSTONED_QUEUE = 15;
+ DUPLICATE_TASK_NAME = 16;
+ SKIPPED = 17;
+ TOO_MANY_TASKS = 18;
+ INVALID_PAYLOAD = 19;
+ INVALID_RETRY_PARAMETERS = 20;
+ INVALID_QUEUE_MODE = 21;
+ ACL_LOOKUP_ERROR = 22;
+ TRANSACTIONAL_REQUEST_TOO_LARGE = 23;
+ INCORRECT_CREATOR_NAME = 24;
+ TASK_LEASE_EXPIRED = 25;
+ QUEUE_PAUSED = 26;
+ INVALID_TAG = 27;
+
+ // Reserved range for the Datastore error codes.
+ // Original Datastore error code is shifted by DATASTORE_ERROR offset.
+ DATASTORE_ERROR = 10000;
+ }
+}
+
+message TaskPayload {
+ extensions 10 to max;
+ option message_set_wire_format = true;
+}
+
+message TaskQueueRetryParameters {
+ optional int32 retry_limit = 1;
+ optional int64 age_limit_sec = 2;
+
+ optional double min_backoff_sec = 3 [default = 0.1];
+ optional double max_backoff_sec = 4 [default = 3600];
+ optional int32 max_doublings = 5 [default = 16];
+}
+
+message TaskQueueAcl {
+ repeated bytes user_email = 1;
+ repeated bytes writer_email = 2;
+}
+
+message TaskQueueHttpHeader {
+ required bytes key = 1;
+ required bytes value = 2;
+}
+
+message TaskQueueMode {
+ enum Mode {
+ PUSH = 0;
+ PULL = 1;
+ }
+}
+
+message TaskQueueAddRequest {
+ required bytes queue_name = 1;
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ }
+ optional RequestMethod method = 5 [default=POST];
+
+ optional bytes url = 4;
+
+ repeated group Header = 6 {
+ required bytes key = 7;
+ required bytes value = 8;
+ }
+
+ optional bytes body = 9 [ctype=CORD];
+ optional Transaction transaction = 10;
+ optional bytes app_id = 11;
+
+ optional group CronTimetable = 12 {
+ required bytes schedule = 13;
+ required bytes timezone = 14;
+ }
+
+ optional bytes description = 15;
+ optional TaskPayload payload = 16;
+ optional TaskQueueRetryParameters retry_parameters = 17;
+ optional TaskQueueMode.Mode mode = 18 [default=PUSH];
+ optional bytes tag = 19;
+}
+
+message TaskQueueAddResponse {
+ optional bytes chosen_task_name = 1;
+}
+
+message TaskQueueBulkAddRequest {
+ repeated TaskQueueAddRequest add_request = 1;
+}
+
+message TaskQueueBulkAddResponse {
+ repeated group TaskResult = 1 {
+ required TaskQueueServiceError.ErrorCode result = 2;
+ optional bytes chosen_task_name = 3;
+ }
+}
+
+message TaskQueueDeleteRequest {
+ required bytes queue_name = 1;
+ repeated bytes task_name = 2;
+ optional bytes app_id = 3;
+}
+
+message TaskQueueDeleteResponse {
+ repeated TaskQueueServiceError.ErrorCode result = 3;
+}
+
+message TaskQueueForceRunRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+ required bytes task_name = 3;
+}
+
+message TaskQueueForceRunResponse {
+ required TaskQueueServiceError.ErrorCode result = 3;
+}
+
+message TaskQueueUpdateQueueRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+ required double bucket_refill_per_second = 3;
+ required int32 bucket_capacity = 4;
+ optional string user_specified_rate = 5;
+ optional TaskQueueRetryParameters retry_parameters = 6;
+ optional int32 max_concurrent_requests = 7;
+ optional TaskQueueMode.Mode mode = 8 [default = PUSH];
+ optional TaskQueueAcl acl = 9;
+ repeated TaskQueueHttpHeader header_override = 10;
+}
+
+message TaskQueueUpdateQueueResponse {
+}
+
+message TaskQueueFetchQueuesRequest {
+ optional bytes app_id = 1;
+ required int32 max_rows = 2;
+}
+
+message TaskQueueFetchQueuesResponse {
+ repeated group Queue = 1 {
+ required bytes queue_name = 2;
+ required double bucket_refill_per_second = 3;
+ required double bucket_capacity = 4;
+ optional string user_specified_rate = 5;
+ required bool paused = 6 [default=false];
+ optional TaskQueueRetryParameters retry_parameters = 7;
+ optional int32 max_concurrent_requests = 8;
+ optional TaskQueueMode.Mode mode = 9 [default = PUSH];
+ optional TaskQueueAcl acl = 10;
+ repeated TaskQueueHttpHeader header_override = 11;
+ optional string creator_name = 12 [ctype=CORD, default="apphosting"];
+ }
+}
+
+message TaskQueueFetchQueueStatsRequest {
+ optional bytes app_id = 1;
+ repeated bytes queue_name = 2;
+ optional int32 max_num_tasks = 3 [default = 0];
+}
+
+message TaskQueueScannerQueueInfo {
+ required int64 executed_last_minute = 1;
+ required int64 executed_last_hour = 2;
+ required double sampling_duration_seconds = 3;
+ optional int32 requests_in_flight = 4;
+ optional double enforced_rate = 5;
+}
+
+message TaskQueueFetchQueueStatsResponse {
+ repeated group QueueStats = 1 {
+ required int32 num_tasks = 2;
+ required int64 oldest_eta_usec = 3;
+ optional TaskQueueScannerQueueInfo scanner_info = 4;
+ }
+}
+message TaskQueuePauseQueueRequest {
+ required bytes app_id = 1;
+ required bytes queue_name = 2;
+ required bool pause = 3;
+}
+
+message TaskQueuePauseQueueResponse {
+}
+
+message TaskQueuePurgeQueueRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+}
+
+message TaskQueuePurgeQueueResponse {
+}
+
+message TaskQueueDeleteQueueRequest {
+ required bytes app_id = 1;
+ required bytes queue_name = 2;
+}
+
+message TaskQueueDeleteQueueResponse {
+}
+
+message TaskQueueDeleteGroupRequest {
+ required bytes app_id = 1;
+}
+
+message TaskQueueDeleteGroupResponse {
+}
+
+message TaskQueueQueryTasksRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+
+ optional bytes start_task_name = 3;
+ optional int64 start_eta_usec = 4;
+ optional bytes start_tag = 6;
+ optional int32 max_rows = 5 [default = 1];
+}
+
+message TaskQueueQueryTasksResponse {
+ repeated group Task = 1 {
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+ optional bytes url = 4;
+
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ }
+ optional RequestMethod method = 5;
+
+ optional int32 retry_count = 6 [default=0];
+
+ repeated group Header = 7 {
+ required bytes key = 8;
+ required bytes value = 9;
+ }
+
+ optional int32 body_size = 10;
+ optional bytes body = 11 [ctype=CORD];
+ required int64 creation_time_usec = 12;
+
+ optional group CronTimetable = 13 {
+ required bytes schedule = 14;
+ required bytes timezone = 15;
+ }
+
+ optional group RunLog = 16 {
+ required int64 dispatched_usec = 17;
+ required int64 lag_usec = 18;
+ required int64 elapsed_usec = 19;
+ optional int64 response_code = 20;
+ optional string retry_reason = 27;
+ }
+
+ optional bytes description = 21;
+ optional TaskPayload payload = 22;
+ optional TaskQueueRetryParameters retry_parameters = 23;
+ optional int64 first_try_usec = 24;
+ optional bytes tag = 25;
+ optional int32 execution_count = 26 [default=0];
+ }
+}
+
+message TaskQueueFetchTaskRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+ required bytes task_name = 3;
+}
+
+message TaskQueueFetchTaskResponse {
+ required TaskQueueQueryTasksResponse task = 1;
+}
+
+message TaskQueueUpdateStorageLimitRequest {
+ required bytes app_id = 1;
+ required int64 limit = 2;
+}
+
+message TaskQueueUpdateStorageLimitResponse {
+ required int64 new_limit = 1;
+}
+
+message TaskQueueQueryAndOwnTasksRequest {
+ required bytes queue_name = 1;
+ required double lease_seconds = 2;
+ required int64 max_tasks = 3;
+ optional bool group_by_tag = 4 [default=false];
+ optional bytes tag = 5;
+}
+
+message TaskQueueQueryAndOwnTasksResponse {
+ repeated group Task = 1 {
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+ optional int32 retry_count = 4 [default=0];
+ optional bytes body = 5 [ctype=CORD];
+ optional bytes tag = 6;
+ }
+}
+
+message TaskQueueModifyTaskLeaseRequest {
+ required bytes queue_name = 1;
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+ required double lease_seconds = 4;
+}
+
+message TaskQueueModifyTaskLeaseResponse {
+ required int64 updated_eta_usec = 1;
+}
diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
new file mode 100644
index 0000000..28a6d18
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/transaction.go
@@ -0,0 +1,107 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements hooks for applying datastore transactions.
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var transactionSetters = make(map[reflect.Type]reflect.Value)
+
+// RegisterTransactionSetter registers a function that sets transaction information
+// in a protocol buffer message. f should be a function with two arguments,
+// the first being a protocol buffer type, and the second being *datastore.Transaction.
+func RegisterTransactionSetter(f interface{}) {
+ v := reflect.ValueOf(f)
+ transactionSetters[v.Type().In(0)] = v
+}
+
+// applyTransaction applies the transaction t to message pb
+// by using the relevant setter passed to RegisterTransactionSetter.
+func applyTransaction(pb proto.Message, t *pb.Transaction) {
+ v := reflect.ValueOf(pb)
+ if f, ok := transactionSetters[v.Type()]; ok {
+ f.Call([]reflect.Value{v, reflect.ValueOf(t)})
+ }
+}
+
+var transactionKey = "used for *Transaction"
+
+func transactionFromContext(ctx netcontext.Context) *transaction {
+ t, _ := ctx.Value(&transactionKey).(*transaction)
+ return t
+}
+
+func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
+ return netcontext.WithValue(ctx, &transactionKey, t)
+}
+
+type transaction struct {
+ transaction pb.Transaction
+ finished bool
+}
+
+var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
+
+func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error {
+ if transactionFromContext(c) != nil {
+ return errors.New("nested transactions are not supported")
+ }
+
+ // Begin the transaction.
+ t := &transaction{}
+ req := &pb.BeginTransactionRequest{
+ App: proto.String(FullyQualifiedAppID(c)),
+ }
+ if xg {
+ req.AllowMultipleEg = proto.Bool(true)
+ }
+ if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
+ return err
+ }
+
+ // Call f, rolling back the transaction if f returns a non-nil error, or panics.
+ // The panic is not recovered.
+ defer func() {
+ if t.finished {
+ return
+ }
+ t.finished = true
+ // Ignore the error return value, since we are already returning a non-nil
+ // error (or we're panicking).
+ Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
+ }()
+ if err := f(withTransaction(c, t)); err != nil {
+ return err
+ }
+ t.finished = true
+
+ // Commit the transaction.
+ res := &pb.CommitResponse{}
+ err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
+ if ae, ok := err.(*APIError); ok {
+ /* TODO: restore this conditional
+ if appengine.IsDevAppServer() {
+ */
+ // The Python Dev AppServer raises an ApplicationError with error code 2 (which is
+ // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
+ if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
+ return ErrConcurrentTransaction
+ }
+ if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
+ return ErrConcurrentTransaction
+ }
+ }
+ return err
+}
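
RunTransactionOnce begins a datastore_v3 transaction, runs f with a context carrying that transaction, rolls back if f returns an error or panics, and otherwise commits, mapping concurrency failures to ErrConcurrentTransaction. A hedged usage sketch (assumed caller code in this same internal package, not part of the vendored file):

    // runWithRetry runs f once per attempt inside a datastore transaction and
    // retries a bounded number of times on optimistic-concurrency failures.
    func runWithRetry(ctx netcontext.Context) error {
        for i := 0; i < 3; i++ {
            err := RunTransactionOnce(ctx, func(tc netcontext.Context) error {
                // Datastore calls issued with tc pick up the transaction via
                // applyTransaction and the setters registered above.
                return nil
            }, false) // xg=false: a single entity group is enough here
            if err != ErrConcurrentTransaction {
                return err
            }
        }
        return ErrConcurrentTransaction
    }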
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
new file mode 100644
index 0000000..af463fb
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
@@ -0,0 +1,355 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+// DO NOT EDIT!
+
+/*
+Package urlfetch is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+
+It has these top-level messages:
+ URLFetchServiceError
+ URLFetchRequest
+ URLFetchResponse
+*/
+package urlfetch
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type URLFetchServiceError_ErrorCode int32
+
+const (
+ URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0
+ URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1
+ URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2
+ URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3
+ URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4
+ URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5
+ URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6
+ URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7
+ URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8
+ URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
+ URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10
+ URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11
+ URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12
+)
+
+var URLFetchServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_URL",
+ 2: "FETCH_ERROR",
+ 3: "UNSPECIFIED_ERROR",
+ 4: "RESPONSE_TOO_LARGE",
+ 5: "DEADLINE_EXCEEDED",
+ 6: "SSL_CERTIFICATE_ERROR",
+ 7: "DNS_ERROR",
+ 8: "CLOSED",
+ 9: "INTERNAL_TRANSIENT_ERROR",
+ 10: "TOO_MANY_REDIRECTS",
+ 11: "MALFORMED_REPLY",
+ 12: "CONNECTION_ERROR",
+}
+var URLFetchServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_URL": 1,
+ "FETCH_ERROR": 2,
+ "UNSPECIFIED_ERROR": 3,
+ "RESPONSE_TOO_LARGE": 4,
+ "DEADLINE_EXCEEDED": 5,
+ "SSL_CERTIFICATE_ERROR": 6,
+ "DNS_ERROR": 7,
+ "CLOSED": 8,
+ "INTERNAL_TRANSIENT_ERROR": 9,
+ "TOO_MANY_REDIRECTS": 10,
+ "MALFORMED_REPLY": 11,
+ "CONNECTION_ERROR": 12,
+}
+
+func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
+ p := new(URLFetchServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x URLFetchServiceError_ErrorCode) String() string {
+ return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
+}
+func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = URLFetchServiceError_ErrorCode(value)
+ return nil
+}
+
+type URLFetchRequest_RequestMethod int32
+
+const (
+ URLFetchRequest_GET URLFetchRequest_RequestMethod = 1
+ URLFetchRequest_POST URLFetchRequest_RequestMethod = 2
+ URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3
+ URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4
+ URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
+ URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6
+)
+
+var URLFetchRequest_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+ 6: "PATCH",
+}
+var URLFetchRequest_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+ "PATCH": 6,
+}
+
+func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
+ p := new(URLFetchRequest_RequestMethod)
+ *p = x
+ return p
+}
+func (x URLFetchRequest_RequestMethod) String() string {
+ return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
+}
+func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = URLFetchRequest_RequestMethod(value)
+ return nil
+}
+
+type URLFetchServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} }
+func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
+func (*URLFetchServiceError) ProtoMessage() {}
+
+type URLFetchRequest struct {
+ Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
+ Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"`
+ Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"`
+ Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"`
+ FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"`
+ Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"`
+ MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} }
+func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest) ProtoMessage() {}
+
+const Default_URLFetchRequest_FollowRedirects bool = true
+const Default_URLFetchRequest_MustValidateServerCertificate bool = true
+
+func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return URLFetchRequest_GET
+}
+
+func (m *URLFetchRequest) GetUrl() string {
+ if m != nil && m.Url != nil {
+ return *m.Url
+ }
+ return ""
+}
+
+func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *URLFetchRequest) GetPayload() []byte {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *URLFetchRequest) GetFollowRedirects() bool {
+ if m != nil && m.FollowRedirects != nil {
+ return *m.FollowRedirects
+ }
+ return Default_URLFetchRequest_FollowRedirects
+}
+
+func (m *URLFetchRequest) GetDeadline() float64 {
+ if m != nil && m.Deadline != nil {
+ return *m.Deadline
+ }
+ return 0
+}
+
+func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
+ if m != nil && m.MustValidateServerCertificate != nil {
+ return *m.MustValidateServerCertificate
+ }
+ return Default_URLFetchRequest_MustValidateServerCertificate
+}
+
+type URLFetchRequest_Header struct {
+ Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} }
+func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest_Header) ProtoMessage() {}
+
+func (m *URLFetchRequest_Header) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *URLFetchRequest_Header) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type URLFetchResponse struct {
+ Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"`
+ StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"`
+ Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"`
+ ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"`
+ ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"`
+ ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"`
+ FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"`
+ ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"`
+ ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"`
+ ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} }
+func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse) ProtoMessage() {}
+
+const Default_URLFetchResponse_ContentWasTruncated bool = false
+const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
+const Default_URLFetchResponse_ApiBytesSent int64 = 0
+const Default_URLFetchResponse_ApiBytesReceived int64 = 0
+
+func (m *URLFetchResponse) GetContent() []byte {
+ if m != nil {
+ return m.Content
+ }
+ return nil
+}
+
+func (m *URLFetchResponse) GetStatusCode() int32 {
+ if m != nil && m.StatusCode != nil {
+ return *m.StatusCode
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *URLFetchResponse) GetContentWasTruncated() bool {
+ if m != nil && m.ContentWasTruncated != nil {
+ return *m.ContentWasTruncated
+ }
+ return Default_URLFetchResponse_ContentWasTruncated
+}
+
+func (m *URLFetchResponse) GetExternalBytesSent() int64 {
+ if m != nil && m.ExternalBytesSent != nil {
+ return *m.ExternalBytesSent
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
+ if m != nil && m.ExternalBytesReceived != nil {
+ return *m.ExternalBytesReceived
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetFinalUrl() string {
+ if m != nil && m.FinalUrl != nil {
+ return *m.FinalUrl
+ }
+ return ""
+}
+
+func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
+ if m != nil && m.ApiCpuMilliseconds != nil {
+ return *m.ApiCpuMilliseconds
+ }
+ return Default_URLFetchResponse_ApiCpuMilliseconds
+}
+
+func (m *URLFetchResponse) GetApiBytesSent() int64 {
+ if m != nil && m.ApiBytesSent != nil {
+ return *m.ApiBytesSent
+ }
+ return Default_URLFetchResponse_ApiBytesSent
+}
+
+func (m *URLFetchResponse) GetApiBytesReceived() int64 {
+ if m != nil && m.ApiBytesReceived != nil {
+ return *m.ApiBytesReceived
+ }
+ return Default_URLFetchResponse_ApiBytesReceived
+}
+
+type URLFetchResponse_Header struct {
+ Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} }
+func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse_Header) ProtoMessage() {}
+
+func (m *URLFetchResponse_Header) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *URLFetchResponse_Header) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+func init() {
+}
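
The request type above illustrates proto2 defaults: FollowRedirects and MustValidateServerCertificate default to true, so their getters return true when the fields are left unset. A short sketch of building a request with the generated helpers (illustrative only, not part of the vendored file):

    package example // illustrative only

    import (
        "github.com/golang/protobuf/proto"

        urlfetch "google.golang.org/appengine/internal/urlfetch"
    )

    func newFetch(url string) *urlfetch.URLFetchRequest {
        req := &urlfetch.URLFetchRequest{
            Method:   urlfetch.URLFetchRequest_GET.Enum(), // Enum() returns a pointer to the constant
            Url:      proto.String(url),
            Deadline: proto.Float64(10), // seconds
        }
        _ = req.GetFollowRedirects() // true: unset fields fall back to the proto2 default
        return req
    }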
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
new file mode 100644
index 0000000..f695edf
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "urlfetch";
+
+package appengine;
+
+message URLFetchServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_URL = 1;
+ FETCH_ERROR = 2;
+ UNSPECIFIED_ERROR = 3;
+ RESPONSE_TOO_LARGE = 4;
+ DEADLINE_EXCEEDED = 5;
+ SSL_CERTIFICATE_ERROR = 6;
+ DNS_ERROR = 7;
+ CLOSED = 8;
+ INTERNAL_TRANSIENT_ERROR = 9;
+ TOO_MANY_REDIRECTS = 10;
+ MALFORMED_REPLY = 11;
+ CONNECTION_ERROR = 12;
+ }
+}
+
+message URLFetchRequest {
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ PATCH = 6;
+ }
+ required RequestMethod Method = 1;
+ required string Url = 2;
+ repeated group Header = 3 {
+ required string Key = 4;
+ required string Value = 5;
+ }
+ optional bytes Payload = 6 [ctype=CORD];
+
+ optional bool FollowRedirects = 7 [default=true];
+
+ optional double Deadline = 8;
+
+ optional bool MustValidateServerCertificate = 9 [default=true];
+}
+
+message URLFetchResponse {
+ optional bytes Content = 1;
+ required int32 StatusCode = 2;
+ repeated group Header = 3 {
+ required string Key = 4;
+ required string Value = 5;
+ }
+ optional bool ContentWasTruncated = 6 [default=false];
+ optional int64 ExternalBytesSent = 7;
+ optional int64 ExternalBytesReceived = 8;
+
+ optional string FinalUrl = 9;
+
+ optional int64 ApiCpuMilliseconds = 10 [default=0];
+ optional int64 ApiBytesSent = 11 [default=0];
+ optional int64 ApiBytesReceived = 12 [default=0];
+}
diff --git a/vendor/google.golang.org/appengine/internal/user/user_service.pb.go b/vendor/google.golang.org/appengine/internal/user/user_service.pb.go
new file mode 100644
index 0000000..6b52ffc
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/user/user_service.pb.go
@@ -0,0 +1,289 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/user/user_service.proto
+// DO NOT EDIT!
+
+/*
+Package user is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/user/user_service.proto
+
+It has these top-level messages:
+ UserServiceError
+ CreateLoginURLRequest
+ CreateLoginURLResponse
+ CreateLogoutURLRequest
+ CreateLogoutURLResponse
+ GetOAuthUserRequest
+ GetOAuthUserResponse
+ CheckOAuthSignatureRequest
+ CheckOAuthSignatureResponse
+*/
+package user
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type UserServiceError_ErrorCode int32
+
+const (
+ UserServiceError_OK UserServiceError_ErrorCode = 0
+ UserServiceError_REDIRECT_URL_TOO_LONG UserServiceError_ErrorCode = 1
+ UserServiceError_NOT_ALLOWED UserServiceError_ErrorCode = 2
+ UserServiceError_OAUTH_INVALID_TOKEN UserServiceError_ErrorCode = 3
+ UserServiceError_OAUTH_INVALID_REQUEST UserServiceError_ErrorCode = 4
+ UserServiceError_OAUTH_ERROR UserServiceError_ErrorCode = 5
+)
+
+var UserServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "REDIRECT_URL_TOO_LONG",
+ 2: "NOT_ALLOWED",
+ 3: "OAUTH_INVALID_TOKEN",
+ 4: "OAUTH_INVALID_REQUEST",
+ 5: "OAUTH_ERROR",
+}
+var UserServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "REDIRECT_URL_TOO_LONG": 1,
+ "NOT_ALLOWED": 2,
+ "OAUTH_INVALID_TOKEN": 3,
+ "OAUTH_INVALID_REQUEST": 4,
+ "OAUTH_ERROR": 5,
+}
+
+func (x UserServiceError_ErrorCode) Enum() *UserServiceError_ErrorCode {
+ p := new(UserServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x UserServiceError_ErrorCode) String() string {
+ return proto.EnumName(UserServiceError_ErrorCode_name, int32(x))
+}
+func (x *UserServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(UserServiceError_ErrorCode_value, data, "UserServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = UserServiceError_ErrorCode(value)
+ return nil
+}
+
+type UserServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UserServiceError) Reset() { *m = UserServiceError{} }
+func (m *UserServiceError) String() string { return proto.CompactTextString(m) }
+func (*UserServiceError) ProtoMessage() {}
+
+type CreateLoginURLRequest struct {
+ DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,3,opt,name=federated_identity,def=" json:"federated_identity,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLoginURLRequest) Reset() { *m = CreateLoginURLRequest{} }
+func (m *CreateLoginURLRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateLoginURLRequest) ProtoMessage() {}
+
+func (m *CreateLoginURLRequest) GetDestinationUrl() string {
+ if m != nil && m.DestinationUrl != nil {
+ return *m.DestinationUrl
+ }
+ return ""
+}
+
+func (m *CreateLoginURLRequest) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *CreateLoginURLRequest) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+type CreateLoginURLResponse struct {
+ LoginUrl *string `protobuf:"bytes,1,req,name=login_url" json:"login_url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLoginURLResponse) Reset() { *m = CreateLoginURLResponse{} }
+func (m *CreateLoginURLResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateLoginURLResponse) ProtoMessage() {}
+
+func (m *CreateLoginURLResponse) GetLoginUrl() string {
+ if m != nil && m.LoginUrl != nil {
+ return *m.LoginUrl
+ }
+ return ""
+}
+
+type CreateLogoutURLRequest struct {
+ DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLogoutURLRequest) Reset() { *m = CreateLogoutURLRequest{} }
+func (m *CreateLogoutURLRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateLogoutURLRequest) ProtoMessage() {}
+
+func (m *CreateLogoutURLRequest) GetDestinationUrl() string {
+ if m != nil && m.DestinationUrl != nil {
+ return *m.DestinationUrl
+ }
+ return ""
+}
+
+func (m *CreateLogoutURLRequest) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+type CreateLogoutURLResponse struct {
+ LogoutUrl *string `protobuf:"bytes,1,req,name=logout_url" json:"logout_url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLogoutURLResponse) Reset() { *m = CreateLogoutURLResponse{} }
+func (m *CreateLogoutURLResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateLogoutURLResponse) ProtoMessage() {}
+
+func (m *CreateLogoutURLResponse) GetLogoutUrl() string {
+ if m != nil && m.LogoutUrl != nil {
+ return *m.LogoutUrl
+ }
+ return ""
+}
+
+type GetOAuthUserRequest struct {
+ Scope *string `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"`
+ Scopes []string `protobuf:"bytes,2,rep,name=scopes" json:"scopes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetOAuthUserRequest) Reset() { *m = GetOAuthUserRequest{} }
+func (m *GetOAuthUserRequest) String() string { return proto.CompactTextString(m) }
+func (*GetOAuthUserRequest) ProtoMessage() {}
+
+func (m *GetOAuthUserRequest) GetScope() string {
+ if m != nil && m.Scope != nil {
+ return *m.Scope
+ }
+ return ""
+}
+
+func (m *GetOAuthUserRequest) GetScopes() []string {
+ if m != nil {
+ return m.Scopes
+ }
+ return nil
+}
+
+type GetOAuthUserResponse struct {
+ Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
+ UserId *string `protobuf:"bytes,2,req,name=user_id" json:"user_id,omitempty"`
+ AuthDomain *string `protobuf:"bytes,3,req,name=auth_domain" json:"auth_domain,omitempty"`
+ UserOrganization *string `protobuf:"bytes,4,opt,name=user_organization,def=" json:"user_organization,omitempty"`
+ IsAdmin *bool `protobuf:"varint,5,opt,name=is_admin,def=0" json:"is_admin,omitempty"`
+ ClientId *string `protobuf:"bytes,6,opt,name=client_id,def=" json:"client_id,omitempty"`
+ Scopes []string `protobuf:"bytes,7,rep,name=scopes" json:"scopes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetOAuthUserResponse) Reset() { *m = GetOAuthUserResponse{} }
+func (m *GetOAuthUserResponse) String() string { return proto.CompactTextString(m) }
+func (*GetOAuthUserResponse) ProtoMessage() {}
+
+const Default_GetOAuthUserResponse_IsAdmin bool = false
+
+func (m *GetOAuthUserResponse) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetUserId() string {
+ if m != nil && m.UserId != nil {
+ return *m.UserId
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetUserOrganization() string {
+ if m != nil && m.UserOrganization != nil {
+ return *m.UserOrganization
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetIsAdmin() bool {
+ if m != nil && m.IsAdmin != nil {
+ return *m.IsAdmin
+ }
+ return Default_GetOAuthUserResponse_IsAdmin
+}
+
+func (m *GetOAuthUserResponse) GetClientId() string {
+ if m != nil && m.ClientId != nil {
+ return *m.ClientId
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetScopes() []string {
+ if m != nil {
+ return m.Scopes
+ }
+ return nil
+}
+
+type CheckOAuthSignatureRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CheckOAuthSignatureRequest) Reset() { *m = CheckOAuthSignatureRequest{} }
+func (m *CheckOAuthSignatureRequest) String() string { return proto.CompactTextString(m) }
+func (*CheckOAuthSignatureRequest) ProtoMessage() {}
+
+type CheckOAuthSignatureResponse struct {
+ OauthConsumerKey *string `protobuf:"bytes,1,req,name=oauth_consumer_key" json:"oauth_consumer_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CheckOAuthSignatureResponse) Reset() { *m = CheckOAuthSignatureResponse{} }
+func (m *CheckOAuthSignatureResponse) String() string { return proto.CompactTextString(m) }
+func (*CheckOAuthSignatureResponse) ProtoMessage() {}
+
+func (m *CheckOAuthSignatureResponse) GetOauthConsumerKey() string {
+ if m != nil && m.OauthConsumerKey != nil {
+ return *m.OauthConsumerKey
+ }
+ return ""
+}
+
+func init() {
+}
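
GetOAuthUserResponse shows the same default handling: IsAdmin falls back to false and the string getters return "" when unset. A small sketch of reading the generated message (illustrative only, not part of the vendored file):

    package example // illustrative only

    import "google.golang.org/appengine/internal/user"

    func describe(res *user.GetOAuthUserResponse) (email string, admin bool) {
        // Getters are nil-safe; IsAdmin returns its declared default (false) when unset.
        return res.GetEmail(), res.GetIsAdmin()
    }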
diff --git a/vendor/google.golang.org/appengine/internal/user/user_service.proto b/vendor/google.golang.org/appengine/internal/user/user_service.proto
new file mode 100644
index 0000000..f3e9693
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/user/user_service.proto
@@ -0,0 +1,58 @@
+syntax = "proto2";
+option go_package = "user";
+
+package appengine;
+
+message UserServiceError {
+ enum ErrorCode {
+ OK = 0;
+ REDIRECT_URL_TOO_LONG = 1;
+ NOT_ALLOWED = 2;
+ OAUTH_INVALID_TOKEN = 3;
+ OAUTH_INVALID_REQUEST = 4;
+ OAUTH_ERROR = 5;
+ }
+}
+
+message CreateLoginURLRequest {
+ required string destination_url = 1;
+ optional string auth_domain = 2;
+ optional string federated_identity = 3 [default = ""];
+}
+
+message CreateLoginURLResponse {
+ required string login_url = 1;
+}
+
+message CreateLogoutURLRequest {
+ required string destination_url = 1;
+ optional string auth_domain = 2;
+}
+
+message CreateLogoutURLResponse {
+ required string logout_url = 1;
+}
+
+message GetOAuthUserRequest {
+ optional string scope = 1;
+
+ repeated string scopes = 2;
+}
+
+message GetOAuthUserResponse {
+ required string email = 1;
+ required string user_id = 2;
+ required string auth_domain = 3;
+ optional string user_organization = 4 [default = ""];
+ optional bool is_admin = 5 [default = false];
+ optional string client_id = 6 [default = ""];
+
+ repeated string scopes = 7;
+}
+
+message CheckOAuthSignatureRequest {
+}
+
+message CheckOAuthSignatureResponse {
+ required string oauth_consumer_key = 1;
+}
diff --git a/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go
new file mode 100644
index 0000000..6d5b0ae
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go
@@ -0,0 +1,427 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/xmpp/xmpp_service.proto
+// DO NOT EDIT!
+
+/*
+Package xmpp is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/xmpp/xmpp_service.proto
+
+It has these top-level messages:
+ XmppServiceError
+ PresenceRequest
+ PresenceResponse
+ BulkPresenceRequest
+ BulkPresenceResponse
+ XmppMessageRequest
+ XmppMessageResponse
+ XmppSendPresenceRequest
+ XmppSendPresenceResponse
+ XmppInviteRequest
+ XmppInviteResponse
+*/
+package xmpp
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type XmppServiceError_ErrorCode int32
+
+const (
+ XmppServiceError_UNSPECIFIED_ERROR XmppServiceError_ErrorCode = 1
+ XmppServiceError_INVALID_JID XmppServiceError_ErrorCode = 2
+ XmppServiceError_NO_BODY XmppServiceError_ErrorCode = 3
+ XmppServiceError_INVALID_XML XmppServiceError_ErrorCode = 4
+ XmppServiceError_INVALID_TYPE XmppServiceError_ErrorCode = 5
+ XmppServiceError_INVALID_SHOW XmppServiceError_ErrorCode = 6
+ XmppServiceError_EXCEEDED_MAX_SIZE XmppServiceError_ErrorCode = 7
+ XmppServiceError_APPID_ALIAS_REQUIRED XmppServiceError_ErrorCode = 8
+ XmppServiceError_NONDEFAULT_MODULE XmppServiceError_ErrorCode = 9
+)
+
+var XmppServiceError_ErrorCode_name = map[int32]string{
+ 1: "UNSPECIFIED_ERROR",
+ 2: "INVALID_JID",
+ 3: "NO_BODY",
+ 4: "INVALID_XML",
+ 5: "INVALID_TYPE",
+ 6: "INVALID_SHOW",
+ 7: "EXCEEDED_MAX_SIZE",
+ 8: "APPID_ALIAS_REQUIRED",
+ 9: "NONDEFAULT_MODULE",
+}
+var XmppServiceError_ErrorCode_value = map[string]int32{
+ "UNSPECIFIED_ERROR": 1,
+ "INVALID_JID": 2,
+ "NO_BODY": 3,
+ "INVALID_XML": 4,
+ "INVALID_TYPE": 5,
+ "INVALID_SHOW": 6,
+ "EXCEEDED_MAX_SIZE": 7,
+ "APPID_ALIAS_REQUIRED": 8,
+ "NONDEFAULT_MODULE": 9,
+}
+
+func (x XmppServiceError_ErrorCode) Enum() *XmppServiceError_ErrorCode {
+ p := new(XmppServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x XmppServiceError_ErrorCode) String() string {
+ return proto.EnumName(XmppServiceError_ErrorCode_name, int32(x))
+}
+func (x *XmppServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(XmppServiceError_ErrorCode_value, data, "XmppServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = XmppServiceError_ErrorCode(value)
+ return nil
+}
+
+type PresenceResponse_SHOW int32
+
+const (
+ PresenceResponse_NORMAL PresenceResponse_SHOW = 0
+ PresenceResponse_AWAY PresenceResponse_SHOW = 1
+ PresenceResponse_DO_NOT_DISTURB PresenceResponse_SHOW = 2
+ PresenceResponse_CHAT PresenceResponse_SHOW = 3
+ PresenceResponse_EXTENDED_AWAY PresenceResponse_SHOW = 4
+)
+
+var PresenceResponse_SHOW_name = map[int32]string{
+ 0: "NORMAL",
+ 1: "AWAY",
+ 2: "DO_NOT_DISTURB",
+ 3: "CHAT",
+ 4: "EXTENDED_AWAY",
+}
+var PresenceResponse_SHOW_value = map[string]int32{
+ "NORMAL": 0,
+ "AWAY": 1,
+ "DO_NOT_DISTURB": 2,
+ "CHAT": 3,
+ "EXTENDED_AWAY": 4,
+}
+
+func (x PresenceResponse_SHOW) Enum() *PresenceResponse_SHOW {
+ p := new(PresenceResponse_SHOW)
+ *p = x
+ return p
+}
+func (x PresenceResponse_SHOW) String() string {
+ return proto.EnumName(PresenceResponse_SHOW_name, int32(x))
+}
+func (x *PresenceResponse_SHOW) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PresenceResponse_SHOW_value, data, "PresenceResponse_SHOW")
+ if err != nil {
+ return err
+ }
+ *x = PresenceResponse_SHOW(value)
+ return nil
+}
+
+type XmppMessageResponse_XmppMessageStatus int32
+
+const (
+ XmppMessageResponse_NO_ERROR XmppMessageResponse_XmppMessageStatus = 0
+ XmppMessageResponse_INVALID_JID XmppMessageResponse_XmppMessageStatus = 1
+ XmppMessageResponse_OTHER_ERROR XmppMessageResponse_XmppMessageStatus = 2
+)
+
+var XmppMessageResponse_XmppMessageStatus_name = map[int32]string{
+ 0: "NO_ERROR",
+ 1: "INVALID_JID",
+ 2: "OTHER_ERROR",
+}
+var XmppMessageResponse_XmppMessageStatus_value = map[string]int32{
+ "NO_ERROR": 0,
+ "INVALID_JID": 1,
+ "OTHER_ERROR": 2,
+}
+
+func (x XmppMessageResponse_XmppMessageStatus) Enum() *XmppMessageResponse_XmppMessageStatus {
+ p := new(XmppMessageResponse_XmppMessageStatus)
+ *p = x
+ return p
+}
+func (x XmppMessageResponse_XmppMessageStatus) String() string {
+ return proto.EnumName(XmppMessageResponse_XmppMessageStatus_name, int32(x))
+}
+func (x *XmppMessageResponse_XmppMessageStatus) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(XmppMessageResponse_XmppMessageStatus_value, data, "XmppMessageResponse_XmppMessageStatus")
+ if err != nil {
+ return err
+ }
+ *x = XmppMessageResponse_XmppMessageStatus(value)
+ return nil
+}
+
+type XmppServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppServiceError) Reset() { *m = XmppServiceError{} }
+func (m *XmppServiceError) String() string { return proto.CompactTextString(m) }
+func (*XmppServiceError) ProtoMessage() {}
+
+type PresenceRequest struct {
+ Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"`
+ FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PresenceRequest) Reset() { *m = PresenceRequest{} }
+func (m *PresenceRequest) String() string { return proto.CompactTextString(m) }
+func (*PresenceRequest) ProtoMessage() {}
+
+func (m *PresenceRequest) GetJid() string {
+ if m != nil && m.Jid != nil {
+ return *m.Jid
+ }
+ return ""
+}
+
+func (m *PresenceRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type PresenceResponse struct {
+ IsAvailable *bool `protobuf:"varint,1,req,name=is_available" json:"is_available,omitempty"`
+ Presence *PresenceResponse_SHOW `protobuf:"varint,2,opt,name=presence,enum=appengine.PresenceResponse_SHOW" json:"presence,omitempty"`
+ Valid *bool `protobuf:"varint,3,opt,name=valid" json:"valid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PresenceResponse) Reset() { *m = PresenceResponse{} }
+func (m *PresenceResponse) String() string { return proto.CompactTextString(m) }
+func (*PresenceResponse) ProtoMessage() {}
+
+func (m *PresenceResponse) GetIsAvailable() bool {
+ if m != nil && m.IsAvailable != nil {
+ return *m.IsAvailable
+ }
+ return false
+}
+
+func (m *PresenceResponse) GetPresence() PresenceResponse_SHOW {
+ if m != nil && m.Presence != nil {
+ return *m.Presence
+ }
+ return PresenceResponse_NORMAL
+}
+
+func (m *PresenceResponse) GetValid() bool {
+ if m != nil && m.Valid != nil {
+ return *m.Valid
+ }
+ return false
+}
+
+type BulkPresenceRequest struct {
+ Jid []string `protobuf:"bytes,1,rep,name=jid" json:"jid,omitempty"`
+ FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BulkPresenceRequest) Reset() { *m = BulkPresenceRequest{} }
+func (m *BulkPresenceRequest) String() string { return proto.CompactTextString(m) }
+func (*BulkPresenceRequest) ProtoMessage() {}
+
+func (m *BulkPresenceRequest) GetJid() []string {
+ if m != nil {
+ return m.Jid
+ }
+ return nil
+}
+
+func (m *BulkPresenceRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type BulkPresenceResponse struct {
+ PresenceResponse []*PresenceResponse `protobuf:"bytes,1,rep,name=presence_response" json:"presence_response,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BulkPresenceResponse) Reset() { *m = BulkPresenceResponse{} }
+func (m *BulkPresenceResponse) String() string { return proto.CompactTextString(m) }
+func (*BulkPresenceResponse) ProtoMessage() {}
+
+func (m *BulkPresenceResponse) GetPresenceResponse() []*PresenceResponse {
+ if m != nil {
+ return m.PresenceResponse
+ }
+ return nil
+}
+
+type XmppMessageRequest struct {
+ Jid []string `protobuf:"bytes,1,rep,name=jid" json:"jid,omitempty"`
+ Body *string `protobuf:"bytes,2,req,name=body" json:"body,omitempty"`
+ RawXml *bool `protobuf:"varint,3,opt,name=raw_xml,def=0" json:"raw_xml,omitempty"`
+ Type *string `protobuf:"bytes,4,opt,name=type,def=chat" json:"type,omitempty"`
+ FromJid *string `protobuf:"bytes,5,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppMessageRequest) Reset() { *m = XmppMessageRequest{} }
+func (m *XmppMessageRequest) String() string { return proto.CompactTextString(m) }
+func (*XmppMessageRequest) ProtoMessage() {}
+
+const Default_XmppMessageRequest_RawXml bool = false
+const Default_XmppMessageRequest_Type string = "chat"
+
+func (m *XmppMessageRequest) GetJid() []string {
+ if m != nil {
+ return m.Jid
+ }
+ return nil
+}
+
+func (m *XmppMessageRequest) GetBody() string {
+ if m != nil && m.Body != nil {
+ return *m.Body
+ }
+ return ""
+}
+
+func (m *XmppMessageRequest) GetRawXml() bool {
+ if m != nil && m.RawXml != nil {
+ return *m.RawXml
+ }
+ return Default_XmppMessageRequest_RawXml
+}
+
+func (m *XmppMessageRequest) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_XmppMessageRequest_Type
+}
+
+func (m *XmppMessageRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type XmppMessageResponse struct {
+ Status []XmppMessageResponse_XmppMessageStatus `protobuf:"varint,1,rep,name=status,enum=appengine.XmppMessageResponse_XmppMessageStatus" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppMessageResponse) Reset() { *m = XmppMessageResponse{} }
+func (m *XmppMessageResponse) String() string { return proto.CompactTextString(m) }
+func (*XmppMessageResponse) ProtoMessage() {}
+
+func (m *XmppMessageResponse) GetStatus() []XmppMessageResponse_XmppMessageStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+type XmppSendPresenceRequest struct {
+ Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"`
+ Type *string `protobuf:"bytes,2,opt,name=type" json:"type,omitempty"`
+ Show *string `protobuf:"bytes,3,opt,name=show" json:"show,omitempty"`
+ Status *string `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"`
+ FromJid *string `protobuf:"bytes,5,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppSendPresenceRequest) Reset() { *m = XmppSendPresenceRequest{} }
+func (m *XmppSendPresenceRequest) String() string { return proto.CompactTextString(m) }
+func (*XmppSendPresenceRequest) ProtoMessage() {}
+
+func (m *XmppSendPresenceRequest) GetJid() string {
+ if m != nil && m.Jid != nil {
+ return *m.Jid
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetShow() string {
+ if m != nil && m.Show != nil {
+ return *m.Show
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetStatus() string {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type XmppSendPresenceResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppSendPresenceResponse) Reset() { *m = XmppSendPresenceResponse{} }
+func (m *XmppSendPresenceResponse) String() string { return proto.CompactTextString(m) }
+func (*XmppSendPresenceResponse) ProtoMessage() {}
+
+type XmppInviteRequest struct {
+ Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"`
+ FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppInviteRequest) Reset() { *m = XmppInviteRequest{} }
+func (m *XmppInviteRequest) String() string { return proto.CompactTextString(m) }
+func (*XmppInviteRequest) ProtoMessage() {}
+
+func (m *XmppInviteRequest) GetJid() string {
+ if m != nil && m.Jid != nil {
+ return *m.Jid
+ }
+ return ""
+}
+
+func (m *XmppInviteRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type XmppInviteResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppInviteResponse) Reset() { *m = XmppInviteResponse{} }
+func (m *XmppInviteResponse) String() string { return proto.CompactTextString(m) }
+func (*XmppInviteResponse) ProtoMessage() {}
+
+func init() {
+}
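
XmppMessageRequest carries two proto2 defaults, raw_xml=false and type="chat", which the getters return when those fields are unset. An illustrative sketch (not part of the vendored file):

    package example // illustrative only

    import (
        "github.com/golang/protobuf/proto"

        "google.golang.org/appengine/internal/xmpp"
    )

    func newChat(to []string, body string) *xmpp.XmppMessageRequest {
        msg := &xmpp.XmppMessageRequest{
            Jid:  to,
            Body: proto.String(body),
        }
        _ = msg.GetType() // "chat": the declared default when type is unset
        return msg
    }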
diff --git a/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto
new file mode 100644
index 0000000..472d52e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto
@@ -0,0 +1,83 @@
+syntax = "proto2";
+option go_package = "xmpp";
+
+package appengine;
+
+message XmppServiceError {
+ enum ErrorCode {
+ UNSPECIFIED_ERROR = 1;
+ INVALID_JID = 2;
+ NO_BODY = 3;
+ INVALID_XML = 4;
+ INVALID_TYPE = 5;
+ INVALID_SHOW = 6;
+ EXCEEDED_MAX_SIZE = 7;
+ APPID_ALIAS_REQUIRED = 8;
+ NONDEFAULT_MODULE = 9;
+ }
+}
+
+message PresenceRequest {
+ required string jid = 1;
+ optional string from_jid = 2;
+}
+
+message PresenceResponse {
+ enum SHOW {
+ NORMAL = 0;
+ AWAY = 1;
+ DO_NOT_DISTURB = 2;
+ CHAT = 3;
+ EXTENDED_AWAY = 4;
+ }
+
+ required bool is_available = 1;
+ optional SHOW presence = 2;
+ optional bool valid = 3;
+}
+
+message BulkPresenceRequest {
+ repeated string jid = 1;
+ optional string from_jid = 2;
+}
+
+message BulkPresenceResponse {
+ repeated PresenceResponse presence_response = 1;
+}
+
+message XmppMessageRequest {
+ repeated string jid = 1;
+ required string body = 2;
+ optional bool raw_xml = 3 [ default = false ];
+ optional string type = 4 [ default = "chat" ];
+ optional string from_jid = 5;
+}
+
+message XmppMessageResponse {
+ enum XmppMessageStatus {
+ NO_ERROR = 0;
+ INVALID_JID = 1;
+ OTHER_ERROR = 2;
+ }
+
+ repeated XmppMessageStatus status = 1;
+}
+
+message XmppSendPresenceRequest {
+ required string jid = 1;
+ optional string type = 2;
+ optional string show = 3;
+ optional string status = 4;
+ optional string from_jid = 5;
+}
+
+message XmppSendPresenceResponse {
+}
+
+message XmppInviteRequest {
+ required string jid = 1;
+ optional string from_jid = 2;
+}
+
+message XmppInviteResponse {
+}
diff --git a/vendor/google.golang.org/appengine/log/api.go b/vendor/google.golang.org/appengine/log/api.go
new file mode 100644
index 0000000..24d5860
--- /dev/null
+++ b/vendor/google.golang.org/appengine/log/api.go
@@ -0,0 +1,40 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package log
+
+// This file implements the logging API.
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// Debugf formats its arguments according to the format, analogous to fmt.Printf,
+// and records the text as a log message at Debug level. The message will be associated
+// with the request linked with the provided context.
+func Debugf(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 0, format, args...)
+}
+
+// Infof is like Debugf, but at Info level.
+func Infof(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 1, format, args...)
+}
+
+// Warningf is like Debugf, but at Warning level.
+func Warningf(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 2, format, args...)
+}
+
+// Errorf is like Debugf, but at Error level.
+func Errorf(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 3, format, args...)
+}
+
+// Criticalf is like Debugf, but at Critical level.
+func Criticalf(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 4, format, args...)
+}
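
The five functions share one shape: a request context, a Printf-style format string, and a severity level passed through to internal.Logf. A usage sketch (assumed handler code, not part of the vendored file):

    package example // illustrative only

    import (
        "net/http"

        "google.golang.org/appengine"
        "google.golang.org/appengine/log"
    )

    func handler(w http.ResponseWriter, r *http.Request) {
        // The context ties the log line to the current request.
        ctx := appengine.NewContext(r)
        log.Infof(ctx, "serving %s for %s", r.URL.Path, r.RemoteAddr)
    }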
diff --git a/vendor/google.golang.org/appengine/log/log.go b/vendor/google.golang.org/appengine/log/log.go
new file mode 100644
index 0000000..b54fe47
--- /dev/null
+++ b/vendor/google.golang.org/appengine/log/log.go
@@ -0,0 +1,323 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package log provides the means of querying an application's logs from
+within an App Engine application.
+
+Example:
+ c := appengine.NewContext(r)
+ query := &log.Query{
+ AppLogs: true,
+ Versions: []string{"1"},
+ }
+
+ for results := query.Run(c); ; {
+ record, err := results.Next()
+ if err == log.Done {
+ log.Infof(c, "Done processing results")
+ break
+ }
+ if err != nil {
+ log.Errorf(c, "Failed to retrieve next log: %v", err)
+ break
+ }
+ log.Infof(c, "Saw record %v", record)
+ }
+*/
+package log // import "google.golang.org/appengine/log"
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/log"
+)
+
+// Query defines a logs query.
+type Query struct {
+ // Start time specifies the earliest log to return (inclusive).
+ StartTime time.Time
+
+ // End time specifies the latest log to return (exclusive).
+ EndTime time.Time
+
+ // Offset specifies a position within the log stream to resume reading from,
+ // and should come from a previously returned Record's field of the same name.
+ Offset []byte
+
+ // Incomplete controls whether active (incomplete) requests should be included.
+ Incomplete bool
+
+ // AppLogs indicates if application-level logs should be included.
+ AppLogs bool
+
+ // ApplyMinLevel indicates if MinLevel should be used to filter results.
+ ApplyMinLevel bool
+
+ // If ApplyMinLevel is true, only logs for requests with at least one
+ // application log of MinLevel or higher will be returned.
+ MinLevel int
+
+ // Versions lists the major version IDs whose logs should be retrieved.
+ // Logs for specific modules can be retrieved by specifying versions
+ // in the form "module:version"; the default module is used if no module
+ // is specified.
+ Versions []string
+
+ // A list of requests to search for instead of a time-based scan. Cannot be
+ // combined with filtering options such as StartTime, EndTime, Offset,
+ // Incomplete, ApplyMinLevel, or Versions.
+ RequestIDs []string
+}
+
+// AppLog represents a single application-level log.
+type AppLog struct {
+ Time time.Time
+ Level int
+ Message string
+}
+
+// Record contains all the information for a single web request.
+type Record struct {
+ AppID string
+ ModuleID string
+ VersionID string
+ RequestID []byte
+ IP string
+ Nickname string
+ AppEngineRelease string
+
+ // The time when this request started.
+ StartTime time.Time
+
+ // The time when this request finished.
+ EndTime time.Time
+
+ // Opaque cursor into the result stream.
+ Offset []byte
+
+ // The time required to process the request.
+ Latency time.Duration
+ MCycles int64
+ Method string
+ Resource string
+ HTTPVersion string
+ Status int32
+
+ // The size of the response sent back to the client, in bytes.
+ ResponseSize int64
+ Referrer string
+ UserAgent string
+ URLMapEntry string
+ Combined string
+ Host string
+
+ // The estimated cost of this request, in dollars.
+ Cost float64
+ TaskQueueName string
+ TaskName string
+ WasLoadingRequest bool
+ PendingTime time.Duration
+ Finished bool
+ AppLogs []AppLog
+
+ // Mostly-unique identifier for the instance that handled the request, if available.
+ InstanceID string
+}
+
+// Result represents the result of a query.
+type Result struct {
+ logs []*Record
+ context context.Context
+ request *pb.LogReadRequest
+ resultsSeen bool
+ err error
+}
+
+// Next returns the next log record, or Done if there are no more results.
+func (qr *Result) Next() (*Record, error) {
+ if qr.err != nil {
+ return nil, qr.err
+ }
+ if len(qr.logs) > 0 {
+ lr := qr.logs[0]
+ qr.logs = qr.logs[1:]
+ return lr, nil
+ }
+
+ if qr.request.Offset == nil && qr.resultsSeen {
+ return nil, Done
+ }
+
+ if err := qr.run(); err != nil {
+ // Errors here may be retried, so don't store the error.
+ return nil, err
+ }
+
+ return qr.Next()
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("log: query has no more results")
+
+// protoToAppLogs converts a slice of *pb.LogLine, the internal
+// Protocol Buffer representation of application-level log lines,
+// into a slice of AppLog, the external representation of an
+// application-level log.
+func protoToAppLogs(logLines []*pb.LogLine) []AppLog {
+ appLogs := make([]AppLog, len(logLines))
+
+ for i, line := range logLines {
+ appLogs[i] = AppLog{
+ Time: time.Unix(0, *line.Time*1e3),
+ Level: int(*line.Level),
+ Message: *line.LogMessage,
+ }
+ }
+
+ return appLogs
+}
+
+// protoToRecord converts a RequestLog, the internal Protocol Buffer
+// representation of a single request-level log, to a Record, its
+// corresponding external representation.
+func protoToRecord(rl *pb.RequestLog) *Record {
+ offset, err := proto.Marshal(rl.Offset)
+ if err != nil {
+ offset = nil
+ }
+ return &Record{
+ AppID: *rl.AppId,
+ ModuleID: rl.GetModuleId(),
+ VersionID: *rl.VersionId,
+ RequestID: rl.RequestId,
+ Offset: offset,
+ IP: *rl.Ip,
+ Nickname: rl.GetNickname(),
+ AppEngineRelease: string(rl.GetAppEngineRelease()),
+ StartTime: time.Unix(0, *rl.StartTime*1e3),
+ EndTime: time.Unix(0, *rl.EndTime*1e3),
+ Latency: time.Duration(*rl.Latency) * time.Microsecond,
+ MCycles: *rl.Mcycles,
+ Method: *rl.Method,
+ Resource: *rl.Resource,
+ HTTPVersion: *rl.HttpVersion,
+ Status: *rl.Status,
+ ResponseSize: *rl.ResponseSize,
+ Referrer: rl.GetReferrer(),
+ UserAgent: rl.GetUserAgent(),
+ URLMapEntry: *rl.UrlMapEntry,
+ Combined: *rl.Combined,
+ Host: rl.GetHost(),
+ Cost: rl.GetCost(),
+ TaskQueueName: rl.GetTaskQueueName(),
+ TaskName: rl.GetTaskName(),
+ WasLoadingRequest: rl.GetWasLoadingRequest(),
+ PendingTime: time.Duration(rl.GetPendingTime()) * time.Microsecond,
+ Finished: rl.GetFinished(),
+ AppLogs: protoToAppLogs(rl.Line),
+ InstanceID: string(rl.GetCloneKey()),
+ }
+}
+
+// Run starts a query for log records, which contain request and application
+// level log information.
+func (params *Query) Run(c context.Context) *Result {
+ req, err := makeRequest(params, internal.FullyQualifiedAppID(c), appengine.VersionID(c))
+ return &Result{
+ context: c,
+ request: req,
+ err: err,
+ }
+}
+
+func makeRequest(params *Query, appID, versionID string) (*pb.LogReadRequest, error) {
+ req := &pb.LogReadRequest{}
+ req.AppId = &appID
+ if !params.StartTime.IsZero() {
+ req.StartTime = proto.Int64(params.StartTime.UnixNano() / 1e3)
+ }
+ if !params.EndTime.IsZero() {
+ req.EndTime = proto.Int64(params.EndTime.UnixNano() / 1e3)
+ }
+ if len(params.Offset) > 0 {
+ var offset pb.LogOffset
+ if err := proto.Unmarshal(params.Offset, &offset); err != nil {
+ return nil, fmt.Errorf("bad Offset: %v", err)
+ }
+ req.Offset = &offset
+ }
+ if params.Incomplete {
+ req.IncludeIncomplete = &params.Incomplete
+ }
+ if params.AppLogs {
+ req.IncludeAppLogs = &params.AppLogs
+ }
+ if params.ApplyMinLevel {
+ req.MinimumLogLevel = proto.Int32(int32(params.MinLevel))
+ }
+ if params.Versions == nil {
+ // If no versions were specified, default to the default module at
+ // the major version being used by this module.
+ if i := strings.Index(versionID, "."); i >= 0 {
+ versionID = versionID[:i]
+ }
+ req.VersionId = []string{versionID}
+ } else {
+ req.ModuleVersion = make([]*pb.LogModuleVersion, 0, len(params.Versions))
+ for _, v := range params.Versions {
+ var m *string
+ if i := strings.Index(v, ":"); i >= 0 {
+ m, v = proto.String(v[:i]), v[i+1:]
+ }
+ req.ModuleVersion = append(req.ModuleVersion, &pb.LogModuleVersion{
+ ModuleId: m,
+ VersionId: proto.String(v),
+ })
+ }
+ }
+ if params.RequestIDs != nil {
+ ids := make([][]byte, len(params.RequestIDs))
+ for i, v := range params.RequestIDs {
+ ids[i] = []byte(v)
+ }
+ req.RequestId = ids
+ }
+
+ return req, nil
+}
+
+// run takes the query Result produced by a call to Run and updates it with
+// more Records. The updated Result contains a new set of logs as well as an
+// offset to where more logs can be found. We also convert the items in the
+// response from their internal representations to external versions of the
+// same structs.
+func (r *Result) run() error {
+ res := &pb.LogReadResponse{}
+ if err := internal.Call(r.context, "logservice", "Read", r.request, res); err != nil {
+ return err
+ }
+
+ r.logs = make([]*Record, len(res.Log))
+ r.request.Offset = res.Offset
+ r.resultsSeen = true
+
+ for i, log := range res.Log {
+ r.logs[i] = protoToRecord(log)
+ }
+
+ return nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("logservice", pb.LogServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/log/log_test.go b/vendor/google.golang.org/appengine/log/log_test.go
new file mode 100644
index 0000000..726468e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/log/log_test.go
@@ -0,0 +1,112 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ pb "google.golang.org/appengine/internal/log"
+)
+
+func TestQueryToRequest(t *testing.T) {
+ testCases := []struct {
+ desc string
+ query *Query
+ want *pb.LogReadRequest
+ }{
+ {
+ desc: "Empty",
+ query: &Query{},
+ want: &pb.LogReadRequest{
+ AppId: proto.String("s~fake"),
+ VersionId: []string{"v12"},
+ },
+ },
+ {
+ desc: "Versions",
+ query: &Query{
+ Versions: []string{"alpha", "backend:beta"},
+ },
+ want: &pb.LogReadRequest{
+ AppId: proto.String("s~fake"),
+ ModuleVersion: []*pb.LogModuleVersion{
+ {
+ VersionId: proto.String("alpha"),
+ }, {
+ ModuleId: proto.String("backend"),
+ VersionId: proto.String("beta"),
+ },
+ },
+ },
+ },
+ }
+
+ for _, tt := range testCases {
+ req, err := makeRequest(tt.query, "s~fake", "v12")
+
+ if err != nil {
+ t.Errorf("%s: got err %v, want nil", tt.desc, err)
+ continue
+ }
+ if !proto.Equal(req, tt.want) {
+ t.Errorf("%s request:\ngot %v\nwant %v", tt.desc, req, tt.want)
+ }
+ }
+}
+
+func TestProtoToRecord(t *testing.T) {
+ // We deliberately leave ModuleId and other optional fields unset.
+ p := &pb.RequestLog{
+ AppId: proto.String("s~fake"),
+ VersionId: proto.String("1"),
+ RequestId: []byte("deadbeef"),
+ Ip: proto.String("127.0.0.1"),
+ StartTime: proto.Int64(431044244000000),
+ EndTime: proto.Int64(431044724000000),
+ Latency: proto.Int64(480000000),
+ Mcycles: proto.Int64(7),
+ Method: proto.String("GET"),
+ Resource: proto.String("/app"),
+ HttpVersion: proto.String("1.1"),
+ Status: proto.Int32(418),
+ ResponseSize: proto.Int64(1337),
+ UrlMapEntry: proto.String("_go_app"),
+ Combined: proto.String("apache log"),
+ }
+ // Sanity check that all required fields are set.
+ if _, err := proto.Marshal(p); err != nil {
+ t.Fatalf("proto.Marshal: %v", err)
+ }
+ want := &Record{
+ AppID: "s~fake",
+ ModuleID: "default",
+ VersionID: "1",
+ RequestID: []byte("deadbeef"),
+ IP: "127.0.0.1",
+ StartTime: time.Date(1983, 8, 29, 22, 30, 44, 0, time.UTC),
+ EndTime: time.Date(1983, 8, 29, 22, 38, 44, 0, time.UTC),
+ Latency: 8 * time.Minute,
+ MCycles: 7,
+ Method: "GET",
+ Resource: "/app",
+ HTTPVersion: "1.1",
+ Status: 418,
+ ResponseSize: 1337,
+ URLMapEntry: "_go_app",
+ Combined: "apache log",
+ Finished: true,
+ AppLogs: []AppLog{},
+ }
+ got := protoToRecord(p)
+ // Coerce locations to UTC since otherwise they will be in local.
+ got.StartTime, got.EndTime = got.StartTime.UTC(), got.EndTime.UTC()
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("protoToRecord:\ngot: %v\nwant: %v", got, want)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/mail/mail.go b/vendor/google.golang.org/appengine/mail/mail.go
new file mode 100644
index 0000000..f7955aa
--- /dev/null
+++ b/vendor/google.golang.org/appengine/mail/mail.go
@@ -0,0 +1,123 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package mail provides the means of sending email from an
+App Engine application.
+
+Example:
+ msg := &mail.Message{
+ Sender: "romeo@montague.com",
+ To: []string{"Juliet <juliet@capulet.org>"},
+ Subject: "See you tonight",
+ Body: "Don't forget our plans. Hark, 'til later.",
+ }
+ if err := mail.Send(c, msg); err != nil {
+ log.Errorf(c, "Alas, my user, the email failed to sendeth: %v", err)
+ }
+*/
+package mail // import "google.golang.org/appengine/mail"
+
+import (
+ "net/mail"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ bpb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/mail"
+)
+
+// A Message represents an email message.
+// Addresses may be of any form permitted by RFC 822.
+type Message struct {
+ // Sender must be set, and must be either an application admin
+ // or the currently signed-in user.
+ Sender string
+ ReplyTo string // may be empty
+
+ // At least one of these slices must have a non-zero length,
+ // except when calling SendToAdmins.
+ To, Cc, Bcc []string
+
+ Subject string
+
+ // At least one of Body or HTMLBody must be non-empty.
+ Body string
+ HTMLBody string
+
+ Attachments []Attachment
+
+ // Extra mail headers.
+ // See https://cloud.google.com/appengine/docs/go/mail/
+ // for permissible headers.
+ Headers mail.Header
+}
+
+// An Attachment represents an email attachment.
+type Attachment struct {
+ // Name must be set to a valid file name.
+ Name string
+ Data []byte
+ ContentID string
+}
+
+// Send sends an email message.
+func Send(c context.Context, msg *Message) error {
+ return send(c, "Send", msg)
+}
+
+// SendToAdmins sends an email message to the application's administrators.
+func SendToAdmins(c context.Context, msg *Message) error {
+ return send(c, "SendToAdmins", msg)
+}
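+
+// A minimal sketch (hypothetical addresses, data and header) of a message
+// carrying an attachment and an extra header; send below maps these fields
+// onto the MailMessage proto.
+//
+//	msg := &Message{
+//		Sender:      "admin@example.com",
+//		To:          []string{"user@example.com"},
+//		Subject:     "Weekly report",
+//		Body:        "See attached.",
+//		Attachments: []Attachment{{Name: "report.csv", Data: csvData}},
+//		Headers:     mail.Header{"References": []string{"<msg-id@example.com>"}},
+//	}
+//	err := Send(c, msg)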
+
+func send(c context.Context, method string, msg *Message) error {
+ req := &pb.MailMessage{
+ Sender: &msg.Sender,
+ To: msg.To,
+ Cc: msg.Cc,
+ Bcc: msg.Bcc,
+ Subject: &msg.Subject,
+ }
+ if msg.ReplyTo != "" {
+ req.ReplyTo = &msg.ReplyTo
+ }
+ if msg.Body != "" {
+ req.TextBody = &msg.Body
+ }
+ if msg.HTMLBody != "" {
+ req.HtmlBody = &msg.HTMLBody
+ }
+ if len(msg.Attachments) > 0 {
+ req.Attachment = make([]*pb.MailAttachment, len(msg.Attachments))
+ for i, att := range msg.Attachments {
+ req.Attachment[i] = &pb.MailAttachment{
+ FileName: proto.String(att.Name),
+ Data: att.Data,
+ }
+ if att.ContentID != "" {
+ req.Attachment[i].ContentID = proto.String(att.ContentID)
+ }
+ }
+ }
+ for key, vs := range msg.Headers {
+ for _, v := range vs {
+ req.Header = append(req.Header, &pb.MailHeader{
+ Name: proto.String(key),
+ Value: proto.String(v),
+ })
+ }
+ }
+ res := &bpb.VoidProto{}
+ if err := internal.Call(c, "mail", method, req, res); err != nil {
+ return err
+ }
+ return nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("mail", pb.MailServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/mail/mail_test.go b/vendor/google.golang.org/appengine/mail/mail_test.go
new file mode 100644
index 0000000..7502c59
--- /dev/null
+++ b/vendor/google.golang.org/appengine/mail/mail_test.go
@@ -0,0 +1,65 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package mail
+
+import (
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal/aetesting"
+ basepb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/mail"
+)
+
+func TestMessageConstruction(t *testing.T) {
+ var got *pb.MailMessage
+ c := aetesting.FakeSingleContext(t, "mail", "Send", func(in *pb.MailMessage, out *basepb.VoidProto) error {
+ got = in
+ return nil
+ })
+
+ msg := &Message{
+ Sender: "dsymonds@example.com",
+ To: []string{"nigeltao@example.com"},
+ Body: "Hey, lunch time?",
+ Attachments: []Attachment{
+ // Regression test for a prod bug. The address of a range variable was used when
+ // constructing the outgoing proto, so multiple attachments used the same name.
+ {
+ Name: "att1.txt",
+ Data: []byte("data1"),
+ ContentID: "<att1>",
+ },
+ {
+ Name: "att2.txt",
+ Data: []byte("data2"),
+ },
+ },
+ }
+ if err := Send(c, msg); err != nil {
+ t.Fatalf("Send: %v", err)
+ }
+ want := &pb.MailMessage{
+ Sender: proto.String("dsymonds@example.com"),
+ To: []string{"nigeltao@example.com"},
+ Subject: proto.String(""),
+ TextBody: proto.String("Hey, lunch time?"),
+ Attachment: []*pb.MailAttachment{
+ {
+ FileName: proto.String("att1.txt"),
+ Data: []byte("data1"),
+ ContentID: proto.String("<att1>"),
+ },
+ {
+ FileName: proto.String("att2.txt"),
+ Data: []byte("data2"),
+ },
+ },
+ }
+ if !proto.Equal(got, want) {
+ t.Errorf("Bad proto for %+v\n got %v\nwant %v", msg, got, want)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/memcache/memcache.go b/vendor/google.golang.org/appengine/memcache/memcache.go
new file mode 100644
index 0000000..d8eed4b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/memcache/memcache.go
@@ -0,0 +1,526 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package memcache provides a client for App Engine's distributed in-memory
+// key-value store for small chunks of arbitrary data.
+//
+// The fundamental operations get and set items, keyed by a string.
+//
+// item0, err := memcache.Get(c, "key")
+// if err != nil && err != memcache.ErrCacheMiss {
+// return err
+// }
+// if err == nil {
+// fmt.Fprintf(w, "memcache hit: Key=%q Val=[% x]\n", item0.Key, item0.Value)
+// } else {
+// fmt.Fprintf(w, "memcache miss\n")
+// }
+//
+// and
+//
+// item1 := &memcache.Item{
+// Key: "foo",
+// Value: []byte("bar"),
+// }
+// if err := memcache.Set(c, item1); err != nil {
+// return err
+// }
+package memcache // import "google.golang.org/appengine/memcache"
+
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/json"
+ "errors"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/memcache"
+)
+
+var (
+ // ErrCacheMiss means that an operation failed
+ // because the item wasn't present.
+ ErrCacheMiss = errors.New("memcache: cache miss")
+ // ErrCASConflict means that a CompareAndSwap call failed due to the
+ // cached value being modified between the Get and the CompareAndSwap.
+ // If the cached value was simply evicted rather than replaced,
+ // ErrNotStored will be returned instead.
+ ErrCASConflict = errors.New("memcache: compare-and-swap conflict")
+ // ErrNoStats means that no statistics were available.
+ ErrNoStats = errors.New("memcache: no statistics available")
+ // ErrNotStored means that a conditional write operation (i.e. Add or
+ // CompareAndSwap) failed because the condition was not satisfied.
+ ErrNotStored = errors.New("memcache: item not stored")
+ // ErrServerError means that a server error occurred.
+ ErrServerError = errors.New("memcache: server error")
+)
+
+// Item is the unit of memcache gets and sets.
+type Item struct {
+ // Key is the Item's key (250 bytes maximum).
+ Key string
+ // Value is the Item's value.
+ Value []byte
+ // Object is the Item's value for use with a Codec.
+ Object interface{}
+ // Flags are server-opaque flags whose semantics are entirely up to the
+ // App Engine app.
+ Flags uint32
+ // Expiration is the maximum duration that the item will stay
+ // in the cache.
+ // The zero value means the Item has no expiration time.
+ // Subsecond precision is ignored.
+ // This is not set when getting items.
+ Expiration time.Duration
+ // casID is a client-opaque value used for compare-and-swap operations.
+ // Zero means that compare-and-swap is not used.
+ casID uint64
+}
+
+const (
+ secondsIn30Years = 60 * 60 * 24 * 365 * 30 // from memcache server code
+ thirtyYears = time.Duration(secondsIn30Years) * time.Second
+)
+
+// protoToItem converts a protocol buffer item to a Go struct.
+func protoToItem(p *pb.MemcacheGetResponse_Item) *Item {
+ return &Item{
+ Key: string(p.Key),
+ Value: p.Value,
+ Flags: p.GetFlags(),
+ casID: p.GetCasId(),
+ }
+}
+
+// If err is an appengine.MultiError, return its first element. Otherwise, return err.
+func singleError(err error) error {
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
+// Get gets the item for the given key. ErrCacheMiss is returned for a memcache
+// cache miss. The key must be at most 250 bytes in length.
+func Get(c context.Context, key string) (*Item, error) {
+ m, err := GetMulti(c, []string{key})
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := m[key]; !ok {
+ return nil, ErrCacheMiss
+ }
+ return m[key], nil
+}
+
+// GetMulti is a batch version of Get. The returned map from keys to items may
+// have fewer elements than the input slice, due to memcache cache misses.
+// Each key must be at most 250 bytes in length.
+func GetMulti(c context.Context, key []string) (map[string]*Item, error) {
+ if len(key) == 0 {
+ return nil, nil
+ }
+ keyAsBytes := make([][]byte, len(key))
+ for i, k := range key {
+ keyAsBytes[i] = []byte(k)
+ }
+ req := &pb.MemcacheGetRequest{
+ Key: keyAsBytes,
+ ForCas: proto.Bool(true),
+ }
+ res := &pb.MemcacheGetResponse{}
+ if err := internal.Call(c, "memcache", "Get", req, res); err != nil {
+ return nil, err
+ }
+ m := make(map[string]*Item, len(res.Item))
+ for _, p := range res.Item {
+ t := protoToItem(p)
+ m[t.Key] = t
+ }
+ return m, nil
+}
+
+// Delete deletes the item for the given key.
+// ErrCacheMiss is returned if the specified item cannot be found.
+// The key must be at most 250 bytes in length.
+func Delete(c context.Context, key string) error {
+ return singleError(DeleteMulti(c, []string{key}))
+}
+
+// DeleteMulti is a batch version of Delete.
+// If any keys cannot be found, an appengine.MultiError is returned.
+// Each key must be at most 250 bytes in length.
+func DeleteMulti(c context.Context, key []string) error {
+ if len(key) == 0 {
+ return nil
+ }
+ req := &pb.MemcacheDeleteRequest{
+ Item: make([]*pb.MemcacheDeleteRequest_Item, len(key)),
+ }
+ for i, k := range key {
+ req.Item[i] = &pb.MemcacheDeleteRequest_Item{Key: []byte(k)}
+ }
+ res := &pb.MemcacheDeleteResponse{}
+ if err := internal.Call(c, "memcache", "Delete", req, res); err != nil {
+ return err
+ }
+ if len(res.DeleteStatus) != len(key) {
+ return ErrServerError
+ }
+ me, any := make(appengine.MultiError, len(key)), false
+ for i, s := range res.DeleteStatus {
+ switch s {
+ case pb.MemcacheDeleteResponse_DELETED:
+ // OK
+ case pb.MemcacheDeleteResponse_NOT_FOUND:
+ me[i] = ErrCacheMiss
+ any = true
+ default:
+ me[i] = ErrServerError
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
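+
+// A minimal sketch (hypothetical keys) of inspecting per-key failures, which
+// are reported as an appengine.MultiError by DeleteMulti.
+//
+//	if err := DeleteMulti(c, []string{"a", "b"}); err != nil {
+//		if me, ok := err.(appengine.MultiError); ok {
+//			// me[i] is ErrCacheMiss or ErrServerError for keys that failed,
+//			// and nil for keys that were deleted.
+//		}
+//	}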
+
+// Increment atomically increments the decimal value in the given key
+// by delta and returns the new value. The value must fit in a uint64.
+// Overflow wraps around, and underflow is capped to zero. The
+// provided delta may be negative. If the key doesn't exist in
+// memcache, the provided initial value is used to atomically
+// populate it before the delta is applied.
+// The key must be at most 250 bytes in length.
+func Increment(c context.Context, key string, delta int64, initialValue uint64) (newValue uint64, err error) {
+ return incr(c, key, delta, &initialValue)
+}
+
+// IncrementExisting works like Increment but assumes that the key
+// already exists in memcache and doesn't take an initial value.
+// IncrementExisting can save work if calculating the initial value is
+// expensive.
+// An error is returned if the specified item cannot be found.
+func IncrementExisting(c context.Context, key string, delta int64) (newValue uint64, err error) {
+ return incr(c, key, delta, nil)
+}
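+
+// A minimal sketch (hypothetical key) of maintaining a counter: the first
+// call seeds the value with initialValue (here 0), later calls apply the
+// delta atomically.
+//
+//	hits, err := Increment(c, "page-hits", 1, 0)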
+
+func incr(c context.Context, key string, delta int64, initialValue *uint64) (newValue uint64, err error) {
+ req := &pb.MemcacheIncrementRequest{
+ Key: []byte(key),
+ InitialValue: initialValue,
+ }
+ if delta >= 0 {
+ req.Delta = proto.Uint64(uint64(delta))
+ } else {
+ req.Delta = proto.Uint64(uint64(-delta))
+ req.Direction = pb.MemcacheIncrementRequest_DECREMENT.Enum()
+ }
+ res := &pb.MemcacheIncrementResponse{}
+ err = internal.Call(c, "memcache", "Increment", req, res)
+ if err != nil {
+ return
+ }
+ if res.NewValue == nil {
+ return 0, ErrCacheMiss
+ }
+ return *res.NewValue, nil
+}
+
+// set sets the given items using the given conflict resolution policy.
+// appengine.MultiError may be returned.
+func set(c context.Context, item []*Item, value [][]byte, policy pb.MemcacheSetRequest_SetPolicy) error {
+ if len(item) == 0 {
+ return nil
+ }
+ req := &pb.MemcacheSetRequest{
+ Item: make([]*pb.MemcacheSetRequest_Item, len(item)),
+ }
+ for i, t := range item {
+ p := &pb.MemcacheSetRequest_Item{
+ Key: []byte(t.Key),
+ }
+ if value == nil {
+ p.Value = t.Value
+ } else {
+ p.Value = value[i]
+ }
+ if t.Flags != 0 {
+ p.Flags = proto.Uint32(t.Flags)
+ }
+ if t.Expiration != 0 {
+ // In the .proto file, MemcacheSetRequest_Item uses a fixed32 (i.e. unsigned)
+ // for expiration time, while MemcacheGetRequest_Item uses int32 (i.e. signed).
+ // Throughout this .go file, we use int32.
+ // Also, in the proto, the expiration value is either a duration (in seconds)
+ // or an absolute Unix timestamp (in seconds), depending on whether the
+ // value is less than or greater than or equal to 30 years, respectively.
+ if t.Expiration < time.Second {
+ // Because an Expiration of 0 means no expiration, we take
+ // care here to translate an item with an expiration
+ // Duration between 0-1 seconds as immediately expiring
+ // (saying it expired a few seconds ago), rather than
+ // rounding it down to 0 and making it live forever.
+ p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) - 5)
+ } else if t.Expiration >= thirtyYears {
+ p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) + uint32(t.Expiration/time.Second))
+ } else {
+ p.ExpirationTime = proto.Uint32(uint32(t.Expiration / time.Second))
+ }
+ }
+ if t.casID != 0 {
+ p.CasId = proto.Uint64(t.casID)
+ p.ForCas = proto.Bool(true)
+ }
+ p.SetPolicy = policy.Enum()
+ req.Item[i] = p
+ }
+ res := &pb.MemcacheSetResponse{}
+ if err := internal.Call(c, "memcache", "Set", req, res); err != nil {
+ return err
+ }
+ if len(res.SetStatus) != len(item) {
+ return ErrServerError
+ }
+ me, any := make(appengine.MultiError, len(item)), false
+ for i, st := range res.SetStatus {
+ var err error
+ switch st {
+ case pb.MemcacheSetResponse_STORED:
+ // OK
+ case pb.MemcacheSetResponse_NOT_STORED:
+ err = ErrNotStored
+ case pb.MemcacheSetResponse_EXISTS:
+ err = ErrCASConflict
+ default:
+ err = ErrServerError
+ }
+ if err != nil {
+ me[i] = err
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
+
+// Set writes the given item, unconditionally.
+func Set(c context.Context, item *Item) error {
+ return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_SET))
+}
+
+// SetMulti is a batch version of Set.
+// appengine.MultiError may be returned.
+func SetMulti(c context.Context, item []*Item) error {
+ return set(c, item, nil, pb.MemcacheSetRequest_SET)
+}
+
+// Add writes the given item, if no value already exists for its key.
+// ErrNotStored is returned if that condition is not met.
+func Add(c context.Context, item *Item) error {
+ return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_ADD))
+}
+
+// AddMulti is a batch version of Add.
+// appengine.MultiError may be returned.
+func AddMulti(c context.Context, item []*Item) error {
+ return set(c, item, nil, pb.MemcacheSetRequest_ADD)
+}
+
+// CompareAndSwap writes the given item that was previously returned by Get,
+// if the value was neither modified nor evicted between the Get and the
+// CompareAndSwap calls. The item's Key should not change between calls but
+// all other item fields may differ.
+// ErrCASConflict is returned if the value was modified in between the calls.
+// ErrNotStored is returned if the value was evicted in between the calls.
+func CompareAndSwap(c context.Context, item *Item) error {
+ return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_CAS))
+}
+
+// CompareAndSwapMulti is a batch version of CompareAndSwap.
+// appengine.MultiError may be returned.
+func CompareAndSwapMulti(c context.Context, item []*Item) error {
+ return set(c, item, nil, pb.MemcacheSetRequest_CAS)
+}
+
+// Codec represents a symmetric pair of functions that implement a codec.
+// Items stored into or retrieved from memcache using a Codec have their
+// values marshaled or unmarshaled.
+//
+// All the methods provided for Codec behave analogously to the package-level
+// functions with the same names.
+type Codec struct {
+ Marshal func(interface{}) ([]byte, error)
+ Unmarshal func([]byte, interface{}) error
+}
+
+// Get gets the item for the given key and decodes the obtained value into v.
+// ErrCacheMiss is returned for a memcache cache miss.
+// The key must be at most 250 bytes in length.
+func (cd Codec) Get(c context.Context, key string, v interface{}) (*Item, error) {
+ i, err := Get(c, key)
+ if err != nil {
+ return nil, err
+ }
+ if err := cd.Unmarshal(i.Value, v); err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+func (cd Codec) set(c context.Context, items []*Item, policy pb.MemcacheSetRequest_SetPolicy) error {
+ var vs [][]byte
+ var me appengine.MultiError
+ for i, item := range items {
+ v, err := cd.Marshal(item.Object)
+ if err != nil {
+ if me == nil {
+ me = make(appengine.MultiError, len(items))
+ }
+ me[i] = err
+ continue
+ }
+ if me == nil {
+ vs = append(vs, v)
+ }
+ }
+ if me != nil {
+ return me
+ }
+
+ return set(c, items, vs, policy)
+}
+
+// Set writes the given item, unconditionally.
+func (cd Codec) Set(c context.Context, item *Item) error {
+ return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_SET))
+}
+
+// SetMulti is a batch version of Set.
+// appengine.MultiError may be returned.
+func (cd Codec) SetMulti(c context.Context, items []*Item) error {
+ return cd.set(c, items, pb.MemcacheSetRequest_SET)
+}
+
+// Add writes the given item, if no value already exists for its key.
+// ErrNotStored is returned if that condition is not met.
+func (cd Codec) Add(c context.Context, item *Item) error {
+ return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_ADD))
+}
+
+// AddMulti is a batch version of Add.
+// appengine.MultiError may be returned.
+func (cd Codec) AddMulti(c context.Context, items []*Item) error {
+ return cd.set(c, items, pb.MemcacheSetRequest_ADD)
+}
+
+// CompareAndSwap writes the given item that was previously returned by Get,
+// if the value was neither modified nor evicted between the Get and the
+// CompareAndSwap calls. The item's Key should not change between calls but
+// all other item fields may differ.
+// ErrCASConflict is returned if the value was modified in between the calls.
+// ErrNotStored is returned if the value was evicted in between the calls.
+func (cd Codec) CompareAndSwap(c context.Context, item *Item) error {
+ return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_CAS))
+}
+
+// CompareAndSwapMulti is a batch version of CompareAndSwap.
+// appengine.MultiError may be returned.
+func (cd Codec) CompareAndSwapMulti(c context.Context, items []*Item) error {
+ return cd.set(c, items, pb.MemcacheSetRequest_CAS)
+}
+
+var (
+ // Gob is a Codec that uses the gob package.
+ Gob = Codec{gobMarshal, gobUnmarshal}
+ // JSON is a Codec that uses the json package.
+ JSON = Codec{json.Marshal, json.Unmarshal}
+)
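+
+// A minimal sketch (hypothetical type and key) of round-tripping a struct
+// through the JSON codec; Object is marshaled on Set and decoded on Get.
+//
+//	type Greeting struct{ Text string }
+//
+//	err := memcache.JSON.Set(c, &memcache.Item{Key: "greeting", Object: &Greeting{Text: "hello"}})
+//	var g Greeting
+//	_, err = memcache.JSON.Get(c, "greeting", &g)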
+
+func gobMarshal(v interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+ if err := gob.NewEncoder(&buf).Encode(v); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func gobUnmarshal(data []byte, v interface{}) error {
+ return gob.NewDecoder(bytes.NewBuffer(data)).Decode(v)
+}
+
+// Statistics represents a set of statistics about the memcache cache.
+// This may include items that have expired but have not yet been removed from the cache.
+type Statistics struct {
+ Hits uint64 // Counter of cache hits
+ Misses uint64 // Counter of cache misses
+ ByteHits uint64 // Counter of bytes transferred for gets
+
+ Items uint64 // Items currently in the cache
+ Bytes uint64 // Size of all items currently in the cache
+
+ Oldest int64 // Age of access of the oldest item, in seconds
+}
+
+// Stats retrieves the current memcache statistics.
+func Stats(c context.Context) (*Statistics, error) {
+ req := &pb.MemcacheStatsRequest{}
+ res := &pb.MemcacheStatsResponse{}
+ if err := internal.Call(c, "memcache", "Stats", req, res); err != nil {
+ return nil, err
+ }
+ if res.Stats == nil {
+ return nil, ErrNoStats
+ }
+ return &Statistics{
+ Hits: *res.Stats.Hits,
+ Misses: *res.Stats.Misses,
+ ByteHits: *res.Stats.ByteHits,
+ Items: *res.Stats.Items,
+ Bytes: *res.Stats.Bytes,
+ Oldest: int64(*res.Stats.OldestItemAge),
+ }, nil
+}
+
+// Flush flushes all items from memcache.
+func Flush(c context.Context) error {
+ req := &pb.MemcacheFlushRequest{}
+ res := &pb.MemcacheFlushResponse{}
+ return internal.Call(c, "memcache", "FlushAll", req, res)
+}
+
+func namespaceMod(m proto.Message, namespace string) {
+ switch m := m.(type) {
+ case *pb.MemcacheDeleteRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ case *pb.MemcacheGetRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ case *pb.MemcacheIncrementRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ case *pb.MemcacheSetRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ // MemcacheFlushRequest, MemcacheStatsRequest do not apply namespace.
+ }
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("memcache", pb.MemcacheServiceError_ErrorCode_name)
+ internal.NamespaceMods["memcache"] = namespaceMod
+}
diff --git a/vendor/google.golang.org/appengine/memcache/memcache_test.go b/vendor/google.golang.org/appengine/memcache/memcache_test.go
new file mode 100644
index 0000000..1dc7da4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/memcache/memcache_test.go
@@ -0,0 +1,263 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package memcache
+
+import (
+ "fmt"
+ "testing"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/memcache"
+)
+
+var errRPC = fmt.Errorf("RPC error")
+
+func TestGetRequest(t *testing.T) {
+ serviceCalled := false
+ apiKey := "lyric"
+
+ c := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, _ *pb.MemcacheGetResponse) error {
+ // Test request.
+ if n := len(req.Key); n != 1 {
+ t.Errorf("got %d want 1", n)
+ return nil
+ }
+ if k := string(req.Key[0]); k != apiKey {
+ t.Errorf("got %q want %q", k, apiKey)
+ }
+
+ serviceCalled = true
+ return nil
+ })
+
+ // Test the "forward" path from the API call parameters to the
+ // protobuf request object. (The "backward" path from the
+ // protobuf response object to the API call response,
+ // including the error response, is handled in the next few
+ // tests).
+ Get(c, apiKey)
+ if !serviceCalled {
+ t.Error("Service was not called as expected")
+ }
+}
+
+func TestGetResponseHit(t *testing.T) {
+ key := "lyric"
+ value := "Where the buffalo roam"
+
+ c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error {
+ res.Item = []*pb.MemcacheGetResponse_Item{
+ {Key: []byte(key), Value: []byte(value)},
+ }
+ return nil
+ })
+ apiItem, err := Get(c, key)
+ if apiItem == nil || apiItem.Key != key || string(apiItem.Value) != value {
+ t.Errorf("got %q, %q want {%q,%q}, nil", apiItem, err, key, value)
+ }
+}
+
+func TestGetResponseMiss(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error {
+ // don't fill in any of the response
+ return nil
+ })
+ _, err := Get(c, "something")
+ if err != ErrCacheMiss {
+ t.Errorf("got %v want ErrCacheMiss", err)
+ }
+}
+
+func TestGetResponseRPCError(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error {
+ return errRPC
+ })
+
+ if _, err := Get(c, "something"); err != errRPC {
+ t.Errorf("got %v want errRPC", err)
+ }
+}
+
+func TestAddRequest(t *testing.T) {
+ var apiItem = &Item{
+ Key: "lyric",
+ Value: []byte("Oh, give me a home"),
+ }
+
+ serviceCalled := false
+
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(req *pb.MemcacheSetRequest, _ *pb.MemcacheSetResponse) error {
+ // Test request.
+ pbItem := req.Item[0]
+ if k := string(pbItem.Key); k != apiItem.Key {
+ t.Errorf("got %q want %q", k, apiItem.Key)
+ }
+ if v := string(apiItem.Value); v != string(pbItem.Value) {
+ t.Errorf("got %q want %q", v, string(pbItem.Value))
+ }
+ if p := *pbItem.SetPolicy; p != pb.MemcacheSetRequest_ADD {
+ t.Errorf("got %v want %v", p, pb.MemcacheSetRequest_ADD)
+ }
+
+ serviceCalled = true
+ return nil
+ })
+
+ Add(c, apiItem)
+ if !serviceCalled {
+ t.Error("Service was not called as expected")
+ }
+}
+
+func TestAddResponseStored(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_STORED}
+ return nil
+ })
+
+ if err := Add(c, &Item{}); err != nil {
+ t.Errorf("got %v want nil", err)
+ }
+}
+
+func TestAddResponseNotStored(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_NOT_STORED}
+ return nil
+ })
+
+ if err := Add(c, &Item{}); err != ErrNotStored {
+ t.Errorf("got %v want ErrNotStored", err)
+ }
+}
+
+func TestAddResponseError(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_ERROR}
+ return nil
+ })
+
+ if err := Add(c, &Item{}); err != ErrServerError {
+ t.Errorf("got %v want ErrServerError", err)
+ }
+}
+
+func TestAddResponseRPCError(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ return errRPC
+ })
+
+ if err := Add(c, &Item{}); err != errRPC {
+ t.Errorf("got %v want errRPC", err)
+ }
+}
+
+func TestSetRequest(t *testing.T) {
+ var apiItem = &Item{
+ Key: "lyric",
+ Value: []byte("Where the buffalo roam"),
+ }
+
+ serviceCalled := false
+
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(req *pb.MemcacheSetRequest, _ *pb.MemcacheSetResponse) error {
+ // Test request.
+ if n := len(req.Item); n != 1 {
+ t.Errorf("got %d want 1", n)
+ return nil
+ }
+ pbItem := req.Item[0]
+ if k := string(pbItem.Key); k != apiItem.Key {
+ t.Errorf("got %q want %q", k, apiItem.Key)
+ }
+ if v := string(pbItem.Value); v != string(apiItem.Value) {
+ t.Errorf("got %q want %q", v, string(apiItem.Value))
+ }
+ if p := *pbItem.SetPolicy; p != pb.MemcacheSetRequest_SET {
+ t.Errorf("got %v want %v", p, pb.MemcacheSetRequest_SET)
+ }
+
+ serviceCalled = true
+ return nil
+ })
+
+ Set(c, apiItem)
+ if !serviceCalled {
+ t.Error("Service was not called as expected")
+ }
+}
+
+func TestSetResponse(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_STORED}
+ return nil
+ })
+
+ if err := Set(c, &Item{}); err != nil {
+ t.Errorf("got %v want nil", err)
+ }
+}
+
+func TestSetResponseError(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_ERROR}
+ return nil
+ })
+
+ if err := Set(c, &Item{}); err != ErrServerError {
+ t.Errorf("got %v want ErrServerError", err)
+ }
+}
+
+func TestNamespaceResetting(t *testing.T) {
+ namec := make(chan *string, 1)
+ c0 := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error {
+ namec <- req.NameSpace
+ return errRPC
+ })
+
+ // Check that wrapping c0 in a namespace twice works correctly.
+ c1, err := appengine.Namespace(c0, "A")
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+ c2, err := appengine.Namespace(c1, "") // should act as the original context
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+
+ Get(c0, "key")
+ if ns := <-namec; ns != nil {
+ t.Errorf(`Get with c0: ns = %q, want nil`, *ns)
+ }
+
+ Get(c1, "key")
+ if ns := <-namec; ns == nil {
+ t.Error(`Get with c1: ns = nil, want "A"`)
+ } else if *ns != "A" {
+ t.Errorf(`Get with c1: ns = %q, want "A"`, *ns)
+ }
+
+ Get(c2, "key")
+ if ns := <-namec; ns != nil {
+ t.Errorf(`Get with c2: ns = %q, want nil`, *ns)
+ }
+}
+
+func TestGetMultiEmpty(t *testing.T) {
+ serviceCalled := false
+ c := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, _ *pb.MemcacheGetResponse) error {
+ serviceCalled = true
+ return nil
+ })
+
+ // Test that the Memcache service is not called when
+ // GetMulti is passed an empty slice of keys.
+ GetMulti(c, []string{})
+ if serviceCalled {
+ t.Error("Service was called but should not have been")
+ }
+}
diff --git a/vendor/google.golang.org/appengine/module/module.go b/vendor/google.golang.org/appengine/module/module.go
new file mode 100644
index 0000000..88e6629
--- /dev/null
+++ b/vendor/google.golang.org/appengine/module/module.go
@@ -0,0 +1,113 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package module provides functions for interacting with modules.
+
+The appengine package contains functions that report the identity of the app,
+including the module name.
+*/
+package module // import "google.golang.org/appengine/module"
+
+import (
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/modules"
+)
+
+// List returns the names of modules belonging to this application.
+func List(c context.Context) ([]string, error) {
+ req := &pb.GetModulesRequest{}
+ res := &pb.GetModulesResponse{}
+ err := internal.Call(c, "modules", "GetModules", req, res)
+ return res.Module, err
+}
+
+// NumInstances returns the number of instances of the given module/version.
+// If either argument is the empty string it means the default.
+func NumInstances(c context.Context, module, version string) (int, error) {
+ req := &pb.GetNumInstancesRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ res := &pb.GetNumInstancesResponse{}
+
+ if err := internal.Call(c, "modules", "GetNumInstances", req, res); err != nil {
+ return 0, err
+ }
+ return int(*res.Instances), nil
+}
+
+// SetNumInstances sets the number of instances of the given module/version to the
+// specified value. If either module or version is the empty string, it means the
+// default.
+func SetNumInstances(c context.Context, module, version string, instances int) error {
+ req := &pb.SetNumInstancesRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ req.Instances = proto.Int64(int64(instances))
+ res := &pb.SetNumInstancesResponse{}
+ return internal.Call(c, "modules", "SetNumInstances", req, res)
+}
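+
+// A minimal sketch (hypothetical module and version names) combining the two
+// calls above to scale a module up to at least five instances.
+//
+//	n, err := NumInstances(c, "worker", "v1")
+//	if err == nil && n < 5 {
+//		err = SetNumInstances(c, "worker", "v1", 5)
+//	}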
+
+// Versions returns the names of the versions that belong to the specified module.
+// If module is the empty string, it means the default module.
+func Versions(c context.Context, module string) ([]string, error) {
+ req := &pb.GetVersionsRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ res := &pb.GetVersionsResponse{}
+ err := internal.Call(c, "modules", "GetVersions", req, res)
+ return res.GetVersion(), err
+}
+
+// DefaultVersion returns the default version of the specified module.
+// If module is the empty string, it means the default module.
+func DefaultVersion(c context.Context, module string) (string, error) {
+ req := &pb.GetDefaultVersionRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ res := &pb.GetDefaultVersionResponse{}
+ err := internal.Call(c, "modules", "GetDefaultVersion", req, res)
+ return res.GetVersion(), err
+}
+
+// Start starts the specified version of the specified module.
+// If either module or version is the empty string, it means the default.
+func Start(c context.Context, module, version string) error {
+ req := &pb.StartModuleRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ res := &pb.StartModuleResponse{}
+ return internal.Call(c, "modules", "StartModule", req, res)
+}
+
+// Stop stops the specified version of the specified module.
+// If either module or version is the empty string, it means the default.
+func Stop(c context.Context, module, version string) error {
+ req := &pb.StopModuleRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ res := &pb.StopModuleResponse{}
+ return internal.Call(c, "modules", "StopModule", req, res)
+}
diff --git a/vendor/google.golang.org/appengine/module/module_test.go b/vendor/google.golang.org/appengine/module/module_test.go
new file mode 100644
index 0000000..73e8971
--- /dev/null
+++ b/vendor/google.golang.org/appengine/module/module_test.go
@@ -0,0 +1,124 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package module
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/modules"
+)
+
+const version = "test-version"
+const module = "test-module"
+const instances = 3
+
+func TestList(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "GetModules", func(req *pb.GetModulesRequest, res *pb.GetModulesResponse) error {
+ res.Module = []string{"default", "mod1"}
+ return nil
+ })
+ got, err := List(c)
+ if err != nil {
+ t.Fatalf("List: %v", err)
+ }
+ want := []string{"default", "mod1"}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("List = %v, want %v", got, want)
+ }
+}
+
+func TestSetNumInstances(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "SetNumInstances", func(req *pb.SetNumInstancesRequest, res *pb.SetNumInstancesResponse) error {
+ if *req.Module != module {
+ t.Errorf("Module = %v, want %v", req.Module, module)
+ }
+ if *req.Version != version {
+ t.Errorf("Version = %v, want %v", req.Version, version)
+ }
+ if *req.Instances != instances {
+ t.Errorf("Instances = %v, want %d", req.Instances, instances)
+ }
+ return nil
+ })
+ err := SetNumInstances(c, module, version, instances)
+ if err != nil {
+ t.Fatalf("SetNumInstances: %v", err)
+ }
+}
+
+func TestVersions(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "GetVersions", func(req *pb.GetVersionsRequest, res *pb.GetVersionsResponse) error {
+ if *req.Module != module {
+ t.Errorf("Module = %v, want %v", req.Module, module)
+ }
+ res.Version = []string{"v1", "v2", "v3"}
+ return nil
+ })
+ got, err := Versions(c, module)
+ if err != nil {
+ t.Fatalf("Versions: %v", err)
+ }
+ want := []string{"v1", "v2", "v3"}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("Versions = %v, want %v", got, want)
+ }
+}
+
+func TestDefaultVersion(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "GetDefaultVersion", func(req *pb.GetDefaultVersionRequest, res *pb.GetDefaultVersionResponse) error {
+ if *req.Module != module {
+ t.Errorf("Module = %v, want %v", req.Module, module)
+ }
+ res.Version = proto.String(version)
+ return nil
+ })
+ got, err := DefaultVersion(c, module)
+ if err != nil {
+ t.Fatalf("DefaultVersion: %v", err)
+ }
+ if got != version {
+ t.Errorf("Version = %v, want %v", got, version)
+ }
+}
+
+func TestStart(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "StartModule", func(req *pb.StartModuleRequest, res *pb.StartModuleResponse) error {
+ if *req.Module != module {
+ t.Errorf("Module = %v, want %v", req.Module, module)
+ }
+ if *req.Version != version {
+ t.Errorf("Version = %v, want %v", req.Version, version)
+ }
+ return nil
+ })
+
+ err := Start(c, module, version)
+ if err != nil {
+ t.Fatalf("Start: %v", err)
+ }
+}
+
+func TestStop(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "StopModule", func(req *pb.StopModuleRequest, res *pb.StopModuleResponse) error {
+ version := "test-version"
+ module := "test-module"
+ if *req.Module != module {
+ t.Errorf("Module = %v, want %v", req.Module, module)
+ }
+ if *req.Version != version {
+ t.Errorf("Version = %v, want %v", req.Version, version)
+ }
+ return nil
+ })
+
+ err := Stop(c, module, version)
+ if err != nil {
+ t.Fatalf("Stop: %v", err)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go
new file mode 100644
index 0000000..21860ca
--- /dev/null
+++ b/vendor/google.golang.org/appengine/namespace.go
@@ -0,0 +1,25 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "fmt"
+ "regexp"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// Namespace returns a replacement context that operates within the given namespace.
+func Namespace(c context.Context, namespace string) (context.Context, error) {
+ if !validNamespace.MatchString(namespace) {
+ return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace)
+ }
+ return internal.NamespacedContext(c, namespace), nil
+}
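+
+// A minimal sketch (hypothetical namespace name and key): wrap a request
+// context in a namespace before calling a service such as memcache, which
+// applies the namespace via internal.NamespaceMods.
+//
+//	nsCtx, err := appengine.Namespace(ctx, "customer-42")
+//	if err != nil {
+//		return err
+//	}
+//	item, err := memcache.Get(nsCtx, "key")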
+
+// validNamespace matches valid namespace names.
+var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`)
diff --git a/vendor/google.golang.org/appengine/namespace_test.go b/vendor/google.golang.org/appengine/namespace_test.go
new file mode 100644
index 0000000..847f640
--- /dev/null
+++ b/vendor/google.golang.org/appengine/namespace_test.go
@@ -0,0 +1,39 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "testing"
+
+ "golang.org/x/net/context"
+)
+
+func TestNamespaceValidity(t *testing.T) {
+ testCases := []struct {
+ namespace string
+ ok bool
+ }{
+ // data from Python's namespace_manager_test.py
+ {"", true},
+ {"__a.namespace.123__", true},
+ {"-_A....NAMESPACE-_", true},
+ {"-", true},
+ {".", true},
+ {".-", true},
+
+ {"?", false},
+ {"+", false},
+ {"!", false},
+ {" ", false},
+ }
+ for _, tc := range testCases {
+ _, err := Namespace(context.Background(), tc.namespace)
+ if err == nil && !tc.ok {
+ t.Errorf("Namespace %q should be rejected, but wasn't", tc.namespace)
+ } else if err != nil && tc.ok {
+ t.Errorf("Namespace %q should be accepted, but wasn't", tc.namespace)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/remote_api/client.go b/vendor/google.golang.org/appengine/remote_api/client.go
new file mode 100644
index 0000000..dbe219d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/remote_api/client.go
@@ -0,0 +1,174 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package remote_api
+
+// This file provides the client for connecting remotely to a user's production
+// application.
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/remote_api"
+)
+
+// NewRemoteContext returns a context that gives access to the production
+// APIs for the application at the given host. All communication will be
+// performed over SSL unless the host is localhost.
+func NewRemoteContext(host string, client *http.Client) (context.Context, error) {
+ // Add an appcfg header to outgoing requests.
+ t := client.Transport
+ if t == nil {
+ t = http.DefaultTransport
+ }
+ client.Transport = &headerAddingRoundTripper{t}
+
+ url := url.URL{
+ Scheme: "https",
+ Host: host,
+ Path: "/_ah/remote_api",
+ }
+ if host == "localhost" || strings.HasPrefix(host, "localhost:") {
+ url.Scheme = "http"
+ }
+ u := url.String()
+ appID, err := getAppID(client, u)
+ if err != nil {
+ return nil, fmt.Errorf("unable to contact server: %v", err)
+ }
+ rc := &remoteContext{
+ client: client,
+ url: u,
+ }
+ ctx := internal.WithCallOverride(context.Background(), rc.call)
+ ctx = internal.WithLogOverride(ctx, rc.logf)
+ ctx = internal.WithAppIDOverride(ctx, appID)
+ return ctx, nil
+}
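+
+// A minimal sketch (hypothetical host; authedClient is assumed to be an
+// *http.Client whose transport supplies admin credentials, e.g. an OAuth2
+// transport) of obtaining a remote context and using it with a service
+// package.
+//
+//	ctx, err := NewRemoteContext("my-app.appspot.com", authedClient)
+//	if err != nil {
+//		return err
+//	}
+//	item, err := memcache.Get(ctx, "key")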
+
+type remoteContext struct {
+ client *http.Client
+ url string
+}
+
+var logLevels = map[int64]string{
+ 0: "DEBUG",
+ 1: "INFO",
+ 2: "WARNING",
+ 3: "ERROR",
+ 4: "CRITICAL",
+}
+
+func (c *remoteContext) logf(level int64, format string, args ...interface{}) {
+ log.Printf(logLevels[level]+": "+format, args...)
+}
+
+func (c *remoteContext) call(ctx context.Context, service, method string, in, out proto.Message) error {
+ req, err := proto.Marshal(in)
+ if err != nil {
+ return fmt.Errorf("error marshalling request: %v", err)
+ }
+
+ remReq := &pb.Request{
+ ServiceName: proto.String(service),
+ Method: proto.String(method),
+ Request: req,
+ // NOTE(djd): RequestId is unused in the server.
+ }
+
+ req, err = proto.Marshal(remReq)
+ if err != nil {
+ return fmt.Errorf("proto.Marshal: %v", err)
+ }
+
+ // TODO(djd): Respect ctx.Deadline()?
+ resp, err := c.client.Post(c.url, "application/octet-stream", bytes.NewReader(req))
+ if err != nil {
+ return fmt.Errorf("error sending request: %v", err)
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("bad response %d; body: %q", resp.StatusCode, body)
+ }
+ if err != nil {
+ return fmt.Errorf("failed reading response: %v", err)
+ }
+ remResp := &pb.Response{}
+ if err := proto.Unmarshal(body, remResp); err != nil {
+ return fmt.Errorf("error unmarshalling response: %v", err)
+ }
+
+ if ae := remResp.GetApplicationError(); ae != nil {
+ return &internal.APIError{
+ Code: ae.GetCode(),
+ Detail: ae.GetDetail(),
+ Service: service,
+ }
+ }
+
+ if remResp.Response == nil {
+ return fmt.Errorf("unexpected response: %s", proto.MarshalTextString(remResp))
+ }
+
+ return proto.Unmarshal(remResp.Response, out)
+}
+
+// This is a forgiving regexp designed to parse the app ID from YAML.
+var appIDRE = regexp.MustCompile(`app_id["']?\s*:\s*['"]?([-a-z0-9.:~]+)`)
+
+func getAppID(client *http.Client, url string) (string, error) {
+ // Generate a pseudo-random token for handshaking.
+ token := strconv.Itoa(rand.New(rand.NewSource(time.Now().UnixNano())).Int())
+
+ resp, err := client.Get(fmt.Sprintf("%s?rtok=%s", url, token))
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("bad response %d; body: %q", resp.StatusCode, body)
+ }
+ if err != nil {
+ return "", fmt.Errorf("failed reading response: %v", err)
+ }
+
+ // Check the token is present in response.
+ if !bytes.Contains(body, []byte(token)) {
+ return "", fmt.Errorf("token not found: want %q; body %q", token, body)
+ }
+
+ match := appIDRE.FindSubmatch(body)
+ if match == nil {
+ return "", fmt.Errorf("app ID not found: body %q", body)
+ }
+
+ return string(match[1]), nil
+}
+
+type headerAddingRoundTripper struct {
+ Wrapped http.RoundTripper
+}
+
+func (t *headerAddingRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
+ r.Header.Set("X-Appcfg-Api-Version", "1")
+ return t.Wrapped.RoundTrip(r)
+}
diff --git a/vendor/google.golang.org/appengine/remote_api/client_test.go b/vendor/google.golang.org/appengine/remote_api/client_test.go
new file mode 100644
index 0000000..2e892a0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/remote_api/client_test.go
@@ -0,0 +1,24 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package remote_api
+
+import (
+ "testing"
+)
+
+func TestAppIDRE(t *testing.T) {
+ appID := "s~my-appid-539"
+ tests := []string{
+ "{rtok: 8306111115908860449, app_id: s~my-appid-539}\n",
+ "{rtok: 8306111115908860449, app_id: 's~my-appid-539'}\n",
+ `{rtok: 8306111115908860449, app_id: "s~my-appid-539"}`,
+ `{rtok: 8306111115908860449, "app_id":"s~my-appid-539"}`,
+ }
+ for _, v := range tests {
+ if g := appIDRE.FindStringSubmatch(v); g == nil || g[1] != appID {
+ t.Errorf("appIDRE.FindStringSubmatch(%s) got %q, want %q", v, g, appID)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/remote_api/remote_api.go b/vendor/google.golang.org/appengine/remote_api/remote_api.go
new file mode 100644
index 0000000..68cd7d9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/remote_api/remote_api.go
@@ -0,0 +1,152 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package remote_api implements the /_ah/remote_api endpoint.
+This endpoint is used by offline tools such as the bulk loader.
+*/
+package remote_api // import "google.golang.org/appengine/remote_api"
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/remote_api"
+ "google.golang.org/appengine/log"
+ "google.golang.org/appengine/user"
+)
+
+func init() {
+ http.HandleFunc("/_ah/remote_api", handle)
+}
+
+func handle(w http.ResponseWriter, req *http.Request) {
+ c := appengine.NewContext(req)
+
+ u := user.Current(c)
+ if u == nil {
+ u, _ = user.CurrentOAuth(c,
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/appengine.apis",
+ )
+ }
+
+ if u == nil || !u.Admin {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.WriteHeader(http.StatusUnauthorized)
+ io.WriteString(w, "You must be logged in as an administrator to access this.\n")
+ return
+ }
+ if req.Header.Get("X-Appcfg-Api-Version") == "" {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.WriteHeader(http.StatusForbidden)
+ io.WriteString(w, "This request did not contain a necessary header.\n")
+ return
+ }
+
+ if req.Method != "POST" {
+ // Response must be YAML.
+ rtok := req.FormValue("rtok")
+ if rtok == "" {
+ rtok = "0"
+ }
+ w.Header().Set("Content-Type", "text/yaml; charset=utf-8")
+ fmt.Fprintf(w, `{app_id: %q, rtok: %q}`, internal.FullyQualifiedAppID(c), rtok)
+ return
+ }
+
+ defer req.Body.Close()
+ body, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ log.Errorf(c, "Failed reading body: %v", err)
+ return
+ }
+ remReq := &pb.Request{}
+ if err := proto.Unmarshal(body, remReq); err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ log.Errorf(c, "Bad body: %v", err)
+ return
+ }
+
+ service, method := *remReq.ServiceName, *remReq.Method
+ if !requestSupported(service, method) {
+ w.WriteHeader(http.StatusBadRequest)
+ log.Errorf(c, "Unsupported RPC /%s.%s", service, method)
+ return
+ }
+
+ rawReq := &rawMessage{remReq.Request}
+ rawRes := &rawMessage{}
+ err = internal.Call(c, service, method, rawReq, rawRes)
+
+ remRes := &pb.Response{}
+ if err == nil {
+ remRes.Response = rawRes.buf
+ } else if ae, ok := err.(*internal.APIError); ok {
+ remRes.ApplicationError = &pb.ApplicationError{
+ Code: &ae.Code,
+ Detail: &ae.Detail,
+ }
+ } else {
+ // This shouldn't normally happen.
+ log.Errorf(c, "appengine/remote_api: Unexpected error of type %T: %v", err, err)
+ remRes.ApplicationError = &pb.ApplicationError{
+ Code: proto.Int32(0),
+ Detail: proto.String(err.Error()),
+ }
+ }
+ out, err := proto.Marshal(remRes)
+ if err != nil {
+ // This should not be possible.
+ w.WriteHeader(500)
+ log.Errorf(c, "proto.Marshal: %v", err)
+ return
+ }
+
+ log.Infof(c, "Spooling %d bytes of response to /%s.%s", len(out), service, method)
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Length", strconv.Itoa(len(out)))
+ w.Write(out)
+}
+
+// rawMessage is a protocol buffer type that is already serialised.
+// This allows the remote_api code here to handle messages
+// without having to know the real type.
+type rawMessage struct {
+ buf []byte
+}
+
+func (rm *rawMessage) Marshal() ([]byte, error) {
+ return rm.buf, nil
+}
+
+func (rm *rawMessage) Unmarshal(buf []byte) error {
+ rm.buf = make([]byte, len(buf))
+ copy(rm.buf, buf)
+ return nil
+}
+
+func requestSupported(service, method string) bool {
+ // This list of supported services is taken from SERVICE_PB_MAP in remote_api_services.py
+ switch service {
+ case "app_identity_service", "blobstore", "capability_service", "channel", "datastore_v3",
+ "datastore_v4", "file", "images", "logservice", "mail", "matcher", "memcache", "remote_datastore",
+ "remote_socket", "search", "modules", "system", "taskqueue", "urlfetch", "user", "xmpp":
+ return true
+ }
+ return false
+}
+
+// Methods to satisfy proto.Message.
+func (rm *rawMessage) Reset() { rm.buf = nil }
+func (rm *rawMessage) String() string { return strconv.Quote(string(rm.buf)) }
+func (*rawMessage) ProtoMessage() {}
diff --git a/vendor/google.golang.org/appengine/runtime/runtime.go b/vendor/google.golang.org/appengine/runtime/runtime.go
new file mode 100644
index 0000000..fa6c12b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/runtime/runtime.go
@@ -0,0 +1,148 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package runtime exposes information about the resource usage of the application.
+It also provides a way to run code in a new background context of a module.
+
+This package does not work on App Engine "flexible environment".
+*/
+package runtime // import "google.golang.org/appengine/runtime"
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/system"
+)
+
+// Statistics represents the system's statistics.
+type Statistics struct {
+ // CPU records the CPU consumed by this instance, in megacycles.
+ CPU struct {
+ Total float64
+ Rate1M float64 // consumption rate over one minute
+ Rate10M float64 // consumption rate over ten minutes
+ }
+ // RAM records the memory used by the instance, in megabytes.
+ RAM struct {
+ Current float64
+ Average1M float64 // average usage over one minute
+ Average10M float64 // average usage over ten minutes
+ }
+}
+
+func Stats(c context.Context) (*Statistics, error) {
+ req := &pb.GetSystemStatsRequest{}
+ res := &pb.GetSystemStatsResponse{}
+ if err := internal.Call(c, "system", "GetSystemStats", req, res); err != nil {
+ return nil, err
+ }
+ s := &Statistics{}
+ if res.Cpu != nil {
+ s.CPU.Total = res.Cpu.GetTotal()
+ s.CPU.Rate1M = res.Cpu.GetRate1M()
+ s.CPU.Rate10M = res.Cpu.GetRate10M()
+ }
+ if res.Memory != nil {
+ s.RAM.Current = res.Memory.GetCurrent()
+ s.RAM.Average1M = res.Memory.GetAverage1M()
+ s.RAM.Average10M = res.Memory.GetAverage10M()
+ }
+ return s, nil
+}
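+
+// A minimal usage sketch (illustrative only, not part of this package): reading
+// instance statistics inside a request handler. The handler parameters w and r,
+// and the use of fmt, are assumptions made for the example.
+//
+// c := appengine.NewContext(r)
+// stats, err := Stats(c)
+// if err != nil {
+// http.Error(w, err.Error(), http.StatusInternalServerError)
+// return
+// }
+// fmt.Fprintf(w, "CPU: %.0f megacycles, RAM: %.1f MB\n", stats.CPU.Total, stats.RAM.Current)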
+
+/*
+RunInBackground makes an API call that triggers an /_ah/background request.
+
+There are two independent code paths that need to make contact:
+the RunInBackground code, and the /_ah/background handler. The matchmaker
+loop arranges for the two paths to meet. The RunInBackground code passes
+a send to the matchmaker, the /_ah/background passes a recv to the matchmaker,
+and the matchmaker hooks them up.
+*/
+
+func init() {
+ http.HandleFunc("/_ah/background", handleBackground)
+
+ sc := make(chan send)
+ rc := make(chan recv)
+ sendc, recvc = sc, rc
+ go matchmaker(sc, rc)
+}
+
+var (
+ sendc chan<- send // RunInBackground sends to this
+ recvc chan<- recv // handleBackground sends to this
+)
+
+type send struct {
+ id string
+ f func(context.Context)
+}
+
+type recv struct {
+ id string
+ ch chan<- func(context.Context)
+}
+
+func matchmaker(sendc <-chan send, recvc <-chan recv) {
+ // When one side of the match arrives before the other
+ // it is inserted in the corresponding map.
+ waitSend := make(map[string]send)
+ waitRecv := make(map[string]recv)
+
+ for {
+ select {
+ case s := <-sendc:
+ if r, ok := waitRecv[s.id]; ok {
+ // meet!
+ delete(waitRecv, s.id)
+ r.ch <- s.f
+ } else {
+ // waiting for r
+ waitSend[s.id] = s
+ }
+ case r := <-recvc:
+ if s, ok := waitSend[r.id]; ok {
+ // meet!
+ delete(waitSend, r.id)
+ r.ch <- s.f
+ } else {
+ // waiting for s
+ waitRecv[r.id] = r
+ }
+ }
+ }
+}
+
+var newContext = appengine.NewContext // for testing
+
+func handleBackground(w http.ResponseWriter, req *http.Request) {
+ id := req.Header.Get("X-AppEngine-BackgroundRequest")
+
+ ch := make(chan func(context.Context))
+ recvc <- recv{id, ch}
+ (<-ch)(newContext(req))
+}
+
+// RunInBackground runs f in a background goroutine in this process.
+// f is provided a context that may outlast the context provided to RunInBackground.
+// This is only valid to invoke from a service set to basic or manual scaling.
+func RunInBackground(c context.Context, f func(c context.Context)) error {
+ req := &pb.StartBackgroundRequestRequest{}
+ res := &pb.StartBackgroundRequestResponse{}
+ if err := internal.Call(c, "system", "StartBackgroundRequest", req, res); err != nil {
+ return err
+ }
+ sendc <- send{res.GetRequestId(), f}
+ return nil
+}
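+
+// A minimal usage sketch (illustrative only; ctx is assumed to come from
+// appengine.NewContext in a handler running on a basic- or manual-scaling
+// module, and doWork is a hypothetical helper):
+//
+// err := RunInBackground(ctx, func(c context.Context) {
+// // c may outlive ctx, so the background work should use c for API calls.
+// doWork(c)
+// })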
+
+func init() {
+ internal.RegisterErrorCodeMap("system", pb.SystemServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/runtime/runtime_test.go b/vendor/google.golang.org/appengine/runtime/runtime_test.go
new file mode 100644
index 0000000..8f3a124
--- /dev/null
+++ b/vendor/google.golang.org/appengine/runtime/runtime_test.go
@@ -0,0 +1,101 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/system"
+)
+
+func TestRunInBackgroundSendFirst(t *testing.T) { testRunInBackground(t, true) }
+func TestRunInBackgroundRecvFirst(t *testing.T) { testRunInBackground(t, false) }
+
+func testRunInBackground(t *testing.T, sendFirst bool) {
+ srv := httptest.NewServer(nil)
+ defer srv.Close()
+
+ const id = "f00bar"
+ sendWait, recvWait := make(chan bool), make(chan bool)
+ sbr := make(chan bool) // strobed when system.StartBackgroundRequest has started
+
+ calls := 0
+ c := aetesting.FakeSingleContext(t, "system", "StartBackgroundRequest", func(req *pb.StartBackgroundRequestRequest, res *pb.StartBackgroundRequestResponse) error {
+ calls++
+ if calls > 1 {
+ t.Errorf("Too many calls to system.StartBackgroundRequest")
+ }
+ sbr <- true
+ res.RequestId = proto.String(id)
+ <-sendWait
+ return nil
+ })
+
+ var c2 context.Context // a fake
+ newContext = func(*http.Request) context.Context {
+ return c2
+ }
+
+ var fRun int
+ f := func(c3 context.Context) {
+ fRun++
+ if c3 != c2 {
+ t.Errorf("f got a different context than expected")
+ }
+ }
+
+ ribErrc := make(chan error)
+ go func() {
+ ribErrc <- RunInBackground(c, f)
+ }()
+
+ brErrc := make(chan error)
+ go func() {
+ <-sbr
+ req, err := http.NewRequest("GET", srv.URL+"/_ah/background", nil)
+ if err != nil {
+ brErrc <- fmt.Errorf("http.NewRequest: %v", err)
+ return
+ }
+ req.Header.Set("X-AppEngine-BackgroundRequest", id)
+ client := &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ },
+ }
+
+ <-recvWait
+ _, err = client.Do(req)
+ brErrc <- err
+ }()
+
+ // Send and receive are both waiting at this point.
+ waits := [2]chan bool{sendWait, recvWait}
+ if !sendFirst {
+ waits[0], waits[1] = waits[1], waits[0]
+ }
+ waits[0] <- true
+ time.Sleep(100 * time.Millisecond)
+ waits[1] <- true
+
+ if err := <-ribErrc; err != nil {
+ t.Fatalf("RunInBackground: %v", err)
+ }
+ if err := <-brErrc; err != nil {
+ t.Fatalf("background request: %v", err)
+ }
+
+ if fRun != 1 {
+ t.Errorf("Got %d runs of f, want 1", fRun)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/search/doc.go b/vendor/google.golang.org/appengine/search/doc.go
new file mode 100644
index 0000000..da331ce
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/doc.go
@@ -0,0 +1,209 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package search provides a client for App Engine's search service.
+
+
+Basic Operations
+
+Indexes contain documents. Each index is identified by its name: a
+human-readable ASCII string.
+
+Within an index, documents are associated with an ID, which is also
+a human-readable ASCII string. A document's contents are a mapping from
+case-sensitive field names to values. Valid types for field values are:
+ - string,
+ - search.Atom,
+ - search.HTML,
+ - time.Time (stored with millisecond precision),
+ - float64 (value between -2,147,483,647 and 2,147,483,647 inclusive),
+ - appengine.GeoPoint.
+
+The Get and Put methods on an Index load and save a document.
+A document's contents are typically represented by a struct pointer.
+
+Example code:
+
+ type Doc struct {
+ Author string
+ Comment string
+ Creation time.Time
+ }
+
+ index, err := search.Open("comments")
+ if err != nil {
+ return err
+ }
+ newID, err := index.Put(ctx, "", &Doc{
+ Author: "gopher",
+ Comment: "the truth of the matter",
+ Creation: time.Now(),
+ })
+ if err != nil {
+ return err
+ }
+
+A single document can be retrieved by its ID. Pass a destination struct
+to Get to hold the resulting document.
+
+ var doc Doc
+ err := index.Get(ctx, id, &doc)
+ if err != nil {
+ return err
+ }
+
+
+Search and Listing Documents
+
+Indexes have two methods for retrieving multiple documents at once: Search and
+List.
+
+Searching an index for a query will result in an iterator. As with an iterator
+from package datastore, pass a destination struct to Next to decode the next
+result. Next will return Done when the iterator is exhausted.
+
+ for t := index.Search(ctx, "Comment:truth", nil); ; {
+ var doc Doc
+ id, err := t.Next(&doc)
+ if err == search.Done {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(w, "%s -> %#v\n", id, doc)
+ }
+
+Search takes a string query to determine which documents to return. The query
+can be simple, such as a single word to match, or complex. The query
+language is described at
+https://cloud.google.com/appengine/docs/go/search/query_strings
+
+Search also takes an optional SearchOptions struct which gives much more
+control over how results are calculated and returned.
+
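+A short sketch of passing SearchOptions (the option values shown are purely
+illustrative):
+
+ t := index.Search(ctx, "Comment:truth", &search.SearchOptions{
+ Limit: 20,
+ IDsOnly: true,
+ })
+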
+Call List to iterate over all documents in an index.
+
+ for t := index.List(ctx, nil); ; {
+ var doc Doc
+ id, err := t.Next(&doc)
+ if err == search.Done {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(w, "%s -> %#v\n", id, doc)
+ }
+
+
+Fields and Facets
+
+A document's contents can be represented by a variety of types. These are
+typically struct pointers, but they can also be represented by any type
+implementing the FieldLoadSaver interface. The FieldLoadSaver allows metadata
+to be set for the document with the DocumentMetadata type. Struct pointers are
+more strongly typed and are easier to use; FieldLoadSavers are more flexible.
+
+A document's contents can be expressed in two ways: fields and facets.
+
+Fields are the most common way of providing content for documents. Fields can
+store data in multiple types and can be matched in searches using query
+strings.
+
+Facets provide a way to attach categorical information to a document. The only
+valid types for facets are search.Atom and float64. Facets allow search
+results to contain summaries of the categories matched in a search, and to
+restrict searches to only match against specific categories.
+
+By default, for struct pointers, all of the struct fields are used as document
+fields, and the field name used is the same as on the struct (and hence must
+start with an upper case letter). Struct fields may have a
+`search:"name,options"` tag. The name must start with a letter and be
+composed only of word characters. A "-" tag name means that the field will be
+ignored. If options is "facet" then the struct field will be used as a
+document facet. If options is "" then the comma may be omitted. There are no
+other recognized options.
+
+Example code:
+
+ // A and B are renamed to a and b.
+ // A, C and I are facets.
+ // D's tag is equivalent to having no tag at all (E).
+ // F and G are ignored entirely by the search package.
+ // I has tag information for both the search and json packages.
+ type TaggedStruct struct {
+ A float64 `search:"a,facet"`
+ B float64 `search:"b"`
+ C float64 `search:",facet"`
+ D float64 `search:""`
+ E float64
+ F float64 `search:"-"`
+ G float64 `search:"-,facet"`
+ I float64 `search:",facet" json:"i"`
+ }
+
+
+The FieldLoadSaver Interface
+
+A document's contents can also be represented by any type that implements the
+FieldLoadSaver interface. This type may be a struct pointer, but it
+does not have to be. The search package will call Load when loading the
+document's contents, and Save when saving them. In addition to a slice of
+Fields, the Load and Save methods also use the DocumentMetadata type to
+provide additional information about a document (such as its Rank, or set of
+Facets). Possible uses for this interface include deriving non-stored fields,
+verifying fields or setting specific languages for string and HTML fields.
+
+Example code:
+
+ type CustomFieldsExample struct {
+ // Item's title and which language it is in.
+ Title string
+ Lang string
+ // Mass, in grams.
+ Mass int
+ }
+
+ func (x *CustomFieldsExample) Load(fields []search.Field, meta *search.DocumentMetadata) error {
+ // Load the title field, failing if any other field is found.
+ for _, f := range fields {
+ if f.Name != "title" {
+ return fmt.Errorf("unknown field %q", f.Name)
+ }
+ s, ok := f.Value.(string)
+ if !ok {
+ return fmt.Errorf("unsupported type %T for field %q", f.Value, f.Name)
+ }
+ x.Title = s
+ x.Lang = f.Language
+ }
+ // Load the mass facet, failing if any other facet is found.
+ for _, f := range meta.Facets {
+ if f.Name != "mass" {
+ return fmt.Errorf("unknown facet %q", f.Name)
+ }
+ m, ok := f.Value.(float64)
+ if !ok {
+ return fmt.Errorf("unsupported type %T for facet %q", f.Value, f.Name)
+ }
+ x.Mass = int(m)
+ }
+ return nil
+ }
+
+ func (x *CustomFieldsExample) Save() ([]search.Field, *search.DocumentMetadata, error) {
+ fields := []search.Field{
+ {Name: "title", Value: x.Title, Language: x.Lang},
+ }
+ meta := &search.DocumentMetadata{
+ Facets: []search.Facet{
+ {Name: "mass", Value: float64(x.Mass)},
+ },
+ }
+ return fields, meta, nil
+ }
+*/
+package search
diff --git a/vendor/google.golang.org/appengine/search/field.go b/vendor/google.golang.org/appengine/search/field.go
new file mode 100644
index 0000000..707c2d8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/field.go
@@ -0,0 +1,82 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+// Field is a name/value pair. A search index's document can be loaded and
+// saved as a sequence of Fields.
+type Field struct {
+ // Name is the field name. A valid field name matches /[A-Za-z][A-Za-z0-9_]*/.
+ Name string
+ // Value is the field value. The valid types are:
+ // - string,
+ // - search.Atom,
+ // - search.HTML,
+ // - time.Time (stored with millisecond precision),
+ // - float64,
+ // - GeoPoint.
+ Value interface{}
+ // Language is a two-letter ISO 639-1 code for the field's language,
+ // defaulting to "en" if nothing is specified. It may only be specified for
+ // fields of type string and search.HTML.
+ Language string
+ // Derived marks fields that were calculated as a result of a
+ // FieldExpression provided to Search. This field is ignored when saving a
+ // document.
+ Derived bool
+}
+
+// Facet is a name/value pair which is used to add categorical information to a
+// document.
+type Facet struct {
+ // Name is the facet name. A valid facet name matches /[A-Za-z][A-Za-z0-9_]*/.
+ // A facet name cannot be longer than 500 characters.
+ Name string
+ // Value is the facet value.
+ //
+ // When being used in documents (for example, in
+ // DocumentMetadata.Facets), the valid types are:
+ // - search.Atom,
+ // - float64.
+ //
+ // When being used in SearchOptions.Refinements or being returned
+ // in FacetResult, the valid types are:
+ // - search.Atom,
+ // - search.Range.
+ Value interface{}
+}
+
+// DocumentMetadata is a struct containing information describing a given document.
+type DocumentMetadata struct {
+ // Rank is an integer specifying the order the document will be returned in
+ // search results. If zero, the rank will be set to the number of seconds since
+ // 2011-01-01 00:00:00 UTC when being Put into an index.
+ Rank int
+ // Facets is the set of facets for this document.
+ Facets []Facet
+}
+
+// FieldLoadSaver can be converted from and to a slice of Fields
+// with additional document metadata.
+type FieldLoadSaver interface {
+ Load([]Field, *DocumentMetadata) error
+ Save() ([]Field, *DocumentMetadata, error)
+}
+
+// FieldList converts a []Field to implement FieldLoadSaver.
+type FieldList []Field
+
+// Load loads all of the provided fields into l.
+// It does not first reset *l to an empty slice.
+func (l *FieldList) Load(f []Field, _ *DocumentMetadata) error {
+ *l = append(*l, f...)
+ return nil
+}
+
+// Save returns all of l's fields as a slice of Fields.
+func (l *FieldList) Save() ([]Field, *DocumentMetadata, error) {
+ return *l, nil, nil
+}
+
+var _ FieldLoadSaver = (*FieldList)(nil)
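+
+// A minimal sketch (illustrative only; index, ctx and id are assumed to exist,
+// and fmt is not imported by this file) showing FieldList used as a generic
+// destination for a document of unknown shape:
+//
+// var fl FieldList
+// if err := index.Get(ctx, id, &fl); err != nil {
+// return err
+// }
+// for _, f := range fl {
+// fmt.Printf("%s = %v\n", f.Name, f.Value)
+// }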
diff --git a/vendor/google.golang.org/appengine/search/search.go b/vendor/google.golang.org/appengine/search/search.go
new file mode 100644
index 0000000..774b051
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/search.go
@@ -0,0 +1,1121 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search // import "google.golang.org/appengine/search"
+
+// TODO: let Put specify the document language: "en", "fr", etc. Also: order_id?? storage??
+// TODO: Index.GetAll (or Iterator.GetAll)?
+// TODO: struct <-> protobuf tests.
+// TODO: enforce Python's MIN_NUMBER_VALUE and MIN_DATE (which would disallow a zero
+// time.Time)? _MAXIMUM_STRING_LENGTH?
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/search"
+)
+
+var (
+ // ErrInvalidDocumentType is returned when methods like Put, Get or Next
+ // are passed a dst or src argument of invalid type.
+ ErrInvalidDocumentType = errors.New("search: invalid document type")
+
+ // ErrNoSuchDocument is returned when no document was found for a given ID.
+ ErrNoSuchDocument = errors.New("search: no such document")
+)
+
+// Atom is a document field whose contents are indexed as a single indivisible
+// string.
+type Atom string
+
+// HTML is a document field whose contents are indexed as HTML. Only text nodes
+// are indexed: "foo<b>bar" will be treated as "foobar".
+type HTML string
+
+// validIndexNameOrDocID is the Go equivalent of Python's
+// _ValidateVisiblePrintableAsciiNotReserved.
+func validIndexNameOrDocID(s string) bool {
+ if strings.HasPrefix(s, "!") {
+ return false
+ }
+ for _, c := range s {
+ if c < 0x21 || 0x7f <= c {
+ return false
+ }
+ }
+ return true
+}
+
+var (
+ fieldNameRE = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_]*$`)
+ languageRE = regexp.MustCompile(`^[a-z]{2}$`)
+)
+
+// validFieldName is the Go equivalent of Python's _CheckFieldName. It checks
+// the validity of both field and facet names.
+func validFieldName(s string) bool {
+ return len(s) <= 500 && fieldNameRE.MatchString(s)
+}
+
+// validDocRank checks that the rank is in the range [0, 2^31).
+func validDocRank(r int) bool {
+ return 0 <= r && r <= (1<<31-1)
+}
+
+// validLanguage checks that a language looks like ISO 639-1.
+func validLanguage(s string) bool {
+ return languageRE.MatchString(s)
+}
+
+// validFloat checks that f is in the range [-2147483647, 2147483647].
+func validFloat(f float64) bool {
+ return -(1<<31-1) <= f && f <= (1<<31-1)
+}
+
+// Index is an index of documents.
+type Index struct {
+ spec pb.IndexSpec
+}
+
+// orderIDEpoch forms the basis for populating OrderId on documents.
+var orderIDEpoch = time.Date(2011, 1, 1, 0, 0, 0, 0, time.UTC)
+
+// Open opens the index with the given name. The index is created if it does
+// not already exist.
+//
+// The name is a human-readable ASCII string. It must contain no whitespace
+// characters and not start with "!".
+func Open(name string) (*Index, error) {
+ if !validIndexNameOrDocID(name) {
+ return nil, fmt.Errorf("search: invalid index name %q", name)
+ }
+ return &Index{
+ spec: pb.IndexSpec{
+ Name: &name,
+ },
+ }, nil
+}
+
+// Put saves src to the index. If id is empty, a new ID is allocated by the
+// service and returned. If id is not empty, any existing index entry for that
+// ID is replaced.
+//
+// The ID is a human-readable ASCII string. It must contain no whitespace
+// characters and not start with "!".
+//
+// src must be a non-nil struct pointer or implement the FieldLoadSaver
+// interface.
+func (x *Index) Put(c context.Context, id string, src interface{}) (string, error) {
+ d, err := saveDoc(src)
+ if err != nil {
+ return "", err
+ }
+ if id != "" {
+ if !validIndexNameOrDocID(id) {
+ return "", fmt.Errorf("search: invalid ID %q", id)
+ }
+ d.Id = proto.String(id)
+ }
+ // spec is modified by Call when applying the current Namespace, so copy it to
+ // avoid retaining the namespace beyond the scope of the Call.
+ spec := x.spec
+ req := &pb.IndexDocumentRequest{
+ Params: &pb.IndexDocumentParams{
+ Document: []*pb.Document{d},
+ IndexSpec: &spec,
+ },
+ }
+ res := &pb.IndexDocumentResponse{}
+ if err := internal.Call(c, "search", "IndexDocument", req, res); err != nil {
+ return "", err
+ }
+ if len(res.Status) > 0 {
+ if s := res.Status[0]; s.GetCode() != pb.SearchServiceError_OK {
+ return "", fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail())
+ }
+ }
+ if len(res.Status) != 1 || len(res.DocId) != 1 {
+ return "", fmt.Errorf("search: internal error: wrong number of results (%d Statuses, %d DocIDs)",
+ len(res.Status), len(res.DocId))
+ }
+ return res.DocId[0], nil
+}
+
+// Get loads the document with the given ID into dst.
+//
+// The ID is a human-readable ASCII string. It must be non-empty, contain no
+// whitespace characters and not start with "!".
+//
+// dst must be a non-nil struct pointer or implement the FieldLoadSaver
+// interface.
+//
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. ErrFieldMismatch is only returned if
+// dst is a struct pointer. It is up to the caller to decide whether this error
+// is fatal, recoverable, or ignorable.
+func (x *Index) Get(c context.Context, id string, dst interface{}) error {
+ if id == "" || !validIndexNameOrDocID(id) {
+ return fmt.Errorf("search: invalid ID %q", id)
+ }
+ req := &pb.ListDocumentsRequest{
+ Params: &pb.ListDocumentsParams{
+ IndexSpec: &x.spec,
+ StartDocId: proto.String(id),
+ Limit: proto.Int32(1),
+ },
+ }
+ res := &pb.ListDocumentsResponse{}
+ if err := internal.Call(c, "search", "ListDocuments", req, res); err != nil {
+ return err
+ }
+ if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+ return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+ }
+ if len(res.Document) != 1 || res.Document[0].GetId() != id {
+ return ErrNoSuchDocument
+ }
+ return loadDoc(dst, res.Document[0], nil)
+}
+
+// Delete deletes a document from the index.
+func (x *Index) Delete(c context.Context, id string) error {
+ req := &pb.DeleteDocumentRequest{
+ Params: &pb.DeleteDocumentParams{
+ DocId: []string{id},
+ IndexSpec: &x.spec,
+ },
+ }
+ res := &pb.DeleteDocumentResponse{}
+ if err := internal.Call(c, "search", "DeleteDocument", req, res); err != nil {
+ return err
+ }
+ if len(res.Status) != 1 {
+ return fmt.Errorf("search: internal error: wrong number of results (%d)", len(res.Status))
+ }
+ if s := res.Status[0]; s.GetCode() != pb.SearchServiceError_OK {
+ return fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail())
+ }
+ return nil
+}
+
+// List lists all of the documents in an index. The documents are returned in
+// increasing ID order.
+func (x *Index) List(c context.Context, opts *ListOptions) *Iterator {
+ t := &Iterator{
+ c: c,
+ index: x,
+ count: -1,
+ listInclusive: true,
+ more: moreList,
+ }
+ if opts != nil {
+ t.listStartID = opts.StartID
+ t.limit = opts.Limit
+ t.idsOnly = opts.IDsOnly
+ }
+ return t
+}
+
+func moreList(t *Iterator) error {
+ req := &pb.ListDocumentsRequest{
+ Params: &pb.ListDocumentsParams{
+ IndexSpec: &t.index.spec,
+ },
+ }
+ if t.listStartID != "" {
+ req.Params.StartDocId = &t.listStartID
+ req.Params.IncludeStartDoc = &t.listInclusive
+ }
+ if t.limit > 0 {
+ req.Params.Limit = proto.Int32(int32(t.limit))
+ }
+ if t.idsOnly {
+ req.Params.KeysOnly = &t.idsOnly
+ }
+
+ res := &pb.ListDocumentsResponse{}
+ if err := internal.Call(t.c, "search", "ListDocuments", req, res); err != nil {
+ return err
+ }
+ if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+ return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+ }
+ t.listRes = res.Document
+ t.listStartID, t.listInclusive, t.more = "", false, nil
+ if len(res.Document) != 0 && t.limit <= 0 {
+ if id := res.Document[len(res.Document)-1].GetId(); id != "" {
+ t.listStartID, t.more = id, moreList
+ }
+ }
+ return nil
+}
+
+// ListOptions are the options for listing documents in an index. Passing a nil
+// *ListOptions is equivalent to using the default values.
+type ListOptions struct {
+ // StartID is the inclusive lower bound for the ID of the returned
+ // documents. The zero value means all documents will be returned.
+ StartID string
+
+ // Limit is the maximum number of documents to return. The zero value
+ // indicates no limit.
+ Limit int
+
+ // IDsOnly indicates that only document IDs should be returned for the list
+ // operation; no document fields are populated.
+ IDsOnly bool
+}
+
+// Search searches the index for the given query.
+func (x *Index) Search(c context.Context, query string, opts *SearchOptions) *Iterator {
+ t := &Iterator{
+ c: c,
+ index: x,
+ searchQuery: query,
+ more: moreSearch,
+ }
+ if opts != nil {
+ if opts.Cursor != "" {
+ if opts.Offset != 0 {
+ return errIter("at most one of Cursor and Offset may be specified")
+ }
+ t.searchCursor = proto.String(string(opts.Cursor))
+ }
+ t.limit = opts.Limit
+ t.fields = opts.Fields
+ t.idsOnly = opts.IDsOnly
+ t.sort = opts.Sort
+ t.exprs = opts.Expressions
+ t.refinements = opts.Refinements
+ t.facetOpts = opts.Facets
+ t.searchOffset = opts.Offset
+ t.countAccuracy = opts.CountAccuracy
+ }
+ return t
+}
+
+func moreSearch(t *Iterator) error {
+ // We use per-result (rather than single/per-page) cursors since this
+ // lets us return a Cursor for every iterator document. The two cursor
+ // types are largely interchangeable: a page cursor is the same as the
+ // last per-result cursor in a given search response.
+ req := &pb.SearchRequest{
+ Params: &pb.SearchParams{
+ IndexSpec: &t.index.spec,
+ Query: &t.searchQuery,
+ Cursor: t.searchCursor,
+ CursorType: pb.SearchParams_PER_RESULT.Enum(),
+ FieldSpec: &pb.FieldSpec{
+ Name: t.fields,
+ },
+ },
+ }
+ if t.limit > 0 {
+ req.Params.Limit = proto.Int32(int32(t.limit))
+ }
+ if t.searchOffset > 0 {
+ req.Params.Offset = proto.Int32(int32(t.searchOffset))
+ t.searchOffset = 0
+ }
+ if t.countAccuracy > 0 {
+ req.Params.MatchedCountAccuracy = proto.Int32(int32(t.countAccuracy))
+ }
+ if t.idsOnly {
+ req.Params.KeysOnly = &t.idsOnly
+ }
+ if t.sort != nil {
+ if err := sortToProto(t.sort, req.Params); err != nil {
+ return err
+ }
+ }
+ if t.refinements != nil {
+ if err := refinementsToProto(t.refinements, req.Params); err != nil {
+ return err
+ }
+ }
+ for _, e := range t.exprs {
+ req.Params.FieldSpec.Expression = append(req.Params.FieldSpec.Expression, &pb.FieldSpec_Expression{
+ Name: proto.String(e.Name),
+ Expression: proto.String(e.Expr),
+ })
+ }
+ for _, f := range t.facetOpts {
+ if err := f.setParams(req.Params); err != nil {
+ return fmt.Errorf("bad FacetSearchOption: %v", err)
+ }
+ }
+ // Don't repeat facet search.
+ t.facetOpts = nil
+
+ res := &pb.SearchResponse{}
+ if err := internal.Call(t.c, "search", "Search", req, res); err != nil {
+ return err
+ }
+ if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+ return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+ }
+ t.searchRes = res.Result
+ if len(res.FacetResult) > 0 {
+ t.facetRes = res.FacetResult
+ }
+ t.count = int(*res.MatchedCount)
+ if t.limit > 0 {
+ t.more = nil
+ } else {
+ t.more = moreSearch
+ }
+ return nil
+}
+
+// SearchOptions are the options for searching an index. Passing a nil
+// *SearchOptions is equivalent to using the default values.
+type SearchOptions struct {
+ // Limit is the maximum number of documents to return. The zero value
+ // indicates no limit.
+ Limit int
+
+ // IDsOnly indicates that only document IDs should be returned for the search
+ // operation; no document fields are populated.
+ IDsOnly bool
+
+ // Sort controls the ordering of search results.
+ Sort *SortOptions
+
+ // Fields specifies which document fields to include in the results. If omitted,
+ // all document fields are returned. No more than 100 fields may be specified.
+ Fields []string
+
+ // Expressions specifies additional computed fields to add to each returned
+ // document.
+ Expressions []FieldExpression
+
+ // Facets controls what facet information is returned for these search results.
+ // If no options are specified, no facet results will be returned.
+ Facets []FacetSearchOption
+
+ // Refinements filters the returned documents by requiring them to contain facets
+ // with specific values. Refinements are applied in conjunction for facets with
+ // different names, and in disjunction otherwise.
+ Refinements []Facet
+
+ // Cursor causes the results to commence with the first document after
+ // the document associated with the cursor.
+ Cursor Cursor
+
+ // Offset specifies the number of documents to skip over before returning results.
+ // When specified, Cursor must be empty.
+ Offset int
+
+ // CountAccuracy specifies the maximum result count that can be expected to
+ // be accurate. If zero, the count accuracy defaults to 20.
+ CountAccuracy int
+}
+
+// Cursor represents an iterator's position.
+//
+// The string value of a cursor is web-safe. It can be saved and restored
+// for later use.
+type Cursor string
+
+// FieldExpression defines a custom expression to evaluate for each result.
+type FieldExpression struct {
+ // Name is the name to use for the computed field.
+ Name string
+
+ // Expr is evaluated to provide a custom content snippet for each document.
+ // See https://cloud.google.com/appengine/docs/go/search/options for
+ // the supported expression syntax.
+ Expr string
+}
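+
+// A minimal sketch (illustrative only; the expression and the field name
+// "Comment" are assumptions) of requesting a computed snippet field with each
+// search result:
+//
+// t := index.Search(ctx, query, &SearchOptions{
+// Expressions: []FieldExpression{
+// {Name: "summary", Expr: `snippet("truth", Comment)`},
+// },
+// })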
+
+// FacetSearchOption controls what facet information is returned in search results.
+type FacetSearchOption interface {
+ setParams(*pb.SearchParams) error
+}
+
+// AutoFacetDiscovery returns a FacetSearchOption which enables automatic facet
+// discovery for the search. Automatic facet discovery looks for the facets
+// that appear most often, in aggregate, in the matched documents.
+//
+// The maximum number of facets returned is controlled by facetLimit, and the
+// maximum number of values per facet by valueLimit. A limit of zero indicates
+// a default limit should be used.
+func AutoFacetDiscovery(facetLimit, valueLimit int) FacetSearchOption {
+ return &autoFacetOpt{facetLimit, valueLimit}
+}
+
+type autoFacetOpt struct {
+ facetLimit, valueLimit int
+}
+
+const defaultAutoFacetLimit = 10 // As per python runtime search.py.
+
+func (o *autoFacetOpt) setParams(params *pb.SearchParams) error {
+ lim := int32(o.facetLimit)
+ if lim == 0 {
+ lim = defaultAutoFacetLimit
+ }
+ params.AutoDiscoverFacetCount = &lim
+ if o.valueLimit > 0 {
+ params.FacetAutoDetectParam = &pb.FacetAutoDetectParam{
+ ValueLimit: proto.Int32(int32(o.valueLimit)),
+ }
+ }
+ return nil
+}
+
+// FacetDiscovery returns a FacetSearchOption which selects a facet to be
+// returned with the search results. By default, the most frequently
+// occurring values for that facet will be returned. However, you can also
+// specify a list of particular Atoms or specific Ranges to return.
+func FacetDiscovery(name string, value ...interface{}) FacetSearchOption {
+ return &facetOpt{name, value}
+}
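+
+// A minimal sketch (illustrative only; the facet name "Legs" is an assumption)
+// of requesting facet information alongside a search and reading it back with
+// Iterator.Facets:
+//
+// t := index.Search(ctx, query, &SearchOptions{
+// Facets: []FacetSearchOption{
+// AutoFacetDiscovery(0, 0),
+// FacetDiscovery("Legs", AtLeast(4)),
+// },
+// })
+// facets, err := t.Facets()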
+
+type facetOpt struct {
+ name string
+ values []interface{}
+}
+
+func (o *facetOpt) setParams(params *pb.SearchParams) error {
+ req := &pb.FacetRequest{Name: &o.name}
+ params.IncludeFacet = append(params.IncludeFacet, req)
+ if len(o.values) == 0 {
+ return nil
+ }
+ vtype := reflect.TypeOf(o.values[0])
+ reqParam := &pb.FacetRequestParam{}
+ for _, v := range o.values {
+ if reflect.TypeOf(v) != vtype {
+ return errors.New("values must all be Atom, or must all be Range")
+ }
+ switch v := v.(type) {
+ case Atom:
+ reqParam.ValueConstraint = append(reqParam.ValueConstraint, string(v))
+ case Range:
+ rng, err := rangeToProto(v)
+ if err != nil {
+ return fmt.Errorf("invalid range: %v", err)
+ }
+ reqParam.Range = append(reqParam.Range, rng)
+ default:
+ return fmt.Errorf("unsupported value type %T", v)
+ }
+ }
+ req.Params = reqParam
+ return nil
+}
+
+// FacetDocumentDepth returns a FacetSearchOption which controls the number of
+// documents to be evaluated when preparing facet results.
+func FacetDocumentDepth(depth int) FacetSearchOption {
+ return facetDepthOpt(depth)
+}
+
+type facetDepthOpt int
+
+func (o facetDepthOpt) setParams(params *pb.SearchParams) error {
+ params.FacetDepth = proto.Int32(int32(o))
+ return nil
+}
+
+// FacetResult represents the number of times a particular facet and value
+// appeared in the documents matching a search request.
+type FacetResult struct {
+ Facet
+
+ // Count is the number of times this specific facet and value appeared in the
+ // matching documents.
+ Count int
+}
+
+// Range represents a numeric range with inclusive start and exclusive end.
+// Start may be specified as math.Inf(-1) to indicate there is no minimum
+// value, and End may similarly be specified as math.Inf(1); at least one of
+// Start or End must be a finite number.
+type Range struct {
+ Start, End float64
+}
+
+var (
+ negInf = math.Inf(-1)
+ posInf = math.Inf(1)
+)
+
+// AtLeast returns a Range matching any value greater than, or equal to, min.
+func AtLeast(min float64) Range {
+ return Range{Start: min, End: posInf}
+}
+
+// LessThan returns a Range matching any value less than max.
+func LessThan(max float64) Range {
+ return Range{Start: negInf, End: max}
+}
+
+// SortOptions control the ordering and scoring of search results.
+type SortOptions struct {
+ // Expressions is a slice of expressions representing a multi-dimensional
+ // sort.
+ Expressions []SortExpression
+
+ // Scorer, when specified, will cause the documents to be scored according to
+ // search term frequency.
+ Scorer Scorer
+
+ // Limit is the maximum number of objects to score and/or sort. Limit cannot
+ // be more than 10,000. The zero value indicates a default limit.
+ Limit int
+}
+
+// SortExpression defines a single dimension for sorting a document.
+type SortExpression struct {
+ // Expr is evaluated to provide a sorting value for each document.
+ // See https://cloud.google.com/appengine/docs/go/search/options for
+ // the supported expression syntax.
+ Expr string
+
+ // Reverse causes the documents to be sorted in ascending order
+ // (the default sort order is descending).
+ Reverse bool
+
+ // The default value to use when no field is present or the expression
+ // cannot be calculated for a document. For text sorts, Default must
+ // be of type string; for numeric sorts, float64.
+ Default interface{}
+}
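+
+// A minimal sketch (illustrative only; "Creation" is an assumed date field) of
+// a sort specification that returns the newest documents first, scored by term
+// frequency:
+//
+// opts := &SearchOptions{Sort: &SortOptions{
+// Expressions: []SortExpression{{Expr: "Creation"}},
+// Scorer: MatchScorer,
+// }}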
+
+// A Scorer defines how a document is scored.
+type Scorer interface {
+ toProto(*pb.ScorerSpec)
+}
+
+type enumScorer struct {
+ enum pb.ScorerSpec_Scorer
+}
+
+func (e enumScorer) toProto(spec *pb.ScorerSpec) {
+ spec.Scorer = e.enum.Enum()
+}
+
+var (
+ // MatchScorer assigns a score based on term frequency in a document.
+ MatchScorer Scorer = enumScorer{pb.ScorerSpec_MATCH_SCORER}
+
+ // RescoringMatchScorer assigns a score based on the quality of the query
+ // match. It is similar to a MatchScorer but uses a more complex scoring
+ // algorithm based on match term frequency and other factors like field type.
+ // Please be aware that this algorithm is continually refined and can change
+ // over time without notice. This means that the ordering of search results
+ // that use this scorer can also change without notice.
+ RescoringMatchScorer Scorer = enumScorer{pb.ScorerSpec_RESCORING_MATCH_SCORER}
+)
+
+func sortToProto(sort *SortOptions, params *pb.SearchParams) error {
+ for _, e := range sort.Expressions {
+ spec := &pb.SortSpec{
+ SortExpression: proto.String(e.Expr),
+ }
+ if e.Reverse {
+ spec.SortDescending = proto.Bool(false)
+ }
+ if e.Default != nil {
+ switch d := e.Default.(type) {
+ case float64:
+ spec.DefaultValueNumeric = &d
+ case string:
+ spec.DefaultValueText = &d
+ default:
+ return fmt.Errorf("search: invalid Default type %T for expression %q", d, e.Expr)
+ }
+ }
+ params.SortSpec = append(params.SortSpec, spec)
+ }
+
+ spec := &pb.ScorerSpec{}
+ if sort.Limit > 0 {
+ spec.Limit = proto.Int32(int32(sort.Limit))
+ params.ScorerSpec = spec
+ }
+ if sort.Scorer != nil {
+ sort.Scorer.toProto(spec)
+ params.ScorerSpec = spec
+ }
+
+ return nil
+}
+
+func refinementsToProto(refinements []Facet, params *pb.SearchParams) error {
+ for _, r := range refinements {
+ ref := &pb.FacetRefinement{
+ Name: proto.String(r.Name),
+ }
+ switch v := r.Value.(type) {
+ case Atom:
+ ref.Value = proto.String(string(v))
+ case Range:
+ rng, err := rangeToProto(v)
+ if err != nil {
+ return fmt.Errorf("search: refinement for facet %q: %v", r.Name, err)
+ }
+ // Unfortunately there are two identical messages for identifying Facet ranges.
+ ref.Range = &pb.FacetRefinement_Range{Start: rng.Start, End: rng.End}
+ default:
+ return fmt.Errorf("search: unsupported refinement for facet %q of type %T", r.Name, v)
+ }
+ params.FacetRefinement = append(params.FacetRefinement, ref)
+ }
+ return nil
+}
+
+func rangeToProto(r Range) (*pb.FacetRange, error) {
+ rng := &pb.FacetRange{}
+ if r.Start != negInf {
+ if !validFloat(r.Start) {
+ return nil, errors.New("invalid value for Start")
+ }
+ rng.Start = proto.String(strconv.FormatFloat(r.Start, 'e', -1, 64))
+ } else if r.End == posInf {
+ return nil, errors.New("either Start or End must be finite")
+ }
+ if r.End != posInf {
+ if !validFloat(r.End) {
+ return nil, errors.New("invalid value for End")
+ }
+ rng.End = proto.String(strconv.FormatFloat(r.End, 'e', -1, 64))
+ }
+ return rng, nil
+}
+
+func protoToRange(rng *pb.FacetRefinement_Range) Range {
+ r := Range{Start: negInf, End: posInf}
+ if x, err := strconv.ParseFloat(rng.GetStart(), 64); err == nil {
+ r.Start = x
+ }
+ if x, err := strconv.ParseFloat(rng.GetEnd(), 64); err == nil {
+ r.End = x
+ }
+ return r
+}
+
+// Iterator is the result of searching an index for a query or listing an
+// index.
+type Iterator struct {
+ c context.Context
+ index *Index
+ err error
+
+ listRes []*pb.Document
+ listStartID string
+ listInclusive bool
+
+ searchRes []*pb.SearchResult
+ facetRes []*pb.FacetResult
+ searchQuery string
+ searchCursor *string
+ searchOffset int
+ sort *SortOptions
+
+ fields []string
+ exprs []FieldExpression
+ refinements []Facet
+ facetOpts []FacetSearchOption
+
+ more func(*Iterator) error
+
+ count int
+ countAccuracy int
+ limit int // items left to return; 0 for unlimited.
+ idsOnly bool
+}
+
+// errIter returns an iterator that only returns the given error.
+func errIter(err string) *Iterator {
+ return &Iterator{
+ err: errors.New(err),
+ }
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("search: query has no more results")
+
+// Count returns an approximation of the number of documents matched by the
+// query. It is only valid to call for iterators returned by Search.
+func (t *Iterator) Count() int { return t.count }
+
+// fetchMore retrieves more results, if there are no errors or pending results.
+func (t *Iterator) fetchMore() {
+ if t.err == nil && len(t.listRes)+len(t.searchRes) == 0 && t.more != nil {
+ t.err = t.more(t)
+ }
+}
+
+// Next returns the ID of the next result. When there are no more results,
+// Done is returned as the error.
+//
+// dst must be a non-nil struct pointer, implement the FieldLoadSaver
+// interface, or be a nil interface value. If a non-nil dst is provided, it
+// will be filled with the indexed fields. dst is ignored if this iterator was
+// created with an IDsOnly option.
+func (t *Iterator) Next(dst interface{}) (string, error) {
+ t.fetchMore()
+ if t.err != nil {
+ return "", t.err
+ }
+
+ var doc *pb.Document
+ var exprs []*pb.Field
+ switch {
+ case len(t.listRes) != 0:
+ doc = t.listRes[0]
+ t.listRes = t.listRes[1:]
+ case len(t.searchRes) != 0:
+ doc = t.searchRes[0].Document
+ exprs = t.searchRes[0].Expression
+ t.searchCursor = t.searchRes[0].Cursor
+ t.searchRes = t.searchRes[1:]
+ default:
+ return "", Done
+ }
+ if doc == nil {
+ return "", errors.New("search: internal error: no document returned")
+ }
+ if !t.idsOnly && dst != nil {
+ if err := loadDoc(dst, doc, exprs); err != nil {
+ return "", err
+ }
+ }
+ return doc.GetId(), nil
+}
+
+// Cursor returns the cursor associated with the current document (that is,
+// the document most recently returned by a call to Next).
+//
+// Passing this cursor in a future call to Search will cause those results
+// to commence with the first document after the current document.
+func (t *Iterator) Cursor() Cursor {
+ if t.searchCursor == nil {
+ return ""
+ }
+ return Cursor(*t.searchCursor)
+}
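+
+// A minimal sketch (illustrative only; query and saved are assumed to exist)
+// of resuming a search from a cursor captured on an earlier request:
+//
+// t := index.Search(ctx, query, &SearchOptions{Cursor: Cursor(saved)})
+// id, err := t.Next(nil)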
+
+// Facets returns the facets found within the search results, if any facets
+// were requested in the SearchOptions.
+func (t *Iterator) Facets() ([][]FacetResult, error) {
+ t.fetchMore()
+ if t.err != nil && t.err != Done {
+ return nil, t.err
+ }
+
+ var facets [][]FacetResult
+ for _, f := range t.facetRes {
+ fres := make([]FacetResult, 0, len(f.Value))
+ for _, v := range f.Value {
+ ref := v.Refinement
+ facet := FacetResult{
+ Facet: Facet{Name: ref.GetName()},
+ Count: int(v.GetCount()),
+ }
+ if ref.Value != nil {
+ facet.Value = Atom(*ref.Value)
+ } else {
+ facet.Value = protoToRange(ref.Range)
+ }
+ fres = append(fres, facet)
+ }
+ facets = append(facets, fres)
+ }
+ return facets, nil
+}
+
+// saveDoc converts from a struct pointer or FieldLoadSaver to the Document
+// protobuf.
+func saveDoc(src interface{}) (*pb.Document, error) {
+ var err error
+ var fields []Field
+ var meta *DocumentMetadata
+ switch x := src.(type) {
+ case FieldLoadSaver:
+ fields, meta, err = x.Save()
+ default:
+ fields, meta, err = saveStructWithMeta(src)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ fieldsProto, err := fieldsToProto(fields)
+ if err != nil {
+ return nil, err
+ }
+ d := &pb.Document{
+ Field: fieldsProto,
+ OrderId: proto.Int32(int32(time.Since(orderIDEpoch).Seconds())),
+ }
+ if meta != nil {
+ if meta.Rank != 0 {
+ if !validDocRank(meta.Rank) {
+ return nil, fmt.Errorf("search: invalid rank %d, must be [0, 2^31)", meta.Rank)
+ }
+ *d.OrderId = int32(meta.Rank)
+ }
+ if len(meta.Facets) > 0 {
+ facets, err := facetsToProto(meta.Facets)
+ if err != nil {
+ return nil, err
+ }
+ d.Facet = facets
+ }
+ }
+ return d, nil
+}
+
+func fieldsToProto(src []Field) ([]*pb.Field, error) {
+ // Maps to catch duplicate time or numeric fields.
+ timeFields, numericFields := make(map[string]bool), make(map[string]bool)
+ dst := make([]*pb.Field, 0, len(src))
+ for _, f := range src {
+ if !validFieldName(f.Name) {
+ return nil, fmt.Errorf("search: invalid field name %q", f.Name)
+ }
+ fieldValue := &pb.FieldValue{}
+ switch x := f.Value.(type) {
+ case string:
+ fieldValue.Type = pb.FieldValue_TEXT.Enum()
+ fieldValue.StringValue = proto.String(x)
+ case Atom:
+ fieldValue.Type = pb.FieldValue_ATOM.Enum()
+ fieldValue.StringValue = proto.String(string(x))
+ case HTML:
+ fieldValue.Type = pb.FieldValue_HTML.Enum()
+ fieldValue.StringValue = proto.String(string(x))
+ case time.Time:
+ if timeFields[f.Name] {
+ return nil, fmt.Errorf("search: duplicate time field %q", f.Name)
+ }
+ timeFields[f.Name] = true
+ fieldValue.Type = pb.FieldValue_DATE.Enum()
+ fieldValue.StringValue = proto.String(strconv.FormatInt(x.UnixNano()/1e6, 10))
+ case float64:
+ if numericFields[f.Name] {
+ return nil, fmt.Errorf("search: duplicate numeric field %q", f.Name)
+ }
+ if !validFloat(x) {
+ return nil, fmt.Errorf("search: numeric field %q with invalid value %f", f.Name, x)
+ }
+ numericFields[f.Name] = true
+ fieldValue.Type = pb.FieldValue_NUMBER.Enum()
+ fieldValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64))
+ case appengine.GeoPoint:
+ if !x.Valid() {
+ return nil, fmt.Errorf(
+ "search: GeoPoint field %q with invalid value %v",
+ f.Name, x)
+ }
+ fieldValue.Type = pb.FieldValue_GEO.Enum()
+ fieldValue.Geo = &pb.FieldValue_Geo{
+ Lat: proto.Float64(x.Lat),
+ Lng: proto.Float64(x.Lng),
+ }
+ default:
+ return nil, fmt.Errorf("search: unsupported field type: %v", reflect.TypeOf(f.Value))
+ }
+ if f.Language != "" {
+ switch f.Value.(type) {
+ case string, HTML:
+ if !validLanguage(f.Language) {
+ return nil, fmt.Errorf("search: invalid language for field %q: %q", f.Name, f.Language)
+ }
+ fieldValue.Language = proto.String(f.Language)
+ default:
+ return nil, fmt.Errorf("search: setting language not supported for field %q of type %T", f.Name, f.Value)
+ }
+ }
+ if p := fieldValue.StringValue; p != nil && !utf8.ValidString(*p) {
+ return nil, fmt.Errorf("search: %q field is invalid UTF-8: %q", f.Name, *p)
+ }
+ dst = append(dst, &pb.Field{
+ Name: proto.String(f.Name),
+ Value: fieldValue,
+ })
+ }
+ return dst, nil
+}
+
+func facetsToProto(src []Facet) ([]*pb.Facet, error) {
+ dst := make([]*pb.Facet, 0, len(src))
+ for _, f := range src {
+ if !validFieldName(f.Name) {
+ return nil, fmt.Errorf("search: invalid facet name %q", f.Name)
+ }
+ facetValue := &pb.FacetValue{}
+ switch x := f.Value.(type) {
+ case Atom:
+ if !utf8.ValidString(string(x)) {
+ return nil, fmt.Errorf("search: %q facet is invalid UTF-8: %q", f.Name, x)
+ }
+ facetValue.Type = pb.FacetValue_ATOM.Enum()
+ facetValue.StringValue = proto.String(string(x))
+ case float64:
+ if !validFloat(x) {
+ return nil, fmt.Errorf("search: numeric facet %q with invalid value %f", f.Name, x)
+ }
+ facetValue.Type = pb.FacetValue_NUMBER.Enum()
+ facetValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64))
+ default:
+ return nil, fmt.Errorf("search: unsupported facet type: %v", reflect.TypeOf(f.Value))
+ }
+ dst = append(dst, &pb.Facet{
+ Name: proto.String(f.Name),
+ Value: facetValue,
+ })
+ }
+ return dst, nil
+}
+
+// loadDoc converts from protobufs to a struct pointer or
+// FieldLoadSaver. The src param provides the document's
+// stored fields and facets, and any document metadata. An additional slice of
+// fields, exprs, may optionally be provided to contain any derived expressions
+// requested by the developer.
+func loadDoc(dst interface{}, src *pb.Document, exprs []*pb.Field) (err error) {
+ fields, err := protoToFields(src.Field)
+ if err != nil {
+ return err
+ }
+ facets, err := protoToFacets(src.Facet)
+ if err != nil {
+ return err
+ }
+ if len(exprs) > 0 {
+ exprFields, err := protoToFields(exprs)
+ if err != nil {
+ return err
+ }
+ // Mark each field as derived.
+ for i := range exprFields {
+ exprFields[i].Derived = true
+ }
+ fields = append(fields, exprFields...)
+ }
+ meta := &DocumentMetadata{
+ Rank: int(src.GetOrderId()),
+ Facets: facets,
+ }
+ switch x := dst.(type) {
+ case FieldLoadSaver:
+ return x.Load(fields, meta)
+ default:
+ return loadStructWithMeta(dst, fields, meta)
+ }
+}
+
+func protoToFields(fields []*pb.Field) ([]Field, error) {
+ dst := make([]Field, 0, len(fields))
+ for _, field := range fields {
+ fieldValue := field.GetValue()
+ f := Field{
+ Name: field.GetName(),
+ }
+ switch fieldValue.GetType() {
+ case pb.FieldValue_TEXT:
+ f.Value = fieldValue.GetStringValue()
+ f.Language = fieldValue.GetLanguage()
+ case pb.FieldValue_ATOM:
+ f.Value = Atom(fieldValue.GetStringValue())
+ case pb.FieldValue_HTML:
+ f.Value = HTML(fieldValue.GetStringValue())
+ f.Language = fieldValue.GetLanguage()
+ case pb.FieldValue_DATE:
+ sv := fieldValue.GetStringValue()
+ millis, err := strconv.ParseInt(sv, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("search: internal error: bad time.Time encoding %q: %v", sv, err)
+ }
+ f.Value = time.Unix(0, millis*1e6)
+ case pb.FieldValue_NUMBER:
+ sv := fieldValue.GetStringValue()
+ x, err := strconv.ParseFloat(sv, 64)
+ if err != nil {
+ return nil, err
+ }
+ f.Value = x
+ case pb.FieldValue_GEO:
+ geoValue := fieldValue.GetGeo()
+ geoPoint := appengine.GeoPoint{geoValue.GetLat(), geoValue.GetLng()}
+ if !geoPoint.Valid() {
+ return nil, fmt.Errorf("search: internal error: invalid GeoPoint encoding: %v", geoPoint)
+ }
+ f.Value = geoPoint
+ default:
+ return nil, fmt.Errorf("search: internal error: unknown data type %s", fieldValue.GetType())
+ }
+ dst = append(dst, f)
+ }
+ return dst, nil
+}
+
+func protoToFacets(facets []*pb.Facet) ([]Facet, error) {
+ if len(facets) == 0 {
+ return nil, nil
+ }
+ dst := make([]Facet, 0, len(facets))
+ for _, facet := range facets {
+ facetValue := facet.GetValue()
+ f := Facet{
+ Name: facet.GetName(),
+ }
+ switch facetValue.GetType() {
+ case pb.FacetValue_ATOM:
+ f.Value = Atom(facetValue.GetStringValue())
+ case pb.FacetValue_NUMBER:
+ sv := facetValue.GetStringValue()
+ x, err := strconv.ParseFloat(sv, 64)
+ if err != nil {
+ return nil, err
+ }
+ f.Value = x
+ default:
+ return nil, fmt.Errorf("search: internal error: unknown data type %s", facetValue.GetType())
+ }
+ dst = append(dst, f)
+ }
+ return dst, nil
+}
+
+func namespaceMod(m proto.Message, namespace string) {
+ set := func(s **string) {
+ if *s == nil {
+ *s = &namespace
+ }
+ }
+ switch m := m.(type) {
+ case *pb.IndexDocumentRequest:
+ set(&m.Params.IndexSpec.Namespace)
+ case *pb.ListDocumentsRequest:
+ set(&m.Params.IndexSpec.Namespace)
+ case *pb.DeleteDocumentRequest:
+ set(&m.Params.IndexSpec.Namespace)
+ case *pb.SearchRequest:
+ set(&m.Params.IndexSpec.Namespace)
+ }
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("search", pb.SearchServiceError_ErrorCode_name)
+ internal.NamespaceMods["search"] = namespaceMod
+}
diff --git a/vendor/google.golang.org/appengine/search/search_test.go b/vendor/google.golang.org/appengine/search/search_test.go
new file mode 100644
index 0000000..f7c339b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/search_test.go
@@ -0,0 +1,1000 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/search"
+)
+
+type TestDoc struct {
+ String string
+ Atom Atom
+ HTML HTML
+ Float float64
+ Location appengine.GeoPoint
+ Time time.Time
+}
+
+type FieldListWithMeta struct {
+ Fields FieldList
+ Meta *DocumentMetadata
+}
+
+func (f *FieldListWithMeta) Load(fields []Field, meta *DocumentMetadata) error {
+ f.Meta = meta
+ return f.Fields.Load(fields, nil)
+}
+
+func (f *FieldListWithMeta) Save() ([]Field, *DocumentMetadata, error) {
+ fields, _, err := f.Fields.Save()
+ return fields, f.Meta, err
+}
+
+// Assert that FieldListWithMeta satisfies FieldLoadSaver
+var _ FieldLoadSaver = &FieldListWithMeta{}
+
+var (
+ float = 3.14159
+ floatOut = "3.14159e+00"
+ latitude = 37.3894
+ longitude = 122.0819
+ testGeo = appengine.GeoPoint{latitude, longitude}
+ testString = "foo<b>bar"
+ testTime = time.Unix(1337324400, 0)
+ testTimeOut = "1337324400000"
+ searchMeta = &DocumentMetadata{
+ Rank: 42,
+ }
+ searchDoc = TestDoc{
+ String: testString,
+ Atom: Atom(testString),
+ HTML: HTML(testString),
+ Float: float,
+ Location: testGeo,
+ Time: testTime,
+ }
+ searchFields = FieldList{
+ Field{Name: "String", Value: testString},
+ Field{Name: "Atom", Value: Atom(testString)},
+ Field{Name: "HTML", Value: HTML(testString)},
+ Field{Name: "Float", Value: float},
+ Field{Name: "Location", Value: testGeo},
+ Field{Name: "Time", Value: testTime},
+ }
+ // searchFieldsWithLang is a copy of the searchFields with the Language field
+ // set on text/HTML Fields.
+ searchFieldsWithLang = FieldList{}
+ protoFields = []*pb.Field{
+ newStringValueField("String", testString, pb.FieldValue_TEXT),
+ newStringValueField("Atom", testString, pb.FieldValue_ATOM),
+ newStringValueField("HTML", testString, pb.FieldValue_HTML),
+ newStringValueField("Float", floatOut, pb.FieldValue_NUMBER),
+ {
+ Name: proto.String("Location"),
+ Value: &pb.FieldValue{
+ Geo: &pb.FieldValue_Geo{
+ Lat: proto.Float64(latitude),
+ Lng: proto.Float64(longitude),
+ },
+ Type: pb.FieldValue_GEO.Enum(),
+ },
+ },
+ newStringValueField("Time", testTimeOut, pb.FieldValue_DATE),
+ }
+)
+
+func init() {
+ for _, f := range searchFields {
+ if f.Name == "String" || f.Name == "HTML" {
+ f.Language = "en"
+ }
+ searchFieldsWithLang = append(searchFieldsWithLang, f)
+ }
+}
+
+func newStringValueField(name, value string, valueType pb.FieldValue_ContentType) *pb.Field {
+ return &pb.Field{
+ Name: proto.String(name),
+ Value: &pb.FieldValue{
+ StringValue: proto.String(value),
+ Type: valueType.Enum(),
+ },
+ }
+}
+
+func newFacet(name, value string, valueType pb.FacetValue_ContentType) *pb.Facet {
+ return &pb.Facet{
+ Name: proto.String(name),
+ Value: &pb.FacetValue{
+ StringValue: proto.String(value),
+ Type: valueType.Enum(),
+ },
+ }
+}
+
+func TestValidIndexNameOrDocID(t *testing.T) {
+ testCases := []struct {
+ s string
+ want bool
+ }{
+ {"", true},
+ {"!", false},
+ {"$", true},
+ {"!bad", false},
+ {"good!", true},
+ {"alsoGood", true},
+ {"has spaces", false},
+ {"is_inva\xffid_UTF-8", false},
+ {"is_non-ASCïI", false},
+ {"underscores_are_ok", true},
+ }
+ for _, tc := range testCases {
+ if got := validIndexNameOrDocID(tc.s); got != tc.want {
+ t.Errorf("%q: got %v, want %v", tc.s, got, tc.want)
+ }
+ }
+}
+
+func TestLoadDoc(t *testing.T) {
+ got, want := TestDoc{}, searchDoc
+ if err := loadDoc(&got, &pb.Document{Field: protoFields}, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if got != want {
+ t.Errorf("loadDoc: got %v, wanted %v", got, want)
+ }
+}
+
+func TestSaveDoc(t *testing.T) {
+ got, err := saveDoc(&searchDoc)
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ want := protoFields
+ if !reflect.DeepEqual(got.Field, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestLoadFieldList(t *testing.T) {
+ var got FieldList
+ want := searchFieldsWithLang
+ if err := loadDoc(&got, &pb.Document{Field: protoFields}, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestLangFields(t *testing.T) {
+ fl := &FieldList{
+ {Name: "Foo", Value: "I am English", Language: "en"},
+ {Name: "Bar", Value: "私は日本人だ", Language: "jp"},
+ }
+ var got FieldList
+ doc, err := saveDoc(fl)
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ if err := loadDoc(&got, doc, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if want := fl; !reflect.DeepEqual(&got, want) {
+ t.Errorf("got %v\nwant %v", got, want)
+ }
+}
+
+func TestSaveFieldList(t *testing.T) {
+ got, err := saveDoc(&searchFields)
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ want := protoFields
+ if !reflect.DeepEqual(got.Field, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestLoadFieldAndExprList(t *testing.T) {
+ var got, want FieldList
+ for i, f := range searchFieldsWithLang {
+ f.Derived = (i >= 2) // First 2 elements are "fields", next are "expressions".
+ want = append(want, f)
+ }
+ doc, expr := &pb.Document{Field: protoFields[:2]}, protoFields[2:]
+ if err := loadDoc(&got, doc, expr); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("got %v\nwant %v", got, want)
+ }
+}
+
+func TestLoadMeta(t *testing.T) {
+ var got FieldListWithMeta
+ want := FieldListWithMeta{
+ Meta: searchMeta,
+ Fields: searchFieldsWithLang,
+ }
+ doc := &pb.Document{
+ Field: protoFields,
+ OrderId: proto.Int32(42),
+ }
+ if err := loadDoc(&got, doc, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestSaveMeta(t *testing.T) {
+ got, err := saveDoc(&FieldListWithMeta{
+ Meta: searchMeta,
+ Fields: searchFields,
+ })
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ want := &pb.Document{
+ Field: protoFields,
+ OrderId: proto.Int32(42),
+ }
+ if !proto.Equal(got, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestLoadSaveWithStruct(t *testing.T) {
+ type gopher struct {
+ Name string
+ Info string `search:"about"`
+ Legs float64 `search:",facet"`
+ Fuzz Atom `search:"Fur,facet"`
+ }
+
+ doc := gopher{"Gopher", "Likes slide rules.", 4, Atom("furry")}
+ pb := &pb.Document{
+ Field: []*pb.Field{
+ newStringValueField("Name", "Gopher", pb.FieldValue_TEXT),
+ newStringValueField("about", "Likes slide rules.", pb.FieldValue_TEXT),
+ },
+ Facet: []*pb.Facet{
+ newFacet("Legs", "4e+00", pb.FacetValue_NUMBER),
+ newFacet("Fur", "furry", pb.FacetValue_ATOM),
+ },
+ }
+
+ var gotDoc gopher
+ if err := loadDoc(&gotDoc, pb, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if !reflect.DeepEqual(gotDoc, doc) {
+ t.Errorf("loading doc\ngot %v\nwant %v", gotDoc, doc)
+ }
+
+ gotPB, err := saveDoc(&doc)
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ gotPB.OrderId = nil // Don't test: it's time dependent.
+ if !proto.Equal(gotPB, pb) {
+ t.Errorf("saving doc\ngot %v\nwant %v", gotPB, pb)
+ }
+}
+
+func TestValidFieldNames(t *testing.T) {
+ testCases := []struct {
+ name string
+ valid bool
+ }{
+ {"Normal", true},
+ {"Also_OK_123", true},
+ {"Not so great", false},
+ {"lower_case", true},
+ {"Exclaim!", false},
+ {"Hello세상아 안녕", false},
+ {"", false},
+ {"Hεllo", false},
+ {strings.Repeat("A", 500), true},
+ {strings.Repeat("A", 501), false},
+ }
+
+ for _, tc := range testCases {
+ _, err := saveDoc(&FieldList{
+ Field{Name: tc.name, Value: "val"},
+ })
+ if err != nil && !strings.Contains(err.Error(), "invalid field name") {
+ t.Errorf("unexpected err %q for field name %q", err, tc.name)
+ }
+ if (err == nil) != tc.valid {
+ t.Errorf("field %q: expected valid %t, received err %v", tc.name, tc.valid, err)
+ }
+ }
+}
+
+func TestValidLangs(t *testing.T) {
+ testCases := []struct {
+ field Field
+ valid bool
+ }{
+ {Field{Name: "Foo", Value: "String", Language: ""}, true},
+ {Field{Name: "Foo", Value: "String", Language: "en"}, true},
+ {Field{Name: "Foo", Value: "String", Language: "aussie"}, false},
+ {Field{Name: "Foo", Value: "String", Language: "12"}, false},
+ {Field{Name: "Foo", Value: HTML("String"), Language: "en"}, true},
+ {Field{Name: "Foo", Value: Atom("String"), Language: "en"}, false},
+ {Field{Name: "Foo", Value: 42, Language: "en"}, false},
+ }
+
+ for _, tt := range testCases {
+ _, err := saveDoc(&FieldList{tt.field})
+ if (err == nil) != tt.valid {
+ t.Errorf("Field %v, got error %v, wanted valid %t", tt.field, err, tt.valid)
+ }
+ }
+}
+
+func TestDuplicateFields(t *testing.T) {
+ testCases := []struct {
+ desc string
+ fields FieldList
+ errMsg string // Non-empty if we expect an error
+ }{
+ {
+ desc: "multi string",
+ fields: FieldList{{Name: "FieldA", Value: "val1"}, {Name: "FieldA", Value: "val2"}, {Name: "FieldA", Value: "val3"}},
+ },
+ {
+ desc: "multi atom",
+ fields: FieldList{{Name: "FieldA", Value: Atom("val1")}, {Name: "FieldA", Value: Atom("val2")}, {Name: "FieldA", Value: Atom("val3")}},
+ },
+ {
+ desc: "mixed",
+ fields: FieldList{{Name: "FieldA", Value: testString}, {Name: "FieldA", Value: testTime}, {Name: "FieldA", Value: float}},
+ },
+ {
+ desc: "multi time",
+ fields: FieldList{{Name: "FieldA", Value: testTime}, {Name: "FieldA", Value: testTime}},
+ errMsg: `duplicate time field "FieldA"`,
+ },
+ {
+ desc: "multi num",
+ fields: FieldList{{Name: "FieldA", Value: float}, {Name: "FieldA", Value: float}},
+ errMsg: `duplicate numeric field "FieldA"`,
+ },
+ }
+ for _, tc := range testCases {
+ _, err := saveDoc(&tc.fields)
+ if (err == nil) != (tc.errMsg == "") || (err != nil && !strings.Contains(err.Error(), tc.errMsg)) {
+ t.Errorf("%s: got err %v, wanted %q", tc.desc, err, tc.errMsg)
+ }
+ }
+}
+
+func TestLoadErrFieldMismatch(t *testing.T) {
+ testCases := []struct {
+ desc string
+ dst interface{}
+ src []*pb.Field
+ err error
+ }{
+ {
+ desc: "missing",
+ dst: &struct{ One string }{},
+ src: []*pb.Field{newStringValueField("Two", "woop!", pb.FieldValue_TEXT)},
+ err: &ErrFieldMismatch{
+ FieldName: "Two",
+ Reason: "no such struct field",
+ },
+ },
+ {
+ desc: "wrong type",
+ dst: &struct{ Num float64 }{},
+ src: []*pb.Field{newStringValueField("Num", "woop!", pb.FieldValue_TEXT)},
+ err: &ErrFieldMismatch{
+ FieldName: "Num",
+ Reason: "type mismatch: float64 for string data",
+ },
+ },
+ {
+ desc: "unsettable",
+ dst: &struct{ lower string }{},
+ src: []*pb.Field{newStringValueField("lower", "woop!", pb.FieldValue_TEXT)},
+ err: &ErrFieldMismatch{
+ FieldName: "lower",
+ Reason: "cannot set struct field",
+ },
+ },
+ }
+ for _, tc := range testCases {
+ err := loadDoc(tc.dst, &pb.Document{Field: tc.src}, nil)
+ if !reflect.DeepEqual(err, tc.err) {
+ t.Errorf("%s, got err %v, wanted %v", tc.desc, err, tc.err)
+ }
+ }
+}
+
+func TestLimit(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+ c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, res *pb.SearchResponse) error {
+ limit := 20 // Default per page.
+ if req.Params.Limit != nil {
+ limit = int(*req.Params.Limit)
+ }
+ res.Status = &pb.RequestStatus{Code: pb.SearchServiceError_OK.Enum()}
+ res.MatchedCount = proto.Int64(int64(limit))
+ for i := 0; i < limit; i++ {
+ res.Result = append(res.Result, &pb.SearchResult{Document: &pb.Document{}})
+ res.Cursor = proto.String("moreresults")
+ }
+ return nil
+ })
+
+ const maxDocs = 500 // Limit maximum number of docs.
+ testCases := []struct {
+ limit, want int
+ }{
+ {limit: 0, want: maxDocs},
+ {limit: 42, want: 42},
+ {limit: 100, want: 100},
+ {limit: 1000, want: maxDocs},
+ }
+
+ for _, tt := range testCases {
+ it := index.Search(c, "gopher", &SearchOptions{Limit: tt.limit, IDsOnly: true})
+ count := 0
+ for ; count < maxDocs; count++ {
+ _, err := it.Next(nil)
+ if err == Done {
+ break
+ }
+ if err != nil {
+ t.Fatalf("err after %d: %v", count, err)
+ }
+ }
+ if count != tt.want {
+ t.Errorf("got %d results, expected %d", count, tt.want)
+ }
+ }
+}
+
+func TestPut(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+ expectedIn := &pb.IndexDocumentRequest{
+ Params: &pb.IndexDocumentParams{
+ Document: []*pb.Document{
+ {Field: protoFields, OrderId: proto.Int32(42)},
+ },
+ IndexSpec: &pb.IndexSpec{
+ Name: proto.String("Doc"),
+ },
+ },
+ }
+ if !proto.Equal(in, expectedIn) {
+ return fmt.Errorf("unsupported argument:\ngot %v\nwant %v", in, expectedIn)
+ }
+ *out = pb.IndexDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ },
+ DocId: []string{
+ "doc_id",
+ },
+ }
+ return nil
+ })
+
+ id, err := index.Put(c, "", &FieldListWithMeta{
+ Meta: searchMeta,
+ Fields: searchFields,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want := "doc_id"; id != want {
+ t.Errorf("Got doc ID %q, want %q", id, want)
+ }
+}
+
+func TestPutAutoOrderID(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+ if len(in.Params.GetDocument()) < 1 {
+ return fmt.Errorf("expected at least one Document, got %v", in)
+ }
+ got, want := in.Params.Document[0].GetOrderId(), int32(time.Since(orderIDEpoch).Seconds())
+ if d := got - want; -5 > d || d > 5 {
+ return fmt.Errorf("got OrderId %d, want near %d", got, want)
+ }
+ *out = pb.IndexDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ },
+ DocId: []string{
+ "doc_id",
+ },
+ }
+ return nil
+ })
+
+ if _, err := index.Put(c, "", &searchFields); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestPutBadStatus(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(_ *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+ *out = pb.IndexDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {
+ Code: pb.SearchServiceError_INVALID_REQUEST.Enum(),
+ ErrorDetail: proto.String("insufficient gophers"),
+ },
+ },
+ }
+ return nil
+ })
+
+ wantErr := "search: INVALID_REQUEST: insufficient gophers"
+ if _, err := index.Put(c, "", &searchFields); err == nil || err.Error() != wantErr {
+ t.Fatalf("Put: got %v error, want %q", err, wantErr)
+ }
+}
+
+func TestSortOptions(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ noErr := errors.New("") // Sentinel err to return to prevent sending request.
+
+ testCases := []struct {
+ desc string
+ sort *SortOptions
+ wantSort []*pb.SortSpec
+ wantScorer *pb.ScorerSpec
+ wantErr string
+ }{
+ {
+ desc: "No SortOptions",
+ },
+ {
+ desc: "Basic",
+ sort: &SortOptions{
+ Expressions: []SortExpression{
+ {Expr: "dog"},
+ {Expr: "cat", Reverse: true},
+ {Expr: "gopher", Default: "blue"},
+ {Expr: "fish", Default: 2.0},
+ },
+ Limit: 42,
+ Scorer: MatchScorer,
+ },
+ wantSort: []*pb.SortSpec{
+ {SortExpression: proto.String("dog")},
+ {SortExpression: proto.String("cat"), SortDescending: proto.Bool(false)},
+ {SortExpression: proto.String("gopher"), DefaultValueText: proto.String("blue")},
+ {SortExpression: proto.String("fish"), DefaultValueNumeric: proto.Float64(2)},
+ },
+ wantScorer: &pb.ScorerSpec{
+ Limit: proto.Int32(42),
+ Scorer: pb.ScorerSpec_MATCH_SCORER.Enum(),
+ },
+ },
+ {
+ desc: "Bad expression default",
+ sort: &SortOptions{
+ Expressions: []SortExpression{
+ {Expr: "dog", Default: true},
+ },
+ },
+ wantErr: `search: invalid Default type bool for expression "dog"`,
+ },
+ {
+ desc: "RescoringMatchScorer",
+ sort: &SortOptions{Scorer: RescoringMatchScorer},
+ wantScorer: &pb.ScorerSpec{Scorer: pb.ScorerSpec_RESCORING_MATCH_SCORER.Enum()},
+ },
+ }
+
+ for _, tt := range testCases {
+ c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error {
+ params := req.Params
+ if !reflect.DeepEqual(params.SortSpec, tt.wantSort) {
+ t.Errorf("%s: params.SortSpec=%v; want %v", tt.desc, params.SortSpec, tt.wantSort)
+ }
+ if !reflect.DeepEqual(params.ScorerSpec, tt.wantScorer) {
+ t.Errorf("%s: params.ScorerSpec=%v; want %v", tt.desc, params.ScorerSpec, tt.wantScorer)
+ }
+ return noErr // Always return some error to prevent response parsing.
+ })
+
+ it := index.Search(c, "gopher", &SearchOptions{Sort: tt.sort})
+ _, err := it.Next(nil)
+ if err == nil {
+ t.Fatalf("%s: err==nil; should not happen", tt.desc)
+ }
+ if err.Error() != tt.wantErr {
+ t.Errorf("%s: got error %q, want %q", tt.desc, err, tt.wantErr)
+ }
+ }
+}
+
+func TestFieldSpec(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ errFoo := errors.New("foo") // sentinel error when there isn't one.
+
+ testCases := []struct {
+ desc string
+ opts *SearchOptions
+ want *pb.FieldSpec
+ }{
+ {
+ desc: "No options",
+ want: &pb.FieldSpec{},
+ },
+ {
+ desc: "Fields",
+ opts: &SearchOptions{
+ Fields: []string{"one", "two"},
+ },
+ want: &pb.FieldSpec{
+ Name: []string{"one", "two"},
+ },
+ },
+ {
+ desc: "Expressions",
+ opts: &SearchOptions{
+ Expressions: []FieldExpression{
+ {Name: "one", Expr: "price * quantity"},
+ {Name: "two", Expr: "min(daily_use, 10) * rate"},
+ },
+ },
+ want: &pb.FieldSpec{
+ Expression: []*pb.FieldSpec_Expression{
+ {Name: proto.String("one"), Expression: proto.String("price * quantity")},
+ {Name: proto.String("two"), Expression: proto.String("min(daily_use, 10) * rate")},
+ },
+ },
+ },
+ }
+
+ for _, tt := range testCases {
+ c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error {
+ params := req.Params
+ if !reflect.DeepEqual(params.FieldSpec, tt.want) {
+ t.Errorf("%s: params.FieldSpec=%v; want %v", tt.desc, params.FieldSpec, tt.want)
+ }
+ return errFoo // Always return some error to prevent response parsing.
+ })
+
+ it := index.Search(c, "gopher", tt.opts)
+ if _, err := it.Next(nil); err != errFoo {
+ t.Fatalf("%s: got error %v; want %v", tt.desc, err, errFoo)
+ }
+ }
+}
+
+func TestBasicSearchOpts(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ noErr := errors.New("") // Sentinel err to return to prevent sending request.
+
+ testCases := []struct {
+ desc string
+ facetOpts []FacetSearchOption
+ cursor Cursor
+ offset int
+ countAccuracy int
+ want *pb.SearchParams
+ wantErr string
+ }{
+ {
+ desc: "No options",
+ want: &pb.SearchParams{},
+ },
+ {
+ desc: "Default auto discovery",
+ facetOpts: []FacetSearchOption{
+ AutoFacetDiscovery(0, 0),
+ },
+ want: &pb.SearchParams{
+ AutoDiscoverFacetCount: proto.Int32(10),
+ },
+ },
+ {
+ desc: "Auto discovery",
+ facetOpts: []FacetSearchOption{
+ AutoFacetDiscovery(7, 12),
+ },
+ want: &pb.SearchParams{
+ AutoDiscoverFacetCount: proto.Int32(7),
+ FacetAutoDetectParam: &pb.FacetAutoDetectParam{
+ ValueLimit: proto.Int32(12),
+ },
+ },
+ },
+ {
+ desc: "Param Depth",
+ facetOpts: []FacetSearchOption{
+ AutoFacetDiscovery(7, 12),
+ },
+ want: &pb.SearchParams{
+ AutoDiscoverFacetCount: proto.Int32(7),
+ FacetAutoDetectParam: &pb.FacetAutoDetectParam{
+ ValueLimit: proto.Int32(12),
+ },
+ },
+ },
+ {
+ desc: "Doc depth",
+ facetOpts: []FacetSearchOption{
+ FacetDocumentDepth(123),
+ },
+ want: &pb.SearchParams{
+ FacetDepth: proto.Int32(123),
+ },
+ },
+ {
+ desc: "Facet discovery",
+ facetOpts: []FacetSearchOption{
+ FacetDiscovery("colour"),
+ FacetDiscovery("size", Atom("M"), Atom("L")),
+ FacetDiscovery("price", LessThan(7), Range{7, 14}, AtLeast(14)),
+ },
+ want: &pb.SearchParams{
+ IncludeFacet: []*pb.FacetRequest{
+ {Name: proto.String("colour")},
+ {Name: proto.String("size"), Params: &pb.FacetRequestParam{
+ ValueConstraint: []string{"M", "L"},
+ }},
+ {Name: proto.String("price"), Params: &pb.FacetRequestParam{
+ Range: []*pb.FacetRange{
+ {End: proto.String("7e+00")},
+ {Start: proto.String("7e+00"), End: proto.String("1.4e+01")},
+ {Start: proto.String("1.4e+01")},
+ },
+ }},
+ },
+ },
+ },
+ {
+ desc: "Facet discovery - bad value",
+ facetOpts: []FacetSearchOption{
+ FacetDiscovery("colour", true),
+ },
+ wantErr: "bad FacetSearchOption: unsupported value type bool",
+ },
+ {
+ desc: "Facet discovery - mix value types",
+ facetOpts: []FacetSearchOption{
+ FacetDiscovery("colour", Atom("blue"), AtLeast(7)),
+ },
+ wantErr: "bad FacetSearchOption: values must all be Atom, or must all be Range",
+ },
+ {
+ desc: "Facet discovery - invalid range",
+ facetOpts: []FacetSearchOption{
+ FacetDiscovery("colour", Range{negInf, posInf}),
+ },
+ wantErr: "bad FacetSearchOption: invalid range: either Start or End must be finite",
+ },
+ {
+ desc: "Cursor",
+ cursor: Cursor("mycursor"),
+ want: &pb.SearchParams{
+ Cursor: proto.String("mycursor"),
+ },
+ },
+ {
+ desc: "Offset",
+ offset: 121,
+ want: &pb.SearchParams{
+ Offset: proto.Int32(121),
+ },
+ },
+ {
+ desc: "Cursor and Offset set",
+ cursor: Cursor("mycursor"),
+ offset: 121,
+ wantErr: "at most one of Cursor and Offset may be specified",
+ },
+ {
+ desc: "Count accuracy",
+ countAccuracy: 100,
+ want: &pb.SearchParams{
+ MatchedCountAccuracy: proto.Int32(100),
+ },
+ },
+ }
+
+ for _, tt := range testCases {
+ c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error {
+ if tt.want == nil {
+ t.Errorf("%s: expected call to fail", tt.desc)
+ return nil
+ }
+ // Set default fields.
+ tt.want.Query = proto.String("gopher")
+ tt.want.IndexSpec = &pb.IndexSpec{Name: proto.String("Doc")}
+ tt.want.CursorType = pb.SearchParams_PER_RESULT.Enum()
+ tt.want.FieldSpec = &pb.FieldSpec{}
+ if got := req.Params; !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("%s: params=%v; want %v", tt.desc, got, tt.want)
+ }
+ return noErr // Always return some error to prevent response parsing.
+ })
+
+ it := index.Search(c, "gopher", &SearchOptions{
+ Facets: tt.facetOpts,
+ Cursor: tt.cursor,
+ Offset: tt.offset,
+ CountAccuracy: tt.countAccuracy,
+ })
+ _, err := it.Next(nil)
+ if err == nil {
+ t.Fatalf("%s: err==nil; should not happen", tt.desc)
+ }
+ if err.Error() != tt.wantErr {
+ t.Errorf("%s: got error %q, want %q", tt.desc, err, tt.wantErr)
+ }
+ }
+}
+
+func TestFacetRefinements(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ noErr := errors.New("") // Sentinel err to return to prevent sending request.
+
+ testCases := []struct {
+ desc string
+ refine []Facet
+ want []*pb.FacetRefinement
+ wantErr string
+ }{
+ {
+ desc: "No refinements",
+ },
+ {
+ desc: "Basic",
+ refine: []Facet{
+ {Name: "fur", Value: Atom("fluffy")},
+ {Name: "age", Value: LessThan(123)},
+ {Name: "age", Value: AtLeast(0)},
+ {Name: "legs", Value: Range{Start: 3, End: 5}},
+ },
+ want: []*pb.FacetRefinement{
+ {Name: proto.String("fur"), Value: proto.String("fluffy")},
+ {Name: proto.String("age"), Range: &pb.FacetRefinement_Range{End: proto.String("1.23e+02")}},
+ {Name: proto.String("age"), Range: &pb.FacetRefinement_Range{Start: proto.String("0e+00")}},
+ {Name: proto.String("legs"), Range: &pb.FacetRefinement_Range{Start: proto.String("3e+00"), End: proto.String("5e+00")}},
+ },
+ },
+ {
+ desc: "Infinite range",
+ refine: []Facet{
+ {Name: "age", Value: Range{Start: negInf, End: posInf}},
+ },
+ wantErr: `search: refinement for facet "age": either Start or End must be finite`,
+ },
+ {
+ desc: "Bad End value in range",
+ refine: []Facet{
+ {Name: "age", Value: LessThan(2147483648)},
+ },
+ wantErr: `search: refinement for facet "age": invalid value for End`,
+ },
+ {
+ desc: "Bad Start value in range",
+ refine: []Facet{
+ {Name: "age", Value: AtLeast(-2147483649)},
+ },
+ wantErr: `search: refinement for facet "age": invalid value for Start`,
+ },
+ {
+ desc: "Unknown value type",
+ refine: []Facet{
+ {Name: "age", Value: "you can't use strings!"},
+ },
+ wantErr: `search: unsupported refinement for facet "age" of type string`,
+ },
+ }
+
+ for _, tt := range testCases {
+ c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error {
+ if got := req.Params.FacetRefinement; !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("%s: params.FacetRefinement=%v; want %v", tt.desc, got, tt.want)
+ }
+ return noErr // Always return some error to prevent response parsing.
+ })
+
+ it := index.Search(c, "gopher", &SearchOptions{Refinements: tt.refine})
+ _, err := it.Next(nil)
+ if err == nil {
+ t.Fatalf("%s: err==nil; should not happen", tt.desc)
+ }
+ if err.Error() != tt.wantErr {
+ t.Errorf("%s: got error %q, want %q", tt.desc, err, tt.wantErr)
+ }
+ }
+}
+
+func TestNamespaceResetting(t *testing.T) {
+ namec := make(chan *string, 1)
+ c0 := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(req *pb.IndexDocumentRequest, res *pb.IndexDocumentResponse) error {
+ namec <- req.Params.IndexSpec.Namespace
+ return fmt.Errorf("RPC error")
+ })
+
+ // Check that wrapping c0 in a namespace twice works correctly.
+ c1, err := appengine.Namespace(c0, "A")
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+ c2, err := appengine.Namespace(c1, "") // should act as the original context
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+
+ i := (&Index{})
+
+ i.Put(c0, "something", &searchDoc)
+ if ns := <-namec; ns != nil {
+ t.Errorf(`Put with c0: ns = %q, want nil`, *ns)
+ }
+
+ i.Put(c1, "something", &searchDoc)
+ if ns := <-namec; ns == nil {
+ t.Error(`Put with c1: ns = nil, want "A"`)
+ } else if *ns != "A" {
+ t.Errorf(`Put with c1: ns = %q, want "A"`, *ns)
+ }
+
+ i.Put(c2, "something", &searchDoc)
+ if ns := <-namec; ns != nil {
+ t.Errorf(`Put with c2: ns = %q, want nil`, *ns)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/search/struct.go b/vendor/google.golang.org/appengine/search/struct.go
new file mode 100644
index 0000000..e73d2f2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/struct.go
@@ -0,0 +1,251 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+type ErrFieldMismatch struct {
+ FieldName string
+ Reason string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+ return fmt.Sprintf("search: cannot load field %q: %s", e.FieldName, e.Reason)
+}
+
+// ErrFacetMismatch is returned when a facet is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. StructType is the type of the struct
+// pointed to by the destination argument passed to Iterator.Next.
+type ErrFacetMismatch struct {
+ StructType reflect.Type
+ FacetName string
+ Reason string
+}
+
+func (e *ErrFacetMismatch) Error() string {
+ return fmt.Sprintf("search: cannot load facet %q into a %q: %s", e.FacetName, e.StructType, e.Reason)
+}
+
+// structCodec defines how to convert a given struct to/from a search document.
+type structCodec struct {
+ // byIndex returns the struct tag for the i'th struct field.
+ byIndex []structTag
+
+ // fieldByName returns the index of the struct field for the given field name.
+ fieldByName map[string]int
+
+ // facetByName returns the index of the struct field for the given facet name.
+ facetByName map[string]int
+}
+
+// structTag holds a structured version of each struct field's parsed tag.
+type structTag struct {
+ name string
+ facet bool
+ ignore bool
+}
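+
+// Illustrative "search" struct tag forms, summarized from the parsing in
+// loadCodec below and from the tests; a sketch of the recognized shapes, not
+// an exhaustive specification:
+//
+//    Name string  `search:"about"`      // stored under the name "about"
+//    Legs float64 `search:",facet"`     // keeps the Go field name, treated as a facet
+//    Fuzz Atom    `search:"Fur,facet"`  // renamed to "Fur", treated as a facet
+//    Temp string  `search:"-"`          // ignored entirely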
+
+var (
+ codecsMu sync.RWMutex
+ codecs = map[reflect.Type]*structCodec{}
+)
+
+func loadCodec(t reflect.Type) (*structCodec, error) {
+ codecsMu.RLock()
+ codec, ok := codecs[t]
+ codecsMu.RUnlock()
+ if ok {
+ return codec, nil
+ }
+
+ codecsMu.Lock()
+ defer codecsMu.Unlock()
+ if codec, ok := codecs[t]; ok {
+ return codec, nil
+ }
+
+ codec = &structCodec{
+ fieldByName: make(map[string]int),
+ facetByName: make(map[string]int),
+ }
+
+ for i, I := 0, t.NumField(); i < I; i++ {
+ f := t.Field(i)
+ name, opts := f.Tag.Get("search"), ""
+ if i := strings.Index(name, ","); i != -1 {
+ name, opts = name[:i], name[i+1:]
+ }
+ ignore := false
+ if name == "-" {
+ ignore = true
+ } else if name == "" {
+ name = f.Name
+ } else if !validFieldName(name) {
+ return nil, fmt.Errorf("search: struct tag has invalid field name: %q", name)
+ }
+ facet := opts == "facet"
+ codec.byIndex = append(codec.byIndex, structTag{name: name, facet: facet, ignore: ignore})
+ if facet {
+ codec.facetByName[name] = i
+ } else {
+ codec.fieldByName[name] = i
+ }
+ }
+
+ codecs[t] = codec
+ return codec, nil
+}
+
+// structFLS adapts a struct to be a FieldLoadSaver.
+type structFLS struct {
+ v reflect.Value
+ codec *structCodec
+}
+
+func (s structFLS) Load(fields []Field, meta *DocumentMetadata) error {
+ var err error
+ for _, field := range fields {
+ i, ok := s.codec.fieldByName[field.Name]
+ if !ok {
+ // Note the error, but keep going.
+ err = &ErrFieldMismatch{
+ FieldName: field.Name,
+ Reason: "no such struct field",
+ }
+ continue
+ }
+ f := s.v.Field(i)
+ if !f.CanSet() {
+ // Note the error, but keep going.
+ err = &ErrFieldMismatch{
+ FieldName: field.Name,
+ Reason: "cannot set struct field",
+ }
+ continue
+ }
+ v := reflect.ValueOf(field.Value)
+ if ft, vt := f.Type(), v.Type(); ft != vt {
+ err = &ErrFieldMismatch{
+ FieldName: field.Name,
+ Reason: fmt.Sprintf("type mismatch: %v for %v data", ft, vt),
+ }
+ continue
+ }
+ f.Set(v)
+ }
+ if meta == nil {
+ return err
+ }
+ for _, facet := range meta.Facets {
+ i, ok := s.codec.facetByName[facet.Name]
+ if !ok {
+ // Note the error, but keep going.
+ if err == nil {
+ err = &ErrFacetMismatch{
+ StructType: s.v.Type(),
+ FacetName: facet.Name,
+ Reason: "no matching field found",
+ }
+ }
+ continue
+ }
+ f := s.v.Field(i)
+ if !f.CanSet() {
+ // Note the error, but keep going.
+ if err == nil {
+ err = &ErrFacetMismatch{
+ StructType: s.v.Type(),
+ FacetName: facet.Name,
+ Reason: "unable to set unexported field of struct",
+ }
+ }
+ continue
+ }
+ v := reflect.ValueOf(facet.Value)
+ if ft, vt := f.Type(), v.Type(); ft != vt {
+ // Note the error, but keep going; never call f.Set with a mismatched type.
+ if err == nil {
+ err = &ErrFacetMismatch{
+ StructType: s.v.Type(),
+ FacetName: facet.Name,
+ Reason: fmt.Sprintf("type mismatch: %v for %v data", ft, vt),
+ }
+ }
+ continue
+ }
+ f.Set(v)
+ }
+ return err
+}
+
+func (s structFLS) Save() ([]Field, *DocumentMetadata, error) {
+ fields := make([]Field, 0, len(s.codec.fieldByName))
+ var facets []Facet
+ for i, tag := range s.codec.byIndex {
+ if tag.ignore {
+ continue
+ }
+ f := s.v.Field(i)
+ if !f.CanSet() {
+ continue
+ }
+ if tag.facet {
+ facets = append(facets, Facet{Name: tag.name, Value: f.Interface()})
+ } else {
+ fields = append(fields, Field{Name: tag.name, Value: f.Interface()})
+ }
+ }
+ return fields, &DocumentMetadata{Facets: facets}, nil
+}
+
+// newStructFLS returns a FieldLoadSaver for the struct pointer p.
+func newStructFLS(p interface{}) (FieldLoadSaver, error) {
+ v := reflect.ValueOf(p)
+ if v.Kind() != reflect.Ptr || v.IsNil() || v.Elem().Kind() != reflect.Struct {
+ return nil, ErrInvalidDocumentType
+ }
+ codec, err := loadCodec(v.Elem().Type())
+ if err != nil {
+ return nil, err
+ }
+ return structFLS{v.Elem(), codec}, nil
+}
+
+func loadStructWithMeta(dst interface{}, f []Field, meta *DocumentMetadata) error {
+ x, err := newStructFLS(dst)
+ if err != nil {
+ return err
+ }
+ return x.Load(f, meta)
+}
+
+func saveStructWithMeta(src interface{}) ([]Field, *DocumentMetadata, error) {
+ x, err := newStructFLS(src)
+ if err != nil {
+ return nil, nil, err
+ }
+ return x.Save()
+}
+
+// LoadStruct loads the fields from f to dst. dst must be a struct pointer.
+func LoadStruct(dst interface{}, f []Field) error {
+ return loadStructWithMeta(dst, f, nil)
+}
+
+// SaveStruct returns the fields from src as a slice of Field.
+// src must be a struct pointer.
+func SaveStruct(src interface{}) ([]Field, error) {
+ f, _, err := saveStructWithMeta(src)
+ return f, err
+}
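+
+// Illustrative round-trip through SaveStruct and LoadStruct; the gopher type
+// and its values are hypothetical and mirror struct_test.go rather than
+// adding any API beyond what is defined above:
+//
+//    type gopher struct {
+//        Name string
+//        Info string  `search:"about"`
+//        Legs float64 `search:",facet"` // facets travel via DocumentMetadata, not Field
+//    }
+//
+//    g := gopher{Name: "Gopher", Info: "Likes slide rules.", Legs: 4}
+//    fields, err := SaveStruct(&g) // []Field{{Name: "Name", ...}, {Name: "about", ...}}
+//    if err != nil {
+//        // handle err
+//    }
+//    var out gopher
+//    err = LoadStruct(&out, fields) // Legs stays zero: SaveStruct drops facet metadata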
diff --git a/vendor/google.golang.org/appengine/search/struct_test.go b/vendor/google.golang.org/appengine/search/struct_test.go
new file mode 100644
index 0000000..4e5b5d1
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/struct_test.go
@@ -0,0 +1,213 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestLoadingStruct(t *testing.T) {
+ testCases := []struct {
+ desc string
+ fields []Field
+ meta *DocumentMetadata
+ want interface{}
+ wantErr bool
+ }{
+ {
+ desc: "Basic struct",
+ fields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ {Name: "Legs", Value: float64(4)},
+ },
+ want: &struct {
+ Name string
+ Legs float64
+ }{"Gopher", 4},
+ },
+ {
+ desc: "Struct with tags",
+ fields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ {Name: "about", Value: "Likes slide rules."},
+ },
+ meta: &DocumentMetadata{Facets: []Facet{
+ {Name: "Legs", Value: float64(4)},
+ {Name: "Fur", Value: Atom("furry")},
+ }},
+ want: &struct {
+ Name string
+ Info string `search:"about"`
+ Legs float64 `search:",facet"`
+ Fuzz Atom `search:"Fur,facet"`
+ }{"Gopher", "Likes slide rules.", 4, Atom("furry")},
+ },
+ {
+ desc: "Bad field from tag",
+ want: &struct {
+ AlphaBeta string `search:"αβ"`
+ }{},
+ wantErr: true,
+ },
+ {
+ desc: "Ignore missing field",
+ fields: []Field{
+ {Name: "Meaning", Value: float64(42)},
+ },
+ want: &struct{}{},
+ wantErr: true,
+ },
+ {
+ desc: "Ignore unsettable field",
+ fields: []Field{
+ {Name: "meaning", Value: float64(42)},
+ },
+ want: &struct{ meaning float64 }{}, // field not populated.
+ wantErr: true,
+ },
+ {
+ desc: "Error on missing facet",
+ meta: &DocumentMetadata{Facets: []Facet{
+ {Name: "Set", Value: Atom("yes")},
+ {Name: "Missing", Value: Atom("no")},
+ }},
+ want: &struct {
+ Set Atom `search:",facet"`
+ }{Atom("yes")},
+ wantErr: true,
+ },
+ {
+ desc: "Error on unsettable facet",
+ meta: &DocumentMetadata{Facets: []Facet{
+ {Name: "Set", Value: Atom("yes")},
+ {Name: "unset", Value: Atom("no")},
+ }},
+ want: &struct {
+ Set Atom `search:",facet"`
+ }{Atom("yes")},
+ wantErr: true,
+ },
+ {
+ desc: "Error setting ignored field",
+ fields: []Field{
+ {Name: "Set", Value: "yes"},
+ {Name: "Ignored", Value: "no"},
+ },
+ want: &struct {
+ Set string
+ Ignored string `search:"-"`
+ }{Set: "yes"},
+ wantErr: true,
+ },
+ {
+ desc: "Error setting ignored facet",
+ meta: &DocumentMetadata{Facets: []Facet{
+ {Name: "Set", Value: Atom("yes")},
+ {Name: "Ignored", Value: Atom("no")},
+ }},
+ want: &struct {
+ Set Atom `search:",facet"`
+ Ignored Atom `search:"-,facet"`
+ }{Set: Atom("yes")},
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range testCases {
+ // Make a pointer to an empty version of what want points to.
+ dst := reflect.New(reflect.TypeOf(tt.want).Elem()).Interface()
+ err := loadStructWithMeta(dst, tt.fields, tt.meta)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("%s: got err %v; want err %t", tt.desc, err, tt.wantErr)
+ continue
+ }
+ if !reflect.DeepEqual(dst, tt.want) {
+ t.Errorf("%s: doesn't match\ngot: %v\nwant: %v", tt.desc, dst, tt.want)
+ }
+ }
+}
+
+func TestSavingStruct(t *testing.T) {
+ testCases := []struct {
+ desc string
+ doc interface{}
+ wantFields []Field
+ wantFacets []Facet
+ }{
+ {
+ desc: "Basic struct",
+ doc: &struct {
+ Name string
+ Legs float64
+ }{"Gopher", 4},
+ wantFields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ {Name: "Legs", Value: float64(4)},
+ },
+ },
+ {
+ desc: "Struct with tags",
+ doc: &struct {
+ Name string
+ Info string `search:"about"`
+ Legs float64 `search:",facet"`
+ Fuzz Atom `search:"Fur,facet"`
+ }{"Gopher", "Likes slide rules.", 4, Atom("furry")},
+ wantFields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ {Name: "about", Value: "Likes slide rules."},
+ },
+ wantFacets: []Facet{
+ {Name: "Legs", Value: float64(4)},
+ {Name: "Fur", Value: Atom("furry")},
+ },
+ },
+ {
+ desc: "Ignore unexported struct fields",
+ doc: &struct {
+ Name string
+ info string
+ Legs float64 `search:",facet"`
+ fuzz Atom `search:",facet"`
+ }{"Gopher", "Likes slide rules.", 4, Atom("furry")},
+ wantFields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ },
+ wantFacets: []Facet{
+ {Name: "Legs", Value: float64(4)},
+ },
+ },
+ {
+ desc: "Ignore fields marked -",
+ doc: &struct {
+ Name string
+ Info string `search:"-"`
+ Legs float64 `search:",facet"`
+ Fuzz Atom `search:"-,facet"`
+ }{"Gopher", "Likes slide rules.", 4, Atom("furry")},
+ wantFields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ },
+ wantFacets: []Facet{
+ {Name: "Legs", Value: float64(4)},
+ },
+ },
+ }
+
+ for _, tt := range testCases {
+ fields, meta, err := saveStructWithMeta(tt.doc)
+ if err != nil {
+ t.Errorf("%s: got err %v; want nil", tt.desc, err)
+ continue
+ }
+ if !reflect.DeepEqual(fields, tt.wantFields) {
+ t.Errorf("%s: fields don't match\ngot: %v\nwant: %v", tt.desc, fields, tt.wantFields)
+ }
+ if facets := meta.Facets; !reflect.DeepEqual(facets, tt.wantFacets) {
+ t.Errorf("%s: facets don't match\ngot: %v\nwant: %v", tt.desc, facets, tt.wantFacets)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/socket/doc.go b/vendor/google.golang.org/appengine/socket/doc.go
new file mode 100644
index 0000000..3de46df
--- /dev/null
+++ b/vendor/google.golang.org/appengine/socket/doc.go
@@ -0,0 +1,10 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package socket provides outbound network sockets.
+//
+// This package is only required in the classic App Engine environment.
+// Applications running only in App Engine "flexible environment" should
+// use the standard library's net package.
+package socket
diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go
new file mode 100644
index 0000000..0ad50e2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/socket/socket_classic.go
@@ -0,0 +1,290 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package socket
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strconv"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/internal"
+
+ pb "google.golang.org/appengine/internal/socket"
+)
+
+// Dial connects to the address addr on the network protocol.
+// The address format is host:port, where host may be a hostname or an IP address.
+// Known protocols are "tcp" and "udp".
+// The returned connection satisfies net.Conn, and is valid while ctx is valid;
+// if the connection is to be used after ctx becomes invalid, invoke SetContext
+// with the new context.
+func Dial(ctx context.Context, protocol, addr string) (*Conn, error) {
+ return DialTimeout(ctx, protocol, addr, 0)
+}
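+
+// Illustrative use of Dial; the host, port and deadline are hypothetical:
+//
+//    conn, err := socket.Dial(ctx, "tcp", "example.com:443")
+//    if err != nil {
+//        // handle err
+//    }
+//    defer conn.Close()
+//    conn.SetDeadline(time.Now().Add(30 * time.Second)) // applies to both reads and writes
+//    // conn satisfies net.Conn, so it can be handed to code expecting one.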
+
+var ipFamilies = []pb.CreateSocketRequest_SocketFamily{
+ pb.CreateSocketRequest_IPv4,
+ pb.CreateSocketRequest_IPv6,
+}
+
+// DialTimeout is like Dial but takes a timeout.
+// The timeout includes name resolution, if required.
+func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) {
+ dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn.
+ if timeout > 0 {
+ var cancel context.CancelFunc
+ dialCtx, cancel = context.WithTimeout(ctx, timeout)
+ defer cancel()
+ }
+
+ host, portStr, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ port, err := strconv.Atoi(portStr)
+ if err != nil {
+ return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err)
+ }
+
+ var prot pb.CreateSocketRequest_SocketProtocol
+ switch protocol {
+ case "tcp":
+ prot = pb.CreateSocketRequest_TCP
+ case "udp":
+ prot = pb.CreateSocketRequest_UDP
+ default:
+ return nil, fmt.Errorf("socket: unknown protocol %q", protocol)
+ }
+
+ packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host)
+ if err != nil {
+ return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err)
+ }
+ if len(packedAddrs) == 0 {
+ return nil, fmt.Errorf("no addresses for %q", host)
+ }
+
+ packedAddr := packedAddrs[0] // use first address
+ fam := pb.CreateSocketRequest_IPv4
+ if len(packedAddr) == net.IPv6len {
+ fam = pb.CreateSocketRequest_IPv6
+ }
+
+ req := &pb.CreateSocketRequest{
+ Family: fam.Enum(),
+ Protocol: prot.Enum(),
+ RemoteIp: &pb.AddressPort{
+ Port: proto.Int32(int32(port)),
+ PackedAddress: packedAddr,
+ },
+ }
+ if resolved {
+ req.RemoteIp.HostnameHint = &host
+ }
+ res := &pb.CreateSocketReply{}
+ if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil {
+ return nil, err
+ }
+
+ return &Conn{
+ ctx: ctx,
+ desc: res.GetSocketDescriptor(),
+ prot: prot,
+ local: res.ProxyExternalIp,
+ remote: req.RemoteIp,
+ }, nil
+}
+
+// LookupIP returns the given host's IP addresses.
+func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) {
+ packedAddrs, _, err := resolve(ctx, ipFamilies, host)
+ if err != nil {
+ return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err)
+ }
+ addrs = make([]net.IP, len(packedAddrs))
+ for i, pa := range packedAddrs {
+ addrs[i] = net.IP(pa)
+ }
+ return addrs, nil
+}
+
+func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) {
+ // Check if it's an IP address.
+ if ip := net.ParseIP(host); ip != nil {
+ if ip := ip.To4(); ip != nil {
+ return [][]byte{ip}, false, nil
+ }
+ return [][]byte{ip}, false, nil
+ }
+
+ req := &pb.ResolveRequest{
+ Name: &host,
+ AddressFamilies: fams,
+ }
+ res := &pb.ResolveReply{}
+ if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil {
+ // XXX: need to map to pb.ResolveReply_ErrorCode?
+ return nil, false, err
+ }
+ return res.PackedAddress, true, nil
+}
+
+// withDeadline is like context.WithDeadline, except it ignores the zero deadline.
+func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) {
+ if deadline.IsZero() {
+ return parent, func() {}
+ }
+ return context.WithDeadline(parent, deadline)
+}
+
+// Conn represents a socket connection.
+// It implements net.Conn.
+type Conn struct {
+ ctx context.Context
+ desc string
+ offset int64
+
+ prot pb.CreateSocketRequest_SocketProtocol
+ local, remote *pb.AddressPort
+
+ readDeadline, writeDeadline time.Time // optional
+}
+
+// SetContext sets the context that is used by this Conn.
+// It is usually used only when using a Conn that was created in a different context,
+// such as when a connection is created during a warmup request but used while
+// servicing a user request.
+func (cn *Conn) SetContext(ctx context.Context) {
+ cn.ctx = ctx
+}
+
+func (cn *Conn) Read(b []byte) (n int, err error) {
+ const maxRead = 1 << 20
+ if len(b) > maxRead {
+ b = b[:maxRead]
+ }
+
+ req := &pb.ReceiveRequest{
+ SocketDescriptor: &cn.desc,
+ DataSize: proto.Int32(int32(len(b))),
+ }
+ res := &pb.ReceiveReply{}
+ if !cn.readDeadline.IsZero() {
+ req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds())
+ }
+ ctx, cancel := withDeadline(cn.ctx, cn.readDeadline)
+ defer cancel()
+ if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil {
+ return 0, err
+ }
+ if len(res.Data) == 0 {
+ return 0, io.EOF
+ }
+ if len(res.Data) > len(b) {
+ return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b))
+ }
+ return copy(b, res.Data), nil
+}
+
+func (cn *Conn) Write(b []byte) (n int, err error) {
+ const lim = 1 << 20 // max per chunk
+
+ for n < len(b) {
+ chunk := b[n:]
+ if len(chunk) > lim {
+ chunk = chunk[:lim]
+ }
+
+ req := &pb.SendRequest{
+ SocketDescriptor: &cn.desc,
+ Data: chunk,
+ StreamOffset: &cn.offset,
+ }
+ res := &pb.SendReply{}
+ if !cn.writeDeadline.IsZero() {
+ req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds())
+ }
+ ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline)
+ defer cancel()
+ if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil {
+ // assume zero bytes were sent in this RPC
+ break
+ }
+ n += int(res.GetDataSent())
+ cn.offset += int64(res.GetDataSent())
+ }
+
+ return
+}
+
+func (cn *Conn) Close() error {
+ req := &pb.CloseRequest{
+ SocketDescriptor: &cn.desc,
+ }
+ res := &pb.CloseReply{}
+ if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil {
+ return err
+ }
+ cn.desc = "CLOSED"
+ return nil
+}
+
+func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr {
+ if ap == nil {
+ return nil
+ }
+ switch prot {
+ case pb.CreateSocketRequest_TCP:
+ return &net.TCPAddr{
+ IP: net.IP(ap.PackedAddress),
+ Port: int(*ap.Port),
+ }
+ case pb.CreateSocketRequest_UDP:
+ return &net.UDPAddr{
+ IP: net.IP(ap.PackedAddress),
+ Port: int(*ap.Port),
+ }
+ }
+ panic("unknown protocol " + prot.String())
+}
+
+func (cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) }
+func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) }
+
+func (cn *Conn) SetDeadline(t time.Time) error {
+ cn.readDeadline = t
+ cn.writeDeadline = t
+ return nil
+}
+
+func (cn *Conn) SetReadDeadline(t time.Time) error {
+ cn.readDeadline = t
+ return nil
+}
+
+func (cn *Conn) SetWriteDeadline(t time.Time) error {
+ cn.writeDeadline = t
+ return nil
+}
+
+// KeepAlive signals that the connection is still in use.
+// It may be called to prevent the socket being closed due to inactivity.
+func (cn *Conn) KeepAlive() error {
+ req := &pb.GetSocketNameRequest{
+ SocketDescriptor: &cn.desc,
+ }
+ res := &pb.GetSocketNameReply{}
+ return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res)
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go
new file mode 100644
index 0000000..c804169
--- /dev/null
+++ b/vendor/google.golang.org/appengine/socket/socket_vm.go
@@ -0,0 +1,64 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package socket
+
+import (
+ "net"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// Dial connects to the address addr on the network protocol.
+// The address format is host:port, where host may be a hostname or an IP address.
+// Known protocols are "tcp" and "udp".
+// The returned connection satisfies net.Conn, and is valid while ctx is valid;
+// if the connection is to be used after ctx becomes invalid, invoke SetContext
+// with the new context.
+func Dial(ctx context.Context, protocol, addr string) (*Conn, error) {
+ conn, err := net.Dial(protocol, addr)
+ if err != nil {
+ return nil, err
+ }
+ return &Conn{conn}, nil
+}
+
+// DialTimeout is like Dial but takes a timeout.
+// The timeout includes name resolution, if required.
+func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) {
+ conn, err := net.DialTimeout(protocol, addr, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return &Conn{conn}, nil
+}
+
+// LookupIP returns the given host's IP addresses.
+func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) {
+ return net.LookupIP(host)
+}
+
+// Conn represents a socket connection.
+// It implements net.Conn.
+type Conn struct {
+ net.Conn
+}
+
+// SetContext sets the context that is used by this Conn.
+// It is usually used only when using a Conn that was created in a different context,
+// such as when a connection is created during a warmup request but used while
+// servicing a user request.
+func (cn *Conn) SetContext(ctx context.Context) {
+ // This function is not required in App Engine "flexible environment".
+}
+
+// KeepAlive signals that the connection is still in use.
+// It may be called to prevent the socket being closed due to inactivity.
+func (cn *Conn) KeepAlive() error {
+ // This function is not required in App Engine "flexible environment".
+ return nil
+}
diff --git a/vendor/google.golang.org/appengine/taskqueue/taskqueue.go b/vendor/google.golang.org/appengine/taskqueue/taskqueue.go
new file mode 100644
index 0000000..9b62fac
--- /dev/null
+++ b/vendor/google.golang.org/appengine/taskqueue/taskqueue.go
@@ -0,0 +1,496 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package taskqueue provides a client for App Engine's taskqueue service.
+Using this service, applications may perform work outside a user's request.
+
+A Task may be constructed manually; alternatively, since the most common
+taskqueue operation is to add a single POST task, NewPOSTTask makes it easy.
+
+ t := taskqueue.NewPOSTTask("/worker", url.Values{
+ "key": {key},
+ })
+ taskqueue.Add(c, t, "") // add t to the default queue
+*/
+package taskqueue // import "google.golang.org/appengine/taskqueue"
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ dspb "google.golang.org/appengine/internal/datastore"
+ pb "google.golang.org/appengine/internal/taskqueue"
+)
+
+var (
+ // ErrTaskAlreadyAdded is the error returned by Add and AddMulti when a task has already been added with a particular name.
+ ErrTaskAlreadyAdded = errors.New("taskqueue: task has already been added")
+)
+
+// RetryOptions let you control whether to retry a task and the backoff intervals between tries.
+type RetryOptions struct {
+ // Number of tries/leases after which the task fails permanently and is deleted.
+ // If AgeLimit is also set, both limits must be exceeded for the task to fail permanently.
+ RetryLimit int32
+
+ // Maximum time allowed since the task's first try before the task fails permanently and is deleted (only for push tasks).
+ // If RetryLimit is also set, both limits must be exceeded for the task to fail permanently.
+ AgeLimit time.Duration
+
+ // Minimum time between successive tries (only for push tasks).
+ MinBackoff time.Duration
+
+ // Maximum time between successive tries (only for push tasks).
+ MaxBackoff time.Duration
+
+ // Maximum number of times to double the interval between successive tries before the intervals increase linearly (only for push tasks).
+ MaxDoublings int32
+
+ // If MaxDoublings is zero, set ApplyZeroMaxDoublings to true to override the default non-zero value.
+ // Otherwise a zero MaxDoublings is ignored and the default is used.
+ ApplyZeroMaxDoublings bool
+}
+
+// toRetryParameters converts RetryOptions to pb.TaskQueueRetryParameters.
+func (opt *RetryOptions) toRetryParameters() *pb.TaskQueueRetryParameters {
+ params := &pb.TaskQueueRetryParameters{}
+ if opt.RetryLimit > 0 {
+ params.RetryLimit = proto.Int32(opt.RetryLimit)
+ }
+ if opt.AgeLimit > 0 {
+ params.AgeLimitSec = proto.Int64(int64(opt.AgeLimit.Seconds()))
+ }
+ if opt.MinBackoff > 0 {
+ params.MinBackoffSec = proto.Float64(opt.MinBackoff.Seconds())
+ }
+ if opt.MaxBackoff > 0 {
+ params.MaxBackoffSec = proto.Float64(opt.MaxBackoff.Seconds())
+ }
+ if opt.MaxDoublings > 0 || (opt.MaxDoublings == 0 && opt.ApplyZeroMaxDoublings) {
+ params.MaxDoublings = proto.Int32(opt.MaxDoublings)
+ }
+ return params
+}
+
+// A Task represents a task to be executed.
+type Task struct {
+ // Path is the worker URL for the task.
+ // If unset, it will default to /_ah/queue/<queue_name>.
+ Path string
+
+ // Payload is the data for the task.
+ // This will be delivered as the HTTP request body.
+ // It is only used when Method is POST, PUT or PULL.
+ // url.Values' Encode method may be used to generate this for POST requests.
+ Payload []byte
+
+ // Additional HTTP headers to pass at the task's execution time.
+ // To schedule the task to be run with an alternate app version
+ // or backend, set the "Host" header.
+ Header http.Header
+
+ // Method is the HTTP method for the task ("GET", "POST", etc.),
+ // or "PULL" if this is task is destined for a pull-based queue.
+ // If empty, this defaults to "POST".
+ Method string
+
+ // A name for the task.
+ // If empty, a name will be chosen.
+ Name string
+
+ // Delay specifies the duration the task queue service must wait
+ // before executing the task.
+ // Either Delay or ETA may be set, but not both.
+ Delay time.Duration
+
+ // ETA specifies the earliest time a task may be executed (push queues)
+ // or leased (pull queues).
+ // Either Delay or ETA may be set, but not both.
+ ETA time.Time
+
+ // The number of times the task has been dispatched or leased.
+ RetryCount int32
+
+ // Tag for the task. Only used when Method is PULL.
+ Tag string
+
+ // Retry options for this task. May be nil.
+ RetryOptions *RetryOptions
+}
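+
+// Illustrative manually constructed push task; the path, payload, delay and
+// retry values are hypothetical:
+//
+//    t := &taskqueue.Task{
+//        Path:    "/worker/resize",
+//        Payload: []byte(url.Values{"key": {"123"}}.Encode()),
+//        Header:  http.Header{"Content-Type": {"application/x-www-form-urlencoded"}},
+//        Delay:   10 * time.Second, // set Delay or ETA, never both
+//        RetryOptions: &taskqueue.RetryOptions{
+//            RetryLimit: 5,
+//            MinBackoff: time.Second,
+//        },
+//    }
+//    if _, err := taskqueue.Add(c, t, ""); err != nil { // "" means the default queue
+//        // handle err
+//    }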
+
+func (t *Task) method() string {
+ if t.Method == "" {
+ return "POST"
+ }
+ return t.Method
+}
+
+// NewPOSTTask creates a Task that will POST to a path with the given form data.
+func NewPOSTTask(path string, params url.Values) *Task {
+ h := make(http.Header)
+ h.Set("Content-Type", "application/x-www-form-urlencoded")
+ return &Task{
+ Path: path,
+ Payload: []byte(params.Encode()),
+ Header: h,
+ Method: "POST",
+ }
+}
+
+var (
+ currentNamespace = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+ defaultNamespace = http.CanonicalHeaderKey("X-AppEngine-Default-Namespace")
+)
+
+func getDefaultNamespace(ctx context.Context) string {
+ return internal.IncomingHeaders(ctx).Get(defaultNamespace)
+}
+
+func newAddReq(c context.Context, task *Task, queueName string) (*pb.TaskQueueAddRequest, error) {
+ if queueName == "" {
+ queueName = "default"
+ }
+ path := task.Path
+ if path == "" {
+ path = "/_ah/queue/" + queueName
+ }
+ eta := task.ETA
+ if eta.IsZero() {
+ eta = time.Now().Add(task.Delay)
+ } else if task.Delay != 0 {
+ panic("taskqueue: both Delay and ETA are set")
+ }
+ req := &pb.TaskQueueAddRequest{
+ QueueName: []byte(queueName),
+ TaskName: []byte(task.Name),
+ EtaUsec: proto.Int64(eta.UnixNano() / 1e3),
+ }
+ method := task.method()
+ if method == "PULL" {
+ // Pull-based task
+ req.Body = task.Payload
+ req.Mode = pb.TaskQueueMode_PULL.Enum()
+ if task.Tag != "" {
+ req.Tag = []byte(task.Tag)
+ }
+ } else {
+ // HTTP-based task
+ if v, ok := pb.TaskQueueAddRequest_RequestMethod_value[method]; ok {
+ req.Method = pb.TaskQueueAddRequest_RequestMethod(v).Enum()
+ } else {
+ return nil, fmt.Errorf("taskqueue: bad method %q", method)
+ }
+ req.Url = []byte(path)
+ for k, vs := range task.Header {
+ for _, v := range vs {
+ req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{
+ Key: []byte(k),
+ Value: []byte(v),
+ })
+ }
+ }
+ if method == "POST" || method == "PUT" {
+ req.Body = task.Payload
+ }
+
+ // Namespace headers.
+ if _, ok := task.Header[currentNamespace]; !ok {
+ // Fetch the current namespace of this request.
+ ns := internal.NamespaceFromContext(c)
+ req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{
+ Key: []byte(currentNamespace),
+ Value: []byte(ns),
+ })
+ }
+ if _, ok := task.Header[defaultNamespace]; !ok {
+ // Fetch the X-AppEngine-Default-Namespace header of this request.
+ if ns := getDefaultNamespace(c); ns != "" {
+ req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{
+ Key: []byte(defaultNamespace),
+ Value: []byte(ns),
+ })
+ }
+ }
+ }
+
+ if task.RetryOptions != nil {
+ req.RetryParameters = task.RetryOptions.toRetryParameters()
+ }
+
+ return req, nil
+}
+
+var alreadyAddedErrors = map[pb.TaskQueueServiceError_ErrorCode]bool{
+ pb.TaskQueueServiceError_TASK_ALREADY_EXISTS: true,
+ pb.TaskQueueServiceError_TOMBSTONED_TASK: true,
+}
+
+// Add adds the task to a named queue.
+// An empty queue name means that the default queue will be used.
+// Add returns an equivalent Task with defaults filled in, including setting
+// the task's Name field to the chosen name if the original was empty.
+func Add(c context.Context, task *Task, queueName string) (*Task, error) {
+ req, err := newAddReq(c, task, queueName)
+ if err != nil {
+ return nil, err
+ }
+ res := &pb.TaskQueueAddResponse{}
+ if err := internal.Call(c, "taskqueue", "Add", req, res); err != nil {
+ apiErr, ok := err.(*internal.APIError)
+ if ok && alreadyAddedErrors[pb.TaskQueueServiceError_ErrorCode(apiErr.Code)] {
+ return nil, ErrTaskAlreadyAdded
+ }
+ return nil, err
+ }
+ resultTask := *task
+ resultTask.Method = task.method()
+ if task.Name == "" {
+ resultTask.Name = string(res.ChosenTaskName)
+ }
+ return &resultTask, nil
+}
+
+// AddMulti adds multiple tasks to a named queue.
+// An empty queue name means that the default queue will be used.
+// AddMulti returns a slice of equivalent tasks with defaults filled in, including setting
+// each task's Name field to the chosen name if the original was empty.
+// If a given task is badly formed or could not be added, an appengine.MultiError is returned.
+func AddMulti(c context.Context, tasks []*Task, queueName string) ([]*Task, error) {
+ req := &pb.TaskQueueBulkAddRequest{
+ AddRequest: make([]*pb.TaskQueueAddRequest, len(tasks)),
+ }
+ me, any := make(appengine.MultiError, len(tasks)), false
+ for i, t := range tasks {
+ req.AddRequest[i], me[i] = newAddReq(c, t, queueName)
+ any = any || me[i] != nil
+ }
+ if any {
+ return nil, me
+ }
+ res := &pb.TaskQueueBulkAddResponse{}
+ if err := internal.Call(c, "taskqueue", "BulkAdd", req, res); err != nil {
+ return nil, err
+ }
+ if len(res.Taskresult) != len(tasks) {
+ return nil, errors.New("taskqueue: server error")
+ }
+ tasksOut := make([]*Task, len(tasks))
+ for i, tr := range res.Taskresult {
+ tasksOut[i] = new(Task)
+ *tasksOut[i] = *tasks[i]
+ tasksOut[i].Method = tasksOut[i].method()
+ if tasksOut[i].Name == "" {
+ tasksOut[i].Name = string(tr.ChosenTaskName)
+ }
+ if *tr.Result != pb.TaskQueueServiceError_OK {
+ if alreadyAddedErrors[*tr.Result] {
+ me[i] = ErrTaskAlreadyAdded
+ } else {
+ me[i] = &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(*tr.Result),
+ }
+ }
+ any = true
+ }
+ }
+ if any {
+ return tasksOut, me
+ }
+ return tasksOut, nil
+}
+
+// Delete deletes a task from a named queue.
+func Delete(c context.Context, task *Task, queueName string) error {
+ err := DeleteMulti(c, []*Task{task}, queueName)
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
+// DeleteMulti deletes multiple tasks from a named queue.
+// If a given task could not be deleted, an appengine.MultiError is returned.
+func DeleteMulti(c context.Context, tasks []*Task, queueName string) error {
+ taskNames := make([][]byte, len(tasks))
+ for i, t := range tasks {
+ taskNames[i] = []byte(t.Name)
+ }
+ if queueName == "" {
+ queueName = "default"
+ }
+ req := &pb.TaskQueueDeleteRequest{
+ QueueName: []byte(queueName),
+ TaskName: taskNames,
+ }
+ res := &pb.TaskQueueDeleteResponse{}
+ if err := internal.Call(c, "taskqueue", "Delete", req, res); err != nil {
+ return err
+ }
+ if a, b := len(req.TaskName), len(res.Result); a != b {
+ return fmt.Errorf("taskqueue: internal error: requested deletion of %d tasks, got %d results", a, b)
+ }
+ me, any := make(appengine.MultiError, len(res.Result)), false
+ for i, ec := range res.Result {
+ if ec != pb.TaskQueueServiceError_OK {
+ me[i] = &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(ec),
+ }
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
+
+func lease(c context.Context, maxTasks int, queueName string, leaseTime int, groupByTag bool, tag []byte) ([]*Task, error) {
+ if queueName == "" {
+ queueName = "default"
+ }
+ req := &pb.TaskQueueQueryAndOwnTasksRequest{
+ QueueName: []byte(queueName),
+ LeaseSeconds: proto.Float64(float64(leaseTime)),
+ MaxTasks: proto.Int64(int64(maxTasks)),
+ GroupByTag: proto.Bool(groupByTag),
+ Tag: tag,
+ }
+ res := &pb.TaskQueueQueryAndOwnTasksResponse{}
+ if err := internal.Call(c, "taskqueue", "QueryAndOwnTasks", req, res); err != nil {
+ return nil, err
+ }
+ tasks := make([]*Task, len(res.Task))
+ for i, t := range res.Task {
+ tasks[i] = &Task{
+ Payload: t.Body,
+ Name: string(t.TaskName),
+ Method: "PULL",
+ ETA: time.Unix(0, *t.EtaUsec*1e3),
+ RetryCount: *t.RetryCount,
+ Tag: string(t.Tag),
+ }
+ }
+ return tasks, nil
+}
+
+// Lease leases tasks from a queue.
+// leaseTime is in seconds.
+// The number of tasks fetched will be at most maxTasks.
+func Lease(c context.Context, maxTasks int, queueName string, leaseTime int) ([]*Task, error) {
+ return lease(c, maxTasks, queueName, leaseTime, false, nil)
+}
+
+// LeaseByTag leases tasks from a queue, grouped by tag.
+// If tag is empty, then the returned tasks are grouped by the tag of the task with earliest ETA.
+// leaseTime is in seconds.
+// The number of tasks fetched will be at most maxTasks.
+func LeaseByTag(c context.Context, maxTasks int, queueName string, leaseTime int, tag string) ([]*Task, error) {
+ return lease(c, maxTasks, queueName, leaseTime, true, []byte(tag))
+}
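+
+// Illustrative pull-queue worker loop; the queue name, lease length and
+// process function are hypothetical:
+//
+//    tasks, err := taskqueue.Lease(c, 100, "pull-queue", 60) // up to 100 tasks, 60s lease
+//    if err != nil {
+//        // handle err
+//    }
+//    for _, t := range tasks {
+//        process(t.Payload) // hypothetical handler
+//        if err := taskqueue.Delete(c, t, "pull-queue"); err != nil {
+//            // handle err; an undeleted task is re-leased after the lease expires
+//        }
+//    }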
+
+// Purge removes all tasks from a queue.
+func Purge(c context.Context, queueName string) error {
+ if queueName == "" {
+ queueName = "default"
+ }
+ req := &pb.TaskQueuePurgeQueueRequest{
+ QueueName: []byte(queueName),
+ }
+ res := &pb.TaskQueuePurgeQueueResponse{}
+ return internal.Call(c, "taskqueue", "PurgeQueue", req, res)
+}
+
+// ModifyLease modifies the lease of a task.
+// Used to request more processing time, or to abandon processing.
+// leaseTime is in seconds and must not be negative.
+func ModifyLease(c context.Context, task *Task, queueName string, leaseTime int) error {
+ if queueName == "" {
+ queueName = "default"
+ }
+ req := &pb.TaskQueueModifyTaskLeaseRequest{
+ QueueName: []byte(queueName),
+ TaskName: []byte(task.Name),
+ EtaUsec: proto.Int64(task.ETA.UnixNano() / 1e3), // Used to verify ownership.
+ LeaseSeconds: proto.Float64(float64(leaseTime)),
+ }
+ res := &pb.TaskQueueModifyTaskLeaseResponse{}
+ if err := internal.Call(c, "taskqueue", "ModifyTaskLease", req, res); err != nil {
+ return err
+ }
+ task.ETA = time.Unix(0, *res.UpdatedEtaUsec*1e3)
+ return nil
+}
+
+// QueueStatistics represents statistics about a single task queue.
+type QueueStatistics struct {
+ Tasks int // may be an approximation
+ OldestETA time.Time // zero if there are no pending tasks
+
+ Executed1Minute int // tasks executed in the last minute
+ InFlight int // tasks executing now
+ EnforcedRate float64 // requests per second
+}
+
+// QueueStats retrieves statistics about queues.
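+//
+// A small sketch of inspecting the default queue's backlog
+// (an empty queue name is resolved to "default"):
+//
+//	stats, err := taskqueue.QueueStats(c, []string{""})
+//	if err == nil && len(stats) == 1 {
+//		// stats[0].Tasks and stats[0].OldestETA describe the backlog
+//	}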
+func QueueStats(c context.Context, queueNames []string) ([]QueueStatistics, error) {
+ req := &pb.TaskQueueFetchQueueStatsRequest{
+ QueueName: make([][]byte, len(queueNames)),
+ }
+ for i, q := range queueNames {
+ if q == "" {
+ q = "default"
+ }
+ req.QueueName[i] = []byte(q)
+ }
+ res := &pb.TaskQueueFetchQueueStatsResponse{}
+ if err := internal.Call(c, "taskqueue", "FetchQueueStats", req, res); err != nil {
+ return nil, err
+ }
+ qs := make([]QueueStatistics, len(res.Queuestats))
+ for i, qsg := range res.Queuestats {
+ qs[i] = QueueStatistics{
+ Tasks: int(*qsg.NumTasks),
+ }
+ if eta := *qsg.OldestEtaUsec; eta > -1 {
+ qs[i].OldestETA = time.Unix(0, eta*1e3)
+ }
+ if si := qsg.ScannerInfo; si != nil {
+ qs[i].Executed1Minute = int(*si.ExecutedLastMinute)
+ qs[i].InFlight = int(si.GetRequestsInFlight())
+ qs[i].EnforcedRate = si.GetEnforcedRate()
+ }
+ }
+ return qs, nil
+}
+
+func setTransaction(x *pb.TaskQueueAddRequest, t *dspb.Transaction) {
+ x.Transaction = t
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("taskqueue", pb.TaskQueueServiceError_ErrorCode_name)
+
+ // Datastore error codes are shifted by DATASTORE_ERROR when presented through taskqueue.
+ dsCode := int32(pb.TaskQueueServiceError_DATASTORE_ERROR) + int32(dspb.Error_TIMEOUT)
+ internal.RegisterTimeoutErrorCode("taskqueue", dsCode)
+
+ // Transaction registration.
+ internal.RegisterTransactionSetter(setTransaction)
+ internal.RegisterTransactionSetter(func(x *pb.TaskQueueBulkAddRequest, t *dspb.Transaction) {
+ for _, req := range x.AddRequest {
+ setTransaction(req, t)
+ }
+ })
+}
diff --git a/vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go b/vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go
new file mode 100644
index 0000000..0c14015
--- /dev/null
+++ b/vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go
@@ -0,0 +1,116 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package taskqueue
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "testing"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/taskqueue"
+)
+
+func TestAddErrors(t *testing.T) {
+ var tests = []struct {
+ err, want error
+ sameErr bool // if true, should return err exactly
+ }{
+ {
+ err: &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(pb.TaskQueueServiceError_TASK_ALREADY_EXISTS),
+ },
+ want: ErrTaskAlreadyAdded,
+ },
+ {
+ err: &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(pb.TaskQueueServiceError_TOMBSTONED_TASK),
+ },
+ want: ErrTaskAlreadyAdded,
+ },
+ {
+ err: &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(pb.TaskQueueServiceError_UNKNOWN_QUEUE),
+ },
+ want: errors.New("not used"),
+ sameErr: true,
+ },
+ }
+ for _, tc := range tests {
+ c := aetesting.FakeSingleContext(t, "taskqueue", "Add", func(req *pb.TaskQueueAddRequest, res *pb.TaskQueueAddResponse) error {
+ // don't fill in any of the response
+ return tc.err
+ })
+ task := &Task{Path: "/worker", Method: "PULL"}
+ _, err := Add(c, task, "a-queue")
+ want := tc.want
+ if tc.sameErr {
+ want = tc.err
+ }
+ if err != want {
+ t.Errorf("Add with tc.err = %v, got %#v, want = %#v", tc.err, err, want)
+ }
+ }
+}
+
+func TestAddMulti(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "taskqueue", "BulkAdd", func(req *pb.TaskQueueBulkAddRequest, res *pb.TaskQueueBulkAddResponse) error {
+ res.Taskresult = []*pb.TaskQueueBulkAddResponse_TaskResult{
+ {
+ Result: pb.TaskQueueServiceError_OK.Enum(),
+ },
+ {
+ Result: pb.TaskQueueServiceError_TASK_ALREADY_EXISTS.Enum(),
+ },
+ {
+ Result: pb.TaskQueueServiceError_TOMBSTONED_TASK.Enum(),
+ },
+ {
+ Result: pb.TaskQueueServiceError_INTERNAL_ERROR.Enum(),
+ },
+ }
+ return nil
+ })
+ tasks := []*Task{
+ {Path: "/worker", Method: "PULL"},
+ {Path: "/worker", Method: "PULL"},
+ {Path: "/worker", Method: "PULL"},
+ {Path: "/worker", Method: "PULL"},
+ }
+ r, err := AddMulti(c, tasks, "a-queue")
+ if len(r) != len(tasks) {
+ t.Fatalf("AddMulti returned %d tasks, want %d", len(r), len(tasks))
+ }
+ want := appengine.MultiError{
+ nil,
+ ErrTaskAlreadyAdded,
+ ErrTaskAlreadyAdded,
+ &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(pb.TaskQueueServiceError_INTERNAL_ERROR),
+ },
+ }
+ if !reflect.DeepEqual(err, want) {
+ t.Errorf("AddMulti got %v, wanted %v", err, want)
+ }
+}
+
+func TestAddWithEmptyPath(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "taskqueue", "Add", func(req *pb.TaskQueueAddRequest, res *pb.TaskQueueAddResponse) error {
+ if got, want := string(req.Url), "/_ah/queue/a-queue"; got != want {
+ return fmt.Errorf("req.Url = %q; want %q", got, want)
+ }
+ return nil
+ })
+ if _, err := Add(c, &Task{}, "a-queue"); err != nil {
+ t.Fatalf("Add: %v", err)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go
new file mode 100644
index 0000000..05642a9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/timeout.go
@@ -0,0 +1,20 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import "golang.org/x/net/context"
+
+// IsTimeoutError reports whether err is a timeout error.
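+//
+// A brief illustrative sketch; the datastore call is only an example of an
+// API call that may time out:
+//
+//	if err := datastore.Get(c, key, &entity); appengine.IsTimeoutError(err) {
+//		// back off and retry, or degrade gracefully
+//	}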
+func IsTimeoutError(err error) bool {
+ if err == context.DeadlineExceeded {
+ return true
+ }
+ if t, ok := err.(interface {
+ IsTimeout() bool
+ }); ok {
+ return t.IsTimeout()
+ }
+ return false
+}
diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
new file mode 100644
index 0000000..6ffe1e6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
@@ -0,0 +1,210 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package urlfetch provides an http.RoundTripper implementation
+// for fetching URLs via App Engine's urlfetch service.
+package urlfetch // import "google.golang.org/appengine/urlfetch"
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/urlfetch"
+)
+
+// Transport is an implementation of http.RoundTripper for
+// App Engine. Users should generally create an http.Client using
+// this transport and use the Client rather than using this transport
+// directly.
+type Transport struct {
+ Context context.Context
+
+ // Controls whether the application checks the validity of SSL certificates
+ // over HTTPS connections. A value of false (the default) instructs the
+ // application to send a request to the server only if the certificate is
+ // valid and signed by a trusted certificate authority (CA), and also
+ // includes a hostname that matches the certificate. A value of true
+ // instructs the application to perform no certificate validation.
+ AllowInvalidServerCertificate bool
+}
+
+// Verify statically that *Transport implements http.RoundTripper.
+var _ http.RoundTripper = (*Transport)(nil)
+
+// Client returns an *http.Client using a default urlfetch Transport. This
+// client will have the default deadline of 5 seconds, and will check the
+// validity of SSL certificates.
+//
+// Any deadline of the provided context will be used for requests through this client;
+// if the context does not have a deadline then a 5 second default is used.
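+//
+// A typical usage sketch inside a request handler (the URL is illustrative):
+//
+//	client := urlfetch.Client(appengine.NewContext(r))
+//	resp, err := client.Get("https://www.example.com/")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer resp.Body.Close()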
+func Client(ctx context.Context) *http.Client {
+ return &http.Client{
+ Transport: &Transport{
+ Context: ctx,
+ },
+ }
+}
+
+type bodyReader struct {
+ content []byte
+ truncated bool
+ closed bool
+}
+
+// ErrTruncatedBody is the error returned after the final Read() from a
+// response's Body if the body has been truncated by App Engine's proxy.
+var ErrTruncatedBody = errors.New("urlfetch: truncated body")
+
+func statusCodeToText(code int) string {
+ if t := http.StatusText(code); t != "" {
+ return t
+ }
+ return strconv.Itoa(code)
+}
+
+func (br *bodyReader) Read(p []byte) (n int, err error) {
+ if br.closed {
+ if br.truncated {
+ return 0, ErrTruncatedBody
+ }
+ return 0, io.EOF
+ }
+ n = copy(p, br.content)
+ if n > 0 {
+ br.content = br.content[n:]
+ return
+ }
+ if br.truncated {
+ br.closed = true
+ return 0, ErrTruncatedBody
+ }
+ return 0, io.EOF
+}
+
+func (br *bodyReader) Close() error {
+ br.closed = true
+ br.content = nil
+ return nil
+}
+
+// A map of the URL Fetch-accepted methods that take a request body.
+var methodAcceptsRequestBody = map[string]bool{
+ "POST": true,
+ "PUT": true,
+ "PATCH": true,
+}
+
+// urlString returns a valid string given a URL. This function is necessary because
+// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
+// See http://code.google.com/p/go/issues/detail?id=4860.
+func urlString(u *url.URL) string {
+ if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
+ return u.String()
+ }
+ aux := *u
+ aux.Opaque = "//" + aux.Host + aux.Opaque
+ return aux.String()
+}
+
+// RoundTrip issues a single HTTP request and returns its response. Per the
+// http.RoundTripper interface, RoundTrip only returns an error if there
+// was an unsupported request or the URL Fetch proxy fails.
+// Note that HTTP response codes such as 5xx, 403, 404, etc. are not
+// errors as far as the transport is concerned and will be returned
+// with err set to nil.
+func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
+ methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
+ if !ok {
+ return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
+ }
+
+ method := pb.URLFetchRequest_RequestMethod(methNum)
+
+ freq := &pb.URLFetchRequest{
+ Method: &method,
+ Url: proto.String(urlString(req.URL)),
+ FollowRedirects: proto.Bool(false), // http.Client's responsibility
+ MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
+ }
+ if deadline, ok := t.Context.Deadline(); ok {
+ freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())
+ }
+
+ for k, vals := range req.Header {
+ for _, val := range vals {
+ freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
+ Key: proto.String(k),
+ Value: proto.String(val),
+ })
+ }
+ }
+ if methodAcceptsRequestBody[req.Method] && req.Body != nil {
+ // Avoid a []byte copy if req.Body has a Bytes method.
+ switch b := req.Body.(type) {
+ case interface {
+ Bytes() []byte
+ }:
+ freq.Payload = b.Bytes()
+ default:
+ freq.Payload, err = ioutil.ReadAll(req.Body)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ fres := &pb.URLFetchResponse{}
+ if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil {
+ return nil, err
+ }
+
+ res = &http.Response{}
+ res.StatusCode = int(*fres.StatusCode)
+ res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
+ res.Header = make(http.Header)
+ res.Request = req
+
+ // Faked:
+ res.ProtoMajor = 1
+ res.ProtoMinor = 1
+ res.Proto = "HTTP/1.1"
+ res.Close = true
+
+ for _, h := range fres.Header {
+ hkey := http.CanonicalHeaderKey(*h.Key)
+ hval := *h.Value
+ if hkey == "Content-Length" {
+ // Will get filled in below for all but HEAD requests.
+ if req.Method == "HEAD" {
+ res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
+ }
+ continue
+ }
+ res.Header.Add(hkey, hval)
+ }
+
+ if req.Method != "HEAD" {
+ res.ContentLength = int64(len(fres.Content))
+ }
+
+ truncated := fres.GetContentWasTruncated()
+ res.Body = &bodyReader{content: fres.Content, truncated: truncated}
+ return
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
+ internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
+}
diff --git a/vendor/google.golang.org/appengine/user/oauth.go b/vendor/google.golang.org/appengine/user/oauth.go
new file mode 100644
index 0000000..ffad571
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/oauth.go
@@ -0,0 +1,52 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package user
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/user"
+)
+
+// CurrentOAuth returns the user associated with the OAuth consumer making this
+// request. If the OAuth consumer did not make a valid OAuth request, or if
+// scopes is non-empty and the current user does not have at least one of
+// those scopes, this method will return an error.
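+//
+// A brief sketch (the scope shown is illustrative):
+//
+//	u, err := user.CurrentOAuth(c, "https://www.googleapis.com/auth/userinfo.email")
+//	if err != nil {
+//		// not a valid OAuth request for the requested scope
+//	}
+//	// u.Email, u.ClientID, etc. describe the OAuth consumer's user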
+func CurrentOAuth(c context.Context, scopes ...string) (*User, error) {
+ req := &pb.GetOAuthUserRequest{}
+ if len(scopes) != 1 || scopes[0] != "" {
+ // The signature for this function used to be CurrentOAuth(Context, string).
+ // Ignore the singular "" scope to preserve existing behavior.
+ req.Scopes = scopes
+ }
+
+ res := &pb.GetOAuthUserResponse{}
+
+ err := internal.Call(c, "user", "GetOAuthUser", req, res)
+ if err != nil {
+ return nil, err
+ }
+ return &User{
+ Email: *res.Email,
+ AuthDomain: *res.AuthDomain,
+ Admin: res.GetIsAdmin(),
+ ID: *res.UserId,
+ ClientID: res.GetClientId(),
+ }, nil
+}
+
+// OAuthConsumerKey returns the OAuth consumer key provided with the current
+// request. This method will return an error if the OAuth request was invalid.
+func OAuthConsumerKey(c context.Context) (string, error) {
+ req := &pb.CheckOAuthSignatureRequest{}
+ res := &pb.CheckOAuthSignatureResponse{}
+
+ err := internal.Call(c, "user", "CheckOAuthSignature", req, res)
+ if err != nil {
+ return "", err
+ }
+ return *res.OauthConsumerKey, err
+}
diff --git a/vendor/google.golang.org/appengine/user/user.go b/vendor/google.golang.org/appengine/user/user.go
new file mode 100644
index 0000000..eb76f59
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user.go
@@ -0,0 +1,84 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package user provides a client for App Engine's user authentication service.
+package user // import "google.golang.org/appengine/user"
+
+import (
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/user"
+)
+
+// User represents a user of the application.
+type User struct {
+ Email string
+ AuthDomain string
+ Admin bool
+
+ // ID is the unique permanent ID of the user.
+ // It is populated if the Email is associated
+ // with a Google account, or empty otherwise.
+ ID string
+
+ // ClientID is the ID of the pre-registered client so its identity can be verified.
+ // See https://developers.google.com/console/help/#generatingoauth2 for more information.
+ ClientID string
+
+ FederatedIdentity string
+ FederatedProvider string
+}
+
+// String returns a displayable name for the user.
+func (u *User) String() string {
+ if u.AuthDomain != "" && strings.HasSuffix(u.Email, "@"+u.AuthDomain) {
+ return u.Email[:len(u.Email)-len("@"+u.AuthDomain)]
+ }
+ if u.FederatedIdentity != "" {
+ return u.FederatedIdentity
+ }
+ return u.Email
+}
+
+// LoginURL returns a URL that, when visited, prompts the user to sign in,
+// then redirects the user to the URL specified by dest.
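+//
+// A common pattern is to redirect signed-out users from a handler
+// (w and r are the handler's ResponseWriter and Request; error handling
+// is elided for brevity):
+//
+//	if user.Current(c) == nil {
+//		url, _ := user.LoginURL(c, r.URL.String())
+//		http.Redirect(w, r, url, http.StatusFound)
+//		return
+//	}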
+func LoginURL(c context.Context, dest string) (string, error) {
+ return LoginURLFederated(c, dest, "")
+}
+
+// LoginURLFederated is like LoginURL but accepts a user's OpenID identifier.
+func LoginURLFederated(c context.Context, dest, identity string) (string, error) {
+ req := &pb.CreateLoginURLRequest{
+ DestinationUrl: proto.String(dest),
+ }
+ if identity != "" {
+ req.FederatedIdentity = proto.String(identity)
+ }
+ res := &pb.CreateLoginURLResponse{}
+ if err := internal.Call(c, "user", "CreateLoginURL", req, res); err != nil {
+ return "", err
+ }
+ return *res.LoginUrl, nil
+}
+
+// LogoutURL returns a URL that, when visited, signs the user out,
+// then redirects the user to the URL specified by dest.
+func LogoutURL(c context.Context, dest string) (string, error) {
+ req := &pb.CreateLogoutURLRequest{
+ DestinationUrl: proto.String(dest),
+ }
+ res := &pb.CreateLogoutURLResponse{}
+ if err := internal.Call(c, "user", "CreateLogoutURL", req, res); err != nil {
+ return "", err
+ }
+ return *res.LogoutUrl, nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("user", pb.UserServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/user/user_classic.go b/vendor/google.golang.org/appengine/user/user_classic.go
new file mode 100644
index 0000000..a747ef3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user_classic.go
@@ -0,0 +1,35 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package user
+
+import (
+ "appengine/user"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+func Current(ctx context.Context) *User {
+ u := user.Current(internal.ClassicContextFromContext(ctx))
+ if u == nil {
+ return nil
+ }
+ // Map appengine/user.User to this package's User type.
+ return &User{
+ Email: u.Email,
+ AuthDomain: u.AuthDomain,
+ Admin: u.Admin,
+ ID: u.ID,
+ FederatedIdentity: u.FederatedIdentity,
+ FederatedProvider: u.FederatedProvider,
+ }
+}
+
+func IsAdmin(ctx context.Context) bool {
+ return user.IsAdmin(internal.ClassicContextFromContext(ctx))
+}
diff --git a/vendor/google.golang.org/appengine/user/user_test.go b/vendor/google.golang.org/appengine/user/user_test.go
new file mode 100644
index 0000000..5fc5957
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user_test.go
@@ -0,0 +1,99 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package user
+
+import (
+ "fmt"
+ "net/http"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/user"
+)
+
+func baseReq() *http.Request {
+ return &http.Request{
+ Header: http.Header{},
+ }
+}
+
+type basicUserTest struct {
+ nickname, email, authDomain, admin string
+ // expectations
+ isNil, isAdmin bool
+ displayName string
+}
+
+var basicUserTests = []basicUserTest{
+ {"", "", "", "0", true, false, ""},
+ {"ken", "ken@example.com", "example.com", "0", false, false, "ken"},
+ {"ken", "ken@example.com", "auth_domain.com", "1", false, true, "ken@example.com"},
+}
+
+func TestBasicUserAPI(t *testing.T) {
+ for i, tc := range basicUserTests {
+ req := baseReq()
+ req.Header.Set("X-AppEngine-User-Nickname", tc.nickname)
+ req.Header.Set("X-AppEngine-User-Email", tc.email)
+ req.Header.Set("X-AppEngine-Auth-Domain", tc.authDomain)
+ req.Header.Set("X-AppEngine-User-Is-Admin", tc.admin)
+
+ c := internal.ContextForTesting(req)
+
+ if ga := IsAdmin(c); ga != tc.isAdmin {
+ t.Errorf("test %d: expected IsAdmin(c) = %v, got %v", i, tc.isAdmin, ga)
+ }
+
+ u := Current(c)
+ if tc.isNil {
+ if u != nil {
+ t.Errorf("test %d: expected u == nil, got %+v", i, u)
+ }
+ continue
+ }
+ if u == nil {
+ t.Errorf("test %d: expected u != nil, got nil", i)
+ continue
+ }
+ if u.Email != tc.email {
+ t.Errorf("test %d: expected u.Email = %q, got %q", i, tc.email, u.Email)
+ }
+ if gs := u.String(); gs != tc.displayName {
+ t.Errorf("test %d: expected u.String() = %q, got %q", i, tc.displayName, gs)
+ }
+ if u.Admin != tc.isAdmin {
+ t.Errorf("test %d: expected u.Admin = %v, got %v", i, tc.isAdmin, u.Admin)
+ }
+ }
+}
+
+func TestLoginURL(t *testing.T) {
+ expectedQuery := &pb.CreateLoginURLRequest{
+ DestinationUrl: proto.String("/destination"),
+ }
+ const expectedDest = "/redir/dest"
+ c := aetesting.FakeSingleContext(t, "user", "CreateLoginURL", func(req *pb.CreateLoginURLRequest, res *pb.CreateLoginURLResponse) error {
+ if !proto.Equal(req, expectedQuery) {
+ return fmt.Errorf("got %v, want %v", req, expectedQuery)
+ }
+ res.LoginUrl = proto.String(expectedDest)
+ return nil
+ })
+
+ url, err := LoginURL(c, "/destination")
+ if err != nil {
+ t.Fatalf("LoginURL failed: %v", err)
+ }
+ if url != expectedDest {
+ t.Errorf("got %v, want %v", url, expectedDest)
+ }
+}
+
+// TODO(dsymonds): Add test for LogoutURL.
diff --git a/vendor/google.golang.org/appengine/user/user_vm.go b/vendor/google.golang.org/appengine/user/user_vm.go
new file mode 100644
index 0000000..8dc672e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user_vm.go
@@ -0,0 +1,38 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package user
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// Current returns the currently logged-in user,
+// or nil if the user is not signed in.
+func Current(c context.Context) *User {
+ h := internal.IncomingHeaders(c)
+ u := &User{
+ Email: h.Get("X-AppEngine-User-Email"),
+ AuthDomain: h.Get("X-AppEngine-Auth-Domain"),
+ ID: h.Get("X-AppEngine-User-Id"),
+ Admin: h.Get("X-AppEngine-User-Is-Admin") == "1",
+ FederatedIdentity: h.Get("X-AppEngine-Federated-Identity"),
+ FederatedProvider: h.Get("X-AppEngine-Federated-Provider"),
+ }
+ if u.Email == "" && u.FederatedIdentity == "" {
+ return nil
+ }
+ return u
+}
+
+// IsAdmin returns true if the current user is signed in and
+// is currently registered as an administrator of the application.
+func IsAdmin(c context.Context) bool {
+ h := internal.IncomingHeaders(c)
+ return h.Get("X-AppEngine-User-Is-Admin") == "1"
+}
diff --git a/vendor/google.golang.org/appengine/xmpp/xmpp.go b/vendor/google.golang.org/appengine/xmpp/xmpp.go
new file mode 100644
index 0000000..3a561fd
--- /dev/null
+++ b/vendor/google.golang.org/appengine/xmpp/xmpp.go
@@ -0,0 +1,253 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package xmpp provides the means to send and receive instant messages
+to and from users of XMPP-compatible services.
+
+To send a message,
+ m := &xmpp.Message{
+ To: []string{"kaylee@example.com"},
+ Body: `Hi! How's the carrot?`,
+ }
+ err := m.Send(c)
+
+To receive messages,
+ func init() {
+ xmpp.Handle(handleChat)
+ }
+
+ func handleChat(c context.Context, m *xmpp.Message) {
+ // ...
+ }
+*/
+package xmpp // import "google.golang.org/appengine/xmpp"
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/xmpp"
+)
+
+// Message represents an incoming chat message.
+type Message struct {
+ // Sender is the JID of the sender.
+ // Optional for outgoing messages.
+ Sender string
+
+ // To is the intended recipients of the message.
+ // Incoming messages will have exactly one element.
+ To []string
+
+ // Body is the body of the message.
+ Body string
+
+ // Type is the message type, per RFC 3921.
+ // It defaults to "chat".
+ Type string
+
+ // RawXML is whether the body contains raw XML.
+ RawXML bool
+}
+
+// Presence represents an outgoing presence update.
+type Presence struct {
+ // Sender is the JID (optional).
+ Sender string
+
+ // The intended recipient of the presence update.
+ To string
+
+ // Type, per RFC 3921 (optional). Defaults to "available".
+ Type string
+
+ // State of presence (optional).
+ // Valid values: "away", "chat", "xa", "dnd" (RFC 3921).
+ State string
+
+ // Free text status message (optional).
+ Status string
+}
+
+var (
+ ErrPresenceUnavailable = errors.New("xmpp: presence unavailable")
+ ErrInvalidJID = errors.New("xmpp: invalid JID")
+)
+
+// Handle arranges for f to be called for incoming XMPP messages.
+// Only messages of type "chat" or "normal" will be handled.
+func Handle(f func(c context.Context, m *Message)) {
+ http.HandleFunc("/_ah/xmpp/message/chat/", func(_ http.ResponseWriter, r *http.Request) {
+ f(appengine.NewContext(r), &Message{
+ Sender: r.FormValue("from"),
+ To: []string{r.FormValue("to")},
+ Body: r.FormValue("body"),
+ })
+ })
+}
+
+// Send sends a message.
+// If any failures occur with specific recipients, the error will be an appengine.MultiError.
+func (m *Message) Send(c context.Context) error {
+ req := &pb.XmppMessageRequest{
+ Jid: m.To,
+ Body: &m.Body,
+ RawXml: &m.RawXML,
+ }
+ if m.Type != "" && m.Type != "chat" {
+ req.Type = &m.Type
+ }
+ if m.Sender != "" {
+ req.FromJid = &m.Sender
+ }
+ res := &pb.XmppMessageResponse{}
+ if err := internal.Call(c, "xmpp", "SendMessage", req, res); err != nil {
+ return err
+ }
+
+ if len(res.Status) != len(req.Jid) {
+ return fmt.Errorf("xmpp: sent message to %d JIDs, but only got %d statuses back", len(req.Jid), len(res.Status))
+ }
+ me, any := make(appengine.MultiError, len(req.Jid)), false
+ for i, st := range res.Status {
+ if st != pb.XmppMessageResponse_NO_ERROR {
+ me[i] = errors.New(st.String())
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
+
+// Invite sends an invitation. If the from address is an empty string
+// the default (yourapp@appspot.com/bot) will be used.
+func Invite(c context.Context, to, from string) error {
+ req := &pb.XmppInviteRequest{
+ Jid: &to,
+ }
+ if from != "" {
+ req.FromJid = &from
+ }
+ res := &pb.XmppInviteResponse{}
+ return internal.Call(c, "xmpp", "SendInvite", req, res)
+}
+
+// Send sends a presence update.
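+//
+// For example, a minimal sketch announcing availability
+// (the recipient address is illustrative):
+//
+//	p := &xmpp.Presence{To: "kaylee@example.com", State: "chat"}
+//	if err := p.Send(c); err != nil {
+//		// handle error
+//	}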
+func (p *Presence) Send(c context.Context) error {
+ req := &pb.XmppSendPresenceRequest{
+ Jid: &p.To,
+ }
+ if p.State != "" {
+ req.Show = &p.State
+ }
+ if p.Type != "" {
+ req.Type = &p.Type
+ }
+ if p.Sender != "" {
+ req.FromJid = &p.Sender
+ }
+ if p.Status != "" {
+ req.Status = &p.Status
+ }
+ res := &pb.XmppSendPresenceResponse{}
+ return internal.Call(c, "xmpp", "SendPresence", req, res)
+}
+
+var presenceMap = map[pb.PresenceResponse_SHOW]string{
+ pb.PresenceResponse_NORMAL: "",
+ pb.PresenceResponse_AWAY: "away",
+ pb.PresenceResponse_DO_NOT_DISTURB: "dnd",
+ pb.PresenceResponse_CHAT: "chat",
+ pb.PresenceResponse_EXTENDED_AWAY: "xa",
+}
+
+// GetPresence retrieves a user's presence.
+// If the from address is an empty string the default
+// (yourapp@appspot.com/bot) will be used.
+// Possible return values are "", "away", "dnd", "chat", "xa".
+// ErrPresenceUnavailable is returned if the presence is unavailable.
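+//
+// A small sketch of checking a user before messaging them
+// (the address is illustrative):
+//
+//	presence, err := xmpp.GetPresence(c, "kaylee@example.com", "")
+//	if err == nil && presence == "chat" {
+//		// the user is available to chat
+//	}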
+func GetPresence(c context.Context, to string, from string) (string, error) {
+ req := &pb.PresenceRequest{
+ Jid: &to,
+ }
+ if from != "" {
+ req.FromJid = &from
+ }
+ res := &pb.PresenceResponse{}
+ if err := internal.Call(c, "xmpp", "GetPresence", req, res); err != nil {
+ return "", err
+ }
+ if !*res.IsAvailable || res.Presence == nil {
+ return "", ErrPresenceUnavailable
+ }
+ presence, ok := presenceMap[*res.Presence]
+ if ok {
+ return presence, nil
+ }
+ return "", fmt.Errorf("xmpp: unknown presence %v", *res.Presence)
+}
+
+// GetPresenceMulti retrieves multiple users' presence.
+// If the from address is an empty string the default
+// (yourapp@appspot.com/bot) will be used.
+// Possible return values are "", "away", "dnd", "chat", "xa".
+// If any presence is unavailable, an appengine.MultiError is returned.
+func GetPresenceMulti(c context.Context, to []string, from string) ([]string, error) {
+ req := &pb.BulkPresenceRequest{
+ Jid: to,
+ }
+ if from != "" {
+ req.FromJid = &from
+ }
+ res := &pb.BulkPresenceResponse{}
+
+ if err := internal.Call(c, "xmpp", "BulkGetPresence", req, res); err != nil {
+ return nil, err
+ }
+
+ presences := make([]string, 0, len(res.PresenceResponse))
+ errs := appengine.MultiError{}
+
+ addResult := func(presence string, err error) {
+ presences = append(presences, presence)
+ errs = append(errs, err)
+ }
+
+ anyErr := false
+ for _, subres := range res.PresenceResponse {
+ if !subres.GetValid() {
+ anyErr = true
+ addResult("", ErrInvalidJID)
+ continue
+ }
+ if !*subres.IsAvailable || subres.Presence == nil {
+ anyErr = true
+ addResult("", ErrPresenceUnavailable)
+ continue
+ }
+ presence, ok := presenceMap[*subres.Presence]
+ if ok {
+ addResult(presence, nil)
+ } else {
+ anyErr = true
+ addResult("", fmt.Errorf("xmpp: unknown presence %q", *subres.Presence))
+ }
+ }
+ if anyErr {
+ return presences, errs
+ }
+ return presences, nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("xmpp", pb.XmppServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/xmpp/xmpp_test.go b/vendor/google.golang.org/appengine/xmpp/xmpp_test.go
new file mode 100644
index 0000000..c3030d3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/xmpp/xmpp_test.go
@@ -0,0 +1,173 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package xmpp
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/xmpp"
+)
+
+func newPresenceResponse(isAvailable bool, presence pb.PresenceResponse_SHOW, valid bool) *pb.PresenceResponse {
+ return &pb.PresenceResponse{
+ IsAvailable: proto.Bool(isAvailable),
+ Presence: presence.Enum(),
+ Valid: proto.Bool(valid),
+ }
+}
+
+func setPresenceResponse(m *pb.PresenceResponse, isAvailable bool, presence pb.PresenceResponse_SHOW, valid bool) {
+ m.IsAvailable = &isAvailable
+ m.Presence = presence.Enum()
+ m.Valid = &valid
+}
+
+func TestGetPresence(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "GetPresence", func(in *pb.PresenceRequest, out *pb.PresenceResponse) error {
+ if jid := in.GetJid(); jid != "user@example.com" {
+ return fmt.Errorf("bad jid %q", jid)
+ }
+ setPresenceResponse(out, true, pb.PresenceResponse_CHAT, true)
+ return nil
+ })
+
+ presence, err := GetPresence(c, "user@example.com", "")
+ if err != nil {
+ t.Fatalf("GetPresence: %v", err)
+ }
+
+ if presence != "chat" {
+ t.Errorf("GetPresence: got %#v, want %#v", presence, pb.PresenceResponse_CHAT)
+ }
+}
+
+func TestGetPresenceMultiSingleJID(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
+ if !reflect.DeepEqual(in.Jid, []string{"user@example.com"}) {
+ return fmt.Errorf("bad request jids %#v", in.Jid)
+ }
+ out.PresenceResponse = []*pb.PresenceResponse{
+ newPresenceResponse(true, pb.PresenceResponse_NORMAL, true),
+ }
+ return nil
+ })
+
+ presence, err := GetPresenceMulti(c, []string{"user@example.com"}, "")
+ if err != nil {
+ t.Fatalf("GetPresenceMulti: %v", err)
+ }
+ if !reflect.DeepEqual(presence, []string{""}) {
+ t.Errorf("GetPresenceMulti: got %s, want %s", presence, []string{""})
+ }
+}
+
+func TestGetPresenceMultiJID(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
+ if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) {
+ return fmt.Errorf("bad request jids %#v", in.Jid)
+ }
+ out.PresenceResponse = []*pb.PresenceResponse{
+ newPresenceResponse(true, pb.PresenceResponse_NORMAL, true),
+ newPresenceResponse(true, pb.PresenceResponse_AWAY, true),
+ }
+ return nil
+ })
+
+ jids := []string{"user@example.com", "user2@example.com"}
+ presence, err := GetPresenceMulti(c, jids, "")
+ if err != nil {
+ t.Fatalf("GetPresenceMulti: %v", err)
+ }
+ want := []string{"", "away"}
+ if !reflect.DeepEqual(presence, want) {
+ t.Errorf("GetPresenceMulti: got %v, want %v", presence, want)
+ }
+}
+
+func TestGetPresenceMultiFromJID(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
+ if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) {
+ return fmt.Errorf("bad request jids %#v", in.Jid)
+ }
+ if jid := in.GetFromJid(); jid != "bot@appspot.com" {
+ return fmt.Errorf("bad from jid %q", jid)
+ }
+ out.PresenceResponse = []*pb.PresenceResponse{
+ newPresenceResponse(true, pb.PresenceResponse_NORMAL, true),
+ newPresenceResponse(true, pb.PresenceResponse_CHAT, true),
+ }
+ return nil
+ })
+
+ jids := []string{"user@example.com", "user2@example.com"}
+ presence, err := GetPresenceMulti(c, jids, "bot@appspot.com")
+ if err != nil {
+ t.Fatalf("GetPresenceMulti: %v", err)
+ }
+ want := []string{"", "chat"}
+ if !reflect.DeepEqual(presence, want) {
+ t.Errorf("GetPresenceMulti: got %v, want %v", presence, want)
+ }
+}
+
+func TestGetPresenceMultiInvalid(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
+ if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) {
+ return fmt.Errorf("bad request jids %#v", in.Jid)
+ }
+ out.PresenceResponse = []*pb.PresenceResponse{
+ newPresenceResponse(true, pb.PresenceResponse_EXTENDED_AWAY, true),
+ newPresenceResponse(true, pb.PresenceResponse_CHAT, false),
+ }
+ return nil
+ })
+
+ jids := []string{"user@example.com", "user2@example.com"}
+ presence, err := GetPresenceMulti(c, jids, "")
+
+ wantErr := appengine.MultiError{nil, ErrInvalidJID}
+ if !reflect.DeepEqual(err, wantErr) {
+ t.Fatalf("GetPresenceMulti: got %#v, want %#v", err, wantErr)
+ }
+
+ want := []string{"xa", ""}
+ if !reflect.DeepEqual(presence, want) {
+ t.Errorf("GetPresenceMulti: got %#v, want %#v", presence, want)
+ }
+}
+
+func TestGetPresenceMultiUnavailable(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
+ if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) {
+ return fmt.Errorf("bad request jids %#v", in.Jid)
+ }
+ out.PresenceResponse = []*pb.PresenceResponse{
+ newPresenceResponse(false, pb.PresenceResponse_AWAY, true),
+ newPresenceResponse(false, pb.PresenceResponse_DO_NOT_DISTURB, true),
+ }
+ return nil
+ })
+
+ jids := []string{"user@example.com", "user2@example.com"}
+ presence, err := GetPresenceMulti(c, jids, "")
+
+ wantErr := appengine.MultiError{
+ ErrPresenceUnavailable,
+ ErrPresenceUnavailable,
+ }
+ if !reflect.DeepEqual(err, wantErr) {
+ t.Fatalf("GetPresenceMulti: got %#v, want %#v", err, wantErr)
+ }
+ want := []string{"", ""}
+ if !reflect.DeepEqual(presence, want) {
+ t.Errorf("GetPresenceMulti: got %#v, want %#v", presence, want)
+ }
+}