/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_ENDPOINT_H
#define GRPC_CORE_LIB_IOMGR_ENDPOINT_H

#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/support/time.h>

#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/resource_quota.h"

#ifdef __cplusplus
extern "C" {
#endif

/* An endpoint caps a streaming channel between two communicating processes.
   Examples may be: a tcp socket, <stdin+stdout>, or some shared memory. */

typedef struct grpc_endpoint grpc_endpoint;
typedef struct grpc_endpoint_vtable grpc_endpoint_vtable;

struct grpc_endpoint_vtable {
  void (*read)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
               grpc_slice_buffer *slices, grpc_closure *cb);
  void (*write)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                grpc_slice_buffer *slices, grpc_closure *cb);
  void (*add_to_pollset)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                         grpc_pollset *pollset);
  void (*add_to_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                             grpc_pollset_set *pollset_set);
  void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                   grpc_error *why);
  void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
  grpc_resource_user *(*get_resource_user)(grpc_endpoint *ep);
  char *(*get_peer)(grpc_endpoint *ep);
  int (*get_fd)(grpc_endpoint *ep);
};
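
/* Illustrative sketch (not part of this header): a concrete endpoint, such as
   the TCP endpoint, typically embeds a grpc_endpoint as its first member and
   points base.vtable at a static vtable of its own functions. The names below
   are hypothetical.

     typedef struct my_endpoint {
       grpc_endpoint base;  // must be first so the two pointers interconvert
       int fd;
       char *peer_string;
     } my_endpoint;

     static const grpc_endpoint_vtable my_vtable = {
         my_read,     my_write,   my_add_to_pollset,    my_add_to_pollset_set,
         my_shutdown, my_destroy, my_get_resource_user, my_get_peer,
         my_get_fd};

     // a my_endpoint_create() constructor would set base.vtable = &my_vtable
     // before handing the endpoint to the rest of the stack.
*/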

/* When data is available on the connection, calls the callback with slices.
   Callback success indicates that the endpoint can accept more reads, failure
   indicates the endpoint is closed.
   Valid slices may be placed into \a slices even when the callback is
   invoked with error != GRPC_ERROR_NONE. */
void grpc_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                        grpc_slice_buffer *slices, grpc_closure *cb);
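
/* Usage sketch (illustrative only): issue a read and consume the slices from
   the completion closure. The closure-initialization helper shown is the
   usual pattern in this code base, but its exact signature varies between
   gRPC versions, so treat it as an assumption.

     static void on_read_done(grpc_exec_ctx *exec_ctx, void *arg,
                              grpc_error *error) {
       grpc_slice_buffer *incoming = (grpc_slice_buffer *)arg;
       if (error == GRPC_ERROR_NONE) {
         // consume incoming->slices[0 .. incoming->count), then issue the
         // next grpc_endpoint_read to keep data flowing
       } else {
         // the endpoint is closed; stop reading and clean up
       }
     }

     // caller side:
     //   grpc_slice_buffer incoming;
     //   grpc_slice_buffer_init(&incoming);
     //   grpc_closure read_done;
     //   grpc_closure_init(&read_done, on_read_done, &incoming,
     //                     grpc_schedule_on_exec_ctx);
     //   grpc_endpoint_read(exec_ctx, ep, &incoming, &read_done);
*/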

/* Obtain a string describing the remote peer; the caller takes ownership of
   the returned string. */
char *grpc_endpoint_get_peer(grpc_endpoint *ep);

/* Get the file descriptor used by \a ep. Return -1 if \a ep is not using an
   fd. */
int grpc_endpoint_get_fd(grpc_endpoint *ep);

/* Write slices out to the socket.

   The write is asynchronous: \a cb is scheduled once the endpoint has
   accepted the data and is ready for more writes, or with an error if the
   write failed.
   \a slices may be mutated at will by the endpoint until cb is called.
   No guarantee is made to the content of slices after a write EXCEPT that
   it is a valid slice buffer. */
void grpc_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                         grpc_slice_buffer *slices, grpc_closure *cb);
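
/* Usage sketch (illustrative only): queue the bytes to send in a slice buffer
   and hand it to the endpoint; \a write_done (a grpc_closure prepared as in
   the read example above) runs once the endpoint has accepted the data, or
   with an error if the write failed.

     grpc_slice_buffer outgoing;
     grpc_slice_buffer_init(&outgoing);
     grpc_slice_buffer_add(&outgoing, grpc_slice_from_copied_string("hello"));
     grpc_endpoint_write(exec_ctx, ep, &outgoing, &write_done);
     // `outgoing` may be mutated by the endpoint until write_done runs; only
     // reuse or destroy it from within that closure.
*/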

/* Causes any pending and future read/write callbacks to run immediately with
   an error indicating that the endpoint is shut down; \a why describes the
   reason for the shutdown. */
void grpc_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                            grpc_error *why);

/* Release the resources held by \a ep. No other calls may be made on \a ep
   after this returns. */
void grpc_endpoint_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
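
/* Typical teardown order (illustrative only; the error-construction macro
   name differs between gRPC versions):

     grpc_endpoint_shutdown(
         exec_ctx, ep, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutting down"));
     grpc_endpoint_destroy(exec_ctx, ep);
*/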

/* Add an endpoint to a pollset, so that when the pollset is polled, events
   from this endpoint are considered */
void grpc_endpoint_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                                  grpc_pollset *pollset);

/* As above, but for a pollset_set. */
void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
                                      grpc_endpoint *ep,
                                      grpc_pollset_set *pollset_set);
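
/* Illustrative only: a transport usually calls one of these once, when it
   takes ownership of the endpoint, using the pollset (or pollset_set) that
   will drive the endpoint's I/O:

     grpc_endpoint_add_to_pollset(exec_ctx, ep, accepting_pollset);
*/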

/* Return the resource user associated with \a endpoint. */
grpc_resource_user *grpc_endpoint_get_resource_user(grpc_endpoint *endpoint);

struct grpc_endpoint {
  const grpc_endpoint_vtable *vtable;
};

#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_LIB_IOMGR_ENDPOINT_H */