1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
|
/*
* Copyright (C) Max Romanov
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_PORT_MEMORY_INT_H_INCLUDED_
#define _NXT_PORT_MEMORY_INT_H_INCLUDED_
#include <stdint.h>
#include <nxt_atomic.h>
#ifdef NXT_MMAP_TINY_CHUNK
#define PORT_MMAP_CHUNK_SIZE 16
#define PORT_MMAP_HEADER_SIZE 1024
#define PORT_MMAP_DATA_SIZE 1024
#else
#define PORT_MMAP_CHUNK_SIZE (1024 * 16)
#define PORT_MMAP_HEADER_SIZE (1024 * 4)
#define PORT_MMAP_DATA_SIZE (1024 * 1024 * 10)
#endif
#define PORT_MMAP_SIZE (PORT_MMAP_HEADER_SIZE + PORT_MMAP_DATA_SIZE)
#define PORT_MMAP_CHUNK_COUNT (PORT_MMAP_DATA_SIZE / PORT_MMAP_CHUNK_SIZE)
typedef uint32_t nxt_chunk_id_t;
typedef nxt_atomic_uint_t nxt_free_map_t;
#define FREE_BITS (sizeof(nxt_free_map_t) * 8)
#define FREE_IDX(nchunk) ((nchunk) / FREE_BITS)
#define FREE_MASK(nchunk) \
( 1ULL << ( (nchunk) % FREE_BITS ) )
#define MAX_FREE_IDX FREE_IDX(PORT_MMAP_CHUNK_COUNT)
/* Mapped at the start of shared memory segment. */
struct nxt_port_mmap_header_s {
    uint32_t        id;
    nxt_pid_t       src_pid;    /* For sanity check. */
    nxt_pid_t       dst_pid;    /* For sanity check. */
    nxt_port_id_t   sent_over;
    /* NOTE(review): name suggests an "out of shared memory" flag — confirm. */
    nxt_atomic_t    oosm;

    /*
     * One bit per data chunk; a set bit means the chunk is free
     * (see nxt_port_mmap_get_free_chunk / ..._set_chunk_free below).
     */
    nxt_free_map_t  free_map[MAX_FREE_IDX];
    nxt_free_map_t  free_map_padding;

    /* Same bit-per-chunk layout, used for the tracking entries below. */
    nxt_free_map_t  free_tracking_map[MAX_FREE_IDX];
    nxt_free_map_t  free_tracking_map_padding;

    /* One atomic tracking slot per chunk. */
    nxt_atomic_t    tracking[PORT_MMAP_CHUNK_COUNT];
};
/*
 * Process-local handle for one shared memory segment: the mapped header,
 * a reference counter, and the segment's descriptor.
 */
struct nxt_port_mmap_handler_s {
    nxt_port_mmap_header_t  *hdr;       /* Mapped segment header. */
    nxt_atomic_t            use_count;  /* Reference count — presumably guards
                                           unmap/close; verify against users. */
    nxt_fd_t                fd;         /* Descriptor backing the mapping. */
};
/*
 * Element of nxt_process_t.incoming/outgoing, shared memory segment
 * descriptor.
 */
struct nxt_port_mmap_s {
    nxt_port_mmap_handler_t  *mmap_handler;
};
typedef struct nxt_port_mmap_msg_s nxt_port_mmap_msg_t;

/* Passed as a second iov chunk when 'mmap' bit in nxt_port_msg_t is 1. */
struct nxt_port_mmap_msg_s {
    uint32_t        mmap_id;    /* Mmap index in nxt_process_t.outgoing. */
    nxt_chunk_id_t  chunk_id;   /* Mmap chunk index. */
    uint32_t        size;       /* Payload data size. */
};
typedef struct nxt_port_mmap_tracking_msg_s nxt_port_mmap_tracking_msg_t;

/* Identifies one slot in nxt_port_mmap_header_t.tracking. */
struct nxt_port_mmap_tracking_msg_s {
    uint32_t        mmap_id;      /* Mmap index in nxt_process_t.outgoing. */
    nxt_chunk_id_t  tracking_id;  /* Tracking index. */
};
nxt_inline nxt_bool_t
nxt_port_mmap_get_free_chunk(nxt_free_map_t *m, nxt_chunk_id_t *c);
#define nxt_port_mmap_get_chunk_busy(m, c) \
((m[FREE_IDX(c)] & FREE_MASK(c)) == 0)
nxt_inline void
nxt_port_mmap_set_chunk_busy(nxt_free_map_t *m, nxt_chunk_id_t c);
nxt_inline nxt_bool_t
nxt_port_mmap_chk_set_chunk_busy(nxt_free_map_t *m, nxt_chunk_id_t c);
nxt_inline void
nxt_port_mmap_set_chunk_free(nxt_free_map_t *m, nxt_chunk_id_t c);
nxt_inline nxt_chunk_id_t
nxt_port_mmap_chunk_id(nxt_port_mmap_header_t *hdr, u_char *p)
{
    u_char  *data;

    /* Chunk data starts right after the fixed-size header area. */
    data = (u_char *) hdr + PORT_MMAP_HEADER_SIZE;

    /* Index of the chunk that contains address p. */
    return (p - data) / PORT_MMAP_CHUNK_SIZE;
}
nxt_inline u_char *
nxt_port_mmap_chunk_start(nxt_port_mmap_header_t *hdr, nxt_chunk_id_t c)
{
    u_char  *data;

    /* Chunk data starts right after the fixed-size header area. */
    data = (u_char *) hdr + PORT_MMAP_HEADER_SIZE;

    /* Address of the first byte of chunk c. */
    return data + c * PORT_MMAP_CHUNK_SIZE;
}
/*
 * Scans the free bitmap for a set ("free") bit at or after chunk *c and
 * tries to atomically claim it.  On success stores the claimed chunk id
 * in *c and returns 1; returns 0 when no chunk could be claimed.
 */
nxt_inline nxt_bool_t
nxt_port_mmap_get_free_chunk(nxt_free_map_t *m, nxt_chunk_id_t *c)
{
    const nxt_free_map_t  default_mask = (nxt_free_map_t) -1;

    int             ffs;
    size_t          i, start;
    nxt_chunk_id_t  chunk;
    nxt_free_map_t  bits, mask;

    start = FREE_IDX(*c);

    /* For the first word, ignore bits below the starting chunk. */
    mask = default_mask << ((*c) % FREE_BITS);

    for (i = start; i < MAX_FREE_IDX; i++) {
        bits = m[i] & mask;

        /* Every subsequent word is scanned in full. */
        mask = default_mask;

        if (bits == 0) {
            continue;
        }

        /* Lowest set bit; ffs is 1-based, 0 only when bits == 0. */
        ffs = __builtin_ffsll(bits);

        if (ffs != 0) {
            chunk = i * FREE_BITS + ffs - 1;

            /*
             * Claim the chunk with a CAS.  If another thread won the
             * race, move on to the next bitmap word.
             */
            if (nxt_port_mmap_chk_set_chunk_busy(m, chunk)) {
                *c = chunk;
                return 1;
            }
        }
    }

    return 0;
}
nxt_inline void
nxt_port_mmap_set_chunk_busy(nxt_free_map_t *m, nxt_chunk_id_t c)
{
    nxt_free_map_t  *slot;

    slot = &m[FREE_IDX(c)];

    /* Unconditionally clear the chunk's "free" bit. */
    nxt_atomic_and_fetch(slot, ~FREE_MASK(c));
}
/*
 * Atomically flips chunk c from free to busy.  Returns 1 when this caller
 * cleared the bit; returns 0 when the chunk was (or became) busy first.
 */
nxt_inline nxt_bool_t
nxt_port_mmap_chk_set_chunk_busy(nxt_free_map_t *m, nxt_chunk_id_t c)
{
    nxt_free_map_t  *f;
    nxt_free_map_t  free_val, busy_val;

    f = m + FREE_IDX(c);

    /* Retry while the chunk's bit is still set (free). */
    while ( (*f & FREE_MASK(c)) != 0 ) {
        /* Expected word: current value with the chunk's bit set. */
        free_val = *f | FREE_MASK(c);

        /* Desired word: same value with the chunk's bit cleared. */
        busy_val = free_val & ~FREE_MASK(c);

        /* CAS succeeds only if no other thread changed the word. */
        if (nxt_atomic_cmp_set(f, free_val, busy_val) != 0) {
            return 1;
        }
    }

    return 0;
}
nxt_inline void
nxt_port_mmap_set_chunk_free(nxt_free_map_t *m, nxt_chunk_id_t c)
{
    nxt_free_map_t  *slot;

    slot = &m[FREE_IDX(c)];

    /* Set the bit again: the chunk is available for reuse. */
    nxt_atomic_or_fetch(slot, FREE_MASK(c));
}
#endif /* _NXT_PORT_MEMORY_INT_H_INCLUDED_ */
|