/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_MEM_POOL_H_INCLUDED_
#define _NXT_MEM_POOL_H_INCLUDED_
#define NXT_MEM_POOL_MIN_EXT_SIZE  nxt_pagesize

typedef void (*nxt_mem_pool_cleanup_handler_t)(nxt_task_t *task, void *data);

typedef struct nxt_mem_pool_cleanup_s  nxt_mem_pool_cleanup_t;
typedef struct nxt_mem_pool_cache_s    nxt_mem_pool_cache_t;
typedef struct nxt_mem_pool_chunk_s    nxt_mem_pool_chunk_t;
typedef struct nxt_mem_pool_ext_s      nxt_mem_pool_ext_t;
struct nxt_mem_pool_cleanup_s {
    nxt_mem_pool_cleanup_handler_t  handler;
    void                            *data;
    nxt_mem_pool_cleanup_t          *next;
};


struct nxt_mem_pool_ext_s {
    void                *data;
    nxt_mem_pool_ext_t  *next;
};


struct nxt_mem_pool_chunk_s {
    u_char                *free;
    u_char                *end;
    nxt_mem_pool_chunk_t  *next;
    uint32_t              fails;  /* 8 bits */
};


struct nxt_mem_pool_cache_s {
    uint32_t              size;
    uint32_t              nalloc;
    void                  *free;
    nxt_mem_pool_cache_t  *next;
};


struct nxt_mem_pool_s {
    nxt_mem_pool_chunk_t    chunk;
    uint32_t                min_ext_size;
    uint32_t                chunk_size;
    nxt_mem_pool_chunk_t    *current;
    nxt_mem_pool_ext_t      *ext;
    nxt_mem_pool_cache_t    *cache;
    nxt_mem_pool_cleanup_t  *cleanup;

#if (NXT_DEBUG)
    nxt_pid_t               pid;
    nxt_tid_t               tid;
#endif
};
NXT_EXPORT nxt_mem_pool_t *nxt_mem_pool_create(size_t size)
NXT_MALLOC_LIKE;
NXT_EXPORT void nxt_mem_pool_destroy(nxt_mem_pool_t *mp);
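/*
 * Usage sketch (illustrative only; assumes the size argument of
 * nxt_mem_pool_create() sets the pool's chunk size and that
 * nxt_mem_pool_destroy() releases everything allocated from the pool):
 *
 *     nxt_mem_pool_t  *mp;
 *
 *     mp = nxt_mem_pool_create(1024);
 *     if (mp == NULL) {
 *         return NXT_ERROR;
 *     }
 *
 *     (allocate from mp with the functions declared below)
 *
 *     nxt_mem_pool_destroy(mp);
 */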
/*
* Generic aligned allocation, suitable for struct allocations
* without "long double" and SIMD values.
*/
#define \
nxt_mem_alloc(mp, size) \
    nxt_mem_align((mp), NXT_ALIGNMENT, (size))
NXT_EXPORT void *nxt_mem_align(nxt_mem_pool_t *mp, size_t alignment,
size_t size)
NXT_MALLOC_LIKE;
NXT_EXPORT void *nxt_mem_zalign(nxt_mem_pool_t *mp, size_t alignment,
size_t size)
NXT_MALLOC_LIKE;
NXT_EXPORT void *nxt_mem_nalloc(nxt_mem_pool_t *mp, size_t size)
NXT_MALLOC_LIKE;
NXT_EXPORT void *nxt_mem_zalloc(nxt_mem_pool_t *mp, size_t size)
NXT_MALLOC_LIKE;
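/*
 * Usage sketch (illustrative only): allocating a struct from a pool "mp".
 * nxt_mem_alloc() returns NXT_ALIGNMENT-aligned memory; nxt_mem_zalloc()
 * is assumed to be its zero-filling counterpart and nxt_mem_nalloc() to
 * make no alignment guarantee.  The example_entry_t type is hypothetical.
 *
 *     typedef struct {
 *         u_char    *name;
 *         uint32_t  flags;
 *     } example_entry_t;
 *
 *     example_entry_t  *e;
 *
 *     e = nxt_mem_zalloc(mp, sizeof(example_entry_t));
 *     if (e == NULL) {
 *         return NXT_ERROR;
 *     }
 */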
/*
 * nxt_mem_buf() is intended for allocating I/O buffers.
 * Unix network buffers usually have no size restrictions, so the
 * NXT_MEM_BUF_CUTBACK and NXT_MEM_BUF_USABLE options allow better
 * utilization of the allocated memory (details in unix/nxt_malloc.h).
 * Windows locks buffers in kernel memory on a per-page basis for both
 * network and file operations, so nxt_mem_buf() should minimize the
 * number of allocated pages.  However, these allocations are not
 * necessarily page-aligned.
 */
#define NXT_MEM_BUF_CUTBACK 1
#define NXT_MEM_BUF_USABLE 2
NXT_EXPORT void *nxt_mem_buf(nxt_mem_pool_t *mp, size_t *sizep,
nxt_uint_t flags);
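/*
 * Usage sketch (illustrative only; assumes *sizep carries the requested
 * buffer size on input and receives the actually usable size on output
 * when NXT_MEM_BUF_USABLE is set):
 *
 *     u_char  *p;
 *     size_t  size;
 *
 *     size = 16384;
 *     p = nxt_mem_buf(mp, &size, NXT_MEM_BUF_CUTBACK | NXT_MEM_BUF_USABLE);
 *     if (p == NULL) {
 *         return NXT_ERROR;
 *     }
 *
 *     (size now holds the number of bytes that may be used)
 */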
/*
* Aligned allocation, suitable for generic allocations compatible
* with malloc() alignment.
*/
#define \
nxt_mem_malloc(mp, size) \
    nxt_mem_align((mp), NXT_MAX_ALIGNMENT, (size))
NXT_EXPORT nxt_int_t nxt_mem_free(nxt_mem_pool_t *mp, void *p);
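/*
 * Usage sketch (illustrative only; assumes nxt_mem_free() returns an
 * individually allocated block to the pool and that its nxt_int_t result
 * reports whether the pointer belonged to the pool):
 *
 *     void  *p;
 *
 *     p = nxt_mem_malloc(mp, 4096);
 *     if (p == NULL) {
 *         return NXT_ERROR;
 *     }
 *
 *     (void) nxt_mem_free(mp, p);
 */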
NXT_EXPORT nxt_mem_pool_cleanup_t *nxt_mem_pool_cleanup(nxt_mem_pool_t *mp,
size_t size);
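/*
 * Usage sketch (illustrative only; assumes the size argument allocates
 * the cleanup's data area from the pool and the handler is called when
 * the pool is destroyed; example_ctx_t and example_cleanup_handler are
 * hypothetical):
 *
 *     nxt_mem_pool_cleanup_t  *mpcl;
 *
 *     mpcl = nxt_mem_pool_cleanup(mp, sizeof(example_ctx_t));
 *     if (mpcl == NULL) {
 *         return NXT_ERROR;
 *     }
 *
 *     mpcl->handler = example_cleanup_handler;
 */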
NXT_EXPORT void *nxt_mem_cache_alloc0(nxt_mem_pool_t *mp, size_t size)
NXT_MALLOC_LIKE;
NXT_EXPORT void *nxt_mem_cache_zalloc0(nxt_mem_pool_t *mp, size_t size)
NXT_MALLOC_LIKE;
NXT_EXPORT void nxt_mem_cache_free0(nxt_mem_pool_t *mp, void *p, size_t size);
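/*
 * Usage sketch (illustrative only; assumes the cached allocator keeps
 * per-size free lists, so the same size must be passed back to
 * nxt_mem_cache_free0()):
 *
 *     void  *obj;
 *
 *     obj = nxt_mem_cache_alloc0(mp, 64);
 *     if (obj == NULL) {
 *         return NXT_ERROR;
 *     }
 *
 *     nxt_mem_cache_free0(mp, obj, 64);
 */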
NXT_EXPORT void *nxt_mem_lvlhsh_alloc(void *ctx, size_t size,
nxt_uint_t nalloc);
NXT_EXPORT void nxt_mem_lvlhsh_free(void *ctx, void *p, size_t size);
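/*
 * Usage sketch (illustrative only; assumes these functions serve as the
 * level hash allocation callbacks, with the memory pool passed as the
 * opaque ctx argument):
 *
 *     void  *bucket;
 *
 *     bucket = nxt_mem_lvlhsh_alloc(mp, 128, 8);
 *     if (bucket != NULL) {
 *         nxt_mem_lvlhsh_free(mp, bucket, 128);
 *     }
 */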
#if (NXT_DEBUG)

#define \
nxt_mem_pool_debug_lock(_mp, _tid) \
    (_mp->tid) = _tid

#else

#define \
nxt_mem_pool_debug_lock(_mp, _tid)

#endif
#endif /* _NXT_MEM_POOL_H_INCLUDED_ */