path: root/src/nxt_lvlhsh_pool.c
blob: 5de319f93eab1e1414bb715f26a0ac47a2a2e938

/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#include <nxt_main.h>


typedef struct nxt_lvlhsh_pool_cache_s  nxt_lvlhsh_pool_cache_t;

struct nxt_lvlhsh_pool_cache_s {
    uint32_t                 size;
    uint32_t                 nalloc;
    void                     *free;
    nxt_lvlhsh_pool_cache_t  *next;
};


typedef struct {
    nxt_mem_pool_t           *mem_pool;
    void                     *free;
    nxt_lvlhsh_pool_cache_t  *next;
} nxt_lvlhsh_pool_t;


/*
 * lvlhsh requires allocations aligned to the size of the allocation.
 * This is not an issue for slab-like allocators, but the glibc
 * allocator may waste memory on such aligned allocations.  So
 * nxt_lvlhsh_pool_alloc() allocates memory in chunks of "nalloc"
 * allocations, except for the first allocation.  The first lvlhsh
 * allocation is a bucket allocation, and it is enough for a small
 * hash or for the early stage of a large hash.  By default lvlhsh
 * uses 128-byte buckets and levels on 64-bit platforms and 64-byte
 * ones on 32-bit platforms.  This allows searching up to 10 entries
 * in one memory access and up to 160 entries in two memory accesses
 * on 64-bit platforms, and up to 7 and up to 112 entries respectively
 * on 32-bit platforms.
 *
 * After a bucket has been filled up with 10 64-bit entries
 * or 7 32-bit entries, lvlhsh expands it to a level and spreads
 * the content of the first bucket over the level's new buckets.
 * The number of new allocations may be up to 11 on 64-bit or
 * 8 on 32-bit platforms, so it is better to allocate them together
 * to avoid wasting memory and CPU time.
 *
 * For these reasons "nalloc" should be 16.
 */
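
/*
 * For example, with "nalloc" equal to 16 and the default 128-byte
 * buckets on a 64-bit platform, a chunk allocated by
 * nxt_lvlhsh_pool_alloc_chunk() is 16 * 128 = 2048 bytes, so the up
 * to 11 allocations made by the first bucket expansion are all served
 * by a single chunk, and five free slots remain for further growth.
 */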


static void *nxt_lvlhsh_pool_alloc_chunk(nxt_mem_pool_cache_t *cache,
    size_t size, nxt_uint_t nalloc);


/* Allocation of an lvlhsh level or bucket of the specified size. */

void *
nxt_lvlhsh_pool_alloc(void *ctx, size_t size, nxt_uint_t nalloc)
{
    void                  *p, **pp;
    nxt_mem_pool_t        *mp;
    nxt_mem_pool_cache_t  *cache;

    mp = ctx;

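    /*
     * Look for a cache of the requested size created by a previous
     * lvlhsh allocation from this pool: reuse a freed block if one is
     * available, otherwise allocate a new chunk of "nalloc" blocks.
     */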
    for (cache = mp->cache; cache != NULL; cache = cache->next) {

        if (cache->size == size && cache->nalloc != 0) {

            if (cache->free != NULL) {
                pp = cache->free;
                cache->free = *pp;
                return pp;
            }

            return nxt_lvlhsh_pool_alloc_chunk(cache, size, nalloc);
        }
    }

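    /*
     * No cache of this size exists yet: create one and satisfy the
     * request with a single aligned allocation.  Chunks of "nalloc"
     * allocations are used only for subsequent requests of this size.
     */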
    cache = nxt_mem_alloc(mp, sizeof(nxt_mem_pool_cache_t));

    if (nxt_fast_path(cache != NULL)) {

        p = nxt_memalign(size, size);

        if (nxt_fast_path(p != NULL)) {
            cache->size = size;
            cache->nalloc = nalloc;
            cache->free = NULL;
            cache->next = mp->cache;
            mp->cache = cache;
            return p;
        }
    }

    return NULL;
}


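/*
 * Allocate a chunk of "nalloc" size-aligned blocks.  The first block
 * is returned to the caller; the remaining blocks are chained through
 * their first pointer-sized word onto cache->free, with the last one
 * terminated by NULL:
 *
 *   +----------+----------+----------+- ... -+----------+
 *   | returned | next --->| next --->|       |   NULL   |
 *   +----------+----------+----------+- ... -+----------+
 */
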
static void *
nxt_lvlhsh_pool_alloc_chunk(nxt_mem_pool_cache_t *cache, size_t size,
    nxt_uint_t nalloc)
{
    char    *m, *p, *end;
    void    **pp;
    size_t  n;

    n = (nalloc == 0) ? 1 : nalloc;
    n *= size;

    m = nxt_memalign(size, n);

    if (nxt_fast_path(m != NULL)) {

        pp = &cache->free;
        end = m + n;

        for (p = m + size; p < end; p = p + size) {
            *pp = p;
            pp = (void **) p;
        }

        *pp = NULL;
    }

    return m;
}


/* Deallocation of an lvlhsh level or bucket of the specified size. */

void
nxt_lvlhsh_pool_free(void *ctx, void *p, size_t size)
{
    void                  **pp;
    nxt_mem_pool_t        *mp;
    nxt_mem_pool_cache_t  *cache;

    mp = ctx;

    pp = p;

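    /*
     * Push the block onto the free list of its per-size cache so that
     * a subsequent allocation of the same size can reuse it.
     */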
    for (cache = mp->cache; cache != NULL; cache = cache->next) {

        if (cache->size == size && cache->nalloc != 0) {
            *pp = cache->free;
            cache->free = p;
            return;
        }
    }
}
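

/*
 * Minimal usage sketch (illustrative, not part of the original file):
 * it shows only the calling convention of the functions defined above,
 * with a memory pool passed as the "ctx" argument.  The 128-byte size
 * and the "nalloc" of 16 are taken from the comment at the top of the
 * file; the example function name is hypothetical.
 */

#if 0

static void
nxt_lvlhsh_pool_alloc_example(nxt_mem_pool_t *mp)
{
    void  *bucket;

    /* The first allocation of a given size creates a per-size cache. */
    bucket = nxt_lvlhsh_pool_alloc(mp, 128, 16);

    if (bucket != NULL) {
        /* The block goes back onto the cache free list for reuse. */
        nxt_lvlhsh_pool_free(mp, bucket, 128);
    }
}

#endif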