/*
 * Copyright (C) NGINX, Inc.
 */

#ifndef _NXT_NNCQ_H_INCLUDED_
#define _NXT_NNCQ_H_INCLUDED_


/*
 * Numeric Naive Circular Queue: a fixed-size circular queue of small
 * integers in the range [0, NXT_NNCQ_SIZE), safe for concurrent producers
 * and consumers.  Each slot stores its value packed together with a
 * "cycle" tag (how many times the queue has wrapped around), so that
 * compare-and-swap operations can tell a fresh entry from a stale one.
 * The "naive" part: enqueue never checks for free space, so the queue is
 * intended for workloads where at most NXT_NNCQ_SIZE values are
 * outstanding at any time.
 */

#define NXT_NNCQ_SIZE  16384

typedef uint32_t nxt_nncq_atomic_t;  /* counters and packed slot entries */
typedef uint16_t nxt_nncq_cycle_t;   /* wrap count; compared modulo 2^16 */

typedef struct {
    nxt_nncq_atomic_t  head;                    /* next position to dequeue */
    nxt_nncq_atomic_t  entries[NXT_NNCQ_SIZE];  /* cycle-tagged values */
    nxt_nncq_atomic_t  tail;                    /* next position to enqueue */
} nxt_nncq_t;


static inline nxt_nncq_atomic_t
nxt_nncq_head(nxt_nncq_t const volatile *q)
{
    return q->head;
}


static inline nxt_nncq_atomic_t
nxt_nncq_tail(nxt_nncq_t const volatile *q)
{
    return q->tail;
}


/* Advance the tail from "t" to "t + 1" unless another thread already has. */
static inline void
nxt_nncq_tail_cmp_inc(nxt_nncq_t volatile *q, nxt_nncq_atomic_t t)
{
    nxt_atomic_cmp_set(&q->tail, t, t + 1);
}


/* Extract the value stored in a packed slot entry. */
static inline nxt_nncq_atomic_t
nxt_nncq_index(nxt_nncq_t const volatile *q, nxt_nncq_atomic_t i)
{
    return i % NXT_NNCQ_SIZE;
}


/* Map a monotonic head/tail counter to a slot in the entries array. */
static inline nxt_nncq_atomic_t
nxt_nncq_map(nxt_nncq_t const volatile *q, nxt_nncq_atomic_t i)
{
    return i % NXT_NNCQ_SIZE;
}


/* The wrap count encoded in a counter or in a packed slot entry. */
static inline nxt_nncq_cycle_t
nxt_nncq_cycle(nxt_nncq_t const volatile *q, nxt_nncq_atomic_t i)
{
    return i / NXT_NNCQ_SIZE;
}


/* The cycle that follows "i"; wraps modulo 2^16 with nxt_nncq_cycle_t. */
static inline nxt_nncq_cycle_t
nxt_nncq_next_cycle(nxt_nncq_t const volatile *q, nxt_nncq_cycle_t i)
{
    return i + 1;
}


/* Pack a value and the cycle it is enqueued on into one slot entry. */
static inline nxt_nncq_atomic_t
nxt_nncq_new_entry(nxt_nncq_t const volatile *q, nxt_nncq_cycle_t cycle,
    nxt_nncq_atomic_t i)
{
    return cycle * NXT_NNCQ_SIZE + (i % NXT_NNCQ_SIZE);
}
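

/*
 * A worked example of the packing above (illustration only): with
 * NXT_NNCQ_SIZE of 16384, enqueueing the value 42 on cycle 3 stores the
 * entry 3 * 16384 + 42 = 49194; nxt_nncq_cycle() then recovers
 * 49194 / 16384 = 3, and nxt_nncq_index() recovers 49194 % 16384 = 42.
 */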


/*
 * The "queue is empty" sentinel returned by nxt_nncq_dequeue();
 * NXT_NNCQ_SIZE lies outside the valid value range [0, NXT_NNCQ_SIZE).
 */
static inline nxt_nncq_atomic_t
nxt_nncq_empty(nxt_nncq_t const volatile *q)
{
    return NXT_NNCQ_SIZE;
}


static inline void
nxt_nncq_init(nxt_nncq_t volatile *q)
{
    /*
     * Head and tail start at cycle 1, so the zero-initialized entries
     * (cycle 0) read as one cycle old, i.e. free, on the first pass.
     */
    q->head = NXT_NNCQ_SIZE;
    nxt_memzero((void *) q->entries, NXT_NNCQ_SIZE * sizeof(nxt_nncq_atomic_t));
    q->tail = NXT_NNCQ_SIZE;
}


static inline void
nxt_nncq_enqueue(nxt_nncq_t volatile *q, nxt_nncq_atomic_t val)
{
    nxt_nncq_cycle_t   e_cycle, t_cycle;
    nxt_nncq_atomic_t  n, t, e, j;

    for ( ;; ) {
        t = nxt_nncq_tail(q);
        j = nxt_nncq_map(q, t);
        e = q->entries[j];

        e_cycle = nxt_nncq_cycle(q, e);
        t_cycle = nxt_nncq_cycle(q, t);

        if (e_cycle == t_cycle) {
            /*
             * The slot was already filled on this cycle: the tail we
             * read is stale, so help advance it and retry.
             */
            nxt_nncq_tail_cmp_inc(q, t);
            continue;
        }

        if (nxt_nncq_next_cycle(q, e_cycle) != t_cycle) {
            /* The slot is not ready for this cycle yet: reread the tail. */
            continue;
        }

        /* The slot is free on this cycle: try to claim it. */

        n = nxt_nncq_new_entry(q, t_cycle, val);

        if (nxt_atomic_cmp_set(&q->entries[j], e, n)) {
            break;
        }
    }

    /* Publish the claimed slot by moving the tail past it. */

    nxt_nncq_tail_cmp_inc(q, t);
}


static inline nxt_nncq_atomic_t
nxt_nncq_dequeue(nxt_nncq_t volatile *q)
{
    nxt_nncq_cycle_t   e_cycle, h_cycle;
    nxt_nncq_atomic_t  h, j, e;

    for ( ;; ) {
        h = nxt_nncq_head(q);
        j = nxt_nncq_map(q, h);
        e = q->entries[j];

        e_cycle = nxt_nncq_cycle(q, e);
        h_cycle = nxt_nncq_cycle(q, h);

        if (e_cycle != h_cycle) {
            if (nxt_nncq_next_cycle(q, e_cycle) == h_cycle) {
                /*
                 * The slot still holds the previous cycle's entry, so
                 * nothing has been enqueued here yet: the queue is empty.
                 */
                return nxt_nncq_empty(q);
            }

            /* Stale read; retry. */
            continue;
        }

        /* A current-cycle entry: consume it by advancing the head. */

        if (nxt_atomic_cmp_set(&q->head, h, h + 1)) {
            break;
        }
    }

    /* Unpack and return the stored value. */

    return nxt_nncq_index(q, e);
}
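

/*
 * A minimal usage sketch (illustration, not part of this header): one
 * queue cycling small integers between producers and consumers.  The
 * function and variable names below are hypothetical; only the
 * nxt_nncq_* calls are real.
 */
#if 0
static nxt_nncq_t  example_queue;

static void
example(void)
{
    nxt_nncq_atomic_t  v;

    nxt_nncq_init(&example_queue);

    /*
     * Values must lie in [0, NXT_NNCQ_SIZE); anything larger is
     * truncated by the modulo in nxt_nncq_new_entry().
     */
    nxt_nncq_enqueue(&example_queue, 42);

    v = nxt_nncq_dequeue(&example_queue);

    if (v == nxt_nncq_empty(&example_queue)) {
        return;  /* nothing was queued */
    }

    /* v == 42 here. */
}
#endif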


#endif /* _NXT_NNCQ_H_INCLUDED_ */