/* mTower: queue.h (vendored NetBSD <sys/queue.h>) */
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* $NetBSD: queue.h,v 1.49.6.1 2008/11/20 03:22:38 snj Exp $ */
3 
4 /*
5  * Copyright (c) 1991, 1993
6  * The Regents of the University of California. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  * notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  * notice, this list of conditions and the following disclaimer in the
15  * documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  * may be used to endorse or promote products derived from this software
18  * without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * @(#)queue.h 8.5 (Berkeley) 8/20/94
33  */
34 
35 #ifndef _SYS_QUEUE_H_
36 #define _SYS_QUEUE_H_
37 
38 /*#include <sys/null.h> */
39 
40 /*
41  * This file defines five types of data structures: singly-linked lists,
42  * lists, simple queues, tail queues, and circular queues.
43  *
44  * A singly-linked list is headed by a single forward pointer. The
45  * elements are singly linked for minimum space and pointer manipulation
46  * overhead at the expense of O(n) removal for arbitrary elements. New
47  * elements can be added to the list after an existing element or at the
48  * head of the list. Elements being removed from the head of the list
49  * should use the explicit macro for this purpose for optimum
50  * efficiency. A singly-linked list may only be traversed in the forward
51  * direction. Singly-linked lists are ideal for applications with large
52  * datasets and few or no removals or for implementing a LIFO queue.
53  *
54  * A list is headed by a single forward pointer (or an array of forward
55  * pointers for a hash table header). The elements are doubly linked
56  * so that an arbitrary element can be removed without a need to
57  * traverse the list. New elements can be added to the list before
58  * or after an existing element or at the head of the list. A list
59  * may only be traversed in the forward direction.
60  *
61  * A simple queue is headed by a pair of pointers, one the head of the
62  * list and the other to the tail of the list. The elements are singly
63  * linked to save space, so elements can only be removed from the
64  * head of the list. New elements can be added to the list after
65  * an existing element, at the head of the list, or at the end of the
66  * list. A simple queue may only be traversed in the forward direction.
67  *
68  * A tail queue is headed by a pair of pointers, one to the head of the
69  * list and the other to the tail of the list. The elements are doubly
70  * linked so that an arbitrary element can be removed without a need to
71  * traverse the list. New elements can be added to the list before or
72  * after an existing element, at the head of the list, or at the end of
73  * the list. A tail queue may be traversed in either direction.
74  *
75  * A circle queue is headed by a pair of pointers, one to the head of the
76  * list and the other to the tail of the list. The elements are doubly
77  * linked so that an arbitrary element can be removed without a need to
78  * traverse the list. New elements can be added to the list before or after
79  * an existing element, at the head of the list, or at the end of the list.
80  * A circle queue may be traversed in either direction, but has a more
81  * complex end of list detection.
82  *
83  * For details on the use of these macros, see the queue(3) manual page.
84  */
85 
86 /*
87  * List definitions.
88  */
89 #define LIST_HEAD(name, type) \
90 struct name { \
91  struct type *lh_first; /* first element */ \
92 }
93 
94 #define LIST_HEAD_INITIALIZER(head) \
95  { NULL }
96 
97 #define LIST_ENTRY(type) \
98 struct { \
99  struct type *le_next; /* next element */ \
100  struct type **le_prev; /* address of previous next element */ \
101 }
102 
103 /*
104  * List functions.
105  */
106 #if defined(_KERNEL) && defined(QUEUEDEBUG)
107 #define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) \
108  if ((head)->lh_first && \
109  (head)->lh_first->field.le_prev != &(head)->lh_first) \
110  panic("LIST_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
111 #define QUEUEDEBUG_LIST_OP(elm, field) \
112  if ((elm)->field.le_next && \
113  (elm)->field.le_next->field.le_prev != \
114  &(elm)->field.le_next) \
115  panic("LIST_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
116  if (*(elm)->field.le_prev != (elm)) \
117  panic("LIST_* back %p %s:%d", (elm), __FILE__, __LINE__);
118 #define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) \
119  (elm)->field.le_next = (void *)1L; \
120  (elm)->field.le_prev = (void *)1L;
121 #else
122 #define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
123 #define QUEUEDEBUG_LIST_OP(elm, field)
124 #define QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
125 #endif
126 
127 #define LIST_INIT(head) do { \
128  (head)->lh_first = NULL; \
129 } while (/* CONSTCOND */0)
130 
131 #define LIST_INSERT_AFTER(listelm, elm, field) do { \
132  QUEUEDEBUG_LIST_OP((listelm), field) \
133  if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
134  (listelm)->field.le_next->field.le_prev = \
135  &(elm)->field.le_next; \
136  (listelm)->field.le_next = (elm); \
137  (elm)->field.le_prev = &(listelm)->field.le_next; \
138 } while (/* CONSTCOND */0)
139 
140 #define LIST_INSERT_BEFORE(listelm, elm, field) do { \
141  QUEUEDEBUG_LIST_OP((listelm), field) \
142  (elm)->field.le_prev = (listelm)->field.le_prev; \
143  (elm)->field.le_next = (listelm); \
144  *(listelm)->field.le_prev = (elm); \
145  (listelm)->field.le_prev = &(elm)->field.le_next; \
146 } while (/* CONSTCOND */0)
147 
148 #define LIST_INSERT_HEAD(head, elm, field) do { \
149  QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field) \
150  if (((elm)->field.le_next = (head)->lh_first) != NULL) \
151  (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
152  (head)->lh_first = (elm); \
153  (elm)->field.le_prev = &(head)->lh_first; \
154 } while (/* CONSTCOND */0)
155 
156 #define LIST_REMOVE(elm, field) do { \
157  QUEUEDEBUG_LIST_OP((elm), field) \
158  if ((elm)->field.le_next != NULL) \
159  (elm)->field.le_next->field.le_prev = \
160  (elm)->field.le_prev; \
161  *(elm)->field.le_prev = (elm)->field.le_next; \
162  QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
163 } while (/* CONSTCOND */0)
164 
165 #define LIST_FOREACH(var, head, field) \
166  for ((var) = ((head)->lh_first); \
167  (var); \
168  (var) = ((var)->field.le_next))
169 
170 /*
171  * List access methods.
172  */
173 #define LIST_EMPTY(head) ((head)->lh_first == NULL)
174 #define LIST_FIRST(head) ((head)->lh_first)
175 #define LIST_NEXT(elm, field) ((elm)->field.le_next)
176 
177 /*
178  * Singly-linked List definitions.
179  */
180 #define SLIST_HEAD(name, type) \
181 struct name { \
182  struct type *slh_first; /* first element */ \
183 }
184 
185 #define SLIST_HEAD_INITIALIZER(head) \
186  { NULL }
187 
188 #define SLIST_ENTRY(type) \
189 struct { \
190  struct type *sle_next; /* next element */ \
191 }
192 
193 /*
194  * Singly-linked List functions.
195  */
196 #define SLIST_INIT(head) do { \
197  (head)->slh_first = NULL; \
198 } while (/* CONSTCOND */0)
199 
200 #define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
201  (elm)->field.sle_next = (slistelm)->field.sle_next; \
202  (slistelm)->field.sle_next = (elm); \
203 } while (/* CONSTCOND */0)
204 
205 #define SLIST_INSERT_HEAD(head, elm, field) do { \
206  (elm)->field.sle_next = (head)->slh_first; \
207  (head)->slh_first = (elm); \
208 } while (/* CONSTCOND */0)
209 
210 #define SLIST_REMOVE_HEAD(head, field) do { \
211  (head)->slh_first = (head)->slh_first->field.sle_next; \
212 } while (/* CONSTCOND */0)
213 
214 #define SLIST_REMOVE(head, elm, type, field) do { \
215  if ((head)->slh_first == (elm)) { \
216  SLIST_REMOVE_HEAD((head), field); \
217  } \
218  else { \
219  struct type *curelm = (head)->slh_first; \
220  while(curelm->field.sle_next != (elm)) \
221  curelm = curelm->field.sle_next; \
222  curelm->field.sle_next = \
223  curelm->field.sle_next->field.sle_next; \
224  } \
225 } while (/* CONSTCOND */0)
226 
227 #define SLIST_REMOVE_AFTER(slistelm, field) do { \
228  (slistelm)->field.sle_next = \
229  SLIST_NEXT(SLIST_NEXT((slistelm), field), field); \
230 } while (/* CONSTCOND */0)
231 
232 #define SLIST_FOREACH(var, head, field) \
233  for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)
234 
235 /*
236  * Singly-linked List access methods.
237  */
238 #define SLIST_EMPTY(head) ((head)->slh_first == NULL)
239 #define SLIST_FIRST(head) ((head)->slh_first)
240 #define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
241 
242 /*
243  * Singly-linked Tail queue declarations.
244  */
245 #define STAILQ_HEAD(name, type) \
246 struct name { \
247  struct type *stqh_first; /* first element */ \
248  struct type **stqh_last; /* addr of last next element */ \
249 }
250 
251 #define STAILQ_HEAD_INITIALIZER(head) \
252  { NULL, &(head).stqh_first }
253 
254 #define STAILQ_ENTRY(type) \
255 struct { \
256  struct type *stqe_next; /* next element */ \
257 }
258 
259 /*
260  * Singly-linked Tail queue functions.
261  */
262 #define STAILQ_INIT(head) do { \
263  (head)->stqh_first = NULL; \
264  (head)->stqh_last = &(head)->stqh_first; \
265 } while (/* CONSTCOND */0)
266 
267 #define STAILQ_INSERT_HEAD(head, elm, field) do { \
268  if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \
269  (head)->stqh_last = &(elm)->field.stqe_next; \
270  (head)->stqh_first = (elm); \
271 } while (/* CONSTCOND */0)
272 
273 #define STAILQ_INSERT_TAIL(head, elm, field) do { \
274  (elm)->field.stqe_next = NULL; \
275  *(head)->stqh_last = (elm); \
276  (head)->stqh_last = &(elm)->field.stqe_next; \
277 } while (/* CONSTCOND */0)
278 
279 #define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
280  if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
281  (head)->stqh_last = &(elm)->field.stqe_next; \
282  (listelm)->field.stqe_next = (elm); \
283 } while (/* CONSTCOND */0)
284 
285 #define STAILQ_REMOVE_HEAD(head, field) do { \
286  if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
287  (head)->stqh_last = &(head)->stqh_first; \
288 } while (/* CONSTCOND */0)
289 
290 #define STAILQ_REMOVE(head, elm, type, field) do { \
291  if ((head)->stqh_first == (elm)) { \
292  STAILQ_REMOVE_HEAD((head), field); \
293  } else { \
294  struct type *curelm = (head)->stqh_first; \
295  while (curelm->field.stqe_next != (elm)) \
296  curelm = curelm->field.stqe_next; \
297  if ((curelm->field.stqe_next = \
298  curelm->field.stqe_next->field.stqe_next) == NULL) \
299  (head)->stqh_last = &(curelm)->field.stqe_next; \
300  } \
301 } while (/* CONSTCOND */0)
302 
303 #define STAILQ_FOREACH(var, head, field) \
304  for ((var) = ((head)->stqh_first); \
305  (var); \
306  (var) = ((var)->field.stqe_next))
307 
308 #define STAILQ_CONCAT(head1, head2) do { \
309  if (!STAILQ_EMPTY((head2))) { \
310  *(head1)->stqh_last = (head2)->stqh_first; \
311  (head1)->stqh_last = (head2)->stqh_last; \
312  STAILQ_INIT((head2)); \
313  } \
314 } while (/* CONSTCOND */0)
315 
316 /*
317  * Singly-linked Tail queue access methods.
318  */
319 #define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
320 #define STAILQ_FIRST(head) ((head)->stqh_first)
321 #define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
322 
323 /*
324  * Simple queue definitions.
325  */
326 #define SIMPLEQ_HEAD(name, type) \
327 struct name { \
328  struct type *sqh_first; /* first element */ \
329  struct type **sqh_last; /* addr of last next element */ \
330 }
331 
332 #define SIMPLEQ_HEAD_INITIALIZER(head) \
333  { NULL, &(head).sqh_first }
334 
335 #define SIMPLEQ_ENTRY(type) \
336 struct { \
337  struct type *sqe_next; /* next element */ \
338 }
339 
340 /*
341  * Simple queue functions.
342  */
343 #define SIMPLEQ_INIT(head) do { \
344  (head)->sqh_first = NULL; \
345  (head)->sqh_last = &(head)->sqh_first; \
346 } while (/* CONSTCOND */0)
347 
348 #define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
349  if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
350  (head)->sqh_last = &(elm)->field.sqe_next; \
351  (head)->sqh_first = (elm); \
352 } while (/* CONSTCOND */0)
353 
354 #define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
355  (elm)->field.sqe_next = NULL; \
356  *(head)->sqh_last = (elm); \
357  (head)->sqh_last = &(elm)->field.sqe_next; \
358 } while (/* CONSTCOND */0)
359 
360 #define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
361  if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
362  (head)->sqh_last = &(elm)->field.sqe_next; \
363  (listelm)->field.sqe_next = (elm); \
364 } while (/* CONSTCOND */0)
365 
366 #define SIMPLEQ_REMOVE_HEAD(head, field) do { \
367  if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
368  (head)->sqh_last = &(head)->sqh_first; \
369 } while (/* CONSTCOND */0)
370 
371 #define SIMPLEQ_REMOVE(head, elm, type, field) do { \
372  if ((head)->sqh_first == (elm)) { \
373  SIMPLEQ_REMOVE_HEAD((head), field); \
374  } else { \
375  struct type *curelm = (head)->sqh_first; \
376  while (curelm->field.sqe_next != (elm)) \
377  curelm = curelm->field.sqe_next; \
378  if ((curelm->field.sqe_next = \
379  curelm->field.sqe_next->field.sqe_next) == NULL) \
380  (head)->sqh_last = &(curelm)->field.sqe_next; \
381  } \
382 } while (/* CONSTCOND */0)
383 
384 #define SIMPLEQ_FOREACH(var, head, field) \
385  for ((var) = ((head)->sqh_first); \
386  (var); \
387  (var) = ((var)->field.sqe_next))
388 
389 /*
390  * Simple queue access methods.
391  */
392 #define SIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL)
393 #define SIMPLEQ_FIRST(head) ((head)->sqh_first)
394 #define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
395 
396 /*
397  * Tail queue definitions.
398  */
399 #define _TAILQ_HEAD(name, type, qual) \
400 struct name { \
401  qual type *tqh_first; /* first element */ \
402  qual type *qual *tqh_last; /* addr of last next element */ \
403 }
404 #define TAILQ_HEAD(name, type) _TAILQ_HEAD(name, struct type,)
405 
406 #define TAILQ_HEAD_INITIALIZER(head) \
407  { NULL, &(head).tqh_first }
408 
409 #define _TAILQ_ENTRY(type, qual) \
410 struct { \
411  qual type *tqe_next; /* next element */ \
412  qual type *qual *tqe_prev; /* address of previous next element */\
413 }
414 #define TAILQ_ENTRY(type) _TAILQ_ENTRY(struct type,)
415 
416 /*
417  * Tail queue functions.
418  */
419 #if defined(_KERNEL) && defined(QUEUEDEBUG)
420 #define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) \
421  if ((head)->tqh_first && \
422  (head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \
423  panic("TAILQ_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
424 #define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) \
425  if (*(head)->tqh_last != NULL) \
426  panic("TAILQ_INSERT_TAIL %p %s:%d", (head), __FILE__, __LINE__);
427 #define QUEUEDEBUG_TAILQ_OP(elm, field) \
428  if ((elm)->field.tqe_next && \
429  (elm)->field.tqe_next->field.tqe_prev != \
430  &(elm)->field.tqe_next) \
431  panic("TAILQ_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
432  if (*(elm)->field.tqe_prev != (elm)) \
433  panic("TAILQ_* back %p %s:%d", (elm), __FILE__, __LINE__);
434 #define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field) \
435  if ((elm)->field.tqe_next == NULL && \
436  (head)->tqh_last != &(elm)->field.tqe_next) \
437  panic("TAILQ_PREREMOVE head %p elm %p %s:%d", \
438  (head), (elm), __FILE__, __LINE__);
439 #define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field) \
440  (elm)->field.tqe_next = (void *)1L; \
441  (elm)->field.tqe_prev = (void *)1L;
442 #else
443 #define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
444 #define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
445 #define QUEUEDEBUG_TAILQ_OP(elm, field)
446 #define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
447 #define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
448 #endif
449 
450 #define TAILQ_INIT(head) do { \
451  (head)->tqh_first = NULL; \
452  (head)->tqh_last = &(head)->tqh_first; \
453 } while (/* CONSTCOND */0)
454 
455 #define TAILQ_INSERT_HEAD(head, elm, field) do { \
456  QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field) \
457  if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
458  (head)->tqh_first->field.tqe_prev = \
459  &(elm)->field.tqe_next; \
460  else \
461  (head)->tqh_last = &(elm)->field.tqe_next; \
462  (head)->tqh_first = (elm); \
463  (elm)->field.tqe_prev = &(head)->tqh_first; \
464 } while (/* CONSTCOND */0)
465 
466 #define TAILQ_INSERT_TAIL(head, elm, field) do { \
467  QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field) \
468  (elm)->field.tqe_next = NULL; \
469  (elm)->field.tqe_prev = (head)->tqh_last; \
470  *(head)->tqh_last = (elm); \
471  (head)->tqh_last = &(elm)->field.tqe_next; \
472 } while (/* CONSTCOND */0)
473 
474 #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
475  QUEUEDEBUG_TAILQ_OP((listelm), field) \
476  if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
477  (elm)->field.tqe_next->field.tqe_prev = \
478  &(elm)->field.tqe_next; \
479  else \
480  (head)->tqh_last = &(elm)->field.tqe_next; \
481  (listelm)->field.tqe_next = (elm); \
482  (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
483 } while (/* CONSTCOND */0)
484 
485 #define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
486  QUEUEDEBUG_TAILQ_OP((listelm), field) \
487  (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
488  (elm)->field.tqe_next = (listelm); \
489  *(listelm)->field.tqe_prev = (elm); \
490  (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
491 } while (/* CONSTCOND */0)
492 
493 #define TAILQ_REMOVE(head, elm, field) do { \
494  QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field) \
495  QUEUEDEBUG_TAILQ_OP((elm), field) \
496  if (((elm)->field.tqe_next) != NULL) \
497  (elm)->field.tqe_next->field.tqe_prev = \
498  (elm)->field.tqe_prev; \
499  else \
500  (head)->tqh_last = (elm)->field.tqe_prev; \
501  *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
502  QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
503 } while (/* CONSTCOND */0)
504 
505 #define TAILQ_FOREACH(var, head, field) \
506  for ((var) = ((head)->tqh_first); \
507  (var); \
508  (var) = ((var)->field.tqe_next))
509 
510 #define TAILQ_FOREACH_SAFE(var, head, field, next) \
511  for ((var) = ((head)->tqh_first); \
512  (var) != NULL && ((next) = TAILQ_NEXT(var, field), 1); \
513  (var) = (next))
514 
515 #define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
516  for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
517  (var); \
518  (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
519 
520 #define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev) \
521  for ((var) = TAILQ_LAST((head), headname); \
522  (var) && ((prev) = TAILQ_PREV((var), headname, field), 1);\
523  (var) = (prev))
524 
525 #define TAILQ_CONCAT(head1, head2, field) do { \
526  if (!TAILQ_EMPTY(head2)) { \
527  *(head1)->tqh_last = (head2)->tqh_first; \
528  (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
529  (head1)->tqh_last = (head2)->tqh_last; \
530  TAILQ_INIT((head2)); \
531  } \
532 } while (/* CONSTCOND */0)
533 
534 /*
535  * Tail queue access methods.
536  */
537 #define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
538 #define TAILQ_FIRST(head) ((head)->tqh_first)
539 #define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
540 
541 #define TAILQ_LAST(head, headname) \
542  (*(((struct headname *)((head)->tqh_last))->tqh_last))
543 #define TAILQ_PREV(elm, headname, field) \
544  (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
545 
546 /*
547  * Circular queue definitions.
548  */
549 #if defined(_KERNEL) && defined(QUEUEDEBUG)
550 #define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) \
551  if ((head)->cqh_first != (void *)(head) && \
552  (head)->cqh_first->field.cqe_prev != (void *)(head)) \
553  panic("CIRCLEQ head forw %p %s:%d", (head), \
554  __FILE__, __LINE__); \
555  if ((head)->cqh_last != (void *)(head) && \
556  (head)->cqh_last->field.cqe_next != (void *)(head)) \
557  panic("CIRCLEQ head back %p %s:%d", (head), \
558  __FILE__, __LINE__);
559 #define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) \
560  if ((elm)->field.cqe_next == (void *)(head)) { \
561  if ((head)->cqh_last != (elm)) \
562  panic("CIRCLEQ elm last %p %s:%d", (elm), \
563  __FILE__, __LINE__); \
564  } else { \
565  if ((elm)->field.cqe_next->field.cqe_prev != (elm)) \
566  panic("CIRCLEQ elm forw %p %s:%d", (elm), \
567  __FILE__, __LINE__); \
568  } \
569  if ((elm)->field.cqe_prev == (void *)(head)) { \
570  if ((head)->cqh_first != (elm)) \
571  panic("CIRCLEQ elm first %p %s:%d", (elm), \
572  __FILE__, __LINE__); \
573  } else { \
574  if ((elm)->field.cqe_prev->field.cqe_next != (elm)) \
575  panic("CIRCLEQ elm prev %p %s:%d", (elm), \
576  __FILE__, __LINE__); \
577  }
578 #define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) \
579  (elm)->field.cqe_next = (void *)1L; \
580  (elm)->field.cqe_prev = (void *)1L;
581 #else
582 #define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
583 #define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
584 #define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
585 #endif
586 
587 #define CIRCLEQ_HEAD(name, type) \
588 struct name { \
589  struct type *cqh_first; /* first element */ \
590  struct type *cqh_last; /* last element */ \
591 }
592 
593 #define CIRCLEQ_HEAD_INITIALIZER(head) \
594  { (void *)&head, (void *)&head }
595 
596 #define CIRCLEQ_ENTRY(type) \
597 struct { \
598  struct type *cqe_next; /* next element */ \
599  struct type *cqe_prev; /* previous element */ \
600 }
601 
602 /*
603  * Circular queue functions.
604  */
605 #define CIRCLEQ_INIT(head) do { \
606  (head)->cqh_first = (void *)(head); \
607  (head)->cqh_last = (void *)(head); \
608 } while (/* CONSTCOND */0)
609 
610 #define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
611  QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
612  QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
613  (elm)->field.cqe_next = (listelm)->field.cqe_next; \
614  (elm)->field.cqe_prev = (listelm); \
615  if ((listelm)->field.cqe_next == (void *)(head)) \
616  (head)->cqh_last = (elm); \
617  else \
618  (listelm)->field.cqe_next->field.cqe_prev = (elm); \
619  (listelm)->field.cqe_next = (elm); \
620 } while (/* CONSTCOND */0)
621 
622 #define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
623  QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
624  QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
625  (elm)->field.cqe_next = (listelm); \
626  (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
627  if ((listelm)->field.cqe_prev == (void *)(head)) \
628  (head)->cqh_first = (elm); \
629  else \
630  (listelm)->field.cqe_prev->field.cqe_next = (elm); \
631  (listelm)->field.cqe_prev = (elm); \
632 } while (/* CONSTCOND */0)
633 
634 #define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
635  QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
636  (elm)->field.cqe_next = (head)->cqh_first; \
637  (elm)->field.cqe_prev = (void *)(head); \
638  if ((head)->cqh_last == (void *)(head)) \
639  (head)->cqh_last = (elm); \
640  else \
641  (head)->cqh_first->field.cqe_prev = (elm); \
642  (head)->cqh_first = (elm); \
643 } while (/* CONSTCOND */0)
644 
645 #define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
646  QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
647  (elm)->field.cqe_next = (void *)(head); \
648  (elm)->field.cqe_prev = (head)->cqh_last; \
649  if ((head)->cqh_first == (void *)(head)) \
650  (head)->cqh_first = (elm); \
651  else \
652  (head)->cqh_last->field.cqe_next = (elm); \
653  (head)->cqh_last = (elm); \
654 } while (/* CONSTCOND */0)
655 
656 #define CIRCLEQ_REMOVE(head, elm, field) do { \
657  QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
658  QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field) \
659  if ((elm)->field.cqe_next == (void *)(head)) \
660  (head)->cqh_last = (elm)->field.cqe_prev; \
661  else \
662  (elm)->field.cqe_next->field.cqe_prev = \
663  (elm)->field.cqe_prev; \
664  if ((elm)->field.cqe_prev == (void *)(head)) \
665  (head)->cqh_first = (elm)->field.cqe_next; \
666  else \
667  (elm)->field.cqe_prev->field.cqe_next = \
668  (elm)->field.cqe_next; \
669  QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field) \
670 } while (/* CONSTCOND */0)
671 
672 #define CIRCLEQ_FOREACH(var, head, field) \
673  for ((var) = ((head)->cqh_first); \
674  (var) != (const void *)(head); \
675  (var) = ((var)->field.cqe_next))
676 
677 #define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
678  for ((var) = ((head)->cqh_last); \
679  (var) != (const void *)(head); \
680  (var) = ((var)->field.cqe_prev))
681 
682 /*
683  * Circular queue access methods.
684  */
685 #define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head))
686 #define CIRCLEQ_FIRST(head) ((head)->cqh_first)
687 #define CIRCLEQ_LAST(head) ((head)->cqh_last)
688 #define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
689 #define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
690 
/*
 * Wrap-around neighbour access: like CIRCLEQ_NEXT/CIRCLEQ_PREV, but
 * when the neighbour would be the head sentinel these yield the
 * element at the opposite end instead, so callers can cycle forever
 * over a non-empty queue.
 *
 * Fix: the false branch of each conditional previously expanded the
 * "elm" argument unparenthesized — (elm->field.cqe_next) — so an
 * argument such as "&node" or "p + 1" mis-parsed (CERT PRE01-C).
 * Both uses of elm are now fully parenthesized; expansion for plain
 * identifiers is unchanged.
 */
#define CIRCLEQ_LOOP_NEXT(head, elm, field) \
	(((elm)->field.cqe_next == (void *)(head)) \
	    ? ((head)->cqh_first) \
	    : ((elm)->field.cqe_next))
#define CIRCLEQ_LOOP_PREV(head, elm, field) \
	(((elm)->field.cqe_prev == (void *)(head)) \
	    ? ((head)->cqh_last) \
	    : ((elm)->field.cqe_prev))
699 
700 #endif /* !_SYS_QUEUE_H_ */