// Copyright © Tavian Barnes <tavianator@tavianator.com>
// SPDX-License-Identifier: 0BSD
#include "tests.h"
#include "../src/ioq.h"
#include "../src/bfstd.h"
#include "../src/diag.h"
#include "../src/dir.h"
#include <errno.h>
#include <fcntl.h>
/**
 * Test for blocking within ioq_slot_push().
 *
 * struct ioqq only supports non-blocking reads; if a write encounters a full
 * slot, it must block until someone pops from that slot:
 *
 *     Reader                        Writer
 *     ──────────────────────────    ─────────────────────────
 *                                   tail: 0 → 1
 *                                   slots[0]: empty → full
 *                                   tail: 1 → 0
 *                                   slots[1]: empty → full
 *                                   tail: 0 → 1
 *                                   slots[0]: full → full* (IOQ_BLOCKED)
 *                                   ioq_slot_wait() ...
 *     head: 0 → 1
 *     slots[0]: full* → empty
 *     ioq_slot_wake()
 *                                   ...
 *                                   slots[0]: empty → full
 *
 * To reproduce this unlikely scenario, we must fill up the ready queue, then
 * call ioq_cancel(), which pushes an additional sentinel IOQ_STOP operation.
 */
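/**
 * A minimal sketch of that handshake, assuming futex-like wait/wake
 * semantics.  try_push(), mark_blocked(), and pop_slot() are illustrative
 * names only, not the real ioq.c API; only ioq_slot_wait(), ioq_slot_wake(),
 * and IOQ_BLOCKED come from the source:
 *
 *     // Writer: CAS empty → full; park if the slot is already full
 *     while (!try_push(slot, value)) {
 *         mark_blocked(slot);       // full → full* (IOQ_BLOCKED)
 *         ioq_slot_wait(slot);      // sleep until the reader pops the slot
 *     }
 *
 *     // Reader: empty the slot, then wake any parked writer
 *     value = pop_slot(slot);       // full/full* → empty
 *     if (value & IOQ_BLOCKED) {
 *         ioq_slot_wake(slot);
 *     }
 */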
static void check_ioq_push_block(void) {
	// Must be a power of two to fill the entire queue
	const size_t depth = 2;

	struct ioq *ioq = ioq_create(depth, 1);
	bfs_verify(ioq, "ioq_create(): %s", xstrerror(errno));

	// Push enough operations to fill the queue
	for (size_t i = 0; i < depth; ++i) {
		struct bfs_dir *dir = bfs_allocdir();
		bfs_verify(dir, "bfs_allocdir(): %s", xstrerror(errno));

		int ret = ioq_opendir(ioq, dir, AT_FDCWD, ".", 0, NULL);
		bfs_verify(ret == 0, "ioq_opendir(): %s", xstrerror(errno));
	}
	bfs_verify(ioq_capacity(ioq) == 0);

	// Now cancel the queue, pushing an additional IOQ_STOP message
	ioq_cancel(ioq);

	// Drain the queue, blocking for each completed operation
	struct ioq_ent *ent;
	while ((ent = ioq_pop(ioq, true))) {
		bfs_verify(ent->op == IOQ_OPENDIR);

		// A non-negative result means the open succeeded, so close
		// the directory before freeing its storage
		if (ent->result >= 0) {
			bfs_closedir(ent->opendir.dir);
		}
		free(ent->opendir.dir);
		ioq_free(ioq, ent);
	}

	ioq_destroy(ioq);
}

bool check_ioq(void) {
	check_ioq_push_block();
	return true;
}