init
I will never get tired of vendoring dependencies. ha ha. It is possible I am insane. I had to do a lot of pruning to get these not to be ridiculous (especially the unicode data, which had nearly 1 million lines of... stuff).
deps/libxev/src/ThreadPool.zig (vendored, new file, 829 lines)
@@ -0,0 +1,829 @@
//! Thread pool copied almost directly from Zap[1]. In @kprotty's own words:
//! lock-free, allocation-free* (excluding spawning threads), supports batch
//! scheduling, and dynamically spawns threads while handling thread spawn
//! failure. I highly recommend reading @kprotty's incredible blog post[2] on
//! this topic.
//!
//! The original file in Zap is licensed under the MIT license, and the
//! license and copyright are reproduced below. The libxev project is also
//! MIT licensed so the entire project (including this file) is equally
//! licensed. This is just a convenience note for any OSS users, contributors,
//! etc.
//!
//! MIT License
//!
//! Copyright (c) 2021 kprotty
//!
//! Permission is hereby granted, free of charge, to any person obtaining a copy
//! of this software and associated documentation files (the "Software"), to deal
//! in the Software without restriction, including without limitation the rights
//! to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
//! copies of the Software, and to permit persons to whom the Software is
//! furnished to do so, subject to the following conditions:
//!
//! The above copyright notice and this permission notice shall be included in all
//! copies or substantial portions of the Software.
//!
//! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
//! IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
//! AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
//! LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
//! OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
//! SOFTWARE.
//!
//! [1]: https://github.com/kprotty/zap
//! [2]: https://zig.news/kprotty/resource-efficient-thread-pools-with-zig-3291
const ThreadPool = @This();

const std = @import("std");
const assert = std.debug.assert;
const Atomic = std.atomic.Value;

stack_size: u32,
max_threads: u32,
sync: Atomic(u32) = Atomic(u32).init(@bitCast(Sync{})),
idle_event: Event = .{},
join_event: Event = .{},
run_queue: Node.Queue = .{},
threads: Atomic(?*Thread) = Atomic(?*Thread).init(null),

const Sync = packed struct {
    /// Tracks the number of threads not searching for Tasks
    idle: u14 = 0,
    /// Tracks the number of threads spawned
    spawned: u14 = 0,
    /// What you see is what you get
    unused: bool = false,
    /// Used to not miss notifications while state = waking
    notified: bool = false,
    /// The current state of the thread pool
    state: enum(u2) {
        /// A notification can be issued to wake up a sleeping thread as the "waking thread".
        pending = 0,
        /// The state was notified with a signal. A thread is woken up.
        /// The first thread to transition to `waking` becomes the "waking thread".
        signaled,
        /// There is a "waking thread" among us.
        /// No other thread should be woken up until the waking thread transitions the state.
        waking,
        /// The thread pool was terminated. Each exiting thread decrements
        /// `spawned` so that the pool can be joined.
        shutdown,
    } = .pending,
};
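
// A minimal sketch (not part of the upstream file) of why `sync` above can be
// an Atomic(u32): Sync is a packed struct exactly 32 bits wide
// (14 + 14 + 1 + 1 + 2), so it round-trips through @bitCast losslessly.
test "Sync round-trips through u32" {
    const sync = Sync{ .spawned = 3, .state = .waking };
    const raw: u32 = @bitCast(sync);
    const back: Sync = @bitCast(raw);
    try std.testing.expectEqual(@as(u14, 3), back.spawned);
    try std.testing.expect(back.state == .waking);
}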

/// Configuration options for the thread pool.
/// TODO: add CPU core affinity?
pub const Config = struct {
    stack_size: u32 = (std.Thread.SpawnConfig{}).stack_size,
    max_threads: u32 = 0,
};

/// Statically initialize the thread pool using the configuration.
pub fn init(config: Config) ThreadPool {
    return .{
        .stack_size = @max(1, config.stack_size),
        .max_threads = if (config.max_threads > 0)
            config.max_threads
        else
            @intCast(std.Thread.getCpuCount() catch 1),
    };
}

/// Wait for a thread to call shutdown() on the thread pool and kill the worker threads.
pub fn deinit(self: *ThreadPool) void {
    self.join();
    self.* = undefined;
}

/// A Task represents the unit of Work / Job / Execution that the ThreadPool schedules.
/// The user provides a `callback` which is invoked when the *Task can run on a thread.
pub const Task = struct {
    node: Node = .{},
    callback: *const fn (*Task) void,
};

/// An unordered collection of Tasks which can be submitted for scheduling as a group.
pub const Batch = struct {
    len: usize = 0,
    head: ?*Task = null,
    tail: ?*Task = null,

    /// Create a batch from a single task.
    pub fn from(task: *Task) Batch {
        return Batch{
            .len = 1,
            .head = task,
            .tail = task,
        };
    }

    /// Push another batch into this one, taking ownership of its tasks.
    pub fn push(self: *Batch, batch: Batch) void {
        if (batch.len == 0) return;
        if (self.len == 0) {
            self.* = batch;
        } else {
            self.tail.?.node.next = if (batch.head) |h| &h.node else null;
            self.tail = batch.tail;
            self.len += batch.len;
        }
    }
};

/// Schedule a batch of tasks to be executed by some thread on the thread pool.
pub fn schedule(self: *ThreadPool, batch: Batch) void {
    // Sanity check
    if (batch.len == 0) {
        return;
    }

    // Extract out the Nodes from the Tasks
    var list = Node.List{
        .head = &batch.head.?.node,
        .tail = &batch.tail.?.node,
    };

    // Push the task Nodes to the most appropriate queue
    if (Thread.current) |thread| {
        thread.run_buffer.push(&list) catch thread.run_queue.push(list);
    } else {
        self.run_queue.push(list);
    }

    // Try to notify a thread
    const is_waking = false;
    return self.notify(is_waking);
}
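
// A minimal usage sketch (not part of the upstream file): embed a Task in a
// user struct, recover the struct inside the callback via @fieldParentPtr,
// and submit one or more tasks as a Batch. `Ctx` is a hypothetical user type.
test "ThreadPool usage sketch" {
    const Ctx = struct {
        task: Task = .{ .callback = work },

        fn work(task: *Task) void {
            const ctx: *@This() = @fieldParentPtr("task", task);
            _ = ctx; // the actual work would go here
        }
    };

    var pool = ThreadPool.init(.{ .max_threads = 2 });
    var a: Ctx = .{};
    var b: Ctx = .{};

    // Batches compose before a single schedule() call.
    var batch = Batch.from(&a.task);
    batch.push(Batch.from(&b.task));
    pool.schedule(batch);

    pool.shutdown();
    pool.deinit(); // blocks until all worker threads have exited
}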

inline fn notify(self: *ThreadPool, is_waking: bool) void {
    // Fast path to check the Sync state to avoid calling into notifySlow().
    // If we're waking, then we need to update the state regardless.
    if (!is_waking) {
        const sync: Sync = @bitCast(self.sync.load(.monotonic));
        if (sync.notified) {
            return;
        }
    }

    return self.notifySlow(is_waking);
}

noinline fn notifySlow(self: *ThreadPool, is_waking: bool) void {
    var sync: Sync = @bitCast(self.sync.load(.monotonic));
    while (sync.state != .shutdown) {
        const can_wake = is_waking or (sync.state == .pending);
        if (is_waking) {
            assert(sync.state == .waking);
        }

        var new_sync = sync;
        new_sync.notified = true;
        if (can_wake and sync.idle > 0) { // wake up an idle thread
            new_sync.state = .signaled;
        } else if (can_wake and sync.spawned < self.max_threads) { // spawn a new thread
            new_sync.state = .signaled;
            new_sync.spawned += 1;
        } else if (is_waking) { // no other thread to pass on "waking" status
            new_sync.state = .pending;
        } else if (sync.notified) { // nothing to update
            return;
        }

        // Release barrier synchronizes with Acquire in wait()
        // to ensure pushes to run queues happen before observing a posted notification.
        sync = @bitCast(self.sync.cmpxchgWeak(
            @bitCast(sync),
            @bitCast(new_sync),
            .release,
            .monotonic,
        ) orelse {
            // We signaled to notify an idle thread
            if (can_wake and sync.idle > 0) {
                return self.idle_event.notify();
            }

            // We signaled to spawn a new thread
            if (can_wake and sync.spawned < self.max_threads) {
                const spawn_config = std.Thread.SpawnConfig{ .stack_size = self.stack_size };
                const thread = std.Thread.spawn(spawn_config, Thread.run, .{self}) catch return self.unregister(null);
                return thread.detach();
            }

            return;
        });
    }
}

noinline fn wait(self: *ThreadPool, _is_waking: bool) error{Shutdown}!bool {
    var is_idle = false;
    var is_waking = _is_waking;
    var sync: Sync = @bitCast(self.sync.load(.monotonic));

    while (true) {
        if (sync.state == .shutdown) return error.Shutdown;
        if (is_waking) assert(sync.state == .waking);

        // Consume a notification made by notify().
        if (sync.notified) {
            var new_sync = sync;
            new_sync.notified = false;
            if (is_idle)
                new_sync.idle -= 1;
            if (sync.state == .signaled)
                new_sync.state = .waking;

            // Acquire barrier synchronizes with notify()
            // to ensure that pushes to run queue are observed after wait() returns.
            sync = @bitCast(self.sync.cmpxchgWeak(
                @bitCast(sync),
                @bitCast(new_sync),
                .acquire,
                .monotonic,
            ) orelse {
                return is_waking or (sync.state == .signaled);
            });

            // No notification to consume.
            // Mark this thread as idle before sleeping on the idle_event.
        } else if (!is_idle) {
            var new_sync = sync;
            new_sync.idle += 1;
            if (is_waking)
                new_sync.state = .pending;

            sync = @bitCast(self.sync.cmpxchgWeak(
                @bitCast(sync),
                @bitCast(new_sync),
                .monotonic,
                .monotonic,
            ) orelse {
                is_waking = false;
                is_idle = true;
                continue;
            });

            // Wait for a signal by either notify() or shutdown() without wasting cpu cycles.
            // TODO: Add I/O polling here.
        } else {
            self.idle_event.wait();
            sync = @bitCast(self.sync.load(.monotonic));
        }
    }
}

/// Marks the thread pool as shutdown
pub noinline fn shutdown(self: *ThreadPool) void {
    var sync: Sync = @bitCast(self.sync.load(.monotonic));
    while (sync.state != .shutdown) {
        var new_sync = sync;
        new_sync.notified = true;
        new_sync.state = .shutdown;
        new_sync.idle = 0;

        // Full barrier to synchronize with both wait() and notify()
        sync = @bitCast(self.sync.cmpxchgWeak(
            @bitCast(sync),
            @bitCast(new_sync),
            .acq_rel,
            .monotonic,
        ) orelse {
            // Wake up any threads sleeping on the idle_event.
            // TODO: I/O polling notification here.
            if (sync.idle > 0) self.idle_event.shutdown();
            return;
        });
    }
}

fn register(noalias self: *ThreadPool, noalias thread: *Thread) void {
    // Push the thread onto the threads stack in a lock-free manner.
    var threads = self.threads.load(.monotonic);
    while (true) {
        thread.next = threads;
        threads = self.threads.cmpxchgWeak(
            threads,
            thread,
            .release,
            .monotonic,
        ) orelse break;
    }
}

fn unregister(noalias self: *ThreadPool, noalias maybe_thread: ?*Thread) void {
    // Un-spawn one thread, either due to a failed OS thread spawn or the thread exiting.
    const one_spawned: u32 = @bitCast(Sync{ .spawned = 1 });
    const sync: Sync = @bitCast(self.sync.fetchSub(one_spawned, .release));
    assert(sync.spawned > 0);

    // The last thread to exit must wake up the thread pool join()er
    // who will start the chain to shutdown all the threads.
    if (sync.state == .shutdown and sync.spawned == 1) {
        self.join_event.notify();
    }

    // If this is a thread pool thread, wait for a shutdown signal by the thread pool join()er.
    const thread = maybe_thread orelse return;
    thread.join_event.wait();

    // After receiving the shutdown signal, shutdown the next thread in the pool.
    // We have to do that without touching the thread pool itself since its memory is invalidated by now.
    // So just follow our .next link.
    const next_thread = thread.next orelse return;
    next_thread.join_event.notify();
}

fn join(self: *ThreadPool) void {
    // Wait for the thread pool to be shutdown() then for all threads to enter a joinable state
    var sync: Sync = @bitCast(self.sync.load(.monotonic));
    if (!(sync.state == .shutdown and sync.spawned == 0)) {
        self.join_event.wait();
        sync = @bitCast(self.sync.load(.monotonic));
    }

    assert(sync.state == .shutdown);
    assert(sync.spawned == 0);

    // If there are threads, start off the chain sending it the shutdown signal.
    // The thread receives the shutdown signal and sends it to the next thread, and the next..
    const thread = self.threads.load(.acquire) orelse return;
    thread.join_event.notify();
}
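
// (Note, not in the upstream file:) teardown is a chain. shutdown() flips the
// state and wakes idle workers; each exiting worker's unregister() decrements
// `spawned` and the last one notifies join_event; join() then walks the stack
// of registered threads, notifying each thread's join_event so workers exit
// one after another even though the pool's memory may already be gone.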

const Thread = struct {
    next: ?*Thread = null,
    target: ?*Thread = null,
    join_event: Event = .{},
    run_queue: Node.Queue = .{},
    run_buffer: Node.Buffer = .{},

    threadlocal var current: ?*Thread = null;

    /// Thread entry point which runs a worker for the ThreadPool
    fn run(thread_pool: *ThreadPool) void {
        var self = Thread{};
        current = &self;

        thread_pool.register(&self);
        defer thread_pool.unregister(&self);

        var is_waking = false;
        while (true) {
            is_waking = thread_pool.wait(is_waking) catch return;

            while (self.pop(thread_pool)) |result| {
                if (result.pushed or is_waking)
                    thread_pool.notify(is_waking);
                is_waking = false;

                const task: *Task = @fieldParentPtr("node", result.node);
                (task.callback)(task);
            }
        }
    }

    /// Try to dequeue a Node/Task from the ThreadPool.
    /// Spurious reports of dequeue() returning empty are allowed.
    fn pop(noalias self: *Thread, noalias thread_pool: *ThreadPool) ?Node.Buffer.Stole {
        // Check our local buffer first
        if (self.run_buffer.pop()) |node| {
            return Node.Buffer.Stole{
                .node = node,
                .pushed = false,
            };
        }

        // Then check our local queue
        if (self.run_buffer.consume(&self.run_queue)) |stole| {
            return stole;
        }

        // Then the global queue
        if (self.run_buffer.consume(&thread_pool.run_queue)) |stole| {
            return stole;
        }

        // TODO: add optimistic I/O polling here

        // Then try work stealing from other threads
        var num_threads: u32 = @as(Sync, @bitCast(thread_pool.sync.load(.monotonic))).spawned;
        while (num_threads > 0) : (num_threads -= 1) {
            // Traverse the stack of registered threads on the thread pool
            const target = self.target orelse thread_pool.threads.load(.acquire) orelse unreachable;
            self.target = target.next;

            // Try to steal from their queue first to avoid contention (the target steals from its own queue last).
            if (self.run_buffer.consume(&target.run_queue)) |stole| {
                return stole;
            }

            // Skip stealing from the buffer if we're the target.
            // We still steal from our own queue above given it may have just been locked the first time we tried.
            if (target == self) {
                continue;
            }

            // Steal from the buffer of a remote thread as a last resort
            if (self.run_buffer.steal(&target.run_buffer)) |stole| {
                return stole;
            }
        }

        return null;
    }
};

/// An event which stores 1 semaphore token and is multi-threaded safe.
/// The event can be shutdown(), waking up all wait()ing threads and
/// making subsequent wait()'s return immediately.
const Event = struct {
    state: Atomic(u32) = Atomic(u32).init(EMPTY),

    const EMPTY = 0;
    const WAITING = 1;
    const NOTIFIED = 2;
    const SHUTDOWN = 3;

    /// Wait for and consume a notification
    /// or wait for the event to be shutdown entirely
    noinline fn wait(self: *Event) void {
        var acquire_with: u32 = EMPTY;
        var state = self.state.load(.monotonic);

        while (true) {
            // If we're shutdown then exit early.
            // Acquire barrier to ensure operations before the shutdown() are seen after the wait().
            // Shutdown is rare so it's better to have an Acquire barrier here instead of on CAS failure + load which are common.
            if (state == SHUTDOWN) {
                @fence(.acquire);
                return;
            }

            // Consume a notification when it pops up.
            // Acquire barrier to ensure operations before the notify() appear after the wait().
            if (state == NOTIFIED) {
                state = self.state.cmpxchgWeak(
                    state,
                    acquire_with,
                    .acquire,
                    .monotonic,
                ) orelse return;
                continue;
            }

            // There is no notification to consume; we should wait on the event by ensuring it's WAITING.
            if (state != WAITING) blk: {
                state = self.state.cmpxchgWeak(
                    state,
                    WAITING,
                    .monotonic,
                    .monotonic,
                ) orelse break :blk;
                continue;
            }

            // Wait on the event until a notify() or shutdown().
            // If we wake up to a notification, we must acquire it with WAITING instead of EMPTY
            // since there may be other threads sleeping on the Futex who haven't been woken up yet.
            //
            // Acquiring to WAITING will make the next notify() or shutdown() wake a sleeping futex thread
            // who will either exit on SHUTDOWN or acquire with WAITING again, ensuring all threads are awoken.
            // This unfortunately results in the last notify() or shutdown() doing an extra futex wake but that's fine.
            std.Thread.Futex.wait(&self.state, WAITING);
            state = self.state.load(.monotonic);
            acquire_with = WAITING;
        }
    }

    /// Post a notification to the event if it doesn't have one already
    /// then wake up a waiting thread if there is one as well.
    fn notify(self: *Event) void {
        return self.wake(NOTIFIED, 1);
    }

    /// Marks the event as shutdown, making all future wait()'s return immediately.
    /// Then wakes up any threads currently waiting on the Event.
    fn shutdown(self: *Event) void {
        return self.wake(SHUTDOWN, std.math.maxInt(u32));
    }

    fn wake(self: *Event, release_with: u32, wake_threads: u32) void {
        // Update the Event to notify it with the new `release_with` state (either NOTIFIED or SHUTDOWN).
        // Release barrier to ensure operations before this happen before the wait() in the other threads.
        const state = self.state.swap(release_with, .release);

        // Only wake threads sleeping in futex if the state is WAITING.
        // Avoids unnecessary wake ups.
        if (state == WAITING) {
            std.Thread.Futex.wake(&self.state, wake_threads);
        }
    }
};
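
// A minimal sketch (not part of the upstream file) of the single-token
// semantics described above, exercised from one thread so nothing blocks:
test "Event stores a single token and latches on shutdown" {
    var event: Event = .{};
    event.notify(); // store the token
    event.wait(); // consume it without sleeping
    event.shutdown();
    event.wait(); // SHUTDOWN latches: every later wait() returns immediately
}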

/// Linked list intrusive memory node and lock-free data structures to operate with it
const Node = struct {
    next: ?*Node = null,

    /// A linked list of Nodes
    const List = struct {
        head: *Node,
        tail: *Node,
    };

    /// An unbounded multi-producer, (non-blocking) multi-consumer queue of Node pointers.
    const Queue = struct {
        stack: Atomic(usize) = Atomic(usize).init(0),
        cache: ?*Node = null,

        const HAS_CACHE: usize = 0b01;
        const IS_CONSUMING: usize = 0b10;
        const PTR_MASK: usize = ~(HAS_CACHE | IS_CONSUMING);

        comptime {
            assert(@alignOf(Node) >= ((IS_CONSUMING | HAS_CACHE) + 1));
        }
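
        // (Note, not in the upstream file:) the comptime check above is what
        // makes the tagging scheme sound. Node pointers are at least 4-byte
        // aligned, so their two low bits are always zero and are free to carry
        // the HAS_CACHE/IS_CONSUMING flags inside the same atomic usize.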

        fn push(noalias self: *Queue, list: List) void {
            var stack = self.stack.load(.monotonic);
            while (true) {
                // Attach the list to the stack (pt. 1)
                list.tail.next = @ptrFromInt(stack & PTR_MASK);

                // Update the stack with the list (pt. 2).
                // Don't change the HAS_CACHE and IS_CONSUMING bits of the consumer.
                var new_stack = @intFromPtr(list.head);
                assert(new_stack & ~PTR_MASK == 0);
                new_stack |= (stack & ~PTR_MASK);

                // Push to the stack with a release barrier for the consumer to see the proper list links.
                stack = self.stack.cmpxchgWeak(
                    stack,
                    new_stack,
                    .release,
                    .monotonic,
                ) orelse break;
            }
        }

        fn tryAcquireConsumer(self: *Queue) error{ Empty, Contended }!?*Node {
            var stack = self.stack.load(.monotonic);
            while (true) {
                if (stack & IS_CONSUMING != 0)
                    return error.Contended; // The queue already has a consumer.
                if (stack & (HAS_CACHE | PTR_MASK) == 0)
                    return error.Empty; // The queue is empty when there's nothing cached and nothing in the stack.

                // When we acquire the consumer, also consume the pushed stack if the cache is empty.
                var new_stack = stack | HAS_CACHE | IS_CONSUMING;
                if (stack & HAS_CACHE == 0) {
                    assert(stack & PTR_MASK != 0);
                    new_stack &= ~PTR_MASK;
                }

                // Acquire barrier on getting the consumer to see cache/Node updates done by previous consumers
                // and to ensure our cache/Node updates in pop() happen after that of previous consumers.
                stack = self.stack.cmpxchgWeak(
                    stack,
                    new_stack,
                    .acquire,
                    .monotonic,
                ) orelse return self.cache orelse @ptrFromInt(stack & PTR_MASK);
            }
        }

        fn releaseConsumer(noalias self: *Queue, noalias consumer: ?*Node) void {
            // Stop consuming and remove the HAS_CACHE bit as well if the consumer's cache is empty.
            // When the HAS_CACHE bit is zeroed, the next consumer will acquire the pushed stack nodes.
            var remove = IS_CONSUMING;
            if (consumer == null)
                remove |= HAS_CACHE;

            // Release the consumer with a release barrier to ensure cache/node accesses
            // happen before the consumer was released and before the next consumer starts using the cache.
            self.cache = consumer;
            const stack = self.stack.fetchSub(remove, .release);
            assert(stack & remove != 0);
        }

        fn pop(noalias self: *Queue, noalias consumer_ref: *?*Node) ?*Node {
            // Check the consumer cache (fast path)
            if (consumer_ref.*) |node| {
                consumer_ref.* = node.next;
                return node;
            }

            // Load the stack to see if there was anything pushed that we could grab.
            var stack = self.stack.load(.monotonic);
            assert(stack & IS_CONSUMING != 0);
            if (stack & PTR_MASK == 0) {
                return null;
            }

            // Nodes have been pushed to the stack; grab them with an Acquire barrier to see the Node links.
            stack = self.stack.swap(HAS_CACHE | IS_CONSUMING, .acquire);
            assert(stack & IS_CONSUMING != 0);
            assert(stack & PTR_MASK != 0);

            const node: *Node = @ptrFromInt(stack & PTR_MASK);
            consumer_ref.* = node.next;
            return node;
        }
    };

    /// A bounded single-producer, multi-consumer ring buffer for node pointers.
    const Buffer = struct {
        head: Atomic(Index) = Atomic(Index).init(0),
        tail: Atomic(Index) = Atomic(Index).init(0),
        array: [capacity]Atomic(*Node) = undefined,

        const Index = u32;
        const capacity = 256; // Appears to be a pretty good trade-off in space vs contended throughput

        comptime {
            assert(std.math.maxInt(Index) >= capacity);
            assert(std.math.isPowerOfTwo(capacity));
        }
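
        // (Note, not in the upstream file:) capacity must be a power of two
        // because head/tail are free-running u32 counters using wrapping
        // arithmetic (+%). Since 2^32 is divisible by 256, `index % capacity`
        // keeps addressing slots consistently even across counter wrap-around.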

        fn push(noalias self: *Buffer, noalias list: *List) error{Overflow}!void {
            var head = self.head.load(.monotonic);
            var tail = self.tail.raw; // we're the only thread that can change this

            while (true) {
                var size = tail -% head;
                assert(size <= capacity);

                // Push nodes from the list to the buffer while it has room.
                if (size < capacity) {
                    var nodes: ?*Node = list.head;
                    while (size < capacity) : (size += 1) {
                        const node = nodes orelse break;
                        nodes = node.next;

                        // Array written atomically with weakest ordering since it could be getting atomically read by steal().
                        self.array[tail % capacity].store(node, .unordered);
                        tail +%= 1;
                    }

                    // Release barrier synchronizes with Acquire loads for steal()ers to see the array writes.
                    self.tail.store(tail, .release);

                    // Update the list with the nodes we pushed to the buffer and try again if there's more.
                    list.head = nodes orelse return;
                    std.atomic.spinLoopHint();
                    head = self.head.load(.monotonic);
                    continue;
                }

                // Try to steal/overflow half of the tasks in the buffer to make room for future push()es.
                // Migrating half amortizes the cost of stealing while requiring future pops to still use the buffer.
                // Acquire barrier to ensure the linked list creation after the steal only happens after we successfully steal.
                var migrate = size / 2;
                head = self.head.cmpxchgWeak(
                    head,
                    head +% migrate,
                    .acquire,
                    .monotonic,
                ) orelse {
                    // Link the migrated Nodes together
                    const first = self.array[head % capacity].raw;
                    while (migrate > 0) : (migrate -= 1) {
                        const prev = self.array[head % capacity].raw;
                        head +%= 1;
                        prev.next = self.array[head % capacity].raw;
                    }

                    // Append the list that was supposed to be pushed to the end of the migrated Nodes
                    const last = self.array[(head -% 1) % capacity].raw;
                    last.next = list.head;
                    list.tail.next = null;

                    // Return the migrated nodes + the original list as overflowed
                    list.head = first;
                    return error.Overflow;
                };
            }
        }

        fn pop(self: *Buffer) ?*Node {
            var head = self.head.load(.monotonic);
            const tail = self.tail.raw; // we're the only thread that can change this

            while (true) {
                // Quick sanity check; return null when empty.
                const size = tail -% head;
                assert(size <= capacity);
                if (size == 0) {
                    return null;
                }

                // Dequeue with an acquire barrier to ensure any writes done to the Node
                // only happen after we successfully claim it from the array.
                head = self.head.cmpxchgWeak(
                    head,
                    head +% 1,
                    .acquire,
                    .monotonic,
                ) orelse return self.array[head % capacity].raw;
            }
        }

        const Stole = struct {
            node: *Node,
            pushed: bool,
        };

        fn consume(noalias self: *Buffer, noalias queue: *Queue) ?Stole {
            var consumer = queue.tryAcquireConsumer() catch return null;
            defer queue.releaseConsumer(consumer);

            const head = self.head.load(.monotonic);
            const tail = self.tail.raw; // we're the only thread that can change this

            const size = tail -% head;
            assert(size <= capacity);
            assert(size == 0); // we should only be consuming if our array is empty

            // Pop nodes from the queue and push them to our array.
            // Atomic stores to the array as steal() threads may be atomically reading from it.
            var pushed: Index = 0;
            while (pushed < capacity) : (pushed += 1) {
                const node = queue.pop(&consumer) orelse break;
                self.array[(tail +% pushed) % capacity].store(node, .unordered);
            }

            // We will be returning one node that we stole from the queue.
            // Get an extra, and if that's not possible, take one from our array.
            const node = queue.pop(&consumer) orelse blk: {
                if (pushed == 0) return null;
                pushed -= 1;
                break :blk self.array[(tail +% pushed) % capacity].raw;
            };

            // Update the array tail with the nodes we pushed to it.
            // Release barrier to synchronize with Acquire barrier in steal()'s to see the written array Nodes.
            if (pushed > 0) self.tail.store(tail +% pushed, .release);
            return Stole{
                .node = node,
                .pushed = pushed > 0,
            };
        }

        fn steal(noalias self: *Buffer, noalias buffer: *Buffer) ?Stole {
            const head = self.head.load(.monotonic);
            const tail = self.tail.raw; // we're the only thread that can change this

            const size = tail -% head;
            assert(size <= capacity);
            assert(size == 0); // we should only be stealing if our array is empty

            while (true) : (std.atomic.spinLoopHint()) {
                const buffer_head = buffer.head.load(.acquire);
                const buffer_tail = buffer.tail.load(.acquire);

                // Overly large size indicates that the tail was updated a lot after the head was loaded.
                // Reload both and try again.
                const buffer_size = buffer_tail -% buffer_head;
                if (buffer_size > capacity) {
                    continue;
                }

                // Try to steal half (divCeil) to amortize the cost of stealing from other threads.
                const steal_size = buffer_size - (buffer_size / 2);
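                // (Note, not in the upstream file:) `size - size / 2` is
                // ceil(size / 2) in integer arithmetic, so for an odd count
                // the thief takes the larger half, e.g. 5 -> 3 stolen.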
                if (steal_size == 0) {
                    return null;
                }

                // Copy the nodes we will steal from the target's array to our own.
                // Atomically load from the target buffer array as it may be pushing and atomically storing to it.
                // Atomic store to our array as other steal() threads may be atomically loading from it as above.
                var i: Index = 0;
                while (i < steal_size) : (i += 1) {
                    const node = buffer.array[(buffer_head +% i) % capacity].load(.unordered);
                    self.array[(tail +% i) % capacity].store(node, .unordered);
                }

                // Try to commit the steal from the target buffer using:
                // - an Acquire barrier to ensure that we only interact with the stolen Nodes after the steal was committed.
                // - a Release barrier to ensure that the Nodes are copied above prior to the committing of the steal
                //   because if they're copied after the steal, they could be getting rewritten by the target's push().
                _ = buffer.head.cmpxchgStrong(
                    buffer_head,
                    buffer_head +% steal_size,
                    .acq_rel,
                    .monotonic,
                ) orelse {
                    // Pop one from the nodes we stole as we'll be returning it
                    const pushed = steal_size - 1;
                    const node = self.array[(tail +% pushed) % capacity].raw;

                    // Update the array tail with the nodes we pushed to it.
                    // Release barrier to synchronize with Acquire barrier in steal()'s to see the written array Nodes.
                    if (pushed > 0) self.tail.store(tail +% pushed, .release);
                    return Stole{
                        .node = node,
                        .pushed = pushed > 0,
                    };
                };
            }
        }
    };
};
deps/libxev/src/backend/epoll.zig (vendored, new file, 1971 lines)
File diff suppressed because it is too large
deps/libxev/src/backend/io_uring.zig (vendored, new file, 1745 lines)
File diff suppressed because it is too large
deps/libxev/src/backend/iocp.zig (vendored, new file, 2356 lines)
File diff suppressed because it is too large
deps/libxev/src/backend/kqueue.zig (vendored, new file, 2645 lines)
File diff suppressed because it is too large
deps/libxev/src/backend/wasi_poll.zig (vendored, new file, 1637 lines)
File diff suppressed because it is too large
deps/libxev/src/bench/async1.zig (vendored, new file, 105 lines)
@@ -0,0 +1,105 @@
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Instant = std.time.Instant;
const xev = @import("xev");

pub const std_options: std.Options = .{
    .log_level = .info,
};

// Tune-ables
pub const NUM_PINGS = 1000 * 1000;

pub fn main() !void {
    try run(1);
}

pub fn run(comptime thread_count: comptime_int) !void {
    var loop = try xev.Loop.init(.{});
    defer loop.deinit();

    // Initialize all our threads
    var contexts: [thread_count]Thread = undefined;
    var threads: [contexts.len]std.Thread = undefined;
    var comps: [contexts.len]xev.Completion = undefined;
    for (&contexts, 0..) |*ctx, i| {
        ctx.* = try Thread.init();
        ctx.main_async.wait(&loop, &comps[i], Thread, ctx, mainAsyncCallback);
        threads[i] = try std.Thread.spawn(.{}, Thread.threadMain, .{ctx});
    }

    const start_time = try Instant.now();
    try loop.run(.until_done);
    for (&threads) |thr| thr.join();
    const end_time = try Instant.now();

    const elapsed = @as(f64, @floatFromInt(end_time.since(start_time)));
    std.log.info("async{d}: {d:.2} seconds ({d:.2}/sec)", .{
        thread_count,
        elapsed / 1e9,
        NUM_PINGS / (elapsed / 1e9),
    });
}

fn mainAsyncCallback(
    ud: ?*Thread,
    _: *xev.Loop,
    _: *xev.Completion,
    r: xev.Async.WaitError!void,
) xev.CallbackAction {
    _ = r catch unreachable;

    const self = ud.?;
    self.worker_async.notify() catch unreachable;
    self.main_sent += 1;
    self.main_seen += 1;

    return if (self.main_sent >= NUM_PINGS) .disarm else .rearm;
}

/// The thread state
const Thread = struct {
    loop: xev.Loop,
    worker_async: xev.Async,
    main_async: xev.Async,
    worker_sent: usize = 0,
    worker_seen: usize = 0,
    main_sent: usize = 0,
    main_seen: usize = 0,

    pub fn init() !Thread {
        return .{
            .loop = try xev.Loop.init(.{}),
            .worker_async = try xev.Async.init(),
            .main_async = try xev.Async.init(),
        };
    }

    pub fn threadMain(self: *Thread) !void {
        // Kick us off
        try self.main_async.notify();

        // Start our waiter
        var c: xev.Completion = undefined;
        self.worker_async.wait(&self.loop, &c, Thread, self, asyncCallback);

        // Run
        try self.loop.run(.until_done);
        if (self.worker_sent < NUM_PINGS) @panic("FAIL");
    }

    fn asyncCallback(
        ud: ?*Thread,
        _: *xev.Loop,
        _: *xev.Completion,
        r: xev.Async.WaitError!void,
    ) xev.CallbackAction {
        _ = r catch unreachable;
        const self = ud.?;
        self.main_async.notify() catch unreachable;
        self.worker_sent += 1;
        self.worker_seen += 1;
        return if (self.worker_sent >= NUM_PINGS) .disarm else .rearm;
    }
};
deps/libxev/src/bench/async2.zig (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
const std = @import("std");
const run = @import("async1.zig").run;

pub const std_options: std.Options = .{
    .log_level = .info,
};

pub fn main() !void {
    try run(2);
}
deps/libxev/src/bench/async4.zig (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
const std = @import("std");
const run = @import("async1.zig").run;

pub const std_options: std.Options = .{
    .log_level = .info,
};

pub fn main() !void {
    try run(4);
}
deps/libxev/src/bench/async8.zig (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
const std = @import("std");
const run = @import("async1.zig").run;

pub const std_options: std.Options = .{
    .log_level = .info,
};

pub fn main() !void {
    try run(8);
}
deps/libxev/src/bench/async_pummel_1.zig (vendored, new file, 81 lines)
@@ -0,0 +1,81 @@
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Instant = std.time.Instant;
const xev = @import("xev");

pub const std_options: std.Options = .{
    .log_level = .info,
};

// Tune-ables
pub const NUM_PINGS = 1000 * 1000;

pub fn main() !void {
    try run(1);
}

pub fn run(comptime thread_count: comptime_int) !void {
    var thread_pool = xev.ThreadPool.init(.{});
    defer thread_pool.deinit();
    defer thread_pool.shutdown();

    var loop = try xev.Loop.init(.{
        .entries = std.math.pow(u13, 2, 12),
        .thread_pool = &thread_pool,
    });
    defer loop.deinit();

    // Create our async
    notifier = try xev.Async.init();
    defer notifier.deinit();

    const userdata: ?*void = null;
    var c: xev.Completion = undefined;
    notifier.wait(&loop, &c, void, userdata, &asyncCallback);

    // Initialize all our threads
    var threads: [thread_count]std.Thread = undefined;
    for (&threads) |*thr| {
        thr.* = try std.Thread.spawn(.{}, threadMain, .{});
    }

    const start_time = try Instant.now();
    try loop.run(.until_done);
    for (&threads) |thr| thr.join();
    const end_time = try Instant.now();

    const elapsed = @as(f64, @floatFromInt(end_time.since(start_time)));
    std.log.info("async_pummel_{d}: {d} callbacks in {d:.2} seconds ({d:.2}/sec)", .{
        thread_count,
        callbacks,
        elapsed / 1e9,
        @as(f64, @floatFromInt(callbacks)) / (elapsed / 1e9),
    });
}

var callbacks: usize = 0;
var notifier: xev.Async = undefined;
var state: enum { running, stop, stopped } = .running;

fn asyncCallback(
    _: ?*void,
    _: *xev.Loop,
    _: *xev.Completion,
    r: xev.Async.WaitError!void,
) xev.CallbackAction {
    _ = r catch unreachable;

    callbacks += 1;
    if (callbacks < NUM_PINGS) return .rearm;

    // We're done
    state = .stop;
    while (state != .stopped) std.time.sleep(0);
    return .disarm;
}
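
// (Note, not in the upstream file:) the spin above keeps the callback from
// disarming until every producer thread has observed `.stop` and left its
// notify() loop, so no thread touches `notifier` after the loop tears down.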

fn threadMain() !void {
    while (state == .running) try notifier.notify();
    state = .stopped;
}
deps/libxev/src/bench/async_pummel_2.zig (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
const std = @import("std");
const run = @import("async_pummel_1.zig").run;

pub const std_options: std.Options = .{
    .log_level = .info,
};

pub fn main() !void {
    try run(2);
}
deps/libxev/src/bench/async_pummel_4.zig (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
const std = @import("std");
const run = @import("async_pummel_1.zig").run;

pub const std_options: std.Options = .{
    .log_level = .info,
};

pub fn main() !void {
    try run(4);
}
deps/libxev/src/bench/async_pummel_8.zig (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
const std = @import("std");
const run = @import("async_pummel_1.zig").run;

pub const std_options: std.Options = .{
    .log_level = .info,
};

pub fn main() !void {
    try run(8);
}
deps/libxev/src/bench/million-timers.zig (vendored, new file, 61 lines)
@@ -0,0 +1,61 @@
const std = @import("std");
const Instant = std.time.Instant;
const xev = @import("xev");

pub const NUM_TIMERS: usize = 10 * 1000 * 1000;

pub fn main() !void {
    var thread_pool = xev.ThreadPool.init(.{});
    defer thread_pool.deinit();
    defer thread_pool.shutdown();

    var loop = try xev.Loop.init(.{
        .entries = std.math.pow(u13, 2, 12),
        .thread_pool = &thread_pool,
    });
    defer loop.deinit();

    const GPA = std.heap.GeneralPurposeAllocator(.{});
    var gpa: GPA = .{};
    defer _ = gpa.deinit();
    const alloc = gpa.allocator();

    var cs = try alloc.alloc(xev.Completion, NUM_TIMERS);
    defer alloc.free(cs);

    const before_all = try Instant.now();
    var i: usize = 0;
    var timeout: u64 = 1;
    while (i < NUM_TIMERS) : (i += 1) {
        if (i % 1000 == 0) timeout += 1;
        const timer = try xev.Timer.init();
        timer.run(&loop, &cs[i], timeout, void, null, timerCallback);
    }

    const before_run = try Instant.now();
    try loop.run(.until_done);
    const after_run = try Instant.now();
    const after_all = try Instant.now();

    std.log.info("{d:.2} seconds total", .{@as(f64, @floatFromInt(after_all.since(before_all))) / 1e9});
    std.log.info("{d:.2} seconds init", .{@as(f64, @floatFromInt(before_run.since(before_all))) / 1e9});
    std.log.info("{d:.2} seconds dispatch", .{@as(f64, @floatFromInt(after_run.since(before_run))) / 1e9});
    std.log.info("{d:.2} seconds cleanup", .{@as(f64, @floatFromInt(after_all.since(after_run))) / 1e9});
}

pub const std_options: std.Options = .{
    .log_level = .info,
};

var timer_callback_count: usize = 0;

fn timerCallback(
    _: ?*void,
    _: *xev.Loop,
    _: *xev.Completion,
    result: xev.Timer.RunError!void,
) xev.CallbackAction {
    _ = result catch unreachable;
    timer_callback_count += 1;
    return .disarm;
}
deps/libxev/src/bench/ping-pongs.zig (vendored, new file, 359 lines)
@@ -0,0 +1,359 @@
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Instant = std.time.Instant;
const xev = @import("xev");

pub const std_options: std.Options = .{
    .log_level = .info,
};

pub fn main() !void {
    var thread_pool = xev.ThreadPool.init(.{});
    defer thread_pool.deinit();
    defer thread_pool.shutdown();

    var loop = try xev.Loop.init(.{
        .entries = std.math.pow(u13, 2, 12),
        .thread_pool = &thread_pool,
    });
    defer loop.deinit();

    const GPA = std.heap.GeneralPurposeAllocator(.{});
    var gpa: GPA = .{};
    defer _ = gpa.deinit();
    const alloc = gpa.allocator();

    var server_loop = try xev.Loop.init(.{
        .entries = std.math.pow(u13, 2, 12),
        .thread_pool = &thread_pool,
    });
    defer server_loop.deinit();

    var server = try Server.init(alloc, &server_loop);
    defer server.deinit();
    try server.start();

    // Start our echo server
    const server_thr = try std.Thread.spawn(.{}, Server.threadMain, .{&server});

    // Start our client
    var client_loop = try xev.Loop.init(.{
        .entries = std.math.pow(u13, 2, 12),
        .thread_pool = &thread_pool,
    });
    defer client_loop.deinit();

    var client = try Client.init(alloc, &client_loop);
    defer client.deinit();
    try client.start();

    const start_time = try Instant.now();
    try client_loop.run(.until_done);
    server_thr.join();
    const end_time = try Instant.now();

    const elapsed = @as(f64, @floatFromInt(end_time.since(start_time)));
    std.log.info("{d:.2} roundtrips/s", .{@as(f64, @floatFromInt(client.pongs)) / (elapsed / 1e9)});
    std.log.info("{d:.2} seconds total", .{elapsed / 1e9});
}

/// Memory pools for things that need stable pointers
const BufferPool = std.heap.MemoryPool([4096]u8);
const CompletionPool = std.heap.MemoryPool(xev.Completion);
const TCPPool = std.heap.MemoryPool(xev.TCP);
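
// (Note, not in the upstream file:) completions and buffers are pooled
// because xev holds these pointers while an operation is in flight; a
// MemoryPool hands out stable addresses and recycles freed slots cheaply.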

/// The client state
const Client = struct {
    loop: *xev.Loop,
    completion_pool: CompletionPool,
    read_buf: [1024]u8,
    pongs: u64,
    state: usize = 0,
    stop: bool = false,

    pub const PING = "PING\n";

    pub fn init(alloc: Allocator, loop: *xev.Loop) !Client {
        return .{
            .loop = loop,
            .completion_pool = CompletionPool.init(alloc),
            .read_buf = undefined,
            .pongs = 0,
            .state = 0,
            .stop = false,
        };
    }

    pub fn deinit(self: *Client) void {
        self.completion_pool.deinit();
    }

    /// Must be called with stable self pointer.
    pub fn start(self: *Client) !void {
        const addr = try std.net.Address.parseIp4("127.0.0.1", 3131);
        const socket = try xev.TCP.init(addr);

        const c = try self.completion_pool.create();
        socket.connect(self.loop, c, addr, Client, self, connectCallback);
    }

    fn connectCallback(
        self_: ?*Client,
        l: *xev.Loop,
        c: *xev.Completion,
        socket: xev.TCP,
        r: xev.TCP.ConnectError!void,
    ) xev.CallbackAction {
        _ = r catch unreachable;

        const self = self_.?;

        // Send message
        socket.write(l, c, .{ .slice = PING[0..PING.len] }, Client, self, writeCallback);

        // Read
        const c_read = self.completion_pool.create() catch unreachable;
        socket.read(l, c_read, .{ .slice = &self.read_buf }, Client, self, readCallback);
        return .disarm;
    }

    fn writeCallback(
        self_: ?*Client,
        l: *xev.Loop,
        c: *xev.Completion,
        s: xev.TCP,
        b: xev.WriteBuffer,
        r: xev.TCP.WriteError!usize,
    ) xev.CallbackAction {
        _ = r catch unreachable;
        _ = l;
        _ = s;
        _ = b;

        // Put back the completion.
        self_.?.completion_pool.destroy(c);
        return .disarm;
    }

    fn readCallback(
        self_: ?*Client,
        l: *xev.Loop,
        c: *xev.Completion,
        socket: xev.TCP,
        buf: xev.ReadBuffer,
        r: xev.TCP.ReadError!usize,
    ) xev.CallbackAction {
        const self = self_.?;
        const n = r catch unreachable;
        const data = buf.slice[0..n];

        // Count the number of pings in our message
        var i: usize = 0;
        while (i < n) : (i += 1) {
            assert(data[i] == PING[self.state]);
            self.state = (self.state + 1) % (PING.len);
            if (self.state == 0) {
                self.pongs += 1;

                // If we're done then exit
                if (self.pongs > 500_000) {
                    socket.shutdown(l, c, Client, self, shutdownCallback);
                    return .disarm;
                }

                // Send another ping
                const c_ping = self.completion_pool.create() catch unreachable;
                socket.write(l, c_ping, .{ .slice = PING[0..PING.len] }, Client, self, writeCallback);
            }
        }

        // Read again
        return .rearm;
    }

    fn shutdownCallback(
        self_: ?*Client,
        l: *xev.Loop,
        c: *xev.Completion,
        socket: xev.TCP,
        r: xev.TCP.ShutdownError!void,
    ) xev.CallbackAction {
        _ = r catch {};

        const self = self_.?;
        socket.close(l, c, Client, self, closeCallback);
        return .disarm;
    }

    fn closeCallback(
        self_: ?*Client,
        l: *xev.Loop,
        c: *xev.Completion,
        socket: xev.TCP,
        r: xev.TCP.CloseError!void,
    ) xev.CallbackAction {
        _ = l;
        _ = socket;
        _ = r catch unreachable;

        const self = self_.?;
        self.stop = true;
        self.completion_pool.destroy(c);
        return .disarm;
    }
};

/// The server state
const Server = struct {
    loop: *xev.Loop,
    buffer_pool: BufferPool,
    completion_pool: CompletionPool,
    socket_pool: TCPPool,
    stop: bool,

    pub fn init(alloc: Allocator, loop: *xev.Loop) !Server {
        return .{
            .loop = loop,
            .buffer_pool = BufferPool.init(alloc),
            .completion_pool = CompletionPool.init(alloc),
            .socket_pool = TCPPool.init(alloc),
            .stop = false,
        };
    }

    pub fn deinit(self: *Server) void {
        self.buffer_pool.deinit();
        self.completion_pool.deinit();
        self.socket_pool.deinit();
    }

    /// Must be called with stable self pointer.
    pub fn start(self: *Server) !void {
        const addr = try std.net.Address.parseIp4("127.0.0.1", 3131);
        var socket = try xev.TCP.init(addr);

        const c = try self.completion_pool.create();
        try socket.bind(addr);
        try socket.listen(std.os.linux.SOMAXCONN);
        socket.accept(self.loop, c, Server, self, acceptCallback);
    }

    pub fn threadMain(self: *Server) !void {
        try self.loop.run(.until_done);
    }

    fn destroyBuf(self: *Server, buf: []const u8) void {
        self.buffer_pool.destroy(
            @alignCast(
                @as(*[4096]u8, @ptrFromInt(@intFromPtr(buf.ptr))),
            ),
        );
    }

    fn acceptCallback(
        self_: ?*Server,
        l: *xev.Loop,
        c: *xev.Completion,
        r: xev.TCP.AcceptError!xev.TCP,
    ) xev.CallbackAction {
        const self = self_.?;

        // Create our socket
        const socket = self.socket_pool.create() catch unreachable;
        socket.* = r catch unreachable;

        // Start reading -- we can reuse c here because it's done.
        const buf = self.buffer_pool.create() catch unreachable;
        socket.read(l, c, .{ .slice = buf }, Server, self, readCallback);
        return .disarm;
    }

    fn readCallback(
        self_: ?*Server,
        loop: *xev.Loop,
        c: *xev.Completion,
        socket: xev.TCP,
        buf: xev.ReadBuffer,
        r: xev.TCP.ReadError!usize,
    ) xev.CallbackAction {
        const self = self_.?;
        const n = r catch |err| switch (err) {
            error.EOF => {
                self.destroyBuf(buf.slice);
                socket.shutdown(loop, c, Server, self, shutdownCallback);
                return .disarm;
            },

            else => {
                self.destroyBuf(buf.slice);
                self.completion_pool.destroy(c);
                std.log.warn("server read unexpected err={}", .{err});
                return .disarm;
            },
        };

        // Echo it back. (Copy length is bounded to `n`; copying the whole
        // 4096-byte pool buffer would trip @memcpy's length check.)
        const c_echo = self.completion_pool.create() catch unreachable;
        const buf_write = self.buffer_pool.create() catch unreachable;
        @memcpy(buf_write[0..n], buf.slice[0..n]);
        socket.write(loop, c_echo, .{ .slice = buf_write[0..n] }, Server, self, writeCallback);

        // Read again
        return .rearm;
    }

    fn writeCallback(
        self_: ?*Server,
        l: *xev.Loop,
        c: *xev.Completion,
        s: xev.TCP,
        buf: xev.WriteBuffer,
        r: xev.TCP.WriteError!usize,
    ) xev.CallbackAction {
        _ = l;
        _ = s;
        _ = r catch unreachable;

        // We do nothing for write, just put back objects into the pool.
        const self = self_.?;
        self.completion_pool.destroy(c);
        self.buffer_pool.destroy(
            @alignCast(
                @as(*[4096]u8, @ptrFromInt(@intFromPtr(buf.slice.ptr))),
            ),
        );
        return .disarm;
    }

    fn shutdownCallback(
        self_: ?*Server,
        l: *xev.Loop,
        c: *xev.Completion,
        s: xev.TCP,
        r: xev.TCP.ShutdownError!void,
    ) xev.CallbackAction {
        _ = r catch {};

        const self = self_.?;
        s.close(l, c, Server, self, closeCallback);
        return .disarm;
    }

    fn closeCallback(
        self_: ?*Server,
        l: *xev.Loop,
        c: *xev.Completion,
        socket: xev.TCP,
        r: xev.TCP.CloseError!void,
    ) xev.CallbackAction {
        _ = l;
        _ = r catch unreachable;
        _ = socket;

        const self = self_.?;
        self.stop = true;
        self.completion_pool.destroy(c);
        return .disarm;
    }
};
deps/libxev/src/bench/ping-udp1.zig (vendored, new file, 177 lines)
@@ -0,0 +1,177 @@
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Instant = std.time.Instant;
const xev = @import("xev");

pub const std_options: std.Options = .{
    .log_level = .info,
};

pub fn main() !void {
    try run(1);
}

pub fn run(comptime count: comptime_int) !void {
    var thread_pool = xev.ThreadPool.init(.{});
    defer thread_pool.deinit();
    defer thread_pool.shutdown();

    var loop = try xev.Loop.init(.{
        .entries = std.math.pow(u13, 2, 12),
        .thread_pool = &thread_pool,
    });
    defer loop.deinit();

    const addr = try std.net.Address.parseIp4("127.0.0.1", 3131);

    var pingers: [count]Pinger = undefined;
    for (&pingers) |*p| {
        p.* = try Pinger.init(addr);
        try p.start(&loop);
    }

    const start_time = try Instant.now();
    try loop.run(.until_done);
    const end_time = try Instant.now();

    const total: usize = total: {
        var total: usize = 0;
        for (&pingers) |p| total += p.pongs;
        break :total total;
    };

    const elapsed = @as(f64, @floatFromInt(end_time.since(start_time)));
    std.log.info("ping_pongs: {d} pingers, ~{d:.0} roundtrips/s", .{
        count,
        @as(f64, @floatFromInt(total)) / (elapsed / 1e9),
    });
}

const Pinger = struct {
    udp: xev.UDP,
    addr: std.net.Address,
    state: usize = 0,
    pongs: u64 = 0,
    read_buf: [1024]u8 = undefined,
    c_read: xev.Completion = undefined,
    c_write: xev.Completion = undefined,
    state_read: xev.UDP.State = undefined,
    state_write: xev.UDP.State = undefined,
    op_count: u8 = 0,

    pub const PING = "PING\n";

    pub fn init(addr: std.net.Address) !Pinger {
        return .{
            .udp = try xev.UDP.init(addr),
            .state = 0,
            .pongs = 0,
            .addr = addr,
        };
    }

    pub fn start(self: *Pinger, loop: *xev.Loop) !void {
        try self.udp.bind(self.addr);

        self.udp.read(
            loop,
            &self.c_read,
            &self.state_read,
            .{ .slice = &self.read_buf },
            Pinger,
            self,
            Pinger.readCallback,
        );

        self.write(loop);
    }

    pub fn write(self: *Pinger, loop: *xev.Loop) void {
        self.udp.write(
            loop,
            &self.c_write,
            &self.state_write,
            self.addr,
            .{ .slice = PING[0..PING.len] },
            Pinger,
            self,
            writeCallback,
        );
    }

    pub fn readCallback(
        self_: ?*Pinger,
        loop: *xev.Loop,
        c: *xev.Completion,
        _: *xev.UDP.State,
        _: std.net.Address,
        socket: xev.UDP,
        buf: xev.ReadBuffer,
        r: xev.UDP.ReadError!usize,
    ) xev.CallbackAction {
        _ = c;
        _ = socket;
        const self = self_.?;
        const n = r catch unreachable;
        const data = buf.slice[0..n];

        var i: usize = 0;
        while (i < n) : (i += 1) {
            assert(data[i] == PING[self.state]);
            self.state = (self.state + 1) % (PING.len);
            if (self.state == 0) {
                self.pongs += 1;

                // If we're done then exit
                if (self.pongs > 500_000) {
                    self.udp.close(loop, &self.c_read, Pinger, self, closeCallback);
                    return .disarm;
                }

                self.op_count += 1;
                if (self.op_count == 2) {
                    self.op_count = 0;
                    // Send another ping
                    self.write(loop);
                }
            }
        }

        return .rearm;
    }

    pub fn writeCallback(
        self_: ?*Pinger,
        loop: *xev.Loop,
        _: *xev.Completion,
        _: *xev.UDP.State,
        _: xev.UDP,
        _: xev.WriteBuffer,
        r: xev.UDP.WriteError!usize,
    ) xev.CallbackAction {
        const self = self_.?;

        self.op_count += 1;
        if (self.op_count == 2) {
            self.op_count = 0;
            // Send another ping
            self.write(loop);
        }

        _ = r catch unreachable;
        return .disarm;
    }

    pub fn closeCallback(
        _: ?*Pinger,
        _: *xev.Loop,
        _: *xev.Completion,
        _: xev.UDP,
        r: xev.UDP.CloseError!void,
    ) xev.CallbackAction {
        _ = r catch unreachable;
        return .disarm;
    }
};
147
deps/libxev/src/bench/udp_pummel_1v1.zig
vendored
Normal file
@@ -0,0 +1,147 @@
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Instant = std.time.Instant;
const xev = @import("xev");

const EXPECTED = "RANG TANG DING DONG I AM THE JAPANESE SANDMAN";

/// This is a global var decremented for the test without any locks. That's
/// how the original is written and that's how we're going to do it.
var packet_counter: usize = 1e6;
var send_cb_called: usize = 0;
var recv_cb_called: usize = 0;

pub const std_options: std.Options = .{
    .log_level = .info,
};

pub fn main() !void {
    try run(1, 1);
}

pub fn run(comptime n_senders: comptime_int, comptime n_receivers: comptime_int) !void {
    const base_port = 12345;

    var thread_pool = xev.ThreadPool.init(.{});
    defer thread_pool.deinit();
    defer thread_pool.shutdown();

    var loop = try xev.Loop.init(.{
        .entries = std.math.pow(u13, 2, 12),
        .thread_pool = &thread_pool,
    });
    defer loop.deinit();

    var receivers: [n_receivers]Receiver = undefined;
    for (&receivers, 0..) |*r, i| {
        const addr = try std.net.Address.parseIp4("127.0.0.1", @as(u16, @intCast(base_port + i)));
        r.* = .{ .udp = try xev.UDP.init(addr) };
        try r.udp.bind(addr);
        r.udp.read(
            &loop,
            &r.c_recv,
            &r.udp_state,
            .{ .slice = &r.recv_buf },
            Receiver,
            r,
            Receiver.readCallback,
        );
    }

    var senders: [n_senders]Sender = undefined;
    for (&senders, 0..) |*s, i| {
        const addr = try std.net.Address.parseIp4(
            "127.0.0.1",
            @as(u16, @intCast(base_port + (i % n_receivers))),
        );
        s.* = .{ .udp = try xev.UDP.init(addr) };
        s.udp.write(
            &loop,
            &s.c_send,
            &s.udp_state,
            addr,
            .{ .slice = EXPECTED },
            Sender,
            s,
            Sender.writeCallback,
        );
    }

    const start_time = try Instant.now();
    try loop.run(.until_done);
    const end_time = try Instant.now();

    const elapsed = @as(f64, @floatFromInt(end_time.since(start_time)));
    std.log.info("udp_pummel_{d}v{d}: {d:.0}f/s received, {d:.0}f/s sent, {d} received, {d} sent in {d:.1} seconds", .{
        n_senders,
        n_receivers,
        @as(f64, @floatFromInt(recv_cb_called)) / (elapsed / std.time.ns_per_s),
        @as(f64, @floatFromInt(send_cb_called)) / (elapsed / std.time.ns_per_s),
        recv_cb_called,
        send_cb_called,
        elapsed / std.time.ns_per_s,
    });
}
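// Each Sender floods EXPECTED-sized datagrams at its paired Receiver,
// rearming the write until the shared packet_counter drains to zero; each
// Receiver verifies every datagram and rearms its read. Both sides bump the
// unlocked global counters that run() reports at the end.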
const Sender = struct {
    udp: xev.UDP,
    udp_state: xev.UDP.State = undefined,
    c_send: xev.Completion = undefined,

    fn writeCallback(
        _: ?*Sender,
        l: *xev.Loop,
        _: *xev.Completion,
        _: *xev.UDP.State,
        _: xev.UDP,
        _: xev.WriteBuffer,
        r: xev.UDP.WriteError!usize,
    ) xev.CallbackAction {
        _ = r catch unreachable;

        if (packet_counter == 0) {
            l.stop();
            return .disarm;
        }

        packet_counter -|= 1;
        send_cb_called += 1;

        return .rearm;
    }
};

const Receiver = struct {
    udp: xev.UDP,
    udp_state: xev.UDP.State = undefined,
    c_recv: xev.Completion = undefined,
    recv_buf: [65536]u8 = undefined,

    fn readCallback(
        _: ?*Receiver,
        _: *xev.Loop,
        _: *xev.Completion,
        _: *xev.UDP.State,
        _: std.net.Address,
        _: xev.UDP,
        b: xev.ReadBuffer,
        r: xev.UDP.ReadError!usize,
    ) xev.CallbackAction {
        const n = r catch |err| {
            switch (err) {
                error.EOF => {},
                else => std.log.warn("err={}", .{err}),
            }

            return .disarm;
        };

        if (!std.mem.eql(u8, b.slice[0..n], EXPECTED)) {
            @panic("Unexpected data.");
        }

        recv_cb_called += 1;
        return .rearm;
    }
};
163
deps/libxev/src/build/ScdocStep.zig
vendored
Normal file
@@ -0,0 +1,163 @@
const std = @import("std");
const mem = std.mem;
const fs = std.fs;
const Step = std.Build.Step;
const Build = std.Build;

/// ScdocStep generates man pages using scdoc(1).
///
/// It reads all the raw pages from src_path and writes them to out_path.
/// src_path is typically "docs/" relative to the build root and out_path is
/// the build cache.
///
/// The man pages can be installed by calling install() on the step.
const ScdocStep = @This();

step: Step,
builder: *Build,

/// path to read man page sources from, defaults to the "docs/" subdirectory
/// from the build.zig file. This must be an absolute path.
src_path: []const u8,

/// path where the generated man pages will be written (NOT installed). This
/// defaults to build cache root.
out_path: []const u8,

pub fn create(builder: *Build) *ScdocStep {
    const self = builder.allocator.create(ScdocStep) catch unreachable;
    self.* = init(builder);
    return self;
}

pub fn init(builder: *Build) ScdocStep {
    return ScdocStep{
        .builder = builder,
        .step = Step.init(.{
            .id = .custom,
            .name = "generate man pages",
            .owner = builder,
            .makeFn = make,
        }),
        .src_path = builder.pathFromRoot("docs/"),
        .out_path = builder.cache_root.join(builder.allocator, &[_][]const u8{
            "man",
        }) catch unreachable,
    };
}

fn make(step: *std.Build.Step, _: std.Progress.Node) !void {
    const self: *ScdocStep = @fieldParentPtr("step", step);

    // Create our cache path
    // TODO(mitchellh): ideally this would be pure zig
    {
        const command = try std.fmt.allocPrint(
            self.builder.allocator,
            "rm -f {[path]s}/* && mkdir -p {[path]s}",
            .{ .path = self.out_path },
        );
        _ = self.builder.run(&[_][]const u8{ "sh", "-c", command });
    }

    // Find all our man pages which are in our src path ending with ".scd".
    var dir = try fs.openDirAbsolute(self.src_path, .{ .iterate = true });
    defer dir.close();

    var iter = dir.iterate();
    while (try iter.next()) |*entry| {
        // We only want "scd" files to generate.
        if (!mem.eql(u8, fs.path.extension(entry.name), ".scd")) {
            continue;
        }

        const src = try fs.path.join(
            self.builder.allocator,
            &[_][]const u8{ self.src_path, entry.name },
        );

        const dst = try fs.path.join(
            self.builder.allocator,
            &[_][]const u8{ self.out_path, entry.name[0..(entry.name.len - 4)] },
        );

        const command = try std.fmt.allocPrint(
            self.builder.allocator,
            "scdoc < {s} > {s}",
            .{ src, dst },
        );
        _ = self.builder.run(&[_][]const u8{ "sh", "-c", command });
    }
}

pub fn install(self: *ScdocStep) !void {
    // Ensure that `zig build install` depends on our generation step first.
    self.builder.getInstallStep().dependOn(&self.step);

    // Then run our install step which looks at what we made out of our
    // generation and moves it to the install prefix.
    const install_step = InstallStep.create(self.builder, self);
    self.builder.getInstallStep().dependOn(&install_step.step);
}
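// Usage sketch (hypothetical build.zig wiring, not part of this file):
//
//     const scdoc = ScdocStep.create(b);
//     try scdoc.install();
//
// After this, `zig build install` regenerates the pages with scdoc and
// copies them into share/man under the install prefix.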
/// Install man pages, created using install() on ScdocStep.
const InstallStep = struct {
    step: Step,
    builder: *Build,
    scdoc: *ScdocStep,

    pub fn create(builder: *Build, scdoc: *ScdocStep) *InstallStep {
        const self = builder.allocator.create(InstallStep) catch unreachable;
        self.* = InstallStep.init(builder, scdoc);
        self.step.dependOn(&scdoc.step);
        return self;
    }

    fn init(builder: *Build, scdoc: *ScdocStep) InstallStep {
        return InstallStep{
            .builder = builder,
            .step = Step.init(.{
                .id = .custom,
                .name = "install man pages",
                .owner = builder,
                .makeFn = InstallStep.make,
            }),
            .scdoc = scdoc,
        };
    }

    fn make(step: *Step, progress: std.Progress.Node) !void {
        const self: *InstallStep = @fieldParentPtr("step", step);

        // Get our absolute output path
        var path = self.scdoc.out_path;
        if (!fs.path.isAbsolute(path)) {
            path = self.builder.pathFromRoot(path);
        }

        // Find all the generated man pages in our output path.
        var dir = try fs.openDirAbsolute(path, .{ .iterate = true });
        defer dir.close();
        var iter = dir.iterate();
        while (try iter.next()) |*entry| {
            // We expect filenames to be "foo.3" and this gets us "3"
            const section = entry.name[(entry.name.len - 1)..];

            const src = try fs.path.join(
                self.builder.allocator,
                &[_][]const u8{ path, entry.name },
            );
            const output = try std.fmt.allocPrint(
                self.builder.allocator,
                "share/man/man{s}/{s}",
                .{ section, entry.name },
            );

            const fileStep = self.builder.addInstallFile(
                .{ .cwd_relative = src },
                output,
            );
            try fileStep.step.make(progress);
        }
    }
};
355
deps/libxev/src/c_api.zig
vendored
Normal file
@@ -0,0 +1,355 @@
// This file contains the C bindings that are exported when building
// the system libraries.
//
// WHERE IS THE DOCUMENTATION? Note that all the documentation for the C
// interface is in the man pages. The header file xev.h purposely has no
// documentation so that it's concise and easy to see the list of exported
// functions.

const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const xev = @import("main.zig");

export fn xev_loop_init(loop: *xev.Loop) c_int {
    // TODO: overflow
    loop.* = xev.Loop.init(.{}) catch |err| return errorCode(err);
    return 0;
}

export fn xev_loop_deinit(loop: *xev.Loop) void {
    loop.deinit();
}

export fn xev_loop_run(loop: *xev.Loop, mode: xev.RunMode) c_int {
    loop.run(mode) catch |err| return errorCode(err);
    return 0;
}

export fn xev_loop_now(loop: *xev.Loop) i64 {
    return loop.now();
}

export fn xev_loop_update_now(loop: *xev.Loop) void {
    loop.update_now();
}

export fn xev_completion_zero(c: *xev.Completion) void {
    c.* = .{};
}

export fn xev_completion_state(c: *xev.Completion) xev.CompletionState {
    return c.state();
}

//-------------------------------------------------------------------
// ThreadPool

export fn xev_threadpool_config_init(cfg: *xev.ThreadPool.Config) void {
    cfg.* = .{};
}

export fn xev_threadpool_config_set_stack_size(
    cfg: *xev.ThreadPool.Config,
    v: u32,
) void {
    cfg.stack_size = v;
}

export fn xev_threadpool_config_set_max_threads(
    cfg: *xev.ThreadPool.Config,
    v: u32,
) void {
    cfg.max_threads = v;
}

export fn xev_threadpool_init(
    threadpool: *xev.ThreadPool,
    cfg_: ?*xev.ThreadPool.Config,
) c_int {
    const cfg: xev.ThreadPool.Config = if (cfg_) |v| v.* else .{};
    threadpool.* = xev.ThreadPool.init(cfg);
    return 0;
}

export fn xev_threadpool_deinit(threadpool: *xev.ThreadPool) void {
    threadpool.deinit();
}

export fn xev_threadpool_shutdown(threadpool: *xev.ThreadPool) void {
    threadpool.shutdown();
}

export fn xev_threadpool_schedule(
    pool: *xev.ThreadPool,
    batch: *xev.ThreadPool.Batch,
) void {
    pool.schedule(batch.*);
}

export fn xev_threadpool_task_init(
    t: *xev.ThreadPool.Task,
    cb: *const fn (*xev.ThreadPool.Task) callconv(.C) void,
) void {
    const extern_t = @as(*Task, @ptrCast(@alignCast(t)));
    extern_t.c_callback = cb;

    t.* = .{
        .callback = (struct {
            fn callback(inner_t: *xev.ThreadPool.Task) void {
                const outer_t: *Task = @alignCast(@fieldParentPtr(
                    "data",
                    @as(*Task.Data, @ptrCast(inner_t)),
                ));
                outer_t.c_callback(inner_t);
            }
        }).callback,
    };
}
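// The cast above works because the C-facing Task (declared near the bottom
// of this file) is the Zig Task's bytes plus a trailing callback pointer:
// viewing the same memory as *Task lets the comptime trampoline recover the
// C function pointer from the task it was handed.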
export fn xev_threadpool_batch_init(b: *xev.ThreadPool.Batch) void {
    b.* = .{};
}

export fn xev_threadpool_batch_push_task(
    b: *xev.ThreadPool.Batch,
    t: *xev.ThreadPool.Task,
) void {
    b.push(xev.ThreadPool.Batch.from(t));
}

export fn xev_threadpool_batch_push_batch(
    b: *xev.ThreadPool.Batch,
    other: *xev.ThreadPool.Batch,
) void {
    b.push(other.*);
}

//-------------------------------------------------------------------
// Timers

export fn xev_timer_init(v: *xev.Timer) c_int {
    v.* = xev.Timer.init() catch |err| return errorCode(err);
    return 0;
}

export fn xev_timer_deinit(v: *xev.Timer) void {
    v.deinit();
}

export fn xev_timer_run(
    v: *xev.Timer,
    loop: *xev.Loop,
    c: *xev.Completion,
    next_ms: u64,
    userdata: ?*anyopaque,
    cb: *const fn (
        *xev.Loop,
        *xev.Completion,
        c_int,
        ?*anyopaque,
    ) callconv(.C) xev.CallbackAction,
) void {
    const Callback = @typeInfo(@TypeOf(cb)).Pointer.child;
    const extern_c = @as(*Completion, @ptrCast(@alignCast(c)));
    extern_c.c_callback = @as(*const anyopaque, @ptrCast(cb));

    v.run(loop, c, next_ms, anyopaque, userdata, (struct {
        fn callback(
            ud: ?*anyopaque,
            cb_loop: *xev.Loop,
            cb_c: *xev.Completion,
            r: xev.Timer.RunError!void,
        ) xev.CallbackAction {
            const cb_extern_c = @as(*Completion, @ptrCast(cb_c));
            const cb_c_callback = @as(
                *const Callback,
                @ptrCast(@alignCast(cb_extern_c.c_callback)),
            );
            return @call(.auto, cb_c_callback, .{
                cb_loop,
                cb_c,
                if (r) |_| 0 else |err| errorCode(err),
                ud,
            });
        }
    }).callback);
}
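// The same thunk pattern repeats for reset/cancel and the async wait below:
// the C callback pointer is stashed in the extended Completion, and a
// comptime struct-local callback recovers it and translates the Zig error
// union into the 0-or-error-code integer the C API promises.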
export fn xev_timer_reset(
    v: *xev.Timer,
    loop: *xev.Loop,
    c: *xev.Completion,
    c_cancel: *xev.Completion,
    next_ms: u64,
    userdata: ?*anyopaque,
    cb: *const fn (
        *xev.Loop,
        *xev.Completion,
        c_int,
        ?*anyopaque,
    ) callconv(.C) xev.CallbackAction,
) void {
    const Callback = @typeInfo(@TypeOf(cb)).Pointer.child;
    const extern_c = @as(*Completion, @ptrCast(@alignCast(c)));
    extern_c.c_callback = @as(*const anyopaque, @ptrCast(cb));

    v.reset(loop, c, c_cancel, next_ms, anyopaque, userdata, (struct {
        fn callback(
            ud: ?*anyopaque,
            cb_loop: *xev.Loop,
            cb_c: *xev.Completion,
            r: xev.Timer.RunError!void,
        ) xev.CallbackAction {
            const cb_extern_c = @as(*Completion, @ptrCast(cb_c));
            const cb_c_callback = @as(
                *const Callback,
                @ptrCast(@alignCast(cb_extern_c.c_callback)),
            );
            return @call(.auto, cb_c_callback, .{
                cb_loop,
                cb_c,
                if (r) |_| 0 else |err| errorCode(err),
                ud,
            });
        }
    }).callback);
}

export fn xev_timer_cancel(
    v: *xev.Timer,
    loop: *xev.Loop,
    c_timer: *xev.Completion,
    c_cancel: *xev.Completion,
    userdata: ?*anyopaque,
    cb: *const fn (
        *xev.Loop,
        *xev.Completion,
        c_int,
        ?*anyopaque,
    ) callconv(.C) xev.CallbackAction,
) void {
    const Callback = @typeInfo(@TypeOf(cb)).Pointer.child;
    const extern_c = @as(*Completion, @ptrCast(@alignCast(c_cancel)));
    extern_c.c_callback = @as(*const anyopaque, @ptrCast(cb));

    v.cancel(loop, c_timer, c_cancel, anyopaque, userdata, (struct {
        fn callback(
            ud: ?*anyopaque,
            cb_loop: *xev.Loop,
            cb_c: *xev.Completion,
            r: xev.Timer.CancelError!void,
        ) xev.CallbackAction {
            const cb_extern_c = @as(*Completion, @ptrCast(cb_c));
            const cb_c_callback = @as(
                *const Callback,
                @ptrCast(@alignCast(cb_extern_c.c_callback)),
            );
            return @call(.auto, cb_c_callback, .{
                cb_loop,
                cb_c,
                if (r) |_| 0 else |err| errorCode(err),
                ud,
            });
        }
    }).callback);
}

//-------------------------------------------------------------------
// Async

export fn xev_async_init(v: *xev.Async) c_int {
    v.* = xev.Async.init() catch |err| return errorCode(err);
    return 0;
}

export fn xev_async_deinit(v: *xev.Async) void {
    v.deinit();
}

export fn xev_async_notify(v: *xev.Async) c_int {
    v.notify() catch |err| return errorCode(err);
    return 0;
}

export fn xev_async_wait(
    v: *xev.Async,
    loop: *xev.Loop,
    c: *xev.Completion,
    userdata: ?*anyopaque,
    cb: *const fn (
        *xev.Loop,
        *xev.Completion,
        c_int,
        ?*anyopaque,
    ) callconv(.C) xev.CallbackAction,
) void {
    const Callback = @typeInfo(@TypeOf(cb)).Pointer.child;
    const extern_c = @as(*Completion, @ptrCast(@alignCast(c)));
    extern_c.c_callback = @as(*const anyopaque, @ptrCast(cb));

    v.wait(loop, c, anyopaque, userdata, (struct {
        fn callback(
            ud: ?*anyopaque,
            cb_loop: *xev.Loop,
            cb_c: *xev.Completion,
            r: xev.Async.WaitError!void,
        ) xev.CallbackAction {
            const cb_extern_c = @as(*Completion, @ptrCast(cb_c));
            const cb_c_callback = @as(
                *const Callback,
                @ptrCast(@alignCast(cb_extern_c.c_callback)),
            );
            return @call(.auto, cb_c_callback, .{
                cb_loop,
                cb_c,
                if (r) |_| 0 else |err| errorCode(err),
                ud,
            });
        }
    }).callback);
}

//-------------------------------------------------------------------
// Sync with xev.h

/// Since we can't pass the callback at comptime with C, we have to
/// have an additional field on completions to store our callback pointer.
/// We just tack it onto the end of the memory chunk that C programs allocate
/// for completions.
const Completion = extern struct {
    const Data = [@sizeOf(xev.Completion)]u8;
    data: Data,
    c_callback: *const anyopaque,
};

const Task = extern struct {
    const Data = [@sizeOf(xev.ThreadPool.Task)]u8;
    data: Data,
    c_callback: *const fn (*xev.ThreadPool.Task) callconv(.C) void,
};

/// Returns the unique error code for an error.
fn errorCode(err: anyerror) c_int {
    // TODO(mitchellh): This is a bad idea because it's not stable across
    // code changes. For now we just document that error codes are not
    // stable but that is not useful at all!
    return @intFromError(err);
}

test "c-api sizes" {
    // This tests the sizes that are defined in the C API. We must ensure
    // that our main structure sizes never exceed these so that the C ABI
    // is maintained.
    //
    // THE MAGIC NUMBERS ARE KEPT IN SYNC WITH "include/xev.h"
    const testing = std.testing;
    try testing.expect(@sizeOf(xev.Loop) <= 512);
    try testing.expect(@sizeOf(Completion) <= 320);
    try testing.expect(@sizeOf(xev.Async) <= 256);
    try testing.expect(@sizeOf(xev.Timer) <= 256);
    try testing.expectEqual(@as(usize, 48), @sizeOf(xev.ThreadPool));
    try testing.expectEqual(@as(usize, 24), @sizeOf(xev.ThreadPool.Batch));
    try testing.expectEqual(@as(usize, 24), @sizeOf(Task));
    try testing.expectEqual(@as(usize, 8), @sizeOf(xev.ThreadPool.Config));
}
58
deps/libxev/src/debug.zig
vendored
Normal file
@@ -0,0 +1,58 @@
const std = @import("std");

inline fn indent(depth: usize, writer: anytype) !void {
    for (0..depth) |_| try writer.writeByte(' ');
}

pub fn describe(comptime T: type, writer: anytype, depth: usize) !void {
    const type_info = @typeInfo(T);
    switch (type_info) {
        .Type,
        .Void,
        .Bool,
        .NoReturn,
        .Int,
        .Float,
        .Pointer,
        .Array,
        .ComptimeFloat,
        .ComptimeInt,
        .Undefined,
        .Null,
        .Optional,
        .ErrorUnion,
        .ErrorSet,
        .Enum,
        .Fn,
        .Opaque,
        .Frame,
        .AnyFrame,
        .Vector,
        .EnumLiteral,
        => {
            try writer.print("{s} ({d} bytes)", .{ @typeName(T), @sizeOf(T) });
        },
        .Union => |s| {
            try writer.print("{s} ({d} bytes) {{\n", .{ @typeName(T), @sizeOf(T) });
            inline for (s.fields) |f| {
                try indent(depth + 4, writer);
                try writer.print("{s}: ", .{f.name});
                try describe(f.type, writer, depth + 4);
                try writer.writeByte('\n');
            }
            try indent(depth, writer);
            try writer.writeByte('}');
        },
        .Struct => |s| {
            try writer.print("{s} ({d} bytes) {{\n", .{ @typeName(T), @sizeOf(T) });
            inline for (s.fields) |f| {
                try indent(depth + 4, writer);
                try writer.print("{s}: ", .{f.name});
                try describe(f.type, writer, depth + 4);
                try writer.writeByte('\n');
            }
            try indent(depth, writer);
            try writer.writeByte('}');
        },
    }
}
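// Usage sketch (hypothetical; this helper is only compiled when referenced):
//
//     const xev = @import("main.zig");
//     try describe(xev.Completion, std.io.getStdErr().writer(), 0);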
379
deps/libxev/src/heap.zig
vendored
Normal file
@@ -0,0 +1,379 @@
const std = @import("std");
const assert = std.debug.assert;

/// An intrusive heap implementation backed by a pairing heap[1] implementation.
///
/// Why? Intrusive data structures require the element type to hold the metadata
/// required for the structure, rather than an additional container structure.
/// There are numerous pros/cons that are documented well by Boost[2]. For Zig,
/// I think the primary benefits are making data structures allocation free
/// (rather, shifting allocation up to the consumer which can choose how they
/// want the memory to be available). There are various costs to this such as
/// the costs of pointer chasing, larger memory overhead, requiring the element
/// type to be aware of its container, etc. But for certain use cases an intrusive
/// data structure can yield much better performance.
///
/// Usage notes:
/// - The element T is expected to have a field "heap" of type IntrusiveField.
///   See the tests for a full example of how to set this.
/// - You can easily make this a min or max heap by inverting the result of
///   "less" below.
///
/// [1]: https://en.wikipedia.org/wiki/Pairing_heap
/// [2]: https://www.boost.org/doc/libs/1_64_0/doc/html/intrusive/intrusive_vs_nontrusive.html
pub fn Intrusive(
    comptime T: type,
    comptime Context: type,
    comptime less: *const fn (ctx: Context, a: *T, b: *T) bool,
) type {
    return struct {
        const Self = @This();

        root: ?*T = null,
        context: Context,

        /// Insert a new element v into the heap. An element v can only
        /// be a member of a single heap at any given time. When compiled
        /// with runtime-safety, assertions will help verify this property.
        pub fn insert(self: *Self, v: *T) void {
            self.root = if (self.root) |root| self.meld(v, root) else v;
        }

        /// Look at the next minimum value but do not remove it.
        pub fn peek(self: *Self) ?*T {
            return self.root;
        }

        /// Delete the minimum value from the heap and return it.
        pub fn deleteMin(self: *Self) ?*T {
            const root = self.root orelse return null;
            self.root = if (root.heap.child) |child|
                self.combine_siblings(child)
            else
                null;

            // Clear pointers with runtime safety so we can verify on
            // insert that values aren't incorrectly being set multiple times.
            root.heap = .{};

            return root;
        }

        /// Remove the value v from the heap.
        pub fn remove(self: *Self, v: *T) void {
            // If v doesn't have a previous value, this must be the root
            // element. If it is NOT the root element, v can't be in this
            // heap and we trigger an assertion failure.
            const prev = v.heap.prev orelse {
                assert(self.root.? == v);
                _ = self.deleteMin();
                return;
            };

            // Detach "v" from the tree and clean up any links so it
            // is as if this node never existed. The previous value
            // must point to the proper next value and the pointers
            // must all be cleaned up.
            if (v.heap.next) |next| next.heap.prev = prev;
            if (prev.heap.child == v)
                prev.heap.child = v.heap.next
            else
                prev.heap.next = v.heap.next;
            v.heap.prev = null;
            v.heap.next = null;

            // If we have children, then we need to merge them back in.
            const child = v.heap.child orelse return;
            v.heap.child = null;
            const x = self.combine_siblings(child);
            self.root = self.meld(x, self.root.?);
        }

        /// Meld (union) two heaps together. This isn't a generalized
        /// union. It assumes that a.heap.next is null so this is only
        /// meant in specific scenarios in the pairing heap where meld
        /// is expected.
        ///
        /// For example, when melding a new value "v" with an existing
        /// root "root", "v" must always be the first param.
        fn meld(self: *Self, a: *T, b: *T) *T {
            assert(a.heap.next == null);

            if (less(self.context, a, b)) {
                // B points back to A
                b.heap.prev = a;

                // If B has siblings, then A inherits B's siblings
                // and B's immediate sibling must point back to A to
                // maintain the doubly linked list.
                if (b.heap.next) |b_next| {
                    a.heap.next = b_next;
                    b_next.heap.prev = a;
                    b.heap.next = null;
                }

                // If A has a child, then B becomes the leftmost sibling
                // of that child.
                if (a.heap.child) |a_child| {
                    b.heap.next = a_child;
                    a_child.heap.prev = b;
                }

                // B becomes the leftmost child of A
                a.heap.child = b;

                return a;
            }

            // Replace A with B in the tree. Any of B's children
            // become siblings of A. A becomes the leftmost child of B.
            // A points back to B
            b.heap.prev = a.heap.prev;
            a.heap.prev = b;
            if (b.heap.child) |b_child| {
                a.heap.next = b_child;
                b_child.heap.prev = a;
            }
            b.heap.child = a;
            return b;
        }

        /// Combine the siblings of the leftmost value "left" into a single
        /// new root with the minimum value.
        fn combine_siblings(self: *Self, left: *T) *T {
            left.heap.prev = null;

            // Merge pairs right
            var root: *T = root: {
                var a: *T = left;
                while (true) {
                    var b = a.heap.next orelse break :root a;
                    a.heap.next = null;
                    b = self.meld(a, b);
                    a = b.heap.next orelse break :root b;
                }
            };

            // Merge pairs left
            while (true) {
                var b = root.heap.prev orelse return root;
                b.heap.next = null;
                root = self.meld(b, root);
            }
        }
    };
}
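// Structure note: "child" points at the leftmost child while "next"/"prev"
// link siblings, so the heap is stored as a left-child/right-sibling tree.
// combine_siblings implements the classic two-pass pairing heap scheme:
// meld sibling pairs moving right, then fold the results back to the left.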
/// The state that is required for IntrusiveHeap element types. This
/// should be set as the "heap" field in the type T.
pub fn IntrusiveField(comptime T: type) type {
    return struct {
        child: ?*T = null,
        prev: ?*T = null,
        next: ?*T = null,
    };
}

test "heap" {
    const Elem = struct {
        const Self = @This();
        value: usize = 0,
        heap: IntrusiveField(Self) = .{},
    };

    const Heap = Intrusive(Elem, void, (struct {
        fn less(ctx: void, a: *Elem, b: *Elem) bool {
            _ = ctx;
            return a.value < b.value;
        }
    }).less);

    var a: Elem = .{ .value = 12 };
    var b: Elem = .{ .value = 24 };
    var c: Elem = .{ .value = 7 };
    var d: Elem = .{ .value = 9 };

    var h: Heap = .{ .context = {} };
    h.insert(&a);
    h.insert(&b);
    h.insert(&c);
    h.insert(&d);
    h.remove(&d);

    const testing = std.testing;
    try testing.expect(h.deleteMin().?.value == 7);
    try testing.expect(h.deleteMin().?.value == 12);
    try testing.expect(h.deleteMin().?.value == 24);
    try testing.expect(h.deleteMin() == null);
}

test "heap remove root" {
    const Elem = struct {
        const Self = @This();
        value: usize = 0,
        heap: IntrusiveField(Self) = .{},
    };

    const Heap = Intrusive(Elem, void, (struct {
        fn less(ctx: void, a: *Elem, b: *Elem) bool {
            _ = ctx;
            return a.value < b.value;
        }
    }).less);

    var a: Elem = .{ .value = 12 };
    var b: Elem = .{ .value = 24 };

    var h: Heap = .{ .context = {} };
    h.insert(&a);
    h.insert(&b);
    h.remove(&a);

    const testing = std.testing;
    try testing.expect(h.deleteMin().?.value == 24);
    try testing.expect(h.deleteMin() == null);
}

test "heap remove with children" {
    const Elem = struct {
        const Self = @This();
        value: usize = 0,
        heap: IntrusiveField(Self) = .{},
    };

    const Heap = Intrusive(Elem, void, (struct {
        fn less(ctx: void, a: *Elem, b: *Elem) bool {
            _ = ctx;
            return a.value < b.value;
        }
    }).less);

    var a: Elem = .{ .value = 36 };
    var b: Elem = .{ .value = 24 };
    var c: Elem = .{ .value = 12 };

    var h: Heap = .{ .context = {} };
    h.insert(&a);
    h.insert(&b);
    h.insert(&c);
    h.remove(&b);

    const testing = std.testing;
    try testing.expect(h.deleteMin().?.value == 12);
    try testing.expect(h.deleteMin().?.value == 36);
    try testing.expect(h.deleteMin() == null);
}

test "heap equal values" {
    const testing = std.testing;

    const Elem = struct {
        const Self = @This();
        value: usize = 0,
        heap: IntrusiveField(Self) = .{},
    };

    const Heap = Intrusive(Elem, void, (struct {
        fn less(ctx: void, a: *Elem, b: *Elem) bool {
            _ = ctx;
            return a.value < b.value;
        }
    }).less);

    var a: Elem = .{ .value = 1 };
    var b: Elem = .{ .value = 2 };
    var c: Elem = .{ .value = 3 };
    var d: Elem = .{ .value = 4 };

    var h: Heap = .{ .context = {} };
    h.insert(&a);
    h.insert(&b);
    h.insert(&c);
    h.insert(&d);

    try testing.expect(h.deleteMin().?.value == 1);
    try testing.expect(h.deleteMin().?.value == 2);
    try testing.expect(h.deleteMin().?.value == 3);
    try testing.expect(h.deleteMin().?.value == 4);
    try testing.expect(h.deleteMin() == null);
}

test "heap: million values" {
    const testing = std.testing;
    const alloc = testing.allocator;

    const Elem = struct {
        const Self = @This();
        value: usize = 0,
        heap: IntrusiveField(Self) = .{},
    };

    const Heap = Intrusive(Elem, void, (struct {
        fn less(ctx: void, a: *Elem, b: *Elem) bool {
            _ = ctx;
            return a.value < b.value;
        }
    }).less);

    const NUM_TIMERS: usize = 1000 * 1000;
    var elems = try alloc.alloc(Elem, NUM_TIMERS);
    defer alloc.free(elems);

    var i: usize = 0;
    var value: usize = 0;
    while (i < NUM_TIMERS) : (i += 1) {
        if (i % 100 == 0) value += 1;
        elems[i] = .{ .value = value };
    }

    var h: Heap = .{ .context = {} };
    for (elems) |*elem| {
        h.insert(elem);
    }

    var count: usize = 0;
    var last: usize = 0;
    while (h.deleteMin()) |elem| {
        count += 1;
        try testing.expect(elem.value >= last);
        last = elem.value;
    }
    try testing.expect(h.deleteMin() == null);
    try testing.expect(count == NUM_TIMERS);
}

test "heap: dangling next pointer" {
    const testing = std.testing;
    const Elem = struct {
        const Self = @This();
        value: usize = 0,
        heap: IntrusiveField(Self) = .{},
    };

    const Heap = Intrusive(Elem, void, (struct {
        fn less(ctx: void, a: *Elem, b: *Elem) bool {
            _ = ctx;
            return a.value < b.value;
        }
    }).less);

    var a: Elem = .{ .value = 2 };
    var b: Elem = .{ .value = 4 };
    var c: Elem = .{ .value = 5 };
    var d: Elem = .{ .value = 1 };
    var e: Elem = .{ .value = 3 };

    var h: Heap = .{ .context = {} };
    h.insert(&a);
    h.insert(&b);
    h.insert(&c);
    h.insert(&d);
    h.insert(&e);

    try testing.expect(h.deleteMin().?.value == 1);
    try testing.expect(h.deleteMin().?.value == 2);
    try testing.expect(h.deleteMin().?.value == 3);
    try testing.expect(h.deleteMin().?.value == 4);
    try testing.expect(h.deleteMin().?.value == 5);
    try testing.expect(h.deleteMin() == null);
}
98
deps/libxev/src/linux/timerfd.zig
vendored
Normal file
@@ -0,0 +1,98 @@
const std = @import("std");
const linux = std.os.linux;
const posix = std.posix;

/// Timerfd is a wrapper around the timerfd system calls. See the
/// timerfd_create man page for information on timerfd and associated
/// system calls.
///
/// This is a small wrapper around timerfd to make it slightly more
/// pleasant to use, but may not expose all available functionality.
/// For maximum control you should use the syscalls directly.
pub const Timerfd = struct {
    /// The timerfd file descriptor for use with poll, etc.
    fd: i32,

    /// timerfd_create
    pub fn init(clock: Clock, flags: linux.TFD) !Timerfd {
        const res = linux.timerfd_create(@intFromEnum(clock), flags);
        return switch (posix.errno(res)) {
            .SUCCESS => .{ .fd = @as(i32, @intCast(res)) },
            else => error.UnknownError,
        };
    }

    pub fn deinit(self: *const Timerfd) void {
        posix.close(self.fd);
    }

    /// timerfd_settime
    pub fn set(
        self: *const Timerfd,
        flags: linux.TFD.TIMER,
        new_value: *const Spec,
        old_value: ?*Spec,
    ) !void {
        const res = linux.timerfd_settime(
            self.fd,
            flags,
            @as(*const linux.itimerspec, @ptrCast(new_value)),
            @as(?*linux.itimerspec, @ptrCast(old_value)),
        );

        return switch (posix.errno(res)) {
            .SUCCESS => {},
            else => error.UnknownError,
        };
    }

    /// timerfd_gettime
    pub fn get(self: *const Timerfd) !Spec {
        var out: Spec = undefined;
        const res = linux.timerfd_gettime(self.fd, @as(*linux.itimerspec, @ptrCast(&out)));
        return switch (posix.errno(res)) {
            .SUCCESS => out,
            else => error.UnknownError,
        };
    }

    /// The clocks available for a Timerfd. This is a non-exhaustive enum
    /// so that unsupported values can be attempted to be passed into the
    /// system calls.
    pub const Clock = enum(i32) {
        realtime = 0,
        monotonic = 1,
        boottime = 7,
        realtime_alarm = 8,
        boottime_alarm = 9,
        _,
    };
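    // The enum values above mirror the kernel clock IDs (CLOCK_REALTIME = 0,
    // CLOCK_MONOTONIC = 1, CLOCK_BOOTTIME = 7, plus the _ALARM variants),
    // which is why the enum is non-exhaustive and passed via @intFromEnum.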
    /// itimerspec
    pub const Spec = extern struct {
        interval: TimeSpec = .{},
        value: TimeSpec = .{},
    };

    /// timespec
    pub const TimeSpec = extern struct {
        seconds: isize = 0,
        nanoseconds: isize = 0,
    };
};

test Timerfd {
    const testing = std.testing;

    var t = try Timerfd.init(.monotonic, .{});
    defer t.deinit();

    // Set
    try t.set(.{}, &.{ .value = .{ .seconds = 60 } }, null);
    try testing.expect((try t.get()).value.seconds > 0);

    // Disarm
    var old: Timerfd.Spec = undefined;
    try t.set(.{}, &.{ .value = .{ .seconds = 0 } }, &old);
    try testing.expect(old.value.seconds > 0);
}
77
deps/libxev/src/loop.zig
vendored
Normal file
@@ -0,0 +1,77 @@
//! Common loop structures. The actual loop implementation is in backend-specific
//! files such as linux/io_uring.zig.

const std = @import("std");
const assert = std.debug.assert;
const xev = @import("main.zig");

/// Common options across backends. Not all options apply to all backends.
/// Read the doc comment for individual fields to learn what backends they
/// apply to.
pub const Options = struct {
    /// The number of queued completions that can be in flight before
    /// requiring interaction with the kernel.
    ///
    /// Backends: io_uring
    entries: u32 = 256,

    /// A thread pool to use for blocking operations. If the backend doesn't
    /// need to perform any blocking operations then no threads will ever
    /// be spawned. If the backend does need to perform blocking operations
    /// on a thread and no thread pool is provided, the operations will simply
    /// fail. Unless you're trying to really optimize for space, it is
    /// recommended you provide a thread pool.
    ///
    /// Backends: epoll, kqueue
    thread_pool: ?*xev.ThreadPool = null,
};
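// For example (a sketch mirroring the bench programs in this repo), a loop
// that can hand blocking work off to a thread pool:
//
//     var pool = xev.ThreadPool.init(.{});
//     defer pool.deinit();
//     var loop = try xev.Loop.init(.{ .entries = 256, .thread_pool = &pool });
//     defer loop.deinit();
//     try loop.run(.until_done);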
/// The loop run mode -- all backends are required to support this in some way.
/// Backends may provide backend-specific APIs that behave slightly differently
/// or in a more configurable way.
pub const RunMode = enum(c_int) {
    /// Run the event loop once. If there are no blocking operations ready,
    /// return immediately.
    no_wait = 0,

    /// Run the event loop once, waiting for at least one blocking operation
    /// to complete.
    once = 1,

    /// Run the event loop until it is "done". "Doneness" is defined as
    /// there being no more completions that are active.
    until_done = 2,
};

/// The result type for callbacks. This should be used by all loop
/// implementations and higher level abstractions in order to control
/// what to do after the loop completes.
pub const CallbackAction = enum(c_int) {
    /// The request is complete and is not repeated. For example, a read
    /// callback only fires once and is no longer watched for reads. You
    /// can always free memory associated with the completion prior to
    /// returning this.
    disarm = 0,

    /// Requeue the same operation request with the same parameters
    /// with the event loop. This makes it easy to repeat a read, timer,
    /// etc. This rearms the request EXACTLY as-is. For example, the
    /// low-level timer interface for io_uring uses an absolute timeout.
    /// If you rearm the timer, it will fire immediately because the absolute
    /// timeout will be in the past.
    ///
    /// The completion is reused so it is not safe to use the same completion
    /// for anything else.
    rearm = 1,
};

/// The state that a completion can be in.
pub const CompletionState = enum(c_int) {
    /// The completion is not being used and is ready to be configured
    /// for new work.
    dead = 0,

    /// The completion is part of an event loop. This may be already waited
    /// on or in the process of being registered.
    active = 1,
};
174
deps/libxev/src/main.zig
vendored
Normal file
@@ -0,0 +1,174 @@
const std = @import("std");
const builtin = @import("builtin");

/// The low-level IO interfaces using the recommended compile-time
/// interface for the target system.
const xev = Backend.default().Api();
pub usingnamespace xev;
//pub usingnamespace Epoll;

/// System-specific interfaces. Note that they are always pub for
/// all systems but if you reference them and force them to be analyzed
/// the proper system APIs must exist. Due to Zig's lazy analysis, if you
/// don't use any interface it will NOT be compiled (yay!).
pub const IO_Uring = Xev(.io_uring, @import("backend/io_uring.zig"));
pub const Epoll = Xev(.epoll, @import("backend/epoll.zig"));
pub const Kqueue = Xev(.kqueue, @import("backend/kqueue.zig"));
pub const WasiPoll = Xev(.wasi_poll, @import("backend/wasi_poll.zig"));
pub const IOCP = Xev(.iocp, @import("backend/iocp.zig"));

/// Generic thread pool implementation.
pub const ThreadPool = @import("ThreadPool.zig");

/// This stream (lowercase s) can be used as a namespace to access
/// Closeable, Writeable, Readable, etc. so that custom streams
/// can be constructed.
pub const stream = @import("watcher/stream.zig");

/// The backend types.
pub const Backend = enum {
    io_uring,
    epoll,
    kqueue,
    wasi_poll,
    iocp,

    /// Returns a recommended default backend from inspecting the system.
    pub fn default() Backend {
        return @as(?Backend, switch (builtin.os.tag) {
            .linux => .io_uring,
            .ios, .macos => .kqueue,
            .wasi => .wasi_poll,
            .windows => .iocp,
            else => null,
        }) orelse {
            @compileLog(builtin.os);
            @compileError("no default backend for this target");
        };
    }

    /// Returns the Api (return value of Xev) for the given backend type.
    pub fn Api(comptime self: Backend) type {
        return switch (self) {
            .io_uring => IO_Uring,
            .epoll => Epoll,
            .kqueue => Kqueue,
            .wasi_poll => WasiPoll,
            .iocp => IOCP,
        };
    }
};
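// For example, code that wants a specific backend rather than the platform
// default can use these exports directly (sketch, untested here):
//
//     var loop = try @import("xev").Epoll.Loop.init(.{});
//
// would force the epoll backend on Linux instead of the io_uring default.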
/// Creates the Xev API based on a backend type.
///
/// For the default backend type for your system (i.e. io_uring on Linux),
/// this is the main API you interact with. It is `usingnamespace`d into
/// the "xev" package so you'd use types such as `xev.Loop`, `xev.Completion`,
/// etc.
///
/// Unless you're using a custom or specific backend type, you do NOT ever
/// need to call the Xev function itself.
pub fn Xev(comptime be: Backend, comptime T: type) type {
    return struct {
        const Self = @This();
        const loop = @import("loop.zig");

        /// The backend that this is. This is supplied at comptime so
        /// it is up to the caller to say the right thing. This lets custom
        /// implementations also "quack" like an implementation.
        pub const backend = be;

        /// The core loop APIs.
        pub const Loop = T.Loop;
        pub const Completion = T.Completion;
        pub const Result = T.Result;
        pub const ReadBuffer = T.ReadBuffer;
        pub const WriteBuffer = T.WriteBuffer;
        pub const Options = loop.Options;
        pub const RunMode = loop.RunMode;
        pub const CallbackAction = loop.CallbackAction;
        pub const CompletionState = loop.CompletionState;

        /// Error types
        pub const AcceptError = T.AcceptError;
        pub const CancelError = T.CancelError;
        pub const CloseError = T.CloseError;
        pub const ConnectError = T.ConnectError;
        pub const ShutdownError = T.ShutdownError;
        pub const WriteError = T.WriteError;
        pub const ReadError = T.ReadError;

        /// The high-level helper interfaces that make it easier to perform
        /// common tasks. These may not work with all possible Loop implementations.
        pub const Async = @import("watcher/async.zig").Async(Self);
        pub const File = @import("watcher/file.zig").File(Self);
        pub const Process = @import("watcher/process.zig").Process(Self);
        pub const Stream = stream.GenericStream(Self);
        pub const Timer = @import("watcher/timer.zig").Timer(Self);
        pub const TCP = @import("watcher/tcp.zig").TCP(Self);
        pub const UDP = @import("watcher/udp.zig").UDP(Self);

        /// The callback of the main Loop operations. Higher level interfaces may
        /// use a different callback mechanism.
        pub const Callback = *const fn (
            userdata: ?*anyopaque,
            loop: *Loop,
            completion: *Completion,
            result: Result,
        ) CallbackAction;

        /// A way to access the raw type.
        pub const Sys = T;

        /// A callback that does nothing and immediately disarms. This
        /// implements xev.Callback and is the default value for completions.
        pub fn noopCallback(
            _: ?*anyopaque,
            _: *Loop,
            _: *Completion,
            _: Result,
        ) CallbackAction {
            return .disarm;
        }

        test {
            @import("std").testing.refAllDecls(@This());
        }

        test "completion is zero-able" {
            const c: Completion = .{};
            _ = c;
        }
    };
}

test {
    // Tested on all platforms
    _ = @import("heap.zig");
    _ = @import("queue.zig");
    _ = @import("queue_mpsc.zig");
    _ = ThreadPool;

    // Test the C API
    if (builtin.os.tag != .wasi) _ = @import("c_api.zig");

    // OS-specific tests
    switch (builtin.os.tag) {
        .linux => {
            _ = Epoll;
            _ = IO_Uring;
            _ = @import("linux/timerfd.zig");
        },

        .wasi => {
            //_ = WasiPoll;
            _ = @import("backend/wasi_poll.zig");
        },

        .windows => {
            _ = @import("backend/iocp.zig");
        },

        else => {},
    }
}
101
deps/libxev/src/queue.zig
vendored
Normal file
@@ -0,0 +1,101 @@
const std = @import("std");
const assert = std.debug.assert;

/// An intrusive queue implementation. The type T must have a field
/// "next" of type `?*T`.
///
/// For those unaware, an intrusive variant of a data structure is one in which
/// the data type in the list has the pointer to the next element, rather
/// than a higher level "node" or "container" type. The primary benefit
/// of this (and the reason we implement this) is that it defers all memory
/// management to the caller: the data structure implementation doesn't need
/// to allocate "nodes" to contain each element. Instead, the caller provides
/// the element and how it's allocated is up to them.
pub fn Intrusive(comptime T: type) type {
    return struct {
        const Self = @This();

        /// Head is the front of the queue and tail is the back of the queue.
        head: ?*T = null,
        tail: ?*T = null,

        /// Enqueue a new element to the back of the queue.
        pub fn push(self: *Self, v: *T) void {
            assert(v.next == null);

            if (self.tail) |tail| {
                // If we have elements in the queue, then we add a new tail.
                tail.next = v;
                self.tail = v;
            } else {
                // No elements in the queue; we set up the initial state.
                self.head = v;
                self.tail = v;
            }
        }

        /// Dequeue the next element from the queue.
        pub fn pop(self: *Self) ?*T {
            // The next element is in "head".
            const next = self.head orelse return null;

            // If the head and tail are equal this is the last element
            // so we also set tail to null so we can now be empty.
            if (self.head == self.tail) self.tail = null;

            // Head is whatever is next (if we're the last element,
            // this will be null).
            self.head = next.next;

            // We set the "next" field to null so that this element
            // can be inserted again.
            next.next = null;
            return next;
        }

        /// Returns true if the queue is empty.
        pub fn empty(self: *const Self) bool {
            return self.head == null;
        }
    };
}
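// A minimal usage sketch (hypothetical element type; the test below is the
// canonical example): any struct with a `next: ?*T` field can be queued.
//
//     const Job = struct { next: ?*Job = null };
//     var q: Intrusive(Job) = .{};
//     var job: Job = .{};
//     q.push(&job);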
test Intrusive {
    const testing = std.testing;

    // Types
    const Elem = struct {
        const Self = @This();
        next: ?*Self = null,
    };
    const Queue = Intrusive(Elem);
    var q: Queue = .{};
    try testing.expect(q.empty());

    // Elems
    var elems: [10]Elem = .{.{}} ** 10;

    // One
    try testing.expect(q.pop() == null);
    q.push(&elems[0]);
    try testing.expect(!q.empty());
    try testing.expect(q.pop().? == &elems[0]);
    try testing.expect(q.pop() == null);
    try testing.expect(q.empty());

    // Two
    try testing.expect(q.pop() == null);
    q.push(&elems[0]);
    q.push(&elems[1]);
    try testing.expect(q.pop().? == &elems[0]);
    try testing.expect(q.pop().? == &elems[1]);
    try testing.expect(q.pop() == null);

    // Interleaved
    try testing.expect(q.pop() == null);
    q.push(&elems[0]);
    try testing.expect(q.pop().? == &elems[0]);
    q.push(&elems[1]);
    try testing.expect(q.pop().? == &elems[1]);
    try testing.expect(q.pop() == null);
}
116
deps/libxev/src/queue_mpsc.zig
vendored
Normal file
@@ -0,0 +1,116 @@
const std = @import("std");
const assert = std.debug.assert;

/// An intrusive MPSC (multi-producer, single consumer) queue implementation.
/// The type T must have a field "next" of type `?*T`.
///
/// This is an implementation of a Vyukov Queue[1].
/// TODO(mitchellh): I haven't audited yet if I got all the atomic operations
/// correct. I was short term more focused on getting something that seemed
/// to work; I need to make sure it actually works.
///
/// For those unaware, an intrusive variant of a data structure is one in which
/// the data type in the list has the pointer to the next element, rather
/// than a higher level "node" or "container" type. The primary benefit
/// of this (and the reason we implement this) is that it defers all memory
/// management to the caller: the data structure implementation doesn't need
/// to allocate "nodes" to contain each element. Instead, the caller provides
/// the element and how it's allocated is up to them.
///
/// [1]: https://www.1024cores.net/home/lock-free-algorithms/queues/intrusive-mpsc-node-based-queue
pub fn Intrusive(comptime T: type) type {
    return struct {
        const Self = @This();

        /// Head is the front of the queue and tail is the back of the queue.
        head: *T,
        tail: *T,
        stub: T,

        /// Initialize the queue. This requires a stable pointer to itself.
        /// This must be called before the queue is used concurrently.
        pub fn init(self: *Self) void {
            self.head = &self.stub;
            self.tail = &self.stub;
            self.stub.next = null;
        }
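        // The stub element is what makes the Vyukov scheme work without
        // locks: head always points at some element, so producers can
        // blindly swap it, and the consumer re-pushes the stub once it has
        // drained the last real element.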
/// Push an item onto the queue. This can be called by any number
|
||||
/// of producers.
        pub fn push(self: *Self, v: *T) void {
            @atomicStore(?*T, &v.next, null, .unordered);
            const prev = @atomicRmw(*T, &self.head, .Xchg, v, .acq_rel);
            @atomicStore(?*T, &prev.next, v, .release);
        }

        /// Pop the first element from the queue. This must be called
        /// by only a single consumer at any given time.
        pub fn pop(self: *Self) ?*T {
            var tail = @atomicLoad(*T, &self.tail, .unordered);
            var next_ = @atomicLoad(?*T, &tail.next, .acquire);
            if (tail == &self.stub) {
                const next = next_ orelse return null;
                @atomicStore(*T, &self.tail, next, .unordered);
                tail = next;
                next_ = @atomicLoad(?*T, &tail.next, .acquire);
            }

            if (next_) |next| {
                @atomicStore(*T, &self.tail, next, .release);
                tail.next = null;
                return tail;
            }

            const head = @atomicLoad(*T, &self.head, .unordered);
            if (tail != head) return null;
            self.push(&self.stub);

            next_ = @atomicLoad(?*T, &tail.next, .acquire);
            if (next_) |next| {
                @atomicStore(*T, &self.tail, next, .unordered);
                tail.next = null;
                return tail;
            }

            return null;
        }
    };
}

test Intrusive {
    const testing = std.testing;

    // Types
    const Elem = struct {
        const Self = @This();
        next: ?*Self = null,
    };
    const Queue = Intrusive(Elem);
    var q: Queue = undefined;
    q.init();

    // Elems
    var elems: [10]Elem = .{.{}} ** 10;

    // One
    try testing.expect(q.pop() == null);
    q.push(&elems[0]);
    try testing.expect(q.pop().? == &elems[0]);
    try testing.expect(q.pop() == null);

    // Two
    try testing.expect(q.pop() == null);
    q.push(&elems[0]);
    q.push(&elems[1]);
    try testing.expect(q.pop().? == &elems[0]);
    try testing.expect(q.pop().? == &elems[1]);
    try testing.expect(q.pop() == null);

    // Interleaved
    try testing.expect(q.pop() == null);
    q.push(&elems[0]);
    try testing.expect(q.pop().? == &elems[0]);
    q.push(&elems[1]);
    try testing.expect(q.pop().? == &elems[1]);
    try testing.expect(q.pop() == null);
}
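
// A minimal additional sketch: it relies only on behavior visible above --
// pop() clears the popped element's `next` pointer -- to show that an
// element may be safely re-pushed after it has been returned.
test "Intrusive: element reuse after pop" {
    const testing = std.testing;

    const Elem = struct {
        const Self = @This();
        next: ?*Self = null,
    };
    const Queue = Intrusive(Elem);
    var q: Queue = undefined;
    q.init();

    var e: Elem = .{};
    q.push(&e);
    try testing.expect(q.pop().? == &e);

    // Safe: pop() reset e.next to null before returning it.
    q.push(&e);
    try testing.expect(q.pop().? == &e);
    try testing.expect(q.pop() == null);
}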
630
deps/libxev/src/watcher/async.zig
vendored
Normal file
@@ -0,0 +1,630 @@
/// "Wake up" an event loop from any thread using an async completion.
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const assert = std.debug.assert;
|
||||
const posix = std.posix;
|
||||
const common = @import("common.zig");
|
||||
|
||||
pub fn Async(comptime xev: type) type {
|
||||
return switch (xev.backend) {
|
||||
// Supported, uses eventfd
|
||||
.io_uring,
|
||||
.epoll,
|
||||
=> AsyncEventFd(xev),
|
||||
|
||||
// Supported, uses the backend API
|
||||
.wasi_poll => AsyncLoopState(xev, xev.Loop.threaded),
|
||||
|
||||
// Supported, uses mach ports
|
||||
.kqueue => AsyncMachPort(xev),
|
||||
.iocp => AsyncIOCP(xev),
|
||||
};
|
||||
}
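
// Usage sketch (assuming the generic `xev` API exercised by the tests at the
// bottom of this file): create the async once, register a wait on a loop,
// then wake that loop from any thread via notify().
//
//     var notifier = try xev.Async.init();
//     defer notifier.deinit();
//
//     var c: xev.Completion = undefined;
//     notifier.wait(&loop, &c, void, null, callback);
//
//     // From any other thread:
//     try notifier.notify();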

/// Async implementation using eventfd (Linux).
fn AsyncEventFd(comptime xev: type) type {
    return struct {
        const Self = @This();

        /// The error that can come in the wait callback.
        pub const WaitError = xev.ReadError;

        /// eventfd file descriptor
        fd: posix.fd_t,

        /// Create a new async. An async can be assigned to exactly one loop
        /// to be woken up. The completion must be allocated in advance.
        pub fn init() !Self {
            return .{
                .fd = try std.posix.eventfd(0, 0),
            };
        }

        /// Clean up the async. This will forcibly deinitialize any resources
        /// and may result in erroneous wait callbacks to be fired.
        pub fn deinit(self: *Self) void {
            std.posix.close(self.fd);
        }

        /// Wait for a message on this async. Note that async messages may be
        /// coalesced (or they may not be) so you should not expect a 1:1 mapping
        /// between send and wait.
        ///
        /// Just like the rest of libxev, the wait must be re-queued if you want
        /// to continue to be notified of async events.
        ///
        /// You should NOT register an async with multiple loops (the same loop
        /// is fine -- but unnecessary). The behavior when waiting on multiple
        /// loops is undefined.
        pub fn wait(
            self: Self,
            loop: *xev.Loop,
            c: *xev.Completion,
            comptime Userdata: type,
            userdata: ?*Userdata,
            comptime cb: *const fn (
                ud: ?*Userdata,
                l: *xev.Loop,
                c: *xev.Completion,
                r: WaitError!void,
            ) xev.CallbackAction,
        ) void {
            c.* = .{
                .op = .{
                    .read = .{
                        .fd = self.fd,
                        .buffer = .{ .array = undefined },
                    },
                },

                .userdata = userdata,
                .callback = (struct {
                    fn callback(
                        ud: ?*anyopaque,
                        l_inner: *xev.Loop,
                        c_inner: *xev.Completion,
                        r: xev.Result,
                    ) xev.CallbackAction {
                        return @call(.always_inline, cb, .{
                            common.userdataValue(Userdata, ud),
                            l_inner,
                            c_inner,
                            if (r.read) |v| assert(v > 0) else |err| err,
                        });
                    }
                }).callback,
            };
            loop.add(c);
        }

        /// Notify a loop to wake up synchronously. This should never block forever
        /// (it will always EVENTUALLY succeed regardless of whether the loop is
        /// currently ticking or not).
        ///
        /// The "c" value is the completion associated with the "wait".
        ///
        /// Internal details subject to change but if you're relying on these
        /// details then you may want to consider using a lower level interface
        /// using the loop directly:
        ///
        /// - linux+io_uring: eventfd is used. If the eventfd write would block
        ///   (EAGAIN) then we assume success because the eventfd is full.
        ///
        pub fn notify(self: Self) !void {
            // We want to just write "1" in the correct byte order as our host.
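            // (eventfd semantics: the fd holds a u64 counter; a write adds
            // to it and wakes any reader. If the counter is saturated the
            // write fails with EAGAIN, which we treat as success below since
            // the fd is already readable and will wake the loop anyway.)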
            const val = @as([8]u8, @bitCast(@as(u64, 1)));
            _ = posix.write(self.fd, &val) catch |err| switch (err) {
                error.WouldBlock => return,
                else => return err,
            };
        }

        /// Common tests
        pub usingnamespace AsyncTests(xev, Self);
    };
}

/// Async implementation using mach ports (Darwin).
///
/// This allocates a mach port per async request and sends to that mach
/// port to wake up the loop and trigger the completion.
fn AsyncMachPort(comptime xev: type) type {
    return struct {
        const Self = @This();

        /// The error that can come in the wait callback.
        pub const WaitError = xev.Sys.MachPortError;

        /// Missing Mach APIs from Zig stdlib. Data from xnu: osfmk/mach/port.h
        const mach_port_flavor_t = c_int;
        const mach_port_limits = extern struct { mpl_qlimit: c_uint };
        const MACH_PORT_LIMITS_INFO = 1;
        extern "c" fn mach_port_set_attributes(
            task: posix.system.ipc_space_t,
            name: posix.system.mach_port_name_t,
            flavor: mach_port_flavor_t,
            info: *anyopaque,
            count: posix.system.mach_msg_type_number_t,
        ) posix.system.kern_return_t;
        extern "c" fn mach_port_destroy(
            task: posix.system.ipc_space_t,
            name: posix.system.mach_port_name_t,
        ) posix.system.kern_return_t;

        /// The mach port
        port: posix.system.mach_port_name_t,

        /// Create a new async. An async can be assigned to exactly one loop
        /// to be woken up. The completion must be allocated in advance.
        pub fn init() !Self {
            const mach_self = posix.system.mach_task_self();

            // Allocate the port
            var mach_port: posix.system.mach_port_name_t = undefined;
            switch (posix.system.getKernError(posix.system.mach_port_allocate(
                mach_self,
                @intFromEnum(posix.system.MACH_PORT_RIGHT.RECEIVE),
                &mach_port,
            ))) {
                .SUCCESS => {}, // Success
                else => return error.MachPortAllocFailed,
            }
            errdefer _ = mach_port_destroy(mach_self, mach_port);

            // Insert a send right into the port since we also use this to send
            switch (posix.system.getKernError(posix.system.mach_port_insert_right(
                mach_self,
                mach_port,
                mach_port,
                @intFromEnum(posix.system.MACH_MSG_TYPE.MAKE_SEND),
            ))) {
                .SUCCESS => {}, // Success
                else => return error.MachPortAllocFailed,
            }

            // Modify the port queue size to be 1 because we are only
            // using it for notifications and not for any other purpose.
            var limits: mach_port_limits = .{ .mpl_qlimit = 1 };
            switch (posix.system.getKernError(mach_port_set_attributes(
                mach_self,
                mach_port,
                MACH_PORT_LIMITS_INFO,
                &limits,
                @sizeOf(@TypeOf(limits)),
            ))) {
                .SUCCESS => {}, // Success
                else => return error.MachPortAllocFailed,
            }

            return .{
                .port = mach_port,
            };
        }

        /// Clean up the async. This will forcibly deinitialize any resources
        /// and may result in erroneous wait callbacks to be fired.
        pub fn deinit(self: *Self) void {
            _ = mach_port_destroy(
                posix.system.mach_task_self(),
                self.port,
            );
        }

        /// Wait for a message on this async. Note that async messages may be
        /// coalesced (or they may not be) so you should not expect a 1:1 mapping
        /// between send and wait.
        ///
        /// Just like the rest of libxev, the wait must be re-queued if you want
        /// to continue to be notified of async events.
        ///
        /// You should NOT register an async with multiple loops (the same loop
        /// is fine -- but unnecessary). The behavior when waiting on multiple
        /// loops is undefined.
        pub fn wait(
            self: Self,
            loop: *xev.Loop,
            c: *xev.Completion,
            comptime Userdata: type,
            userdata: ?*Userdata,
            comptime cb: *const fn (
                ud: ?*Userdata,
                l: *xev.Loop,
                c: *xev.Completion,
                r: WaitError!void,
            ) xev.CallbackAction,
        ) void {
            c.* = .{
                .op = .{
                    .machport = .{
                        .port = self.port,
                        .buffer = .{ .array = undefined },
                    },
                },

                .userdata = userdata,
                .callback = (struct {
                    fn callback(
                        ud: ?*anyopaque,
                        l_inner: *xev.Loop,
                        c_inner: *xev.Completion,
                        r: xev.Result,
                    ) xev.CallbackAction {
                        // Drain the mach port so that we only fire one
                        // notification even if many are queued.
                        drain(c_inner.op.machport.port);

                        return @call(.always_inline, cb, .{
                            common.userdataValue(Userdata, ud),
                            l_inner,
                            c_inner,
                            if (r.machport) |_| {} else |err| err,
                        });
                    }
                }).callback,
            };

            loop.add(c);
        }

        /// Drain the given mach port. All message bodies are discarded.
        fn drain(port: posix.system.mach_port_name_t) void {
            var message: struct {
                header: posix.system.mach_msg_header_t,
            } = undefined;

            while (true) {
                switch (posix.system.getMachMsgError(posix.system.mach_msg(
                    &message.header,
                    posix.system.MACH_RCV_MSG | posix.system.MACH_RCV_TIMEOUT,
                    0,
                    @sizeOf(@TypeOf(message)),
                    port,
                    posix.system.MACH_MSG_TIMEOUT_NONE,
                    posix.system.MACH_PORT_NULL,
                ))) {
                    // This means a read would've blocked, so we drained.
                    .RCV_TIMED_OUT => return,

                    // We dequeued, so we want to loop again.
                    .SUCCESS => {},

                    // We dequeued but the message had a body. We ignore
                    // message bodies for async so we are happy to discard
                    // it and continue.
                    .RCV_TOO_LARGE => {},

                    else => |err| {
                        std.log.warn("mach msg drain err, may duplicate async wakeups err={}", .{err});
                        return;
                    },
                }
            }
        }

        /// Notify a loop to wake up synchronously. This should never block forever
        /// (it will always EVENTUALLY succeed regardless of whether the loop is
        /// currently ticking or not).
        pub fn notify(self: Self) !void {
            // This constructs an empty mach message. It has no data.
            var msg: posix.system.mach_msg_header_t = .{
                // We use COPY_SEND which will not increment any send ref
                // counts because it'll reuse the existing send right.
                .msgh_bits = @intFromEnum(posix.system.MACH_MSG_TYPE.COPY_SEND),
                .msgh_size = @sizeOf(posix.system.mach_msg_header_t),
                .msgh_remote_port = self.port,
                .msgh_local_port = posix.system.MACH_PORT_NULL,
                .msgh_voucher_port = undefined,
                .msgh_id = undefined,
            };

            return switch (posix.system.getMachMsgError(
                posix.system.mach_msg(
                    &msg,
                    posix.system.MACH_SEND_MSG | posix.system.MACH_SEND_TIMEOUT,
                    msg.msgh_size,
                    0,
                    posix.system.MACH_PORT_NULL,
                    0, // Fail instantly if the port is full
                    posix.system.MACH_PORT_NULL,
                ),
            )) {
                .SUCCESS => {},
                else => |e| {
                    std.log.warn("mach msg err={}", .{e});
                    return error.MachMsgFailed;
                },

                // This is okay because it means that there was no more buffer
                // space meaning that the port will wake up.
                .SEND_NO_BUFFER => {},

                // This means that the send would've blocked because the
                // queue is full. We assume success because the port is full.
                .SEND_TIMED_OUT => {},
            };
        }

        /// Common tests
        pub usingnamespace AsyncTests(xev, Self);
    };
}

/// Async implementation that is deferred to the backend implementation
/// loop state. This is kind of a hacky implementation and not recommended
/// but it's the only way currently to get asyncs to work on WASI.
fn AsyncLoopState(comptime xev: type, comptime threaded: bool) type {
    // TODO: we don't support threaded loop state async. We _can_; it just
    // isn't done yet. To support it we need to have some sort of mutex
    // to guard waiter below.
    if (threaded) return struct {};

    return struct {
        const Self = @This();

        wakeup: bool = false,
        waiter: ?struct {
            loop: *xev.Loop,
            c: *xev.Completion,
        } = null,

        /// The error that can come in the wait callback.
        pub const WaitError = xev.Sys.AsyncError;

        pub fn init() !Self {
            return .{};
        }

        pub fn deinit(self: *Self) void {
            _ = self;
        }

        pub fn wait(
            self: *Self,
            loop: *xev.Loop,
            c: *xev.Completion,
            comptime Userdata: type,
            userdata: ?*Userdata,
            comptime cb: *const fn (
                ud: ?*Userdata,
                l: *xev.Loop,
                c: *xev.Completion,
                r: WaitError!void,
            ) xev.CallbackAction,
        ) void {
            c.* = .{
                .op = .{
                    .async_wait = .{},
                },
                .userdata = userdata,
                .callback = (struct {
                    fn callback(
                        ud: ?*anyopaque,
                        l_inner: *xev.Loop,
                        c_inner: *xev.Completion,
                        r: xev.Result,
                    ) xev.CallbackAction {
                        return @call(.always_inline, cb, .{
                            common.userdataValue(Userdata, ud),
                            l_inner,
                            c_inner,
                            if (r.async_wait) |_| {} else |err| err,
                        });
                    }
                }).callback,
            };
            loop.add(c);

            self.waiter = .{
                .loop = loop,
                .c = c,
            };

            if (self.wakeup) self.notify() catch {};
        }

        pub fn notify(self: *Self) !void {
            if (self.waiter) |w|
                w.loop.async_notify(w.c)
            else
                self.wakeup = true;
        }

        /// Common tests
        pub usingnamespace AsyncTests(xev, Self);
    };
}

/// Async implementation for IOCP.
fn AsyncIOCP(comptime xev: type) type {
    return struct {
        const Self = @This();
        const windows = std.os.windows;

        pub const WaitError = xev.Sys.AsyncError;

        guard: std.Thread.Mutex = .{},
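        // The mutex above guards `wakeup` and `waiter` below: unlike the
        // WASI variant, notify() here may be called from any thread.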
        wakeup: bool = false,
        waiter: ?struct {
            loop: *xev.Loop,
            c: *xev.Completion,
        } = null,

        pub fn init() !Self {
            return Self{};
        }

        pub fn deinit(self: *Self) void {
            _ = self;
        }

        pub fn wait(
            self: *Self,
            loop: *xev.Loop,
            c: *xev.Completion,
            comptime Userdata: type,
            userdata: ?*Userdata,
            comptime cb: *const fn (
                ud: ?*Userdata,
                l: *xev.Loop,
                c: *xev.Completion,
                r: WaitError!void,
            ) xev.CallbackAction,
        ) void {
            c.* = xev.Completion{
                .op = .{ .async_wait = .{} },
                .userdata = userdata,
                .callback = (struct {
                    fn callback(
                        ud: ?*anyopaque,
                        l_inner: *xev.Loop,
                        c_inner: *xev.Completion,
                        r: xev.Result,
                    ) xev.CallbackAction {
                        return @call(.always_inline, cb, .{
                            common.userdataValue(Userdata, ud),
                            l_inner,
                            c_inner,
                            if (r.async_wait) |_| {} else |err| err,
                        });
                    }
                }).callback,
            };
            loop.add(c);

            self.guard.lock();
            defer self.guard.unlock();

            self.waiter = .{
                .loop = loop,
                .c = c,
            };

            if (self.wakeup) loop.async_notify(c);
        }

        pub fn notify(self: *Self) !void {
            self.guard.lock();
            defer self.guard.unlock();

            if (self.waiter) |w| {
                w.loop.async_notify(w.c);
            } else {
                self.wakeup = true;
            }
        }

        /// Common tests
        pub usingnamespace AsyncTests(xev, Self);
    };
}

fn AsyncTests(comptime xev: type, comptime Impl: type) type {
    return struct {
        test "async" {
            const testing = std.testing;

            var loop = try xev.Loop.init(.{});
            defer loop.deinit();

            var notifier = try Impl.init();
            defer notifier.deinit();

            // Wait
            var wake: bool = false;
            var c_wait: xev.Completion = undefined;
            notifier.wait(&loop, &c_wait, bool, &wake, (struct {
                fn callback(
                    ud: ?*bool,
                    _: *xev.Loop,
                    _: *xev.Completion,
                    r: Impl.WaitError!void,
                ) xev.CallbackAction {
                    _ = r catch unreachable;
                    ud.?.* = true;
                    return .disarm;
                }
            }).callback);

            // Send a notification
            try notifier.notify();

            // Wait for wake
            try loop.run(.until_done);
            try testing.expect(wake);
        }

        test "async: notify first" {
            const testing = std.testing;

            var loop = try xev.Loop.init(.{});
            defer loop.deinit();

            var notifier = try Impl.init();
            defer notifier.deinit();

            // Send a notification
            try notifier.notify();

            // Wait
            var wake: bool = false;
            var c_wait: xev.Completion = undefined;
            notifier.wait(&loop, &c_wait, bool, &wake, (struct {
                fn callback(
                    ud: ?*bool,
                    _: *xev.Loop,
                    _: *xev.Completion,
                    r: Impl.WaitError!void,
                ) xev.CallbackAction {
                    _ = r catch unreachable;
                    ud.?.* = true;
                    return .disarm;
                }
            }).callback);

            // Wait for wake
            try loop.run(.until_done);
            try testing.expect(wake);
        }

        test "async batches multiple notifications" {
            const testing = std.testing;

            var loop = try xev.Loop.init(.{});
            defer loop.deinit();

            var notifier = try Impl.init();
            defer notifier.deinit();

            // Send a notification many times
            try notifier.notify();
            try notifier.notify();
            try notifier.notify();
            try notifier.notify();
            try notifier.notify();

            // Wait
            var count: u32 = 0;
            var c_wait: xev.Completion = undefined;
            notifier.wait(&loop, &c_wait, u32, &count, (struct {
                fn callback(
                    ud: ?*u32,
                    _: *xev.Loop,
                    _: *xev.Completion,
                    r: Impl.WaitError!void,
                ) xev.CallbackAction {
                    _ = r catch unreachable;
                    ud.?.* += 1;
                    return .rearm;
                }
            }).callback);

            // Send a notification
            try notifier.notify();

            // Wait for wake
            try loop.run(.once);
            for (0..10) |_| try loop.run(.no_wait);
            try testing.expectEqual(@as(u32, 1), count);
        }
    };
}
7
deps/libxev/src/watcher/common.zig
vendored
Normal file
@@ -0,0 +1,7 @@
/// Convert the callback value with an opaque pointer into the userdata type
/// that we can pass to our higher level callback types.
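/// For example, a callback that receives `ud: ?*anyopaque` can recover its
/// typed state with `common.userdataValue(MyState, ud)`, where MyState is
/// whatever pointer type was originally registered as userdata.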
pub fn userdataValue(comptime Userdata: type, v: ?*anyopaque) ?*Userdata {
    // Void userdata is always a null pointer.
    if (Userdata == void) return null;
    return @ptrCast(@alignCast(v));
}
542
deps/libxev/src/watcher/file.zig
vendored
Normal file
@@ -0,0 +1,542 @@
const std = @import("std");
const builtin = @import("builtin");
const common = @import("common.zig");
const assert = std.debug.assert;
const posix = std.posix;
const main = @import("../main.zig");
const stream = @import("stream.zig");

/// File operations.
///
/// These operations typically run on the event loop thread pool, rather
/// than the core async OS APIs, because most core async OS APIs don't support
/// async operations on regular files (with many caveats attached to that
/// statement). This high-level abstraction will attempt to make the right
/// decision about what to do but this should generally be used by
/// operations that need to run on a thread pool. For operations that you're
/// sure are better supported by core async OS APIs (such as sockets, pipes,
/// TTYs, etc.), use a specific high-level abstraction like xev.TCP or
/// the generic xev.Stream.
///
/// This is a "higher-level abstraction" in libxev. The goal of higher-level
/// abstractions in libxev is to make it easier to use specific functionality
/// with the event loop, but they do not promise perfect flexibility or optimal
/// performance. In almost all cases, the abstraction is good enough. But,
/// if you have specific needs or want to push for the most optimal performance,
/// use the platform-specific Loop directly.
pub fn File(comptime xev: type) type {
    return struct {
        const Self = @This();
        const FdType = if (xev.backend == .iocp) std.os.windows.HANDLE else posix.socket_t;

        /// The underlying file
        fd: FdType,

        pub usingnamespace stream.Stream(xev, Self, .{
            .close = true,
            .read = .read,
            .write = .write,
            .threadpool = true,
        });

        /// Initialize a File from a std.fs.File.
        pub fn init(file: std.fs.File) !Self {
            return .{
                .fd = file.handle,
            };
        }

        /// Initialize a File from a file descriptor.
        pub fn initFd(fd: std.fs.File.Handle) Self {
            return .{
                .fd = fd,
            };
        }

        /// Clean up any watcher resources. This does NOT close the file.
        /// If you want to close the file you must call close or do so
        /// synchronously.
        pub fn deinit(self: *const Self) void {
            _ = self;
        }
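
        // Usage sketch for pread below (mirroring the pread/pwrite test at
        // the bottom of this file): read at an absolute offset without
        // moving any file cursor.
        //
        //     var c: xev.Completion = undefined;
        //     file.pread(&loop, &c, .{ .slice = &buf }, 0, usize, &len, cb);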

        pub fn pread(
            self: Self,
            loop: *xev.Loop,
            c: *xev.Completion,
            buf: xev.ReadBuffer,
            offset: u64,
            comptime Userdata: type,
            userdata: ?*Userdata,
            comptime cb: *const fn (
                ud: ?*Userdata,
                l: *xev.Loop,
                c: *xev.Completion,
                s: Self,
                b: xev.ReadBuffer,
                r: Self.ReadError!usize,
            ) xev.CallbackAction,
        ) void {
            switch (buf) {
                inline .slice, .array => {
                    c.* = .{
                        .op = .{
                            .pread = .{
                                .fd = self.fd,
                                .buffer = buf,
                                .offset = offset,
                            },
                        },
                        .userdata = userdata,
                        .callback = (struct {
                            fn callback(
                                ud: ?*anyopaque,
                                l_inner: *xev.Loop,
                                c_inner: *xev.Completion,
                                r: xev.Result,
                            ) xev.CallbackAction {
                                return @call(.always_inline, cb, .{
                                    common.userdataValue(Userdata, ud),
                                    l_inner,
                                    c_inner,
                                    Self.initFd(c_inner.op.pread.fd),
                                    c_inner.op.pread.buffer,
                                    if (r.pread) |v| v else |err| err,
                                });
                            }
                        }).callback,
                    };

                    // These backends can't do async file I/O, so ask for
                    // the operation to run on the thread pool instead.
                    switch (xev.backend) {
                        .io_uring,
                        .wasi_poll,
                        .iocp,
                        => {},

                        .epoll => {
                            c.flags.threadpool = true;
                        },

                        .kqueue => {
                            c.flags.threadpool = true;
                        },
                    }

                    loop.add(c);
                },
            }
        }

        pub fn queuePWrite(
            self: Self,
            loop: *xev.Loop,
            q: *Self.WriteQueue,
            req: *Self.WriteRequest,
            buf: xev.WriteBuffer,
            offset: u64,
            comptime Userdata: type,
            userdata: ?*Userdata,
            comptime cb: *const fn (
                ud: ?*Userdata,
                l: *xev.Loop,
                c: *xev.Completion,
                s: Self,
                b: xev.WriteBuffer,
                r: Self.WriteError!usize,
            ) xev.CallbackAction,
        ) void {
            // Initialize our completion
            req.* = .{};
            self.pwrite_init(&req.completion, buf, offset);
            req.completion.userdata = q;
            req.completion.callback = (struct {
                fn callback(
                    ud: ?*anyopaque,
                    l_inner: *xev.Loop,
                    c_inner: *xev.Completion,
                    r: xev.Result,
                ) xev.CallbackAction {
                    const q_inner = @as(?*Self.WriteQueue, @ptrCast(@alignCast(ud))).?;

                    // The queue MUST have a request because a completion
                    // can only be added if the queue is not empty, and
                    // nothing else should be popping!
                    const req_inner = q_inner.pop().?;

                    const cb_res = pwrite_result(c_inner, r);
                    const action = @call(.always_inline, cb, .{
                        common.userdataValue(Userdata, req_inner.userdata),
                        l_inner,
                        c_inner,
                        cb_res.writer,
                        cb_res.buf,
                        cb_res.result,
                    });

                    // A rearm re-queues this same request at the back of
                    // the queue; the callback never returns rearm to the
                    // loop from here...
                    if (action == .rearm) q_inner.push(req_inner);

                    // If we have another request, add that completion next.
                    if (q_inner.head) |req_next| l_inner.add(&req_next.completion);

                    // We always disarm because the completion in the next
                    // request will be used if there is more to queue.
                    return .disarm;
                }
            }).callback;

            // The userdata has to go on the WriteRequest because we need
            // our actual completion userdata to be the WriteQueue so that
            // we can process the queue.
            req.userdata = @as(?*anyopaque, @ptrCast(@alignCast(userdata)));

            // If the queue is empty, then we add our completion. Otherwise,
            // the previously queued writes will trigger this one.
            if (q.empty()) loop.add(&req.completion);

            // We always add this item to our queue no matter what
            q.push(req);
        }

        pub fn pwrite(
            self: Self,
            loop: *xev.Loop,
            c: *xev.Completion,
            buf: xev.WriteBuffer,
            offset: u64,
            comptime Userdata: type,
            userdata: ?*Userdata,
            comptime cb: *const fn (
                ud: ?*Userdata,
                l: *xev.Loop,
                c: *xev.Completion,
                s: Self,
                b: xev.WriteBuffer,
                r: Self.WriteError!usize,
            ) xev.CallbackAction,
        ) void {
            self.pwrite_init(c, buf, offset);
            c.userdata = userdata;
            c.callback = (struct {
                fn callback(
                    ud: ?*anyopaque,
                    l_inner: *xev.Loop,
                    c_inner: *xev.Completion,
                    r: xev.Result,
                ) xev.CallbackAction {
                    const cb_res = pwrite_result(c_inner, r);
                    return @call(.always_inline, cb, .{
                        common.userdataValue(Userdata, ud),
                        l_inner,
                        c_inner,
                        cb_res.writer,
                        cb_res.buf,
                        cb_res.result,
                    });
                }
            }).callback;

            loop.add(c);
        }

        inline fn pwrite_result(c: *xev.Completion, r: xev.Result) struct {
            writer: Self,
            buf: xev.WriteBuffer,
            result: Self.WriteError!usize,
        } {
            return .{
                .writer = Self.initFd(c.op.pwrite.fd),
                .buf = c.op.pwrite.buffer,
                .result = if (r.pwrite) |v| v else |err| err,
            };
        }

        fn pwrite_init(
            self: Self,
            c: *xev.Completion,
            buf: xev.WriteBuffer,
            offset: u64,
        ) void {
            switch (buf) {
                inline .slice, .array => {
                    c.* = .{
                        .op = .{
                            .pwrite = .{
                                .fd = self.fd,
                                .buffer = buf,
                                .offset = offset,
                            },
                        },
                    };

                    // These backends can't do async file I/O, so ask for
                    // the operation to run on the thread pool instead.
                    switch (xev.backend) {
                        .io_uring,
                        .wasi_poll,
                        .iocp,
                        => {},

                        .epoll => {
                            c.flags.threadpool = true;
                        },

                        .kqueue => {
                            c.flags.threadpool = true;
                        },
                    }
                },
            }
        }

        test "read/write" {
            // wasi: local files don't work with poll (always ready)
            if (builtin.os.tag == .wasi) return error.SkipZigTest;
            // windows: std.fs.File is not opened with OVERLAPPED flag.
            if (builtin.os.tag == .windows) return error.SkipZigTest;

            const testing = std.testing;

            var tpool = main.ThreadPool.init(.{});
            defer tpool.deinit();
            defer tpool.shutdown();
            var loop = try xev.Loop.init(.{ .thread_pool = &tpool });
            defer loop.deinit();

            // Create our file
            const path = "test_watcher_file";
            const f = try std.fs.cwd().createFile(path, .{
                .read = true,
                .truncate = true,
            });
            defer f.close();
            defer std.fs.cwd().deleteFile(path) catch {};

            const file = try init(f);

            // Perform a write and then a read
            var write_buf = [_]u8{ 1, 1, 2, 3, 5, 8, 13 };
            var c_write: xev.Completion = undefined;
            file.write(&loop, &c_write, .{ .slice = &write_buf }, void, null, (struct {
                fn callback(
                    _: ?*void,
                    _: *xev.Loop,
                    _: *xev.Completion,
                    _: Self,
                    _: xev.WriteBuffer,
                    r: Self.WriteError!usize,
                ) xev.CallbackAction {
                    _ = r catch unreachable;
                    return .disarm;
                }
            }).callback);

            // Wait for the write
            try loop.run(.until_done);

            // Make sure the data is on disk
            try f.sync();

            const f2 = try std.fs.cwd().openFile(path, .{});
            defer f2.close();
            const file2 = try init(f2);

            // Read
            var read_buf: [128]u8 = undefined;
            var read_len: usize = 0;
            file2.read(&loop, &c_write, .{ .slice = &read_buf }, usize, &read_len, (struct {
                fn callback(
                    ud: ?*usize,
                    _: *xev.Loop,
                    _: *xev.Completion,
                    _: Self,
                    _: xev.ReadBuffer,
                    r: Self.ReadError!usize,
                ) xev.CallbackAction {
                    ud.?.* = r catch unreachable;
                    return .disarm;
                }
            }).callback);

            try loop.run(.until_done);
            try testing.expectEqual(read_len, write_buf.len);
            try testing.expectEqualSlices(u8, &write_buf, read_buf[0..read_len]);
        }

        test "pread/pwrite" {
            // wasi: local files don't work with poll (always ready)
            if (builtin.os.tag == .wasi) return error.SkipZigTest;
            // windows: std.fs.File is not opened with OVERLAPPED flag.
            if (builtin.os.tag == .windows) return error.SkipZigTest;

            const testing = std.testing;

            var tpool = main.ThreadPool.init(.{});
            defer tpool.deinit();
            defer tpool.shutdown();
            var loop = try xev.Loop.init(.{ .thread_pool = &tpool });
            defer loop.deinit();

            // Create our file
            const path = "test_watcher_file";
            const f = try std.fs.cwd().createFile(path, .{
                .read = true,
                .truncate = true,
            });
            defer f.close();
            defer std.fs.cwd().deleteFile(path) catch {};

            const file = try init(f);

            // Perform a write and then a read
            var write_buf = [_]u8{ 1, 1, 2, 3, 5, 8, 13 };
            var c_write: xev.Completion = undefined;
            file.pwrite(&loop, &c_write, .{ .slice = &write_buf }, 0, void, null, (struct {
                fn callback(
                    _: ?*void,
                    _: *xev.Loop,
                    _: *xev.Completion,
                    _: Self,
                    _: xev.WriteBuffer,
                    r: Self.WriteError!usize,
                ) xev.CallbackAction {
                    _ = r catch unreachable;
                    return .disarm;
                }
            }).callback);

            // Wait for the write
            try loop.run(.until_done);

            // Make sure the data is on disk
            try f.sync();

            const f2 = try std.fs.cwd().openFile(path, .{});
            defer f2.close();
            const file2 = try init(f2);

            var read_buf: [128]u8 = undefined;
            var read_len: usize = 0;
            file2.pread(&loop, &c_write, .{ .slice = &read_buf }, 0, usize, &read_len, (struct {
                fn callback(
                    ud: ?*usize,
                    _: *xev.Loop,
                    _: *xev.Completion,
                    _: Self,
                    _: xev.ReadBuffer,
                    r: Self.ReadError!usize,
                ) xev.CallbackAction {
                    ud.?.* = r catch unreachable;
                    return .disarm;
                }
            }).callback);

            try loop.run(.until_done);
            try testing.expectEqualSlices(u8, &write_buf, read_buf[0..read_len]);
        }

        test "queued writes" {
            // wasi: local files don't work with poll (always ready)
            if (builtin.os.tag == .wasi) return error.SkipZigTest;
            // windows: std.fs.File is not opened with OVERLAPPED flag.
            if (builtin.os.tag == .windows) return error.SkipZigTest;

            const testing = std.testing;

            var tpool = main.ThreadPool.init(.{});
            defer tpool.deinit();
            defer tpool.shutdown();
            var loop = try xev.Loop.init(.{ .thread_pool = &tpool });
            defer loop.deinit();

            // Create our file
            const path = "test_watcher_file";
            const f = try std.fs.cwd().createFile(path, .{
                .read = true,
                .truncate = true,
            });
            defer f.close();
            defer std.fs.cwd().deleteFile(path) catch {};

            const file = try init(f);
            var write_queue: Self.WriteQueue = .{};
            var write_req: [2]Self.WriteRequest = undefined;

            // Perform a write and then a read
            file.queueWrite(
                &loop,
                &write_queue,
                &write_req[0],
                .{ .slice = "1234" },
                void,
                null,
                (struct {
                    fn callback(
                        _: ?*void,
                        _: *xev.Loop,
                        _: *xev.Completion,
                        _: Self,
                        _: xev.WriteBuffer,
                        r: Self.WriteError!usize,
                    ) xev.CallbackAction {
                        _ = r catch unreachable;
                        return .disarm;
                    }
                }).callback,
            );
            file.queueWrite(
                &loop,
                &write_queue,
                &write_req[1],
                .{ .slice = "5678" },
                void,
                null,
                (struct {
                    fn callback(
                        _: ?*void,
                        _: *xev.Loop,
                        _: *xev.Completion,
                        _: Self,
                        _: xev.WriteBuffer,
                        r: Self.WriteError!usize,
                    ) xev.CallbackAction {
                        _ = r catch unreachable;
                        return .disarm;
                    }
                }).callback,
            );

            // Wait for the write
            try loop.run(.until_done);

            // Make sure the data is on disk
            try f.sync();

            const f2 = try std.fs.cwd().openFile(path, .{});
            defer f2.close();
            const file2 = try init(f2);

            // Read
            var read_buf: [128]u8 = undefined;
            var read_len: usize = 0;
            var c_read: xev.Completion = undefined;
            file2.read(&loop, &c_read, .{ .slice = &read_buf }, usize, &read_len, (struct {
                fn callback(
                    ud: ?*usize,
                    _: *xev.Loop,
                    _: *xev.Completion,
                    _: Self,
                    _: xev.ReadBuffer,
                    r: Self.ReadError!usize,
                ) xev.CallbackAction {
                    ud.?.* = r catch unreachable;
                    return .disarm;
                }
            }).callback);

            try loop.run(.until_done);
            try testing.expectEqualSlices(u8, "12345678", read_buf[0..read_len]);
        }
    };
}
447
deps/libxev/src/watcher/process.zig
vendored
Normal file
@@ -0,0 +1,447 @@
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const linux = std.os.linux;
const posix = std.posix;
const common = @import("common.zig");

/// Process management, such as waiting for process exit.
pub fn Process(comptime xev: type) type {
    return switch (xev.backend) {
        // Supported, uses pidfd
        .io_uring,
        .epoll,
        => ProcessPidFd(xev),

        .kqueue => ProcessKqueue(xev),

        .iocp => ProcessIocp(xev),

        // Unsupported
        .wasi_poll => struct {},
    };
}
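
// Usage sketch (mirroring the tests at the bottom of this file): spawn a
// child, register a wait on the loop, and read the exit code in the callback.
//
//     var p = try xev.Process.init(child.id);
//     defer p.deinit();
//
//     var c: xev.Completion = undefined;
//     p.wait(&loop, &c, ?u32, &code, callback);
//     try loop.run(.until_done);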

/// Process implementation using pidfd (Linux).
fn ProcessPidFd(comptime xev: type) type {
    return struct {
        const Self = @This();

        /// The error that can come in the wait callback.
        pub const WaitError = xev.Sys.PollError || error{
            InvalidChild,
        };

        /// pidfd file descriptor
        fd: posix.fd_t,

        /// Create a new process watcher for the given pid.
        pub fn init(pid: posix.pid_t) !Self {
            // Note: SOCK_NONBLOCK == PIDFD_NONBLOCK but we should PR that
            // over to Zig.
            const res = linux.pidfd_open(pid, posix.SOCK.NONBLOCK);
            const fd = switch (posix.errno(res)) {
                .SUCCESS => @as(posix.fd_t, @intCast(res)),
                .INVAL => return error.InvalidArgument,
                .MFILE => return error.ProcessFdQuotaExceeded,
                .NFILE => return error.SystemFdQuotaExceeded,
                .NODEV => return error.SystemResources,
                .NOMEM => return error.SystemResources,
                else => |err| return posix.unexpectedErrno(err),
            };

            return .{
                .fd = fd,
            };
        }

        /// Clean up the process watcher.
        pub fn deinit(self: *Self) void {
            std.posix.close(self.fd);
        }

        /// Wait for the process to exit. This will automatically call
        /// `waitpid` or equivalent and report the exit status.
        pub fn wait(
            self: Self,
            loop: *xev.Loop,
            c: *xev.Completion,
            comptime Userdata: type,
            userdata: ?*Userdata,
            comptime cb: *const fn (
                ud: ?*Userdata,
                l: *xev.Loop,
                c: *xev.Completion,
                r: WaitError!u32,
            ) xev.CallbackAction,
        ) void {
            const events: u32 = comptime switch (xev.backend) {
                .io_uring => posix.POLL.IN,
                .epoll => linux.EPOLL.IN,
                else => unreachable,
            };

            c.* = .{
                .op = .{
                    .poll = .{
                        .fd = self.fd,
                        .events = events,
                    },
                },

                .userdata = userdata,
                .callback = (struct {
                    fn callback(
                        ud: ?*anyopaque,
                        l_inner: *xev.Loop,
                        c_inner: *xev.Completion,
                        r: xev.Result,
                    ) xev.CallbackAction {
                        const arg: WaitError!u32 = arg: {
                            // If our poll failed, report that error.
                            _ = r.poll catch |err| break :arg err;

                            // We need to wait on the pidfd because it is noted as ready
                            const fd = c_inner.op.poll.fd;
                            var info: linux.siginfo_t = undefined;
                            const res = linux.waitid(.PIDFD, fd, &info, linux.W.EXITED);

                            break :arg switch (posix.errno(res)) {
                                .SUCCESS => @as(u32, @intCast(info.fields.common.second.sigchld.status)),
                                .CHILD => error.InvalidChild,

                                // The fd isn't ready to read, I guess?
                                .AGAIN => return .rearm,
                                else => |err| err: {
                                    std.log.warn("unexpected process wait errno={}", .{err});
                                    break :err error.Unexpected;
                                },
                            };
                        };

                        return @call(.always_inline, cb, .{
                            common.userdataValue(Userdata, ud),
                            l_inner,
                            c_inner,
                            arg,
                        });
                    }
                }).callback,
            };
            loop.add(c);
        }

        /// Common tests
        pub usingnamespace ProcessTests(xev, Self, &.{ "sh", "-c", "exit 0" }, &.{ "sh", "-c", "exit 42" });
    };
}

fn ProcessKqueue(comptime xev: type) type {
    return struct {
        const Self = @This();

        /// The error that can come in the wait callback.
        pub const WaitError = xev.Sys.ProcError;

        /// The pid to watch.
        pid: posix.pid_t,

        /// Create a new process watcher for the given pid.
        pub fn init(pid: posix.pid_t) !Self {
            return .{
                .pid = pid,
            };
        }

        /// Does nothing for Kqueue.
        pub fn deinit(self: *Self) void {
            _ = self;
        }

        /// Wait for the process to exit. This will automatically call
        /// `waitpid` or equivalent and report the exit status.
        pub fn wait(
            self: Self,
            loop: *xev.Loop,
            c: *xev.Completion,
            comptime Userdata: type,
            userdata: ?*Userdata,
            comptime cb: *const fn (
                ud: ?*Userdata,
                l: *xev.Loop,
                c: *xev.Completion,
                r: WaitError!u32,
            ) xev.CallbackAction,
        ) void {
            c.* = .{
                .op = .{
                    .proc = .{
                        .pid = self.pid,
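                        // NOTE_EXITSTATUS (Darwin-specific) asks the kernel
                        // to deliver the full wait(2)-style exit status in
                        // the kevent data, which is what gets reported to
                        // the callback below.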
                        .flags = posix.system.NOTE_EXIT | posix.system.NOTE_EXITSTATUS,
                    },
                },

                .userdata = userdata,
                .callback = (struct {
                    fn callback(
                        ud: ?*anyopaque,
                        l_inner: *xev.Loop,
                        c_inner: *xev.Completion,
                        r: xev.Result,
                    ) xev.CallbackAction {
                        return @call(.always_inline, cb, .{
                            common.userdataValue(Userdata, ud),
                            l_inner,
                            c_inner,
                            if (r.proc) |v| v else |err| err,
                        });
                    }
                }).callback,
            };
            loop.add(c);
        }

        /// Common tests
        pub usingnamespace ProcessTests(xev, Self, &.{ "sh", "-c", "exit 0" }, &.{ "sh", "-c", "exit 42" });
    };
}

const windows = @import("../windows.zig");
fn ProcessIocp(comptime xev: type) type {
    return struct {
        const Self = @This();

        pub const WaitError = xev.Sys.JobObjectError;

        job: windows.HANDLE,
        process: windows.HANDLE,

        pub fn init(process: posix.pid_t) !Self {
            const current_process = windows.kernel32.GetCurrentProcess();

            // Duplicate the process handle so we don't rely on the caller keeping it alive
            var dup_process: windows.HANDLE = undefined;
            const dup_result = windows.kernel32.DuplicateHandle(
                current_process,
                process,
                current_process,
                &dup_process,
                0,
                windows.FALSE,
                windows.DUPLICATE_SAME_ACCESS,
            );
            if (dup_result == 0) return windows.unexpectedError(windows.kernel32.GetLastError());

            const job = try windows.exp.CreateJobObject(null, null);
            errdefer _ = windows.kernel32.CloseHandle(job);

            try windows.exp.AssignProcessToJobObject(job, dup_process);

            return .{
                .job = job,
                .process = dup_process,
            };
        }

        pub fn deinit(self: *Self) void {
            _ = windows.kernel32.CloseHandle(self.job);
            _ = windows.kernel32.CloseHandle(self.process);
        }

        pub fn wait(
            self: Self,
            loop: *xev.Loop,
            c: *xev.Completion,
            comptime Userdata: type,
            userdata: ?*Userdata,
            comptime cb: *const fn (
                ud: ?*Userdata,
                l: *xev.Loop,
                c: *xev.Completion,
                r: WaitError!u32,
            ) xev.CallbackAction,
        ) void {
            c.* = .{
                .op = .{
                    .job_object = .{
                        .job = self.job,
                        .userdata = self.process,
                    },
                },
                .userdata = userdata,
                .callback = (struct {
                    fn callback(
                        ud: ?*anyopaque,
                        l_inner: *xev.Loop,
                        c_inner: *xev.Completion,
                        r: xev.Result,
                    ) xev.CallbackAction {
                        if (r.job_object) |result| {
                            switch (result) {
                                .associated => {
                                    // There was a period of time between when the job object was created
                                    // and when it was associated with the completion port. We may have
                                    // missed a notification, so check if it's still alive.

                                    var exit_code: windows.DWORD = undefined;
                                    const process: windows.HANDLE = @ptrCast(c_inner.op.job_object.userdata);
                                    const has_code = windows.kernel32.GetExitCodeProcess(process, &exit_code) != 0;
                                    if (!has_code) std.log.warn("unable to get exit code for process={}", .{windows.kernel32.GetLastError()});
                                    if (exit_code == windows.exp.STILL_ACTIVE) return .rearm;

                                    return @call(.always_inline, cb, .{
                                        common.userdataValue(Userdata, ud),
                                        l_inner,
                                        c_inner,
                                        exit_code,
                                    });
                                },
                                .message => |message| {
                                    const result_inner = switch (message.type) {
                                        .JOB_OBJECT_MSG_EXIT_PROCESS,
                                        .JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS,
                                        => b: {
                                            const process: windows.HANDLE = @ptrCast(c_inner.op.job_object.userdata);
                                            const pid = windows.exp.kernel32.GetProcessId(process);
                                            if (pid == 0) break :b WaitError.Unexpected;
                                            if (message.value != pid) return .rearm;

                                            var exit_code: windows.DWORD = undefined;
                                            const has_code = windows.kernel32.GetExitCodeProcess(process, &exit_code) != 0;
                                            if (!has_code) std.log.warn("unable to get exit code for process={}", .{windows.kernel32.GetLastError()});
                                            break :b if (has_code) exit_code else WaitError.Unexpected;
                                        },
                                        else => return .rearm,
                                    };

                                    return @call(.always_inline, cb, .{ common.userdataValue(Userdata, ud), l_inner, c_inner, result_inner });
                                },
                            }
                        } else |err| {
                            return @call(.always_inline, cb, .{
                                common.userdataValue(Userdata, ud),
                                l_inner,
                                c_inner,
                                err,
                            });
                        }
                    }
                }).callback,
            };
            loop.add(c);
        }

        /// Common tests
        pub usingnamespace ProcessTests(xev, Self, &.{ "cmd.exe", "/C", "exit 0" }, &.{ "cmd.exe", "/C", "exit 42" });
    };
}

fn ProcessTests(
    comptime xev: type,
    comptime Impl: type,
    comptime argv_0: []const []const u8,
    comptime argv_42: []const []const u8,
) type {
    return struct {
        test "process wait" {
            const testing = std.testing;
            const alloc = testing.allocator;

            var child = std.process.Child.init(argv_0, alloc);
            try child.spawn();

            var loop = try xev.Loop.init(.{});
            defer loop.deinit();

            var p = try Impl.init(child.id);
            defer p.deinit();

            // Wait
            var code: ?u32 = null;
            var c_wait: xev.Completion = undefined;
            p.wait(&loop, &c_wait, ?u32, &code, (struct {
                fn callback(
                    ud: ?*?u32,
                    _: *xev.Loop,
                    _: *xev.Completion,
                    r: Impl.WaitError!u32,
                ) xev.CallbackAction {
                    ud.?.* = r catch unreachable;
                    return .disarm;
                }
            }).callback);

            // Wait for wake
            try loop.run(.until_done);
            try testing.expectEqual(@as(u32, 0), code.?);
        }

        test "process wait with non-zero exit code" {
            const testing = std.testing;
            const alloc = testing.allocator;

            var child = std.process.Child.init(argv_42, alloc);
            try child.spawn();

            var loop = try xev.Loop.init(.{});
            defer loop.deinit();

            var p = try Impl.init(child.id);
            defer p.deinit();

            // Wait
            var code: ?u32 = null;
            var c_wait: xev.Completion = undefined;
            p.wait(&loop, &c_wait, ?u32, &code, (struct {
                fn callback(
                    ud: ?*?u32,
                    _: *xev.Loop,
                    _: *xev.Completion,
                    r: Impl.WaitError!u32,
                ) xev.CallbackAction {
                    ud.?.* = r catch unreachable;
                    return .disarm;
                }
            }).callback);

            // Wait for wake
            try loop.run(.until_done);
            try testing.expectEqual(@as(u32, 42), code.?);
        }

        test "process wait on a process that already exited" {
            const testing = std.testing;
            const alloc = testing.allocator;

            var child = std.process.Child.init(argv_0, alloc);
            try child.spawn();

            var loop = try xev.Loop.init(.{});
            defer loop.deinit();

            var p = try Impl.init(child.id);
            defer p.deinit();

            _ = try child.wait();

            // Wait
            var code: ?u32 = null;
            var c_wait: xev.Completion = undefined;
            p.wait(&loop, &c_wait, ?u32, &code, (struct {
                fn callback(
                    ud: ?*?u32,
                    _: *xev.Loop,
                    _: *xev.Completion,
                    r: Impl.WaitError!u32,
                ) xev.CallbackAction {
                    ud.?.* = r catch 0;
                    return .disarm;
                }
            }).callback);

            // Wait for wake
            try loop.run(.until_done);
            try testing.expectEqual(@as(u32, 0), code.?);
        }
    };
}
822
deps/libxev/src/watcher/stream.zig
vendored
Normal file
@@ -0,0 +1,822 @@
const std = @import("std");
const assert = std.debug.assert;
const builtin = @import("builtin");
const common = @import("common.zig");
const queue = @import("../queue.zig");

/// Options for creating a stream type. Each of the options makes the
/// corresponding functionality available for the stream.
pub const Options = struct {
    read: ReadMethod,
    write: WriteMethod,
    close: bool,

    /// True to schedule the read/write on the threadpool.
    threadpool: bool = false,

    pub const ReadMethod = enum { none, read, recv };
    pub const WriteMethod = enum { none, write, send };
};
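
// For example, file.zig above embeds these methods with:
//
//     pub usingnamespace stream.Stream(xev, Self, .{
//         .close = true,
//         .read = .read,
//         .write = .write,
//         .threadpool = true,
//     });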
|
||||
|
||||
/// Creates a stream type that is meant to be embedded within other
|
||||
/// types using "usingnamespace". A stream is something that supports read,
|
||||
/// write, close, etc. The exact operations supported are defined by the
|
||||
/// "options" struct.
|
||||
///
|
||||
/// T requirements:
|
||||
/// - field named "fd" of type fd_t or socket_t
|
||||
/// - decl named "initFd" to initialize a new T from a fd
|
||||
///
|
||||
pub fn Stream(comptime xev: type, comptime T: type, comptime options: Options) type {
|
||||
return struct {
|
||||
pub usingnamespace if (options.close) Closeable(xev, T, options) else struct {};
|
||||
pub usingnamespace if (options.read != .none) Readable(xev, T, options) else struct {};
|
||||
pub usingnamespace if (options.write != .none) Writeable(xev, T, options) else struct {};
|
||||
};
|
||||
}
|
||||
|
||||
pub fn Closeable(comptime xev: type, comptime T: type, comptime options: Options) type {
|
||||
_ = options;
|
||||
|
||||
return struct {
|
||||
const Self = T;
|
||||
|
||||
pub const CloseError = xev.CloseError;
|
||||
|
||||
/// Close the socket.
|
||||
pub fn close(
|
||||
self: Self,
|
||||
loop: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
comptime Userdata: type,
|
||||
userdata: ?*Userdata,
|
||||
comptime cb: *const fn (
|
||||
ud: ?*Userdata,
|
||||
l: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
s: Self,
|
||||
r: CloseError!void,
|
||||
) xev.CallbackAction,
|
||||
) void {
|
||||
c.* = .{
|
||||
.op = .{ .close = .{ .fd = self.fd } },
|
||||
.userdata = userdata,
|
||||
.callback = (struct {
|
||||
fn callback(
|
||||
ud: ?*anyopaque,
|
||||
l_inner: *xev.Loop,
|
||||
c_inner: *xev.Completion,
|
||||
r: xev.Result,
|
||||
) xev.CallbackAction {
|
||||
const fd = T.initFd(c_inner.op.close.fd);
|
||||
return @call(.always_inline, cb, .{
|
||||
common.userdataValue(Userdata, ud),
|
||||
l_inner,
|
||||
c_inner,
|
||||
fd,
|
||||
if (r.close) |_| {} else |err| err,
|
||||
});
|
||||
}
|
||||
}).callback,
|
||||
};
|
||||
|
||||
loop.add(c);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn Readable(comptime xev: type, comptime T: type, comptime options: Options) type {
|
||||
return struct {
|
||||
const Self = T;
|
||||
|
||||
pub const ReadError = xev.ReadError;
|
||||
|
||||
/// Read from the socket. This performs a single read. The callback must
|
||||
/// requeue the read if additional reads want to be performed. Additional
|
||||
/// reads simultaneously can be queued by calling this multiple times. Note
|
||||
/// that depending on the backend, the reads can happen out of order.
|
||||
pub fn read(
|
||||
self: Self,
|
||||
loop: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
buf: xev.ReadBuffer,
|
||||
comptime Userdata: type,
|
||||
userdata: ?*Userdata,
|
||||
comptime cb: *const fn (
|
||||
ud: ?*Userdata,
|
||||
l: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
s: Self,
|
||||
b: xev.ReadBuffer,
|
||||
r: ReadError!usize,
|
||||
) xev.CallbackAction,
|
||||
) void {
|
||||
switch (buf) {
|
||||
inline .slice, .array => {
|
||||
c.* = .{
|
||||
.op = switch (options.read) {
|
||||
.none => unreachable,
|
||||
|
||||
.read => .{
|
||||
.read = .{
|
||||
.fd = self.fd,
|
||||
.buffer = buf,
|
||||
},
|
||||
},
|
||||
|
||||
.recv => .{
|
||||
.recv = .{
|
||||
.fd = self.fd,
|
||||
.buffer = buf,
|
||||
},
|
||||
},
|
||||
},
|
||||
.userdata = userdata,
|
||||
.callback = (struct {
|
||||
fn callback(
|
||||
ud: ?*anyopaque,
|
||||
l_inner: *xev.Loop,
|
||||
c_inner: *xev.Completion,
|
||||
r: xev.Result,
|
||||
) xev.CallbackAction {
|
||||
return switch (options.read) {
|
||||
.none => unreachable,
|
||||
|
||||
.recv => @call(.always_inline, cb, .{
|
||||
common.userdataValue(Userdata, ud),
|
||||
l_inner,
|
||||
c_inner,
|
||||
T.initFd(c_inner.op.recv.fd),
|
||||
c_inner.op.recv.buffer,
|
||||
if (r.recv) |v| v else |err| err,
|
||||
}),
|
||||
|
||||
.read => @call(.always_inline, cb, .{
|
||||
common.userdataValue(Userdata, ud),
|
||||
l_inner,
|
||||
c_inner,
|
||||
T.initFd(c_inner.op.read.fd),
|
||||
c_inner.op.read.buffer,
|
||||
if (r.read) |v| v else |err| err,
|
||||
}),
|
||||
};
|
||||
}
|
||||
}).callback,
|
||||
};
|
||||
|
||||
// If we're dup-ing, then we ask the backend to manage the fd.
|
||||
switch (xev.backend) {
|
||||
.io_uring,
|
||||
.wasi_poll,
|
||||
.iocp,
|
||||
=> {},
|
||||
|
||||
.epoll => {
|
||||
if (options.threadpool)
|
||||
c.flags.threadpool = true
|
||||
else
|
||||
c.flags.dup = true;
|
||||
},
|
||||
|
||||
.kqueue => {
|
||||
if (options.threadpool) c.flags.threadpool = true;
|
||||
},
|
||||
}
|
||||
|
||||
loop.add(c);
|
||||
},
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn Writeable(comptime xev: type, comptime T: type, comptime options: Options) type {
|
||||
return struct {
|
||||
const Self = T;
|
||||
|
||||
pub const WriteError = xev.WriteError;
|
||||
|
||||
/// WriteQueue is the queue of write requests for ordered writes.
|
||||
/// This can be copied around.
|
||||
pub const WriteQueue = queue.Intrusive(WriteRequest);
|
||||
|
||||
/// WriteRequest is a single request for a write. It wraps a
|
||||
/// completion so that it can be inserted into the WriteQueue.
|
||||
pub const WriteRequest = struct {
|
||||
completion: xev.Completion = .{},
|
||||
userdata: ?*anyopaque = null,
|
||||
|
||||
/// This is the original buffer passed to queueWrite. We have
|
||||
/// to keep track of this because we may be forced to split
|
||||
/// the write or rearm the write due to partial writes, but when
|
||||
/// we call the final callback we want to pass the original
|
||||
/// complete buffer.
|
||||
full_write_buffer: xev.WriteBuffer,
|
||||
|
||||
next: ?*@This() = null,
|
||||
|
||||
/// This can be used to convert a completion pointer back to
|
||||
/// a WriteRequest. This is only safe of course if the completion
|
||||
/// originally is from a write request. This is useful for getting
|
||||
/// the WriteRequest back in a callback from queuedWrite.
|
||||
pub fn from(c: *xev.Completion) *WriteRequest {
|
||||
return @fieldParentPtr("completion", c);
|
||||
}
|
||||
};
|
||||
|
||||
/// Write to the stream. This queues the writes to ensure they
|
||||
/// remain in order. Queueing has a small overhead: you must
|
||||
/// maintain a WriteQueue and WriteRequests instead of just
|
||||
/// Completions.
|
||||
///
|
||||
/// If ordering isn't important, or you can maintain ordering
|
||||
/// naturally in your program, consider using write since it
|
||||
/// has a slightly smaller overhead.
|
||||
///
|
||||
/// The "CallbackAction" return value of this callback behaves slightly
|
||||
/// different. The "rearm" return value will re-queue the same write
|
||||
/// at the end of the queue.
|
||||
///
|
||||
/// It is safe to call this at anytime from the main thread.
|
||||
pub fn queueWrite(
|
||||
self: Self,
|
||||
loop: *xev.Loop,
|
||||
q: *WriteQueue,
|
||||
req: *WriteRequest,
|
||||
buf: xev.WriteBuffer,
|
||||
comptime Userdata: type,
|
||||
userdata: ?*Userdata,
|
||||
comptime cb: *const fn (
|
||||
ud: ?*Userdata,
|
||||
l: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
s: Self,
|
||||
b: xev.WriteBuffer,
|
||||
r: WriteError!usize,
|
||||
) xev.CallbackAction,
|
||||
) void {
|
||||
// Initialize our completion
|
||||
req.* = .{ .full_write_buffer = buf };
|
||||
// Must be kept in sync with partial write logic inside the callback
|
||||
self.write_init(&req.completion, buf);
|
||||
req.completion.userdata = q;
|
||||
req.completion.callback = (struct {
|
||||
fn callback(
|
||||
ud: ?*anyopaque,
|
||||
l_inner: *xev.Loop,
|
||||
c_inner: *xev.Completion,
|
||||
r: xev.Result,
|
||||
) xev.CallbackAction {
|
||||
const q_inner = @as(?*WriteQueue, @ptrCast(@alignCast(ud))).?;
|
||||
|
||||
// The queue MUST have a request because a completion
// can only be added if the queue is not empty, and
// nothing else should be popping!
|
||||
//
|
||||
// We only peek the request here (not pop) because we may
|
||||
// need to rearm this write if the write was partial.
|
||||
const req_inner: *WriteRequest = q_inner.head.?;
|
||||
|
||||
const cb_res = write_result(c_inner, r);
|
||||
var result: WriteError!usize = cb_res.result;
|
||||
|
||||
// Check whether the entire buffer was written; this is
// necessary to guarantee correct ordering of writes.
// If the write was partial, re-submit the remainder of
// the buffer.
|
||||
const queued_len = writeBufferLength(cb_res.buf);
|
||||
if (cb_res.result) |written_len| {
|
||||
if (written_len < queued_len) {
|
||||
// Write remainder of the buffer, reusing the same completion
|
||||
const rem_buf = writeBufferRemainder(cb_res.buf, written_len);
|
||||
cb_res.writer.write_init(&req_inner.completion, rem_buf);
|
||||
req_inner.completion.userdata = q_inner;
|
||||
req_inner.completion.callback = callback;
|
||||
l_inner.add(&req_inner.completion);
|
||||
return .disarm;
|
||||
}
|
||||
|
||||
// We wrote the entire buffer, modify the result to indicate
|
||||
// to the caller that all bytes have been written.
|
||||
result = writeBufferLength(req_inner.full_write_buffer);
|
||||
} else |_| {}
|
||||
|
||||
// We can now pop the previously peeked request.
|
||||
_ = q_inner.pop().?;
|
||||
|
||||
const action = @call(.always_inline, cb, .{
|
||||
common.userdataValue(Userdata, req_inner.userdata),
|
||||
l_inner,
|
||||
c_inner,
|
||||
cb_res.writer,
|
||||
req_inner.full_write_buffer,
|
||||
result,
|
||||
});
|
||||
|
||||
// A "rearm" action requeues this request; we never return
// .rearm from the actual completion callback here.
|
||||
if (action == .rearm) q_inner.push(req_inner);
|
||||
|
||||
// If we have another request, add that completion next.
|
||||
if (q_inner.head) |req_next| l_inner.add(&req_next.completion);
|
||||
|
||||
// We always disarm because the completion in the next
|
||||
// request will be used if there is more to queue.
|
||||
return .disarm;
|
||||
}
|
||||
}).callback;
|
||||
|
||||
// The userdata has to go on the WriteRequest because we need
// our actual completion userdata to be the WriteQueue so that
// we can process the queue.
|
||||
req.userdata = @as(?*anyopaque, @ptrCast(@alignCast(userdata)));
|
||||
|
||||
// If the queue is empty, then we add our completion. Otherwise,
|
||||
// the previously queued writes will trigger this one.
|
||||
if (q.empty()) loop.add(&req.completion);
|
||||
|
||||
// We always add this item to our queue no matter what
|
||||
q.push(req);
|
||||
}
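// A minimal usage sketch (illustrative; the full flow is shown in the
// "pty: queued writes" test below). The queue and requests must outlive
// the writes:
//
//   var q: WriteQueue = .{};
//   var reqs: [2]WriteRequest = undefined;
//   s.queueWrite(&loop, &q, &reqs[0], .{ .slice = "hello, " }, void, null, cb);
//   s.queueWrite(&loop, &q, &reqs[1], .{ .slice = "world!" }, void, null, cb);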
|
||||
|
||||
/// Write to the stream. This performs a single write. Additional
|
||||
/// writes can be requested by calling this multiple times.
|
||||
///
|
||||
/// IMPORTANT: writes are NOT queued. There is no order guarantee
|
||||
/// if this is called multiple times. If ordered writes are important
|
||||
/// (they usually are!) then you should only call write again once
|
||||
/// the previous write callback is called.
|
||||
///
|
||||
/// If ordering is important, use queueWrite instead.
|
||||
pub fn write(
|
||||
self: Self,
|
||||
loop: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
buf: xev.WriteBuffer,
|
||||
comptime Userdata: type,
|
||||
userdata: ?*Userdata,
|
||||
comptime cb: *const fn (
|
||||
ud: ?*Userdata,
|
||||
l: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
s: Self,
|
||||
b: xev.WriteBuffer,
|
||||
r: WriteError!usize,
|
||||
) xev.CallbackAction,
|
||||
) void {
|
||||
self.write_init(c, buf);
|
||||
c.userdata = userdata;
|
||||
c.callback = (struct {
|
||||
fn callback(
|
||||
ud: ?*anyopaque,
|
||||
l_inner: *xev.Loop,
|
||||
c_inner: *xev.Completion,
|
||||
r: xev.Result,
|
||||
) xev.CallbackAction {
|
||||
const cb_res = write_result(c_inner, r);
|
||||
return @call(.always_inline, cb, .{
|
||||
common.userdataValue(Userdata, ud),
|
||||
l_inner,
|
||||
c_inner,
|
||||
cb_res.writer,
|
||||
cb_res.buf,
|
||||
cb_res.result,
|
||||
});
|
||||
}
|
||||
}).callback;
|
||||
|
||||
loop.add(c);
|
||||
}
|
||||
|
||||
/// Extracts the result from a completion for a write callback.
|
||||
inline fn write_result(c: *xev.Completion, r: xev.Result) struct {
|
||||
writer: Self,
|
||||
buf: xev.WriteBuffer,
|
||||
result: WriteError!usize,
|
||||
} {
|
||||
return switch (options.write) {
|
||||
.none => unreachable,
|
||||
|
||||
.send => .{
|
||||
.writer = T.initFd(c.op.send.fd),
|
||||
.buf = c.op.send.buffer,
|
||||
.result = if (r.send) |v| v else |err| err,
|
||||
},
|
||||
|
||||
.write => .{
|
||||
.writer = T.initFd(c.op.write.fd),
|
||||
.buf = c.op.write.buffer,
|
||||
.result = if (r.write) |v| v else |err| err,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/// Initialize the completion c for a write. This does NOT set
|
||||
/// userdata or a callback.
|
||||
fn write_init(
|
||||
self: Self,
|
||||
c: *xev.Completion,
|
||||
buf: xev.WriteBuffer,
|
||||
) void {
|
||||
switch (buf) {
|
||||
inline .slice, .array => {
|
||||
c.* = .{
|
||||
.op = switch (options.write) {
|
||||
.none => unreachable,
|
||||
|
||||
.write => .{
|
||||
.write = .{
|
||||
.fd = self.fd,
|
||||
.buffer = buf,
|
||||
},
|
||||
},
|
||||
|
||||
.send => .{
|
||||
.send = .{
|
||||
.fd = self.fd,
|
||||
.buffer = buf,
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
// If we're dup-ing, then we ask the backend to manage the fd.
|
||||
switch (xev.backend) {
|
||||
.io_uring,
|
||||
.wasi_poll,
|
||||
.iocp,
|
||||
=> {},
|
||||
|
||||
.epoll => {
|
||||
if (options.threadpool) {
|
||||
c.flags.threadpool = true;
|
||||
} else {
|
||||
c.flags.dup = true;
|
||||
}
|
||||
},
|
||||
|
||||
.kqueue => {
|
||||
if (options.threadpool) c.flags.threadpool = true;
|
||||
},
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the length of the write buffer
|
||||
fn writeBufferLength(buf: xev.WriteBuffer) usize {
|
||||
return switch (buf) {
|
||||
.slice => |slice| slice.len,
|
||||
.array => |array| array.len,
|
||||
};
|
||||
}
|
||||
|
||||
/// Given a `WriteBuffer` and the number of bytes written during the previous
/// write operation, returns a new `WriteBuffer` with the remaining data.
|
||||
fn writeBufferRemainder(buf: xev.WriteBuffer, offset: usize) xev.WriteBuffer {
|
||||
switch (buf) {
|
||||
.slice => |slice| {
|
||||
assert(offset <= slice.len);
|
||||
return .{ .slice = slice[offset..] };
|
||||
},
|
||||
.array => |array| {
|
||||
assert(offset <= array.len);
|
||||
const rem_len = array.len - offset;
|
||||
var wb = xev.WriteBuffer{ .array = .{
|
||||
.array = undefined,
|
||||
.len = rem_len,
|
||||
} };
|
||||
@memcpy(
|
||||
wb.array.array[0..rem_len],
|
||||
array.array[offset..][0..rem_len],
|
||||
);
|
||||
return wb;
|
||||
},
|
||||
}
|
||||
}
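// For example (illustrative values): with .{ .slice = "abcdef" } and an
// offset of 4, this returns .{ .slice = "ef" }. In the .array case the
// remaining bytes are copied to the front of a fresh fixed-size array,
// which is why the @memcpy above is required.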
|
||||
};
|
||||
}
|
||||
|
||||
/// Creates a generic stream type that supports read, write, close. This
|
||||
/// can be used for any file descriptor that would exhibit normal blocking
|
||||
/// behavior on read/write. This should NOT be used for local files because
|
||||
/// local files have some special properties; you should use xev.File for that.
|
||||
pub fn GenericStream(comptime xev: type) type {
|
||||
return struct {
|
||||
const Self = @This();
|
||||
|
||||
/// The underlying file
|
||||
fd: std.posix.fd_t,
|
||||
|
||||
pub usingnamespace Stream(xev, Self, .{
|
||||
.close = true,
|
||||
.read = .read,
|
||||
.write = .write,
|
||||
});
|
||||
|
||||
/// Initialize a generic stream from a file descriptor.
|
||||
pub fn initFd(fd: std.posix.fd_t) Self {
|
||||
return .{
|
||||
.fd = fd,
|
||||
};
|
||||
}
|
||||
|
||||
/// Clean up any watcher resources. This does NOT close the file.
|
||||
/// If you want to close the file you must call close or do so
|
||||
/// synchronously.
|
||||
pub fn deinit(self: *const Self) void {
|
||||
_ = self;
|
||||
}
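// A minimal usage sketch (illustrative; any fd with normal blocking
// read/write behavior works, e.g. one side of a pipe or a pty as in
// the tests below):
//
//   const fds = try std.posix.pipe();
//   const reader = initFd(fds[0]);
//   const writer = initFd(fds[1]);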
|
||||
|
||||
test "pty: child to parent" {
|
||||
const testing = std.testing;
|
||||
switch (builtin.os.tag) {
|
||||
.linux, .macos => {},
|
||||
else => return error.SkipZigTest,
|
||||
}
|
||||
|
||||
// Create the pty parent/child side.
|
||||
var pty = try Pty.init();
|
||||
defer pty.deinit();
|
||||
|
||||
var loop = try xev.Loop.init(.{});
|
||||
defer loop.deinit();
|
||||
|
||||
const parent = initFd(pty.parent);
|
||||
const child = initFd(pty.child);
|
||||
|
||||
// Read
|
||||
var read_buf: [128]u8 = undefined;
|
||||
var read_len: ?usize = null;
|
||||
var c_read: xev.Completion = undefined;
|
||||
parent.read(&loop, &c_read, .{ .slice = &read_buf }, ?usize, &read_len, (struct {
|
||||
fn callback(
|
||||
ud: ?*?usize,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
_: Self,
|
||||
_: xev.ReadBuffer,
|
||||
r: Self.ReadError!usize,
|
||||
) xev.CallbackAction {
|
||||
ud.?.* = r catch unreachable;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// This should not block!
|
||||
try loop.run(.no_wait);
|
||||
try testing.expect(read_len == null);
|
||||
|
||||
// Send
|
||||
const send_buf = "hello, world!";
|
||||
var c_write: xev.Completion = undefined;
|
||||
child.write(&loop, &c_write, .{ .slice = send_buf }, void, null, (struct {
|
||||
fn callback(
|
||||
_: ?*void,
|
||||
_: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
_: Self,
|
||||
_: xev.WriteBuffer,
|
||||
r: Self.WriteError!usize,
|
||||
) xev.CallbackAction {
|
||||
_ = c;
|
||||
_ = r catch unreachable;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// The write and read should trigger
|
||||
try loop.run(.until_done);
|
||||
try testing.expect(read_len != null);
|
||||
try testing.expectEqualSlices(u8, send_buf, read_buf[0..read_len.?]);
|
||||
}
|
||||
|
||||
test "pty: parent to child" {
|
||||
const testing = std.testing;
|
||||
switch (builtin.os.tag) {
|
||||
.linux, .macos => {},
|
||||
else => return error.SkipZigTest,
|
||||
}
|
||||
|
||||
// Create the pty parent/child side.
|
||||
var pty = try Pty.init();
|
||||
defer pty.deinit();
|
||||
|
||||
var loop = try xev.Loop.init(.{});
|
||||
defer loop.deinit();
|
||||
|
||||
const parent = initFd(pty.parent);
|
||||
const child = initFd(pty.child);
|
||||
|
||||
// Read
|
||||
var read_buf: [128]u8 = undefined;
|
||||
var read_len: ?usize = null;
|
||||
var c_read: xev.Completion = undefined;
|
||||
child.read(&loop, &c_read, .{ .slice = &read_buf }, ?usize, &read_len, (struct {
|
||||
fn callback(
|
||||
ud: ?*?usize,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
_: Self,
|
||||
_: xev.ReadBuffer,
|
||||
r: Self.ReadError!usize,
|
||||
) xev.CallbackAction {
|
||||
ud.?.* = r catch unreachable;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// This should not block!
|
||||
try loop.run(.no_wait);
|
||||
try testing.expect(read_len == null);
|
||||
|
||||
// Send (note the newline at the end of the buf is important
|
||||
// since we're in cooked mode)
|
||||
const send_buf = "hello, world!\n";
|
||||
var c_write: xev.Completion = undefined;
|
||||
parent.write(&loop, &c_write, .{ .slice = send_buf }, void, null, (struct {
|
||||
fn callback(
|
||||
_: ?*void,
|
||||
_: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
_: Self,
|
||||
_: xev.WriteBuffer,
|
||||
r: Self.WriteError!usize,
|
||||
) xev.CallbackAction {
|
||||
_ = c;
|
||||
_ = r catch unreachable;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// The write and read should trigger
|
||||
try loop.run(.until_done);
|
||||
try testing.expect(read_len != null);
|
||||
try testing.expectEqualSlices(u8, send_buf, read_buf[0..read_len.?]);
|
||||
}
|
||||
|
||||
test "pty: queued writes" {
|
||||
const testing = std.testing;
|
||||
switch (builtin.os.tag) {
|
||||
.linux, .macos => {},
|
||||
else => return error.SkipZigTest,
|
||||
}
|
||||
|
||||
// Create the pty parent/child side.
|
||||
var pty = try Pty.init();
|
||||
defer pty.deinit();
|
||||
|
||||
var loop = try xev.Loop.init(.{});
|
||||
defer loop.deinit();
|
||||
|
||||
const parent = initFd(pty.parent);
|
||||
const child = initFd(pty.child);
|
||||
|
||||
// Read
|
||||
var read_buf: [128]u8 = undefined;
|
||||
var read_len: ?usize = null;
|
||||
var c_read: xev.Completion = undefined;
|
||||
child.read(&loop, &c_read, .{ .slice = &read_buf }, ?usize, &read_len, (struct {
|
||||
fn callback(
|
||||
ud: ?*?usize,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
_: Self,
|
||||
_: xev.ReadBuffer,
|
||||
r: Self.ReadError!usize,
|
||||
) xev.CallbackAction {
|
||||
ud.?.* = r catch unreachable;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// This should not block!
|
||||
try loop.run(.no_wait);
|
||||
try testing.expect(read_len == null);
|
||||
|
||||
var write_queue: Self.WriteQueue = .{};
|
||||
var write_req: [2]Self.WriteRequest = undefined;
|
||||
|
||||
// Send (note the newline at the end of the buf is important
|
||||
// since we're in cooked mode)
|
||||
parent.queueWrite(
|
||||
&loop,
|
||||
&write_queue,
|
||||
&write_req[0],
|
||||
.{ .slice = "hello, " },
|
||||
void,
|
||||
null,
|
||||
(struct {
|
||||
fn callback(
|
||||
_: ?*void,
|
||||
_: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
_: Self,
|
||||
_: xev.WriteBuffer,
|
||||
r: Self.WriteError!usize,
|
||||
) xev.CallbackAction {
|
||||
_ = c;
|
||||
_ = r catch unreachable;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback,
|
||||
);
|
||||
|
||||
var c_result: ?*xev.Completion = null;
|
||||
parent.queueWrite(
|
||||
&loop,
|
||||
&write_queue,
|
||||
&write_req[1],
|
||||
.{ .slice = "world!\n" },
|
||||
?*xev.Completion,
|
||||
&c_result,
|
||||
(struct {
|
||||
fn callback(
|
||||
ud: ?*?*xev.Completion,
|
||||
_: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
_: Self,
|
||||
_: xev.WriteBuffer,
|
||||
r: Self.WriteError!usize,
|
||||
) xev.CallbackAction {
|
||||
_ = r catch unreachable;
|
||||
ud.?.* = c;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback,
|
||||
);
|
||||
|
||||
// The write and read should trigger
|
||||
try loop.run(.until_done);
|
||||
try testing.expect(read_len != null);
|
||||
try testing.expectEqualSlices(u8, "hello, world!\n", read_buf[0..read_len.?]);
|
||||
|
||||
// Verify our completion is equal to our request
|
||||
try testing.expect(Self.WriteRequest.from(c_result.?) == &write_req[1]);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Helper to open a pty. This isn't exposed as a public API; it is only
/// used for tests.
|
||||
const Pty = struct {
|
||||
/// The file descriptors for the parent/child side of the pty. This refers
|
||||
/// to the master/slave side respectively, and while that terminology is
|
||||
/// the officially used terminology of the syscall, I will use parent/child
|
||||
/// here.
|
||||
parent: std.posix.fd_t,
|
||||
child: std.posix.fd_t,
|
||||
|
||||
/// Redeclare this winsize struct so we can just use a Zig struct. This
|
||||
/// layout should be correct on all tested platforms.
|
||||
const Winsize = extern struct {
|
||||
ws_row: u16,
|
||||
ws_col: u16,
|
||||
ws_xpixel: u16,
|
||||
ws_ypixel: u16,
|
||||
};
|
||||
|
||||
// libc pty.h
|
||||
extern "c" fn openpty(
|
||||
parent: *std.posix.fd_t,
|
||||
child: *std.posix.fd_t,
|
||||
name: ?[*]u8,
|
||||
termios: ?*const anyopaque, // termios but we don't use it
|
||||
winsize: ?*const Winsize,
|
||||
) c_int;
|
||||
|
||||
pub fn init() !Pty {
|
||||
// Reasonable size
|
||||
var size: Winsize = .{
|
||||
.ws_row = 80,
|
||||
.ws_col = 80,
|
||||
.ws_xpixel = 800,
|
||||
.ws_ypixel = 600,
|
||||
};
|
||||
|
||||
var parent_fd: std.posix.fd_t = undefined;
|
||||
var child_fd: std.posix.fd_t = undefined;
|
||||
if (openpty(
|
||||
&parent_fd,
|
||||
&child_fd,
|
||||
null,
|
||||
null,
|
||||
&size,
|
||||
) < 0)
|
||||
return error.OpenptyFailed;
|
||||
errdefer {
|
||||
_ = std.posix.system.close(parent_fd);
|
||||
_ = std.posix.system.close(child_fd);
|
||||
}
|
||||
|
||||
return .{
|
||||
.parent = parent_fd,
|
||||
.child = child_fd,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Pty) void {
|
||||
std.posix.close(self.parent);
|
||||
std.posix.close(self.child);
|
||||
}
|
||||
};
|
624
deps/libxev/src/watcher/tcp.zig
vendored
Normal file
@@ -0,0 +1,624 @@
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const assert = std.debug.assert;
|
||||
const posix = std.posix;
|
||||
const stream = @import("stream.zig");
|
||||
const common = @import("common.zig");
|
||||
|
||||
/// TCP client and server.
|
||||
///
|
||||
/// This is a "higher-level abstraction" in libxev. The goal of higher-level
|
||||
/// abstractions in libxev are to make it easier to use specific functionality
|
||||
/// with the event loop, but does not promise perfect flexibility or optimal
|
||||
/// performance. In almost all cases, the abstraction is good enough. But,
|
||||
/// if you have specific needs or want to push for the most optimal performance,
|
||||
/// use the platform-specific Loop directly.
|
||||
pub fn TCP(comptime xev: type) type {
|
||||
return struct {
|
||||
const Self = @This();
|
||||
const FdType = if (xev.backend == .iocp) std.os.windows.HANDLE else posix.socket_t;
|
||||
|
||||
fd: FdType,
|
||||
|
||||
pub usingnamespace stream.Stream(xev, Self, .{
|
||||
.close = true,
|
||||
.read = .recv,
|
||||
.write = .send,
|
||||
});
|
||||
|
||||
/// Initialize a new TCP with the family from the given address. Only
|
||||
/// the family is used, the actual address has no impact on the created
|
||||
/// resource.
|
||||
pub fn init(addr: std.net.Address) !Self {
|
||||
if (xev.backend == .wasi_poll) @compileError("unsupported in WASI");
|
||||
|
||||
const fd = if (xev.backend == .iocp)
|
||||
try std.os.windows.WSASocketW(addr.any.family, posix.SOCK.STREAM, 0, null, 0, std.os.windows.ws2_32.WSA_FLAG_OVERLAPPED)
|
||||
else fd: {
|
||||
// On io_uring we don't use non-blocking sockets because we may
|
||||
// just get EAGAIN over and over from completions.
|
||||
const flags = flags: {
|
||||
var flags: u32 = posix.SOCK.STREAM | posix.SOCK.CLOEXEC;
|
||||
if (xev.backend != .io_uring) flags |= posix.SOCK.NONBLOCK;
|
||||
break :flags flags;
|
||||
};
|
||||
break :fd try posix.socket(addr.any.family, flags, 0);
|
||||
};
|
||||
|
||||
return .{
|
||||
.fd = fd,
|
||||
};
|
||||
}
|
||||
|
||||
/// Initialize a TCP socket from a file descriptor.
|
||||
pub fn initFd(fd: FdType) Self {
|
||||
return .{
|
||||
.fd = fd,
|
||||
};
|
||||
}
|
||||
|
||||
/// Bind the address to the socket.
|
||||
pub fn bind(self: Self, addr: std.net.Address) !void {
|
||||
if (xev.backend == .wasi_poll) @compileError("unsupported in WASI");
|
||||
|
||||
const fd = if (xev.backend == .iocp) @as(std.os.windows.ws2_32.SOCKET, @ptrCast(self.fd)) else self.fd;
|
||||
|
||||
try posix.setsockopt(fd, posix.SOL.SOCKET, posix.SO.REUSEADDR, &std.mem.toBytes(@as(c_int, 1)));
|
||||
try posix.bind(fd, &addr.any, addr.getOsSockLen());
|
||||
}
|
||||
|
||||
/// Listen for connections on the socket. This puts the socket into passive
|
||||
/// listening mode. Connections must still be accepted one at a time.
|
||||
pub fn listen(self: Self, backlog: u31) !void {
|
||||
if (xev.backend == .wasi_poll) @compileError("unsupported in WASI");
|
||||
|
||||
const fd = if (xev.backend == .iocp) @as(std.os.windows.ws2_32.SOCKET, @ptrCast(self.fd)) else self.fd;
|
||||
|
||||
try posix.listen(fd, backlog);
|
||||
}
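// A minimal server-setup sketch (illustrative; see the test below for
// the full accept/connect flow):
//
//   var addr = try std.net.Address.parseIp4("127.0.0.1", 0);
//   const server = try Self.init(addr);
//   try server.bind(addr);
//   try server.listen(1);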
|
||||
|
||||
/// Accept a single connection.
|
||||
pub fn accept(
|
||||
self: Self,
|
||||
loop: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
comptime Userdata: type,
|
||||
userdata: ?*Userdata,
|
||||
comptime cb: *const fn (
|
||||
ud: ?*Userdata,
|
||||
l: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
r: AcceptError!Self,
|
||||
) xev.CallbackAction,
|
||||
) void {
|
||||
c.* = .{
|
||||
.op = .{
|
||||
.accept = .{
|
||||
.socket = self.fd,
|
||||
},
|
||||
},
|
||||
|
||||
.userdata = userdata,
|
||||
.callback = (struct {
|
||||
fn callback(
|
||||
ud: ?*anyopaque,
|
||||
l_inner: *xev.Loop,
|
||||
c_inner: *xev.Completion,
|
||||
r: xev.Result,
|
||||
) xev.CallbackAction {
|
||||
return @call(.always_inline, cb, .{
|
||||
common.userdataValue(Userdata, ud),
|
||||
l_inner,
|
||||
c_inner,
|
||||
if (r.accept) |fd| initFd(fd) else |err| err,
|
||||
});
|
||||
}
|
||||
}).callback,
|
||||
};
|
||||
|
||||
// If we're dup-ing, then we ask the backend to manage the fd.
|
||||
switch (xev.backend) {
|
||||
.io_uring,
|
||||
.kqueue,
|
||||
.wasi_poll,
|
||||
.iocp,
|
||||
=> {},
|
||||
|
||||
.epoll => c.flags.dup = true,
|
||||
}
|
||||
|
||||
loop.add(c);
|
||||
}
|
||||
|
||||
/// Establish a connection as a client.
|
||||
pub fn connect(
|
||||
self: Self,
|
||||
loop: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
addr: std.net.Address,
|
||||
comptime Userdata: type,
|
||||
userdata: ?*Userdata,
|
||||
comptime cb: *const fn (
|
||||
ud: ?*Userdata,
|
||||
l: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
s: Self,
|
||||
r: ConnectError!void,
|
||||
) xev.CallbackAction,
|
||||
) void {
|
||||
if (xev.backend == .wasi_poll) @compileError("unsupported in WASI");
|
||||
|
||||
c.* = .{
|
||||
.op = .{
|
||||
.connect = .{
|
||||
.socket = self.fd,
|
||||
.addr = addr,
|
||||
},
|
||||
},
|
||||
|
||||
.userdata = userdata,
|
||||
.callback = (struct {
|
||||
fn callback(
|
||||
ud: ?*anyopaque,
|
||||
l_inner: *xev.Loop,
|
||||
c_inner: *xev.Completion,
|
||||
r: xev.Result,
|
||||
) xev.CallbackAction {
|
||||
return @call(.always_inline, cb, .{
|
||||
common.userdataValue(Userdata, ud),
|
||||
l_inner,
|
||||
c_inner,
|
||||
initFd(c_inner.op.connect.socket),
|
||||
if (r.connect) |_| {} else |err| err,
|
||||
});
|
||||
}
|
||||
}).callback,
|
||||
};
|
||||
|
||||
loop.add(c);
|
||||
}
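// A minimal client sketch (illustrative; `addr`, `loop`, `connected`,
// and `connectCb` are assumed to exist as in the tests below):
//
//   const client = try Self.init(addr);
//   var c: xev.Completion = undefined;
//   client.connect(&loop, &c, addr, bool, &connected, connectCb);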
|
||||
|
||||
/// Shutdown the socket. This only ever shuts down the writer side. You
/// can use the lower-level interface directly to control this if the
/// platform supports it.
|
||||
pub fn shutdown(
|
||||
self: Self,
|
||||
loop: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
comptime Userdata: type,
|
||||
userdata: ?*Userdata,
|
||||
comptime cb: *const fn (
|
||||
ud: ?*Userdata,
|
||||
l: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
s: Self,
|
||||
r: ShutdownError!void,
|
||||
) xev.CallbackAction,
|
||||
) void {
|
||||
c.* = .{
|
||||
.op = .{
|
||||
.shutdown = .{
|
||||
.socket = self.fd,
|
||||
.how = .send,
|
||||
},
|
||||
},
|
||||
.userdata = userdata,
|
||||
.callback = (struct {
|
||||
fn callback(
|
||||
ud: ?*anyopaque,
|
||||
l_inner: *xev.Loop,
|
||||
c_inner: *xev.Completion,
|
||||
r: xev.Result,
|
||||
) xev.CallbackAction {
|
||||
return @call(.always_inline, cb, .{
|
||||
common.userdataValue(Userdata, ud),
|
||||
l_inner,
|
||||
c_inner,
|
||||
initFd(c_inner.op.shutdown.socket),
|
||||
if (r.shutdown) |_| {} else |err| err,
|
||||
});
|
||||
}
|
||||
}).callback,
|
||||
};
|
||||
|
||||
loop.add(c);
|
||||
}
|
||||
|
||||
pub const AcceptError = xev.AcceptError;
|
||||
pub const ConnectError = xev.ConnectError;
|
||||
pub const ShutdownError = xev.ShutdownError;
|
||||
|
||||
test "TCP: accept/connect/send/recv/close" {
|
||||
// We have no way to get a socket in WASI from a WASI context.
|
||||
if (xev.backend == .wasi_poll) return error.SkipZigTest;
|
||||
|
||||
const testing = std.testing;
|
||||
|
||||
var loop = try xev.Loop.init(.{});
|
||||
defer loop.deinit();
|
||||
|
||||
// Choose random available port (Zig #14907)
|
||||
var address = try std.net.Address.parseIp4("127.0.0.1", 0);
|
||||
const server = try Self.init(address);
|
||||
|
||||
// Bind and listen
|
||||
try server.bind(address);
|
||||
try server.listen(1);
|
||||
|
||||
// Retrieve bound port and initialize client
|
||||
var sock_len = address.getOsSockLen();
|
||||
const fd = if (xev.backend == .iocp) @as(std.os.windows.ws2_32.SOCKET, @ptrCast(server.fd)) else server.fd;
|
||||
try posix.getsockname(fd, &address.any, &sock_len);
|
||||
const client = try Self.init(address);
|
||||
|
||||
//const address = try std.net.Address.parseIp4("127.0.0.1", 3132);
|
||||
//var server = try Self.init(address);
|
||||
//var client = try Self.init(address);
|
||||
|
||||
// Completions we need
|
||||
var c_accept: xev.Completion = undefined;
|
||||
var c_connect: xev.Completion = undefined;
|
||||
|
||||
// Accept
|
||||
var server_conn: ?Self = null;
|
||||
server.accept(&loop, &c_accept, ?Self, &server_conn, (struct {
|
||||
fn callback(
|
||||
ud: ?*?Self,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
r: AcceptError!Self,
|
||||
) xev.CallbackAction {
|
||||
ud.?.* = r catch unreachable;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// Connect
|
||||
var connected: bool = false;
|
||||
client.connect(&loop, &c_connect, address, bool, &connected, (struct {
|
||||
fn callback(
|
||||
ud: ?*bool,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
_: Self,
|
||||
r: ConnectError!void,
|
||||
) xev.CallbackAction {
|
||||
_ = r catch unreachable;
|
||||
ud.?.* = true;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// Wait for the connection to be established
|
||||
try loop.run(.until_done);
|
||||
try testing.expect(server_conn != null);
|
||||
try testing.expect(connected);
|
||||
|
||||
// Close the server
|
||||
var server_closed = false;
|
||||
server.close(&loop, &c_accept, bool, &server_closed, (struct {
|
||||
fn callback(
|
||||
ud: ?*bool,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
_: Self,
|
||||
r: Self.CloseError!void,
|
||||
) xev.CallbackAction {
|
||||
_ = r catch unreachable;
|
||||
ud.?.* = true;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
try loop.run(.until_done);
|
||||
try testing.expect(server_closed);
|
||||
|
||||
// Send
|
||||
var send_buf = [_]u8{ 1, 1, 2, 3, 5, 8, 13 };
|
||||
client.write(&loop, &c_connect, .{ .slice = &send_buf }, void, null, (struct {
|
||||
fn callback(
|
||||
_: ?*void,
|
||||
_: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
_: Self,
|
||||
_: xev.WriteBuffer,
|
||||
r: Self.WriteError!usize,
|
||||
) xev.CallbackAction {
|
||||
_ = c;
|
||||
_ = r catch unreachable;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// Receive
|
||||
var recv_buf: [128]u8 = undefined;
|
||||
var recv_len: usize = 0;
|
||||
server_conn.?.read(&loop, &c_accept, .{ .slice = &recv_buf }, usize, &recv_len, (struct {
|
||||
fn callback(
|
||||
ud: ?*usize,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
_: Self,
|
||||
_: xev.ReadBuffer,
|
||||
r: Self.ReadError!usize,
|
||||
) xev.CallbackAction {
|
||||
ud.?.* = r catch unreachable;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// Wait for the send/receive
|
||||
try loop.run(.until_done);
|
||||
try testing.expectEqualSlices(u8, &send_buf, recv_buf[0..recv_len]);
|
||||
|
||||
// Close
|
||||
server_conn.?.close(&loop, &c_accept, ?Self, &server_conn, (struct {
|
||||
fn callback(
|
||||
ud: ?*?Self,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
_: Self,
|
||||
r: Self.CloseError!void,
|
||||
) xev.CallbackAction {
|
||||
_ = r catch unreachable;
|
||||
ud.?.* = null;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
client.close(&loop, &c_connect, bool, &connected, (struct {
|
||||
fn callback(
|
||||
ud: ?*bool,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
_: Self,
|
||||
r: Self.CloseError!void,
|
||||
) xev.CallbackAction {
|
||||
_ = r catch unreachable;
|
||||
ud.?.* = false;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
try loop.run(.until_done);
|
||||
try testing.expect(server_conn == null);
|
||||
try testing.expect(!connected);
|
||||
try testing.expect(server_closed);
|
||||
}
|
||||
|
||||
// Potentially flaky - this test could hang if the sender is unable to
|
||||
// write everything to the socket for whatever reason
|
||||
// (e.g. an incorrectly sized buffer on the receiver side), or if the
// receiver is trying to receive while the sender has nothing left to send.
|
||||
//
|
||||
// Overview:
|
||||
// 1. Set up server and client sockets
|
||||
// 2. connect & accept, set SO_SNDBUF to 8kB on the client
|
||||
// 3. Try to send a 1MB buffer from client to server without queuing; this _should_
// result in a partial write, theoretically sending <= 8kB, but in practice, it
// seems to write ~32kB. Asserts that <= 100kB was written
|
||||
// 4. Set up a queued write with the remaining buffer, shutdown() the socket afterwards
|
||||
// 5. Set up a receiver that loops until it receives the entire buffer
|
||||
// 6. Assert send_buf == recv_buf
|
||||
test "TCP: Queued writes" {
|
||||
// We have no way to get a socket in WASI from a WASI context.
|
||||
if (xev.backend == .wasi_poll) return error.SkipZigTest;
|
||||
// Windows doesn't seem to respect the SNDBUF socket option.
|
||||
if (builtin.os.tag == .windows) return error.SkipZigTest;
|
||||
|
||||
const testing = std.testing;
|
||||
|
||||
var loop = try xev.Loop.init(.{});
|
||||
defer loop.deinit();
|
||||
|
||||
// Choose random available port (Zig #14907)
|
||||
var address = try std.net.Address.parseIp4("127.0.0.1", 0);
|
||||
const server = try Self.init(address);
|
||||
|
||||
// Bind and listen
|
||||
try server.bind(address);
|
||||
try server.listen(1);
|
||||
|
||||
// Retrieve bound port and initialize client
|
||||
var sock_len = address.getOsSockLen();
|
||||
try posix.getsockname(server.fd, &address.any, &sock_len);
|
||||
const client = try Self.init(address);
|
||||
|
||||
// Completions we need
|
||||
var c_accept: xev.Completion = undefined;
|
||||
var c_connect: xev.Completion = undefined;
|
||||
|
||||
// Accept
|
||||
var server_conn: ?Self = null;
|
||||
server.accept(&loop, &c_accept, ?Self, &server_conn, (struct {
|
||||
fn callback(
|
||||
ud: ?*?Self,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
r: AcceptError!Self,
|
||||
) xev.CallbackAction {
|
||||
ud.?.* = r catch unreachable;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// Connect
|
||||
var connected: bool = false;
|
||||
client.connect(&loop, &c_connect, address, bool, &connected, (struct {
|
||||
fn callback(
|
||||
ud: ?*bool,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
_: Self,
|
||||
r: ConnectError!void,
|
||||
) xev.CallbackAction {
|
||||
_ = r catch unreachable;
|
||||
ud.?.* = true;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// Wait for the connection to be established
|
||||
try loop.run(.until_done);
|
||||
try testing.expect(server_conn != null);
|
||||
try testing.expect(connected);
|
||||
|
||||
// Close the server
|
||||
var server_closed = false;
|
||||
server.close(&loop, &c_accept, bool, &server_closed, (struct {
|
||||
fn callback(
|
||||
ud: ?*bool,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
_: Self,
|
||||
r: Self.CloseError!void,
|
||||
) xev.CallbackAction {
|
||||
_ = r catch unreachable;
|
||||
ud.?.* = true;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
try loop.run(.until_done);
|
||||
try testing.expect(server_closed);
|
||||
|
||||
// Unqueued send - Limit send buffer to 8kB, this should force partial writes.
|
||||
try posix.setsockopt(client.fd, posix.SOL.SOCKET, posix.SO.SNDBUF, &std.mem.toBytes(@as(c_int, 8192)));
|
||||
|
||||
const send_buf = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 } ** 100_000;
|
||||
var sent_unqueued: usize = 0;
|
||||
|
||||
// First we try to send the whole 1MB buffer in one write operation, this _should_ result
|
||||
// in a partial write.
|
||||
client.write(&loop, &c_connect, .{ .slice = &send_buf }, usize, &sent_unqueued, (struct {
|
||||
fn callback(
|
||||
sent_unqueued_inner: ?*usize,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
_: Self,
|
||||
_: xev.WriteBuffer,
|
||||
r: Self.WriteError!usize,
|
||||
) xev.CallbackAction {
|
||||
sent_unqueued_inner.?.* = r catch unreachable;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// Make sure that we sent a small fraction of the buffer
|
||||
try loop.run(.until_done);
|
||||
// SO_SNDBUF doesn't seem to be respected exactly; sent_unqueued will often be ~32kB
// even though SO_SNDBUF was set to 8kB.
|
||||
try testing.expect(sent_unqueued < (send_buf.len / 10));
|
||||
|
||||
// Set up queued write
|
||||
var w_queue = Self.WriteQueue{};
|
||||
var wr_send: xev.TCP.WriteRequest = undefined;
|
||||
var sent_queued: usize = 0;
|
||||
const queued_slice = send_buf[sent_unqueued..];
|
||||
client.queueWrite(&loop, &w_queue, &wr_send, .{ .slice = queued_slice }, usize, &sent_queued, (struct {
|
||||
fn callback(
|
||||
sent_queued_inner: ?*usize,
|
||||
l: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
tcp: Self,
|
||||
_: xev.WriteBuffer,
|
||||
r: Self.WriteError!usize,
|
||||
) xev.CallbackAction {
|
||||
sent_queued_inner.?.* = r catch unreachable;
|
||||
|
||||
tcp.shutdown(l, c, void, null, (struct {
|
||||
fn callback(
|
||||
_: ?*void,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
_: Self,
|
||||
_: Self.ShutdownError!void,
|
||||
) xev.CallbackAction {
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// Set up receiver which is going to keep reading until it reads the full
|
||||
// send buffer
|
||||
const Receiver = struct {
|
||||
loop: *xev.Loop,
|
||||
conn: Self,
|
||||
completion: xev.Completion = .{},
|
||||
buf: [send_buf.len]u8 = undefined,
|
||||
bytes_read: usize = 0,
|
||||
|
||||
pub fn read(receiver: *@This()) void {
|
||||
if (receiver.bytes_read == receiver.buf.len) return;
|
||||
|
||||
const read_buf = xev.ReadBuffer{
|
||||
.slice = receiver.buf[receiver.bytes_read..],
|
||||
};
|
||||
receiver.conn.read(receiver.loop, &receiver.completion, read_buf, @This(), receiver, readCb);
|
||||
}
|
||||
|
||||
pub fn readCb(
|
||||
receiver_opt: ?*@This(),
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
_: Self,
|
||||
_: xev.ReadBuffer,
|
||||
r: Self.ReadError!usize,
|
||||
) xev.CallbackAction {
|
||||
var receiver = receiver_opt.?;
|
||||
const n_bytes = r catch unreachable;
|
||||
|
||||
receiver.bytes_read += n_bytes;
|
||||
if (receiver.bytes_read < send_buf.len) {
|
||||
receiver.read();
|
||||
}
|
||||
|
||||
return .disarm;
|
||||
}
|
||||
};
|
||||
var receiver = Receiver{
|
||||
.loop = &loop,
|
||||
.conn = server_conn.?,
|
||||
};
|
||||
receiver.read();
|
||||
|
||||
// Wait for the send/receive
|
||||
try loop.run(.until_done);
|
||||
try testing.expectEqualSlices(u8, &send_buf, receiver.buf[0..receiver.bytes_read]);
|
||||
try testing.expect(send_buf.len == sent_unqueued + sent_queued);
|
||||
|
||||
// Close
|
||||
server_conn.?.close(&loop, &c_accept, ?Self, &server_conn, (struct {
|
||||
fn callback(
|
||||
ud: ?*?Self,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
_: Self,
|
||||
r: Self.CloseError!void,
|
||||
) xev.CallbackAction {
|
||||
_ = r catch unreachable;
|
||||
ud.?.* = null;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
client.close(&loop, &c_connect, bool, &connected, (struct {
|
||||
fn callback(
|
||||
ud: ?*bool,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
_: Self,
|
||||
r: Self.CloseError!void,
|
||||
) xev.CallbackAction {
|
||||
_ = r catch unreachable;
|
||||
ud.?.* = false;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
try loop.run(.until_done);
|
||||
try testing.expect(server_conn == null);
|
||||
try testing.expect(!connected);
|
||||
try testing.expect(server_closed);
|
||||
}
|
||||
};
|
||||
}
|
411
deps/libxev/src/watcher/timer.zig
vendored
Normal file
@@ -0,0 +1,411 @@
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const assert = std.debug.assert;
|
||||
const os = std.os;
|
||||
|
||||
/// A timer fires a callback after a specified amount of time. A timer can
|
||||
/// repeat by returning "rearm" in the callback or by rescheduling the
|
||||
/// start within the callback.
|
||||
pub fn Timer(comptime xev: type) type {
|
||||
return struct {
|
||||
const Self = @This();
|
||||
|
||||
/// Create a new timer.
|
||||
pub fn init() !Self {
|
||||
return .{};
|
||||
}
|
||||
|
||||
pub fn deinit(self: *const Self) void {
|
||||
// Nothing for now.
|
||||
_ = self;
|
||||
}
|
||||
|
||||
/// Start the timer. The timer will execute in next_ms milliseconds from
|
||||
/// now.
|
||||
///
|
||||
/// This will use the monotonic clock on your system if available, so
/// this is immune to system clock changes or drift. The callback is
/// guaranteed to fire NO EARLIER THAN "next_ms" milliseconds. We can't
/// make any guarantees about exactness or time bounds because it's possible
/// for your OS to just... pause... the process for an indefinite period of
/// time.
|
||||
///
|
||||
/// Like everything else in libxev, if you want something to repeat, you
/// must then requeue the completion manually. This punts off one of the
/// "hard" aspects of timers: it is up to you to determine what the semantic
/// meaning of intervals is. For example, if you want a timer to repeat every
/// 10 seconds, is it every 10th second of a wall clock? every 10th second
/// after an invocation? every 10th second after the work time from the
/// invocation? You have the power to answer these questions, manually.
|
||||
pub fn run(
|
||||
self: Self,
|
||||
loop: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
next_ms: u64,
|
||||
comptime Userdata: type,
|
||||
userdata: ?*Userdata,
|
||||
comptime cb: *const fn (
|
||||
ud: ?*Userdata,
|
||||
l: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
r: RunError!void,
|
||||
) xev.CallbackAction,
|
||||
) void {
|
||||
_ = self;
|
||||
|
||||
loop.timer(c, next_ms, userdata, (struct {
|
||||
fn callback(
|
||||
ud: ?*anyopaque,
|
||||
l_inner: *xev.Loop,
|
||||
c_inner: *xev.Completion,
|
||||
r: xev.Result,
|
||||
) xev.CallbackAction {
|
||||
return @call(.always_inline, cb, .{
|
||||
@as(?*Userdata, if (Userdata == void) null else @ptrCast(@alignCast(ud))),
|
||||
l_inner,
|
||||
c_inner,
|
||||
if (r.timer) |trigger| @as(RunError!void, switch (trigger) {
|
||||
.request, .expiration => {},
|
||||
.cancel => error.Canceled,
|
||||
}) else |err| err,
|
||||
});
|
||||
}
|
||||
}).callback);
|
||||
}
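// A repeating-timer sketch (illustrative): rather than returning .rearm,
// re-run the timer from within the callback, e.g. to fire roughly every
// 10ms measured from each invocation. Here `UD` is a hypothetical userdata
// type that carries a reference to the Timer:
//
//   fn cb(ud: ?*UD, l: *xev.Loop, c: *xev.Completion, r: RunError!void) xev.CallbackAction {
//       _ = r catch return .disarm;
//       ud.?.timer.run(l, c, 10, UD, ud, cb);
//       return .disarm;
//   }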
|
||||
|
||||
/// Reset a timer to execute in next_ms milliseconds. If the timer
|
||||
/// is already started, this will stop it and restart it. If the
|
||||
/// timer has never been started, this is equivalent to running "run".
|
||||
/// In every case, the timer callback is updated to the given userdata
|
||||
/// and callback.
|
||||
///
|
||||
/// This requires an additional completion c_cancel to represent
|
||||
/// the need to possibly cancel the previous timer. You can check
|
||||
/// if c_cancel was used by checking the state() after the call.
|
||||
///
|
||||
/// VERY IMPORTANT: both c and c_cancel MUST NOT be undefined. They
|
||||
/// must be initialized to ".{}" if being used for the first time.
|
||||
pub fn reset(
|
||||
self: Self,
|
||||
loop: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
c_cancel: *xev.Completion,
|
||||
next_ms: u64,
|
||||
comptime Userdata: type,
|
||||
userdata: ?*Userdata,
|
||||
comptime cb: *const fn (
|
||||
ud: ?*Userdata,
|
||||
l: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
r: RunError!void,
|
||||
) xev.CallbackAction,
|
||||
) void {
|
||||
_ = self;
|
||||
|
||||
loop.timer_reset(c, c_cancel, next_ms, userdata, (struct {
|
||||
fn callback(
|
||||
ud: ?*anyopaque,
|
||||
l_inner: *xev.Loop,
|
||||
c_inner: *xev.Completion,
|
||||
r: xev.Result,
|
||||
) xev.CallbackAction {
|
||||
return @call(.always_inline, cb, .{
|
||||
@as(?*Userdata, if (Userdata == void) null else @ptrCast(@alignCast(ud))),
|
||||
l_inner,
|
||||
c_inner,
|
||||
if (r.timer) |trigger| @as(RunError!void, switch (trigger) {
|
||||
.request, .expiration => {},
|
||||
.cancel => error.Canceled,
|
||||
}) else |err| err,
|
||||
});
|
||||
}
|
||||
}).callback);
|
||||
}
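// A minimal reset sketch (illustrative). Per the note above, both
// completions must be initialized with ".{}" before first use:
//
//   var c_timer: xev.Completion = .{};
//   var c_cancel: xev.Completion = .{};
//   timer.run(&loop, &c_timer, 100, bool, &fired, cb);
//   timer.reset(&loop, &c_timer, &c_cancel, 50, bool, &fired, cb);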
|
||||
|
||||
/// Cancel a previously started timer. The timer to cancel used the
/// completion "c_timer". A new completion "c_cancel" must be specified which
/// will be called with the callback once cancellation is complete.
|
||||
///
|
||||
/// The original timer will still have its callback fired but with the
|
||||
/// error "error.Canceled".
|
||||
pub fn cancel(
|
||||
self: Self,
|
||||
loop: *xev.Loop,
|
||||
c_timer: *xev.Completion,
|
||||
c_cancel: *xev.Completion,
|
||||
comptime Userdata: type,
|
||||
userdata: ?*Userdata,
|
||||
comptime cb: *const fn (
|
||||
ud: ?*Userdata,
|
||||
l: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
r: CancelError!void,
|
||||
) xev.CallbackAction,
|
||||
) void {
|
||||
_ = self;
|
||||
|
||||
c_cancel.* = switch (xev.backend) {
|
||||
.io_uring => .{
|
||||
.op = .{
|
||||
.timer_remove = .{
|
||||
.timer = c_timer,
|
||||
},
|
||||
},
|
||||
|
||||
.userdata = userdata,
|
||||
.callback = (struct {
|
||||
fn callback(
|
||||
ud: ?*anyopaque,
|
||||
l_inner: *xev.Loop,
|
||||
c_inner: *xev.Completion,
|
||||
r: xev.Result,
|
||||
) xev.CallbackAction {
|
||||
return @call(.always_inline, cb, .{
|
||||
@as(?*Userdata, if (Userdata == void) null else @ptrCast(@alignCast(ud))),
|
||||
l_inner,
|
||||
c_inner,
|
||||
if (r.timer_remove) |_| {} else |err| err,
|
||||
});
|
||||
}
|
||||
}).callback,
|
||||
},
|
||||
|
||||
.epoll,
|
||||
.kqueue,
|
||||
.wasi_poll,
|
||||
.iocp,
|
||||
=> .{
|
||||
.op = .{
|
||||
.cancel = .{
|
||||
.c = c_timer,
|
||||
},
|
||||
},
|
||||
|
||||
.userdata = userdata,
|
||||
.callback = (struct {
|
||||
fn callback(
|
||||
ud: ?*anyopaque,
|
||||
l_inner: *xev.Loop,
|
||||
c_inner: *xev.Completion,
|
||||
r: xev.Result,
|
||||
) xev.CallbackAction {
|
||||
return @call(.always_inline, cb, .{
|
||||
@as(?*Userdata, if (Userdata == void) null else @ptrCast(@alignCast(ud))),
|
||||
l_inner,
|
||||
c_inner,
|
||||
if (r.cancel) |_| {} else |err| err,
|
||||
});
|
||||
}
|
||||
}).callback,
|
||||
},
|
||||
};
|
||||
|
||||
loop.add(c_cancel);
|
||||
}
|
||||
|
||||
/// Error that could happen while running a timer.
|
||||
pub const RunError = error{
|
||||
/// The timer was canceled before it could expire
|
||||
Canceled,
|
||||
|
||||
/// Some unexpected error.
|
||||
Unexpected,
|
||||
};
|
||||
|
||||
pub const CancelError = xev.CancelError;
|
||||
|
||||
test "timer" {
|
||||
const testing = std.testing;
|
||||
|
||||
var loop = try xev.Loop.init(.{});
|
||||
defer loop.deinit();
|
||||
|
||||
var timer = try init();
|
||||
defer timer.deinit();
|
||||
|
||||
// Add the timer
|
||||
var called = false;
|
||||
var c1: xev.Completion = undefined;
|
||||
timer.run(&loop, &c1, 1, bool, &called, (struct {
|
||||
fn callback(
|
||||
ud: ?*bool,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
r: RunError!void,
|
||||
) xev.CallbackAction {
|
||||
_ = r catch unreachable;
|
||||
ud.?.* = true;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// Wait
|
||||
try loop.run(.until_done);
|
||||
try testing.expect(called);
|
||||
}
|
||||
|
||||
test "timer reset" {
|
||||
const testing = std.testing;
|
||||
|
||||
var loop = try xev.Loop.init(.{});
|
||||
defer loop.deinit();
|
||||
|
||||
var timer = try init();
|
||||
defer timer.deinit();
|
||||
|
||||
var c_timer: xev.Completion = .{};
|
||||
var c_cancel: xev.Completion = .{};
|
||||
const cb = (struct {
|
||||
fn callback(
|
||||
ud: ?*bool,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
r: RunError!void,
|
||||
) xev.CallbackAction {
|
||||
_ = r catch unreachable;
|
||||
ud.?.* = true;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback;
|
||||
|
||||
// Add the timer
|
||||
var canceled = false;
|
||||
timer.run(&loop, &c_timer, 100_000, bool, &canceled, cb);
|
||||
|
||||
// Wait
|
||||
try loop.run(.no_wait);
|
||||
try testing.expect(!canceled);
|
||||
|
||||
// Reset it
|
||||
timer.reset(&loop, &c_timer, &c_cancel, 1, bool, &canceled, cb);
|
||||
|
||||
try loop.run(.until_done);
|
||||
try testing.expect(canceled);
|
||||
try testing.expect(c_timer.state() == .dead);
|
||||
try testing.expect(c_cancel.state() == .dead);
|
||||
}
|
||||
|
||||
test "timer reset before tick" {
|
||||
const testing = std.testing;
|
||||
|
||||
var loop = try xev.Loop.init(.{});
|
||||
defer loop.deinit();
|
||||
|
||||
var timer = try init();
|
||||
defer timer.deinit();
|
||||
|
||||
var c_timer: xev.Completion = .{};
|
||||
var c_cancel: xev.Completion = .{};
|
||||
const cb = (struct {
|
||||
fn callback(
|
||||
ud: ?*bool,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
r: RunError!void,
|
||||
) xev.CallbackAction {
|
||||
_ = r catch unreachable;
|
||||
ud.?.* = true;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback;
|
||||
|
||||
// Add the timer
|
||||
var canceled = false;
|
||||
timer.run(&loop, &c_timer, 100_000, bool, &canceled, cb);
|
||||
|
||||
// Reset it
|
||||
timer.reset(&loop, &c_timer, &c_cancel, 1, bool, &canceled, cb);
|
||||
|
||||
try loop.run(.until_done);
|
||||
try testing.expect(canceled);
|
||||
try testing.expect(c_timer.state() == .dead);
|
||||
try testing.expect(c_cancel.state() == .dead);
|
||||
}
|
||||
|
||||
test "timer reset after trigger" {
|
||||
const testing = std.testing;
|
||||
|
||||
var loop = try xev.Loop.init(.{});
|
||||
defer loop.deinit();
|
||||
|
||||
var timer = try init();
|
||||
defer timer.deinit();
|
||||
|
||||
var c_timer: xev.Completion = .{};
|
||||
var c_cancel: xev.Completion = .{};
|
||||
const cb = (struct {
|
||||
fn callback(
|
||||
ud: ?*bool,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
r: RunError!void,
|
||||
) xev.CallbackAction {
|
||||
_ = r catch unreachable;
|
||||
ud.?.* = true;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback;
|
||||
|
||||
// Add the timer
|
||||
var canceled = false;
|
||||
timer.run(&loop, &c_timer, 1, bool, &canceled, cb);
|
||||
try loop.run(.until_done);
|
||||
try testing.expect(canceled);
|
||||
canceled = false;
|
||||
|
||||
// Reset it
|
||||
timer.reset(&loop, &c_timer, &c_cancel, 1, bool, &canceled, cb);
|
||||
|
||||
try loop.run(.until_done);
|
||||
try testing.expect(canceled);
|
||||
try testing.expect(c_timer.state() == .dead);
|
||||
try testing.expect(c_cancel.state() == .dead);
|
||||
}
|
||||
|
||||
test "timer cancel" {
|
||||
const testing = std.testing;
|
||||
|
||||
var loop = try xev.Loop.init(.{});
|
||||
defer loop.deinit();
|
||||
|
||||
var timer = try init();
|
||||
defer timer.deinit();
|
||||
|
||||
// Add the timer
|
||||
var canceled = false;
|
||||
var c1: xev.Completion = undefined;
|
||||
timer.run(&loop, &c1, 100_000, bool, &canceled, (struct {
|
||||
fn callback(
|
||||
ud: ?*bool,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
r: RunError!void,
|
||||
) xev.CallbackAction {
|
||||
ud.?.* = if (r) false else |err| err == error.Canceled;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// Cancel
|
||||
var cancel_confirm = false;
|
||||
var c2: xev.Completion = undefined;
|
||||
timer.cancel(&loop, &c1, &c2, bool, &cancel_confirm, (struct {
|
||||
fn callback(
|
||||
ud: ?*bool,
|
||||
_: *xev.Loop,
|
||||
_: *xev.Completion,
|
||||
r: CancelError!void,
|
||||
) xev.CallbackAction {
|
||||
_ = r catch unreachable;
|
||||
ud.?.* = true;
|
||||
return .disarm;
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// Wait
|
||||
try loop.run(.until_done);
|
||||
try testing.expect(canceled);
|
||||
try testing.expect(cancel_confirm);
|
||||
}
|
||||
};
|
||||
}
|
770
deps/libxev/src/watcher/udp.zig
vendored
Normal file
@@ -0,0 +1,770 @@
|
||||
const std = @import("std");
|
||||
const assert = std.debug.assert;
|
||||
const posix = std.posix;
|
||||
const stream = @import("stream.zig");
|
||||
const common = @import("common.zig");
|
||||
|
||||
/// UDP client and server.
|
||||
///
|
||||
/// This is a "higher-level abstraction" in libxev. The goal of higher-level
|
||||
/// abstractions in libxev are to make it easier to use specific functionality
|
||||
/// with the event loop, but does not promise perfect flexibility or optimal
|
||||
/// performance. In almost all cases, the abstraction is good enough. But,
|
||||
/// if you have specific needs or want to push for the most optimal performance,
|
||||
/// use the platform-specific Loop directly.
|
||||
pub fn UDP(comptime xev: type) type {
|
||||
return switch (xev.backend) {
|
||||
// Supported, uses sendmsg/recvmsg exclusively
|
||||
.io_uring,
|
||||
.epoll,
|
||||
=> UDPSendMsg(xev),
|
||||
|
||||
// Supported, uses sendto/recvfrom
|
||||
.kqueue => UDPSendto(xev),
|
||||
|
||||
// Supported with tweaks
|
||||
.iocp => UDPSendtoIOCP(xev),
|
||||
|
||||
// Noop
|
||||
.wasi_poll => struct {},
|
||||
};
|
||||
}
|
||||
|
||||
/// UDP implementation that uses sendto/recvfrom.
|
||||
fn UDPSendto(comptime xev: type) type {
|
||||
return struct {
|
||||
const Self = @This();
|
||||
|
||||
fd: posix.socket_t,
|
||||
|
||||
/// See UDPSendMsg.State
|
||||
pub const State = struct {
|
||||
userdata: ?*anyopaque,
|
||||
};
|
||||
|
||||
pub usingnamespace stream.Stream(xev, Self, .{
|
||||
.close = true,
|
||||
.read = .none,
|
||||
.write = .none,
|
||||
});
|
||||
|
||||
/// Initialize a new UDP with the family from the given address. Only
|
||||
/// the family is used, the actual address has no impact on the created
|
||||
/// resource.
|
||||
pub fn init(addr: std.net.Address) !Self {
|
||||
return .{
|
||||
.fd = try posix.socket(
|
||||
addr.any.family,
|
||||
posix.SOCK.NONBLOCK | posix.SOCK.DGRAM | posix.SOCK.CLOEXEC,
|
||||
0,
|
||||
),
|
||||
};
|
||||
}
|
||||
|
||||
/// Initialize a UDP socket from a file descriptor.
|
||||
pub fn initFd(fd: posix.socket_t) Self {
|
||||
return .{
|
||||
.fd = fd,
|
||||
};
|
||||
}
|
||||
|
||||
/// Bind the address to the socket.
|
||||
pub fn bind(self: Self, addr: std.net.Address) !void {
|
||||
try posix.setsockopt(self.fd, posix.SOL.SOCKET, posix.SO.REUSEPORT, &std.mem.toBytes(@as(c_int, 1)));
|
||||
try posix.setsockopt(self.fd, posix.SOL.SOCKET, posix.SO.REUSEADDR, &std.mem.toBytes(@as(c_int, 1)));
|
||||
try posix.bind(self.fd, &addr.any, addr.getOsSockLen());
|
||||
}
|
||||
|
||||
/// Read from the socket. This performs a single read. The callback must
/// requeue the read if additional reads are desired. Multiple simultaneous
/// reads can be queued by calling this multiple times. Note that depending
/// on the backend, the reads can happen out of order.
|
||||
pub fn read(
|
||||
self: Self,
|
||||
loop: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
s: *State,
|
||||
buf: xev.ReadBuffer,
|
||||
comptime Userdata: type,
|
||||
userdata: ?*Userdata,
|
||||
comptime cb: *const fn (
|
||||
ud: ?*Userdata,
|
||||
l: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
s: *State,
|
||||
addr: std.net.Address,
|
||||
s: Self,
|
||||
b: xev.ReadBuffer,
|
||||
r: ReadError!usize,
|
||||
) xev.CallbackAction,
|
||||
) void {
|
||||
s.* = .{
|
||||
.userdata = userdata,
|
||||
};
|
||||
|
||||
switch (buf) {
|
||||
inline .slice, .array => {
|
||||
c.* = .{
|
||||
.op = .{
|
||||
.recvfrom = .{
|
||||
.fd = self.fd,
|
||||
.buffer = buf,
|
||||
},
|
||||
},
|
||||
.userdata = s,
|
||||
.callback = (struct {
|
||||
fn callback(
|
||||
ud: ?*anyopaque,
|
||||
l_inner: *xev.Loop,
|
||||
c_inner: *xev.Completion,
|
||||
r: xev.Result,
|
||||
) xev.CallbackAction {
|
||||
const s_inner = @as(?*State, @ptrCast(@alignCast(ud))).?;
|
||||
return @call(.always_inline, cb, .{
|
||||
common.userdataValue(Userdata, s_inner.userdata),
|
||||
l_inner,
|
||||
c_inner,
|
||||
s_inner,
|
||||
std.net.Address.initPosix(@alignCast(&c_inner.op.recvfrom.addr)),
|
||||
initFd(c_inner.op.recvfrom.fd),
|
||||
c_inner.op.recvfrom.buffer,
|
||||
r.recvfrom,
|
||||
});
|
||||
}
|
||||
}).callback,
|
||||
};
|
||||
|
||||
loop.add(c);
|
||||
},
|
||||
}
|
||||
}
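// A minimal receive sketch (illustrative; `loop`, `buf`, and `readCb`
// are assumed to exist). The callback receives the sender's address and
// must requeue the read to keep receiving:
//
//   var state: State = undefined;
//   var c: xev.Completion = undefined;
//   udp.read(&loop, &c, &state, .{ .slice = &buf }, void, null, readCb);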
|
||||
|
||||
/// Write to the socket. This performs a single write. Additional writes
|
||||
/// can be queued by calling this multiple times. Note that depending on the
|
||||
/// backend, writes can happen out of order.
|
||||
pub fn write(
|
||||
self: Self,
|
||||
loop: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
s: *State,
|
||||
addr: std.net.Address,
|
||||
buf: xev.WriteBuffer,
|
||||
comptime Userdata: type,
|
||||
userdata: ?*Userdata,
|
||||
comptime cb: *const fn (
|
||||
ud: ?*Userdata,
|
||||
l: *xev.Loop,
|
||||
c: *xev.Completion,
|
||||
s: *State,
|
||||
s: Self,
|
||||
b: xev.WriteBuffer,
|
||||
r: WriteError!usize,
|
||||
) xev.CallbackAction,
|
||||
) void {
|
||||
s.* = .{
|
||||
.userdata = userdata,
|
||||
};
|
||||
|
||||
switch (buf) {
|
||||
inline .slice, .array => {
|
||||
c.* = .{
|
||||
.op = .{
|
||||
.sendto = .{
|
||||
.fd = self.fd,
|
||||
.buffer = buf,
|
||||
.addr = addr,
|
||||
},
|
||||
},
|
||||
.userdata = s,
|
||||
.callback = (struct {
|
||||
fn callback(
|
||||
ud: ?*anyopaque,
|
||||
l_inner: *xev.Loop,
|
||||
c_inner: *xev.Completion,
|
||||
r: xev.Result,
|
||||
) xev.CallbackAction {
|
||||
const s_inner = @as(?*State, @ptrCast(@alignCast(ud))).?;
|
||||
return @call(.always_inline, cb, .{
|
||||
common.userdataValue(Userdata, s_inner.userdata),
|
||||
l_inner,
|
||||
c_inner,
|
||||
s_inner,
|
||||
initFd(c_inner.op.sendto.fd),
|
||||
c_inner.op.sendto.buffer,
|
||||
r.sendto,
|
||||
});
|
||||
}
|
||||
}).callback,
|
||||
};
|
||||
|
||||
loop.add(c);
|
||||
},
|
||||
}
|
||||
}
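// A minimal send sketch (illustrative; `loop`, `addr`, and `writeCb` are
// assumed to exist). Note the extra State that UDP operations require:
//
//   var state: State = undefined;
//   var c: xev.Completion = undefined;
//   udp.write(&loop, &c, &state, addr, .{ .slice = "ping" }, void, null, writeCb);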
|
||||
|
||||
pub const ReadError = xev.ReadError;
|
||||
pub const WriteError = xev.WriteError;
|
||||
|
||||
/// Common tests
|
||||
pub usingnamespace UDPTests(xev, Self);
|
||||
};
|
||||
}
|
||||
|
||||
/// UDP implementation that uses sendto/recvfrom.
|
||||
fn UDPSendtoIOCP(comptime xev: type) type {
|
||||
return struct {
|
||||
const Self = @This();
|
||||
const windows = std.os.windows;
|
||||
|
||||
fd: windows.HANDLE,
|
||||
|
||||
/// See UDPSendMsg.State
|
||||
pub const State = struct {
|
||||
userdata: ?*anyopaque,
|
||||
};
|
||||
|
||||
pub usingnamespace stream.Stream(xev, Self, .{
|
||||
.close = true,
|
||||
.read = .none,
|
||||
.write = .none,
|
||||
});
|
||||
|
||||
/// Initialize a new UDP with the family from the given address. Only
|
||||
/// the family is used, the actual address has no impact on the created
|
||||
/// resource.
|
||||
pub fn init(addr: std.net.Address) !Self {
|
||||
const socket = try windows.WSASocketW(addr.any.family, posix.SOCK.DGRAM, 0, null, 0, windows.ws2_32.WSA_FLAG_OVERLAPPED);
|
||||
|
||||
return .{
|
||||
.fd = socket,
|
||||
};
|
||||
}
|
||||
|
||||
/// Initialize a UDP socket from a file descriptor.
|
||||
pub fn initFd(fd: windows.HANDLE) Self {
|
||||
return .{
|
||||
.fd = fd,
|
||||
};
|
||||
}
|
||||
|
||||
/// Bind the address to the socket.
|
||||
pub fn bind(self: Self, addr: std.net.Address) !void {
|
||||
const socket = @as(windows.ws2_32.SOCKET, @ptrCast(self.fd));
|
||||
try posix.setsockopt(socket, posix.SOL.SOCKET, posix.SO.REUSEADDR, &std.mem.toBytes(@as(c_int, 1)));
|
||||
try posix.bind(socket, &addr.any, addr.getOsSockLen());
|
||||
}
|
||||
|
||||
/// Read from the socket. This performs a single read. The callback must
/// requeue the read if additional reads are desired. Multiple simultaneous
/// reads can be queued by calling this multiple times. Note that depending
/// on the backend, the reads can happen out of order.
|
||||
///
|
||||
/// TODO(mitchellh): a way to receive the remote addr
        pub fn read(
            self: Self,
            loop: *xev.Loop,
            c: *xev.Completion,
            s: *State,
            buf: xev.ReadBuffer,
            comptime Userdata: type,
            userdata: ?*Userdata,
            comptime cb: *const fn (
                ud: ?*Userdata,
                l: *xev.Loop,
                c: *xev.Completion,
                s: *State,
                addr: std.net.Address,
                s: Self,
                b: xev.ReadBuffer,
                r: ReadError!usize,
            ) xev.CallbackAction,
        ) void {
            s.* = .{
                .userdata = userdata,
            };

            switch (buf) {
                inline .slice, .array => {
                    c.* = .{
                        .op = .{
                            .recvfrom = .{
                                .fd = self.fd,
                                .buffer = buf,
                            },
                        },
                        .userdata = s,
                        .callback = (struct {
                            fn callback(
                                ud: ?*anyopaque,
                                l_inner: *xev.Loop,
                                c_inner: *xev.Completion,
                                r: xev.Result,
                            ) xev.CallbackAction {
                                const s_inner: *State = @ptrCast(@alignCast(ud.?));
                                return @call(.always_inline, cb, .{
                                    common.userdataValue(Userdata, s_inner.userdata),
                                    l_inner,
                                    c_inner,
                                    s_inner,
                                    std.net.Address.initPosix(@alignCast(&c_inner.op.recvfrom.addr)),
                                    initFd(c_inner.op.recvfrom.fd),
                                    c_inner.op.recvfrom.buffer,
                                    r.recvfrom,
                                });
                            }
                        }).callback,
                    };

                    loop.add(c);
                },
            }
        }
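
        // Illustrative sketch, not part of the vendored source: read() queues a
        // single recvfrom, so a caller that wants a continuous stream of
        // datagrams has to requeue from the callback. In libxev this is
        // conventionally done by returning .rearm instead of .disarm, which
        // re-adds the same completion (the MyCtx name below is hypothetical):
        //
        //     fn recvCb(
        //         ud: ?*MyCtx,
        //         l: *xev.Loop,
        //         c: *xev.Completion,
        //         s: *State,
        //         addr: std.net.Address,
        //         udp: Self,
        //         b: xev.ReadBuffer,
        //         r: ReadError!usize,
        //     ) xev.CallbackAction {
        //         _ = .{ ud, l, c, s, addr, udp, b };
        //         _ = r catch return .disarm; // stop on error
        //         return .rearm; // requeue the same recvfrom with the same buffer
        //     }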

        /// Write to the socket. This performs a single write. Additional writes
        /// can be queued by calling this multiple times. Note that depending on
        /// the backend, writes can happen out of order.
        pub fn write(
            self: Self,
            loop: *xev.Loop,
            c: *xev.Completion,
            s: *State,
            addr: std.net.Address,
            buf: xev.WriteBuffer,
            comptime Userdata: type,
            userdata: ?*Userdata,
            comptime cb: *const fn (
                ud: ?*Userdata,
                l: *xev.Loop,
                c: *xev.Completion,
                s: *State,
                s: Self,
                b: xev.WriteBuffer,
                r: WriteError!usize,
            ) xev.CallbackAction,
        ) void {
            s.* = .{
                .userdata = userdata,
            };

            switch (buf) {
                inline .slice, .array => {
                    c.* = .{
                        .op = .{
                            .sendto = .{
                                .fd = self.fd,
                                .buffer = buf,
                                .addr = addr,
                            },
                        },
                        .userdata = s,
                        .callback = (struct {
                            fn callback(
                                ud: ?*anyopaque,
                                l_inner: *xev.Loop,
                                c_inner: *xev.Completion,
                                r: xev.Result,
                            ) xev.CallbackAction {
                                const s_inner: *State = @ptrCast(@alignCast(ud.?));
                                return @call(.always_inline, cb, .{
                                    common.userdataValue(Userdata, s_inner.userdata),
                                    l_inner,
                                    c_inner,
                                    s_inner,
                                    initFd(c_inner.op.sendto.fd),
                                    c_inner.op.sendto.buffer,
                                    r.sendto,
                                });
                            }
                        }).callback,
                    };

                    loop.add(c);
                },
            }
        }

        pub const ReadError = xev.ReadError;
        pub const WriteError = xev.WriteError;

        /// Common tests
        pub usingnamespace UDPTests(xev, Self);
    };
}

/// UDP implementation that uses sendmsg/recvmsg.
fn UDPSendMsg(comptime xev: type) type {
    return struct {
        const Self = @This();

        fd: posix.socket_t,

        /// UDP requires some extra state to perform operations. The state is
        /// opaque. This isn't part of xev.Completion because it is relatively
        /// large and would force ALL operations (not just UDP) to have a larger
        /// structure size, a cost we didn't want to pay.
        pub const State = struct {
            userdata: ?*anyopaque = null,
            op: union {
                recv: struct {
                    buf: xev.ReadBuffer,
                    addr_buffer: std.posix.sockaddr.storage = undefined,
                    msghdr: std.posix.msghdr,
                    iov: [1]std.posix.iovec,
                },

                send: struct {
                    buf: xev.WriteBuffer,
                    addr: std.net.Address,
                    msghdr: std.posix.msghdr_const,
                    iov: [1]std.posix.iovec_const,
                },
            },
        };
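
        // Illustrative sketch, not part of the vendored source: because the
        // msghdr/iovec state lives in this State struct rather than in
        // xev.Completion, only UDP call sites pay for it. A caller pairs one
        // State with one Completion, both caller-owned for the duration of the
        // operation (udp, loop, and myReadCb below are hypothetical names):
        //
        //     var c: xev.Completion = undefined;
        //     var s: State = undefined;
        //     var buf: [1500]u8 = undefined;
        //     udp.read(&loop, &c, &s, .{ .slice = &buf }, void, null, myReadCb);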

        pub usingnamespace stream.Stream(xev, Self, .{
            .close = true,
            .read = .none,
            .write = .none,
        });

        /// Initialize a new UDP with the family from the given address. Only
        /// the family is used; the actual address has no impact on the created
        /// resource.
        pub fn init(addr: std.net.Address) !Self {
            // On io_uring we don't use non-blocking sockets because we may
            // just get EAGAIN over and over from completions.
            const flags = flags: {
                var flags: u32 = posix.SOCK.DGRAM | posix.SOCK.CLOEXEC;
                if (xev.backend != .io_uring) flags |= posix.SOCK.NONBLOCK;
                break :flags flags;
            };

            return .{
                .fd = try posix.socket(addr.any.family, flags, 0),
            };
        }

        /// Initialize a UDP socket from a file descriptor.
        pub fn initFd(fd: posix.socket_t) Self {
            return .{
                .fd = fd,
            };
        }

        /// Bind the address to the socket.
        pub fn bind(self: Self, addr: std.net.Address) !void {
            try posix.setsockopt(self.fd, posix.SOL.SOCKET, posix.SO.REUSEPORT, &std.mem.toBytes(@as(c_int, 1)));
            try posix.setsockopt(self.fd, posix.SOL.SOCKET, posix.SO.REUSEADDR, &std.mem.toBytes(@as(c_int, 1)));
            try posix.bind(self.fd, &addr.any, addr.getOsSockLen());
        }

        /// Read from the socket. This performs a single read. The callback must
        /// requeue the read if additional reads should be performed. Multiple
        /// reads can be queued simultaneously by calling this multiple times.
        /// Note that depending on the backend, reads can complete out of order.
        pub fn read(
            self: Self,
            loop: *xev.Loop,
            c: *xev.Completion,
            s: *State,
            buf: xev.ReadBuffer,
            comptime Userdata: type,
            userdata: ?*Userdata,
            comptime cb: *const fn (
                ud: ?*Userdata,
                l: *xev.Loop,
                c: *xev.Completion,
                s: *State,
                addr: std.net.Address,
                s: Self,
                b: xev.ReadBuffer,
                r: ReadError!usize,
            ) xev.CallbackAction,
        ) void {
            // Set the active field for runtime safety
            s.op = .{ .recv = undefined };
            s.* = .{
                .userdata = userdata,
                .op = .{
                    .recv = .{
                        .buf = buf,
                        .msghdr = .{
                            .name = @ptrCast(&s.op.recv.addr_buffer),
                            .namelen = @sizeOf(@TypeOf(s.op.recv.addr_buffer)),
                            .iov = &s.op.recv.iov,
                            .iovlen = 1,
                            .control = null,
                            .controllen = 0,
                            .flags = 0,
                        },
                        .iov = undefined,
                    },
                },
            };

            switch (s.op.recv.buf) {
                .slice => |v| {
                    s.op.recv.iov[0] = .{
                        .base = v.ptr,
                        .len = v.len,
                    };
                },

                .array => |*arr| {
                    s.op.recv.iov[0] = .{
                        .base = arr,
                        .len = arr.len,
                    };
                },
            }

            c.* = .{
                .op = .{
                    .recvmsg = .{
                        .fd = self.fd,
                        .msghdr = &s.op.recv.msghdr,
                    },
                },

                .userdata = s,
                .callback = (struct {
                    fn callback(
                        ud: ?*anyopaque,
                        l_inner: *xev.Loop,
                        c_inner: *xev.Completion,
                        r: xev.Result,
                    ) xev.CallbackAction {
                        const s_inner = @as(?*State, @ptrCast(@alignCast(ud))).?;
                        return @call(.always_inline, cb, .{
                            common.userdataValue(Userdata, s_inner.userdata),
                            l_inner,
                            c_inner,
                            s_inner,
                            std.net.Address.initPosix(@ptrCast(&s_inner.op.recv.addr_buffer)),
                            initFd(c_inner.op.recvmsg.fd),
                            s_inner.op.recv.buf,
                            if (r.recvmsg) |v| v else |err| err,
                        });
                    }
                }).callback,
            };

            // If we're dup-ing, then we ask the backend to manage the fd.
            switch (xev.backend) {
                .io_uring,
                .kqueue,
                .wasi_poll,
                .iocp,
                => {},

                .epoll => c.flags.dup = true,
            }

            loop.add(c);
        }
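
        // Illustrative sketch, not part of the vendored source: the remote
        // address handed to the callback is reconstructed above from
        // s.op.recv.addr_buffer, so an echo-style handler can capture it and
        // reply via write() with a separate caller-owned completion and state
        // (the Echo context and writeCb below are hypothetical):
        //
        //     fn echoCb(
        //         ud: ?*Echo,
        //         l: *xev.Loop,
        //         c: *xev.Completion,
        //         s: *State,
        //         addr: std.net.Address,
        //         udp: Self,
        //         b: xev.ReadBuffer,
        //         r: ReadError!usize,
        //     ) xev.CallbackAction {
        //         _ = .{ c, s, b };
        //         const n = r catch return .disarm;
        //         const e = ud.?;
        //         udp.write(l, &e.c_write, &e.s_write, addr, .{ .slice = e.buf[0..n] }, Echo, e, writeCb);
        //         return .disarm;
        //     }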

        /// Write to the socket. This performs a single write. Additional writes
        /// can be queued by calling this multiple times. Note that depending on
        /// the backend, writes can happen out of order.
        pub fn write(
            self: Self,
            loop: *xev.Loop,
            c: *xev.Completion,
            s: *State,
            addr: std.net.Address,
            buf: xev.WriteBuffer,
            comptime Userdata: type,
            userdata: ?*Userdata,
            comptime cb: *const fn (
                ud: ?*Userdata,
                l: *xev.Loop,
                c: *xev.Completion,
                s: *State,
                s: Self,
                b: xev.WriteBuffer,
                r: WriteError!usize,
            ) xev.CallbackAction,
        ) void {
            // Set the active field for runtime safety
            s.op = .{ .send = undefined };
            s.* = .{
                .userdata = userdata,
                .op = .{
                    .send = .{
                        .addr = addr,
                        .buf = buf,
                        .msghdr = .{
                            .name = &s.op.send.addr.any,
                            .namelen = addr.getOsSockLen(),
                            .iov = &s.op.send.iov,
                            .iovlen = 1,
                            .control = null,
                            .controllen = 0,
                            .flags = 0,
                        },
                        .iov = undefined,
                    },
                },
            };

            switch (s.op.send.buf) {
                .slice => |v| {
                    s.op.send.iov[0] = .{
                        .base = v.ptr,
                        .len = v.len,
                    };
                },

                .array => |*arr| {
                    s.op.send.iov[0] = .{
                        .base = &arr.array,
                        .len = arr.len,
                    };
                },
            }

            // On backends like epoll, you watch file descriptors for
            // specific events. Our implementation doesn't merge multiple
            // completions for a single fd, so we have to dup the fd. This
            // means we use more fds than strictly necessary. This isn't a
            // problem with io_uring.

            c.* = .{
                .op = .{
                    .sendmsg = .{
                        .fd = self.fd,
                        .msghdr = &s.op.send.msghdr,
                    },
                },

                .userdata = s,
                .callback = (struct {
                    fn callback(
                        ud: ?*anyopaque,
                        l_inner: *xev.Loop,
                        c_inner: *xev.Completion,
                        r: xev.Result,
                    ) xev.CallbackAction {
                        const s_inner = @as(?*State, @ptrCast(@alignCast(ud))).?;
                        return @call(.always_inline, cb, .{
                            common.userdataValue(Userdata, s_inner.userdata),
                            l_inner,
                            c_inner,
                            s_inner,
                            initFd(c_inner.op.sendmsg.fd),
                            s_inner.op.send.buf,
                            if (r.sendmsg) |v| v else |err| err,
                        });
                    }
                }).callback,
            };

            // If we're dup-ing, then we ask the backend to manage the fd.
            switch (xev.backend) {
                .io_uring,
                .kqueue,
                .wasi_poll,
                .iocp,
                => {},

                .epoll => c.flags.dup = true,
            }

            loop.add(c);
        }

        pub const ReadError = xev.ReadError;
        pub const WriteError = xev.WriteError;

        /// Common tests
        pub usingnamespace UDPTests(xev, Self);
    };
}

fn UDPTests(comptime xev: type, comptime Impl: type) type {
    return struct {
        test "UDP: read/write" {
            const testing = std.testing;

            var loop = try xev.Loop.init(.{});
            defer loop.deinit();

            const address = try std.net.Address.parseIp4("127.0.0.1", 3132);
            const server = try Impl.init(address);
            const client = try Impl.init(address);

            // Bind / Recv
            try server.bind(address);
            var c_read: xev.Completion = undefined;
            var s_read: Impl.State = undefined;
            var recv_buf: [128]u8 = undefined;
            var recv_len: usize = 0;
            server.read(&loop, &c_read, &s_read, .{ .slice = &recv_buf }, usize, &recv_len, (struct {
                fn callback(
                    ud: ?*usize,
                    _: *xev.Loop,
                    _: *xev.Completion,
                    _: *Impl.State,
                    _: std.net.Address,
                    _: Impl,
                    _: xev.ReadBuffer,
                    r: Impl.ReadError!usize,
                ) xev.CallbackAction {
                    ud.?.* = r catch unreachable;
                    return .disarm;
                }
            }).callback);

            // Send
            var send_buf = [_]u8{ 1, 1, 2, 3, 5, 8, 13 };
            var c_write: xev.Completion = undefined;
            var s_write: Impl.State = undefined;
            client.write(&loop, &c_write, &s_write, address, .{ .slice = &send_buf }, void, null, (struct {
                fn callback(
                    _: ?*void,
                    _: *xev.Loop,
                    _: *xev.Completion,
                    _: *Impl.State,
                    _: Impl,
                    _: xev.WriteBuffer,
                    r: Impl.WriteError!usize,
                ) xev.CallbackAction {
                    _ = r catch unreachable;
                    return .disarm;
                }
            }).callback);

            // Wait for the send/receive
            try loop.run(.until_done);
            try testing.expect(recv_len > 0);
            try testing.expectEqualSlices(u8, &send_buf, recv_buf[0..recv_len]);

            // Close
            server.close(&loop, &c_read, void, null, (struct {
                fn callback(
                    _: ?*void,
                    _: *xev.Loop,
                    _: *xev.Completion,
                    _: Impl,
                    r: Impl.CloseError!void,
                ) xev.CallbackAction {
                    _ = r catch unreachable;
                    return .disarm;
                }
            }).callback);
            client.close(&loop, &c_write, void, null, (struct {
                fn callback(
                    _: ?*void,
                    _: *xev.Loop,
                    _: *xev.Completion,
                    _: Impl,
                    r: Impl.CloseError!void,
                ) xev.CallbackAction {
                    _ = r catch unreachable;
                    return .disarm;
                }
            }).callback);

            try loop.run(.until_done);
        }
    };
}
220
deps/libxev/src/windows.zig
vendored
Normal file
@@ -0,0 +1,220 @@
const std = @import("std");
const windows = std.os.windows;
const posix = std.posix;

pub usingnamespace std.os.windows;

/// Namespace containing utilities missing from std
pub const exp = struct {
    pub const STATUS_PENDING = 0x00000103;
    pub const STILL_ACTIVE = STATUS_PENDING;

    pub const JOBOBJECT_ASSOCIATE_COMPLETION_PORT = extern struct {
        CompletionKey: windows.ULONG_PTR,
        CompletionPort: windows.HANDLE,
    };

    pub const JOBOBJECT_BASIC_LIMIT_INFORMATION = extern struct {
        PerProcessUserTimeLimit: windows.LARGE_INTEGER,
        PerJobUserTimeLimit: windows.LARGE_INTEGER,
        LimitFlags: windows.DWORD,
        MinimumWorkingSetSize: windows.SIZE_T,
        MaximumWorkingSetSize: windows.SIZE_T,
        ActiveProcessLimit: windows.DWORD,
        Affinity: windows.ULONG_PTR,
        PriorityClass: windows.DWORD,
        SchedulingClass: windows.DWORD,
    };

    pub const IO_COUNTERS = extern struct {
        ReadOperationCount: windows.ULONGLONG,
        WriteOperationCount: windows.ULONGLONG,
        OtherOperationCount: windows.ULONGLONG,
        ReadTransferCount: windows.ULONGLONG,
        WriteTransferCount: windows.ULONGLONG,
        OtherTransferCount: windows.ULONGLONG,
    };

    pub const JOBOBJECT_EXTENDED_LIMIT_INFORMATION = extern struct {
        BasicLimitInformation: JOBOBJECT_BASIC_LIMIT_INFORMATION,
        IoInfo: IO_COUNTERS,
        ProcessMemoryLimit: windows.SIZE_T,
        JobMemoryLimit: windows.SIZE_T,
        PeakProcessMemoryUsed: windows.SIZE_T,
        PeakJobMemoryUsed: windows.SIZE_T,
    };

    pub const JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 0x00000008;
    pub const JOB_OBJECT_LIMIT_AFFINITY = 0x00000010;
    pub const JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800;
    pub const JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400;
    pub const JOB_OBJECT_LIMIT_JOB_MEMORY = 0x00000200;
    pub const JOB_OBJECT_LIMIT_JOB_TIME = 0x00000004;
    pub const JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000;
    pub const JOB_OBJECT_LIMIT_PRESERVE_JOB_TIME = 0x00000004;
    pub const JOB_OBJECT_LIMIT_PRIORITY_CLASS = 0x00000020;
    pub const JOB_OBJECT_LIMIT_PROCESS_MEMORY = 0x00000100;
    pub const JOB_OBJECT_LIMIT_PROCESS_TIME = 0x00000002;
    pub const JOB_OBJECT_LIMIT_SCHEDULING_CLASS = 0x00000080;
    pub const JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK = 0x00001000;
    pub const JOB_OBJECT_LIMIT_SUBSET_AFFINITY = 0x00004000;
    pub const JOB_OBJECT_LIMIT_WORKINGSET = 0x00000001;

    pub const JOBOBJECT_INFORMATION_CLASS = enum(c_int) {
        JobObjectAssociateCompletionPortInformation = 7,
        JobObjectBasicLimitInformation = 2,
        JobObjectBasicUIRestrictions = 4,
        JobObjectCpuRateControlInformation = 15,
        JobObjectEndOfJobTimeInformation = 6,
        JobObjectExtendedLimitInformation = 9,
        JobObjectGroupInformation = 11,
        JobObjectGroupInformationEx = 14,
        JobObjectLimitViolationInformation2 = 34,
        JobObjectNetRateControlInformation = 32,
        JobObjectNotificationLimitInformation = 12,
        JobObjectNotificationLimitInformation2 = 33,
        JobObjectSecurityLimitInformation = 5,
    };

    pub const JOB_OBJECT_MSG_TYPE = enum(windows.DWORD) {
        JOB_OBJECT_MSG_END_OF_JOB_TIME = 1,
        JOB_OBJECT_MSG_END_OF_PROCESS_TIME = 2,
        JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT = 3,
        JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO = 4,
        JOB_OBJECT_MSG_NEW_PROCESS = 6,
        JOB_OBJECT_MSG_EXIT_PROCESS = 7,
        JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS = 8,
        JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT = 9,
        JOB_OBJECT_MSG_JOB_MEMORY_LIMIT = 10,
        JOB_OBJECT_MSG_NOTIFICATION_LIMIT = 11,
        JOB_OBJECT_MSG_JOB_CYCLE_TIME_LIMIT = 12,
        JOB_OBJECT_MSG_SILO_TERMINATED = 13,
        _,
    };

    pub const kernel32 = struct {
        pub extern "kernel32" fn GetProcessId(Process: windows.HANDLE) callconv(windows.WINAPI) windows.DWORD;
        pub extern "kernel32" fn CreateJobObjectA(lpSecurityAttributes: ?*windows.SECURITY_ATTRIBUTES, lpName: ?windows.LPCSTR) callconv(windows.WINAPI) windows.HANDLE;
        pub extern "kernel32" fn AssignProcessToJobObject(hJob: windows.HANDLE, hProcess: windows.HANDLE) callconv(windows.WINAPI) windows.BOOL;
        pub extern "kernel32" fn SetInformationJobObject(
            hJob: windows.HANDLE,
            JobObjectInformationClass: JOBOBJECT_INFORMATION_CLASS,
            lpJobObjectInformation: windows.LPVOID,
            cbJobObjectInformationLength: windows.DWORD,
        ) callconv(windows.WINAPI) windows.BOOL;
    };

    pub const CreateFileError = error{} || posix.UnexpectedError;

    pub fn CreateFile(
        lpFileName: [*:0]const u16,
        dwDesiredAccess: windows.DWORD,
        dwShareMode: windows.DWORD,
        lpSecurityAttributes: ?*windows.SECURITY_ATTRIBUTES,
        dwCreationDisposition: windows.DWORD,
        dwFlagsAndAttributes: windows.DWORD,
        hTemplateFile: ?windows.HANDLE,
    ) CreateFileError!windows.HANDLE {
        const handle = windows.kernel32.CreateFileW(lpFileName, dwDesiredAccess, dwShareMode, lpSecurityAttributes, dwCreationDisposition, dwFlagsAndAttributes, hTemplateFile);
        if (handle == windows.INVALID_HANDLE_VALUE) {
            const err = windows.kernel32.GetLastError();
            return switch (err) {
                else => windows.unexpectedError(err),
            };
        }

        return handle;
    }

    pub fn ReadFile(
        handle: windows.HANDLE,
        buffer: []u8,
        overlapped: ?*windows.OVERLAPPED,
    ) windows.ReadFileError!?usize {
        var read: windows.DWORD = 0;
        const result: windows.BOOL = windows.kernel32.ReadFile(handle, buffer.ptr, @as(windows.DWORD, @intCast(buffer.len)), &read, overlapped);
        if (result == windows.FALSE) {
            const err = windows.kernel32.GetLastError();
            return switch (err) {
                windows.Win32Error.IO_PENDING => null,
                else => windows.unexpectedError(err),
            };
        }

        return @as(usize, @intCast(read));
    }

    pub fn WriteFile(
        handle: windows.HANDLE,
        buffer: []const u8,
        overlapped: ?*windows.OVERLAPPED,
    ) windows.WriteFileError!?usize {
        var written: windows.DWORD = 0;
        const result: windows.BOOL = windows.kernel32.WriteFile(handle, buffer.ptr, @as(windows.DWORD, @intCast(buffer.len)), &written, overlapped);
        if (result == windows.FALSE) {
            const err = windows.kernel32.GetLastError();
            return switch (err) {
                windows.Win32Error.IO_PENDING => null,
                else => windows.unexpectedError(err),
            };
        }

        return @as(usize, @intCast(written));
    }
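
    // Illustrative sketch, not part of the vendored source: ReadFile and
    // WriteFile above return null when the kernel reports IO_PENDING, meaning
    // the overlapped operation was queued and will complete through the I/O
    // completion port instead of inline, so callers branch on the optional:
    //
    //     var ov = std.mem.zeroes(windows.OVERLAPPED);
    //     if (try ReadFile(handle, &buf, &ov)) |n| {
    //         // Completed synchronously; n bytes are already in buf.
    //     } else {
    //         // Pending; the result arrives as a completion packet on the IOCP.
    //     }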

    pub const DeleteFileError = error{} || posix.UnexpectedError;

    pub fn DeleteFile(name: [*:0]const u16) DeleteFileError!void {
        const result: windows.BOOL = windows.kernel32.DeleteFileW(name);
        if (result == windows.FALSE) {
            const err = windows.kernel32.GetLastError();
            return switch (err) {
                else => windows.unexpectedError(err),
            };
        }
    }

    pub const CreateJobObjectError = error{AlreadyExists} || posix.UnexpectedError;
    pub fn CreateJobObject(
        lpSecurityAttributes: ?*windows.SECURITY_ATTRIBUTES,
        lpName: ?windows.LPCSTR,
    ) !windows.HANDLE {
        const handle = kernel32.CreateJobObjectA(lpSecurityAttributes, lpName);
        return switch (windows.kernel32.GetLastError()) {
            .SUCCESS => handle,
            .ALREADY_EXISTS => CreateJobObjectError.AlreadyExists,
            else => |err| windows.unexpectedError(err),
        };
    }

    pub fn AssignProcessToJobObject(hJob: windows.HANDLE, hProcess: windows.HANDLE) posix.UnexpectedError!void {
        const result: windows.BOOL = kernel32.AssignProcessToJobObject(hJob, hProcess);
        if (result == windows.FALSE) {
            const err = windows.kernel32.GetLastError();
            return switch (err) {
                else => windows.unexpectedError(err),
            };
        }
    }

    pub fn SetInformationJobObject(
        hJob: windows.HANDLE,
        JobObjectInformationClass: JOBOBJECT_INFORMATION_CLASS,
        lpJobObjectInformation: windows.LPVOID,
        cbJobObjectInformationLength: windows.DWORD,
    ) posix.UnexpectedError!void {
        const result: windows.BOOL = kernel32.SetInformationJobObject(
            hJob,
            JobObjectInformationClass,
            lpJobObjectInformation,
            cbJobObjectInformationLength,
        );

        if (result == windows.FALSE) {
            const err = windows.kernel32.GetLastError();
            return switch (err) {
                else => windows.unexpectedError(err),
            };
        }
    }
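
    // Illustrative sketch, not part of the vendored source: the job-object
    // helpers above compose in the usual order for supervising a child
    // process: create the job, set a limit such as kill-on-close, then assign
    // the process (child_process_handle below is hypothetical):
    //
    //     const job = try CreateJobObject(null, null);
    //     var info = std.mem.zeroes(JOBOBJECT_EXTENDED_LIMIT_INFORMATION);
    //     info.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;
    //     try SetInformationJobObject(
    //         job,
    //         .JobObjectExtendedLimitInformation,
    //         &info,
    //         @sizeOf(JOBOBJECT_EXTENDED_LIMIT_INFORMATION),
    //     );
    //     try AssignProcessToJobObject(job, child_process_handle);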
};