Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
bytecodealliance
GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/crates/wasi-common/src/snapshots/preview_1.rs
1692 views
1
use crate::{
2
I32Exit, SystemTimeSpec, WasiCtx,
3
dir::{DirEntry, OpenResult, ReaddirCursor, ReaddirEntity, TableDirExt},
4
file::{
5
Advice, FdFlags, FdStat, FileAccessMode, FileEntry, FileType, Filestat, OFlags, RiFlags,
6
RoFlags, SdFlags, SiFlags, TableFileExt, WasiFile,
7
},
8
sched::{
9
Poll, Userdata,
10
subscription::{RwEventFlags, SubscriptionResult},
11
},
12
};
13
use cap_std::time::{Duration, SystemClock};
14
use std::borrow::Cow;
15
use std::io::{IoSlice, IoSliceMut};
16
use std::ops::Deref;
17
use std::sync::Arc;
18
use wiggle::GuestMemory;
19
use wiggle::GuestPtr;
20
21
pub mod error;
22
use error::{Error, ErrorExt};
23
24
// Limit the size of intermediate buffers when copying to WebAssembly shared
25
// memory.
26
pub(crate) const MAX_SHARED_BUFFER_SIZE: usize = 1 << 16;
27
28
wiggle::from_witx!({
29
witx: ["witx/preview1/wasi_snapshot_preview1.witx"],
30
errors: { errno => trappable Error },
31
// Note: not every function actually needs to be async, however, nearly all of them do, and
32
// keeping that set the same in this macro and the wasmtime_wiggle / lucet_wiggle macros is
33
// tedious, and there is no cost to having a sync function be async in this case.
34
async: *,
35
wasmtime: false,
36
});
37
38
/// Tells wiggle which `Errno` variant represents a successful call when it
/// translates trappable `Error` values back into guest-visible errno codes.
impl wiggle::GuestErrorType for types::Errno {
    fn success() -> Self {
        // The witx-defined "no error" value.
        Self::Success
    }
}
43
44
#[wiggle::async_trait]
45
impl wasi_snapshot_preview1::WasiSnapshotPreview1 for WasiCtx {
46
async fn args_get(
47
&mut self,
48
memory: &mut GuestMemory<'_>,
49
argv: GuestPtr<GuestPtr<u8>>,
50
argv_buf: GuestPtr<u8>,
51
) -> Result<(), Error> {
52
self.args.write_to_guest(memory, argv_buf, argv)
53
}
54
55
async fn args_sizes_get(
56
&mut self,
57
_memory: &mut GuestMemory<'_>,
58
) -> Result<(types::Size, types::Size), Error> {
59
Ok((self.args.number_elements(), self.args.cumulative_size()))
60
}
61
62
async fn environ_get(
63
&mut self,
64
memory: &mut GuestMemory<'_>,
65
environ: GuestPtr<GuestPtr<u8>>,
66
environ_buf: GuestPtr<u8>,
67
) -> Result<(), Error> {
68
self.env.write_to_guest(memory, environ_buf, environ)
69
}
70
71
async fn environ_sizes_get(
72
&mut self,
73
_memory: &mut GuestMemory<'_>,
74
) -> Result<(types::Size, types::Size), Error> {
75
Ok((self.env.number_elements(), self.env.cumulative_size()))
76
}
77
78
async fn clock_res_get(
79
&mut self,
80
_memory: &mut GuestMemory<'_>,
81
id: types::Clockid,
82
) -> Result<types::Timestamp, Error> {
83
let resolution = match id {
84
types::Clockid::Realtime => Ok(self.clocks.system()?.resolution()),
85
types::Clockid::Monotonic => Ok(self.clocks.monotonic()?.abs_clock.resolution()),
86
types::Clockid::ProcessCputimeId | types::Clockid::ThreadCputimeId => {
87
Err(Error::badf().context("process and thread clocks are not supported"))
88
}
89
}?;
90
Ok(resolution.as_nanos().try_into()?)
91
}
92
93
    /// Return the current value of the requested clock, in nanoseconds,
    /// rounded according to the caller-supplied `precision`.
    ///
    /// Realtime is reported relative to the Unix epoch; monotonic is reported
    /// relative to this context's clock creation time. CPU-time clocks are
    /// not supported and yield `badf`.
    async fn clock_time_get(
        &mut self,
        _memory: &mut GuestMemory<'_>,
        id: types::Clockid,
        precision: types::Timestamp,
    ) -> Result<types::Timestamp, Error> {
        let precision = Duration::from_nanos(precision);
        match id {
            types::Clockid::Realtime => {
                let now = self.clocks.system()?.now(precision).into_std();
                // A system time before the epoch cannot be represented as an
                // unsigned timestamp; treat it as a trap rather than an errno.
                let d = now
                    .duration_since(std::time::SystemTime::UNIX_EPOCH)
                    .map_err(|_| {
                        Error::trap(anyhow::Error::msg("current time before unix epoch"))
                    })?;
                Ok(d.as_nanos().try_into()?)
            }
            types::Clockid::Monotonic => {
                // Monotonic readings are deltas from the clock's creation
                // time, so guests never observe the host's absolute value.
                let clock = self.clocks.monotonic()?;
                let now = clock.abs_clock.now(precision);
                let d = now.duration_since(clock.creation_time);
                Ok(d.as_nanos().try_into()?)
            }
            types::Clockid::ProcessCputimeId | types::Clockid::ThreadCputimeId => {
                Err(Error::badf().context("process and thread clocks are not supported"))
            }
        }
    }
121
122
async fn fd_advise(
123
&mut self,
124
_memory: &mut GuestMemory<'_>,
125
fd: types::Fd,
126
offset: types::Filesize,
127
len: types::Filesize,
128
advice: types::Advice,
129
) -> Result<(), Error> {
130
self.table()
131
.get_file(u32::from(fd))?
132
.file
133
.advise(offset, len, advice.into())
134
.await?;
135
Ok(())
136
}
137
138
async fn fd_allocate(
139
&mut self,
140
_memory: &mut GuestMemory<'_>,
141
fd: types::Fd,
142
_offset: types::Filesize,
143
_len: types::Filesize,
144
) -> Result<(), Error> {
145
// Check if fd is a file, and has rights, just to reject those cases
146
// with the errors expected:
147
let _ = self.table().get_file(u32::from(fd))?;
148
// This operation from cloudabi is linux-specific, isn't even
149
// supported across all linux filesystems, and has no support on macos
150
// or windows. Rather than ship spotty support, it has been removed
151
// from preview 2, and we are no longer supporting it in preview 1 as
152
// well.
153
Err(Error::not_supported())
154
}
155
156
async fn fd_close(
157
&mut self,
158
_memory: &mut GuestMemory<'_>,
159
fd: types::Fd,
160
) -> Result<(), Error> {
161
let table = self.table();
162
let fd = u32::from(fd);
163
164
// Fail fast: If not present in table, Badf
165
if !table.contains_key(fd) {
166
return Err(Error::badf().context("key not in table"));
167
}
168
// fd_close must close either a File or a Dir handle
169
if table.is::<FileEntry>(fd) {
170
let _ = table.delete::<FileEntry>(fd);
171
} else if table.is::<DirEntry>(fd) {
172
let _ = table.delete::<DirEntry>(fd);
173
} else {
174
return Err(Error::badf().context("key does not refer to file or directory"));
175
}
176
177
Ok(())
178
}
179
180
async fn fd_datasync(
181
&mut self,
182
_memory: &mut GuestMemory<'_>,
183
fd: types::Fd,
184
) -> Result<(), Error> {
185
self.table()
186
.get_file(u32::from(fd))?
187
.file
188
.datasync()
189
.await?;
190
Ok(())
191
}
192
193
async fn fd_fdstat_get(
194
&mut self,
195
_memory: &mut GuestMemory<'_>,
196
fd: types::Fd,
197
) -> Result<types::Fdstat, Error> {
198
let table = self.table();
199
let fd = u32::from(fd);
200
if table.is::<FileEntry>(fd) {
201
let file_entry: Arc<FileEntry> = table.get(fd)?;
202
let fdstat = file_entry.get_fdstat().await?;
203
Ok(types::Fdstat::from(&fdstat))
204
} else if table.is::<DirEntry>(fd) {
205
let _dir_entry: Arc<DirEntry> = table.get(fd)?;
206
let dir_fdstat = types::Fdstat {
207
fs_filetype: types::Filetype::Directory,
208
fs_rights_base: directory_base_rights(),
209
fs_rights_inheriting: directory_inheriting_rights(),
210
fs_flags: types::Fdflags::empty(),
211
};
212
Ok(dir_fdstat)
213
} else {
214
Err(Error::badf())
215
}
216
}
217
218
async fn fd_fdstat_set_flags(
219
&mut self,
220
_memory: &mut GuestMemory<'_>,
221
fd: types::Fd,
222
flags: types::Fdflags,
223
) -> Result<(), Error> {
224
if let Some(table) = self.table_mut() {
225
table
226
.get_file_mut(u32::from(fd))?
227
.file
228
.set_fdflags(FdFlags::from(flags))
229
.await
230
} else {
231
log::warn!(
232
"`fd_fdstat_set_flags` does not work with wasi-threads enabled; see https://github.com/bytecodealliance/wasmtime/issues/5643"
233
);
234
Err(Error::not_supported())
235
}
236
}
237
238
async fn fd_fdstat_set_rights(
239
&mut self,
240
_memory: &mut GuestMemory<'_>,
241
fd: types::Fd,
242
_fs_rights_base: types::Rights,
243
_fs_rights_inheriting: types::Rights,
244
) -> Result<(), Error> {
245
let table = self.table();
246
let fd = u32::from(fd);
247
if table.is::<FileEntry>(fd) {
248
let _file_entry: Arc<FileEntry> = table.get(fd)?;
249
Err(Error::not_supported())
250
} else if table.is::<DirEntry>(fd) {
251
let _dir_entry: Arc<DirEntry> = table.get(fd)?;
252
Err(Error::not_supported())
253
} else {
254
Err(Error::badf())
255
}
256
}
257
258
async fn fd_filestat_get(
259
&mut self,
260
_memory: &mut GuestMemory<'_>,
261
fd: types::Fd,
262
) -> Result<types::Filestat, Error> {
263
let table = self.table();
264
let fd = u32::from(fd);
265
if table.is::<FileEntry>(fd) {
266
let filestat = table.get_file(fd)?.file.get_filestat().await?;
267
Ok(filestat.into())
268
} else if table.is::<DirEntry>(fd) {
269
let filestat = table.get_dir(fd)?.dir.get_filestat().await?;
270
Ok(filestat.into())
271
} else {
272
Err(Error::badf())
273
}
274
}
275
276
async fn fd_filestat_set_size(
277
&mut self,
278
_memory: &mut GuestMemory<'_>,
279
fd: types::Fd,
280
size: types::Filesize,
281
) -> Result<(), Error> {
282
self.table()
283
.get_file(u32::from(fd))?
284
.file
285
.set_filestat_size(size)
286
.await?;
287
Ok(())
288
}
289
290
async fn fd_filestat_set_times(
291
&mut self,
292
_memory: &mut GuestMemory<'_>,
293
fd: types::Fd,
294
atim: types::Timestamp,
295
mtim: types::Timestamp,
296
fst_flags: types::Fstflags,
297
) -> Result<(), Error> {
298
let fd = u32::from(fd);
299
let table = self.table();
300
// Validate flags
301
let set_atim = fst_flags.contains(types::Fstflags::ATIM);
302
let set_atim_now = fst_flags.contains(types::Fstflags::ATIM_NOW);
303
let set_mtim = fst_flags.contains(types::Fstflags::MTIM);
304
let set_mtim_now = fst_flags.contains(types::Fstflags::MTIM_NOW);
305
306
let atim = systimespec(set_atim, atim, set_atim_now).map_err(|e| e.context("atim"))?;
307
let mtim = systimespec(set_mtim, mtim, set_mtim_now).map_err(|e| e.context("mtim"))?;
308
309
if table.is::<FileEntry>(fd) {
310
table
311
.get_file(fd)
312
.expect("checked that entry is file")
313
.file
314
.set_times(atim, mtim)
315
.await
316
} else if table.is::<DirEntry>(fd) {
317
table
318
.get_dir(fd)
319
.expect("checked that entry is dir")
320
.dir
321
.set_times(".", atim, mtim, false)
322
.await
323
} else {
324
Err(Error::badf())
325
}
326
}
327
328
    /// Scatter-read from the file's current offset into the guest iovecs.
    ///
    /// Only the first (non-empty) iovec is ever filled per call; `read` may
    /// legally return fewer bytes than requested, so guests re-call. Returns
    /// the number of bytes read.
    async fn fd_read(
        &mut self,
        memory: &mut GuestMemory<'_>,
        fd: types::Fd,
        iovs: types::IovecArray,
    ) -> Result<types::Size, Error> {
        let f = self.table().get_file(u32::from(fd))?;
        // Access mode check normalizes error returned (windows would prefer ACCES here)
        if !f.access_mode.contains(FileAccessMode::READ) {
            Err(types::Errno::Badf)?
        }
        let f = &f.file;

        // Resolve each guest iovec into a (still unvalidated) pointer+length
        // pair; the actual memory is not borrowed yet.
        let iovs: Vec<wiggle::GuestPtr<[u8]>> = iovs
            .iter()
            .map(|iov_ptr| {
                let iov_ptr = iov_ptr?;
                let iov: types::Iovec = memory.read(iov_ptr)?;
                Ok(iov.buf.as_array(iov.buf_len))
            })
            .collect::<Result<_, Error>>()?;

        // If the first iov structure is from shared memory we can safely assume
        // all the rest will be. We then read into memory based on the memory's
        // shared-ness:
        // - if not shared, we copy directly into the Wasm memory
        // - if shared, we use an intermediate buffer; this avoids Rust unsafety
        //   due to holding on to a `&mut [u8]` of Wasm memory when we cannot
        //   guarantee the `&mut` exclusivity--other threads could be modifying
        //   the data as this functions writes to it. Though likely there is no
        //   issue with OS writing to io structs in multi-threaded scenarios,
        //   since we do not know here if `&dyn WasiFile` does anything else
        //   (e.g., read), we cautiously incur some performance overhead by
        //   copying twice.
        let is_shared_memory = memory.is_shared_memory();
        let bytes_read: u64 = if is_shared_memory {
            // For shared memory, read into an intermediate buffer. Only the
            // first iov will be filled and even then the read is capped by the
            // `MAX_SHARED_BUFFER_SIZE`, so users are expected to re-call.
            let iov = iovs.into_iter().next();
            if let Some(iov) = iov {
                let mut buffer = vec![0; (iov.len() as usize).min(MAX_SHARED_BUFFER_SIZE)];
                let bytes_read = f.read_vectored(&mut [IoSliceMut::new(&mut buffer)]).await?;
                let iov = iov
                    .get_range(0..bytes_read.try_into()?)
                    .expect("it should always be possible to slice the iov smaller");
                memory.copy_from_slice(&buffer[0..bytes_read.try_into()?], iov)?;
                bytes_read
            } else {
                return Ok(0);
            }
        } else {
            // Convert the first unsafe guest slice into a safe one--Wiggle
            // can only track mutable borrows for an entire region, and converting
            // all guest pointers to slices would cause a runtime borrow-checking
            // error. As read is allowed to return less than the requested amount,
            // it's valid (though not as efficient) for us to only perform the
            // read of the first buffer.
            let guest_slice: &mut [u8] = match iovs.into_iter().filter(|iov| iov.len() > 0).next() {
                Some(iov) => memory.as_slice_mut(iov)?.unwrap(),
                None => return Ok(0),
            };

            // Read directly into the Wasm memory.
            f.read_vectored(&mut [IoSliceMut::new(guest_slice)]).await?
        };

        Ok(types::Size::try_from(bytes_read)?)
    }
397
398
    /// Positioned scatter-read: like `fd_read` but reads at `offset` without
    /// moving the file cursor.
    ///
    /// Shares `fd_read`'s strategy: only the first (non-empty) iovec is
    /// filled per call, and shared guest memory is staged through an
    /// intermediate buffer. Returns the number of bytes read.
    async fn fd_pread(
        &mut self,
        memory: &mut GuestMemory<'_>,
        fd: types::Fd,
        iovs: types::IovecArray,
        offset: types::Filesize,
    ) -> Result<types::Size, Error> {
        let f = self.table().get_file(u32::from(fd))?;
        // Access mode check normalizes error returned (windows would prefer ACCES here)
        if !f.access_mode.contains(FileAccessMode::READ) {
            Err(types::Errno::Badf)?
        }
        let f = &f.file;

        // Resolve each guest iovec into a pointer+length pair; no guest
        // memory is borrowed yet.
        let iovs: Vec<wiggle::GuestPtr<[u8]>> = iovs
            .iter()
            .map(|iov_ptr| {
                let iov_ptr = iov_ptr?;
                let iov: types::Iovec = memory.read(iov_ptr)?;
                Ok(iov.buf.as_array(iov.buf_len))
            })
            .collect::<Result<_, Error>>()?;

        // If the first iov structure is from shared memory we can safely assume
        // all the rest will be. We then read into memory based on the memory's
        // shared-ness:
        // - if not shared, we copy directly into the Wasm memory
        // - if shared, we use an intermediate buffer; this avoids Rust unsafety
        //   due to holding on to a `&mut [u8]` of Wasm memory when we cannot
        //   guarantee the `&mut` exclusivity--other threads could be modifying
        //   the data as this functions writes to it. Though likely there is no
        //   issue with OS writing to io structs in multi-threaded scenarios,
        //   since we do not know here if `&dyn WasiFile` does anything else
        //   (e.g., read), we cautiously incur some performance overhead by
        //   copying twice.
        let is_shared_memory = memory.is_shared_memory();
        let bytes_read: u64 = if is_shared_memory {
            // For shared memory, read into an intermediate buffer. Only the
            // first iov will be filled and even then the read is capped by the
            // `MAX_SHARED_BUFFER_SIZE`, so users are expected to re-call.
            let iov = iovs.into_iter().next();
            if let Some(iov) = iov {
                let mut buffer = vec![0; (iov.len() as usize).min(MAX_SHARED_BUFFER_SIZE)];
                let bytes_read = f
                    .read_vectored_at(&mut [IoSliceMut::new(&mut buffer)], offset)
                    .await?;
                let iov = iov
                    .get_range(0..bytes_read.try_into()?)
                    .expect("it should always be possible to slice the iov smaller");
                memory.copy_from_slice(&buffer[0..bytes_read.try_into()?], iov)?;
                bytes_read
            } else {
                return Ok(0);
            }
        } else {
            // Convert unsafe guest slices to safe ones.
            let guest_slice: &mut [u8] = match iovs.into_iter().filter(|iov| iov.len() > 0).next() {
                Some(iov) => memory.as_slice_mut(iov)?.unwrap(),
                None => return Ok(0),
            };

            // Read directly into the Wasm memory.
            f.read_vectored_at(&mut [IoSliceMut::new(guest_slice)], offset)
                .await?
        };

        Ok(types::Size::try_from(bytes_read)?)
    }
466
467
async fn fd_write(
468
&mut self,
469
memory: &mut GuestMemory<'_>,
470
fd: types::Fd,
471
ciovs: types::CiovecArray,
472
) -> Result<types::Size, Error> {
473
let f = self.table().get_file(u32::from(fd))?;
474
// Access mode check normalizes error returned (windows would prefer ACCES here)
475
if !f.access_mode.contains(FileAccessMode::WRITE) {
476
Err(types::Errno::Badf)?
477
}
478
let f = &f.file;
479
480
let guest_slices: Vec<Cow<[u8]>> = ciovs
481
.iter()
482
.map(|iov_ptr| {
483
let iov_ptr = iov_ptr?;
484
let iov: types::Ciovec = memory.read(iov_ptr)?;
485
Ok(memory.as_cow(iov.buf.as_array(iov.buf_len))?)
486
})
487
.collect::<Result<_, Error>>()?;
488
489
let ioslices: Vec<IoSlice> = guest_slices
490
.iter()
491
.map(|s| IoSlice::new(s.deref()))
492
.collect();
493
let bytes_written = f.write_vectored(&ioslices).await?;
494
495
Ok(types::Size::try_from(bytes_written)?)
496
}
497
498
async fn fd_pwrite(
499
&mut self,
500
memory: &mut GuestMemory<'_>,
501
fd: types::Fd,
502
ciovs: types::CiovecArray,
503
offset: types::Filesize,
504
) -> Result<types::Size, Error> {
505
let f = self.table().get_file(u32::from(fd))?;
506
// Access mode check normalizes error returned (windows would prefer ACCES here)
507
if !f.access_mode.contains(FileAccessMode::WRITE) {
508
Err(types::Errno::Badf)?
509
}
510
let f = &f.file;
511
512
let guest_slices: Vec<Cow<[u8]>> = ciovs
513
.iter()
514
.map(|iov_ptr| {
515
let iov_ptr = iov_ptr?;
516
let iov: types::Ciovec = memory.read(iov_ptr)?;
517
Ok(memory.as_cow(iov.buf.as_array(iov.buf_len))?)
518
})
519
.collect::<Result<_, Error>>()?;
520
521
let ioslices: Vec<IoSlice> = guest_slices
522
.iter()
523
.map(|s| IoSlice::new(s.deref()))
524
.collect();
525
let bytes_written = f.write_vectored_at(&ioslices, offset).await?;
526
527
Ok(types::Size::try_from(bytes_written)?)
528
}
529
530
async fn fd_prestat_get(
531
&mut self,
532
_memory: &mut GuestMemory<'_>,
533
fd: types::Fd,
534
) -> Result<types::Prestat, Error> {
535
let table = self.table();
536
let dir_entry: Arc<DirEntry> = table.get(u32::from(fd)).map_err(|_| Error::badf())?;
537
if let Some(preopen) = dir_entry.preopen_path() {
538
let path_str = preopen.to_str().ok_or_else(|| Error::not_supported())?;
539
let pr_name_len = u32::try_from(path_str.as_bytes().len())?;
540
Ok(types::Prestat::Dir(types::PrestatDir { pr_name_len }))
541
} else {
542
Err(Error::not_supported().context("file is not a preopen"))
543
}
544
}
545
546
async fn fd_prestat_dir_name(
547
&mut self,
548
memory: &mut GuestMemory<'_>,
549
fd: types::Fd,
550
path: GuestPtr<u8>,
551
path_max_len: types::Size,
552
) -> Result<(), Error> {
553
let table = self.table();
554
let dir_entry: Arc<DirEntry> = table.get(u32::from(fd)).map_err(|_| Error::not_dir())?;
555
if let Some(preopen) = dir_entry.preopen_path() {
556
let path_bytes = preopen
557
.to_str()
558
.ok_or_else(|| Error::not_supported())?
559
.as_bytes();
560
let path_len = path_bytes.len();
561
if path_len > path_max_len as usize {
562
return Err(Error::name_too_long());
563
}
564
let path = path.as_array(path_len as u32);
565
memory.copy_from_slice(path_bytes, path)?;
566
Ok(())
567
} else {
568
Err(Error::not_supported())
569
}
570
}
571
async fn fd_renumber(
572
&mut self,
573
_memory: &mut GuestMemory<'_>,
574
from: types::Fd,
575
to: types::Fd,
576
) -> Result<(), Error> {
577
let table = self.table();
578
let from = u32::from(from);
579
let to = u32::from(to);
580
if !table.contains_key(from) {
581
return Err(Error::badf());
582
}
583
if !table.contains_key(to) {
584
return Err(Error::badf());
585
}
586
table.renumber(from, to)
587
}
588
589
async fn fd_seek(
590
&mut self,
591
_memory: &mut GuestMemory<'_>,
592
fd: types::Fd,
593
offset: types::Filedelta,
594
whence: types::Whence,
595
) -> Result<types::Filesize, Error> {
596
use std::io::SeekFrom;
597
let whence = match whence {
598
types::Whence::Cur => SeekFrom::Current(offset),
599
types::Whence::End => SeekFrom::End(offset),
600
types::Whence::Set => {
601
SeekFrom::Start(offset.try_into().map_err(|_| Error::invalid_argument())?)
602
}
603
};
604
let newoffset = self
605
.table()
606
.get_file(u32::from(fd))?
607
.file
608
.seek(whence)
609
.await?;
610
Ok(newoffset)
611
}
612
613
async fn fd_sync(&mut self, _memory: &mut GuestMemory<'_>, fd: types::Fd) -> Result<(), Error> {
614
self.table().get_file(u32::from(fd))?.file.sync().await?;
615
Ok(())
616
}
617
618
async fn fd_tell(
619
&mut self,
620
_memory: &mut GuestMemory<'_>,
621
fd: types::Fd,
622
) -> Result<types::Filesize, Error> {
623
let offset = self
624
.table()
625
.get_file(u32::from(fd))?
626
.file
627
.seek(std::io::SeekFrom::Current(0))
628
.await?;
629
Ok(offset)
630
}
631
632
    /// Fill `buf` with directory entries starting at `cookie`, returning the
    /// number of bytes used.
    ///
    /// Each entry is a fixed-size dirent struct followed by the raw name
    /// bytes. Per the preview1 convention, a truncated final entry is
    /// reported by returning exactly `buf_len`, which tells libc the buffer
    /// was filled and it should call again with the last cookie.
    async fn fd_readdir(
        &mut self,
        memory: &mut GuestMemory<'_>,
        fd: types::Fd,
        mut buf: GuestPtr<u8>,
        buf_len: types::Size,
        cookie: types::Dircookie,
    ) -> Result<types::Size, Error> {
        let mut bufused = 0;
        for entity in self
            .table()
            .get_dir(u32::from(fd))?
            .dir
            .readdir(ReaddirCursor::from(cookie))
            .await?
        {
            let entity = entity?;
            let dirent_raw = dirent_bytes(types::Dirent::try_from(&entity)?);
            let dirent_len: types::Size = dirent_raw.len().try_into()?;
            let name_raw = entity.name.as_bytes();
            let name_len: types::Size = name_raw.len().try_into()?;

            // Copy as many bytes of the dirent as we can, up to the end of the buffer
            let dirent_copy_len = std::cmp::min(dirent_len, buf_len - bufused);
            let raw = buf.as_array(dirent_copy_len);
            memory.copy_from_slice(&dirent_raw[..dirent_copy_len as usize], raw)?;

            // If the dirent struct wasn't copied entirely, return that we filled the buffer, which
            // tells libc that we're not at EOF.
            if dirent_copy_len < dirent_len {
                return Ok(buf_len);
            }

            buf = buf.add(dirent_copy_len)?;
            bufused += dirent_copy_len;

            // Copy as many bytes of the name as we can, up to the end of the buffer
            let name_copy_len = std::cmp::min(name_len, buf_len - bufused);
            let raw = buf.as_array(name_copy_len);
            memory.copy_from_slice(&name_raw[..name_copy_len as usize], raw)?;

            // Likewise, if the name wasn't copied entirely, return that we filled the buffer,
            // which tells libc that we're not at EOF

            if name_copy_len < name_len {
                return Ok(buf_len);
            }

            buf = buf.add(name_copy_len)?;
            bufused += name_copy_len;
        }
        Ok(bufused)
    }
685
686
async fn path_create_directory(
687
&mut self,
688
memory: &mut GuestMemory<'_>,
689
dirfd: types::Fd,
690
path: GuestPtr<str>,
691
) -> Result<(), Error> {
692
self.table()
693
.get_dir(u32::from(dirfd))?
694
.dir
695
.create_dir(memory.as_cow_str(path)?.deref())
696
.await
697
}
698
699
async fn path_filestat_get(
700
&mut self,
701
memory: &mut GuestMemory<'_>,
702
dirfd: types::Fd,
703
flags: types::Lookupflags,
704
path: GuestPtr<str>,
705
) -> Result<types::Filestat, Error> {
706
let filestat = self
707
.table()
708
.get_dir(u32::from(dirfd))?
709
.dir
710
.get_path_filestat(
711
memory.as_cow_str(path)?.deref(),
712
flags.contains(types::Lookupflags::SYMLINK_FOLLOW),
713
)
714
.await?;
715
Ok(types::Filestat::from(filestat))
716
}
717
718
async fn path_filestat_set_times(
719
&mut self,
720
memory: &mut GuestMemory<'_>,
721
dirfd: types::Fd,
722
flags: types::Lookupflags,
723
path: GuestPtr<str>,
724
atim: types::Timestamp,
725
mtim: types::Timestamp,
726
fst_flags: types::Fstflags,
727
) -> Result<(), Error> {
728
let set_atim = fst_flags.contains(types::Fstflags::ATIM);
729
let set_atim_now = fst_flags.contains(types::Fstflags::ATIM_NOW);
730
let set_mtim = fst_flags.contains(types::Fstflags::MTIM);
731
let set_mtim_now = fst_flags.contains(types::Fstflags::MTIM_NOW);
732
733
let atim = systimespec(set_atim, atim, set_atim_now).map_err(|e| e.context("atim"))?;
734
let mtim = systimespec(set_mtim, mtim, set_mtim_now).map_err(|e| e.context("mtim"))?;
735
self.table()
736
.get_dir(u32::from(dirfd))?
737
.dir
738
.set_times(
739
memory.as_cow_str(path)?.deref(),
740
atim,
741
mtim,
742
flags.contains(types::Lookupflags::SYMLINK_FOLLOW),
743
)
744
.await
745
}
746
747
async fn path_link(
748
&mut self,
749
memory: &mut GuestMemory<'_>,
750
src_fd: types::Fd,
751
src_flags: types::Lookupflags,
752
src_path: GuestPtr<str>,
753
target_fd: types::Fd,
754
target_path: GuestPtr<str>,
755
) -> Result<(), Error> {
756
let table = self.table();
757
let src_dir = table.get_dir(u32::from(src_fd))?;
758
let target_dir = table.get_dir(u32::from(target_fd))?;
759
let symlink_follow = src_flags.contains(types::Lookupflags::SYMLINK_FOLLOW);
760
if symlink_follow {
761
return Err(Error::invalid_argument()
762
.context("symlink following on path_link is not supported"));
763
}
764
765
src_dir
766
.dir
767
.hard_link(
768
memory.as_cow_str(src_path)?.deref(),
769
target_dir.dir.deref(),
770
memory.as_cow_str(target_path)?.deref(),
771
)
772
.await
773
}
774
775
    /// Open the file or directory at `path` relative to `dirfd`, insert the
    /// resulting handle into the descriptor table, and return its new fd.
    ///
    /// The FD_READ/FD_WRITE bits of `fs_rights_base` are the only rights
    /// consulted; they become the handle's access mode.
    async fn path_open(
        &mut self,
        memory: &mut GuestMemory<'_>,
        dirfd: types::Fd,
        dirflags: types::Lookupflags,
        path: GuestPtr<str>,
        oflags: types::Oflags,
        fs_rights_base: types::Rights,
        _fs_rights_inheriting: types::Rights,
        fdflags: types::Fdflags,
    ) -> Result<types::Fd, Error> {
        let table = self.table();
        let dirfd = u32::from(dirfd);
        // A file descriptor can never be a directory to open from.
        if table.is::<FileEntry>(dirfd) {
            return Err(Error::not_dir());
        }
        let dir_entry = table.get_dir(dirfd)?;

        let symlink_follow = dirflags.contains(types::Lookupflags::SYMLINK_FOLLOW);

        let oflags = OFlags::from(&oflags);
        let fdflags = FdFlags::from(fdflags);
        let path = memory.as_cow_str(path)?;

        // Record the requested access mode so later fd_read/fd_write calls
        // can be rejected consistently across platforms.
        let read = fs_rights_base.contains(types::Rights::FD_READ);
        let write = fs_rights_base.contains(types::Rights::FD_WRITE);
        let access_mode = if read {
            FileAccessMode::READ
        } else {
            FileAccessMode::empty()
        } | if write {
            FileAccessMode::WRITE
        } else {
            FileAccessMode::empty()
        };

        let file = dir_entry
            .dir
            .open_file(symlink_follow, path.deref(), oflags, read, write, fdflags)
            .await?;
        // Release the parent directory entry before inserting the new handle
        // into the table.
        drop(dir_entry);

        let fd = match file {
            OpenResult::File(file) => table.push(Arc::new(FileEntry::new(file, access_mode)))?,
            // Opening a directory yields a non-preopen DirEntry (preopen
            // path `None`).
            OpenResult::Dir(child_dir) => table.push(Arc::new(DirEntry::new(None, child_dir)))?,
        };
        Ok(types::Fd::from(fd))
    }
823
824
async fn path_readlink(
825
&mut self,
826
memory: &mut GuestMemory<'_>,
827
dirfd: types::Fd,
828
path: GuestPtr<str>,
829
buf: GuestPtr<u8>,
830
buf_len: types::Size,
831
) -> Result<types::Size, Error> {
832
let link = self
833
.table()
834
.get_dir(u32::from(dirfd))?
835
.dir
836
.read_link(memory.as_cow_str(path)?.deref())
837
.await?
838
.into_os_string()
839
.into_string()
840
.map_err(|_| Error::illegal_byte_sequence().context("link contents"))?;
841
let link_bytes = link.as_bytes();
842
// Like posix readlink(2), silently truncate links when they are larger than the
843
// destination buffer:
844
let link_len = std::cmp::min(link_bytes.len(), buf_len as usize);
845
let buf = buf.as_array(link_len as u32);
846
memory.copy_from_slice(&link_bytes[..link_len], buf)?;
847
Ok(link_len as types::Size)
848
}
849
850
async fn path_remove_directory(
851
&mut self,
852
memory: &mut GuestMemory<'_>,
853
dirfd: types::Fd,
854
path: GuestPtr<str>,
855
) -> Result<(), Error> {
856
self.table()
857
.get_dir(u32::from(dirfd))?
858
.dir
859
.remove_dir(memory.as_cow_str(path)?.deref())
860
.await
861
}
862
863
async fn path_rename(
864
&mut self,
865
memory: &mut GuestMemory<'_>,
866
src_fd: types::Fd,
867
src_path: GuestPtr<str>,
868
dest_fd: types::Fd,
869
dest_path: GuestPtr<str>,
870
) -> Result<(), Error> {
871
let table = self.table();
872
let src_dir = table.get_dir(u32::from(src_fd))?;
873
let dest_dir = table.get_dir(u32::from(dest_fd))?;
874
src_dir
875
.dir
876
.rename(
877
memory.as_cow_str(src_path)?.deref(),
878
dest_dir.dir.deref(),
879
memory.as_cow_str(dest_path)?.deref(),
880
)
881
.await
882
}
883
884
async fn path_symlink(
885
&mut self,
886
memory: &mut GuestMemory<'_>,
887
src_path: GuestPtr<str>,
888
dirfd: types::Fd,
889
dest_path: GuestPtr<str>,
890
) -> Result<(), Error> {
891
self.table()
892
.get_dir(u32::from(dirfd))?
893
.dir
894
.symlink(
895
memory.as_cow_str(src_path)?.deref(),
896
memory.as_cow_str(dest_path)?.deref(),
897
)
898
.await
899
}
900
901
async fn path_unlink_file(
902
&mut self,
903
memory: &mut GuestMemory<'_>,
904
dirfd: types::Fd,
905
path: GuestPtr<str>,
906
) -> Result<(), Error> {
907
self.table()
908
.get_dir(u32::from(dirfd))?
909
.dir
910
.unlink_file(memory.as_cow_str(path)?.deref())
911
.await
912
}
913
914
async fn poll_oneoff(
915
&mut self,
916
memory: &mut GuestMemory<'_>,
917
subs: GuestPtr<types::Subscription>,
918
events: GuestPtr<types::Event>,
919
nsubscriptions: types::Size,
920
) -> Result<types::Size, Error> {
921
if nsubscriptions == 0 {
922
return Err(Error::invalid_argument().context("nsubscriptions must be nonzero"));
923
}
924
925
// Special-case a `poll_oneoff` which is just sleeping on a single
926
// relative timer event, such as what WASI libc uses to implement sleep
927
// functions. This supports all clock IDs, because POSIX says that
928
// `clock_settime` doesn't effect relative sleeps.
929
if nsubscriptions == 1 {
930
let sub = memory.read(subs)?;
931
if let types::SubscriptionU::Clock(clocksub) = sub.u {
932
if !clocksub
933
.flags
934
.contains(types::Subclockflags::SUBSCRIPTION_CLOCK_ABSTIME)
935
{
936
self.sched
937
.sleep(Duration::from_nanos(clocksub.timeout))
938
.await?;
939
memory.write(
940
events,
941
types::Event {
942
userdata: sub.userdata,
943
error: types::Errno::Success,
944
type_: types::Eventtype::Clock,
945
fd_readwrite: fd_readwrite_empty(),
946
},
947
)?;
948
return Ok(1);
949
}
950
}
951
}
952
953
let table = &self.table;
954
// We need these refmuts to outlive Poll, which will hold the &mut dyn WasiFile inside
955
let mut read_refs: Vec<(Arc<FileEntry>, Option<Userdata>)> = Vec::new();
956
let mut write_refs: Vec<(Arc<FileEntry>, Option<Userdata>)> = Vec::new();
957
958
let mut poll = Poll::new();
959
960
let subs = subs.as_array(nsubscriptions);
961
for sub_elem in subs.iter() {
962
let sub_ptr = sub_elem?;
963
let sub = memory.read(sub_ptr)?;
964
match sub.u {
965
types::SubscriptionU::Clock(clocksub) => match clocksub.id {
966
types::Clockid::Monotonic => {
967
let clock = self.clocks.monotonic()?;
968
let precision = Duration::from_nanos(clocksub.precision);
969
let duration = Duration::from_nanos(clocksub.timeout);
970
let start = if clocksub
971
.flags
972
.contains(types::Subclockflags::SUBSCRIPTION_CLOCK_ABSTIME)
973
{
974
clock.creation_time
975
} else {
976
clock.abs_clock.now(precision)
977
};
978
let deadline = start
979
.checked_add(duration)
980
.ok_or_else(|| Error::overflow().context("deadline"))?;
981
poll.subscribe_monotonic_clock(
982
&*clock.abs_clock,
983
deadline,
984
precision,
985
sub.userdata.into(),
986
)
987
}
988
types::Clockid::Realtime => {
989
// POSIX specifies that functions like `nanosleep` and others use the
990
// `REALTIME` clock. But it also says that `clock_settime` has no effect
991
// on threads waiting in these functions. MONOTONIC should always have
992
// resolution at least as good as REALTIME, so we can translate a
993
// non-absolute `REALTIME` request into a `MONOTONIC` request.
994
let clock = self.clocks.monotonic()?;
995
let precision = Duration::from_nanos(clocksub.precision);
996
let duration = Duration::from_nanos(clocksub.timeout);
997
let deadline = if clocksub
998
.flags
999
.contains(types::Subclockflags::SUBSCRIPTION_CLOCK_ABSTIME)
1000
{
1001
return Err(Error::not_supported());
1002
} else {
1003
clock
1004
.abs_clock
1005
.now(precision)
1006
.checked_add(duration)
1007
.ok_or_else(|| Error::overflow().context("deadline"))?
1008
};
1009
poll.subscribe_monotonic_clock(
1010
&*clock.abs_clock,
1011
deadline,
1012
precision,
1013
sub.userdata.into(),
1014
)
1015
}
1016
_ => Err(Error::invalid_argument()
1017
.context("timer subscriptions only support monotonic timer"))?,
1018
},
1019
types::SubscriptionU::FdRead(readsub) => {
1020
let fd = readsub.file_descriptor;
1021
let file_ref = table.get_file(u32::from(fd))?;
1022
read_refs.push((file_ref, Some(sub.userdata.into())));
1023
}
1024
types::SubscriptionU::FdWrite(writesub) => {
1025
let fd = writesub.file_descriptor;
1026
let file_ref = table.get_file(u32::from(fd))?;
1027
write_refs.push((file_ref, Some(sub.userdata.into())));
1028
}
1029
}
1030
}
1031
1032
let mut read_mut_refs: Vec<(&dyn WasiFile, Userdata)> = Vec::new();
1033
for (file_lock, userdata) in read_refs.iter_mut() {
1034
read_mut_refs.push((file_lock.file.deref(), userdata.take().unwrap()));
1035
}
1036
1037
for (f, ud) in read_mut_refs.iter_mut() {
1038
poll.subscribe_read(*f, *ud);
1039
}
1040
1041
let mut write_mut_refs: Vec<(&dyn WasiFile, Userdata)> = Vec::new();
1042
for (file_lock, userdata) in write_refs.iter_mut() {
1043
write_mut_refs.push((file_lock.file.deref(), userdata.take().unwrap()));
1044
}
1045
1046
for (f, ud) in write_mut_refs.iter_mut() {
1047
poll.subscribe_write(*f, *ud);
1048
}
1049
1050
self.sched.poll_oneoff(&mut poll).await?;
1051
1052
let results = poll.results();
1053
let num_results = results.len();
1054
assert!(
1055
num_results <= nsubscriptions as usize,
1056
"results exceeds subscriptions"
1057
);
1058
let events = events.as_array(
1059
num_results
1060
.try_into()
1061
.expect("not greater than nsubscriptions"),
1062
);
1063
for ((result, userdata), event_elem) in results.into_iter().zip(events.iter()) {
1064
let event_ptr = event_elem?;
1065
let userdata: types::Userdata = userdata.into();
1066
memory.write(
1067
event_ptr,
1068
match result {
1069
SubscriptionResult::Read(r) => {
1070
let type_ = types::Eventtype::FdRead;
1071
match r {
1072
Ok((nbytes, flags)) => types::Event {
1073
userdata,
1074
error: types::Errno::Success,
1075
type_,
1076
fd_readwrite: types::EventFdReadwrite {
1077
nbytes,
1078
flags: types::Eventrwflags::from(&flags),
1079
},
1080
},
1081
Err(e) => types::Event {
1082
userdata,
1083
error: e.downcast().map_err(Error::trap)?,
1084
type_,
1085
fd_readwrite: fd_readwrite_empty(),
1086
},
1087
}
1088
}
1089
SubscriptionResult::Write(r) => {
1090
let type_ = types::Eventtype::FdWrite;
1091
match r {
1092
Ok((nbytes, flags)) => types::Event {
1093
userdata,
1094
error: types::Errno::Success,
1095
type_,
1096
fd_readwrite: types::EventFdReadwrite {
1097
nbytes,
1098
flags: types::Eventrwflags::from(&flags),
1099
},
1100
},
1101
Err(e) => types::Event {
1102
userdata,
1103
error: e.downcast().map_err(Error::trap)?,
1104
type_,
1105
fd_readwrite: fd_readwrite_empty(),
1106
},
1107
}
1108
}
1109
SubscriptionResult::MonotonicClock(r) => {
1110
let type_ = types::Eventtype::Clock;
1111
types::Event {
1112
userdata,
1113
error: match r {
1114
Ok(()) => types::Errno::Success,
1115
Err(e) => e.downcast().map_err(Error::trap)?,
1116
},
1117
type_,
1118
fd_readwrite: fd_readwrite_empty(),
1119
}
1120
}
1121
},
1122
)?;
1123
}
1124
1125
Ok(num_results.try_into().expect("results fit into memory"))
1126
}
1127
1128
async fn proc_exit(
1129
&mut self,
1130
_memory: &mut GuestMemory<'_>,
1131
status: types::Exitcode,
1132
) -> anyhow::Error {
1133
// Check that the status is within WASI's range.
1134
if status < 126 {
1135
I32Exit(status as i32).into()
1136
} else {
1137
anyhow::Error::msg("exit with invalid exit status outside of [0..126)")
1138
}
1139
}
1140
1141
async fn proc_raise(
1142
&mut self,
1143
_memory: &mut GuestMemory<'_>,
1144
_sig: types::Signal,
1145
) -> Result<(), Error> {
1146
Err(Error::trap(anyhow::Error::msg("proc_raise unsupported")))
1147
}
1148
1149
    /// WASI `sched_yield`: delegate directly to the embedder-provided
    /// scheduler implementation.
    async fn sched_yield(&mut self, _memory: &mut GuestMemory<'_>) -> Result<(), Error> {
        self.sched.sched_yield().await
    }
1152
1153
async fn random_get(
1154
&mut self,
1155
memory: &mut GuestMemory<'_>,
1156
buf: GuestPtr<u8>,
1157
buf_len: types::Size,
1158
) -> Result<(), Error> {
1159
let buf = buf.as_array(buf_len);
1160
if memory.is_shared_memory() {
1161
// If the Wasm memory is shared, copy to an intermediate buffer to
1162
// avoid Rust unsafety (i.e., the called function could rely on
1163
// `&mut [u8]`'s exclusive ownership which is not guaranteed due to
1164
// potential access from other threads).
1165
let mut copied: u32 = 0;
1166
while copied < buf.len() {
1167
let len = (buf.len() - copied).min(MAX_SHARED_BUFFER_SIZE as u32);
1168
let mut tmp = vec![0; len as usize];
1169
self.random.lock().unwrap().try_fill_bytes(&mut tmp)?;
1170
let dest = buf.get_range(copied..copied + len).unwrap();
1171
memory.copy_from_slice(&tmp, dest)?;
1172
copied += len;
1173
}
1174
} else {
1175
// If the Wasm memory is non-shared, copy directly into the linear
1176
// memory.
1177
let mem = &mut memory.as_slice_mut(buf)?.unwrap();
1178
self.random.lock().unwrap().try_fill_bytes(mem)?;
1179
}
1180
Ok(())
1181
}
1182
1183
async fn sock_accept(
1184
&mut self,
1185
_memory: &mut GuestMemory<'_>,
1186
fd: types::Fd,
1187
flags: types::Fdflags,
1188
) -> Result<types::Fd, Error> {
1189
let table = self.table();
1190
let f = table.get_file(u32::from(fd))?;
1191
let file = f.file.sock_accept(FdFlags::from(flags)).await?;
1192
let fd = table.push(Arc::new(FileEntry::new(file, FileAccessMode::all())))?;
1193
Ok(types::Fd::from(fd))
1194
}
1195
1196
    /// Receive data from a socket into the guest's iovec list (WASI
    /// `sock_recv`), returning the number of bytes read and the receive
    /// flags (e.g. data-truncated).
    async fn sock_recv(
        &mut self,
        memory: &mut GuestMemory<'_>,
        fd: types::Fd,
        ri_data: types::IovecArray,
        ri_flags: types::Riflags,
    ) -> Result<(types::Size, types::Roflags), Error> {
        let f = self.table().get_file(u32::from(fd))?;

        // Read every guest iovec descriptor up front, turning each into a
        // raw guest pointer-to-slice.
        let iovs: Vec<wiggle::GuestPtr<[u8]>> = ri_data
            .iter()
            .map(|iov_ptr| {
                let iov_ptr = iov_ptr?;
                let iov: types::Iovec = memory.read(iov_ptr)?;
                Ok(iov.buf.as_array(iov.buf_len))
            })
            .collect::<Result<_, Error>>()?;

        // If the first iov structure is from shared memory we can safely assume
        // all the rest will be. We then read into memory based on the memory's
        // shared-ness:
        // - if not shared, we copy directly into the Wasm memory
        // - if shared, we use an intermediate buffer; this avoids Rust unsafety
        //   due to holding on to a `&mut [u8]` of Wasm memory when we cannot
        //   guarantee the `&mut` exclusivity--other threads could be modifying
        //   the data as this functions writes to it. Though likely there is no
        //   issue with OS writing to io structs in multi-threaded scenarios,
        //   since we do not know here if `&dyn WasiFile` does anything else
        //   (e.g., read), we cautiously incur some performance overhead by
        //   copying twice.
        let is_shared_memory = memory.is_shared_memory();
        let (bytes_read, ro_flags) = if is_shared_memory {
            // For shared memory, read into an intermediate buffer. Only the
            // first iov will be filled and even then the read is capped by the
            // `MAX_SHARED_BUFFER_SIZE`, so users are expected to re-call.
            let iov = iovs.into_iter().next();
            if let Some(iov) = iov {
                let mut buffer = vec![0; (iov.len() as usize).min(MAX_SHARED_BUFFER_SIZE)];
                let (bytes_read, ro_flags) = f
                    .file
                    .sock_recv(&mut [IoSliceMut::new(&mut buffer)], RiFlags::from(ri_flags))
                    .await?;
                // Narrow the destination to exactly the bytes received before
                // copying them into the guest.
                let iov = iov
                    .get_range(0..bytes_read.try_into()?)
                    .expect("it should always be possible to slice the iov smaller");
                memory.copy_from_slice(&buffer[0..bytes_read.try_into()?], iov)?;
                (bytes_read, ro_flags)
            } else {
                // No iovecs were supplied: nothing to read into.
                return Ok((0, RoFlags::empty().into()));
            }
        } else {
            // Convert all of the unsafe guest slices to safe ones--this uses
            // Wiggle's internal borrow checker to ensure no overlaps. We assume
            // here that, because the memory is not shared, there are no other
            // threads to access it while it is written to.
            let guest_slice: &mut [u8] = match iovs.into_iter().filter(|iov| iov.len() > 0).next() {
                Some(iov) => memory.as_slice_mut(iov)?.unwrap(),
                None => &mut [],
            };

            // Read directly into the Wasm memory.
            f.file
                .sock_recv(&mut [IoSliceMut::new(guest_slice)], RiFlags::from(ri_flags))
                .await?
        };

        Ok((types::Size::try_from(bytes_read)?, ro_flags.into()))
    }
1264
1265
async fn sock_send(
1266
&mut self,
1267
memory: &mut GuestMemory<'_>,
1268
fd: types::Fd,
1269
si_data: types::CiovecArray,
1270
_si_flags: types::Siflags,
1271
) -> Result<types::Size, Error> {
1272
let f = self.table().get_file(u32::from(fd))?;
1273
1274
let guest_slices: Vec<Cow<[u8]>> = si_data
1275
.iter()
1276
.map(|iov_ptr| {
1277
let iov_ptr = iov_ptr?;
1278
let iov: types::Ciovec = memory.read(iov_ptr)?;
1279
Ok(memory.as_cow(iov.buf.as_array(iov.buf_len))?)
1280
})
1281
.collect::<Result<_, Error>>()?;
1282
1283
let ioslices: Vec<IoSlice> = guest_slices
1284
.iter()
1285
.map(|s| IoSlice::new(s.deref()))
1286
.collect();
1287
let bytes_written = f.file.sock_send(&ioslices, SiFlags::empty()).await?;
1288
1289
Ok(types::Size::try_from(bytes_written)?)
1290
}
1291
1292
async fn sock_shutdown(
1293
&mut self,
1294
_memory: &mut GuestMemory<'_>,
1295
fd: types::Fd,
1296
how: types::Sdflags,
1297
) -> Result<(), Error> {
1298
let f = self.table().get_file(u32::from(fd))?;
1299
1300
f.file.sock_shutdown(SdFlags::from(how)).await
1301
}
1302
}
1303
1304
/// Map a guest `fd_advise` hint to the host-side `Advice` enum; the two
/// variant sets correspond one-to-one.
impl From<types::Advice> for Advice {
    fn from(advice: types::Advice) -> Advice {
        match advice {
            types::Advice::Normal => Advice::Normal,
            types::Advice::Sequential => Advice::Sequential,
            types::Advice::Random => Advice::Random,
            types::Advice::Willneed => Advice::WillNeed,
            types::Advice::Dontneed => Advice::DontNeed,
            types::Advice::Noreuse => Advice::NoReuse,
        }
    }
}
1316
1317
impl From<&FdStat> for types::Fdstat {
1318
fn from(fdstat: &FdStat) -> types::Fdstat {
1319
let mut fs_rights_base = types::Rights::empty();
1320
if fdstat.access_mode.contains(FileAccessMode::READ) {
1321
fs_rights_base |= types::Rights::FD_READ;
1322
}
1323
if fdstat.access_mode.contains(FileAccessMode::WRITE) {
1324
fs_rights_base |= types::Rights::FD_WRITE;
1325
}
1326
types::Fdstat {
1327
fs_filetype: types::Filetype::from(&fdstat.filetype),
1328
fs_rights_base,
1329
fs_rights_inheriting: types::Rights::empty(),
1330
fs_flags: types::Fdflags::from(fdstat.flags),
1331
}
1332
}
1333
}
1334
1335
/// Map a host `FileType` onto the guest's `filetype` enum.
///
/// Preview 1 has no dedicated pipe filetype, so both `Unknown` and `Pipe`
/// are reported as `Unknown`.
impl From<&FileType> for types::Filetype {
    fn from(ft: &FileType) -> types::Filetype {
        match ft {
            FileType::Directory => types::Filetype::Directory,
            FileType::BlockDevice => types::Filetype::BlockDevice,
            FileType::CharacterDevice => types::Filetype::CharacterDevice,
            FileType::RegularFile => types::Filetype::RegularFile,
            FileType::SocketDgram => types::Filetype::SocketDgram,
            FileType::SocketStream => types::Filetype::SocketStream,
            FileType::SymbolicLink => types::Filetype::SymbolicLink,
            FileType::Unknown => types::Filetype::Unknown,
            FileType::Pipe => types::Filetype::Unknown,
        }
    }
}
1350
1351
/// Generates a one-way `From<$from> for $to` impl for two bitflag types
/// whose listed flags share the same names: each flag set in the source is
/// copied into the output.
macro_rules! convert_flags {
    ($from:ty, $to:ty, $($flag:ident),+) => {
        impl From<$from> for $to {
            fn from(f: $from) -> $to {
                let mut out = <$to>::empty();
                $(
                    // Copy each identically-named flag bit across.
                    if f.contains(<$from>::$flag) {
                        out |= <$to>::$flag;
                    }
                )+
                out
            }
        }
    }
}
1366
1367
/// Like `convert_flags!`, but generates `From` impls in both directions.
macro_rules! convert_flags_bidirectional {
    ($from:ty, $to:ty, $($rest:tt)*) => {
        convert_flags!($from, $to, $($rest)*);
        convert_flags!($to, $from, $($rest)*);
    }
}
1373
1374
// Flag types whose variants share names on the host and guest side can be
// converted mechanically in both directions.
convert_flags_bidirectional!(
    FdFlags,
    types::Fdflags,
    APPEND,
    DSYNC,
    NONBLOCK,
    RSYNC,
    SYNC
);

convert_flags_bidirectional!(RiFlags, types::Riflags, RECV_PEEK, RECV_WAITALL);

convert_flags_bidirectional!(RoFlags, types::Roflags, RECV_DATA_TRUNCATED);

convert_flags_bidirectional!(SdFlags, types::Sdflags, RD, WR);
1389
1390
impl From<&types::Oflags> for OFlags {
1391
fn from(oflags: &types::Oflags) -> OFlags {
1392
let mut out = OFlags::empty();
1393
if oflags.contains(types::Oflags::CREAT) {
1394
out = out | OFlags::CREATE;
1395
}
1396
if oflags.contains(types::Oflags::DIRECTORY) {
1397
out = out | OFlags::DIRECTORY;
1398
}
1399
if oflags.contains(types::Oflags::EXCL) {
1400
out = out | OFlags::EXCLUSIVE;
1401
}
1402
if oflags.contains(types::Oflags::TRUNC) {
1403
out = out | OFlags::TRUNCATE;
1404
}
1405
out
1406
}
1407
}
1408
1409
impl From<&OFlags> for types::Oflags {
1410
fn from(oflags: &OFlags) -> types::Oflags {
1411
let mut out = types::Oflags::empty();
1412
if oflags.contains(OFlags::CREATE) {
1413
out = out | types::Oflags::CREAT;
1414
}
1415
if oflags.contains(OFlags::DIRECTORY) {
1416
out = out | types::Oflags::DIRECTORY;
1417
}
1418
if oflags.contains(OFlags::EXCLUSIVE) {
1419
out = out | types::Oflags::EXCL;
1420
}
1421
if oflags.contains(OFlags::TRUNCATE) {
1422
out = out | types::Oflags::TRUNC;
1423
}
1424
out
1425
}
1426
}
1427
/// Convert a host `Filestat` into the guest `filestat` layout.
///
/// Timestamps become nanoseconds since the Unix epoch, with `0` standing in
/// for "unknown".
impl From<Filestat> for types::Filestat {
    fn from(stat: Filestat) -> types::Filestat {
        // NOTE(review): `duration_since(UNIX_EPOCH).unwrap()` panics for
        // pre-epoch timestamps, matching the original behavior.
        fn nanos_since_epoch(t: Option<std::time::SystemTime>) -> u64 {
            t.map(|t| t.duration_since(std::time::UNIX_EPOCH).unwrap().as_nanos() as u64)
                .unwrap_or(0)
        }
        types::Filestat {
            dev: stat.device_id,
            ino: stat.inode,
            filetype: types::Filetype::from(&stat.filetype),
            nlink: stat.nlink,
            size: stat.size,
            atim: nanos_since_epoch(stat.atim),
            mtim: nanos_since_epoch(stat.mtim),
            ctim: nanos_since_epoch(stat.ctim),
        }
    }
}
1450
1451
/// Build a guest `dirent` header from a readdir entity.
///
/// Fails with an overflow error if the entry name's byte length does not
/// fit in the guest's `d_namlen` field.
impl TryFrom<&ReaddirEntity> for types::Dirent {
    type Error = Error;
    fn try_from(e: &ReaddirEntity) -> Result<types::Dirent, Error> {
        Ok(types::Dirent {
            d_ino: e.inode,
            d_namlen: e.name.as_bytes().len().try_into()?,
            d_type: types::Filetype::from(&e.filetype),
            d_next: e.next.into(),
        })
    }
}
1462
1463
/// Serialize a `Dirent` into the little-endian byte layout the guest expects
/// in `fd_readdir` output buffers.
fn dirent_bytes(dirent: types::Dirent) -> Vec<u8> {
    use wiggle::GuestType;
    // The raw-pointer write below is only sound if the host struct layout
    // matches the guest ABI layout exactly; these assertions enforce that.
    assert_eq!(
        types::Dirent::guest_size(),
        std::mem::size_of::<types::Dirent>() as u32,
        "Dirent guest repr and host repr should match"
    );
    assert_eq!(
        1,
        std::mem::size_of_val(&dirent.d_type),
        "Dirent member d_type should be endian-invariant"
    );
    let size = types::Dirent::guest_size()
        .try_into()
        .expect("Dirent is smaller than 2^32");
    let mut bytes = Vec::with_capacity(size);
    bytes.resize(size, 0);
    let ptr = bytes.as_mut_ptr().cast::<types::Dirent>();
    // Multi-byte fields are converted to little-endian (the Wasm byte
    // order); `d_type` is a single byte and thus unaffected.
    let guest_dirent = types::Dirent {
        d_ino: dirent.d_ino.to_le(),
        d_namlen: dirent.d_namlen.to_le(),
        d_type: dirent.d_type, // endian-invariant
        d_next: dirent.d_next.to_le(),
    };
    // SAFETY: `bytes` was resized to `size_of::<types::Dirent>()` (checked
    // by the assertion above), so `ptr` points to enough writable memory;
    // `write_unaligned` places no alignment requirement on `ptr`.
    unsafe { ptr.write_unaligned(guest_dirent) };
    bytes
}
1490
1491
impl From<&RwEventFlags> for types::Eventrwflags {
1492
fn from(flags: &RwEventFlags) -> types::Eventrwflags {
1493
let mut out = types::Eventrwflags::empty();
1494
if flags.contains(RwEventFlags::HANGUP) {
1495
out = out | types::Eventrwflags::FD_READWRITE_HANGUP;
1496
}
1497
out
1498
}
1499
}
1500
1501
/// An all-zero `fd_readwrite` payload, used for event results (clock events
/// and errors) that carry no byte count or flags.
fn fd_readwrite_empty() -> types::EventFdReadwrite {
    types::EventFdReadwrite {
        nbytes: 0,
        flags: types::Eventrwflags::empty(),
    }
}
1507
1508
fn systimespec(
1509
set: bool,
1510
ts: types::Timestamp,
1511
now: bool,
1512
) -> Result<Option<SystemTimeSpec>, Error> {
1513
if set && now {
1514
Err(Error::invalid_argument())
1515
} else if set {
1516
Ok(Some(SystemTimeSpec::Absolute(
1517
SystemClock::UNIX_EPOCH + Duration::from_nanos(ts),
1518
)))
1519
} else if now {
1520
Ok(Some(SystemTimeSpec::SymbolicNow))
1521
} else {
1522
Ok(None)
1523
}
1524
}
1525
1526
/// This is the default subset of base Rights reported for directories prior
/// to <https://github.com/bytecodealliance/wasmtime/pull/6265>. Some
/// implementations still expect this set of rights to be reported.
pub(crate) fn directory_base_rights() -> types::Rights {
    types::Rights::PATH_CREATE_DIRECTORY
        | types::Rights::PATH_CREATE_FILE
        | types::Rights::PATH_LINK_SOURCE
        | types::Rights::PATH_LINK_TARGET
        | types::Rights::PATH_OPEN
        | types::Rights::FD_READDIR
        | types::Rights::PATH_READLINK
        | types::Rights::PATH_RENAME_SOURCE
        | types::Rights::PATH_RENAME_TARGET
        | types::Rights::PATH_SYMLINK
        | types::Rights::PATH_REMOVE_DIRECTORY
        | types::Rights::PATH_UNLINK_FILE
        | types::Rights::PATH_FILESTAT_GET
        | types::Rights::PATH_FILESTAT_SET_TIMES
        | types::Rights::FD_FILESTAT_GET
        | types::Rights::FD_FILESTAT_SET_TIMES
}
1547
1548
/// This is the default subset of inheriting Rights reported for directories
/// prior to <https://github.com/bytecodealliance/wasmtime/pull/6265>. Some
/// implementations still expect this set of rights to be reported.
pub(crate) fn directory_inheriting_rights() -> types::Rights {
    types::Rights::FD_DATASYNC
        | types::Rights::FD_READ
        | types::Rights::FD_SEEK
        | types::Rights::FD_FDSTAT_SET_FLAGS
        | types::Rights::FD_SYNC
        | types::Rights::FD_TELL
        | types::Rights::FD_WRITE
        | types::Rights::FD_ADVISE
        | types::Rights::FD_ALLOCATE
        | types::Rights::FD_FILESTAT_GET
        | types::Rights::FD_FILESTAT_SET_SIZE
        | types::Rights::FD_FILESTAT_SET_TIMES
        | types::Rights::POLL_FD_READWRITE
        | directory_base_rights()
}
1567
1568