// GitHub Repository: bytecodealliance/wasmtime
// Path: blob/main/crates/wasi-common/src/snapshots/preview_1.rs
1
use crate::{
2
EnvError, I32Exit, SystemTimeSpec, WasiCtx,
3
dir::{DirEntry, OpenResult, ReaddirCursor, ReaddirEntity, TableDirExt},
4
file::{
5
Advice, FdFlags, FdStat, FileAccessMode, FileEntry, FileType, Filestat, OFlags, RiFlags,
6
RoFlags, SdFlags, SiFlags, TableFileExt, WasiFile,
7
},
8
sched::{
9
Poll, Userdata,
10
subscription::{RwEventFlags, SubscriptionResult},
11
},
12
};
13
use cap_std::time::{Duration, SystemClock};
14
use std::borrow::Cow;
15
use std::io::{IoSlice, IoSliceMut};
16
use std::ops::Deref;
17
use std::sync::Arc;
18
use wiggle::GuestMemory;
19
use wiggle::GuestPtr;
20
21
pub mod error;
22
use error::{Error, ErrorExt};
23
24
// Limit the size of intermediate buffers when copying to WebAssembly shared
25
// memory.
26
pub(crate) const MAX_SHARED_BUFFER_SIZE: usize = 1 << 16;
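
// A sketch (guest-side pseudocode, not part of this crate) of what this cap
// implies for reads into shared memories: at most `MAX_SHARED_BUFFER_SIZE`
// bytes are copied per `fd_read`-style call, so callers should treat short
// reads as normal and loop; `read_some` below is a hypothetical guest wrapper.
//
//     let mut filled = 0;
//     while filled < want {
//         let n = read_some(fd, &mut buf[filled..])?; // may return < requested
//         if n == 0 { break; }                        // 0 bytes means EOF
//         filled += n;
//     }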
27
28
wiggle::from_witx!({
29
witx: ["witx/preview1/wasi_snapshot_preview1.witx"],
30
errors: { errno => trappable Error },
31
    // Note: not every function actually needs to be async; however, nearly all of them do, and
32
// keeping that set the same in this macro and the wasmtime_wiggle / lucet_wiggle macros is
33
// tedious, and there is no cost to having a sync function be async in this case.
34
async: *,
35
wasmtime: false,
36
});
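
// The `from_witx!` invocation above generates, among other things, the `types`
// module and the `wasi_snapshot_preview1::WasiSnapshotPreview1` trait that
// `WasiCtx` implements below. Roughly (a hand-written sketch, not the literal
// expansion), each witx function becomes an async trait method such as:
//
//     async fn fd_read(
//         &mut self,
//         memory: &mut GuestMemory<'_>,
//         fd: types::Fd,
//         iovs: types::IovecArray,
//     ) -> Result<types::Size, Error>;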
37
38
impl wiggle::GuestErrorType for types::Errno {
39
fn success() -> Self {
40
Self::Success
41
}
42
}
43
44
impl wasi_snapshot_preview1::WasiSnapshotPreview1 for WasiCtx {
45
async fn args_get(
46
&mut self,
47
memory: &mut GuestMemory<'_>,
48
argv: GuestPtr<GuestPtr<u8>>,
49
argv_buf: GuestPtr<u8>,
50
) -> Result<(), Error> {
51
self.args.write_to_guest(memory, argv_buf, argv)
52
}
53
54
async fn args_sizes_get(
55
&mut self,
56
_memory: &mut GuestMemory<'_>,
57
) -> Result<(types::Size, types::Size), Error> {
58
Ok((self.args.number_elements(), self.args.cumulative_size()))
59
}
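
    // A sketch (guest-side pseudocode, not part of this crate) of the usual
    // two-call protocol for the functions above: the guest first asks for the
    // sizes, allocates space, then asks for the data. `alloc` is a stand-in
    // for whatever guest allocator is in use.
    //
    //     let (argc, argv_buf_size) = args_sizes_get()?;
    //     let argv_buf = alloc(argv_buf_size);   // flat buffer of NUL-terminated strings
    //     let argv = alloc(argc * 4);            // wasm32 pointers into `argv_buf`
    //     args_get(argv, argv_buf)?;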
60
61
async fn environ_get(
62
&mut self,
63
memory: &mut GuestMemory<'_>,
64
environ: GuestPtr<GuestPtr<u8>>,
65
environ_buf: GuestPtr<u8>,
66
) -> Result<(), Error> {
67
self.env.write_to_guest(memory, environ_buf, environ)
68
}
69
70
async fn environ_sizes_get(
71
&mut self,
72
_memory: &mut GuestMemory<'_>,
73
) -> Result<(types::Size, types::Size), Error> {
74
Ok((self.env.number_elements(), self.env.cumulative_size()))
75
}
76
77
async fn clock_res_get(
78
&mut self,
79
_memory: &mut GuestMemory<'_>,
80
id: types::Clockid,
81
) -> Result<types::Timestamp, Error> {
82
let resolution = match id {
83
types::Clockid::Realtime => Ok(self.clocks.system()?.resolution()),
84
types::Clockid::Monotonic => Ok(self.clocks.monotonic()?.abs_clock.resolution()),
85
types::Clockid::ProcessCputimeId | types::Clockid::ThreadCputimeId => {
86
Err(Error::badf().context("process and thread clocks are not supported"))
87
}
88
}?;
89
Ok(resolution.as_nanos().try_into()?)
90
}
91
92
async fn clock_time_get(
93
&mut self,
94
_memory: &mut GuestMemory<'_>,
95
id: types::Clockid,
96
precision: types::Timestamp,
97
) -> Result<types::Timestamp, Error> {
98
let precision = Duration::from_nanos(precision);
99
match id {
100
types::Clockid::Realtime => {
101
let now = self.clocks.system()?.now(precision).into_std();
102
let d = now
103
.duration_since(std::time::SystemTime::UNIX_EPOCH)
104
.map_err(|_| Error::trap(EnvError::msg("current time before unix epoch")))?;
105
Ok(d.as_nanos().try_into()?)
106
}
107
types::Clockid::Monotonic => {
108
let clock = self.clocks.monotonic()?;
109
let now = clock.abs_clock.now(precision);
110
let d = now.duration_since(clock.creation_time);
111
Ok(d.as_nanos().try_into()?)
112
}
113
types::Clockid::ProcessCputimeId | types::Clockid::ThreadCputimeId => {
114
Err(Error::badf().context("process and thread clocks are not supported"))
115
}
116
}
117
}
118
119
async fn fd_advise(
120
&mut self,
121
_memory: &mut GuestMemory<'_>,
122
fd: types::Fd,
123
offset: types::Filesize,
124
len: types::Filesize,
125
advice: types::Advice,
126
) -> Result<(), Error> {
127
self.table()
128
.get_file(u32::from(fd))?
129
.file
130
.advise(offset, len, advice.into())
131
.await?;
132
Ok(())
133
}
134
135
async fn fd_allocate(
136
&mut self,
137
_memory: &mut GuestMemory<'_>,
138
fd: types::Fd,
139
_offset: types::Filesize,
140
_len: types::Filesize,
141
) -> Result<(), Error> {
142
// Check if fd is a file, and has rights, just to reject those cases
143
// with the errors expected:
144
let _ = self.table().get_file(u32::from(fd))?;
145
        // This operation from CloudABI is Linux-specific, isn't even
        // supported across all Linux filesystems, and has no support on
        // macOS or Windows. Rather than ship spotty support, it has been
        // removed from preview 2, and it is no longer supported in
        // preview 1 either.
150
Err(Error::not_supported())
151
}
152
153
async fn fd_close(
154
&mut self,
155
_memory: &mut GuestMemory<'_>,
156
fd: types::Fd,
157
) -> Result<(), Error> {
158
let table = self.table();
159
let fd = u32::from(fd);
160
161
// Fail fast: If not present in table, Badf
162
if !table.contains_key(fd) {
163
return Err(Error::badf().context("key not in table"));
164
}
165
// fd_close must close either a File or a Dir handle
166
if table.is::<FileEntry>(fd) {
167
let _ = table.delete::<FileEntry>(fd);
168
} else if table.is::<DirEntry>(fd) {
169
let _ = table.delete::<DirEntry>(fd);
170
} else {
171
return Err(Error::badf().context("key does not refer to file or directory"));
172
}
173
174
Ok(())
175
}
176
177
async fn fd_datasync(
178
&mut self,
179
_memory: &mut GuestMemory<'_>,
180
fd: types::Fd,
181
) -> Result<(), Error> {
182
self.table()
183
.get_file(u32::from(fd))?
184
.file
185
.datasync()
186
.await?;
187
Ok(())
188
}
189
190
async fn fd_fdstat_get(
191
&mut self,
192
_memory: &mut GuestMemory<'_>,
193
fd: types::Fd,
194
) -> Result<types::Fdstat, Error> {
195
let table = self.table();
196
let fd = u32::from(fd);
197
if table.is::<FileEntry>(fd) {
198
let file_entry: Arc<FileEntry> = table.get(fd)?;
199
let fdstat = file_entry.get_fdstat().await?;
200
Ok(types::Fdstat::from(&fdstat))
201
} else if table.is::<DirEntry>(fd) {
202
let _dir_entry: Arc<DirEntry> = table.get(fd)?;
203
let dir_fdstat = types::Fdstat {
204
fs_filetype: types::Filetype::Directory,
205
fs_rights_base: directory_base_rights(),
206
fs_rights_inheriting: directory_inheriting_rights(),
207
fs_flags: types::Fdflags::empty(),
208
};
209
Ok(dir_fdstat)
210
} else {
211
Err(Error::badf())
212
}
213
}
214
215
async fn fd_fdstat_set_flags(
216
&mut self,
217
_memory: &mut GuestMemory<'_>,
218
fd: types::Fd,
219
flags: types::Fdflags,
220
) -> Result<(), Error> {
221
if let Some(table) = self.table_mut() {
222
table
223
.get_file_mut(u32::from(fd))?
224
.file
225
.set_fdflags(FdFlags::from(flags))
226
.await
227
} else {
228
log::warn!(
229
"`fd_fdstat_set_flags` does not work with wasi-threads enabled; see https://github.com/bytecodealliance/wasmtime/issues/5643"
230
);
231
Err(Error::not_supported())
232
}
233
}
234
235
async fn fd_fdstat_set_rights(
236
&mut self,
237
_memory: &mut GuestMemory<'_>,
238
fd: types::Fd,
239
_fs_rights_base: types::Rights,
240
_fs_rights_inheriting: types::Rights,
241
) -> Result<(), Error> {
242
let table = self.table();
243
let fd = u32::from(fd);
244
if table.is::<FileEntry>(fd) {
245
let _file_entry: Arc<FileEntry> = table.get(fd)?;
246
Err(Error::not_supported())
247
} else if table.is::<DirEntry>(fd) {
248
let _dir_entry: Arc<DirEntry> = table.get(fd)?;
249
Err(Error::not_supported())
250
} else {
251
Err(Error::badf())
252
}
253
}
254
255
async fn fd_filestat_get(
256
&mut self,
257
_memory: &mut GuestMemory<'_>,
258
fd: types::Fd,
259
) -> Result<types::Filestat, Error> {
260
let table = self.table();
261
let fd = u32::from(fd);
262
if table.is::<FileEntry>(fd) {
263
let filestat = table.get_file(fd)?.file.get_filestat().await?;
264
Ok(filestat.into())
265
} else if table.is::<DirEntry>(fd) {
266
let filestat = table.get_dir(fd)?.dir.get_filestat().await?;
267
Ok(filestat.into())
268
} else {
269
Err(Error::badf())
270
}
271
}
272
273
async fn fd_filestat_set_size(
274
&mut self,
275
_memory: &mut GuestMemory<'_>,
276
fd: types::Fd,
277
size: types::Filesize,
278
) -> Result<(), Error> {
279
self.table()
280
.get_file(u32::from(fd))?
281
.file
282
.set_filestat_size(size)
283
.await?;
284
Ok(())
285
}
286
287
async fn fd_filestat_set_times(
288
&mut self,
289
_memory: &mut GuestMemory<'_>,
290
fd: types::Fd,
291
atim: types::Timestamp,
292
mtim: types::Timestamp,
293
fst_flags: types::Fstflags,
294
) -> Result<(), Error> {
295
let fd = u32::from(fd);
296
let table = self.table();
297
// Validate flags
298
let set_atim = fst_flags.contains(types::Fstflags::ATIM);
299
let set_atim_now = fst_flags.contains(types::Fstflags::ATIM_NOW);
300
let set_mtim = fst_flags.contains(types::Fstflags::MTIM);
301
let set_mtim_now = fst_flags.contains(types::Fstflags::MTIM_NOW);
302
303
let atim = systimespec(set_atim, atim, set_atim_now).map_err(|e| e.context("atim"))?;
304
let mtim = systimespec(set_mtim, mtim, set_mtim_now).map_err(|e| e.context("mtim"))?;
305
306
if table.is::<FileEntry>(fd) {
307
table
308
.get_file(fd)
309
.expect("checked that entry is file")
310
.file
311
.set_times(atim, mtim)
312
.await
313
} else if table.is::<DirEntry>(fd) {
314
table
315
.get_dir(fd)
316
.expect("checked that entry is dir")
317
.dir
318
.set_times(".", atim, mtim, false)
319
.await
320
} else {
321
Err(Error::badf())
322
}
323
}
324
325
async fn fd_read(
326
&mut self,
327
memory: &mut GuestMemory<'_>,
328
fd: types::Fd,
329
iovs: types::IovecArray,
330
) -> Result<types::Size, Error> {
331
let f = self.table().get_file(u32::from(fd))?;
332
// Access mode check normalizes error returned (windows would prefer ACCES here)
333
if !f.access_mode.contains(FileAccessMode::READ) {
334
Err(types::Errno::Badf)?
335
}
336
let f = &f.file;
337
338
let iovs: Vec<wiggle::GuestPtr<[u8]>> = iovs
339
.iter()
340
.map(|iov_ptr| {
341
let iov_ptr = iov_ptr?;
342
let iov: types::Iovec = memory.read(iov_ptr)?;
343
Ok(iov.buf.as_array(iov.buf_len))
344
})
345
.collect::<Result<_, Error>>()?;
346
347
// If the first iov structure is from shared memory we can safely assume
348
// all the rest will be. We then read into memory based on the memory's
349
// shared-ness:
350
// - if not shared, we copy directly into the Wasm memory
351
// - if shared, we use an intermediate buffer; this avoids Rust unsafety
352
// due to holding on to a `&mut [u8]` of Wasm memory when we cannot
353
// guarantee the `&mut` exclusivity--other threads could be modifying
354
        // the data as this function writes to it. Though likely there is no
        // issue with the OS writing to I/O structs in multi-threaded scenarios,
356
// since we do not know here if `&dyn WasiFile` does anything else
357
// (e.g., read), we cautiously incur some performance overhead by
358
// copying twice.
359
let is_shared_memory = memory.is_shared_memory();
360
let bytes_read: u64 = if is_shared_memory {
361
// For shared memory, read into an intermediate buffer. Only the
362
// first iov will be filled and even then the read is capped by the
363
// `MAX_SHARED_BUFFER_SIZE`, so users are expected to re-call.
364
let iov = iovs.into_iter().next();
365
if let Some(iov) = iov {
366
let mut buffer = vec![0; (iov.len() as usize).min(MAX_SHARED_BUFFER_SIZE)];
367
let bytes_read = f.read_vectored(&mut [IoSliceMut::new(&mut buffer)]).await?;
368
let iov = iov
369
.get_range(0..bytes_read.try_into()?)
370
.expect("it should always be possible to slice the iov smaller");
371
memory.copy_from_slice(&buffer[0..bytes_read.try_into()?], iov)?;
372
bytes_read
373
} else {
374
return Ok(0);
375
}
376
} else {
377
// Convert the first unsafe guest slice into a safe one--Wiggle
378
// can only track mutable borrows for an entire region, and converting
379
// all guest pointers to slices would cause a runtime borrow-checking
380
// error. As read is allowed to return less than the requested amount,
381
// it's valid (though not as efficient) for us to only perform the
382
// read of the first buffer.
383
let guest_slice: &mut [u8] = match iovs.into_iter().filter(|iov| iov.len() > 0).next() {
384
Some(iov) => memory.as_slice_mut(iov)?.unwrap(),
385
None => return Ok(0),
386
};
387
388
// Read directly into the Wasm memory.
389
f.read_vectored(&mut [IoSliceMut::new(guest_slice)]).await?
390
};
391
392
Ok(types::Size::try_from(bytes_read)?)
393
}
394
395
async fn fd_pread(
396
&mut self,
397
memory: &mut GuestMemory<'_>,
398
fd: types::Fd,
399
iovs: types::IovecArray,
400
offset: types::Filesize,
401
) -> Result<types::Size, Error> {
402
let f = self.table().get_file(u32::from(fd))?;
403
// Access mode check normalizes error returned (windows would prefer ACCES here)
404
if !f.access_mode.contains(FileAccessMode::READ) {
405
Err(types::Errno::Badf)?
406
}
407
let f = &f.file;
408
409
let iovs: Vec<wiggle::GuestPtr<[u8]>> = iovs
410
.iter()
411
.map(|iov_ptr| {
412
let iov_ptr = iov_ptr?;
413
let iov: types::Iovec = memory.read(iov_ptr)?;
414
Ok(iov.buf.as_array(iov.buf_len))
415
})
416
.collect::<Result<_, Error>>()?;
417
418
// If the first iov structure is from shared memory we can safely assume
419
// all the rest will be. We then read into memory based on the memory's
420
// shared-ness:
421
// - if not shared, we copy directly into the Wasm memory
422
// - if shared, we use an intermediate buffer; this avoids Rust unsafety
423
// due to holding on to a `&mut [u8]` of Wasm memory when we cannot
424
// guarantee the `&mut` exclusivity--other threads could be modifying
425
// the data as this functions writes to it. Though likely there is no
426
// issue with OS writing to io structs in multi-threaded scenarios,
427
// since we do not know here if `&dyn WasiFile` does anything else
428
// (e.g., read), we cautiously incur some performance overhead by
429
// copying twice.
430
let is_shared_memory = memory.is_shared_memory();
431
let bytes_read: u64 = if is_shared_memory {
432
// For shared memory, read into an intermediate buffer. Only the
433
// first iov will be filled and even then the read is capped by the
434
// `MAX_SHARED_BUFFER_SIZE`, so users are expected to re-call.
435
let iov = iovs.into_iter().next();
436
if let Some(iov) = iov {
437
let mut buffer = vec![0; (iov.len() as usize).min(MAX_SHARED_BUFFER_SIZE)];
438
let bytes_read = f
439
.read_vectored_at(&mut [IoSliceMut::new(&mut buffer)], offset)
440
.await?;
441
let iov = iov
442
.get_range(0..bytes_read.try_into()?)
443
.expect("it should always be possible to slice the iov smaller");
444
memory.copy_from_slice(&buffer[0..bytes_read.try_into()?], iov)?;
445
bytes_read
446
} else {
447
return Ok(0);
448
}
449
} else {
450
            // Convert the first unsafe guest slice into a safe one.
451
let guest_slice: &mut [u8] = match iovs.into_iter().filter(|iov| iov.len() > 0).next() {
452
Some(iov) => memory.as_slice_mut(iov)?.unwrap(),
453
None => return Ok(0),
454
};
455
456
// Read directly into the Wasm memory.
457
f.read_vectored_at(&mut [IoSliceMut::new(guest_slice)], offset)
458
.await?
459
};
460
461
Ok(types::Size::try_from(bytes_read)?)
462
}
463
464
async fn fd_write(
465
&mut self,
466
memory: &mut GuestMemory<'_>,
467
fd: types::Fd,
468
ciovs: types::CiovecArray,
469
) -> Result<types::Size, Error> {
470
let f = self.table().get_file(u32::from(fd))?;
471
// Access mode check normalizes error returned (windows would prefer ACCES here)
472
if !f.access_mode.contains(FileAccessMode::WRITE) {
473
Err(types::Errno::Badf)?
474
}
475
let f = &f.file;
476
477
let guest_slices: Vec<Cow<[u8]>> = ciovs
478
.iter()
479
.map(|iov_ptr| {
480
let iov_ptr = iov_ptr?;
481
let iov: types::Ciovec = memory.read(iov_ptr)?;
482
Ok(memory.as_cow(iov.buf.as_array(iov.buf_len))?)
483
})
484
.collect::<Result<_, Error>>()?;
485
486
let ioslices: Vec<IoSlice> = guest_slices
487
.iter()
488
.map(|s| IoSlice::new(s.deref()))
489
.collect();
490
let bytes_written = f.write_vectored(&ioslices).await?;
491
492
Ok(types::Size::try_from(bytes_written)?)
493
}
494
495
async fn fd_pwrite(
496
&mut self,
497
memory: &mut GuestMemory<'_>,
498
fd: types::Fd,
499
ciovs: types::CiovecArray,
500
offset: types::Filesize,
501
) -> Result<types::Size, Error> {
502
let f = self.table().get_file(u32::from(fd))?;
503
// Access mode check normalizes error returned (windows would prefer ACCES here)
504
if !f.access_mode.contains(FileAccessMode::WRITE) {
505
Err(types::Errno::Badf)?
506
}
507
let f = &f.file;
508
509
let guest_slices: Vec<Cow<[u8]>> = ciovs
510
.iter()
511
.map(|iov_ptr| {
512
let iov_ptr = iov_ptr?;
513
let iov: types::Ciovec = memory.read(iov_ptr)?;
514
Ok(memory.as_cow(iov.buf.as_array(iov.buf_len))?)
515
})
516
.collect::<Result<_, Error>>()?;
517
518
let ioslices: Vec<IoSlice> = guest_slices
519
.iter()
520
.map(|s| IoSlice::new(s.deref()))
521
.collect();
522
let bytes_written = f.write_vectored_at(&ioslices, offset).await?;
523
524
Ok(types::Size::try_from(bytes_written)?)
525
}
526
527
async fn fd_prestat_get(
528
&mut self,
529
_memory: &mut GuestMemory<'_>,
530
fd: types::Fd,
531
) -> Result<types::Prestat, Error> {
532
let table = self.table();
533
let dir_entry: Arc<DirEntry> = table.get(u32::from(fd)).map_err(|_| Error::badf())?;
534
if let Some(preopen) = dir_entry.preopen_path() {
535
let path_str = preopen.to_str().ok_or_else(|| Error::not_supported())?;
536
let pr_name_len = u32::try_from(path_str.as_bytes().len())?;
537
Ok(types::Prestat::Dir(types::PrestatDir { pr_name_len }))
538
} else {
539
Err(Error::not_supported().context("file is not a preopen"))
540
}
541
}
542
543
async fn fd_prestat_dir_name(
544
&mut self,
545
memory: &mut GuestMemory<'_>,
546
fd: types::Fd,
547
path: GuestPtr<u8>,
548
path_max_len: types::Size,
549
) -> Result<(), Error> {
550
let table = self.table();
551
let dir_entry: Arc<DirEntry> = table.get(u32::from(fd)).map_err(|_| Error::not_dir())?;
552
if let Some(preopen) = dir_entry.preopen_path() {
553
let path_bytes = preopen
554
.to_str()
555
.ok_or_else(|| Error::not_supported())?
556
.as_bytes();
557
let path_len = path_bytes.len();
558
if path_len > path_max_len as usize {
559
return Err(Error::name_too_long());
560
}
561
let path = path.as_array(path_len as u32);
562
memory.copy_from_slice(path_bytes, path)?;
563
Ok(())
564
} else {
565
Err(Error::not_supported())
566
}
567
}
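
    // A sketch (guest-side pseudocode, not part of this crate) of how preopened
    // directories are typically discovered with the two functions above: probe
    // descriptors starting at 3 until `badf`, then fetch each path.
    //
    //     let mut fd = 3;
    //     while let Ok(types::Prestat::Dir(dir)) = fd_prestat_get(fd) {
    //         let mut name = vec![0u8; dir.pr_name_len as usize];
    //         fd_prestat_dir_name(fd, name.as_mut_ptr(), dir.pr_name_len)?;
    //         fd += 1;
    //     }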
568
async fn fd_renumber(
569
&mut self,
570
_memory: &mut GuestMemory<'_>,
571
from: types::Fd,
572
to: types::Fd,
573
) -> Result<(), Error> {
574
let table = self.table();
575
let from = u32::from(from);
576
let to = u32::from(to);
577
if !table.contains_key(from) {
578
return Err(Error::badf());
579
}
580
if !table.contains_key(to) {
581
return Err(Error::badf());
582
}
583
table.renumber(from, to)
584
}
585
586
async fn fd_seek(
587
&mut self,
588
_memory: &mut GuestMemory<'_>,
589
fd: types::Fd,
590
offset: types::Filedelta,
591
whence: types::Whence,
592
) -> Result<types::Filesize, Error> {
593
use std::io::SeekFrom;
594
let whence = match whence {
595
types::Whence::Cur => SeekFrom::Current(offset),
596
types::Whence::End => SeekFrom::End(offset),
597
types::Whence::Set => {
598
SeekFrom::Start(offset.try_into().map_err(|_| Error::invalid_argument())?)
599
}
600
};
601
let newoffset = self
602
.table()
603
.get_file(u32::from(fd))?
604
.file
605
.seek(whence)
606
.await?;
607
Ok(newoffset)
608
}
609
610
async fn fd_sync(&mut self, _memory: &mut GuestMemory<'_>, fd: types::Fd) -> Result<(), Error> {
611
self.table().get_file(u32::from(fd))?.file.sync().await?;
612
Ok(())
613
}
614
615
async fn fd_tell(
616
&mut self,
617
_memory: &mut GuestMemory<'_>,
618
fd: types::Fd,
619
) -> Result<types::Filesize, Error> {
620
let offset = self
621
.table()
622
.get_file(u32::from(fd))?
623
.file
624
.seek(std::io::SeekFrom::Current(0))
625
.await?;
626
Ok(offset)
627
}
628
629
async fn fd_readdir(
630
&mut self,
631
memory: &mut GuestMemory<'_>,
632
fd: types::Fd,
633
mut buf: GuestPtr<u8>,
634
buf_len: types::Size,
635
cookie: types::Dircookie,
636
) -> Result<types::Size, Error> {
637
let mut bufused = 0;
638
for entity in self
639
.table()
640
.get_dir(u32::from(fd))?
641
.dir
642
.readdir(ReaddirCursor::from(cookie))
643
.await?
644
{
645
let entity = entity?;
646
let dirent_raw = dirent_bytes(types::Dirent::try_from(&entity)?);
647
let dirent_len: types::Size = dirent_raw.len().try_into()?;
648
let name_raw = entity.name.as_bytes();
649
let name_len: types::Size = name_raw.len().try_into()?;
650
651
// Copy as many bytes of the dirent as we can, up to the end of the buffer
652
let dirent_copy_len = std::cmp::min(dirent_len, buf_len - bufused);
653
let raw = buf.as_array(dirent_copy_len);
654
memory.copy_from_slice(&dirent_raw[..dirent_copy_len as usize], raw)?;
655
656
            // If the dirent struct wasn't copied entirely, return that we filled the buffer, which
657
// tells libc that we're not at EOF.
658
if dirent_copy_len < dirent_len {
659
return Ok(buf_len);
660
}
661
662
buf = buf.add(dirent_copy_len)?;
663
bufused += dirent_copy_len;
664
665
// Copy as many bytes of the name as we can, up to the end of the buffer
666
let name_copy_len = std::cmp::min(name_len, buf_len - bufused);
667
let raw = buf.as_array(name_copy_len);
668
memory.copy_from_slice(&name_raw[..name_copy_len as usize], raw)?;
669
670
            // If the name wasn't copied entirely, return that we filled the buffer, which
671
// tells libc that we're not at EOF
672
673
if name_copy_len < name_len {
674
return Ok(buf_len);
675
}
676
677
buf = buf.add(name_copy_len)?;
678
bufused += name_copy_len;
679
}
680
Ok(bufused)
681
}
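
    // A note on the buffer format produced by `fd_readdir` above (no new
    // behavior, just a summary): each entry is a packed little-endian
    // `types::Dirent` immediately followed by the raw name bytes,
    //
    //     [ Dirent ][ name bytes ][ Dirent ][ name bytes ] ...
    //
    // and if an entry or its name does not fit, exactly `buf_len` is returned
    // so libc treats the buffer as full rather than as EOF and resumes on the
    // next call from the cookie of the last complete entry.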
682
683
async fn path_create_directory(
684
&mut self,
685
memory: &mut GuestMemory<'_>,
686
dirfd: types::Fd,
687
path: GuestPtr<str>,
688
) -> Result<(), Error> {
689
self.table()
690
.get_dir(u32::from(dirfd))?
691
.dir
692
.create_dir(memory.as_cow_str(path)?.deref())
693
.await
694
}
695
696
async fn path_filestat_get(
697
&mut self,
698
memory: &mut GuestMemory<'_>,
699
dirfd: types::Fd,
700
flags: types::Lookupflags,
701
path: GuestPtr<str>,
702
) -> Result<types::Filestat, Error> {
703
let filestat = self
704
.table()
705
.get_dir(u32::from(dirfd))?
706
.dir
707
.get_path_filestat(
708
memory.as_cow_str(path)?.deref(),
709
flags.contains(types::Lookupflags::SYMLINK_FOLLOW),
710
)
711
.await?;
712
Ok(types::Filestat::from(filestat))
713
}
714
715
async fn path_filestat_set_times(
716
&mut self,
717
memory: &mut GuestMemory<'_>,
718
dirfd: types::Fd,
719
flags: types::Lookupflags,
720
path: GuestPtr<str>,
721
atim: types::Timestamp,
722
mtim: types::Timestamp,
723
fst_flags: types::Fstflags,
724
) -> Result<(), Error> {
725
let set_atim = fst_flags.contains(types::Fstflags::ATIM);
726
let set_atim_now = fst_flags.contains(types::Fstflags::ATIM_NOW);
727
let set_mtim = fst_flags.contains(types::Fstflags::MTIM);
728
let set_mtim_now = fst_flags.contains(types::Fstflags::MTIM_NOW);
729
730
let atim = systimespec(set_atim, atim, set_atim_now).map_err(|e| e.context("atim"))?;
731
let mtim = systimespec(set_mtim, mtim, set_mtim_now).map_err(|e| e.context("mtim"))?;
732
self.table()
733
.get_dir(u32::from(dirfd))?
734
.dir
735
.set_times(
736
memory.as_cow_str(path)?.deref(),
737
atim,
738
mtim,
739
flags.contains(types::Lookupflags::SYMLINK_FOLLOW),
740
)
741
.await
742
}
743
744
async fn path_link(
745
&mut self,
746
memory: &mut GuestMemory<'_>,
747
src_fd: types::Fd,
748
src_flags: types::Lookupflags,
749
src_path: GuestPtr<str>,
750
target_fd: types::Fd,
751
target_path: GuestPtr<str>,
752
) -> Result<(), Error> {
753
let table = self.table();
754
let src_dir = table.get_dir(u32::from(src_fd))?;
755
let target_dir = table.get_dir(u32::from(target_fd))?;
756
let symlink_follow = src_flags.contains(types::Lookupflags::SYMLINK_FOLLOW);
757
if symlink_follow {
758
return Err(Error::invalid_argument()
759
.context("symlink following on path_link is not supported"));
760
}
761
762
src_dir
763
.dir
764
.hard_link(
765
memory.as_cow_str(src_path)?.deref(),
766
target_dir.dir.deref(),
767
memory.as_cow_str(target_path)?.deref(),
768
)
769
.await
770
}
771
772
async fn path_open(
773
&mut self,
774
memory: &mut GuestMemory<'_>,
775
dirfd: types::Fd,
776
dirflags: types::Lookupflags,
777
path: GuestPtr<str>,
778
oflags: types::Oflags,
779
fs_rights_base: types::Rights,
780
_fs_rights_inheriting: types::Rights,
781
fdflags: types::Fdflags,
782
) -> Result<types::Fd, Error> {
783
let table = self.table();
784
let dirfd = u32::from(dirfd);
785
if table.is::<FileEntry>(dirfd) {
786
return Err(Error::not_dir());
787
}
788
let dir_entry = table.get_dir(dirfd)?;
789
790
let symlink_follow = dirflags.contains(types::Lookupflags::SYMLINK_FOLLOW);
791
792
let oflags = OFlags::from(&oflags);
793
let fdflags = FdFlags::from(fdflags);
794
let path = memory.as_cow_str(path)?;
795
796
let read = fs_rights_base.contains(types::Rights::FD_READ);
797
let write = fs_rights_base.contains(types::Rights::FD_WRITE);
798
let access_mode = if read {
799
FileAccessMode::READ
800
} else {
801
FileAccessMode::empty()
802
} | if write {
803
FileAccessMode::WRITE
804
} else {
805
FileAccessMode::empty()
806
};
807
808
let file = dir_entry
809
.dir
810
.open_file(symlink_follow, path.deref(), oflags, read, write, fdflags)
811
.await?;
812
drop(dir_entry);
813
814
let fd = match file {
815
// Paper over a divergence between Windows and POSIX, where
816
// POSIX returns EISDIR if you open a directory with the
817
// WRITE flag: https://pubs.opengroup.org/onlinepubs/9699919799/functions/open.html#:~:text=EISDIR
818
#[cfg(windows)]
819
OpenResult::Dir(_) if write => {
820
return Err(types::Errno::Isdir.into());
821
}
822
OpenResult::File(file) => table.push(Arc::new(FileEntry::new(file, access_mode)))?,
823
OpenResult::Dir(child_dir) => table.push(Arc::new(DirEntry::new(None, child_dir)))?,
824
};
825
Ok(types::Fd::from(fd))
826
}
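
    // A sketch (illustrative only) of how the rights bits map onto the access
    // mode computed in `path_open` above:
    //
    //     FD_READ             => FileAccessMode::READ
    //     FD_WRITE            => FileAccessMode::WRITE
    //     FD_READ | FD_WRITE  => FileAccessMode::READ | FileAccessMode::WRITE
    //     neither             => FileAccessMode::empty()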
827
828
async fn path_readlink(
829
&mut self,
830
memory: &mut GuestMemory<'_>,
831
dirfd: types::Fd,
832
path: GuestPtr<str>,
833
buf: GuestPtr<u8>,
834
buf_len: types::Size,
835
) -> Result<types::Size, Error> {
836
let link = self
837
.table()
838
.get_dir(u32::from(dirfd))?
839
.dir
840
.read_link(memory.as_cow_str(path)?.deref())
841
.await?
842
.into_os_string()
843
.into_string()
844
.map_err(|_| Error::illegal_byte_sequence().context("link contents"))?;
845
let link_bytes = link.as_bytes();
846
// Like posix readlink(2), silently truncate links when they are larger than the
847
// destination buffer:
848
let link_len = std::cmp::min(link_bytes.len(), buf_len as usize);
849
let buf = buf.as_array(link_len as u32);
850
memory.copy_from_slice(&link_bytes[..link_len], buf)?;
851
Ok(link_len as types::Size)
852
}
853
854
async fn path_remove_directory(
855
&mut self,
856
memory: &mut GuestMemory<'_>,
857
dirfd: types::Fd,
858
path: GuestPtr<str>,
859
) -> Result<(), Error> {
860
self.table()
861
.get_dir(u32::from(dirfd))?
862
.dir
863
.remove_dir(memory.as_cow_str(path)?.deref())
864
.await
865
}
866
867
async fn path_rename(
868
&mut self,
869
memory: &mut GuestMemory<'_>,
870
src_fd: types::Fd,
871
src_path: GuestPtr<str>,
872
dest_fd: types::Fd,
873
dest_path: GuestPtr<str>,
874
) -> Result<(), Error> {
875
let table = self.table();
876
let src_dir = table.get_dir(u32::from(src_fd))?;
877
let dest_dir = table.get_dir(u32::from(dest_fd))?;
878
src_dir
879
.dir
880
.rename(
881
memory.as_cow_str(src_path)?.deref(),
882
dest_dir.dir.deref(),
883
memory.as_cow_str(dest_path)?.deref(),
884
)
885
.await
886
}
887
888
async fn path_symlink(
889
&mut self,
890
memory: &mut GuestMemory<'_>,
891
src_path: GuestPtr<str>,
892
dirfd: types::Fd,
893
dest_path: GuestPtr<str>,
894
) -> Result<(), Error> {
895
self.table()
896
.get_dir(u32::from(dirfd))?
897
.dir
898
.symlink(
899
memory.as_cow_str(src_path)?.deref(),
900
memory.as_cow_str(dest_path)?.deref(),
901
)
902
.await
903
}
904
905
async fn path_unlink_file(
906
&mut self,
907
memory: &mut GuestMemory<'_>,
908
dirfd: types::Fd,
909
path: GuestPtr<str>,
910
) -> Result<(), Error> {
911
self.table()
912
.get_dir(u32::from(dirfd))?
913
.dir
914
.unlink_file(memory.as_cow_str(path)?.deref())
915
.await
916
}
917
918
async fn poll_oneoff(
919
&mut self,
920
memory: &mut GuestMemory<'_>,
921
subs: GuestPtr<types::Subscription>,
922
events: GuestPtr<types::Event>,
923
nsubscriptions: types::Size,
924
) -> Result<types::Size, Error> {
925
if nsubscriptions == 0 {
926
return Err(Error::invalid_argument().context("nsubscriptions must be nonzero"));
927
}
928
929
// Special-case a `poll_oneoff` which is just sleeping on a single
930
// relative timer event, such as what WASI libc uses to implement sleep
931
// functions. This supports all clock IDs, because POSIX says that
932
        // `clock_settime` doesn't affect relative sleeps.
933
if nsubscriptions == 1 {
934
let sub = memory.read(subs)?;
935
if let types::SubscriptionU::Clock(clocksub) = sub.u {
936
if !clocksub
937
.flags
938
.contains(types::Subclockflags::SUBSCRIPTION_CLOCK_ABSTIME)
939
{
940
self.sched
941
.sleep(Duration::from_nanos(clocksub.timeout))
942
.await?;
943
memory.write(
944
events,
945
types::Event {
946
userdata: sub.userdata,
947
error: types::Errno::Success,
948
type_: types::Eventtype::Clock,
949
fd_readwrite: fd_readwrite_empty(),
950
},
951
)?;
952
return Ok(1);
953
}
954
}
955
}
956
957
let table = &self.table;
958
        // These references must outlive `poll`, which holds `&dyn WasiFile` borrows taken from them below
959
let mut read_refs: Vec<(Arc<FileEntry>, Option<Userdata>)> = Vec::new();
960
let mut write_refs: Vec<(Arc<FileEntry>, Option<Userdata>)> = Vec::new();
961
962
let mut poll = Poll::new();
963
964
let subs = subs.as_array(nsubscriptions);
965
for sub_elem in subs.iter() {
966
let sub_ptr = sub_elem?;
967
let sub = memory.read(sub_ptr)?;
968
match sub.u {
969
types::SubscriptionU::Clock(clocksub) => match clocksub.id {
970
types::Clockid::Monotonic => {
971
let clock = self.clocks.monotonic()?;
972
let precision = Duration::from_nanos(clocksub.precision);
973
let duration = Duration::from_nanos(clocksub.timeout);
974
let start = if clocksub
975
.flags
976
.contains(types::Subclockflags::SUBSCRIPTION_CLOCK_ABSTIME)
977
{
978
clock.creation_time
979
} else {
980
clock.abs_clock.now(precision)
981
};
982
let deadline = start
983
.checked_add(duration)
984
.ok_or_else(|| Error::overflow().context("deadline"))?;
985
poll.subscribe_monotonic_clock(
986
&*clock.abs_clock,
987
deadline,
988
precision,
989
sub.userdata.into(),
990
)
991
}
992
types::Clockid::Realtime => {
993
// POSIX specifies that functions like `nanosleep` and others use the
994
// `REALTIME` clock. But it also says that `clock_settime` has no effect
995
// on threads waiting in these functions. MONOTONIC should always have
996
// resolution at least as good as REALTIME, so we can translate a
997
// non-absolute `REALTIME` request into a `MONOTONIC` request.
998
let clock = self.clocks.monotonic()?;
999
let precision = Duration::from_nanos(clocksub.precision);
1000
let duration = Duration::from_nanos(clocksub.timeout);
1001
let deadline = if clocksub
1002
.flags
1003
.contains(types::Subclockflags::SUBSCRIPTION_CLOCK_ABSTIME)
1004
{
1005
return Err(Error::not_supported());
1006
} else {
1007
clock
1008
.abs_clock
1009
.now(precision)
1010
.checked_add(duration)
1011
.ok_or_else(|| Error::overflow().context("deadline"))?
1012
};
1013
poll.subscribe_monotonic_clock(
1014
&*clock.abs_clock,
1015
deadline,
1016
precision,
1017
sub.userdata.into(),
1018
)
1019
}
1020
_ => Err(Error::invalid_argument()
1021
.context("timer subscriptions only support monotonic timer"))?,
1022
},
1023
types::SubscriptionU::FdRead(readsub) => {
1024
let fd = readsub.file_descriptor;
1025
let file_ref = table.get_file(u32::from(fd))?;
1026
read_refs.push((file_ref, Some(sub.userdata.into())));
1027
}
1028
types::SubscriptionU::FdWrite(writesub) => {
1029
let fd = writesub.file_descriptor;
1030
let file_ref = table.get_file(u32::from(fd))?;
1031
write_refs.push((file_ref, Some(sub.userdata.into())));
1032
}
1033
}
1034
}
1035
1036
let mut read_mut_refs: Vec<(&dyn WasiFile, Userdata)> = Vec::new();
1037
for (file_lock, userdata) in read_refs.iter_mut() {
1038
read_mut_refs.push((file_lock.file.deref(), userdata.take().unwrap()));
1039
}
1040
1041
for (f, ud) in read_mut_refs.iter_mut() {
1042
poll.subscribe_read(*f, *ud);
1043
}
1044
1045
let mut write_mut_refs: Vec<(&dyn WasiFile, Userdata)> = Vec::new();
1046
for (file_lock, userdata) in write_refs.iter_mut() {
1047
write_mut_refs.push((file_lock.file.deref(), userdata.take().unwrap()));
1048
}
1049
1050
for (f, ud) in write_mut_refs.iter_mut() {
1051
poll.subscribe_write(*f, *ud);
1052
}
1053
1054
self.sched.poll_oneoff(&mut poll).await?;
1055
1056
let results = poll.results();
1057
let num_results = results.len();
1058
assert!(
1059
num_results <= nsubscriptions as usize,
1060
"results exceeds subscriptions"
1061
);
1062
let events = events.as_array(
1063
num_results
1064
.try_into()
1065
.expect("not greater than nsubscriptions"),
1066
);
1067
for ((result, userdata), event_elem) in results.into_iter().zip(events.iter()) {
1068
let event_ptr = event_elem?;
1069
let userdata: types::Userdata = userdata.into();
1070
memory.write(
1071
event_ptr,
1072
match result {
1073
SubscriptionResult::Read(r) => {
1074
let type_ = types::Eventtype::FdRead;
1075
match r {
1076
Ok((nbytes, flags)) => types::Event {
1077
userdata,
1078
error: types::Errno::Success,
1079
type_,
1080
fd_readwrite: types::EventFdReadwrite {
1081
nbytes,
1082
flags: types::Eventrwflags::from(&flags),
1083
},
1084
},
1085
Err(e) => types::Event {
1086
userdata,
1087
error: e.downcast().map_err(Error::trap)?,
1088
type_,
1089
fd_readwrite: fd_readwrite_empty(),
1090
},
1091
}
1092
}
1093
SubscriptionResult::Write(r) => {
1094
let type_ = types::Eventtype::FdWrite;
1095
match r {
1096
Ok((nbytes, flags)) => types::Event {
1097
userdata,
1098
error: types::Errno::Success,
1099
type_,
1100
fd_readwrite: types::EventFdReadwrite {
1101
nbytes,
1102
flags: types::Eventrwflags::from(&flags),
1103
},
1104
},
1105
Err(e) => types::Event {
1106
userdata,
1107
error: e.downcast().map_err(Error::trap)?,
1108
type_,
1109
fd_readwrite: fd_readwrite_empty(),
1110
},
1111
}
1112
}
1113
SubscriptionResult::MonotonicClock(r) => {
1114
let type_ = types::Eventtype::Clock;
1115
types::Event {
1116
userdata,
1117
error: match r {
1118
Ok(()) => types::Errno::Success,
1119
Err(e) => e.downcast().map_err(Error::trap)?,
1120
},
1121
type_,
1122
fd_readwrite: fd_readwrite_empty(),
1123
}
1124
}
1125
},
1126
)?;
1127
}
1128
1129
Ok(num_results.try_into().expect("results fit into memory"))
1130
}
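
    // A sketch (guest-side pseudocode, not part of this crate) of the single
    // relative-clock subscription that the fast path at the top of
    // `poll_oneoff` is aimed at, i.e. how a sleep is typically expressed; the
    // struct name `SubscriptionClock` is assumed from the generated `types`
    // module:
    //
    //     let sub = types::Subscription {
    //         userdata: 0,
    //         u: types::SubscriptionU::Clock(types::SubscriptionClock {
    //             id: types::Clockid::Monotonic,
    //             timeout: nanos,                        // relative, since ABSTIME is not set
    //             precision: 0,
    //             flags: types::Subclockflags::empty(),
    //         }),
    //     };
    //     let n = poll_oneoff(&sub, &mut event, 1)?;     // returns 1 clock event on wakeup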
1131
1132
async fn proc_exit(
1133
&mut self,
1134
_memory: &mut GuestMemory<'_>,
1135
status: types::Exitcode,
1136
) -> EnvError {
1137
// Check that the status is within WASI's range.
1138
if status < 126 {
1139
I32Exit(status as i32).into()
1140
} else {
1141
EnvError::msg("exit with invalid exit status outside of [0..126)")
1142
}
1143
}
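
    // A sketch (illustrative only) of the status handling above:
    //
    //     proc_exit(0)   => I32Exit(0)                          // valid; exits with status 0
    //     proc_exit(125) => I32Exit(125)                        // largest valid status
    //     proc_exit(126) => "exit with invalid exit status..."  // outside WASI's [0..126) range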
1144
1145
async fn proc_raise(
1146
&mut self,
1147
_memory: &mut GuestMemory<'_>,
1148
_sig: types::Signal,
1149
) -> Result<(), Error> {
1150
Err(Error::trap(EnvError::msg("proc_raise unsupported")))
1151
}
1152
1153
async fn sched_yield(&mut self, _memory: &mut GuestMemory<'_>) -> Result<(), Error> {
1154
self.sched.sched_yield().await
1155
}
1156
1157
async fn random_get(
1158
&mut self,
1159
memory: &mut GuestMemory<'_>,
1160
buf: GuestPtr<u8>,
1161
buf_len: types::Size,
1162
) -> Result<(), Error> {
1163
let buf = buf.as_array(buf_len);
1164
if memory.is_shared_memory() {
1165
// If the Wasm memory is shared, copy to an intermediate buffer to
1166
// avoid Rust unsafety (i.e., the called function could rely on
1167
// `&mut [u8]`'s exclusive ownership which is not guaranteed due to
1168
// potential access from other threads).
1169
let mut copied: u32 = 0;
1170
while copied < buf.len() {
1171
let len = (buf.len() - copied).min(MAX_SHARED_BUFFER_SIZE as u32);
1172
let mut tmp = vec![0; len as usize];
1173
self.random.lock().unwrap().try_fill_bytes(&mut tmp)?;
1174
let dest = buf.get_range(copied..copied + len).unwrap();
1175
memory.copy_from_slice(&tmp, dest)?;
1176
copied += len;
1177
}
1178
} else {
1179
// If the Wasm memory is non-shared, copy directly into the linear
1180
// memory.
1181
let mem = &mut memory.as_slice_mut(buf)?.unwrap();
1182
self.random.lock().unwrap().try_fill_bytes(mem)?;
1183
}
1184
Ok(())
1185
}
1186
1187
async fn sock_accept(
1188
&mut self,
1189
_memory: &mut GuestMemory<'_>,
1190
fd: types::Fd,
1191
flags: types::Fdflags,
1192
) -> Result<types::Fd, Error> {
1193
let table = self.table();
1194
let f = table.get_file(u32::from(fd))?;
1195
let file = f.file.sock_accept(FdFlags::from(flags)).await?;
1196
let fd = table.push(Arc::new(FileEntry::new(file, FileAccessMode::all())))?;
1197
Ok(types::Fd::from(fd))
1198
}
1199
1200
async fn sock_recv(
1201
&mut self,
1202
memory: &mut GuestMemory<'_>,
1203
fd: types::Fd,
1204
ri_data: types::IovecArray,
1205
ri_flags: types::Riflags,
1206
) -> Result<(types::Size, types::Roflags), Error> {
1207
let f = self.table().get_file(u32::from(fd))?;
1208
1209
let iovs: Vec<wiggle::GuestPtr<[u8]>> = ri_data
1210
.iter()
1211
.map(|iov_ptr| {
1212
let iov_ptr = iov_ptr?;
1213
let iov: types::Iovec = memory.read(iov_ptr)?;
1214
Ok(iov.buf.as_array(iov.buf_len))
1215
})
1216
.collect::<Result<_, Error>>()?;
1217
1218
// If the first iov structure is from shared memory we can safely assume
1219
// all the rest will be. We then read into memory based on the memory's
1220
// shared-ness:
1221
// - if not shared, we copy directly into the Wasm memory
1222
// - if shared, we use an intermediate buffer; this avoids Rust unsafety
1223
// due to holding on to a `&mut [u8]` of Wasm memory when we cannot
1224
// guarantee the `&mut` exclusivity--other threads could be modifying
1225
// the data as this functions writes to it. Though likely there is no
1226
// issue with OS writing to io structs in multi-threaded scenarios,
1227
// since we do not know here if `&dyn WasiFile` does anything else
1228
// (e.g., read), we cautiously incur some performance overhead by
1229
// copying twice.
1230
let is_shared_memory = memory.is_shared_memory();
1231
let (bytes_read, ro_flags) = if is_shared_memory {
1232
// For shared memory, read into an intermediate buffer. Only the
1233
// first iov will be filled and even then the read is capped by the
1234
// `MAX_SHARED_BUFFER_SIZE`, so users are expected to re-call.
1235
let iov = iovs.into_iter().next();
1236
if let Some(iov) = iov {
1237
let mut buffer = vec![0; (iov.len() as usize).min(MAX_SHARED_BUFFER_SIZE)];
1238
let (bytes_read, ro_flags) = f
1239
.file
1240
.sock_recv(&mut [IoSliceMut::new(&mut buffer)], RiFlags::from(ri_flags))
1241
.await?;
1242
let iov = iov
1243
.get_range(0..bytes_read.try_into()?)
1244
.expect("it should always be possible to slice the iov smaller");
1245
memory.copy_from_slice(&buffer[0..bytes_read.try_into()?], iov)?;
1246
(bytes_read, ro_flags)
1247
} else {
1248
return Ok((0, RoFlags::empty().into()));
1249
}
1250
} else {
1251
            // Convert the first unsafe guest slice into a safe one--this uses
1252
// Wiggle's internal borrow checker to ensure no overlaps. We assume
1253
// here that, because the memory is not shared, there are no other
1254
// threads to access it while it is written to.
1255
let guest_slice: &mut [u8] = match iovs.into_iter().filter(|iov| iov.len() > 0).next() {
1256
Some(iov) => memory.as_slice_mut(iov)?.unwrap(),
1257
None => &mut [],
1258
};
1259
1260
// Read directly into the Wasm memory.
1261
f.file
1262
.sock_recv(&mut [IoSliceMut::new(guest_slice)], RiFlags::from(ri_flags))
1263
.await?
1264
};
1265
1266
Ok((types::Size::try_from(bytes_read)?, ro_flags.into()))
1267
}
1268
1269
async fn sock_send(
1270
&mut self,
1271
memory: &mut GuestMemory<'_>,
1272
fd: types::Fd,
1273
si_data: types::CiovecArray,
1274
_si_flags: types::Siflags,
1275
) -> Result<types::Size, Error> {
1276
let f = self.table().get_file(u32::from(fd))?;
1277
1278
let guest_slices: Vec<Cow<[u8]>> = si_data
1279
.iter()
1280
.map(|iov_ptr| {
1281
let iov_ptr = iov_ptr?;
1282
let iov: types::Ciovec = memory.read(iov_ptr)?;
1283
Ok(memory.as_cow(iov.buf.as_array(iov.buf_len))?)
1284
})
1285
.collect::<Result<_, Error>>()?;
1286
1287
let ioslices: Vec<IoSlice> = guest_slices
1288
.iter()
1289
.map(|s| IoSlice::new(s.deref()))
1290
.collect();
1291
let bytes_written = f.file.sock_send(&ioslices, SiFlags::empty()).await?;
1292
1293
Ok(types::Size::try_from(bytes_written)?)
1294
}
1295
1296
async fn sock_shutdown(
1297
&mut self,
1298
_memory: &mut GuestMemory<'_>,
1299
fd: types::Fd,
1300
how: types::Sdflags,
1301
) -> Result<(), Error> {
1302
let f = self.table().get_file(u32::from(fd))?;
1303
1304
f.file.sock_shutdown(SdFlags::from(how)).await
1305
}
1306
}
1307
1308
impl From<types::Advice> for Advice {
1309
fn from(advice: types::Advice) -> Advice {
1310
match advice {
1311
types::Advice::Normal => Advice::Normal,
1312
types::Advice::Sequential => Advice::Sequential,
1313
types::Advice::Random => Advice::Random,
1314
types::Advice::Willneed => Advice::WillNeed,
1315
types::Advice::Dontneed => Advice::DontNeed,
1316
types::Advice::Noreuse => Advice::NoReuse,
1317
}
1318
}
1319
}
1320
1321
impl From<&FdStat> for types::Fdstat {
1322
fn from(fdstat: &FdStat) -> types::Fdstat {
1323
let mut fs_rights_base = types::Rights::empty();
1324
if fdstat.access_mode.contains(FileAccessMode::READ) {
1325
fs_rights_base |= types::Rights::FD_READ;
1326
}
1327
if fdstat.access_mode.contains(FileAccessMode::WRITE) {
1328
fs_rights_base |= types::Rights::FD_WRITE;
1329
}
1330
types::Fdstat {
1331
fs_filetype: types::Filetype::from(&fdstat.filetype),
1332
fs_rights_base,
1333
fs_rights_inheriting: types::Rights::empty(),
1334
fs_flags: types::Fdflags::from(fdstat.flags),
1335
}
1336
}
1337
}
1338
1339
impl From<&FileType> for types::Filetype {
1340
fn from(ft: &FileType) -> types::Filetype {
1341
match ft {
1342
FileType::Directory => types::Filetype::Directory,
1343
FileType::BlockDevice => types::Filetype::BlockDevice,
1344
FileType::CharacterDevice => types::Filetype::CharacterDevice,
1345
FileType::RegularFile => types::Filetype::RegularFile,
1346
FileType::SocketDgram => types::Filetype::SocketDgram,
1347
FileType::SocketStream => types::Filetype::SocketStream,
1348
FileType::SymbolicLink => types::Filetype::SymbolicLink,
1349
FileType::Unknown => types::Filetype::Unknown,
1350
FileType::Pipe => types::Filetype::Unknown,
1351
}
1352
}
1353
}
1354
1355
macro_rules! convert_flags {
1356
($from:ty, $to:ty, $($flag:ident),+) => {
1357
impl From<$from> for $to {
1358
fn from(f: $from) -> $to {
1359
let mut out = <$to>::empty();
1360
$(
1361
if f.contains(<$from>::$flag) {
1362
out |= <$to>::$flag;
1363
}
1364
)+
1365
out
1366
}
1367
}
1368
}
1369
}
1370
1371
macro_rules! convert_flags_bidirectional {
1372
($from:ty, $to:ty, $($rest:tt)*) => {
1373
convert_flags!($from, $to, $($rest)*);
1374
convert_flags!($to, $from, $($rest)*);
1375
}
1376
}
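
// A sketch (hand-expanded, not the literal macro output) of what
// `convert_flags_bidirectional!(FdFlags, types::Fdflags, APPEND, ...)` below
// produces: a pair of `From` impls that copy each listed flag across when set.
//
//     impl From<FdFlags> for types::Fdflags {
//         fn from(f: FdFlags) -> types::Fdflags {
//             let mut out = types::Fdflags::empty();
//             if f.contains(FdFlags::APPEND) {
//                 out |= types::Fdflags::APPEND;
//             }
//             // ...one such check per listed flag...
//             out
//         }
//     }
//     // ...plus the mirror impl `From<types::Fdflags> for FdFlags`.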
1377
1378
convert_flags_bidirectional!(
1379
FdFlags,
1380
types::Fdflags,
1381
APPEND,
1382
DSYNC,
1383
NONBLOCK,
1384
RSYNC,
1385
SYNC
1386
);
1387
1388
convert_flags_bidirectional!(RiFlags, types::Riflags, RECV_PEEK, RECV_WAITALL);
1389
1390
convert_flags_bidirectional!(RoFlags, types::Roflags, RECV_DATA_TRUNCATED);
1391
1392
convert_flags_bidirectional!(SdFlags, types::Sdflags, RD, WR);
1393
1394
impl From<&types::Oflags> for OFlags {
1395
fn from(oflags: &types::Oflags) -> OFlags {
1396
let mut out = OFlags::empty();
1397
if oflags.contains(types::Oflags::CREAT) {
1398
out = out | OFlags::CREATE;
1399
}
1400
if oflags.contains(types::Oflags::DIRECTORY) {
1401
out = out | OFlags::DIRECTORY;
1402
}
1403
if oflags.contains(types::Oflags::EXCL) {
1404
out = out | OFlags::EXCLUSIVE;
1405
}
1406
if oflags.contains(types::Oflags::TRUNC) {
1407
out = out | OFlags::TRUNCATE;
1408
}
1409
out
1410
}
1411
}
1412
1413
impl From<&OFlags> for types::Oflags {
1414
fn from(oflags: &OFlags) -> types::Oflags {
1415
let mut out = types::Oflags::empty();
1416
if oflags.contains(OFlags::CREATE) {
1417
out = out | types::Oflags::CREAT;
1418
}
1419
if oflags.contains(OFlags::DIRECTORY) {
1420
out = out | types::Oflags::DIRECTORY;
1421
}
1422
if oflags.contains(OFlags::EXCLUSIVE) {
1423
out = out | types::Oflags::EXCL;
1424
}
1425
if oflags.contains(OFlags::TRUNCATE) {
1426
out = out | types::Oflags::TRUNC;
1427
}
1428
out
1429
}
1430
}
1431
impl From<Filestat> for types::Filestat {
1432
fn from(stat: Filestat) -> types::Filestat {
1433
types::Filestat {
1434
dev: stat.device_id,
1435
ino: stat.inode,
1436
filetype: types::Filetype::from(&stat.filetype),
1437
nlink: stat.nlink,
1438
size: stat.size,
1439
atim: stat
1440
.atim
1441
.map(|t| t.duration_since(std::time::UNIX_EPOCH).unwrap().as_nanos() as u64)
1442
.unwrap_or(0),
1443
mtim: stat
1444
.mtim
1445
.map(|t| t.duration_since(std::time::UNIX_EPOCH).unwrap().as_nanos() as u64)
1446
.unwrap_or(0),
1447
ctim: stat
1448
.ctim
1449
.map(|t| t.duration_since(std::time::UNIX_EPOCH).unwrap().as_nanos() as u64)
1450
.unwrap_or(0),
1451
}
1452
}
1453
}
1454
1455
impl TryFrom<&ReaddirEntity> for types::Dirent {
1456
type Error = Error;
1457
fn try_from(e: &ReaddirEntity) -> Result<types::Dirent, Error> {
1458
Ok(types::Dirent {
1459
d_ino: e.inode,
1460
d_namlen: e.name.as_bytes().len().try_into()?,
1461
d_type: types::Filetype::from(&e.filetype),
1462
d_next: e.next.into(),
1463
})
1464
}
1465
}
1466
1467
fn dirent_bytes(dirent: types::Dirent) -> Vec<u8> {
1468
use wiggle::GuestType;
1469
assert_eq!(
1470
types::Dirent::guest_size(),
1471
std::mem::size_of::<types::Dirent>() as u32,
1472
"Dirent guest repr and host repr should match"
1473
);
1474
assert_eq!(
1475
1,
1476
std::mem::size_of_val(&dirent.d_type),
1477
"Dirent member d_type should be endian-invariant"
1478
);
1479
let size = types::Dirent::guest_size()
1480
.try_into()
1481
.expect("Dirent is smaller than 2^32");
1482
let mut bytes = Vec::with_capacity(size);
1483
bytes.resize(size, 0);
1484
let ptr = bytes.as_mut_ptr().cast::<types::Dirent>();
1485
let guest_dirent = types::Dirent {
1486
d_ino: dirent.d_ino.to_le(),
1487
d_namlen: dirent.d_namlen.to_le(),
1488
d_type: dirent.d_type, // endian-invariant
1489
d_next: dirent.d_next.to_le(),
1490
};
1491
unsafe { ptr.write_unaligned(guest_dirent) };
1492
bytes
1493
}
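
// A sketch (illustrative only) of the guarantee the assertions above check:
// every multi-byte field is stored little-endian and unaligned, so a guest on
// any host can decode a field straight from the raw bytes, e.g.
//
//     let d_namlen = u32::from_le_bytes(raw[off..off + 4].try_into().unwrap());
//
// where `off` is that field's offset within the guest-side `dirent` layout.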
1494
1495
impl From<&RwEventFlags> for types::Eventrwflags {
1496
fn from(flags: &RwEventFlags) -> types::Eventrwflags {
1497
let mut out = types::Eventrwflags::empty();
1498
if flags.contains(RwEventFlags::HANGUP) {
1499
out = out | types::Eventrwflags::FD_READWRITE_HANGUP;
1500
}
1501
out
1502
}
1503
}
1504
1505
fn fd_readwrite_empty() -> types::EventFdReadwrite {
1506
types::EventFdReadwrite {
1507
nbytes: 0,
1508
flags: types::Eventrwflags::empty(),
1509
}
1510
}
1511
1512
fn systimespec(
1513
set: bool,
1514
ts: types::Timestamp,
1515
now: bool,
1516
) -> Result<Option<SystemTimeSpec>, Error> {
1517
if set && now {
1518
Err(Error::invalid_argument())
1519
} else if set {
1520
Ok(Some(SystemTimeSpec::Absolute(
1521
SystemClock::UNIX_EPOCH + Duration::from_nanos(ts),
1522
)))
1523
} else if now {
1524
Ok(Some(SystemTimeSpec::SymbolicNow))
1525
} else {
1526
Ok(None)
1527
}
1528
}
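
// A sketch (illustrative only) of how the four flag combinations map through
// `systimespec` for one timestamp:
//
//     systimespec(false, ts, false) => Ok(None)                                // leave unchanged
//     systimespec(true,  ts, false) => Ok(Some(Absolute(UNIX_EPOCH + ts ns)))  // set to `ts`
//     systimespec(false, ts, true)  => Ok(Some(SymbolicNow))                   // set to "now"
//     systimespec(true,  ts, true)  => Err(invalid_argument)                   // conflicting flags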
1529
1530
// This is the default subset of base Rights reported for directories prior to
1531
// https://github.com/bytecodealliance/wasmtime/pull/6265. Some
1532
// implementations still expect this set of rights to be reported.
1533
pub(crate) fn directory_base_rights() -> types::Rights {
1534
types::Rights::PATH_CREATE_DIRECTORY
1535
| types::Rights::PATH_CREATE_FILE
1536
| types::Rights::PATH_LINK_SOURCE
1537
| types::Rights::PATH_LINK_TARGET
1538
| types::Rights::PATH_OPEN
1539
| types::Rights::FD_READDIR
1540
| types::Rights::PATH_READLINK
1541
| types::Rights::PATH_RENAME_SOURCE
1542
| types::Rights::PATH_RENAME_TARGET
1543
| types::Rights::PATH_SYMLINK
1544
| types::Rights::PATH_REMOVE_DIRECTORY
1545
| types::Rights::PATH_UNLINK_FILE
1546
| types::Rights::PATH_FILESTAT_GET
1547
| types::Rights::PATH_FILESTAT_SET_TIMES
1548
| types::Rights::FD_FILESTAT_GET
1549
| types::Rights::FD_FILESTAT_SET_TIMES
1550
}
1551
1552
// This is the default subset of inheriting Rights reported for directories
1553
// prior to https://github.com/bytecodealliance/wasmtime/pull/6265. Some
1554
// implementations still expect this set of rights to be reported.
1555
pub(crate) fn directory_inheriting_rights() -> types::Rights {
1556
types::Rights::FD_DATASYNC
1557
| types::Rights::FD_READ
1558
| types::Rights::FD_SEEK
1559
| types::Rights::FD_FDSTAT_SET_FLAGS
1560
| types::Rights::FD_SYNC
1561
| types::Rights::FD_TELL
1562
| types::Rights::FD_WRITE
1563
| types::Rights::FD_ADVISE
1564
| types::Rights::FD_ALLOCATE
1565
| types::Rights::FD_FILESTAT_GET
1566
| types::Rights::FD_FILESTAT_SET_SIZE
1567
| types::Rights::FD_FILESTAT_SET_TIMES
1568
| types::Rights::POLL_FD_READWRITE
1569
| directory_base_rights()
1570
}
1571
1572