/// Convert our internal Verification IR to an external SMT AST and pass
/// queries to that solver.
///
/// This uses the easy-smt crate to interact with any solver.
///
use cranelift_isle as isle;
use isle::sema::{Pattern, Rule, TermEnv, TypeEnv};

use crate::solver::encoded_ops::popcnt::popcnt;
use crate::type_inference::RuleSemantics;
use crate::Config;
use easy_smt::{Response, SExpr};
use std::cmp::Ordering;
use std::collections::HashMap;
use veri_ir::{
    BinaryOp, ConcreteTest, Counterexample, Expr, TermSignature, Terminal, Type, TypeContext,
    UnaryOp, VerificationResult,
};

mod encoded_ops;

use encoded_ops::cls;
use encoded_ops::clz;
use encoded_ops::rev;
use encoded_ops::subs;

use crate::MAX_WIDTH;

pub struct SolverCtx {
    smt: easy_smt::Context,
    pub find_widths: bool,
    tyctx: TypeContext,
    pub bitwidth: usize,
    var_map: HashMap<String, SExpr>,
    width_vars: HashMap<u32, String>,
    width_assumptions: Vec<SExpr>,
    pub additional_decls: Vec<(String, SExpr)>,
    pub additional_assumptions: Vec<SExpr>,
    pub additional_assertions: Vec<SExpr>,
    fresh_bits_idx: usize,
    lhs_load_args: Option<Vec<SExpr>>,
    rhs_load_args: Option<Vec<SExpr>>,
    lhs_store_args: Option<Vec<SExpr>>,
    rhs_store_args: Option<Vec<SExpr>>,
    load_return: Option<SExpr>,
    lhs_flag: bool,
}

pub struct RuleCtx<'a> {
    rule_sem: &'a RuleSemantics,
    rule: &'a Rule,
    termenv: &'a TermEnv,
    typeenv: &'a TypeEnv,
    config: &'a Config,
}

impl SolverCtx {
    pub fn new_fresh_bits(&mut self, width: usize) -> SExpr {
        let name = format!("fresh{}", self.fresh_bits_idx);
        self.fresh_bits_idx += 1;
        self.additional_decls
            .push((name.clone(), self.smt.bit_vec_sort(self.smt.numeral(width))));
        self.smt.atom(name)
    }

    fn new_fresh_int(&mut self) -> SExpr {
        let name = format!("fresh{}", self.fresh_bits_idx);
        self.fresh_bits_idx += 1;
        self.additional_decls
            .push((name.clone(), self.smt.int_sort()));
        self.smt.atom(name)
    }

    fn new_fresh_bool(&mut self) -> SExpr {
        let name = format!("fresh{}", self.fresh_bits_idx);
        self.fresh_bits_idx += 1;
        self.additional_decls
            .push((name.clone(), self.smt.bool_sort()));
        self.smt.atom(name)
    }

    fn declare(&mut self, name: String, typ: SExpr) -> SExpr {
        let atom = self.smt.atom(&name);
        self.additional_decls.push((name, typ));
        atom
    }

    fn assume(&mut self, expr: SExpr) {
        self.additional_assumptions.push(expr);
    }

    fn assert(&mut self, expr: SExpr) {
        self.additional_assertions.push(expr);
    }
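
    // Terminology note (not in the upstream comments, inferred from how these
    // lists are used below): assumptions are trusted facts, e.g. facts derived
    // from the rule's left-hand side and type inference, that the query may
    // rely on; assertions are obligations, e.g. from the right-hand side,
    // that the final query must check rather than trust.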

    /// Construct a constant bit-vector value of the given width. (This is used so pervasively that
    /// perhaps we should submit it for inclusion in the easy_smt library...)
    fn bv(&self, value: i128, width: usize) -> SExpr {
        if value < 0 {
            return self
                .smt
                .list(vec![self.smt.atom("bvneg"), self.bv(-value, width)]);
        }
        self.smt.list(vec![
            self.smt.atoms().und,
            self.smt.atom(format!("bv{value}")),
            self.smt.numeral(width),
        ])
    }
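
    // For example, `bv(5, 8)` yields the S-expression `(_ bv5 8)`, and a
    // negative value such as `bv(-5, 8)` yields `(bvneg (_ bv5 8))`.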

    /// Convert an SMT integer to a bit vector of a given width.
    fn int2bv(&self, width: usize, value: SExpr) -> SExpr {
        self.smt.list(vec![
            self.smt.list(vec![
                self.smt.atoms().und,
                self.smt.atom("int2bv"),
                self.smt.numeral(width),
            ]),
            value,
        ])
    }
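
    // For example, `int2bv(8, x)` yields `((_ int2bv 8) x)`.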

    /// Convert an SMT bit vector to a nat.
    fn bv2nat(&self, value: SExpr) -> SExpr {
        self.smt.list(vec![self.smt.atom("bv2nat"), value])
    }

    /// Zero-extend an SMT bit vector to a wider bit vector by adding `padding` zeroes to the
    /// front.
    fn zero_extend(&self, padding: usize, value: SExpr) -> SExpr {
        if padding == 0 {
            return value;
        }
        self.smt.list(vec![
            self.smt.list(vec![
                self.smt.atoms().und,
                self.smt.atom("zero_extend"),
                self.smt.numeral(padding),
            ]),
            value,
        ])
    }

    /// Sign-extend an SMT bit vector to a wider bit vector by adding `padding` copies of the
    /// sign bit to the front.
    fn sign_extend(&self, padding: usize, value: SExpr) -> SExpr {
        self.smt.list(vec![
            self.smt.list(vec![
                self.smt.atoms().und,
                self.smt.atom("sign_extend"),
                self.smt.numeral(padding),
            ]),
            value,
        ])
    }
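
    // For example, `zero_extend(4, x)` yields `((_ zero_extend 4) x)` and
    // `sign_extend(4, x)` yields `((_ sign_extend 4) x)`, each widening `x`
    // by 4 bits.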

    // Extend with concrete source and destination sizes. Includes extracting relevant bits.
    fn extend_concrete(
        &mut self,
        dest_width: usize,
        source: SExpr,
        source_width: usize,
        op: &str,
    ) -> SExpr {
        if dest_width < source_width {
            log::warn!(
                "Unexpected extend widths for {}: dest {dest_width} source {source_width}",
                self.smt.display(source),
            );
            self.assert(self.smt.false_());
            return self.bv(
                0,
                if self.find_widths {
                    self.bitwidth
                } else {
                    dest_width
                },
            );
        }

        let delta = dest_width - source_width;
        if !self.find_widths {
            return self.smt.list(vec![
                self.smt.list(vec![
                    self.smt.atoms().und,
                    self.smt.atom(op),
                    self.smt.numeral(delta),
                ]),
                source,
            ]);
        }

        // Extract the relevant bits of the source (which is modeled with a wider,
        // register-width bitvector).
        let extract = self
            .smt
            .extract(source_width.wrapping_sub(1).try_into().unwrap(), 0, source);

        // Do the extend itself.
        let extend = self.smt.list(vec![
            self.smt.list(vec![
                self.smt.atoms().und,
                self.smt.atom(op),
                self.smt.numeral(delta),
            ]),
            extract,
        ]);

        // Pad the extended result back to the full register bitwidth. Use the bits
        // that were already in the source register. That is, given:
        //   reg - source width   source width
        //        |                    |
        // SOURCE: [ don't care bits | care bits ]
        //
        //               dest width
        //                   |
        // OUT: [ same don't care bits | defined extend | care bits ]
        let mut unconstrained_bits = 0;
        if dest_width < self.bitwidth {
            unconstrained_bits = self
                .bitwidth
                .checked_sub(delta)
                .unwrap()
                .checked_sub(source_width)
                .unwrap();
        }

        // If we are extending to the full register width, no padding needed
        if unconstrained_bits == 0 {
            extend
        } else {
            let padding = self.smt.extract(
                self.bitwidth.checked_sub(1).unwrap().try_into().unwrap(),
                self.bitwidth
                    .checked_sub(unconstrained_bits)
                    .unwrap()
                    .try_into()
                    .unwrap(),
                source,
            );
            self.smt.concat(padding, extend)
        }
    }

    // SMT-LIB only supports extends (zero or sign) by concrete amounts, but we
    // need symbolic ones. This method essentially does if-conversion over possible
    // concrete forms, outputting nested ITE blocks. We consider both the starting
    // width and the destination width to be potentially symbolic.
    // For safety, we add an assertion that some arm of this ITE must match.
    fn extend_symbolic(
        &mut self,
        dest_width: SExpr,
        source: SExpr,
        source_width: SExpr,
        op: &str,
    ) -> SExpr {
        if self.find_widths {
            return source;
        }
        // Symbolic expression for amount to shift
        let shift = self.smt.sub(dest_width, source_width);

        let mut some_match = vec![];
        let mut ite_str = source;

        // Special case: if we are asked to extend by 0, just return the source
        let matching = self.smt.eq(self.smt.numeral(0), shift);
        some_match.push(matching);
        ite_str = self.smt.ite(matching, source, ite_str);

        // Possible amounts to extend by
        for possible_delta in 1..self.bitwidth + 1 {
            // Possible starting widths
            for possible_source in 1..self.bitwidth + 1 {
                // For now, ignore extends beyond the bitwidth. This is safe because
                // we will fail the rule feasibility check if this is violated.
                if possible_source + possible_delta > self.bitwidth {
                    continue;
                }

                // Statement meaning the symbolic case matches this concrete case
                let matching = self.smt.and(
                    self.smt.eq(self.smt.numeral(possible_delta), shift),
                    self.smt.eq(self.smt.numeral(possible_source), source_width),
                );
                some_match.push(matching);
                let extend = self.extend_concrete(
                    possible_source + possible_delta,
                    source,
                    possible_source,
                    op,
                );
                ite_str = self.smt.ite(matching, extend, ite_str);
            }
        }
        let some_shift_matches = self.smt.or_many(some_match);
        self.width_assumptions.push(some_shift_matches);
        ite_str
    }
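
    // A rough size estimate (not from the upstream comments): since the loops
    // above enumerate every (delta, source width) pair with
    // `possible_source + possible_delta <= bitwidth`, the emitted ITE chain
    // has O(bitwidth^2) arms; at `bitwidth = 64` that is roughly 2,000 nested
    // ITEs per symbolic extend.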

    fn encode_rotate(&self, op: &str, source: SExpr, amount: SExpr, width: usize) -> SExpr {
        // SMT bitvector rotate_left requires that the rotate amount be
        // statically specified. Instead, to use a dynamic amount, desugar
        // to shifts and bit arithmetic.
        let width_as_bv = self.bv(width.try_into().unwrap(), width);
        let wrapped_amount = self.smt.bvurem(amount, width_as_bv);
        let wrapped_delta = self.smt.bvsub(width_as_bv, wrapped_amount);
        match op {
            "rotate_left" => self.smt.bvor(
                self.smt.bvshl(source, wrapped_amount),
                self.smt.bvlshr(source, wrapped_delta),
            ),
            "rotate_right" => self.smt.bvor(
                self.smt.bvshl(source, wrapped_delta),
                self.smt.bvlshr(source, wrapped_amount),
            ),
            _ => unreachable!(),
        }
    }
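
    // The desugaring above implements, for `w`-bit values:
    //   rotate_left(x, a)  = (x << (a mod w)) | (x >> (w - (a mod w)))
    //   rotate_right(x, a) = (x >> (a mod w)) | (x << (w - (a mod w)))
    // using `bvurem` for the modulus and logical shift right for `>>`.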

    // SMT bitvector rotate requires that the rotate amount be
    // statically specified. Instead, to use a dynamic amount, desugar
    // to shifts and bit arithmetic.
    fn rotate_symbolic(
        &mut self,
        source: SExpr,
        source_width: usize,
        amount: SExpr,
        op: &str,
    ) -> SExpr {
        if self.find_widths {
            return source;
        }
        let (s, a) = if self.find_widths {
            // Extract the relevant bits of the source (which is modeled with a wider,
            // register-width bitvector).
            let extract_source = self.smt.extract(
                source_width.checked_sub(1).unwrap().try_into().unwrap(),
                0,
                source,
            );

            let extract_amount = self.smt.extract(
                source_width.checked_sub(1).unwrap().try_into().unwrap(),
                0,
                amount,
            );
            (extract_source, extract_amount)
        } else {
            (source, amount)
        };

        // Do the rotate itself.
        let rotate = self.encode_rotate(op, s, a, source_width);

        // Pad the extended result back to the full register bitwidth. Use the bits
        // that were already in the source register. That is, given:
        //   reg - source width   source width
        //        |                    |
        // SOURCE: [ don't care bits | care bits ]
        //
        //               dest width
        //                   |
        // OUT: [ same don't care bits | care bits ]
        let unconstrained_bits = self.bitwidth.checked_sub(source_width).unwrap();

        // If we are extending to the full register width, no padding needed
        if unconstrained_bits == 0 || !self.find_widths {
            rotate
        } else {
            let padding = self.smt.extract(
                self.bitwidth.checked_sub(1).unwrap().try_into().unwrap(),
                self.bitwidth
                    .checked_sub(unconstrained_bits)
                    .unwrap()
                    .try_into()
                    .unwrap(),
                source,
            );
            self.smt.concat(padding, rotate)
        }
    }

    // SMT-LIB only supports rotates by concrete amounts, but we
    // need symbolic ones. This method essentially does if-conversion over possible
    // concrete forms, outputting nested ITE blocks. We consider both the starting
    // width and the rotate amount to be potentially symbolic.
    // For safety, we add an assertion that some arm of this ITE must match.
    fn rotate_symbolic_dyn_source_width(
        &mut self,
        source: SExpr,
        source_width: SExpr,
        amount: SExpr,
        op: &str,
    ) -> SExpr {
        if self.find_widths {
            return source;
        }
        let mut some_match = vec![];
        let mut ite_str = source;

        // Special case: if we are asked to rotate by 0, just return the source
        let matching = self.smt.eq(self.bv(0, self.bitwidth), amount);
        some_match.push(matching);
        ite_str = self.smt.ite(matching, source, ite_str);

        // Possible starting widths
        for possible_source in [8usize, 16, 32, 64] {
            // Statement meaning the symbolic case matches this concrete case
            let matching = self.smt.eq(self.smt.numeral(possible_source), source_width);
            some_match.push(matching);

            // Extract the relevant bits of the source (which is modeled with a wider,
            // register-width bitvector).
            let extract_source = self.smt.extract(
                possible_source.checked_sub(1).unwrap().try_into().unwrap(),
                0,
                source,
            );
            let extract_amount = self.smt.extract(
                possible_source.checked_sub(1).unwrap().try_into().unwrap(),
                0,
                amount,
            );

            // SMT bitvector rotate_left requires that the rotate amount be
            // statically specified. Instead, to use a dynamic amount, desugar
            // to shifts and bit arithmetic.
            let rotate = self.encode_rotate(op, extract_source, extract_amount, possible_source);

            // Pad the extended result back to the full register bitwidth. Use the bits
            // that were already in the source register. That is, given:
            //   reg - source width   source width
            //        |                    |
            // SOURCE: [ don't care bits | care bits ]
            //
            //               dest width
            //                   |
            // OUT: [ same don't care bits | care bits ]
            let unconstrained_bits = self.bitwidth.checked_sub(possible_source).unwrap();

            // If we are extending to the full register width, no padding needed
            let rotate = if unconstrained_bits == 0 {
                rotate
            } else {
                let padding = self.smt.extract(
                    self.bitwidth.checked_sub(1).unwrap().try_into().unwrap(),
                    self.bitwidth
                        .checked_sub(unconstrained_bits)
                        .unwrap()
                        .try_into()
                        .unwrap(),
                    source,
                );
                self.smt.concat(padding, rotate)
            };

            ite_str = self.smt.ite(matching, rotate, ite_str);
        }
        let some_shift_matches = self.smt.or_many(some_match);
        self.width_assumptions.push(some_shift_matches);
        ite_str
    }

    pub fn widen_to_register_width(
        &mut self,
        tyvar: u32,
        narrow_width: usize,
        narrow_decl: SExpr,
        name: Option<String>,
    ) -> SExpr {
        let width = self.bitwidth.checked_sub(narrow_width).unwrap();
        if width > 0 {
            let mut narrow_name = format!("narrow__{tyvar}");
            let mut wide_name = format!("wide__{tyvar}");
            if let Some(s) = name {
                narrow_name = format!("{s}_{narrow_name}");
                wide_name = format!("{s}_{wide_name}");
            }
            self.assume(self.smt.eq(self.smt.atom(&narrow_name), narrow_decl));
            self.additional_decls.push((
                narrow_name.clone(),
                self.smt.bit_vec_sort(self.smt.numeral(narrow_width)),
            ));
            self.additional_decls.push((
                wide_name.clone(),
                self.smt.bit_vec_sort(self.smt.numeral(self.bitwidth)),
            ));
            let padding = self.new_fresh_bits(width);
            self.assume(self.smt.eq(
                self.smt.atom(&wide_name),
                self.smt.concat(padding, self.smt.atom(narrow_name)),
            ));
            self.smt.atom(wide_name)
        } else if let Some(s) = name {
            self.assume(self.smt.eq(self.smt.atom(&s), narrow_decl));
            self.smt.atom(&s)
        } else {
            narrow_decl
        }
    }
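
    // For example, widening a variable `x` with type variable 3 introduces
    // declarations `x_narrow__3` and `x_wide__3`, with `x_wide__3` constrained
    // to be fresh padding bits concatenated onto `x_narrow__3`. The
    // counterexample display code below (`display_var_from_smt_prefix`) relies
    // on this `narrow`/`wide` naming scheme.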

    pub fn get_expr_width_var(&self, e: &Expr) -> Option<SExpr> {
        if let Some(tyvar) = self.tyctx.tyvars.get(e) {
            self.width_vars.get(tyvar).map(|s| self.smt.atom(s))
        } else {
            None
        }
    }

    pub fn vir_to_smt_ty(&self, ty: &Type) -> SExpr {
        match ty {
            Type::BitVector(w) => {
                let width = w.unwrap_or(self.bitwidth);
                self.smt.bit_vec_sort(self.smt.numeral(width))
            }
            Type::Int => self.smt.int_sort(),
            Type::Bool | Type::Unit => self.smt.bool_sort(),
        }
    }
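
    // The mapping is: `BitVector(Some(w))` becomes `(_ BitVec w)`,
    // `BitVector(None)` defaults to the register width `(_ BitVec bitwidth)`,
    // `Int` becomes `Int`, and both `Bool` and `Unit` are modeled as `Bool`.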

    pub fn get_type(&self, x: &Expr) -> Option<&Type> {
        self.tyctx.tymap.get(self.tyctx.tyvars.get(x)?)
    }

    pub fn get_expr_value(&self, e: &Expr) -> Option<i128> {
        if let Some(tyvar) = self.tyctx.tyvars.get(e) {
            self.tyctx.tyvals.get(tyvar).copied()
        } else {
            None
        }
    }

    pub fn static_width(&self, x: &Expr) -> Option<usize> {
        match self.get_type(x) {
            Some(Type::BitVector(w)) => *w,
            _ => None,
        }
    }

    pub fn assume_same_width(&mut self, x: &Expr, y: &Expr) {
        let xw = self.get_expr_width_var(x).unwrap();
        let yw = self.get_expr_width_var(y).unwrap();
        self.width_assumptions.push(self.smt.eq(xw, yw));
    }

    pub fn assume_same_width_from_sexpr(&mut self, x: SExpr, y: &Expr) {
        let yw = self.get_expr_width_var(y).unwrap();
        self.width_assumptions.push(self.smt.eq(x, yw));
    }

    pub fn assume_comparable_types(&mut self, x: &Expr, y: &Expr) {
        match (self.get_type(x), self.get_type(y)) {
            (None, _) | (_, None) => panic!("Missing type(s) {x:?} {y:?}"),
            (Some(Type::Bool), Some(Type::Bool))
            | (Some(Type::Int), Some(Type::Int))
            | (Some(Type::Unit), Some(Type::Unit)) => (),
            (Some(Type::BitVector(Some(xw))), Some(Type::BitVector(Some(yw)))) => {
                assert_eq!(xw, yw, "incompatible {x:?} {y:?}")
            }
            (_, _) => self.assume_same_width(x, y),
        }
    }

    pub fn vir_expr_to_sexp(&mut self, e: Expr) -> SExpr {
        let tyvar = self.tyctx.tyvars.get(&e);
        let ty = self.get_type(&e);
        let width = self.get_expr_width_var(&e);
        let static_expr_width = self.static_width(&e);
        match e {
            Expr::Terminal(t) => match t {
                Terminal::Literal(v, tyvar) => {
                    let lit = self.smt.atom(v);
                    if self.find_widths && matches!(ty.unwrap(), Type::BitVector(_)) {
                        self.widen_to_register_width(tyvar, static_expr_width.unwrap(), lit, None)
                    } else {
                        lit
                    }
                }
                Terminal::Var(v) => match self.var_map.get(&v) {
                    Some(o) => *o,
                    None => self.smt.atom(v),
                },
                Terminal::Const(i, _) => match ty.unwrap() {
                    Type::BitVector(w) => {
                        let width = w.unwrap_or(self.bitwidth);
                        let narrow_decl = self.bv(i, width);
                        if self.find_widths {
                            self.zero_extend(self.bitwidth - width, narrow_decl)
                        } else {
                            narrow_decl
                        }
                    }
                    Type::Int => self.smt.numeral(i),
                    Type::Bool => {
                        if i == 0 {
                            self.smt.false_()
                        } else {
                            self.smt.true_()
                        }
                    }
                    Type::Unit => self.smt.true_(),
                },
                Terminal::True => self.smt.true_(),
                Terminal::False => self.smt.false_(),
                Terminal::Wildcard(_) => match ty.unwrap() {
                    Type::BitVector(Some(w)) if !self.find_widths => self.new_fresh_bits(*w),
                    Type::BitVector(_) => self.new_fresh_bits(self.bitwidth),
                    Type::Int => self.new_fresh_int(),
                    Type::Bool => self.new_fresh_bool(),
                    Type::Unit => self.smt.true_(),
                },
            },
            Expr::Unary(op, arg) => {
                let op = match op {
                    UnaryOp::Not => "not",
                    UnaryOp::BVNeg => {
                        if self.find_widths {
                            self.assume_same_width_from_sexpr(width.unwrap(), &arg);
                        }
                        "bvneg"
                    }
                    UnaryOp::BVNot => {
                        if self.find_widths {
                            self.assume_same_width_from_sexpr(width.unwrap(), &arg);
                        }
                        "bvnot"
                    }
                };
                let subexp = self.vir_expr_to_sexp(*arg);
                self.smt.list(vec![self.smt.atom(op), subexp])
            }
            Expr::Binary(op, x, y) => {
                if self.find_widths {
                    match op {
                        BinaryOp::BVMul
                        | BinaryOp::BVUDiv
                        | BinaryOp::BVSDiv
                        | BinaryOp::BVUrem
                        | BinaryOp::BVSrem
                        | BinaryOp::BVAdd
                        | BinaryOp::BVSub
                        | BinaryOp::BVAnd
                        | BinaryOp::BVOr
                        | BinaryOp::BVShl
                        | BinaryOp::BVShr
                        | BinaryOp::BVAShr
                        | BinaryOp::BVRotl
                        | BinaryOp::BVRotr => self.assume_same_width_from_sexpr(width.unwrap(), &x),
                        BinaryOp::Eq => {
                            if let Some(Type::BitVector(_)) = self.get_type(&x) {
                                self.assume_comparable_types(&x, &y)
                            }
                        }
                        _ => (),
                    };
                    self.assume_comparable_types(&x, &y);
                }
                match op {
                    BinaryOp::BVRotl => {
                        let source_width = self.static_width(&x);
                        match source_width {
                            Some(w) => {
                                let xs = self.vir_expr_to_sexp(*x);
                                let ys = self.vir_expr_to_sexp(*y);
                                return self.rotate_symbolic(xs, w, ys, "rotate_left");
                            }
                            None => {
                                let arg_width = self.get_expr_width_var(&x).unwrap();
                                let xs = self.vir_expr_to_sexp(*x);
                                let ys = self.vir_expr_to_sexp(*y);
                                return self.rotate_symbolic_dyn_source_width(
                                    xs,
                                    arg_width,
                                    ys,
                                    "rotate_left",
                                );
                            }
                        }
                    }
                    BinaryOp::BVRotr => {
                        let source_width = self.static_width(&x);
                        match source_width {
                            Some(w) => {
                                let xs = self.vir_expr_to_sexp(*x);
                                let ys = self.vir_expr_to_sexp(*y);
                                return self.rotate_symbolic(xs, w, ys, "rotate_right");
                            }
                            None => {
                                let arg_width = self.get_expr_width_var(&x).unwrap();
                                let xs = self.vir_expr_to_sexp(*x);
                                let ys = self.vir_expr_to_sexp(*y);
                                return self.rotate_symbolic_dyn_source_width(
                                    xs,
                                    arg_width,
                                    ys,
                                    "rotate_right",
                                );
                            }
                        }
                    }
                    // To shift right, we need to make sure the bits to the right get zeroed. Shift left first.
                    BinaryOp::BVShr => {
                        let arg_width = if self.find_widths {
                            self.get_expr_width_var(&x).unwrap()
                        } else {
                            self.smt.numeral(self.static_width(&x).unwrap())
                        };
                        let xs = self.vir_expr_to_sexp(*x);

                        // Strategy: shift left by (bitwidth - arg width) to zero bits to the right
                        // of the bits in the argument size. Then shift right by (amt + (bitwidth - arg width))

                        // Width math
                        if self.find_widths {
                            // The shift arg needs to be extracted to the right width, default to 8 if unknown
                            let y_static_width = self.static_width(&y).unwrap_or(8);
                            let y_rec = self.vir_expr_to_sexp(*y);
                            if self.find_widths {
                                return xs;
                            }
                            let extract = self.smt.extract(
                                y_static_width.checked_sub(1).unwrap().try_into().unwrap(),
                                0,
                                y_rec,
                            );
                            let ys = self.zero_extend(self.bitwidth - y_static_width, extract);
                            let arg_width_as_bv = self.int2bv(self.bitwidth, arg_width);
                            let bitwidth_as_bv =
                                self.bv(self.bitwidth.try_into().unwrap(), self.bitwidth);
                            let extra_shift = self.smt.bvsub(bitwidth_as_bv, arg_width_as_bv);
                            let shl_to_zero = self.smt.bvshl(xs, extra_shift);

                            let amt_plus_extra = self.smt.bvadd(ys, extra_shift);
                            return self.smt.bvlshr(shl_to_zero, amt_plus_extra);
                        } else {
                            let ys = self.vir_expr_to_sexp(*y);
                            return self.smt.bvlshr(xs, ys);
                        }
                    }
                    BinaryOp::BVAShr => {
                        let arg_width = if self.find_widths {
                            self.get_expr_width_var(&x).unwrap()
                        } else {
                            self.smt.numeral(self.static_width(&x).unwrap())
                        };
                        let xs = self.vir_expr_to_sexp(*x);

                        // Strategy: shift left by (bitwidth - arg width) to eliminate bits to the left
                        // of the bits in the argument size. Then shift right by (amt + (bitwidth - arg width))

                        // Width math
                        if self.find_widths {
                            // The shift arg needs to be extracted to the right width, default to 8 if unknown
                            let y_static_width = self.static_width(&y).unwrap_or(8);
                            let ys = self.vir_expr_to_sexp(*y);
                            let extract = self.smt.extract(
                                y_static_width.checked_sub(1).unwrap().try_into().unwrap(),
                                0,
                                ys,
                            );
                            let ysext = self.zero_extend(self.bitwidth - y_static_width, extract);

                            let arg_width_as_bv = self.int2bv(self.bitwidth, arg_width);
                            let bitwidth_as_bv =
                                self.bv(self.bitwidth.try_into().unwrap(), self.bitwidth);
                            let extra_shift = self.smt.bvsub(bitwidth_as_bv, arg_width_as_bv);
                            let shl_to_zero = self.smt.bvshl(xs, extra_shift);

                            let amt_plus_extra = self.smt.bvadd(ysext, extra_shift);
                            return self.smt.bvashr(shl_to_zero, amt_plus_extra);
                        } else {
                            let ys = self.vir_expr_to_sexp(*y);
                            return self.smt.bvashr(xs, ys);
                        }
                    }
                    _ => (),
                };
                let op_str = match op {
                    BinaryOp::And => "and",
                    BinaryOp::Or => "or",
                    BinaryOp::Imp => "=>",
                    BinaryOp::Eq => "=",
                    BinaryOp::Lte => match (self.get_type(&x), self.get_type(&y)) {
                        (Some(Type::Int), Some(Type::Int)) => "<=",
                        (Some(Type::BitVector(_)), Some(Type::BitVector(_))) => "bvule",
                        _ => unreachable!(),
                    },
                    BinaryOp::Lt => match (self.get_type(&x), self.get_type(&y)) {
                        (Some(Type::Int), Some(Type::Int)) => "<",
                        (Some(Type::BitVector(_)), Some(Type::BitVector(_))) => "bvult",
                        _ => unreachable!(),
                    },
                    BinaryOp::BVSgt => "bvsgt",
                    BinaryOp::BVSgte => "bvsge",
                    BinaryOp::BVSlt => "bvslt",
                    BinaryOp::BVSlte => "bvsle",
                    BinaryOp::BVUgt => "bvugt",
                    BinaryOp::BVUgte => "bvuge",
                    BinaryOp::BVUlt => "bvult",
                    BinaryOp::BVUlte => "bvule",
                    BinaryOp::BVMul => "bvmul",
                    BinaryOp::BVUDiv => "bvudiv",
                    BinaryOp::BVSDiv => "bvsdiv",
                    BinaryOp::BVAdd => "bvadd",
                    BinaryOp::BVSub => "bvsub",
                    BinaryOp::BVUrem => "bvurem",
                    BinaryOp::BVSrem => "bvsrem",
                    BinaryOp::BVAnd => "bvand",
                    BinaryOp::BVOr => "bvor",
                    BinaryOp::BVXor => "bvxor",
                    BinaryOp::BVShl => "bvshl",
                    BinaryOp::BVSaddo => "bvsaddo",
                    _ => unreachable!("{:?}", op),
                };
                // If we have some static width that isn't the bitwidth, extract based on it
                // before performing the operation for the dynamic case.
                match static_expr_width {
                    Some(w) if w < self.bitwidth && self.find_widths => {
                        let h: i32 = (w - 1).try_into().unwrap();
                        let x_sexp = self.vir_expr_to_sexp(*x);
                        let y_sexp = self.vir_expr_to_sexp(*y);
                        self.zero_extend(
                            self.bitwidth.checked_sub(w).unwrap(),
                            self.smt.list(vec![
                                self.smt.atom(op_str),
                                self.smt.extract(h, 0, x_sexp),
                                self.smt.extract(h, 0, y_sexp),
                            ]),
                        )
                    }
                    _ => {
                        let x_sexp = self.vir_expr_to_sexp(*x);
                        let y_sexp = self.vir_expr_to_sexp(*y);
                        self.smt.list(vec![self.smt.atom(op_str), x_sexp, y_sexp])
                    }
                }
            }
            Expr::BVIntToBV(w, x) => {
                let x_sexp = self.vir_expr_to_sexp(*x);
                if self.find_widths {
                    let padded_width = self.bitwidth - w;
                    self.zero_extend(padded_width, self.int2bv(w, x_sexp))
                } else {
                    self.int2bv(w, x_sexp)
                }
            }
            Expr::BVToInt(x) => {
                let x_sexp = self.vir_expr_to_sexp(*x);
                self.bv2nat(x_sexp)
            }
            Expr::BVZeroExtTo(i, x) => {
                let arg_width = if self.find_widths {
                    let expr_width = width.unwrap();
                    self.width_assumptions
                        .push(self.smt.eq(expr_width, self.smt.numeral(i)));
                    self.get_expr_width_var(&x).unwrap()
                } else {
                    self.smt.numeral(self.static_width(&x).unwrap())
                };
                let static_width = self.static_width(&x);
                let xs = self.vir_expr_to_sexp(*x);
                if let Some(size) = static_width {
                    self.extend_concrete(i, xs, size, "zero_extend")
                } else {
                    self.extend_symbolic(self.smt.numeral(i), xs, arg_width, "zero_extend")
                }
            }
            Expr::BVZeroExtToVarWidth(i, x) => {
                let static_arg_width = self.static_width(&x);
                let arg_width = self.get_expr_width_var(&x);
                let is = self.vir_expr_to_sexp(*i);
                let xs = self.vir_expr_to_sexp(*x);
                if self.find_widths {
                    let expr_width = width.unwrap();
                    self.width_assumptions.push(self.smt.eq(expr_width, is));
                }
                if let (Some(arg_size), Some(e_size)) = (static_arg_width, static_expr_width) {
                    self.extend_concrete(e_size, xs, arg_size, "zero_extend")
                } else {
                    self.extend_symbolic(is, xs, arg_width.unwrap(), "zero_extend")
                }
            }
            Expr::BVSignExtTo(i, x) => {
                let arg_width = if self.find_widths {
                    let expr_width = width.unwrap();
                    self.width_assumptions
                        .push(self.smt.eq(expr_width, self.smt.numeral(i)));
                    self.get_expr_width_var(&x).unwrap()
                } else {
                    self.smt.numeral(self.static_width(&x).unwrap())
                };
                let static_width = self.static_width(&x);
                let xs = self.vir_expr_to_sexp(*x);
                if let Some(size) = static_width {
                    self.extend_concrete(i, xs, size, "sign_extend")
                } else {
                    self.extend_symbolic(self.smt.numeral(i), xs, arg_width, "sign_extend")
                }
            }
            Expr::BVSignExtToVarWidth(i, x) => {
                let static_arg_width = self.static_width(&x);
                let arg_width = self.get_expr_width_var(&x);
                let is = self.vir_expr_to_sexp(*i);
                let xs = self.vir_expr_to_sexp(*x);
                if self.find_widths {
                    let expr_width = width.unwrap();
                    self.width_assumptions.push(self.smt.eq(expr_width, is));
                }
                if let (Some(arg_size), Some(e_size)) = (static_arg_width, static_expr_width) {
                    self.extend_concrete(e_size, xs, arg_size, "sign_extend")
                } else {
                    self.extend_symbolic(is, xs, arg_width.unwrap(), "sign_extend")
                }
            }
            Expr::BVConvTo(x, y) => {
                if self.find_widths {
                    let expr_width = width.unwrap();
                    let dyn_width = self.vir_expr_to_sexp(*x);
                    let eq = self.smt.eq(expr_width, dyn_width);
                    self.width_assumptions.push(eq);
                    self.vir_expr_to_sexp(*y)
                } else {
                    let arg_width = self.static_width(&y).unwrap();
                    match ty {
                        Some(Type::BitVector(Some(w))) => match arg_width.cmp(w) {
                            Ordering::Less => {
                                let padding =
                                    self.new_fresh_bits(w.checked_sub(arg_width).unwrap());
                                let ys = self.vir_expr_to_sexp(*y);
                                self.smt.concat(padding, ys)
                            }
                            Ordering::Greater => {
                                let new = (w - 1).try_into().unwrap();
                                let ys = self.vir_expr_to_sexp(*y);
                                self.smt.extract(new, 0, ys)
                            }
                            Ordering::Equal => self.vir_expr_to_sexp(*y),
                        },
                        _ => unreachable!("{:?}, {:?}", x, y),
                    }
                }
            }
            Expr::WidthOf(x) => {
                if self.find_widths {
                    self.get_expr_width_var(&x).unwrap()
                } else {
                    self.smt.numeral(self.static_width(&x).unwrap())
                }
            }
            Expr::BVExtract(i, j, x) => {
                assert!(i >= j);
                if self.get_type(&x).is_some() {
                    let xs = self.vir_expr_to_sexp(*x);
                    // No-op if we are extracting exactly the full bitwidth
                    if j == 0 && i == self.bitwidth - 1 && self.find_widths {
                        return xs;
                    }
                    let extract =
                        self.smt
                            .extract(i.try_into().unwrap(), j.try_into().unwrap(), xs);
                    let new_width = i - j + 1;
                    if new_width < self.bitwidth && self.find_widths {
                        let padding =
                            self.new_fresh_bits(self.bitwidth.checked_sub(new_width).unwrap());
                        self.smt.concat(padding, extract)
                    } else {
                        extract
                    }
                } else {
                    unreachable!("Must perform extraction on bv with known width")
                }
            }
            Expr::Conditional(c, t, e) => {
                if self.find_widths && matches!(ty, Some(Type::BitVector(_))) {
                    self.assume_same_width_from_sexpr(width.unwrap(), &t);
                    self.assume_same_width_from_sexpr(width.unwrap(), &e);
                }
                let cs = self.vir_expr_to_sexp(*c);
                let ts = self.vir_expr_to_sexp(*t);
                let es = self.vir_expr_to_sexp(*e);
                self.smt.ite(cs, ts, es)
            }
            Expr::Switch(c, cases) => {
                if self.find_widths {
                    if matches!(ty, Some(Type::BitVector(_))) {
                        for (_, b) in &cases {
                            self.assume_same_width_from_sexpr(width.unwrap(), b);
                        }
                    }
                    let cty = self.get_type(&c);
                    if matches!(cty, Some(Type::BitVector(_))) {
                        let cwidth = self.get_expr_width_var(&c);
                        for (m, _) in &cases {
                            self.assume_same_width_from_sexpr(cwidth.unwrap(), m);
                        }
                    }
                }
                let cs = self.vir_expr_to_sexp(*c);
                let mut case_sexprs: Vec<(SExpr, SExpr)> = cases
                    .iter()
                    .map(|(m, b)| {
                        (
                            self.vir_expr_to_sexp(m.clone()),
                            self.vir_expr_to_sexp(b.clone()),
                        )
                    })
                    .collect();

                // Assert that some case must match
                let some_case_matches: Vec<SExpr> = case_sexprs
                    .iter()
                    .map(|(m, _)| self.smt.eq(cs, *m))
                    .collect();
                self.assert(self.smt.or_many(some_case_matches.clone()));

                let (_, last_body) = case_sexprs.remove(case_sexprs.len() - 1);

                // Reverse to keep the order of the switch
                case_sexprs.iter().rev().fold(last_body, |acc, (m, b)| {
                    self.smt.ite(self.smt.eq(cs, *m), *b, acc)
                })
            }
            Expr::CLZ(e) => {
                let tyvar = *tyvar.unwrap();
                if self.find_widths {
                    self.assume_same_width_from_sexpr(width.unwrap(), &e);
                }
                let es = self.vir_expr_to_sexp(*e);
                match static_expr_width {
                    Some(1) => clz::clz1(self, es, tyvar),
                    Some(8) => clz::clz8(self, es, tyvar),
                    Some(16) => clz::clz16(self, es, tyvar),
                    Some(32) => clz::clz32(self, es, tyvar),
                    Some(64) => clz::clz64(self, es, tyvar),
                    Some(w) => unreachable!("Unexpected CLZ width {}", w),
                    None => unreachable!("Need static CLZ width"),
                }
            }
            Expr::CLS(e) => {
                let tyvar = *tyvar.unwrap();
                if self.find_widths {
                    self.assume_same_width_from_sexpr(width.unwrap(), &e);
                }
                let es = self.vir_expr_to_sexp(*e);
                match static_expr_width {
                    Some(1) => cls::cls1(self, tyvar),
                    Some(8) => cls::cls8(self, es, tyvar),
                    Some(16) => cls::cls16(self, es, tyvar),
                    Some(32) => cls::cls32(self, es, tyvar),
                    Some(64) => cls::cls64(self, es, tyvar),
                    Some(w) => unreachable!("Unexpected CLS width {}", w),
                    None => unreachable!("Need static CLS width"),
                }
            }
            Expr::Rev(e) => {
                let tyvar = *tyvar.unwrap();
                if self.find_widths {
                    self.assume_same_width_from_sexpr(width.unwrap(), &e);
                }
                let es = self.vir_expr_to_sexp(*e);
                match static_expr_width {
                    Some(1) => rev::rev1(self, es, tyvar),
                    Some(8) => rev::rev8(self, es, tyvar),
                    Some(16) => rev::rev16(self, es, tyvar),
                    Some(32) => rev::rev32(self, es, tyvar),
                    Some(64) => rev::rev64(self, es, tyvar),
                    Some(w) => unreachable!("Unexpected Rev width {}", w),
                    None => unreachable!("Need static Rev width"),
                }
            }
            Expr::BVSubs(ty, x, y) => {
                let tyvar = *tyvar.unwrap();
                if self.find_widths {
                    self.assume_comparable_types(&x, &y);
                }
                let ety = self.vir_expr_to_sexp(*ty);
                let ex = self.vir_expr_to_sexp(*x);
                let ey = self.vir_expr_to_sexp(*y);

                let encoded_32 = subs::subs(self, 32, ex, ey, tyvar);
                let encoded_64 = subs::subs(self, 64, ex, ey, tyvar);

                self.smt.ite(
                    self.smt.eq(ety, self.smt.numeral(32)),
                    encoded_32,
                    encoded_64,
                )
            }
            Expr::BVPopcnt(x) => {
                let tyvar = *tyvar.unwrap();
                if self.find_widths {
                    self.assume_same_width_from_sexpr(width.unwrap(), &x);
                }
                let ex = self.vir_expr_to_sexp(*x);

                match static_expr_width {
                    Some(8) => {
                        let p = popcnt(self, 8, ex, tyvar);
                        if self.find_widths {
                            self.zero_extend(self.bitwidth - 8, p)
                        } else {
                            p
                        }
                    }
                    Some(16) => {
                        let p = popcnt(self, 16, ex, tyvar);
                        if self.find_widths {
                            self.zero_extend(self.bitwidth - 8, p)
                        } else {
                            self.zero_extend(8, p)
                        }
                    }
                    Some(32) => {
                        let p = popcnt(self, 32, ex, tyvar);
                        if self.find_widths {
                            self.zero_extend(self.bitwidth - 8, p)
                        } else {
                            self.zero_extend(24, p)
                        }
                    }
                    Some(64) => {
                        let p = popcnt(self, 64, ex, tyvar);
                        if self.find_widths {
                            self.zero_extend(self.bitwidth - 8, p)
                        } else {
                            self.zero_extend(56, p)
                        }
                    }
                    Some(w) => unreachable!("Unexpected popcnt width {}", w),
                    None => unreachable!("Need static popcnt width"),
                }
            }
            Expr::BVConcat(xs) => {
                if self.find_widths {
                    let widths: Vec<SExpr> = xs
                        .iter()
                        .map(|x| self.get_expr_width_var(x).unwrap())
                        .collect();
                    let sum = self.smt.plus_many(widths);
                    self.width_assumptions
                        .push(self.smt.eq(width.unwrap(), sum));
                }
                let mut sexprs: Vec<SExpr> = xs
                    .iter()
                    .map(|x| self.vir_expr_to_sexp(x.clone()))
                    .collect();
                let last = sexprs.remove(sexprs.len() - 1);

                // Width hack for now
                if self.find_widths {
                    return sexprs[0];
                }
                // Reverse to keep the order of the arguments
                sexprs
                    .iter()
                    .rev()
                    .fold(last, |acc, x| self.smt.concat(*x, acc))
            }
            Expr::LoadEffect(x, y, z) => {
                let ex = self.vir_expr_to_sexp(*x);
                let ey = self.vir_expr_to_sexp(*y);
                let ez = self.vir_expr_to_sexp(*z);

                if self.find_widths {
                    self.width_assumptions.push(self.smt.eq(width.unwrap(), ey));
                }

                if self.lhs_flag {
                    if self.lhs_load_args.is_some() {
                        panic!("Only one load on the LHS currently supported, found multiple.")
                    }
                    self.lhs_load_args = Some(vec![ex, ey, ez]);
                    let load_ret = if self.find_widths {
                        self.new_fresh_bits(self.bitwidth)
                    } else {
                        self.new_fresh_bits(static_expr_width.unwrap())
                    };
                    self.load_return = Some(load_ret);
                    load_ret
                } else {
                    if self.rhs_load_args.is_some() {
                        panic!("Only one load on the RHS currently supported, found multiple.")
                    }
                    self.rhs_load_args = Some(vec![ex, ey, ez]);
                    self.load_return.unwrap()
                }
            }
            Expr::StoreEffect(w, x, y, z) => {
                let ew = self.vir_expr_to_sexp(*w);
                let ex = self.vir_expr_to_sexp(*x);
                let ez = self.vir_expr_to_sexp(*z);

                if self.find_widths {
                    let y_width = self.get_expr_width_var(&y).unwrap();
                    self.width_assumptions.push(self.smt.eq(y_width, ex));
                }
                let ey = self.vir_expr_to_sexp(*y);

                if self.lhs_flag {
                    self.lhs_store_args = Some(vec![ew, ex, ey, ez]);
                } else {
                    self.rhs_store_args = Some(vec![ew, ex, ey, ez]);
                }
                self.smt.atom("true")
            }
        }
    }
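
    // Note: `vir_expr_to_sexp` is stateful. Lowering an expression may push
    // fresh declarations, width assumptions, and assertions onto the context,
    // so the order in which subexpressions are lowered matters (see, for
    // example, the width assumption emitted before `y` is lowered in the
    // `StoreEffect` arm above).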

    // Check whether the assumption list is satisfiable, i.e., whether the rule is
    // applicable at all. If the assumptions are always false, the rule is inapplicable.
    fn check_assumptions_feasibility(
        &mut self,
        assumptions: &[SExpr],
        term_input_bs: &[String],
        config: &Config,
    ) -> VerificationResult {
        log::debug!("Checking assumption feasibility");
        self.smt.push().unwrap();
        for (i, a) in assumptions.iter().enumerate() {
            self.smt
                .assert(self.smt.named(format!("assum{i}"), *a))
                .unwrap();
        }

        let res = match self.smt.check() {
            Ok(Response::Sat) => {
                if !config.distinct_check || term_input_bs.is_empty() {
                    log::debug!("Assertion list is feasible for at least one input!");
                    self.smt.pop().unwrap();
                    return VerificationResult::Success;
                }
                // Check that there is a model with distinct bitvector inputs
                let mut not_all_same = vec![];
                let atoms: Vec<SExpr> = term_input_bs.iter().map(|n| self.smt.atom(n)).collect();
                let solution = self.smt.get_value(atoms).unwrap();
                assert_eq!(term_input_bs.len(), solution.len());
                for (variable, value) in solution {
                    not_all_same.push(self.smt.not(self.smt.eq(variable, value)));
                }
                match not_all_same.len().cmp(&1) {
                    Ordering::Equal => self.smt.assert(not_all_same[0]).unwrap(),
                    Ordering::Greater => self.smt.assert(self.smt.and_many(not_all_same)).unwrap(),
                    Ordering::Less => unreachable!("must have some BV inputs"),
                }
                match self.smt.check() {
                    Ok(Response::Sat) => {
                        log::debug!("Assertion list is feasible for two distinct inputs");
                        VerificationResult::Success
                    }
                    Ok(Response::Unsat) => {
                        log::debug!(
                            "Assertion list is only feasible for one input with distinct BV values!"
                        );
                        VerificationResult::NoDistinctModels
                    }
                    Ok(Response::Unknown) => {
                        panic!("Solver said 'unk'");
                    }
                    Err(err) => {
                        unreachable!("Error! {:?}", err);
                    }
                }
            }
            Ok(Response::Unsat) => {
                log::debug!("Assertion list is infeasible!");
                let unsat = self.smt.get_unsat_core().unwrap();
                log::debug!("Unsat core:\n{}", self.smt.display(unsat));
                VerificationResult::InapplicableRule
            }
            Ok(Response::Unknown) => {
                panic!("Solver said 'unk'");
            }
            Err(err) => {
                unreachable!("Error! {:?}", err);
            }
        };
        self.smt.pop().unwrap();
        res
    }
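
    // For example, with `config.distinct_check` enabled, a rule whose
    // assumptions pin every bitvector input to a single concrete value is
    // reported as `NoDistinctModels` rather than `Success`: after the first
    // model is found, we assert that some input differs from that model, and
    // if that second query is unsat, no distinct model exists.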

    fn display_hex_to_bin(&self, value: SExpr) -> String {
        let sexpr_hex_prefix = "#x";
        let val_str = self.smt.display(value).to_string();
        if val_str.starts_with(sexpr_hex_prefix) {
            let without_prefix = val_str.trim_start_matches("#x");
            let as_unsigned = u128::from_str_radix(without_prefix, 16).unwrap();
            // SMT-LIB: bvhexX where X is a hexadecimal numeral of length m defines the bitvector
            // constant with value X and size 4*m.
            match without_prefix.len() {
                2 => format!("{}|{:#010b}", self.smt.display(value), as_unsigned),
                3 => format!("{}|{:#014b}", self.smt.display(value), as_unsigned),
                4 => format!("{}|{:#018b}", self.smt.display(value), as_unsigned),
                8 => format!("{}|{:#034b}", self.smt.display(value), as_unsigned),
                16 => format!("{}|{:#068b}", self.smt.display(value), as_unsigned),
                17 => format!("{}|{:#070b}", self.smt.display(value), as_unsigned),
                32 => format!("{}|{:#0130b}", self.smt.display(value), as_unsigned),
                _ => {
                    format!("{}|{:#b}", self.smt.display(value), as_unsigned)
                }
            }
        } else {
            val_str
        }
    }
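
    // For example, the two-hex-digit value `#x0f` (an 8-bit constant) is
    // displayed as `#x0f|0b00001111`.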

    fn display_value(&self, variable: SExpr, value: SExpr) -> (String, String) {
        let var_str = self.smt.display(variable).to_string();
        (var_str, self.display_hex_to_bin(value))
    }

    fn display_isle_pattern(
        &mut self,
        termenv: &TermEnv,
        typeenv: &TypeEnv,
        vars: &Vec<(String, String)>,
        rule: &Rule,
        pat: &Pattern,
    ) -> SExpr {
        let mut to_sexpr = |p| self.display_isle_pattern(termenv, typeenv, vars, rule, p);

        match pat {
            isle::sema::Pattern::Term(_, term_id, args) => {
                let sym = termenv.terms[term_id.index()].name;
                let name = typeenv.syms[sym.index()].clone();

                let mut sexprs = args.iter().map(&mut to_sexpr).collect::<Vec<SExpr>>();

                sexprs.insert(0, self.smt.atom(name));
                self.smt.list(sexprs)
            }
            isle::sema::Pattern::Var(_, var_id) => {
                let sym = rule.vars[var_id.index()].name;
                let ident = typeenv.syms[sym.index()].clone();
                let smt_ident_prefix = format!("{}__clif{}__", ident, var_id.index());

                let var = self.display_var_from_smt_prefix(vars, &ident, &smt_ident_prefix);
                self.smt.atom(var)
            }
            isle::sema::Pattern::BindPattern(_, var_id, subpat) => {
                let sym = rule.vars[var_id.index()].name;
                let ident = &typeenv.syms[sym.index()];
                let smt_ident_prefix = format!("{}__clif{}__", ident, var_id.index());
                let subpat_node = to_sexpr(subpat);

                let var = self.display_var_from_smt_prefix(vars, ident, &smt_ident_prefix);

                // Special case: elide bind patterns to wildcards
                if matches!(**subpat, isle::sema::Pattern::Wildcard(_)) {
                    self.smt.atom(var)
                } else {
                    self.smt
                        .list(vec![self.smt.atom(var), self.smt.atom("@"), subpat_node])
                }
            }
            isle::sema::Pattern::Wildcard(_) => self.smt.list(vec![self.smt.atom("_")]),

            isle::sema::Pattern::ConstPrim(_, sym) => {
                let name = typeenv.syms[sym.index()].clone();
                self.smt.list(vec![self.smt.atom(name)])
            }
            isle::sema::Pattern::ConstBool(_, val) => {
                self.smt.list(vec![self.smt.atom(format!("{val}"))])
            }
            isle::sema::Pattern::ConstInt(_, num) => {
                let _smt_name_prefix = format!("{num}__");
                self.smt.list(vec![self.smt.atom(num.to_string())])
            }
            isle::sema::Pattern::And(_, subpats) => {
                let mut sexprs = subpats.iter().map(to_sexpr).collect::<Vec<SExpr>>();

                sexprs.insert(0, self.smt.atom("and"));
                self.smt.list(sexprs)
            }
        }
    }

    fn display_var_from_smt_prefix(
        &self,
        vars: &Vec<(String, String)>,
        ident: &str,
        prefix: &str,
    ) -> String {
        let matches: Vec<&(String, String)> =
            vars.iter().filter(|(v, _)| v.starts_with(prefix)).collect();
        if matches.is_empty() {
            panic!("Can't find match for: {prefix}\n{vars:?}");
        } else if matches.len() == 3 {
            assert!(
                self.find_widths,
                "Only expect multiple matches with dynamic widths"
            );
            for (name, model) in matches {
                if name.contains("narrow") {
                    return format!("[{}|{}]", self.smt.display(self.smt.atom(ident)), model);
                }
            }
            panic!("narrow not found");
        } else if matches.len() == 1 {
            let model = &matches.first().unwrap().1;
            format!("[{}|{}]", self.smt.display(self.smt.atom(ident)), model)
        } else {
            panic!("Unexpected number of matches!")
        }
    }

    fn display_isle_expr(
        &self,
        termenv: &TermEnv,
        typeenv: &TypeEnv,
        vars: &Vec<(String, String)>,
        rule: &Rule,
        expr: &isle::sema::Expr,
    ) -> SExpr {
        let to_sexpr = |e| self.display_isle_expr(termenv, typeenv, vars, rule, e);

        match expr {
            isle::sema::Expr::Term(_, term_id, args) => {
                let sym = termenv.terms[term_id.index()].name;
                let name = typeenv.syms[sym.index()].clone();

                let mut sexprs = args.iter().map(to_sexpr).collect::<Vec<SExpr>>();

                sexprs.insert(0, self.smt.atom(name));
                self.smt.list(sexprs)
            }
            isle::sema::Expr::Var(_, var_id) => {
                let sym = rule.vars[var_id.index()].name;
                let ident = typeenv.syms[sym.index()].clone();
                let smt_ident_prefix = format!("{}__clif{}__", ident, var_id.index());

                let var = self.display_var_from_smt_prefix(vars, &ident, &smt_ident_prefix);
                self.smt.atom(var)
            }
            isle::sema::Expr::ConstPrim(_, sym) => {
                let name = typeenv.syms[sym.index()].clone();
                self.smt.list(vec![self.smt.atom(name)])
            }
            isle::sema::Expr::ConstBool(_, val) => {
                self.smt.list(vec![self.smt.atom(format!("{val}"))])
            }
            isle::sema::Expr::ConstInt(_, num) => {
                let _smt_name_prefix = format!("{num}__");
                self.smt.list(vec![self.smt.atom(num.to_string())])
            }
            isle::sema::Expr::Let { bindings, body, .. } => {
                let mut sexprs = vec![];
                for (varid, _, expr) in bindings {
                    let sym = rule.vars[varid.index()].name;
                    let ident = typeenv.syms[sym.index()].clone();
                    let smt_prefix = format!("{}__clif{}__", ident, varid.index());
                    let var = self.display_var_from_smt_prefix(vars, &ident, &smt_prefix);

                    sexprs.push(self.smt.list(vec![self.smt.atom(var), to_sexpr(expr)]));
                }
                self.smt.list(vec![
                    self.smt.atom("let"),
                    self.smt.list(sexprs),
                    to_sexpr(body),
                ])
            }
        }
    }

    fn display_model(
        &mut self,
        termenv: &TermEnv,
        typeenv: &TypeEnv,
        rule: &Rule,
        lhs_sexpr: SExpr,
        rhs_sexpr: SExpr,
    ) {
        let mut vars = vec![];
        let mut lhs_value = None;
        let mut rhs_value = None;
        for (name, atom) in &self.var_map {
            let solution = self
                .smt
                .get_value(vec![self.smt.atom(name), *atom])
                .unwrap();
            for (variable, value) in solution {
                let display = self.display_value(variable, value);
                vars.push(display.clone());
                if variable == lhs_sexpr {
                    lhs_value = Some(display.1);
                } else if variable == rhs_sexpr {
                    rhs_value = Some(display.1);
                }
            }
        }
        for (name, _) in &self.additional_decls {
            let solution = self.smt.get_value(vec![self.smt.atom(name)]).unwrap();
            for (variable, value) in solution {
                vars.push(self.display_value(variable, value));
            }
        }
        vars.sort_by_key(|x| x.0.clone());
        vars.dedup();

        // TODO VERBOSE
        println!("Counterexample summary");
        let lhs = self.display_isle_pattern(
            termenv,
            typeenv,
            &vars,
            rule,
            &Pattern::Term(
                cranelift_isle::sema::TypeId(0),
                rule.root_term,
                rule.args.clone(),
            ),
        );
        println!("{}", self.smt.display(lhs));

        // if-let statement processing
        if !rule.iflets.is_empty() {
            print!("(if-let ");
        }
        for if_let_struct in &rule.iflets {
            let if_lhs = &if_let_struct.lhs;
            let if_rhs: &cranelift_isle::sema::Expr = &if_let_struct.rhs;

            let if_lhs_expr = self.display_isle_pattern(termenv, typeenv, &vars, rule, if_lhs);

            let if_rhs_expr = self.display_isle_expr(termenv, typeenv, &vars, rule, if_rhs);

            println!(
                "({} {})",
                self.smt.display(if_lhs_expr),
                self.smt.display(if_rhs_expr)
            );
        }
        // Close the if-let only if we opened one
        if !rule.iflets.is_empty() {
            println!(")");
        }

        println!("=>");
        let rhs = self.display_isle_expr(termenv, typeenv, &vars, rule, &rule.rhs);
        println!("{}", self.smt.display(rhs));

        println!("\n{} =>\n{}\n", lhs_value.unwrap(), rhs_value.unwrap());
    }

    fn declare_variables(
        &mut self,
        rule_sem: &RuleSemantics,
        config: &Config,
    ) -> (Vec<SExpr>, Vec<SExpr>) {
        let mut assumptions: Vec<SExpr> = vec![];
        log::trace!("Declaring quantified variables");
        for v in &rule_sem.quantified_vars {
            let name = &v.name;
            let ty = self.tyctx.tymap[&v.tyvar];
            let var_ty = self.vir_to_smt_ty(&ty);
            log::trace!("\t{} : {}", name, self.smt.display(var_ty));
            if let Type::BitVector(w) = ty {
                if self.find_widths {
                    let wide = self.widen_to_register_width(
                        v.tyvar,
                        w.unwrap_or(self.bitwidth),
                        self.smt.atom(name),
                        Some(name.to_string()),
                    );
                    self.var_map.insert(name.clone(), wide);
                } else {
                    self.var_map.insert(name.clone(), self.smt.atom(name));
                }
            } else {
                self.var_map.insert(name.clone(), self.smt.atom(name));
            }
            self.smt.declare_const(name, var_ty).unwrap();
        }
        self.lhs_flag = true;
        for a in &rule_sem.lhs_assumptions {
            let p = self.vir_expr_to_sexp(a.clone());
            assumptions.push(p)
        }
        self.lhs_flag = false;
        for a in &rule_sem.rhs_assumptions {
            let p = self.vir_expr_to_sexp(a.clone());
            assumptions.push(p)
        }
        if self.find_widths {
            for a in &self.width_assumptions {
                assumptions.push(*a);
            }
        }
        for a in &self.additional_assumptions {
            assumptions.push(*a);
        }
        // Look at RHS assertions, which are checked, not trusted
        let assertions: Vec<SExpr> = rule_sem
            .rhs_assertions
            .iter()
            .map(|a| self.vir_expr_to_sexp(a.clone()))
            .collect();

        for (name, ty) in &self.additional_decls {
            self.smt.declare_const(name, *ty).unwrap();
        }

        if let Some(a) = &config.custom_assumptions {
            let term_args = rule_sem
                .term_args
                .iter()
                .map(|s| self.smt.atom(s))
                .collect();
            let custom_assumptions = a(&self.smt, term_args);
            log::debug!(
                "Custom assumptions:\n\t{}\n",
                self.smt.display(custom_assumptions)
            );
            assumptions.push(custom_assumptions);
        }
        (assumptions, assertions)
    }
}

/// Overall query for a single rule:
/// <declare vars>
/// (not (=> <assumptions> (= <LHS> <RHS>)))
pub fn run_solver(
    rule_sem: &RuleSemantics,
    rule: &Rule,
    termenv: &TermEnv,
    typeenv: &TypeEnv,
    concrete: &Option<ConcreteTest>,
    config: &Config,
    _types: &TermSignature,
) -> VerificationResult {
    if std::env::var("SKIP_SOLVER").is_ok() {
        log::debug!("Environment variable SKIP_SOLVER set, returning Unknown");
        return VerificationResult::Unknown;
    }

    let mut solver = easy_smt::ContextBuilder::new()
        .replay_file(Some(std::fs::File::create("dynamic_widths.smt2").unwrap()))
        .solver("z3", ["-smt2", "-in"])
        .build()
        .unwrap();

    solver
        .set_option(":produce-unsat-cores", solver.true_())
        .unwrap();

    // We start with logic to determine the width of all bitvectors
    let mut ctx = SolverCtx {
        smt: solver,
        // Always find widths at first
        find_widths: true,
        tyctx: rule_sem.tyctx.clone(),
        bitwidth: MAX_WIDTH,
        var_map: HashMap::new(),
        width_vars: HashMap::new(),
        width_assumptions: vec![],
        additional_decls: vec![],
        additional_assumptions: vec![],
        additional_assertions: vec![],
        fresh_bits_idx: 0,
        lhs_load_args: None,
        rhs_load_args: None,
        lhs_store_args: None,
        rhs_store_args: None,
        load_return: None,
        lhs_flag: true,
    };

    let mut unresolved_widths = vec![];

    // Check whether the non-solver type inference was able to resolve all bitvector widths,
    // and add assumptions for known widths
    for (_e, t) in &ctx.tyctx.tyvars {
        let ty = &ctx.tyctx.tymap[t];
        if let Type::BitVector(w) = ty {
            let width_name = format!("width__{t}");
            ctx.additional_decls
                .push((width_name.clone(), ctx.smt.int_sort()));
            match *w {
                Some(bitwidth) => {
                    let eq = ctx
                        .smt
                        .eq(ctx.smt.atom(&width_name), ctx.smt.numeral(bitwidth));
                    ctx.width_assumptions.push(eq);
                }
                None => {
                    log::debug!("Unresolved width: {:?} ({})", &_e, *t);
                    ctx.width_assumptions
                        .push(ctx.smt.gt(ctx.smt.atom(&width_name), ctx.smt.numeral(0)));
                    unresolved_widths.push(width_name.clone());
                }
            };
            ctx.width_vars.insert(*t, width_name.clone());
        }
    }

    if unresolved_widths.is_empty() {
        log::debug!("All widths resolved after basic type inference");
        return run_solver_with_static_widths(
            &RuleCtx {
                rule_sem,
                rule,
                termenv,
                typeenv,
                config,
            },
            &ctx.tyctx,
            concrete,
        );
    }

    log::debug!("Some unresolved widths after basic type inference");
    log::debug!("Finding widths from the solver");
    ctx.find_widths = true;
    let (assumptions, _) = ctx.declare_variables(rule_sem, config);
    ctx.smt.push().unwrap();
    for (i, a) in assumptions.iter().enumerate() {
        ctx.smt
            .assert(ctx.smt.named(format!("dyn{i}"), *a))
            .unwrap();
    }

    resolve_dynamic_widths(
        RuleCtx {
            rule_sem,
            rule,
            termenv,
            typeenv,
            config,
        },
        concrete,
        &mut ctx,
        unresolved_widths,
        0,
    )
}
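
// Note: `run_solver` only checks for the presence of the `SKIP_SOLVER`
// environment variable, not its value. For example (hypothetical invocation):
//
//     SKIP_SOLVER=1 cargo test
//
// makes every rule's verification return `VerificationResult::Unknown`
// without launching z3.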

fn resolve_dynamic_widths(
    rulectx: RuleCtx,
    concrete: &Option<ConcreteTest>,
    ctx: &mut SolverCtx,
    unresolved_widths: Vec<String>,
    attempt: usize,
) -> VerificationResult {
    if attempt > 10 {
        panic!("Unexpected number of attempts to resolve dynamic widths!")
    }
    match ctx.smt.check() {
        Ok(Response::Sat) => {
            let mut cur_tyctx = ctx.tyctx.clone();
            let mut width_resolutions = HashMap::new();
            for (e, t) in &ctx.tyctx.tyvars {
                let ty = &ctx.tyctx.tymap[t];
                if let Type::BitVector(w) = ty {
                    let width_name = format!("width__{t}");
                    let atom = ctx.smt.atom(&width_name);
                    let width = ctx.smt.get_value(vec![atom]).unwrap().first().unwrap().1;
                    let width_int = u8::try_from(ctx.smt.get(width)).unwrap();

                    // Check that we haven't contradicted previous widths
                    if let Some(before_width) = w {
                        assert_eq!(*before_width, width_int as usize)
                    };

                    // Check that the width is nonzero
                    if width_int == 0 {
                        panic!("Unexpected, zero width! {t} {e:?}");
                    }

                    if unresolved_widths.contains(&width_name) {
                        log::debug!("\tResolved width: {width_name}, {width_int}");
                        width_resolutions.insert(width_name, width_int);
                        cur_tyctx
                            .tymap
                            .insert(*t, Type::BitVector(Some(width_int as usize)));
                    }
                }
            }
            let static_result = run_solver_with_static_widths(&rulectx, &cur_tyctx, concrete);

            // If we have a failure or unknown, return right away
            if !matches!(static_result, VerificationResult::Success) {
                return static_result;
            }

            // Otherwise, try again, but adding the assertion that some width is
            // different than our current assignment
            let not_equals = width_resolutions.iter().map(|(s, w)| {
                ctx.smt.not(
                    ctx.smt
                        .eq(ctx.smt.atom(s.clone()), ctx.smt.atom((*w).to_string())),
                )
            });
            ctx.smt.assert(ctx.smt.or_many(not_equals)).unwrap();

            resolve_dynamic_widths(rulectx, concrete, ctx, unresolved_widths, attempt + 1)
        }
        Ok(Response::Unsat) => {
            if attempt == 0 {
                log::warn!(
                    "Rule not applicable as written for rule assumptions, skipping full query"
                );
                let unsat = ctx.smt.get_unsat_core().unwrap();
                log::warn!("Unsat core:\n{}", ctx.smt.display(unsat));
                VerificationResult::InapplicableRule
            } else {
                // If this is not the first attempt, some previous width assignment must
                // have succeeded.
                VerificationResult::Success
            }
        }
        Ok(Response::Unknown) => {
            panic!("Solver said 'unk'");
        }
        Err(err) => {
            unreachable!("Error! {:?}", err);
        }
    }
}
1817
1818
pub fn run_solver_with_static_widths(
    rulectx: &RuleCtx,
    tyctx: &TypeContext,
    concrete: &Option<ConcreteTest>,
) -> VerificationResult {
    // Declare variables again, this time with all static widths
    let mut solver = easy_smt::ContextBuilder::new()
        .replay_file(Some(std::fs::File::create("static_widths.smt2").unwrap()))
        .solver("z3", ["-smt2", "-in"])
        .build()
        .unwrap();
    solver
        .set_option(":produce-unsat-cores", solver.true_())
        .unwrap();
    let mut ctx = SolverCtx {
        smt: solver,
        find_widths: false,
        tyctx: tyctx.clone(),
        bitwidth: MAX_WIDTH,
        var_map: HashMap::new(),
        width_vars: HashMap::new(),
        width_assumptions: vec![],
        additional_decls: vec![],
        additional_assumptions: vec![],
        additional_assertions: vec![],
        fresh_bits_idx: 0,
        lhs_load_args: None,
        rhs_load_args: None,
        lhs_store_args: None,
        rhs_store_args: None,
        load_return: None,
        lhs_flag: true,
    };
    let (assumptions, mut assertions) = ctx.declare_variables(rulectx.rule_sem, rulectx.config);

    let lhs = ctx.vir_expr_to_sexp(rulectx.rule_sem.lhs.clone());
    ctx.lhs_flag = false;
    let rhs = ctx.vir_expr_to_sexp(rulectx.rule_sem.rhs.clone());

    // For debugging
    let unnamed_rule = String::from("<unnamed rule>");
    let rulename = rulectx
        .rule
        .name
        .map(|name| &rulectx.typeenv.syms[name.index()])
        .unwrap_or(&unnamed_rule);
    let unit = "()".to_string();
    let widthname = ctx
        .static_width(&rulectx.rule_sem.lhs)
        .map_or(unit, |s| format!("width {s}"));

    // Check whether the assumptions are possible
    let feasibility = ctx.check_assumptions_feasibility(
        &assumptions,
        &rulectx.rule_sem.term_input_bvs,
        rulectx.config,
    );
    if feasibility != VerificationResult::Success {
        log::warn!("Rule not applicable as written for rule assumptions, skipping full query");
        return feasibility;
    }

    // Correctness query
    // Verification condition: first rule's LHS and RHS are equal
    if let Some(concrete) = concrete {
        return test_concrete_with_static_widths(
            rulectx,
            concrete,
            lhs,
            rhs,
            &mut ctx,
            assumptions,
        );
    }

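    // The verification condition is either a custom, term-specific condition
    // supplied by the config, or plain equality of the LHS and RHS.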
    let condition = if let Some(condition) = &rulectx.config.custom_verification_condition {
        let term_args = rulectx
            .rule_sem
            .term_args
            .iter()
            .map(|s| ctx.smt.atom(s))
            .collect();
        let custom_condition = condition(&ctx.smt, term_args, lhs, rhs);
        log::debug!(
            "Custom verification condition:\n\t{}\n",
            ctx.smt.display(custom_condition)
        );
        custom_condition
    } else {
        // Note: this is where we ask if the LHS and the RHS are equal
        let side_equality = ctx.smt.eq(lhs, rhs);
        log::debug!(
            "LHS and RHS equality condition:{}",
            ctx.smt.display(side_equality)
        );
        side_equality
    };

    for a in &ctx.additional_assertions {
        assertions.push(*a);
    }

    let assumption_conjunction = ctx.smt.and_many(assumptions);
    let mut full_condition = if !assertions.is_empty() {
        let assertion_conjunction = ctx.smt.and_many(assertions.clone());
        ctx.smt.and(condition, assertion_conjunction)
    } else {
        condition
    };

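    // Loads and stores must match across the rule: if only one side performs
    // a memory access the rule is rejected outright, and if both sides do,
    // equality of their arguments is conjoined into the condition.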
    let mut load_conditions = vec![];
    match (&ctx.lhs_load_args, &ctx.rhs_load_args) {
        (Some(_), Some(_)) => {
            let lhs_args_vec = ctx.lhs_load_args.clone().unwrap();
            let rhs_args_vec = ctx.rhs_load_args.clone().unwrap();
            log::debug!("Load argument conditions:");
            for i in 0..lhs_args_vec.len() {
                let arg_equal = ctx.smt.eq(lhs_args_vec[i], rhs_args_vec[i]);
                load_conditions.push(arg_equal);
                log::debug!("\t{}", ctx.smt.display(arg_equal));
                full_condition = ctx.smt.and(full_condition, arg_equal);
            }
        }
        (None, None) => (),
        (Some(_), None) => {
            log::error!("Verification failed for {rulename}, {widthname}");
            log::error!("Left hand side has load statement but right hand side does not.");
            return VerificationResult::Failure(Counterexample {});
        }
        (None, Some(_)) => {
            log::error!("Verification failed for {rulename}, {widthname}");
            log::error!("Right hand side has load statement but left hand side does not.");
            return VerificationResult::Failure(Counterexample {});
        }
    }

    let mut store_conditions = vec![];
    match (&ctx.lhs_store_args, &ctx.rhs_store_args) {
        (Some(_), Some(_)) => {
            let lhs_args_vec = ctx.lhs_store_args.clone().unwrap();
            let rhs_args_vec = ctx.rhs_store_args.clone().unwrap();
            log::debug!("Store argument conditions:");

            for i in 0..lhs_args_vec.len() {
                let arg_equal = ctx.smt.eq(lhs_args_vec[i], rhs_args_vec[i]);
                store_conditions.push(arg_equal);
                log::debug!("\t{}", ctx.smt.display(arg_equal));
                full_condition = ctx.smt.and(full_condition, arg_equal)
            }
        }
        (None, None) => (),
        (Some(_), None) => {
            log::error!("Verification failed for {rulename}, {widthname}");
            log::error!("Left hand side has store statement but right hand side does not.");
            return VerificationResult::Failure(Counterexample {});
        }
        (None, Some(_)) => {
            log::error!("Verification failed for {rulename}, {widthname}");
            log::error!("Right hand side has store statement but left hand side does not.");
            return VerificationResult::Failure(Counterexample {});
        }
    }

    log::trace!(
        "Full verification condition:{}",
        ctx.smt.display(full_condition)
    );
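    // Ask the solver for a counterexample by asserting the *negation* of
    // (assumptions => condition). Unsat means the condition holds for every
    // input satisfying the assumptions; Sat yields a concrete counterexample.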
    let query = ctx
        .smt
        .not(ctx.smt.imp(assumption_conjunction, full_condition));
    log::trace!("Running query");
    ctx.smt.assert(query).unwrap();

    match ctx.smt.check() {
        Ok(Response::Sat) => {
            println!("Verification failed for {rulename}, {widthname}");
            ctx.display_model(rulectx.termenv, rulectx.typeenv, rulectx.rule, lhs, rhs);
            let vals = ctx.smt.get_value(vec![condition]).unwrap();
            for (variable, value) in vals {
                if value == ctx.smt.false_() {
                    println!("Failed condition:\n{}", ctx.smt.display(variable));
                } else if value == ctx.smt.true_() {
                    println!("Condition met, but failed some assertion(s).")
                }
            }

            if !assertions.is_empty() {
                let vals = ctx.smt.get_value(assertions).unwrap();
                for (variable, value) in vals {
                    if value == ctx.smt.false_() {
                        println!("Failed assertion:\n{}", ctx.smt.display(variable));
                    }
                }
            }

            if !load_conditions.is_empty() {
                let vals = ctx.smt.get_value(load_conditions).unwrap();
                for (variable, value) in vals {
                    if value == ctx.smt.false_() {
                        log::error!("Failed load condition:\n{}", ctx.smt.display(variable));
                    }
                }
            }
            VerificationResult::Failure(Counterexample {})
        }
        Ok(Response::Unsat) => {
            println!("Verification succeeded for {rulename}, {widthname}");
            VerificationResult::Success
        }
        Ok(Response::Unknown) => {
            panic!("Solver said 'unk'");
        }
        Err(err) => {
            unreachable!("Error! {:?}", err);
        }
    }
}

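/// Check a rule against a single concrete input/output pair instead of
/// verifying it universally: first confirm that the expected output is
/// consistent with the encoded rule, then confirm that no other output is
/// possible under the same assumptions.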
pub fn test_concrete_with_static_widths(
    rulectx: &RuleCtx,
    concrete: &ConcreteTest,
    lhs: SExpr,
    rhs: SExpr,
    ctx: &mut SolverCtx,
    assumptions: Vec<SExpr>,
) -> VerificationResult {
    // Test code only: test against concrete input/output
    // Check that our expected output is valid
    for (i, a) in assumptions.iter().enumerate() {
        ctx.smt
            .assert(ctx.smt.named(format!("conc{i}"), *a))
            .unwrap();
    }
    for (i, e) in ctx.additional_assertions.iter().enumerate() {
        ctx.smt
            .assert(ctx.smt.named(format!("conc_assert{i}"), *e))
            .unwrap();
    }
    ctx.smt.push().unwrap();
    let eq = ctx
        .smt
        .eq(rhs, ctx.smt.atom(concrete.output.literal.clone()));

    ctx.smt
        .assert(ctx.smt.named("conceq".to_string(), eq))
        .unwrap();

    for (i, a) in rulectx.rule_sem.rhs_assertions.iter().enumerate() {
        let p = ctx.vir_expr_to_sexp(a.clone());
        ctx.smt
            .assert(ctx.smt.named(format!("rhs_assert{i}"), p))
            .unwrap();
    }

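    // The expected output must be reachable: if asserting rhs == expected is
    // unsatisfiable, the rule produces some other value, which we recover
    // below for the panic message.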
    if !matches!(ctx.smt.check(), Ok(Response::Sat)) {
        // Bad! This is a bug!
        // Pop the output assertion
        ctx.smt.pop().unwrap();
        // Try again
        assert!(matches!(ctx.smt.check(), Ok(Response::Sat)));
        // Get the value of the actual output so we can panic with a useful message
        let val = ctx.smt.get_value(vec![rhs]).unwrap()[0].1;
        ctx.display_model(rulectx.termenv, rulectx.typeenv, rulectx.rule, lhs, rhs);
        panic!(
            "Expected {}, got {}",
            concrete.output.literal,
            ctx.display_hex_to_bin(val)
        );
    } else {
        log::debug!(
            "Expected concrete result matched: {}",
            concrete.output.literal
        );
        ctx.smt.pop().unwrap();
    }

    // Check that there is no other possible output
    ctx.smt.push().unwrap();
    ctx.smt
        .assert(
            ctx.smt.not(
                ctx.smt
                    .eq(rhs, ctx.smt.atom(concrete.output.literal.clone())),
            ),
        )
        .unwrap();
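    // If the negated equality is satisfiable, the rule can also produce a
    // second, different output for this input; report it loudly but do not
    // fail the test.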
    if !matches!(ctx.smt.check(), Ok(Response::Unsat)) {
        // Get the value of the other possible output so we can log a useful message
        let val = ctx.smt.get_value(vec![rhs]).unwrap()[0].1;
        ctx.display_model(rulectx.termenv, rulectx.typeenv, rulectx.rule, lhs, rhs);
        // AVH TODO: should probably elevate back to an error with custom verification condition
        log::error!(
            "WARNING: Expected ONLY {}, got POSSIBLE {}",
            concrete.output.literal,
            ctx.display_hex_to_bin(val)
        );
    }
    ctx.smt.pop().unwrap();
    VerificationResult::Success
}