Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/compiler/nir/nir_builtin_builder.c
4545 views
1
/*
2
* Copyright © 2018 Red Hat Inc.
3
* Copyright © 2015 Intel Corporation
4
*
5
* Permission is hereby granted, free of charge, to any person obtaining a
6
* copy of this software and associated documentation files (the "Software"),
7
* to deal in the Software without restriction, including without limitation
8
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
9
* and/or sell copies of the Software, and to permit persons to whom the
10
* Software is furnished to do so, subject to the following conditions:
11
*
12
* The above copyright notice and this permission notice (including the next
13
* paragraph) shall be included in all copies or substantial portions of the
14
* Software.
15
*
16
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22
* IN THE SOFTWARE.
23
*/
24
25
#include <math.h>
26
27
#include "nir.h"
28
#include "nir_builtin_builder.h"
29
30
nir_ssa_def*
31
nir_cross3(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
32
{
33
unsigned yzx[3] = { 1, 2, 0 };
34
unsigned zxy[3] = { 2, 0, 1 };
35
36
return nir_fsub(b, nir_fmul(b, nir_swizzle(b, x, yzx, 3),
37
nir_swizzle(b, y, zxy, 3)),
38
nir_fmul(b, nir_swizzle(b, x, zxy, 3),
39
nir_swizzle(b, y, yzx, 3)));
40
}
41
42
nir_ssa_def*
43
nir_cross4(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
44
{
45
nir_ssa_def *cross = nir_cross3(b, x, y);
46
47
return nir_vec4(b,
48
nir_channel(b, cross, 0),
49
nir_channel(b, cross, 1),
50
nir_channel(b, cross, 2),
51
nir_imm_intN_t(b, 0, cross->bit_size));
52
}
53
54
nir_ssa_def*
55
nir_fast_length(nir_builder *b, nir_ssa_def *vec)
56
{
57
switch (vec->num_components) {
58
case 1: return nir_fsqrt(b, nir_fmul(b, vec, vec));
59
case 2: return nir_fsqrt(b, nir_fdot2(b, vec, vec));
60
case 3: return nir_fsqrt(b, nir_fdot3(b, vec, vec));
61
case 4: return nir_fsqrt(b, nir_fdot4(b, vec, vec));
62
case 8: return nir_fsqrt(b, nir_fdot8(b, vec, vec));
63
case 16: return nir_fsqrt(b, nir_fdot16(b, vec, vec));
64
default:
65
unreachable("Invalid number of components");
66
}
67
}
68
69
/* Builds an implementation of nextafter(x, y): the next representable
 * float after x in the direction of y.
 *
 * The core trick is that for finite non-zero x, stepping to the adjacent
 * float away from zero is "bit pattern + 1" and towards zero is
 * "bit pattern - 1"; the crossings through ±0.0 are special-cased below.
 * NaN inputs are handled at the end via nir_nan_check2.
 */
nir_ssa_def*
nir_nextafter(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
{
   nir_ssa_def *zero = nir_imm_intN_t(b, 0, x->bit_size);
   nir_ssa_def *one = nir_imm_intN_t(b, 1, x->bit_size);

   nir_ssa_def *condeq = nir_feq(b, x, y);     /* x == y: return x as-is */
   nir_ssa_def *conddir = nir_flt(b, x, y);    /* stepping upwards? */
   nir_ssa_def *condzero = nir_feq(b, x, zero); /* matches both +0.0 and -0.0 */

   uint64_t sign_mask = 1ull << (x->bit_size - 1);
   uint64_t min_abs = 1;

   /* If denorms are flushed to zero, the first step away from zero must
    * land on the smallest normalized value (bit pattern with only the
    * lowest exponent bit set), not on the smallest denormal, which would
    * just get flushed back to zero.
    */
   if (nir_is_denorm_flush_to_zero(b->shader->info.float_controls_execution_mode, x->bit_size)) {
      switch (x->bit_size) {
      case 16:
         min_abs = 1 << 10;
         break;
      case 32:
         min_abs = 1 << 23;
         break;
      case 64:
         min_abs = 1ULL << 52;
         break;
      }

      /* Flush denorm to zero to avoid returning a denorm when condeq is true. */
      x = nir_fmul(b, x, nir_imm_floatN_t(b, 1.0, x->bit_size));
   }

   /* Step towards -inf.  beware of: +/-0.0 - 1 == NaN, so from zero we
    * jump directly to the smallest representable negative magnitude.
    */
   nir_ssa_def *xn =
      nir_bcsel(b,
                condzero,
                nir_imm_intN_t(b, sign_mask | min_abs, x->bit_size),
                nir_isub(b, x, one));

   /* Step towards +inf.  beware of -0.0 + 1 == -0x1p-149 */
   nir_ssa_def *xp = nir_bcsel(b, condzero,
                               nir_imm_intN_t(b, min_abs, x->bit_size),
                               nir_iadd(b, x, one));

   /* nextafter can be implemented by just +/- 1 on the int value:
    * increment when (moving up) XOR (x is negative), decrement otherwise.
    */
   nir_ssa_def *res =
      nir_bcsel(b, nir_ixor(b, conddir, nir_flt(b, x, zero)), xp, xn);

   return nir_nan_check2(b, x, y, nir_bcsel(b, condeq, x, res));
}
/* Builds a normalize(): vec scaled to unit length.
 *
 * Scalars reduce to sign(x).  Vectors are pre-divided by their
 * largest-magnitude component so the dot product below cannot overflow
 * and gains precision; a zero-length input is passed through unchanged.
 */
nir_ssa_def*
nir_normalize(nir_builder *b, nir_ssa_def *vec)
{
   if (vec->num_components == 1)
      return nir_fsign(b, vec);

   nir_ssa_def *f0 = nir_imm_floatN_t(b, 0.0, vec->bit_size);
   nir_ssa_def *f1 = nir_imm_floatN_t(b, 1.0, vec->bit_size);
   nir_ssa_def *finf = nir_imm_floatN_t(b, INFINITY, vec->bit_size);

   /* scale the input to increase precision */
   nir_ssa_def *maxc = nir_fmax_abs_vec_comp(b, vec);
   nir_ssa_def *svec = nir_fdiv(b, vec, maxc);
   /* for inf: build a 1.0-where-+inf / 0.0-elsewhere mask to use instead
    * of svec (which would be inf/inf = NaN).
    * NOTE(review): the sign source of this copysign is f1, so every
    * component comes out non-negative and -inf components map to 0 —
    * confirm this matches the intended handling of negative infinities.
    */
   nir_ssa_def *finfvec = nir_copysign(b, nir_bcsel(b, nir_feq(b, vec, finf), f1, f0), f1);

   nir_ssa_def *temp = nir_bcsel(b, nir_feq(b, maxc, finf), finfvec, svec);
   nir_ssa_def *res = nir_fmul(b, temp, nir_frsq(b, nir_fdot(b, temp, temp)));

   /* a zero vector would have divided by zero above; return it unchanged */
   return nir_bcsel(b, nir_feq(b, maxc, f0), vec, res);
}
nir_ssa_def*
141
nir_smoothstep(nir_builder *b, nir_ssa_def *edge0, nir_ssa_def *edge1, nir_ssa_def *x)
142
{
143
nir_ssa_def *f2 = nir_imm_floatN_t(b, 2.0, x->bit_size);
144
nir_ssa_def *f3 = nir_imm_floatN_t(b, 3.0, x->bit_size);
145
146
/* t = clamp((x - edge0) / (edge1 - edge0), 0, 1) */
147
nir_ssa_def *t =
148
nir_fsat(b, nir_fdiv(b, nir_fsub(b, x, edge0),
149
nir_fsub(b, edge1, edge0)));
150
151
/* result = t * t * (3 - 2 * t) */
152
return nir_fmul(b, t, nir_fmul(b, t, nir_fsub(b, f3, nir_fmul(b, f2, t))));
153
}
154
155
nir_ssa_def*
156
nir_upsample(nir_builder *b, nir_ssa_def *hi, nir_ssa_def *lo)
157
{
158
assert(lo->num_components == hi->num_components);
159
assert(lo->bit_size == hi->bit_size);
160
161
nir_ssa_def *res[NIR_MAX_VEC_COMPONENTS];
162
for (unsigned i = 0; i < lo->num_components; ++i) {
163
nir_ssa_def *vec = nir_vec2(b, nir_channel(b, lo, i), nir_channel(b, hi, i));
164
res[i] = nir_pack_bits(b, vec, vec->bit_size * 2);
165
}
166
167
return nir_vec(b, res, lo->num_components);
168
}
169
170
/**
171
* Compute xs[0] + xs[1] + xs[2] + ... using fadd.
172
*/
173
static nir_ssa_def *
174
build_fsum(nir_builder *b, nir_ssa_def **xs, int terms)
175
{
176
nir_ssa_def *accum = xs[0];
177
178
for (int i = 1; i < terms; i++)
179
accum = nir_fadd(b, accum, xs[i]);
180
181
return accum;
182
}
183
184
/* Builds an approximation of atan(y_over_x) via range reduction followed
 * by an odd polynomial in the reduced argument, then undoes the reduction
 * and restores the sign of the input.
 */
nir_ssa_def *
nir_atan(nir_builder *b, nir_ssa_def *y_over_x)
{
   const uint32_t bit_size = y_over_x->bit_size;

   nir_ssa_def *abs_y_over_x = nir_fabs(b, y_over_x);
   nir_ssa_def *one = nir_imm_floatN_t(b, 1.0f, bit_size);

   /*
    * range-reduction, first step:
    *
    *      / y_over_x         if |y_over_x| <= 1.0;
    * x = <
    *      \ 1.0 / y_over_x   otherwise
    *
    * (written branchlessly as min/max so x is always in [0, 1])
    */
   nir_ssa_def *x = nir_fdiv(b, nir_fmin(b, abs_y_over_x, one),
                             nir_fmax(b, abs_y_over_x, one));

   /*
    * approximate atan by evaluating polynomial:
    *
    * x   * 0.9999793128310355 - x^3  * 0.3326756418091246 +
    * x^5 * 0.1938924977115610 - x^7  * 0.1173503194786851 +
    * x^9 * 0.0536813784310406 - x^11 * 0.0121323213173444
    */
   nir_ssa_def *x_2 = nir_fmul(b, x, x);
   nir_ssa_def *x_3 = nir_fmul(b, x_2, x);
   nir_ssa_def *x_5 = nir_fmul(b, x_3, x_2);
   nir_ssa_def *x_7 = nir_fmul(b, x_5, x_2);
   nir_ssa_def *x_9 = nir_fmul(b, x_7, x_2);
   nir_ssa_def *x_11 = nir_fmul(b, x_9, x_2);

   nir_ssa_def *polynomial_terms[] = {
      nir_fmul_imm(b, x, 0.9999793128310355f),
      nir_fmul_imm(b, x_3, -0.3326756418091246f),
      nir_fmul_imm(b, x_5, 0.1938924977115610f),
      nir_fmul_imm(b, x_7, -0.1173503194786851f),
      nir_fmul_imm(b, x_9, 0.0536813784310406f),
      nir_fmul_imm(b, x_11, -0.0121323213173444f),
   };

   nir_ssa_def *tmp =
      build_fsum(b, polynomial_terms, ARRAY_SIZE(polynomial_terms));

   /* range-reduction fixup: if the input was reciprocated above, apply
    * atan(u) = pi/2 - atan(1/u), selected via a 0/1 multiplier.
    */
   tmp = nir_fadd(b, tmp,
                  nir_fmul(b, nir_b2f(b, nir_flt(b, one, abs_y_over_x), bit_size),
                           nir_fadd_imm(b, nir_fmul_imm(b, tmp, -2.0f), M_PI_2)));

   /* sign fixup: atan is odd, so transfer the input's sign back */
   return nir_fmul(b, tmp, nir_fsign(b, y_over_x));
}
/* Builds atan2(y, x) on top of nir_atan, with care taken around the
 * left half-plane discontinuity, huge denominators, and the IEEE special
 * cases for infinities; see the step-by-step comments below.
 */
nir_ssa_def *
nir_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x)
{
   assert(y->bit_size == x->bit_size);
   const uint32_t bit_size = x->bit_size;

   nir_ssa_def *zero = nir_imm_floatN_t(b, 0, bit_size);
   nir_ssa_def *one = nir_imm_floatN_t(b, 1, bit_size);

   /* If we're on the left half-plane rotate the coordinates π/2 clock-wise
    * for the y=0 discontinuity to end up aligned with the vertical
    * discontinuity of atan(s/t) along t=0.  This also makes sure that we
    * don't attempt to divide by zero along the vertical line, which may give
    * unspecified results on non-GLSL 4.1-capable hardware.
    */
   nir_ssa_def *flip = nir_fge(b, zero, x);
   nir_ssa_def *s = nir_bcsel(b, flip, nir_fabs(b, x), y);
   nir_ssa_def *t = nir_bcsel(b, flip, y, nir_fabs(b, x));

   /* If the magnitude of the denominator exceeds some huge value, scale down
    * the arguments in order to prevent the reciprocal operation from flushing
    * its result to zero, which would cause precision problems, and for s
    * infinite would cause us to return a NaN instead of the correct finite
    * value.
    *
    * If fmin and fmax are respectively the smallest and largest positive
    * normalized floating point values representable by the implementation,
    * the constants below should be in agreement with:
    *
    *    huge <= 1 / fmin
    *    scale <= 1 / fmin / fmax (for |t| >= huge)
    *
    * In addition scale should be a negative power of two in order to avoid
    * loss of precision.  The values chosen below should work for most usual
    * floating point representations with at least the dynamic range of ATI's
    * 24-bit representation.
    */
   const double huge_val = bit_size >= 32 ? 1e18 : 16384;
   nir_ssa_def *huge = nir_imm_floatN_t(b, huge_val, bit_size);
   nir_ssa_def *scale = nir_bcsel(b, nir_fge(b, nir_fabs(b, t), huge),
                                  nir_imm_floatN_t(b, 0.25, bit_size), one);
   nir_ssa_def *rcp_scaled_t = nir_frcp(b, nir_fmul(b, t, scale));
   nir_ssa_def *s_over_t = nir_fmul(b, nir_fmul(b, s, scale), rcp_scaled_t);

   /* For |x| = |y| assume tan = 1 even if infinite (i.e. pretend momentarily
    * that ∞/∞ = 1) in order to comply with the rather artificial rules
    * inherited from IEEE 754-2008, namely:
    *
    *  "atan2(±∞, −∞) is ±3π/4
    *   atan2(±∞, +∞) is ±π/4"
    *
    * Note that this is inconsistent with the rules for the neighborhood of
    * zero that are based on iterated limits:
    *
    *  "atan2(±0, −0) is ±π
    *   atan2(±0, +0) is ±0"
    *
    * but GLSL specifically allows implementations to deviate from IEEE rules
    * at (0,0), so we take that license (i.e. pretend that 0/0 = 1 here as
    * well).
    */
   nir_ssa_def *tan = nir_bcsel(b, nir_feq(b, nir_fabs(b, x), nir_fabs(b, y)),
                                one, nir_fabs(b, s_over_t));

   /* Calculate the arctangent and fix up the result if we had flipped the
    * coordinate system (adding π/2 undoes the rotation above).
    */
   nir_ssa_def *arc =
      nir_fadd(b, nir_fmul_imm(b, nir_b2f(b, flip, bit_size), M_PI_2),
               nir_atan(b, tan));

   /* Rather convoluted calculation of the sign of the result.  When x < 0 we
    * cannot use fsign because we need to be able to distinguish between
    * negative and positive zero.  We don't use bitwise arithmetic tricks for
    * consistency with the GLSL front-end.  When x >= 0 rcp_scaled_t will
    * always be non-negative so this won't be able to distinguish between
    * negative and positive zero, but we don't care because atan2 is
    * continuous along the whole positive y = 0 half-line, so it won't affect
    * the result significantly.
    */
   return nir_bcsel(b, nir_flt(b, nir_fmin(b, y, rcp_scaled_t), zero),
                    nir_fneg(b, arc), arc);
}
nir_ssa_def *
322
nir_get_texture_size(nir_builder *b, nir_tex_instr *tex)
323
{
324
b->cursor = nir_before_instr(&tex->instr);
325
326
nir_tex_instr *txs;
327
328
unsigned num_srcs = 1; /* One for the LOD */
329
for (unsigned i = 0; i < tex->num_srcs; i++) {
330
if (tex->src[i].src_type == nir_tex_src_texture_deref ||
331
tex->src[i].src_type == nir_tex_src_sampler_deref ||
332
tex->src[i].src_type == nir_tex_src_texture_offset ||
333
tex->src[i].src_type == nir_tex_src_sampler_offset ||
334
tex->src[i].src_type == nir_tex_src_texture_handle ||
335
tex->src[i].src_type == nir_tex_src_sampler_handle)
336
num_srcs++;
337
}
338
339
txs = nir_tex_instr_create(b->shader, num_srcs);
340
txs->op = nir_texop_txs;
341
txs->sampler_dim = tex->sampler_dim;
342
txs->is_array = tex->is_array;
343
txs->is_shadow = tex->is_shadow;
344
txs->is_new_style_shadow = tex->is_new_style_shadow;
345
txs->texture_index = tex->texture_index;
346
txs->sampler_index = tex->sampler_index;
347
txs->dest_type = nir_type_int32;
348
349
unsigned idx = 0;
350
for (unsigned i = 0; i < tex->num_srcs; i++) {
351
if (tex->src[i].src_type == nir_tex_src_texture_deref ||
352
tex->src[i].src_type == nir_tex_src_sampler_deref ||
353
tex->src[i].src_type == nir_tex_src_texture_offset ||
354
tex->src[i].src_type == nir_tex_src_sampler_offset ||
355
tex->src[i].src_type == nir_tex_src_texture_handle ||
356
tex->src[i].src_type == nir_tex_src_sampler_handle) {
357
nir_src_copy(&txs->src[idx].src, &tex->src[i].src, txs);
358
txs->src[idx].src_type = tex->src[i].src_type;
359
idx++;
360
}
361
}
362
/* Add in an LOD because some back-ends require it */
363
txs->src[idx].src = nir_src_for_ssa(nir_imm_int(b, 0));
364
txs->src[idx].src_type = nir_tex_src_lod;
365
366
nir_ssa_dest_init(&txs->instr, &txs->dest,
367
nir_tex_instr_dest_size(txs), 32, NULL);
368
nir_builder_instr_insert(b, &txs->instr);
369
370
return &txs->dest.ssa;
371
}
372
373
nir_ssa_def *
374
nir_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
375
{
376
b->cursor = nir_before_instr(&tex->instr);
377
378
nir_tex_instr *tql;
379
380
unsigned num_srcs = 0;
381
for (unsigned i = 0; i < tex->num_srcs; i++) {
382
if (tex->src[i].src_type == nir_tex_src_coord ||
383
tex->src[i].src_type == nir_tex_src_texture_deref ||
384
tex->src[i].src_type == nir_tex_src_sampler_deref ||
385
tex->src[i].src_type == nir_tex_src_texture_offset ||
386
tex->src[i].src_type == nir_tex_src_sampler_offset ||
387
tex->src[i].src_type == nir_tex_src_texture_handle ||
388
tex->src[i].src_type == nir_tex_src_sampler_handle)
389
num_srcs++;
390
}
391
392
tql = nir_tex_instr_create(b->shader, num_srcs);
393
tql->op = nir_texop_lod;
394
tql->coord_components = tex->coord_components;
395
tql->sampler_dim = tex->sampler_dim;
396
tql->is_array = tex->is_array;
397
tql->is_shadow = tex->is_shadow;
398
tql->is_new_style_shadow = tex->is_new_style_shadow;
399
tql->texture_index = tex->texture_index;
400
tql->sampler_index = tex->sampler_index;
401
tql->dest_type = nir_type_float32;
402
403
unsigned idx = 0;
404
for (unsigned i = 0; i < tex->num_srcs; i++) {
405
if (tex->src[i].src_type == nir_tex_src_coord ||
406
tex->src[i].src_type == nir_tex_src_texture_deref ||
407
tex->src[i].src_type == nir_tex_src_sampler_deref ||
408
tex->src[i].src_type == nir_tex_src_texture_offset ||
409
tex->src[i].src_type == nir_tex_src_sampler_offset ||
410
tex->src[i].src_type == nir_tex_src_texture_handle ||
411
tex->src[i].src_type == nir_tex_src_sampler_handle) {
412
nir_src_copy(&tql->src[idx].src, &tex->src[i].src, tql);
413
tql->src[idx].src_type = tex->src[i].src_type;
414
idx++;
415
}
416
}
417
418
nir_ssa_dest_init(&tql->instr, &tql->dest, 2, 32, NULL);
419
nir_builder_instr_insert(b, &tql->instr);
420
421
/* The LOD is the y component of the result */
422
return nir_channel(b, &tql->dest.ssa, 1);
423
}
424
425