Path: blob/main/contrib/llvm-project/openmp/runtime/src/kmp_atomic.cpp
/*
 * kmp_atomic.cpp -- ATOMIC implementation routines
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp_atomic.h"
#include "kmp.h" // TRUE, asm routines prototypes

typedef unsigned char uchar;
typedef unsigned short ushort;

/*!
@defgroup ATOMIC_OPS Atomic Operations
These functions are used for implementing the many different varieties of
atomic operations.

The compiler is at liberty to inline atomic operations that are naturally
supported by the target architecture. For instance on IA-32 architecture an
atomic like this can be inlined
@code
static int s = 0;
#pragma omp atomic
s++;
@endcode
using the single instruction: `lock; incl s`

However the runtime does provide entrypoints for these operations to support
compilers that choose not to inline them. (For instance,
`__kmpc_atomic_fixed4_add` could be used to perform the increment above.)

The names of the functions are encoded by using the data type name and the
operation name, as in these tables.

Data Type  | Data type encoding
-----------|---------------
int8_t     | `fixed1`
uint8_t    | `fixed1u`
int16_t    | `fixed2`
uint16_t   | `fixed2u`
int32_t    | `fixed4`
uint32_t   | `fixed4u`
int64_t    | `fixed8`
uint64_t   | `fixed8u`
float      | `float4`
double     | `float8`
float 10 (8087 eighty bit float) | `float10`
complex<float>   | `cmplx4`
complex<double>  | `cmplx8`
complex<float10> | `cmplx10`
<br>

Operation | Operation encoding
----------|-------------------
+ | add
- | sub
\* | mul
/ | div
& | andb
<< | shl
\>\> | shr
\| | orb
^ | xor
&& | andl
\|\| | orl
maximum | max
minimum | min
.eqv. | eqv
.neqv. | neqv

<br>
For non-commutative operations, `_rev` can also be added for the reversed
operation. For the functions that capture the result, the suffix `_cpt` is
added.

Update Functions
================
The general form of an atomic function that just performs an update (without a
`capture`) is
@code
void __kmpc_atomic_<datatype>_<operation>( ident_t *id_ref, int gtid, TYPE *
lhs, TYPE rhs );
@endcode
@param ident_t a pointer to source location
@param gtid the global thread id
@param lhs a pointer to the left operand
@param rhs the right operand

`capture` functions
===================
The capture functions perform an atomic update and return a result, which is
either the value before the update or the value after it. They take an
additional argument to determine which result is returned.
Their general form is therefore
@code
TYPE __kmpc_atomic_<datatype>_<operation>_cpt( ident_t *id_ref, int gtid, TYPE *
lhs, TYPE rhs, int flag );
@endcode
@param ident_t a pointer to source location
@param gtid the global thread id
@param lhs a pointer to the left operand
@param rhs the right operand
@param flag one if the result is to be captured *after* the operation, zero if
captured *before*.

The one set of exceptions to this is the `complex<float>` type, where the value
is not returned; instead an extra output-pointer argument is passed.

These functions look like
@code
void __kmpc_atomic_cmplx4_<op>_cpt( ident_t *id_ref, int gtid, kmp_cmplx32 *
lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out, int flag );
@endcode

Read and Write Operations
=========================
The OpenMP<sup>*</sup> standard now supports atomic operations that simply
ensure that the value is read or written atomically, with no modification
performed. In many cases on IA-32 architecture these operations can be inlined
since the architecture guarantees that no tearing occurs on aligned objects
accessed with a single memory operation of up to 64 bits in size.

The general form of the read operations is
@code
TYPE __kmpc_atomic_<type>_rd ( ident_t *id_ref, int gtid, TYPE * loc );
@endcode

For the write operations the form is
@code
void __kmpc_atomic_<type>_wr ( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs
);
@endcode
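Purely as an illustration of the update and capture forms described above
(the `ident_t` location record and the `gtid` are normally materialized by
the compiler; `loc` and `gtid` below are placeholders), a 32-bit integer
could be updated and captured directly through these entrypoints as
@code
kmp_int32 x = 0;
// equivalent of: #pragma omp atomic          (update only)
__kmpc_atomic_fixed4_add(&loc, gtid, &x, 5); // x += 5, atomically
// equivalent of: #pragma omp atomic capture  (flag == 1: value *after* the op)
kmp_int32 v = __kmpc_atomic_fixed4_add_cpt(&loc, gtid, &x, 5, 1);
@endcode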
Full list of functions
======================
This leads to the generation of 376 atomic functions, as follows.

Functions for integers
---------------------
There are versions here for integers of size 1, 2, 4 and 8 bytes both signed
and unsigned (where that matters).
@code
__kmpc_atomic_fixed1_add
__kmpc_atomic_fixed1_add_cpt
__kmpc_atomic_fixed1_add_fp
__kmpc_atomic_fixed1_andb
__kmpc_atomic_fixed1_andb_cpt
__kmpc_atomic_fixed1_andl
__kmpc_atomic_fixed1_andl_cpt
__kmpc_atomic_fixed1_div
__kmpc_atomic_fixed1_div_cpt
__kmpc_atomic_fixed1_div_cpt_rev
__kmpc_atomic_fixed1_div_float8
__kmpc_atomic_fixed1_div_fp
__kmpc_atomic_fixed1_div_rev
__kmpc_atomic_fixed1_eqv
__kmpc_atomic_fixed1_eqv_cpt
__kmpc_atomic_fixed1_max
__kmpc_atomic_fixed1_max_cpt
__kmpc_atomic_fixed1_min
__kmpc_atomic_fixed1_min_cpt
__kmpc_atomic_fixed1_mul
__kmpc_atomic_fixed1_mul_cpt
__kmpc_atomic_fixed1_mul_float8
__kmpc_atomic_fixed1_mul_fp
__kmpc_atomic_fixed1_neqv
__kmpc_atomic_fixed1_neqv_cpt
__kmpc_atomic_fixed1_orb
__kmpc_atomic_fixed1_orb_cpt
__kmpc_atomic_fixed1_orl
__kmpc_atomic_fixed1_orl_cpt
__kmpc_atomic_fixed1_rd
__kmpc_atomic_fixed1_shl
__kmpc_atomic_fixed1_shl_cpt
__kmpc_atomic_fixed1_shl_cpt_rev
__kmpc_atomic_fixed1_shl_rev
__kmpc_atomic_fixed1_shr
__kmpc_atomic_fixed1_shr_cpt
__kmpc_atomic_fixed1_shr_cpt_rev
__kmpc_atomic_fixed1_shr_rev
__kmpc_atomic_fixed1_sub
__kmpc_atomic_fixed1_sub_cpt
__kmpc_atomic_fixed1_sub_cpt_rev
__kmpc_atomic_fixed1_sub_fp
__kmpc_atomic_fixed1_sub_rev
__kmpc_atomic_fixed1_swp
__kmpc_atomic_fixed1_wr
__kmpc_atomic_fixed1_xor
__kmpc_atomic_fixed1_xor_cpt
__kmpc_atomic_fixed1u_add_fp
__kmpc_atomic_fixed1u_sub_fp
__kmpc_atomic_fixed1u_mul_fp
__kmpc_atomic_fixed1u_div
__kmpc_atomic_fixed1u_div_cpt
__kmpc_atomic_fixed1u_div_cpt_rev
__kmpc_atomic_fixed1u_div_fp
__kmpc_atomic_fixed1u_div_rev
__kmpc_atomic_fixed1u_shr
__kmpc_atomic_fixed1u_shr_cpt
__kmpc_atomic_fixed1u_shr_cpt_rev
__kmpc_atomic_fixed1u_shr_rev
__kmpc_atomic_fixed2_add
__kmpc_atomic_fixed2_add_cpt
__kmpc_atomic_fixed2_add_fp
__kmpc_atomic_fixed2_andb
__kmpc_atomic_fixed2_andb_cpt
__kmpc_atomic_fixed2_andl
__kmpc_atomic_fixed2_andl_cpt
__kmpc_atomic_fixed2_div
__kmpc_atomic_fixed2_div_cpt
__kmpc_atomic_fixed2_div_cpt_rev
__kmpc_atomic_fixed2_div_float8
__kmpc_atomic_fixed2_div_fp
__kmpc_atomic_fixed2_div_rev
__kmpc_atomic_fixed2_eqv
__kmpc_atomic_fixed2_eqv_cpt
__kmpc_atomic_fixed2_max
__kmpc_atomic_fixed2_max_cpt
__kmpc_atomic_fixed2_min
__kmpc_atomic_fixed2_min_cpt
__kmpc_atomic_fixed2_mul
__kmpc_atomic_fixed2_mul_cpt
__kmpc_atomic_fixed2_mul_float8
__kmpc_atomic_fixed2_mul_fp
__kmpc_atomic_fixed2_neqv
__kmpc_atomic_fixed2_neqv_cpt
__kmpc_atomic_fixed2_orb
__kmpc_atomic_fixed2_orb_cpt
__kmpc_atomic_fixed2_orl
__kmpc_atomic_fixed2_orl_cpt
__kmpc_atomic_fixed2_rd
__kmpc_atomic_fixed2_shl
__kmpc_atomic_fixed2_shl_cpt
__kmpc_atomic_fixed2_shl_cpt_rev
__kmpc_atomic_fixed2_shl_rev
__kmpc_atomic_fixed2_shr
__kmpc_atomic_fixed2_shr_cpt
__kmpc_atomic_fixed2_shr_cpt_rev
__kmpc_atomic_fixed2_shr_rev
__kmpc_atomic_fixed2_sub
__kmpc_atomic_fixed2_sub_cpt
__kmpc_atomic_fixed2_sub_cpt_rev
__kmpc_atomic_fixed2_sub_fp
__kmpc_atomic_fixed2_sub_rev
__kmpc_atomic_fixed2_swp
__kmpc_atomic_fixed2_wr
__kmpc_atomic_fixed2_xor
__kmpc_atomic_fixed2_xor_cpt
__kmpc_atomic_fixed2u_add_fp
__kmpc_atomic_fixed2u_sub_fp
__kmpc_atomic_fixed2u_mul_fp
__kmpc_atomic_fixed2u_div
__kmpc_atomic_fixed2u_div_cpt
__kmpc_atomic_fixed2u_div_cpt_rev
__kmpc_atomic_fixed2u_div_fp
__kmpc_atomic_fixed2u_div_rev
__kmpc_atomic_fixed2u_shr
__kmpc_atomic_fixed2u_shr_cpt
__kmpc_atomic_fixed2u_shr_cpt_rev
__kmpc_atomic_fixed2u_shr_rev
__kmpc_atomic_fixed4_add
__kmpc_atomic_fixed4_add_cpt
__kmpc_atomic_fixed4_add_fp
__kmpc_atomic_fixed4_andb
__kmpc_atomic_fixed4_andb_cpt
__kmpc_atomic_fixed4_andl
__kmpc_atomic_fixed4_andl_cpt
__kmpc_atomic_fixed4_div
__kmpc_atomic_fixed4_div_cpt
__kmpc_atomic_fixed4_div_cpt_rev
__kmpc_atomic_fixed4_div_float8
__kmpc_atomic_fixed4_div_fp
__kmpc_atomic_fixed4_div_rev
__kmpc_atomic_fixed4_eqv
__kmpc_atomic_fixed4_eqv_cpt
__kmpc_atomic_fixed4_max
__kmpc_atomic_fixed4_max_cpt
__kmpc_atomic_fixed4_min
__kmpc_atomic_fixed4_min_cpt
__kmpc_atomic_fixed4_mul
__kmpc_atomic_fixed4_mul_cpt
__kmpc_atomic_fixed4_mul_float8
__kmpc_atomic_fixed4_mul_fp
__kmpc_atomic_fixed4_neqv
__kmpc_atomic_fixed4_neqv_cpt
__kmpc_atomic_fixed4_orb
__kmpc_atomic_fixed4_orb_cpt
__kmpc_atomic_fixed4_orl
__kmpc_atomic_fixed4_orl_cpt
__kmpc_atomic_fixed4_rd
__kmpc_atomic_fixed4_shl
__kmpc_atomic_fixed4_shl_cpt
__kmpc_atomic_fixed4_shl_cpt_rev
__kmpc_atomic_fixed4_shl_rev
__kmpc_atomic_fixed4_shr
__kmpc_atomic_fixed4_shr_cpt
__kmpc_atomic_fixed4_shr_cpt_rev
__kmpc_atomic_fixed4_shr_rev
__kmpc_atomic_fixed4_sub
__kmpc_atomic_fixed4_sub_cpt
__kmpc_atomic_fixed4_sub_cpt_rev
__kmpc_atomic_fixed4_sub_fp
__kmpc_atomic_fixed4_sub_rev
__kmpc_atomic_fixed4_swp
__kmpc_atomic_fixed4_wr
__kmpc_atomic_fixed4_xor
__kmpc_atomic_fixed4_xor_cpt
__kmpc_atomic_fixed4u_add_fp
__kmpc_atomic_fixed4u_sub_fp
__kmpc_atomic_fixed4u_mul_fp
__kmpc_atomic_fixed4u_div
__kmpc_atomic_fixed4u_div_cpt
__kmpc_atomic_fixed4u_div_cpt_rev
__kmpc_atomic_fixed4u_div_fp
__kmpc_atomic_fixed4u_div_rev
__kmpc_atomic_fixed4u_shr
__kmpc_atomic_fixed4u_shr_cpt
__kmpc_atomic_fixed4u_shr_cpt_rev
__kmpc_atomic_fixed4u_shr_rev
__kmpc_atomic_fixed8_add
__kmpc_atomic_fixed8_add_cpt
__kmpc_atomic_fixed8_add_fp
__kmpc_atomic_fixed8_andb
__kmpc_atomic_fixed8_andb_cpt
__kmpc_atomic_fixed8_andl
__kmpc_atomic_fixed8_andl_cpt
__kmpc_atomic_fixed8_div
__kmpc_atomic_fixed8_div_cpt
__kmpc_atomic_fixed8_div_cpt_rev
__kmpc_atomic_fixed8_div_float8
__kmpc_atomic_fixed8_div_fp
__kmpc_atomic_fixed8_div_rev
__kmpc_atomic_fixed8_eqv
__kmpc_atomic_fixed8_eqv_cpt
__kmpc_atomic_fixed8_max
__kmpc_atomic_fixed8_max_cpt
__kmpc_atomic_fixed8_min
__kmpc_atomic_fixed8_min_cpt
__kmpc_atomic_fixed8_mul
__kmpc_atomic_fixed8_mul_cpt
__kmpc_atomic_fixed8_mul_float8
__kmpc_atomic_fixed8_mul_fp
__kmpc_atomic_fixed8_neqv
__kmpc_atomic_fixed8_neqv_cpt
__kmpc_atomic_fixed8_orb
__kmpc_atomic_fixed8_orb_cpt
__kmpc_atomic_fixed8_orl
__kmpc_atomic_fixed8_orl_cpt
__kmpc_atomic_fixed8_rd
__kmpc_atomic_fixed8_shl
__kmpc_atomic_fixed8_shl_cpt
__kmpc_atomic_fixed8_shl_cpt_rev
__kmpc_atomic_fixed8_shl_rev
__kmpc_atomic_fixed8_shr
__kmpc_atomic_fixed8_shr_cpt
__kmpc_atomic_fixed8_shr_cpt_rev
__kmpc_atomic_fixed8_shr_rev
__kmpc_atomic_fixed8_sub
__kmpc_atomic_fixed8_sub_cpt
__kmpc_atomic_fixed8_sub_cpt_rev
__kmpc_atomic_fixed8_sub_fp
__kmpc_atomic_fixed8_sub_rev
__kmpc_atomic_fixed8_swp
__kmpc_atomic_fixed8_wr
__kmpc_atomic_fixed8_xor
__kmpc_atomic_fixed8_xor_cpt
__kmpc_atomic_fixed8u_add_fp
__kmpc_atomic_fixed8u_sub_fp
__kmpc_atomic_fixed8u_mul_fp
__kmpc_atomic_fixed8u_div
__kmpc_atomic_fixed8u_div_cpt
__kmpc_atomic_fixed8u_div_cpt_rev
__kmpc_atomic_fixed8u_div_fp
__kmpc_atomic_fixed8u_div_rev
__kmpc_atomic_fixed8u_shr
__kmpc_atomic_fixed8u_shr_cpt
__kmpc_atomic_fixed8u_shr_cpt_rev
__kmpc_atomic_fixed8u_shr_rev
@endcode

Functions for floating point
----------------------------
There are versions here for floating point numbers of size 4, 8, 10 and 16
bytes. (Ten byte floats are used by X87, but are now rare).
@code
__kmpc_atomic_float4_add
__kmpc_atomic_float4_add_cpt
__kmpc_atomic_float4_add_float8
__kmpc_atomic_float4_add_fp
__kmpc_atomic_float4_div
__kmpc_atomic_float4_div_cpt
__kmpc_atomic_float4_div_cpt_rev
__kmpc_atomic_float4_div_float8
__kmpc_atomic_float4_div_fp
__kmpc_atomic_float4_div_rev
__kmpc_atomic_float4_max
__kmpc_atomic_float4_max_cpt
__kmpc_atomic_float4_min
__kmpc_atomic_float4_min_cpt
__kmpc_atomic_float4_mul
__kmpc_atomic_float4_mul_cpt
__kmpc_atomic_float4_mul_float8
__kmpc_atomic_float4_mul_fp
__kmpc_atomic_float4_rd
__kmpc_atomic_float4_sub
__kmpc_atomic_float4_sub_cpt
__kmpc_atomic_float4_sub_cpt_rev
__kmpc_atomic_float4_sub_float8
__kmpc_atomic_float4_sub_fp
__kmpc_atomic_float4_sub_rev
__kmpc_atomic_float4_swp
__kmpc_atomic_float4_wr
__kmpc_atomic_float8_add
__kmpc_atomic_float8_add_cpt
__kmpc_atomic_float8_add_fp
__kmpc_atomic_float8_div
__kmpc_atomic_float8_div_cpt
__kmpc_atomic_float8_div_cpt_rev
__kmpc_atomic_float8_div_fp
__kmpc_atomic_float8_div_rev
__kmpc_atomic_float8_max
__kmpc_atomic_float8_max_cpt
__kmpc_atomic_float8_min
__kmpc_atomic_float8_min_cpt
__kmpc_atomic_float8_mul
__kmpc_atomic_float8_mul_cpt
__kmpc_atomic_float8_mul_fp
__kmpc_atomic_float8_rd
__kmpc_atomic_float8_sub
__kmpc_atomic_float8_sub_cpt
__kmpc_atomic_float8_sub_cpt_rev
__kmpc_atomic_float8_sub_fp
__kmpc_atomic_float8_sub_rev
__kmpc_atomic_float8_swp
__kmpc_atomic_float8_wr
__kmpc_atomic_float10_add
__kmpc_atomic_float10_add_cpt
__kmpc_atomic_float10_add_fp
__kmpc_atomic_float10_div
__kmpc_atomic_float10_div_cpt
__kmpc_atomic_float10_div_cpt_rev
__kmpc_atomic_float10_div_fp
__kmpc_atomic_float10_div_rev
__kmpc_atomic_float10_mul
__kmpc_atomic_float10_mul_cpt
__kmpc_atomic_float10_mul_fp
__kmpc_atomic_float10_rd
__kmpc_atomic_float10_sub
__kmpc_atomic_float10_sub_cpt
__kmpc_atomic_float10_sub_cpt_rev
__kmpc_atomic_float10_sub_fp
__kmpc_atomic_float10_sub_rev
__kmpc_atomic_float10_swp
__kmpc_atomic_float10_wr
__kmpc_atomic_float16_add
__kmpc_atomic_float16_add_cpt
__kmpc_atomic_float16_div
__kmpc_atomic_float16_div_cpt
__kmpc_atomic_float16_div_cpt_rev
__kmpc_atomic_float16_div_rev
__kmpc_atomic_float16_max
__kmpc_atomic_float16_max_cpt
__kmpc_atomic_float16_min
__kmpc_atomic_float16_min_cpt
__kmpc_atomic_float16_mul
__kmpc_atomic_float16_mul_cpt
__kmpc_atomic_float16_rd
__kmpc_atomic_float16_sub
__kmpc_atomic_float16_sub_cpt
__kmpc_atomic_float16_sub_cpt_rev
__kmpc_atomic_float16_sub_rev
__kmpc_atomic_float16_swp
__kmpc_atomic_float16_wr
@endcode

Functions for Complex types
---------------------------
Functions for complex types whose component floating point variables are of
size 4, 8, 10 or 16 bytes. The names here are based on the size of the
component float, *not* the size of the complex type.
So `__kmpc_atomic_cmplx8_add` is an operation on a `complex<double>` or
`complex(kind=8)`, *not* `complex<float>`.

@code
__kmpc_atomic_cmplx4_add
__kmpc_atomic_cmplx4_add_cmplx8
__kmpc_atomic_cmplx4_add_cpt
__kmpc_atomic_cmplx4_div
__kmpc_atomic_cmplx4_div_cmplx8
__kmpc_atomic_cmplx4_div_cpt
__kmpc_atomic_cmplx4_div_cpt_rev
__kmpc_atomic_cmplx4_div_rev
__kmpc_atomic_cmplx4_mul
__kmpc_atomic_cmplx4_mul_cmplx8
__kmpc_atomic_cmplx4_mul_cpt
__kmpc_atomic_cmplx4_rd
__kmpc_atomic_cmplx4_sub
__kmpc_atomic_cmplx4_sub_cmplx8
__kmpc_atomic_cmplx4_sub_cpt
__kmpc_atomic_cmplx4_sub_cpt_rev
__kmpc_atomic_cmplx4_sub_rev
__kmpc_atomic_cmplx4_swp
__kmpc_atomic_cmplx4_wr
__kmpc_atomic_cmplx8_add
__kmpc_atomic_cmplx8_add_cpt
__kmpc_atomic_cmplx8_div
__kmpc_atomic_cmplx8_div_cpt
__kmpc_atomic_cmplx8_div_cpt_rev
__kmpc_atomic_cmplx8_div_rev
__kmpc_atomic_cmplx8_mul
__kmpc_atomic_cmplx8_mul_cpt
__kmpc_atomic_cmplx8_rd
__kmpc_atomic_cmplx8_sub
__kmpc_atomic_cmplx8_sub_cpt
__kmpc_atomic_cmplx8_sub_cpt_rev
__kmpc_atomic_cmplx8_sub_rev
__kmpc_atomic_cmplx8_swp
__kmpc_atomic_cmplx8_wr
__kmpc_atomic_cmplx10_add
__kmpc_atomic_cmplx10_add_cpt
__kmpc_atomic_cmplx10_div
__kmpc_atomic_cmplx10_div_cpt
__kmpc_atomic_cmplx10_div_cpt_rev
__kmpc_atomic_cmplx10_div_rev
__kmpc_atomic_cmplx10_mul
__kmpc_atomic_cmplx10_mul_cpt
__kmpc_atomic_cmplx10_rd
__kmpc_atomic_cmplx10_sub
__kmpc_atomic_cmplx10_sub_cpt
__kmpc_atomic_cmplx10_sub_cpt_rev
__kmpc_atomic_cmplx10_sub_rev
__kmpc_atomic_cmplx10_swp
__kmpc_atomic_cmplx10_wr
__kmpc_atomic_cmplx16_add
__kmpc_atomic_cmplx16_add_cpt
__kmpc_atomic_cmplx16_div
__kmpc_atomic_cmplx16_div_cpt
__kmpc_atomic_cmplx16_div_cpt_rev
__kmpc_atomic_cmplx16_div_rev
__kmpc_atomic_cmplx16_mul
__kmpc_atomic_cmplx16_mul_cpt
__kmpc_atomic_cmplx16_rd
__kmpc_atomic_cmplx16_sub
__kmpc_atomic_cmplx16_sub_cpt
__kmpc_atomic_cmplx16_sub_cpt_rev
__kmpc_atomic_cmplx16_swp
__kmpc_atomic_cmplx16_wr
@endcode
*/

/*!
@ingroup ATOMIC_OPS
@{
*/

/*
 * Global vars
 */

#ifndef KMP_GOMP_COMPAT
int __kmp_atomic_mode = 1; // Intel perf
#else
int __kmp_atomic_mode = 2; // GOMP compatibility
#endif /* KMP_GOMP_COMPAT */

KMP_ALIGN(128)

// Control access to all user coded atomics in Gnu compat mode
kmp_atomic_lock_t __kmp_atomic_lock;
// Control access to all user coded atomics for 1-byte fixed data types
kmp_atomic_lock_t __kmp_atomic_lock_1i;
// Control access to all user coded atomics for 2-byte fixed data types
kmp_atomic_lock_t __kmp_atomic_lock_2i;
// Control access to all user coded atomics for 4-byte fixed data types
kmp_atomic_lock_t __kmp_atomic_lock_4i;
// Control access to all user coded atomics for kmp_real32 data type
kmp_atomic_lock_t __kmp_atomic_lock_4r;
// Control access to all user coded atomics for 8-byte fixed data types
kmp_atomic_lock_t __kmp_atomic_lock_8i;
// Control access to all user coded atomics for kmp_real64 data type
kmp_atomic_lock_t __kmp_atomic_lock_8r;
// Control access to all user coded atomics for complex byte data type
kmp_atomic_lock_t __kmp_atomic_lock_8c;
// Control access to all user coded atomics for long double data type
kmp_atomic_lock_t __kmp_atomic_lock_10r;
// Control access to all user coded atomics for _Quad data type
kmp_atomic_lock_t __kmp_atomic_lock_16r;
// Control access to all user coded atomics for double complex data type
kmp_atomic_lock_t __kmp_atomic_lock_16c;
// Control access to all user coded atomics for long double complex type
kmp_atomic_lock_t __kmp_atomic_lock_20c;
// Control access to all user coded atomics for _Quad complex data type
kmp_atomic_lock_t __kmp_atomic_lock_32c;

/* 2007-03-02:
   Without "volatile" specifier in OP_CMPXCHG and MIN_MAX_CMPXCHG we have a bug
   on *_32 and *_32e. This is just a temporary workaround for the problem. It
   seems the right solution is writing OP_CMPXCHG and MIN_MAX_CMPXCHG routines
   in assembler language. */
#define KMP_ATOMIC_VOLATILE volatile

#if (KMP_ARCH_X86) && KMP_HAVE_QUAD

static inline Quad_a4_t operator+(Quad_a4_t &lhs, Quad_a4_t &rhs) {
  return lhs.q + rhs.q;
}
static inline Quad_a4_t operator-(Quad_a4_t &lhs, Quad_a4_t &rhs) {
  return lhs.q - rhs.q;
}
static inline Quad_a4_t operator*(Quad_a4_t &lhs, Quad_a4_t &rhs) {
  return lhs.q * rhs.q;
}
static inline Quad_a4_t operator/(Quad_a4_t &lhs, Quad_a4_t &rhs) {
  return lhs.q / rhs.q;
}
static inline bool operator<(Quad_a4_t &lhs, Quad_a4_t &rhs) {
  return lhs.q < rhs.q;
}
static inline bool operator>(Quad_a4_t &lhs, Quad_a4_t &rhs) {
  return lhs.q > rhs.q;
}

static inline Quad_a16_t operator+(Quad_a16_t &lhs, Quad_a16_t &rhs) {
  return lhs.q + rhs.q;
}
static inline Quad_a16_t operator-(Quad_a16_t &lhs, Quad_a16_t &rhs) {
  return lhs.q - rhs.q;
}
static inline Quad_a16_t operator*(Quad_a16_t &lhs, Quad_a16_t &rhs) {
  return lhs.q * rhs.q;
}
static inline Quad_a16_t operator/(Quad_a16_t &lhs, Quad_a16_t &rhs) {
  return lhs.q / rhs.q;
}
static inline bool operator<(Quad_a16_t &lhs, Quad_a16_t &rhs) {
  return lhs.q < rhs.q;
}
static inline bool operator>(Quad_a16_t &lhs, Quad_a16_t &rhs) {
  return lhs.q > rhs.q;
}

static inline kmp_cmplx128_a4_t operator+(kmp_cmplx128_a4_t &lhs,
                                          kmp_cmplx128_a4_t &rhs) {
  return lhs.q + rhs.q;
}
static inline kmp_cmplx128_a4_t operator-(kmp_cmplx128_a4_t &lhs,
                                          kmp_cmplx128_a4_t &rhs) {
  return lhs.q - rhs.q;
}
static inline kmp_cmplx128_a4_t operator*(kmp_cmplx128_a4_t &lhs,
                                          kmp_cmplx128_a4_t &rhs) {
  return lhs.q * rhs.q;
}
static inline kmp_cmplx128_a4_t operator/(kmp_cmplx128_a4_t &lhs,
                                          kmp_cmplx128_a4_t &rhs) {
  return lhs.q / rhs.q;
}

static inline kmp_cmplx128_a16_t operator+(kmp_cmplx128_a16_t &lhs,
                                           kmp_cmplx128_a16_t &rhs) {
  return lhs.q + rhs.q;
}
static inline kmp_cmplx128_a16_t operator-(kmp_cmplx128_a16_t &lhs,
                                           kmp_cmplx128_a16_t &rhs) {
  return lhs.q - rhs.q;
}
static inline kmp_cmplx128_a16_t operator*(kmp_cmplx128_a16_t &lhs,
                                           kmp_cmplx128_a16_t &rhs) {
  return lhs.q * rhs.q;
}
static inline kmp_cmplx128_a16_t operator/(kmp_cmplx128_a16_t &lhs,
                                           kmp_cmplx128_a16_t &rhs) {
  return lhs.q / rhs.q;
}

#endif // (KMP_ARCH_X86) && KMP_HAVE_QUAD

// ATOMIC implementation routines -----------------------------------------
// One routine for each operation and operand type.
// All routine declarations look like
// void __kmpc_atomic_RTYPE_OP( ident_t*, int, TYPE *lhs, TYPE rhs );

#define KMP_CHECK_GTID \
  if (gtid == KMP_GTID_UNKNOWN) { \
    gtid = __kmp_entry_gtid(); \
  } // check and get gtid when needed

// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned
//           fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
#define ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, RET_TYPE) \
  RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID(ident_t *id_ref, int gtid, \
                                             TYPE *lhs, TYPE rhs) { \
    KMP_DEBUG_ASSERT(__kmp_init_serial); \
    KA_TRACE(100, ("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid));

// ------------------------------------------------------------------------
// Lock variables used for critical sections for various size operands
#define ATOMIC_LOCK0 __kmp_atomic_lock // all types, for Gnu compat
#define ATOMIC_LOCK1i __kmp_atomic_lock_1i // char
#define ATOMIC_LOCK2i __kmp_atomic_lock_2i // short
#define ATOMIC_LOCK4i __kmp_atomic_lock_4i // long int
#define ATOMIC_LOCK4r __kmp_atomic_lock_4r // float
#define ATOMIC_LOCK8i __kmp_atomic_lock_8i // long long int
#define ATOMIC_LOCK8r __kmp_atomic_lock_8r // double
#define ATOMIC_LOCK8c __kmp_atomic_lock_8c // float complex
#define ATOMIC_LOCK10r __kmp_atomic_lock_10r // long double
#define ATOMIC_LOCK16r __kmp_atomic_lock_16r // _Quad
#define ATOMIC_LOCK16c __kmp_atomic_lock_16c // double complex
#define ATOMIC_LOCK20c __kmp_atomic_lock_20c // long double complex
#define ATOMIC_LOCK32c __kmp_atomic_lock_32c // _Quad complex

// ------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL(OP, LCK_ID) \
  __kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \
 \
  (*lhs) OP(rhs); \
 \
  __kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid);

#define OP_UPDATE_CRITICAL(TYPE, OP, LCK_ID) \
  __kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \
  (*lhs) = (TYPE)((*lhs)OP rhs); \
  __kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid);

// ------------------------------------------------------------------------
// For GNU compatibility, we may need to use a critical section,
// even though it is not required by the ISA.
//
// On IA-32 architecture, all atomic operations except for fixed 4 byte add,
// sub, and bitwise logical ops, and 1 & 2 byte logical ops use a common
// critical section. On Intel(R) 64, all atomic operations are done with fetch
// and add or compare and exchange. Therefore, the FLAG parameter to this
// macro is either KMP_ARCH_X86 or 0 (or 1, for Intel-specific extensions which
// require a critical section, where we predict that they will be implemented
// in the Gnu codegen by calling GOMP_atomic_start() / GOMP_atomic_end()).
//
// When the OP_GOMP_CRITICAL macro is used in a *CRITICAL* macro construct,
// the FLAG parameter should always be 1. If we know that we will be using
// a critical section, then we want to make certain that we use the generic
// lock __kmp_atomic_lock to protect the atomic update, and not one of the
// locks that are specialized based upon the size or type of the data.
//
// If FLAG is 0, then we are relying on dead code elimination by the build
// compiler to get rid of the useless block of code, and save a needless
// branch at runtime.

#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL(OP, FLAG) \
  if ((FLAG) && (__kmp_atomic_mode == 2)) { \
    KMP_CHECK_GTID; \
    OP_CRITICAL(OP, 0); \
    return; \
  }

#define OP_UPDATE_GOMP_CRITICAL(TYPE, OP, FLAG) \
  if ((FLAG) && (__kmp_atomic_mode == 2)) { \
    KMP_CHECK_GTID; \
    OP_UPDATE_CRITICAL(TYPE, OP, 0); \
    return; \
  }
#else
#define OP_GOMP_CRITICAL(OP, FLAG)
#define OP_UPDATE_GOMP_CRITICAL(TYPE, OP, FLAG)
#endif /* KMP_GOMP_COMPAT */

#if KMP_MIC
#define KMP_DO_PAUSE _mm_delay_32(1)
#else
#define KMP_DO_PAUSE
#endif /* KMP_MIC */

// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
#define OP_CMPXCHG(TYPE, BITS, OP) \
  { \
    TYPE old_value, new_value; \
    old_value = *(TYPE volatile *)lhs; \
    new_value = (TYPE)(old_value OP rhs); \
    while (!KMP_COMPARE_AND_STORE_ACQ##BITS( \
        (kmp_int##BITS *)lhs, *VOLATILE_CAST(kmp_int##BITS *) & old_value, \
        *VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \
      KMP_DO_PAUSE; \
 \
      old_value = *(TYPE volatile *)lhs; \
      new_value = (TYPE)(old_value OP rhs); \
    } \
  }

#if USE_CMPXCHG_FIX
// 2007-06-25:
// workaround for C78287 (complex(kind=4) data type). lin_32, lin_32e, win_32
// and win_32e are affected (I verified the asm). Compiler ignores the volatile
// qualifier of the temp_val in the OP_CMPXCHG macro. This is a problem of the
// compiler. Related tracker is C76005, targeted to 11.0. I verified the asm of
// the workaround.
#define OP_CMPXCHG_WORKAROUND(TYPE, BITS, OP) \
  { \
    struct _sss { \
      TYPE cmp; \
      kmp_int##BITS *vvv; \
    }; \
    struct _sss old_value, new_value; \
    old_value.vvv = (kmp_int##BITS *)&old_value.cmp; \
    new_value.vvv = (kmp_int##BITS *)&new_value.cmp; \
    *old_value.vvv = *(volatile kmp_int##BITS *)lhs; \
    new_value.cmp = (TYPE)(old_value.cmp OP rhs); \
    while (!KMP_COMPARE_AND_STORE_ACQ##BITS( \
        (kmp_int##BITS *)lhs, *VOLATILE_CAST(kmp_int##BITS *) old_value.vvv, \
        *VOLATILE_CAST(kmp_int##BITS *) new_value.vvv)) { \
      KMP_DO_PAUSE; \
 \
      *old_value.vvv = *(volatile kmp_int##BITS *)lhs; \
      new_value.cmp = (TYPE)(old_value.cmp OP rhs); \
    } \
  }
// end of the first part of the workaround for C78287
#endif // USE_CMPXCHG_FIX
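// Illustrative sketch: for a concrete instantiation such as
// OP_CMPXCHG(kmp_real32, 32, *) (the body used by __kmpc_atomic_float4_mul),
// the macro above expands to roughly the following retry loop; lhs/rhs come
// from the enclosing ATOMIC_BEGIN parameter list and VOLATILE_CAST is
// simplified to a plain cast here:
//
//   kmp_real32 old_value, new_value;
//   old_value = *(kmp_real32 volatile *)lhs;
//   new_value = old_value * rhs;
//   // Retry until no other thread has changed *lhs between the read of
//   // old_value and this compare-and-store of new_value.
//   while (!KMP_COMPARE_AND_STORE_ACQ32((kmp_int32 *)lhs,
//                                       *(kmp_int32 *)&old_value,
//                                       *(kmp_int32 *)&new_value)) {
//     KMP_DO_PAUSE;
//     old_value = *(kmp_real32 volatile *)lhs;
//     new_value = old_value * rhs;
//   }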
Uses836// OP_CMPXCHG_WORKAROUND definition for OP_CMPXCHG837#undef OP_CMPXCHG838#define OP_CMPXCHG(TYPE, BITS, OP) \839{ \840struct _sss { \841TYPE cmp; \842kmp_int##BITS *vvv; \843}; \844struct _sss old_value, new_value; \845old_value.vvv = (kmp_int##BITS *)&old_value.cmp; \846new_value.vvv = (kmp_int##BITS *)&new_value.cmp; \847*old_value.vvv = *(volatile kmp_int##BITS *)lhs; \848new_value.cmp = old_value.cmp OP rhs; \849while (!KMP_COMPARE_AND_STORE_ACQ##BITS( \850(kmp_int##BITS *)lhs, *VOLATILE_CAST(kmp_int##BITS *) old_value.vvv, \851*VOLATILE_CAST(kmp_int##BITS *) new_value.vvv)) { \852KMP_DO_PAUSE; \853\854*old_value.vvv = *(volatile kmp_int##BITS *)lhs; \855new_value.cmp = old_value.cmp OP rhs; \856} \857}858859#undef OP_UPDATE_CRITICAL860#define OP_UPDATE_CRITICAL(TYPE, OP, LCK_ID) \861__kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \862(*lhs) = (*lhs)OP rhs; \863__kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid);864865#endif // KMP_OS_WINDOWS && (KMP_ARCH_AARCH64 || KMP_ARCH_ARM)866867#if KMP_ARCH_X86 || KMP_ARCH_X86_64868869// ------------------------------------------------------------------------870// X86 or X86_64: no alignment problems ====================================871#define ATOMIC_FIXED_ADD(TYPE_ID, OP_ID, TYPE, BITS, OP, LCK_ID, MASK, \872GOMP_FLAG) \873ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \874OP_UPDATE_GOMP_CRITICAL(TYPE, OP, GOMP_FLAG) \875/* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \876KMP_TEST_THEN_ADD##BITS(lhs, OP rhs); \877}878// -------------------------------------------------------------------------879#define ATOMIC_CMPXCHG(TYPE_ID, OP_ID, TYPE, BITS, OP, LCK_ID, MASK, \880GOMP_FLAG) \881ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \882OP_UPDATE_GOMP_CRITICAL(TYPE, OP, GOMP_FLAG) \883OP_CMPXCHG(TYPE, BITS, OP) \884}885#if USE_CMPXCHG_FIX886// -------------------------------------------------------------------------887// workaround for C78287 (complex(kind=4) data type)888#define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID, OP_ID, TYPE, BITS, OP, LCK_ID, \889MASK, GOMP_FLAG) \890ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \891OP_UPDATE_GOMP_CRITICAL(TYPE, OP, GOMP_FLAG) \892OP_CMPXCHG_WORKAROUND(TYPE, BITS, OP) \893}894// end of the second part of the workaround for C78287895#endif // USE_CMPXCHG_FIX896897#else898// -------------------------------------------------------------------------899// Code for other architectures that don't handle unaligned accesses.900#define ATOMIC_FIXED_ADD(TYPE_ID, OP_ID, TYPE, BITS, OP, LCK_ID, MASK, \901GOMP_FLAG) \902ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \903OP_UPDATE_GOMP_CRITICAL(TYPE, OP, GOMP_FLAG) \904if (!((kmp_uintptr_t)lhs & 0x##MASK)) { \905/* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \906KMP_TEST_THEN_ADD##BITS(lhs, OP rhs); \907} else { \908KMP_CHECK_GTID; \909OP_UPDATE_CRITICAL(TYPE, OP, \910LCK_ID) /* unaligned address - use critical */ \911} \912}913// -------------------------------------------------------------------------914#define ATOMIC_CMPXCHG(TYPE_ID, OP_ID, TYPE, BITS, OP, LCK_ID, MASK, \915GOMP_FLAG) \916ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \917OP_UPDATE_GOMP_CRITICAL(TYPE, OP, GOMP_FLAG) \918if (!((kmp_uintptr_t)lhs & 0x##MASK)) { \919OP_CMPXCHG(TYPE, BITS, OP) /* aligned address */ \920} else { \921KMP_CHECK_GTID; \922OP_UPDATE_CRITICAL(TYPE, OP, \923LCK_ID) /* unaligned address - use critical */ \924} \925}926#if USE_CMPXCHG_FIX927// -------------------------------------------------------------------------928// workaround for C78287 (complex(kind=4) 
data type)929#define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID, OP_ID, TYPE, BITS, OP, LCK_ID, \930MASK, GOMP_FLAG) \931ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \932OP_UPDATE_GOMP_CRITICAL(TYPE, OP, GOMP_FLAG) \933if (!((kmp_uintptr_t)lhs & 0x##MASK)) { \934OP_CMPXCHG(TYPE, BITS, OP) /* aligned address */ \935} else { \936KMP_CHECK_GTID; \937OP_UPDATE_CRITICAL(TYPE, OP, \938LCK_ID) /* unaligned address - use critical */ \939} \940}941// end of the second part of the workaround for C78287942#endif // USE_CMPXCHG_FIX943#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */944945// Routines for ATOMIC 4-byte operands addition and subtraction946ATOMIC_FIXED_ADD(fixed4, add, kmp_int32, 32, +, 4i, 3,9470) // __kmpc_atomic_fixed4_add948ATOMIC_FIXED_ADD(fixed4, sub, kmp_int32, 32, -, 4i, 3,9490) // __kmpc_atomic_fixed4_sub950951ATOMIC_CMPXCHG(float4, add, kmp_real32, 32, +, 4r, 3,952KMP_ARCH_X86) // __kmpc_atomic_float4_add953ATOMIC_CMPXCHG(float4, sub, kmp_real32, 32, -, 4r, 3,954KMP_ARCH_X86) // __kmpc_atomic_float4_sub955956// Routines for ATOMIC 8-byte operands addition and subtraction957ATOMIC_FIXED_ADD(fixed8, add, kmp_int64, 64, +, 8i, 7,958KMP_ARCH_X86) // __kmpc_atomic_fixed8_add959ATOMIC_FIXED_ADD(fixed8, sub, kmp_int64, 64, -, 8i, 7,960KMP_ARCH_X86) // __kmpc_atomic_fixed8_sub961962ATOMIC_CMPXCHG(float8, add, kmp_real64, 64, +, 8r, 7,963KMP_ARCH_X86) // __kmpc_atomic_float8_add964ATOMIC_CMPXCHG(float8, sub, kmp_real64, 64, -, 8r, 7,965KMP_ARCH_X86) // __kmpc_atomic_float8_sub966967// ------------------------------------------------------------------------968// Entries definition for integer operands969// TYPE_ID - operands type and size (fixed4, float4)970// OP_ID - operation identifier (add, sub, mul, ...)971// TYPE - operand type972// BITS - size in bits, used to distinguish low level calls973// OP - operator (used in critical section)974// LCK_ID - lock identifier, used to possibly distinguish lock variable975// MASK - used for alignment check976977// TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,MASK,GOMP_FLAG978// ------------------------------------------------------------------------979// Routines for ATOMIC integer operands, other operators980// ------------------------------------------------------------------------981// TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG982ATOMIC_CMPXCHG(fixed1, add, kmp_int8, 8, +, 1i, 0,983KMP_ARCH_X86) // __kmpc_atomic_fixed1_add984ATOMIC_CMPXCHG(fixed1, andb, kmp_int8, 8, &, 1i, 0,9850) // __kmpc_atomic_fixed1_andb986ATOMIC_CMPXCHG(fixed1, div, kmp_int8, 8, /, 1i, 0,987KMP_ARCH_X86) // __kmpc_atomic_fixed1_div988ATOMIC_CMPXCHG(fixed1u, div, kmp_uint8, 8, /, 1i, 0,989KMP_ARCH_X86) // __kmpc_atomic_fixed1u_div990ATOMIC_CMPXCHG(fixed1, mul, kmp_int8, 8, *, 1i, 0,991KMP_ARCH_X86) // __kmpc_atomic_fixed1_mul992ATOMIC_CMPXCHG(fixed1, orb, kmp_int8, 8, |, 1i, 0,9930) // __kmpc_atomic_fixed1_orb994ATOMIC_CMPXCHG(fixed1, shl, kmp_int8, 8, <<, 1i, 0,995KMP_ARCH_X86) // __kmpc_atomic_fixed1_shl996ATOMIC_CMPXCHG(fixed1, shr, kmp_int8, 8, >>, 1i, 0,997KMP_ARCH_X86) // __kmpc_atomic_fixed1_shr998ATOMIC_CMPXCHG(fixed1u, shr, kmp_uint8, 8, >>, 1i, 0,999KMP_ARCH_X86) // __kmpc_atomic_fixed1u_shr1000ATOMIC_CMPXCHG(fixed1, sub, kmp_int8, 8, -, 1i, 0,1001KMP_ARCH_X86) // __kmpc_atomic_fixed1_sub1002ATOMIC_CMPXCHG(fixed1, xor, kmp_int8, 8, ^, 1i, 0,10030) // __kmpc_atomic_fixed1_xor1004ATOMIC_CMPXCHG(fixed2, add, kmp_int16, 16, +, 2i, 1,1005KMP_ARCH_X86) // __kmpc_atomic_fixed2_add1006ATOMIC_CMPXCHG(fixed2, andb, kmp_int16, 16, &, 2i, 1,10070) // 
ATOMIC_CMPXCHG(fixed2, div, kmp_int16, 16, /, 2i, 1, KMP_ARCH_X86) // __kmpc_atomic_fixed2_div
ATOMIC_CMPXCHG(fixed2u, div, kmp_uint16, 16, /, 2i, 1, KMP_ARCH_X86) // __kmpc_atomic_fixed2u_div
ATOMIC_CMPXCHG(fixed2, mul, kmp_int16, 16, *, 2i, 1, KMP_ARCH_X86) // __kmpc_atomic_fixed2_mul
ATOMIC_CMPXCHG(fixed2, orb, kmp_int16, 16, |, 2i, 1, 0) // __kmpc_atomic_fixed2_orb
ATOMIC_CMPXCHG(fixed2, shl, kmp_int16, 16, <<, 2i, 1, KMP_ARCH_X86) // __kmpc_atomic_fixed2_shl
ATOMIC_CMPXCHG(fixed2, shr, kmp_int16, 16, >>, 2i, 1, KMP_ARCH_X86) // __kmpc_atomic_fixed2_shr
ATOMIC_CMPXCHG(fixed2u, shr, kmp_uint16, 16, >>, 2i, 1, KMP_ARCH_X86) // __kmpc_atomic_fixed2u_shr
ATOMIC_CMPXCHG(fixed2, sub, kmp_int16, 16, -, 2i, 1, KMP_ARCH_X86) // __kmpc_atomic_fixed2_sub
ATOMIC_CMPXCHG(fixed2, xor, kmp_int16, 16, ^, 2i, 1, 0) // __kmpc_atomic_fixed2_xor
ATOMIC_CMPXCHG(fixed4, andb, kmp_int32, 32, &, 4i, 3, 0) // __kmpc_atomic_fixed4_andb
ATOMIC_CMPXCHG(fixed4, div, kmp_int32, 32, /, 4i, 3, KMP_ARCH_X86) // __kmpc_atomic_fixed4_div
ATOMIC_CMPXCHG(fixed4u, div, kmp_uint32, 32, /, 4i, 3, KMP_ARCH_X86) // __kmpc_atomic_fixed4u_div
ATOMIC_CMPXCHG(fixed4, mul, kmp_int32, 32, *, 4i, 3, KMP_ARCH_X86) // __kmpc_atomic_fixed4_mul
ATOMIC_CMPXCHG(fixed4, orb, kmp_int32, 32, |, 4i, 3, 0) // __kmpc_atomic_fixed4_orb
ATOMIC_CMPXCHG(fixed4, shl, kmp_int32, 32, <<, 4i, 3, KMP_ARCH_X86) // __kmpc_atomic_fixed4_shl
ATOMIC_CMPXCHG(fixed4, shr, kmp_int32, 32, >>, 4i, 3, KMP_ARCH_X86) // __kmpc_atomic_fixed4_shr
ATOMIC_CMPXCHG(fixed4u, shr, kmp_uint32, 32, >>, 4i, 3, KMP_ARCH_X86) // __kmpc_atomic_fixed4u_shr
ATOMIC_CMPXCHG(fixed4, xor, kmp_int32, 32, ^, 4i, 3, 0) // __kmpc_atomic_fixed4_xor
ATOMIC_CMPXCHG(fixed8, andb, kmp_int64, 64, &, 8i, 7, KMP_ARCH_X86) // __kmpc_atomic_fixed8_andb
ATOMIC_CMPXCHG(fixed8, div, kmp_int64, 64, /, 8i, 7, KMP_ARCH_X86) // __kmpc_atomic_fixed8_div
ATOMIC_CMPXCHG(fixed8u, div, kmp_uint64, 64, /, 8i, 7, KMP_ARCH_X86) // __kmpc_atomic_fixed8u_div
ATOMIC_CMPXCHG(fixed8, mul, kmp_int64, 64, *, 8i, 7, KMP_ARCH_X86) // __kmpc_atomic_fixed8_mul
ATOMIC_CMPXCHG(fixed8, orb, kmp_int64, 64, |, 8i, 7, KMP_ARCH_X86) // __kmpc_atomic_fixed8_orb
ATOMIC_CMPXCHG(fixed8, shl, kmp_int64, 64, <<, 8i, 7, KMP_ARCH_X86) // __kmpc_atomic_fixed8_shl
ATOMIC_CMPXCHG(fixed8, shr, kmp_int64, 64, >>, 8i, 7, KMP_ARCH_X86) // __kmpc_atomic_fixed8_shr
ATOMIC_CMPXCHG(fixed8u, shr, kmp_uint64, 64, >>, 8i, 7, KMP_ARCH_X86) // __kmpc_atomic_fixed8u_shr
ATOMIC_CMPXCHG(fixed8, xor, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86) // __kmpc_atomic_fixed8_xor
ATOMIC_CMPXCHG(float4, div, kmp_real32, 32, /, 4r, 3, KMP_ARCH_X86) // __kmpc_atomic_float4_div
ATOMIC_CMPXCHG(float4, mul, kmp_real32, 32, *, 4r, 3, KMP_ARCH_X86) // __kmpc_atomic_float4_mul
ATOMIC_CMPXCHG(float8, div, kmp_real64, 64, /, 8r, 7, KMP_ARCH_X86) // __kmpc_atomic_float8_div
ATOMIC_CMPXCHG(float8, mul, kmp_real64, 64, *, 8r, 7, KMP_ARCH_X86) // __kmpc_atomic_float8_mul
// TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG

/* ------------------------------------------------------------------------ */
/* Routines for C/C++ Reduction operators && and || */

// ------------------------------------------------------------------------
// Need separate macros for &&, || because there is no combined assignment
// TODO: eliminate ATOMIC_CRIT_{L,EQV} macros as not used
#define ATOMIC_CRIT_L(TYPE_ID, OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG) \
  ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \
  OP_GOMP_CRITICAL(= *lhs OP, GOMP_FLAG) \
  OP_CRITICAL(= *lhs OP, LCK_ID) \
  }

#if KMP_ARCH_X86 || KMP_ARCH_X86_64

// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ===================================
#define ATOMIC_CMPX_L(TYPE_ID, OP_ID, TYPE, BITS, OP, LCK_ID, MASK, GOMP_FLAG) \
  ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \
  OP_GOMP_CRITICAL(= *lhs OP, GOMP_FLAG) \
  OP_CMPXCHG(TYPE, BITS, OP) \
  }

#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define ATOMIC_CMPX_L(TYPE_ID, OP_ID, TYPE, BITS, OP, LCK_ID, MASK, GOMP_FLAG) \
  ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \
  OP_GOMP_CRITICAL(= *lhs OP, GOMP_FLAG) \
  if (!((kmp_uintptr_t)lhs & 0x##MASK)) { \
    OP_CMPXCHG(TYPE, BITS, OP) /* aligned address */ \
  } else { \
    KMP_CHECK_GTID; \
    OP_CRITICAL(= *lhs OP, LCK_ID) /* unaligned - use critical */ \
  } \
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

ATOMIC_CMPX_L(fixed1, andl, char, 8, &&, 1i, 0, KMP_ARCH_X86) // __kmpc_atomic_fixed1_andl
ATOMIC_CMPX_L(fixed1, orl, char, 8, ||, 1i, 0, KMP_ARCH_X86) // __kmpc_atomic_fixed1_orl
ATOMIC_CMPX_L(fixed2, andl, short, 16, &&, 2i, 1, KMP_ARCH_X86) // __kmpc_atomic_fixed2_andl
ATOMIC_CMPX_L(fixed2, orl, short, 16, ||, 2i, 1, KMP_ARCH_X86) // __kmpc_atomic_fixed2_orl
ATOMIC_CMPX_L(fixed4, andl, kmp_int32, 32, &&, 4i, 3, 0) // __kmpc_atomic_fixed4_andl
ATOMIC_CMPX_L(fixed4, orl, kmp_int32, 32, ||, 4i, 3, 0) // __kmpc_atomic_fixed4_orl
ATOMIC_CMPX_L(fixed8, andl, kmp_int64, 64, &&, 8i, 7, KMP_ARCH_X86) // __kmpc_atomic_fixed8_andl
ATOMIC_CMPX_L(fixed8, orl, kmp_int64, 64, ||, 8i, 7, KMP_ARCH_X86) // __kmpc_atomic_fixed8_orl

/* ------------------------------------------------------------------------- */
/* Routines for Fortran operators that have no C counterpart: */
/* MAX, MIN, .EQV., .NEQV. */
/* Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl} */
/* Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor} */

// -------------------------------------------------------------------------
// MIN and MAX need separate macros
// OP - operator used to check whether the update is still needed
#define MIN_MAX_CRITSECT(OP, LCK_ID) \
  __kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \
 \
  if (*lhs OP rhs) { /* still need actions? */ \
    *lhs = rhs; \
  } \
  __kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid);

// -------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define GOMP_MIN_MAX_CRITSECT(OP, FLAG) \
  if ((FLAG) && (__kmp_atomic_mode == 2)) { \
    KMP_CHECK_GTID; \
    MIN_MAX_CRITSECT(OP, 0); \
    return; \
  }
#else
#define GOMP_MIN_MAX_CRITSECT(OP, FLAG)
#endif /* KMP_GOMP_COMPAT */

// -------------------------------------------------------------------------
#define MIN_MAX_CMPXCHG(TYPE, BITS, OP) \
  { \
    TYPE KMP_ATOMIC_VOLATILE temp_val; \
    TYPE old_value; \
    temp_val = *lhs; \
    old_value = temp_val; \
    while (old_value OP rhs && /* still need actions? */ \
           !KMP_COMPARE_AND_STORE_ACQ##BITS( \
               (kmp_int##BITS *)lhs, \
               *VOLATILE_CAST(kmp_int##BITS *) & old_value, \
               *VOLATILE_CAST(kmp_int##BITS *) & rhs)) { \
      temp_val = *lhs; \
      old_value = temp_val; \
    } \
  }

// -------------------------------------------------------------------------
// 1-byte, 2-byte operands - use critical section
#define MIN_MAX_CRITICAL(TYPE_ID, OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG) \
  ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \
  if (*lhs OP rhs) { /* need actions? */ \
    GOMP_MIN_MAX_CRITSECT(OP, GOMP_FLAG) \
    MIN_MAX_CRITSECT(OP, LCK_ID) \
  } \
  }

#if KMP_ARCH_X86 || KMP_ARCH_X86_64

// -------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
#define MIN_MAX_COMPXCHG(TYPE_ID, OP_ID, TYPE, BITS, OP, LCK_ID, MASK, \
                         GOMP_FLAG) \
  ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \
  if (*lhs OP rhs) { \
    GOMP_MIN_MAX_CRITSECT(OP, GOMP_FLAG) \
    MIN_MAX_CMPXCHG(TYPE, BITS, OP) \
  } \
  }

#else
// -------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define MIN_MAX_COMPXCHG(TYPE_ID, OP_ID, TYPE, BITS, OP, LCK_ID, MASK, \
                         GOMP_FLAG) \
  ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \
  if (*lhs OP rhs) { \
    GOMP_MIN_MAX_CRITSECT(OP, GOMP_FLAG) \
    if (!((kmp_uintptr_t)lhs & 0x##MASK)) { \
      MIN_MAX_CMPXCHG(TYPE, BITS, OP) /* aligned address */ \
    } else { \
      KMP_CHECK_GTID; \
      MIN_MAX_CRITSECT(OP, LCK_ID) /* unaligned address */ \
    } \
  } \
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
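// Illustrative sketch of the macros above: for max, OP is `<`, so the
// MIN_MAX_CMPXCHG loop keeps retrying only while `*lhs < rhs` still holds.
// The effective behaviour of, e.g., __kmpc_atomic_fixed4_max is roughly
// (compare_and_store standing in for KMP_COMPARE_AND_STORE_ACQ32):
//
//   if (*lhs < rhs) {            // update only needed when rhs is larger
//     do {
//       old_value = *lhs;
//     } while (old_value < rhs &&
//              !compare_and_store(lhs, old_value, rhs)); // retry if raced
//   }
//
// The loop exits either because another thread already stored a value that is
// >= rhs (no update needed any more) or because our compare-and-store won.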
MIN_MAX_COMPXCHG(fixed1, max, char, 8, <, 1i, 0, KMP_ARCH_X86) // __kmpc_atomic_fixed1_max
MIN_MAX_COMPXCHG(fixed1, min, char, 8, >, 1i, 0, KMP_ARCH_X86) // __kmpc_atomic_fixed1_min
MIN_MAX_COMPXCHG(fixed2, max, short, 16, <, 2i, 1, KMP_ARCH_X86) // __kmpc_atomic_fixed2_max
MIN_MAX_COMPXCHG(fixed2, min, short, 16, >, 2i, 1, KMP_ARCH_X86) // __kmpc_atomic_fixed2_min
MIN_MAX_COMPXCHG(fixed4, max, kmp_int32, 32, <, 4i, 3, 0) // __kmpc_atomic_fixed4_max
MIN_MAX_COMPXCHG(fixed4, min, kmp_int32, 32, >, 4i, 3, 0) // __kmpc_atomic_fixed4_min
MIN_MAX_COMPXCHG(fixed8, max, kmp_int64, 64, <, 8i, 7, KMP_ARCH_X86) // __kmpc_atomic_fixed8_max
MIN_MAX_COMPXCHG(fixed8, min, kmp_int64, 64, >, 8i, 7, KMP_ARCH_X86) // __kmpc_atomic_fixed8_min
MIN_MAX_COMPXCHG(float4, max, kmp_real32, 32, <, 4r, 3, KMP_ARCH_X86) // __kmpc_atomic_float4_max
MIN_MAX_COMPXCHG(float4, min, kmp_real32, 32, >, 4r, 3, KMP_ARCH_X86) // __kmpc_atomic_float4_min
MIN_MAX_COMPXCHG(float8, max, kmp_real64, 64, <, 8r, 7, KMP_ARCH_X86) // __kmpc_atomic_float8_max
MIN_MAX_COMPXCHG(float8, min, kmp_real64, 64, >, 8r, 7, KMP_ARCH_X86) // __kmpc_atomic_float8_min
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
MIN_MAX_CRITICAL(float10, max, long double, <, 10r, 1) // __kmpc_atomic_float10_max
MIN_MAX_CRITICAL(float10, min, long double, >, 10r, 1) // __kmpc_atomic_float10_min
#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_HAVE_QUAD
MIN_MAX_CRITICAL(float16, max, QUAD_LEGACY, <, 16r, 1) // __kmpc_atomic_float16_max
MIN_MAX_CRITICAL(float16, min, QUAD_LEGACY, >, 16r, 1) // __kmpc_atomic_float16_min
#if (KMP_ARCH_X86)
MIN_MAX_CRITICAL(float16, max_a16, Quad_a16_t, <, 16r, 1) // __kmpc_atomic_float16_max_a16
MIN_MAX_CRITICAL(float16, min_a16, Quad_a16_t, >, 16r, 1) // __kmpc_atomic_float16_min_a16
#endif // (KMP_ARCH_X86)
#endif // KMP_HAVE_QUAD
// ------------------------------------------------------------------------
// Need separate macros for .EQV. because of the need of complement (~)
// OP ignored for critical sections, ^=~ used instead
#define ATOMIC_CRIT_EQV(TYPE_ID, OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG) \
  ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \
  OP_GOMP_CRITICAL(^= (TYPE) ~, GOMP_FLAG) /* send assignment */ \
  OP_CRITICAL(^= (TYPE) ~, LCK_ID) /* send assignment and complement */ \
  }

// ------------------------------------------------------------------------
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ===================================
#define ATOMIC_CMPX_EQV(TYPE_ID, OP_ID, TYPE, BITS, OP, LCK_ID, MASK, \
                        GOMP_FLAG) \
  ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \
  OP_GOMP_CRITICAL(^= (TYPE) ~, GOMP_FLAG) /* send assignment */ \
  OP_CMPXCHG(TYPE, BITS, OP) \
  }
// ------------------------------------------------------------------------
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define ATOMIC_CMPX_EQV(TYPE_ID, OP_ID, TYPE, BITS, OP, LCK_ID, MASK, \
                        GOMP_FLAG) \
  ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \
  OP_GOMP_CRITICAL(^= (TYPE) ~, GOMP_FLAG) \
  if (!((kmp_uintptr_t)lhs & 0x##MASK)) { \
    OP_CMPXCHG(TYPE, BITS, OP) /* aligned address */ \
  } else { \
    KMP_CHECK_GTID; \
    OP_CRITICAL(^= (TYPE) ~, LCK_ID) /* unaligned address - use critical */ \
  } \
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

ATOMIC_CMPXCHG(fixed1, neqv, kmp_int8, 8, ^, 1i, 0, KMP_ARCH_X86) // __kmpc_atomic_fixed1_neqv
ATOMIC_CMPXCHG(fixed2, neqv, kmp_int16, 16, ^, 2i, 1, KMP_ARCH_X86) // __kmpc_atomic_fixed2_neqv
ATOMIC_CMPXCHG(fixed4, neqv, kmp_int32, 32, ^, 4i, 3, KMP_ARCH_X86) // __kmpc_atomic_fixed4_neqv
ATOMIC_CMPXCHG(fixed8, neqv, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86) // __kmpc_atomic_fixed8_neqv
ATOMIC_CMPX_EQV(fixed1, eqv, kmp_int8, 8, ^~, 1i, 0, KMP_ARCH_X86) // __kmpc_atomic_fixed1_eqv
ATOMIC_CMPX_EQV(fixed2, eqv, kmp_int16, 16, ^~, 2i, 1, KMP_ARCH_X86) // __kmpc_atomic_fixed2_eqv
ATOMIC_CMPX_EQV(fixed4, eqv, kmp_int32, 32, ^~, 4i, 3, KMP_ARCH_X86) // __kmpc_atomic_fixed4_eqv
ATOMIC_CMPX_EQV(fixed8, eqv, kmp_int64, 64, ^~, 8i, 7, KMP_ARCH_X86) // __kmpc_atomic_fixed8_eqv

// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use
// critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL(TYPE_ID, OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG) \
  ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \
  OP_UPDATE_GOMP_CRITICAL(TYPE, OP, GOMP_FLAG) /* send assignment */ \
  OP_UPDATE_CRITICAL(TYPE, OP, LCK_ID) /* send assignment */ \
  }

/* ------------------------------------------------------------------------- */
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// routines for long double type
ATOMIC_CRITICAL(float10, add, long double, +, 10r, 1) // __kmpc_atomic_float10_add
ATOMIC_CRITICAL(float10, sub, long double, -, 10r, 1) // __kmpc_atomic_float10_sub
ATOMIC_CRITICAL(float10, mul, long double, *, 10r, 1) // __kmpc_atomic_float10_mul
ATOMIC_CRITICAL(float10, div, long double, /, 10r, 1) // __kmpc_atomic_float10_div
#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL(float16, add, QUAD_LEGACY, +, 16r, 1) // __kmpc_atomic_float16_add
ATOMIC_CRITICAL(float16, sub, QUAD_LEGACY, -, 16r, 1) // __kmpc_atomic_float16_sub
ATOMIC_CRITICAL(float16, mul, QUAD_LEGACY, *, 16r, 1) // __kmpc_atomic_float16_mul
ATOMIC_CRITICAL(float16, div, QUAD_LEGACY, /, 16r, 1) // __kmpc_atomic_float16_div
#if (KMP_ARCH_X86)
ATOMIC_CRITICAL(float16, add_a16, Quad_a16_t, +, 16r, 1) // __kmpc_atomic_float16_add_a16
ATOMIC_CRITICAL(float16, sub_a16, Quad_a16_t, -, 16r, 1) // __kmpc_atomic_float16_sub_a16
ATOMIC_CRITICAL(float16, mul_a16, Quad_a16_t, *, 16r, 1) // __kmpc_atomic_float16_mul_a16
ATOMIC_CRITICAL(float16, div_a16, Quad_a16_t, /, 16r, 1) // __kmpc_atomic_float16_div_a16
#endif // (KMP_ARCH_X86)
#endif // KMP_HAVE_QUAD
// routines for complex types

#if USE_CMPXCHG_FIX
// workaround for C78287 (complex(kind=4) data type)
ATOMIC_CMPXCHG_WORKAROUND(cmplx4, add, kmp_cmplx32, 64, +, 8c, 7, 1) // __kmpc_atomic_cmplx4_add
ATOMIC_CMPXCHG_WORKAROUND(cmplx4, sub, kmp_cmplx32, 64, -, 8c, 7, 1) // __kmpc_atomic_cmplx4_sub
ATOMIC_CMPXCHG_WORKAROUND(cmplx4, mul, kmp_cmplx32, 64, *, 8c, 7, 1) // __kmpc_atomic_cmplx4_mul
ATOMIC_CMPXCHG_WORKAROUND(cmplx4, div, kmp_cmplx32, 64, /, 8c, 7, 1) // __kmpc_atomic_cmplx4_div
// end of the workaround for C78287
#else
ATOMIC_CRITICAL(cmplx4, add, kmp_cmplx32, +, 8c, 1) // __kmpc_atomic_cmplx4_add
ATOMIC_CRITICAL(cmplx4, sub, kmp_cmplx32, -, 8c, 1) // __kmpc_atomic_cmplx4_sub
ATOMIC_CRITICAL(cmplx4, mul, kmp_cmplx32, *, 8c, 1) // __kmpc_atomic_cmplx4_mul
ATOMIC_CRITICAL(cmplx4, div, kmp_cmplx32, /, 8c, 1) // __kmpc_atomic_cmplx4_div
#endif // USE_CMPXCHG_FIX

ATOMIC_CRITICAL(cmplx8, add, kmp_cmplx64, +, 16c, 1) // __kmpc_atomic_cmplx8_add
ATOMIC_CRITICAL(cmplx8, sub, kmp_cmplx64, -, 16c, 1) // __kmpc_atomic_cmplx8_sub
ATOMIC_CRITICAL(cmplx8, mul, kmp_cmplx64, *, 16c, 1) // __kmpc_atomic_cmplx8_mul
ATOMIC_CRITICAL(cmplx8, div, kmp_cmplx64, /, 16c, 1) // __kmpc_atomic_cmplx8_div
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
ATOMIC_CRITICAL(cmplx10, add, kmp_cmplx80, +, 20c, 1) // __kmpc_atomic_cmplx10_add
ATOMIC_CRITICAL(cmplx10, sub, kmp_cmplx80, -, 20c, 1) // __kmpc_atomic_cmplx10_sub
ATOMIC_CRITICAL(cmplx10, mul, kmp_cmplx80, *, 20c, 1) // __kmpc_atomic_cmplx10_mul
ATOMIC_CRITICAL(cmplx10, div, kmp_cmplx80, /, 20c, 1) // __kmpc_atomic_cmplx10_div
#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL(cmplx16, add, CPLX128_LEG, +, 32c, 1) // __kmpc_atomic_cmplx16_add
ATOMIC_CRITICAL(cmplx16, sub, CPLX128_LEG, -, 32c, 1) // __kmpc_atomic_cmplx16_sub
ATOMIC_CRITICAL(cmplx16, mul, CPLX128_LEG, *, 32c, 1) // __kmpc_atomic_cmplx16_mul
ATOMIC_CRITICAL(cmplx16, div, CPLX128_LEG, /, 32c, 1) // __kmpc_atomic_cmplx16_div
#if (KMP_ARCH_X86)
ATOMIC_CRITICAL(cmplx16, add_a16, kmp_cmplx128_a16_t, +, 32c, 1) // __kmpc_atomic_cmplx16_add_a16
ATOMIC_CRITICAL(cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1) // __kmpc_atomic_cmplx16_sub_a16
ATOMIC_CRITICAL(cmplx16, mul_a16, kmp_cmplx128_a16_t, *, 32c, 1) // __kmpc_atomic_cmplx16_mul_a16
ATOMIC_CRITICAL(cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1) // __kmpc_atomic_cmplx16_div_a16
#endif // (KMP_ARCH_X86)
#endif // KMP_HAVE_QUAD

// OpenMP 4.0: x = expr binop x for non-commutative operations.
// Supported only on IA-32 architecture and Intel(R) 64
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

// ------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL_REV(TYPE, OP, LCK_ID) \
  __kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \
 \
  (*lhs) = (TYPE)((rhs)OP(*lhs)); \
 \
  __kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid);

#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_REV(TYPE, OP, FLAG) \
  if ((FLAG) && (__kmp_atomic_mode == 2)) { \
    KMP_CHECK_GTID; \
    OP_CRITICAL_REV(TYPE, OP, 0); \
    return; \
  }

#else
#define OP_GOMP_CRITICAL_REV(TYPE, OP, FLAG)
#endif /* KMP_GOMP_COMPAT */

// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned
//           fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
#define ATOMIC_BEGIN_REV(TYPE_ID, OP_ID, TYPE, RET_TYPE) \
  RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID##_rev(ident_t *id_ref, int gtid, \
                                                   TYPE *lhs, TYPE rhs) { \
    KMP_DEBUG_ASSERT(__kmp_init_serial); \
    KA_TRACE(100, ("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_rev: T#%d\n", gtid));

// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
//       *lhs only once (w/o it the compiler reads *lhs twice)
#define OP_CMPXCHG_REV(TYPE, BITS, OP) \
  { \
    TYPE KMP_ATOMIC_VOLATILE temp_val; \
    TYPE old_value, new_value; \
    temp_val = *lhs; \
    old_value = temp_val; \
    new_value = (TYPE)(rhs OP old_value); \
    while (!KMP_COMPARE_AND_STORE_ACQ##BITS( \
        (kmp_int##BITS *)lhs, *VOLATILE_CAST(kmp_int##BITS *) & old_value, \
        *VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \
      KMP_DO_PAUSE; \
 \
      temp_val = *lhs; \
      old_value = temp_val; \
      new_value = (TYPE)(rhs OP old_value); \
    } \
  }

// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_REV(TYPE_ID, OP_ID, TYPE, BITS, OP, LCK_ID, GOMP_FLAG) \
  ATOMIC_BEGIN_REV(TYPE_ID, OP_ID, TYPE, void) \
  OP_GOMP_CRITICAL_REV(TYPE, OP, GOMP_FLAG) \
  OP_CMPXCHG_REV(TYPE, BITS, OP) \
  }
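// Sketch of the semantics for orientation: a plain entrypoint updates
// *lhs = *lhs OP rhs, while the corresponding _rev entrypoint generated from
// these macros updates *lhs = rhs OP *lhs. For example,
// __kmpc_atomic_float8_sub_rev effectively performs
//   *lhs = rhs - *lhs;   // atomically
// which is why only non-commutative operators have _rev variants.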
// ------------------------------------------------------------------------
// Entries definition for integer operands
// TYPE_ID - operands type and size (fixed4, float4)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operand type
// BITS - size in bits, used to distinguish low level calls
// OP - operator (used in critical section)
// LCK_ID - lock identifier, used to possibly distinguish lock variable

// TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for ATOMIC integer operands, other operators
// ------------------------------------------------------------------------
// TYPE_ID,OP_ID, TYPE, BITS, OP, LCK_ID, GOMP_FLAG
ATOMIC_CMPXCHG_REV(fixed1, div, kmp_int8, 8, /, 1i, KMP_ARCH_X86) // __kmpc_atomic_fixed1_div_rev
ATOMIC_CMPXCHG_REV(fixed1u, div, kmp_uint8, 8, /, 1i, KMP_ARCH_X86) // __kmpc_atomic_fixed1u_div_rev
ATOMIC_CMPXCHG_REV(fixed1, shl, kmp_int8, 8, <<, 1i, KMP_ARCH_X86) // __kmpc_atomic_fixed1_shl_rev
ATOMIC_CMPXCHG_REV(fixed1, shr, kmp_int8, 8, >>, 1i, KMP_ARCH_X86) // __kmpc_atomic_fixed1_shr_rev
ATOMIC_CMPXCHG_REV(fixed1u, shr, kmp_uint8, 8, >>, 1i, KMP_ARCH_X86) // __kmpc_atomic_fixed1u_shr_rev
ATOMIC_CMPXCHG_REV(fixed1, sub, kmp_int8, 8, -, 1i, KMP_ARCH_X86) // __kmpc_atomic_fixed1_sub_rev

ATOMIC_CMPXCHG_REV(fixed2, div, kmp_int16, 16, /, 2i, KMP_ARCH_X86) // __kmpc_atomic_fixed2_div_rev
ATOMIC_CMPXCHG_REV(fixed2u, div, kmp_uint16, 16, /, 2i, KMP_ARCH_X86) // __kmpc_atomic_fixed2u_div_rev
ATOMIC_CMPXCHG_REV(fixed2, shl, kmp_int16, 16, <<, 2i, KMP_ARCH_X86) // __kmpc_atomic_fixed2_shl_rev
ATOMIC_CMPXCHG_REV(fixed2, shr, kmp_int16, 16, >>, 2i, KMP_ARCH_X86) // __kmpc_atomic_fixed2_shr_rev
ATOMIC_CMPXCHG_REV(fixed2u, shr, kmp_uint16, 16, >>, 2i, KMP_ARCH_X86) // __kmpc_atomic_fixed2u_shr_rev
ATOMIC_CMPXCHG_REV(fixed2, sub, kmp_int16, 16, -, 2i, KMP_ARCH_X86) // __kmpc_atomic_fixed2_sub_rev

ATOMIC_CMPXCHG_REV(fixed4, div, kmp_int32, 32, /, 4i, KMP_ARCH_X86) // __kmpc_atomic_fixed4_div_rev
ATOMIC_CMPXCHG_REV(fixed4u, div, kmp_uint32, 32, /, 4i, KMP_ARCH_X86) // __kmpc_atomic_fixed4u_div_rev
ATOMIC_CMPXCHG_REV(fixed4, shl, kmp_int32, 32, <<, 4i, KMP_ARCH_X86) // __kmpc_atomic_fixed4_shl_rev
ATOMIC_CMPXCHG_REV(fixed4, shr, kmp_int32, 32, >>, 4i, KMP_ARCH_X86) // __kmpc_atomic_fixed4_shr_rev
ATOMIC_CMPXCHG_REV(fixed4u, shr, kmp_uint32, 32, >>, 4i, KMP_ARCH_X86) // __kmpc_atomic_fixed4u_shr_rev
ATOMIC_CMPXCHG_REV(fixed4, sub, kmp_int32, 32, -, 4i, KMP_ARCH_X86) // __kmpc_atomic_fixed4_sub_rev

ATOMIC_CMPXCHG_REV(fixed8, div, kmp_int64, 64, /, 8i, KMP_ARCH_X86) // __kmpc_atomic_fixed8_div_rev
ATOMIC_CMPXCHG_REV(fixed8u, div, kmp_uint64, 64, /, 8i, KMP_ARCH_X86) // __kmpc_atomic_fixed8u_div_rev
ATOMIC_CMPXCHG_REV(fixed8, shl, kmp_int64, 64, <<, 8i, KMP_ARCH_X86) // __kmpc_atomic_fixed8_shl_rev
ATOMIC_CMPXCHG_REV(fixed8, shr, kmp_int64, 64, >>, 8i, KMP_ARCH_X86) // __kmpc_atomic_fixed8_shr_rev
ATOMIC_CMPXCHG_REV(fixed8u, shr, kmp_uint64, 64, >>, 8i, KMP_ARCH_X86) // __kmpc_atomic_fixed8u_shr_rev
ATOMIC_CMPXCHG_REV(fixed8, sub, kmp_int64, 64, -, 8i, KMP_ARCH_X86) // __kmpc_atomic_fixed8_sub_rev

ATOMIC_CMPXCHG_REV(float4, div, kmp_real32, 32, /, 4r, KMP_ARCH_X86) // __kmpc_atomic_float4_div_rev
ATOMIC_CMPXCHG_REV(float4, sub, kmp_real32, 32, -, 4r, KMP_ARCH_X86) // __kmpc_atomic_float4_sub_rev

ATOMIC_CMPXCHG_REV(float8, div, kmp_real64, 64, /, 8r, KMP_ARCH_X86) // __kmpc_atomic_float8_div_rev
ATOMIC_CMPXCHG_REV(float8, sub, kmp_real64, 64, -, 8r, KMP_ARCH_X86) // __kmpc_atomic_float8_sub_rev
// TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID, GOMP_FLAG
detailed above1561// OP - operator1562// LCK_ID - lock identifier, used to possibly distinguish lock variable1563#define ATOMIC_CRITICAL_REV(TYPE_ID, OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG) \1564ATOMIC_BEGIN_REV(TYPE_ID, OP_ID, TYPE, void) \1565OP_GOMP_CRITICAL_REV(TYPE, OP, GOMP_FLAG) \1566OP_CRITICAL_REV(TYPE, OP, LCK_ID) \1567}15681569/* ------------------------------------------------------------------------- */1570// routines for long double type1571ATOMIC_CRITICAL_REV(float10, sub, long double, -, 10r,15721) // __kmpc_atomic_float10_sub_rev1573ATOMIC_CRITICAL_REV(float10, div, long double, /, 10r,15741) // __kmpc_atomic_float10_div_rev1575#if KMP_HAVE_QUAD1576// routines for _Quad type1577ATOMIC_CRITICAL_REV(float16, sub, QUAD_LEGACY, -, 16r,15781) // __kmpc_atomic_float16_sub_rev1579ATOMIC_CRITICAL_REV(float16, div, QUAD_LEGACY, /, 16r,15801) // __kmpc_atomic_float16_div_rev1581#if (KMP_ARCH_X86)1582ATOMIC_CRITICAL_REV(float16, sub_a16, Quad_a16_t, -, 16r,15831) // __kmpc_atomic_float16_sub_a16_rev1584ATOMIC_CRITICAL_REV(float16, div_a16, Quad_a16_t, /, 16r,15851) // __kmpc_atomic_float16_div_a16_rev1586#endif // KMP_ARCH_X861587#endif // KMP_HAVE_QUAD15881589// routines for complex types1590ATOMIC_CRITICAL_REV(cmplx4, sub, kmp_cmplx32, -, 8c,15911) // __kmpc_atomic_cmplx4_sub_rev1592ATOMIC_CRITICAL_REV(cmplx4, div, kmp_cmplx32, /, 8c,15931) // __kmpc_atomic_cmplx4_div_rev1594ATOMIC_CRITICAL_REV(cmplx8, sub, kmp_cmplx64, -, 16c,15951) // __kmpc_atomic_cmplx8_sub_rev1596ATOMIC_CRITICAL_REV(cmplx8, div, kmp_cmplx64, /, 16c,15971) // __kmpc_atomic_cmplx8_div_rev1598ATOMIC_CRITICAL_REV(cmplx10, sub, kmp_cmplx80, -, 20c,15991) // __kmpc_atomic_cmplx10_sub_rev1600ATOMIC_CRITICAL_REV(cmplx10, div, kmp_cmplx80, /, 20c,16011) // __kmpc_atomic_cmplx10_div_rev1602#if KMP_HAVE_QUAD1603ATOMIC_CRITICAL_REV(cmplx16, sub, CPLX128_LEG, -, 32c,16041) // __kmpc_atomic_cmplx16_sub_rev1605ATOMIC_CRITICAL_REV(cmplx16, div, CPLX128_LEG, /, 32c,16061) // __kmpc_atomic_cmplx16_div_rev1607#if (KMP_ARCH_X86)1608ATOMIC_CRITICAL_REV(cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c,16091) // __kmpc_atomic_cmplx16_sub_a16_rev1610ATOMIC_CRITICAL_REV(cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c,16111) // __kmpc_atomic_cmplx16_div_a16_rev1612#endif // KMP_ARCH_X861613#endif // KMP_HAVE_QUAD16141615#endif // KMP_ARCH_X86 || KMP_ARCH_X86_641616// End of OpenMP 4.0: x = expr binop x for non-commutative operations.16171618/* ------------------------------------------------------------------------ */1619/* Routines for mixed types of LHS and RHS, when RHS is "larger" */1620/* Note: in order to reduce the total number of types combinations */1621/* it is supposed that compiler converts RHS to longest floating type,*/1622/* that is _Quad, before call to any of these routines */1623/* Conversion to _Quad will be done by the compiler during calculation, */1624/* conversion back to TYPE - before the assignment, like: */1625/* *lhs = (TYPE)( (_Quad)(*lhs) OP rhs ) */1626/* Performance penalty expected because of SW emulation use */1627/* ------------------------------------------------------------------------ */16281629#define ATOMIC_BEGIN_MIX(TYPE_ID, TYPE, OP_ID, RTYPE_ID, RTYPE) \1630void __kmpc_atomic_##TYPE_ID##_##OP_ID##_##RTYPE_ID( \1631ident_t *id_ref, int gtid, TYPE *lhs, RTYPE rhs) { \1632KMP_DEBUG_ASSERT(__kmp_init_serial); \1633KA_TRACE(100, \1634("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_" #RTYPE_ID ": T#%d\n", \1635gtid));16361637// -------------------------------------------------------------------------1638#define 
ATOMIC_CRITICAL_FP(TYPE_ID, TYPE, OP_ID, OP, RTYPE_ID, RTYPE, LCK_ID, \1639GOMP_FLAG) \1640ATOMIC_BEGIN_MIX(TYPE_ID, TYPE, OP_ID, RTYPE_ID, RTYPE) \1641OP_UPDATE_GOMP_CRITICAL(TYPE, OP, GOMP_FLAG) /* send assignment */ \1642OP_UPDATE_CRITICAL(TYPE, OP, LCK_ID) /* send assignment */ \1643}16441645// -------------------------------------------------------------------------1646#if KMP_ARCH_X86 || KMP_ARCH_X86_641647// -------------------------------------------------------------------------1648// X86 or X86_64: no alignment problems ====================================1649#define ATOMIC_CMPXCHG_MIX(TYPE_ID, TYPE, OP_ID, BITS, OP, RTYPE_ID, RTYPE, \1650LCK_ID, MASK, GOMP_FLAG) \1651ATOMIC_BEGIN_MIX(TYPE_ID, TYPE, OP_ID, RTYPE_ID, RTYPE) \1652OP_UPDATE_GOMP_CRITICAL(TYPE, OP, GOMP_FLAG) \1653OP_CMPXCHG(TYPE, BITS, OP) \1654}1655// -------------------------------------------------------------------------1656#else1657// ------------------------------------------------------------------------1658// Code for other architectures that don't handle unaligned accesses.1659#define ATOMIC_CMPXCHG_MIX(TYPE_ID, TYPE, OP_ID, BITS, OP, RTYPE_ID, RTYPE, \1660LCK_ID, MASK, GOMP_FLAG) \1661ATOMIC_BEGIN_MIX(TYPE_ID, TYPE, OP_ID, RTYPE_ID, RTYPE) \1662OP_UPDATE_GOMP_CRITICAL(TYPE, OP, GOMP_FLAG) \1663if (!((kmp_uintptr_t)lhs & 0x##MASK)) { \1664OP_CMPXCHG(TYPE, BITS, OP) /* aligned address */ \1665} else { \1666KMP_CHECK_GTID; \1667OP_UPDATE_CRITICAL(TYPE, OP, \1668LCK_ID) /* unaligned address - use critical */ \1669} \1670}1671#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */16721673// -------------------------------------------------------------------------1674#if KMP_ARCH_X86 || KMP_ARCH_X86_641675// -------------------------------------------------------------------------1676#define ATOMIC_CMPXCHG_REV_MIX(TYPE_ID, TYPE, OP_ID, BITS, OP, RTYPE_ID, \1677RTYPE, LCK_ID, MASK, GOMP_FLAG) \1678ATOMIC_BEGIN_MIX(TYPE_ID, TYPE, OP_ID, RTYPE_ID, RTYPE) \1679OP_GOMP_CRITICAL_REV(TYPE, OP, GOMP_FLAG) \1680OP_CMPXCHG_REV(TYPE, BITS, OP) \1681}1682#define ATOMIC_CRITICAL_REV_FP(TYPE_ID, TYPE, OP_ID, OP, RTYPE_ID, RTYPE, \1683LCK_ID, GOMP_FLAG) \1684ATOMIC_BEGIN_MIX(TYPE_ID, TYPE, OP_ID, RTYPE_ID, RTYPE) \1685OP_GOMP_CRITICAL_REV(TYPE, OP, GOMP_FLAG) \1686OP_CRITICAL_REV(TYPE, OP, LCK_ID) \1687}1688#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */16891690// RHS=float81691ATOMIC_CMPXCHG_MIX(fixed1, char, mul, 8, *, float8, kmp_real64, 1i, 0,1692KMP_ARCH_X86) // __kmpc_atomic_fixed1_mul_float81693ATOMIC_CMPXCHG_MIX(fixed1, char, div, 8, /, float8, kmp_real64, 1i, 0,1694KMP_ARCH_X86) // __kmpc_atomic_fixed1_div_float81695ATOMIC_CMPXCHG_MIX(fixed2, short, mul, 16, *, float8, kmp_real64, 2i, 1,1696KMP_ARCH_X86) // __kmpc_atomic_fixed2_mul_float81697ATOMIC_CMPXCHG_MIX(fixed2, short, div, 16, /, float8, kmp_real64, 2i, 1,1698KMP_ARCH_X86) // __kmpc_atomic_fixed2_div_float81699ATOMIC_CMPXCHG_MIX(fixed4, kmp_int32, mul, 32, *, float8, kmp_real64, 4i, 3,17000) // __kmpc_atomic_fixed4_mul_float81701ATOMIC_CMPXCHG_MIX(fixed4, kmp_int32, div, 32, /, float8, kmp_real64, 4i, 3,17020) // __kmpc_atomic_fixed4_div_float81703ATOMIC_CMPXCHG_MIX(fixed8, kmp_int64, mul, 64, *, float8, kmp_real64, 8i, 7,1704KMP_ARCH_X86) // __kmpc_atomic_fixed8_mul_float81705ATOMIC_CMPXCHG_MIX(fixed8, kmp_int64, div, 64, /, float8, kmp_real64, 8i, 7,1706KMP_ARCH_X86) // __kmpc_atomic_fixed8_div_float81707ATOMIC_CMPXCHG_MIX(float4, kmp_real32, add, 32, +, float8, kmp_real64, 4r, 3,1708KMP_ARCH_X86) // __kmpc_atomic_float4_add_float81709ATOMIC_CMPXCHG_MIX(float4, kmp_real32, 
sub, 32, -, float8, kmp_real64, 4r, 3,1710KMP_ARCH_X86) // __kmpc_atomic_float4_sub_float81711ATOMIC_CMPXCHG_MIX(float4, kmp_real32, mul, 32, *, float8, kmp_real64, 4r, 3,1712KMP_ARCH_X86) // __kmpc_atomic_float4_mul_float81713ATOMIC_CMPXCHG_MIX(float4, kmp_real32, div, 32, /, float8, kmp_real64, 4r, 3,1714KMP_ARCH_X86) // __kmpc_atomic_float4_div_float817151716// RHS=float16 (deprecated, to be removed when we are sure the compiler does not1717// use them)1718#if KMP_HAVE_QUAD1719ATOMIC_CMPXCHG_MIX(fixed1, char, add, 8, +, fp, _Quad, 1i, 0,1720KMP_ARCH_X86) // __kmpc_atomic_fixed1_add_fp1721ATOMIC_CMPXCHG_MIX(fixed1u, uchar, add, 8, +, fp, _Quad, 1i, 0,1722KMP_ARCH_X86) // __kmpc_atomic_fixed1u_add_fp1723ATOMIC_CMPXCHG_MIX(fixed1, char, sub, 8, -, fp, _Quad, 1i, 0,1724KMP_ARCH_X86) // __kmpc_atomic_fixed1_sub_fp1725ATOMIC_CMPXCHG_MIX(fixed1u, uchar, sub, 8, -, fp, _Quad, 1i, 0,1726KMP_ARCH_X86) // __kmpc_atomic_fixed1u_sub_fp1727ATOMIC_CMPXCHG_MIX(fixed1, char, mul, 8, *, fp, _Quad, 1i, 0,1728KMP_ARCH_X86) // __kmpc_atomic_fixed1_mul_fp1729ATOMIC_CMPXCHG_MIX(fixed1u, uchar, mul, 8, *, fp, _Quad, 1i, 0,1730KMP_ARCH_X86) // __kmpc_atomic_fixed1u_mul_fp1731ATOMIC_CMPXCHG_MIX(fixed1, char, div, 8, /, fp, _Quad, 1i, 0,1732KMP_ARCH_X86) // __kmpc_atomic_fixed1_div_fp1733ATOMIC_CMPXCHG_MIX(fixed1u, uchar, div, 8, /, fp, _Quad, 1i, 0,1734KMP_ARCH_X86) // __kmpc_atomic_fixed1u_div_fp17351736ATOMIC_CMPXCHG_MIX(fixed2, short, add, 16, +, fp, _Quad, 2i, 1,1737KMP_ARCH_X86) // __kmpc_atomic_fixed2_add_fp1738ATOMIC_CMPXCHG_MIX(fixed2u, ushort, add, 16, +, fp, _Quad, 2i, 1,1739KMP_ARCH_X86) // __kmpc_atomic_fixed2u_add_fp1740ATOMIC_CMPXCHG_MIX(fixed2, short, sub, 16, -, fp, _Quad, 2i, 1,1741KMP_ARCH_X86) // __kmpc_atomic_fixed2_sub_fp1742ATOMIC_CMPXCHG_MIX(fixed2u, ushort, sub, 16, -, fp, _Quad, 2i, 1,1743KMP_ARCH_X86) // __kmpc_atomic_fixed2u_sub_fp1744ATOMIC_CMPXCHG_MIX(fixed2, short, mul, 16, *, fp, _Quad, 2i, 1,1745KMP_ARCH_X86) // __kmpc_atomic_fixed2_mul_fp1746ATOMIC_CMPXCHG_MIX(fixed2u, ushort, mul, 16, *, fp, _Quad, 2i, 1,1747KMP_ARCH_X86) // __kmpc_atomic_fixed2u_mul_fp1748ATOMIC_CMPXCHG_MIX(fixed2, short, div, 16, /, fp, _Quad, 2i, 1,1749KMP_ARCH_X86) // __kmpc_atomic_fixed2_div_fp1750ATOMIC_CMPXCHG_MIX(fixed2u, ushort, div, 16, /, fp, _Quad, 2i, 1,1751KMP_ARCH_X86) // __kmpc_atomic_fixed2u_div_fp17521753ATOMIC_CMPXCHG_MIX(fixed4, kmp_int32, add, 32, +, fp, _Quad, 4i, 3,17540) // __kmpc_atomic_fixed4_add_fp1755ATOMIC_CMPXCHG_MIX(fixed4u, kmp_uint32, add, 32, +, fp, _Quad, 4i, 3,17560) // __kmpc_atomic_fixed4u_add_fp1757ATOMIC_CMPXCHG_MIX(fixed4, kmp_int32, sub, 32, -, fp, _Quad, 4i, 3,17580) // __kmpc_atomic_fixed4_sub_fp1759ATOMIC_CMPXCHG_MIX(fixed4u, kmp_uint32, sub, 32, -, fp, _Quad, 4i, 3,17600) // __kmpc_atomic_fixed4u_sub_fp1761ATOMIC_CMPXCHG_MIX(fixed4, kmp_int32, mul, 32, *, fp, _Quad, 4i, 3,17620) // __kmpc_atomic_fixed4_mul_fp1763ATOMIC_CMPXCHG_MIX(fixed4u, kmp_uint32, mul, 32, *, fp, _Quad, 4i, 3,17640) // __kmpc_atomic_fixed4u_mul_fp1765ATOMIC_CMPXCHG_MIX(fixed4, kmp_int32, div, 32, /, fp, _Quad, 4i, 3,17660) // __kmpc_atomic_fixed4_div_fp1767ATOMIC_CMPXCHG_MIX(fixed4u, kmp_uint32, div, 32, /, fp, _Quad, 4i, 3,17680) // __kmpc_atomic_fixed4u_div_fp17691770ATOMIC_CMPXCHG_MIX(fixed8, kmp_int64, add, 64, +, fp, _Quad, 8i, 7,1771KMP_ARCH_X86) // __kmpc_atomic_fixed8_add_fp1772ATOMIC_CMPXCHG_MIX(fixed8u, kmp_uint64, add, 64, +, fp, _Quad, 8i, 7,1773KMP_ARCH_X86) // __kmpc_atomic_fixed8u_add_fp1774ATOMIC_CMPXCHG_MIX(fixed8, kmp_int64, sub, 64, -, fp, _Quad, 8i, 7,1775KMP_ARCH_X86) // 
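// Illustrative sketch (not compiled): the shape of a mixed-type update such
// as __kmpc_atomic_fixed4_div_float8 above. The arithmetic is carried out in
// the wider RHS type, the result is converted back to the narrow LHS type,
// and only the 4-byte location is compare-and-swapped. std::atomic and the
// function name are used here only for illustration.
#if 0
#include <atomic>
#include <cstdint>

static void example_fixed4_div_float8(std::atomic<int32_t> *lhs, double rhs) {
  int32_t old_value = lhs->load(std::memory_order_relaxed);
  int32_t new_value = (int32_t)(old_value / rhs); // compute wide, store narrow
  while (!lhs->compare_exchange_weak(old_value, new_value)) {
    new_value = (int32_t)(old_value / rhs); // old_value was reloaded; retry
  }
}
#endif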
__kmpc_atomic_fixed8_sub_fp1776ATOMIC_CMPXCHG_MIX(fixed8u, kmp_uint64, sub, 64, -, fp, _Quad, 8i, 7,1777KMP_ARCH_X86) // __kmpc_atomic_fixed8u_sub_fp1778ATOMIC_CMPXCHG_MIX(fixed8, kmp_int64, mul, 64, *, fp, _Quad, 8i, 7,1779KMP_ARCH_X86) // __kmpc_atomic_fixed8_mul_fp1780ATOMIC_CMPXCHG_MIX(fixed8u, kmp_uint64, mul, 64, *, fp, _Quad, 8i, 7,1781KMP_ARCH_X86) // __kmpc_atomic_fixed8u_mul_fp1782ATOMIC_CMPXCHG_MIX(fixed8, kmp_int64, div, 64, /, fp, _Quad, 8i, 7,1783KMP_ARCH_X86) // __kmpc_atomic_fixed8_div_fp1784ATOMIC_CMPXCHG_MIX(fixed8u, kmp_uint64, div, 64, /, fp, _Quad, 8i, 7,1785KMP_ARCH_X86) // __kmpc_atomic_fixed8u_div_fp17861787ATOMIC_CMPXCHG_MIX(float4, kmp_real32, add, 32, +, fp, _Quad, 4r, 3,1788KMP_ARCH_X86) // __kmpc_atomic_float4_add_fp1789ATOMIC_CMPXCHG_MIX(float4, kmp_real32, sub, 32, -, fp, _Quad, 4r, 3,1790KMP_ARCH_X86) // __kmpc_atomic_float4_sub_fp1791ATOMIC_CMPXCHG_MIX(float4, kmp_real32, mul, 32, *, fp, _Quad, 4r, 3,1792KMP_ARCH_X86) // __kmpc_atomic_float4_mul_fp1793ATOMIC_CMPXCHG_MIX(float4, kmp_real32, div, 32, /, fp, _Quad, 4r, 3,1794KMP_ARCH_X86) // __kmpc_atomic_float4_div_fp17951796ATOMIC_CMPXCHG_MIX(float8, kmp_real64, add, 64, +, fp, _Quad, 8r, 7,1797KMP_ARCH_X86) // __kmpc_atomic_float8_add_fp1798ATOMIC_CMPXCHG_MIX(float8, kmp_real64, sub, 64, -, fp, _Quad, 8r, 7,1799KMP_ARCH_X86) // __kmpc_atomic_float8_sub_fp1800ATOMIC_CMPXCHG_MIX(float8, kmp_real64, mul, 64, *, fp, _Quad, 8r, 7,1801KMP_ARCH_X86) // __kmpc_atomic_float8_mul_fp1802ATOMIC_CMPXCHG_MIX(float8, kmp_real64, div, 64, /, fp, _Quad, 8r, 7,1803KMP_ARCH_X86) // __kmpc_atomic_float8_div_fp18041805#if KMP_ARCH_X86 || KMP_ARCH_X86_641806ATOMIC_CRITICAL_FP(float10, long double, add, +, fp, _Quad, 10r,18071) // __kmpc_atomic_float10_add_fp1808ATOMIC_CRITICAL_FP(float10, long double, sub, -, fp, _Quad, 10r,18091) // __kmpc_atomic_float10_sub_fp1810ATOMIC_CRITICAL_FP(float10, long double, mul, *, fp, _Quad, 10r,18111) // __kmpc_atomic_float10_mul_fp1812ATOMIC_CRITICAL_FP(float10, long double, div, /, fp, _Quad, 10r,18131) // __kmpc_atomic_float10_div_fp18141815// Reverse operations1816ATOMIC_CMPXCHG_REV_MIX(fixed1, char, sub_rev, 8, -, fp, _Quad, 1i, 0,1817KMP_ARCH_X86) // __kmpc_atomic_fixed1_sub_rev_fp1818ATOMIC_CMPXCHG_REV_MIX(fixed1u, uchar, sub_rev, 8, -, fp, _Quad, 1i, 0,1819KMP_ARCH_X86) // __kmpc_atomic_fixed1u_sub_rev_fp1820ATOMIC_CMPXCHG_REV_MIX(fixed1, char, div_rev, 8, /, fp, _Quad, 1i, 0,1821KMP_ARCH_X86) // __kmpc_atomic_fixed1_div_rev_fp1822ATOMIC_CMPXCHG_REV_MIX(fixed1u, uchar, div_rev, 8, /, fp, _Quad, 1i, 0,1823KMP_ARCH_X86) // __kmpc_atomic_fixed1u_div_rev_fp18241825ATOMIC_CMPXCHG_REV_MIX(fixed2, short, sub_rev, 16, -, fp, _Quad, 2i, 1,1826KMP_ARCH_X86) // __kmpc_atomic_fixed2_sub_rev_fp1827ATOMIC_CMPXCHG_REV_MIX(fixed2u, ushort, sub_rev, 16, -, fp, _Quad, 2i, 1,1828KMP_ARCH_X86) // __kmpc_atomic_fixed2u_sub_rev_fp1829ATOMIC_CMPXCHG_REV_MIX(fixed2, short, div_rev, 16, /, fp, _Quad, 2i, 1,1830KMP_ARCH_X86) // __kmpc_atomic_fixed2_div_rev_fp1831ATOMIC_CMPXCHG_REV_MIX(fixed2u, ushort, div_rev, 16, /, fp, _Quad, 2i, 1,1832KMP_ARCH_X86) // __kmpc_atomic_fixed2u_div_rev_fp18331834ATOMIC_CMPXCHG_REV_MIX(fixed4, kmp_int32, sub_rev, 32, -, fp, _Quad, 4i, 3,18350) // __kmpc_atomic_fixed4_sub_rev_fp1836ATOMIC_CMPXCHG_REV_MIX(fixed4u, kmp_uint32, sub_rev, 32, -, fp, _Quad, 4i, 3,18370) // __kmpc_atomic_fixed4u_sub_rev_fp1838ATOMIC_CMPXCHG_REV_MIX(fixed4, kmp_int32, div_rev, 32, /, fp, _Quad, 4i, 3,18390) // __kmpc_atomic_fixed4_div_rev_fp1840ATOMIC_CMPXCHG_REV_MIX(fixed4u, kmp_uint32, div_rev, 32, /, fp, 
_Quad, 4i, 3,18410) // __kmpc_atomic_fixed4u_div_rev_fp18421843ATOMIC_CMPXCHG_REV_MIX(fixed8, kmp_int64, sub_rev, 64, -, fp, _Quad, 8i, 7,1844KMP_ARCH_X86) // __kmpc_atomic_fixed8_sub_rev_fp1845ATOMIC_CMPXCHG_REV_MIX(fixed8u, kmp_uint64, sub_rev, 64, -, fp, _Quad, 8i, 7,1846KMP_ARCH_X86) // __kmpc_atomic_fixed8u_sub_rev_fp1847ATOMIC_CMPXCHG_REV_MIX(fixed8, kmp_int64, div_rev, 64, /, fp, _Quad, 8i, 7,1848KMP_ARCH_X86) // __kmpc_atomic_fixed8_div_rev_fp1849ATOMIC_CMPXCHG_REV_MIX(fixed8u, kmp_uint64, div_rev, 64, /, fp, _Quad, 8i, 7,1850KMP_ARCH_X86) // __kmpc_atomic_fixed8u_div_rev_fp18511852ATOMIC_CMPXCHG_REV_MIX(float4, kmp_real32, sub_rev, 32, -, fp, _Quad, 4r, 3,1853KMP_ARCH_X86) // __kmpc_atomic_float4_sub_rev_fp1854ATOMIC_CMPXCHG_REV_MIX(float4, kmp_real32, div_rev, 32, /, fp, _Quad, 4r, 3,1855KMP_ARCH_X86) // __kmpc_atomic_float4_div_rev_fp18561857ATOMIC_CMPXCHG_REV_MIX(float8, kmp_real64, sub_rev, 64, -, fp, _Quad, 8r, 7,1858KMP_ARCH_X86) // __kmpc_atomic_float8_sub_rev_fp1859ATOMIC_CMPXCHG_REV_MIX(float8, kmp_real64, div_rev, 64, /, fp, _Quad, 8r, 7,1860KMP_ARCH_X86) // __kmpc_atomic_float8_div_rev_fp18611862ATOMIC_CRITICAL_REV_FP(float10, long double, sub_rev, -, fp, _Quad, 10r,18631) // __kmpc_atomic_float10_sub_rev_fp1864ATOMIC_CRITICAL_REV_FP(float10, long double, div_rev, /, fp, _Quad, 10r,18651) // __kmpc_atomic_float10_div_rev_fp1866#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */18671868#endif // KMP_HAVE_QUAD18691870#if KMP_ARCH_X86 || KMP_ARCH_X86_641871// ------------------------------------------------------------------------1872// X86 or X86_64: no alignment problems ====================================1873#if USE_CMPXCHG_FIX1874// workaround for C78287 (complex(kind=4) data type)1875#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID, TYPE, OP_ID, BITS, OP, RTYPE_ID, RTYPE, \1876LCK_ID, MASK, GOMP_FLAG) \1877ATOMIC_BEGIN_MIX(TYPE_ID, TYPE, OP_ID, RTYPE_ID, RTYPE) \1878OP_UPDATE_GOMP_CRITICAL(TYPE, OP, GOMP_FLAG) \1879OP_CMPXCHG_WORKAROUND(TYPE, BITS, OP) \1880}1881// end of the second part of the workaround for C782871882#else1883#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID, TYPE, OP_ID, BITS, OP, RTYPE_ID, RTYPE, \1884LCK_ID, MASK, GOMP_FLAG) \1885ATOMIC_BEGIN_MIX(TYPE_ID, TYPE, OP_ID, RTYPE_ID, RTYPE) \1886OP_UPDATE_GOMP_CRITICAL(TYPE, OP, GOMP_FLAG) \1887OP_CMPXCHG(TYPE, BITS, OP) \1888}1889#endif // USE_CMPXCHG_FIX1890#else1891// ------------------------------------------------------------------------1892// Code for other architectures that don't handle unaligned accesses.1893#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID, TYPE, OP_ID, BITS, OP, RTYPE_ID, RTYPE, \1894LCK_ID, MASK, GOMP_FLAG) \1895ATOMIC_BEGIN_MIX(TYPE_ID, TYPE, OP_ID, RTYPE_ID, RTYPE) \1896OP_UPDATE_GOMP_CRITICAL(TYPE, OP, GOMP_FLAG) \1897if (!((kmp_uintptr_t)lhs & 0x##MASK)) { \1898OP_CMPXCHG(TYPE, BITS, OP) /* aligned address */ \1899} else { \1900KMP_CHECK_GTID; \1901OP_UPDATE_CRITICAL(TYPE, OP, \1902LCK_ID) /* unaligned address - use critical */ \1903} \1904}1905#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */19061907ATOMIC_CMPXCHG_CMPLX(cmplx4, kmp_cmplx32, add, 64, +, cmplx8, kmp_cmplx64, 8c,19087, KMP_ARCH_X86) // __kmpc_atomic_cmplx4_add_cmplx81909ATOMIC_CMPXCHG_CMPLX(cmplx4, kmp_cmplx32, sub, 64, -, cmplx8, kmp_cmplx64, 8c,19107, KMP_ARCH_X86) // __kmpc_atomic_cmplx4_sub_cmplx81911ATOMIC_CMPXCHG_CMPLX(cmplx4, kmp_cmplx32, mul, 64, *, cmplx8, kmp_cmplx64, 8c,19127, KMP_ARCH_X86) // __kmpc_atomic_cmplx4_mul_cmplx81913ATOMIC_CMPXCHG_CMPLX(cmplx4, kmp_cmplx32, div, 64, /, cmplx8, kmp_cmplx64, 8c,19147, KMP_ARCH_X86) // 
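// Illustrative sketch (not compiled): the alignment test used by the non-x86
// variants above. MASK is the operand size minus one (for example 7 for the
// 8-byte compare-and-swap in the cmplx4/cmplx8 entries), so a nonzero result
// means the address is not naturally aligned and the update must take the
// critical-section path instead of the lock-free path.
#if 0
#include <cstdint>

static bool example_can_use_cmpxchg(const void *lhs, uintptr_t mask) {
  return ((uintptr_t)lhs & mask) == 0; // aligned -> CAS path is safe
}
#endif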
__kmpc_atomic_cmplx4_div_cmplx819151916// READ, WRITE, CAPTURE19171918// ------------------------------------------------------------------------1919// Atomic READ routines19201921// ------------------------------------------------------------------------1922// Beginning of a definition (provides name, parameters, gebug trace)1923// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned1924// fixed)1925// OP_ID - operation identifier (add, sub, mul, ...)1926// TYPE - operands' type1927#define ATOMIC_BEGIN_READ(TYPE_ID, OP_ID, TYPE, RET_TYPE) \1928RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID(ident_t *id_ref, int gtid, \1929TYPE *loc) { \1930KMP_DEBUG_ASSERT(__kmp_init_serial); \1931KA_TRACE(100, ("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid));19321933// ------------------------------------------------------------------------1934// Operation on *lhs, rhs using "compare_and_store_ret" routine1935// TYPE - operands' type1936// BITS - size in bits, used to distinguish low level calls1937// OP - operator1938// Note: temp_val introduced in order to force the compiler to read1939// *lhs only once (w/o it the compiler reads *lhs twice)1940// TODO: check if it is still necessary1941// Return old value regardless of the result of "compare & swap# operation1942#define OP_CMPXCHG_READ(TYPE, BITS, OP) \1943{ \1944TYPE KMP_ATOMIC_VOLATILE temp_val; \1945union f_i_union { \1946TYPE f_val; \1947kmp_int##BITS i_val; \1948}; \1949union f_i_union old_value; \1950temp_val = *loc; \1951old_value.f_val = temp_val; \1952old_value.i_val = KMP_COMPARE_AND_STORE_RET##BITS( \1953(kmp_int##BITS *)loc, \1954*VOLATILE_CAST(kmp_int##BITS *) & old_value.i_val, \1955*VOLATILE_CAST(kmp_int##BITS *) & old_value.i_val); \1956new_value = old_value.f_val; \1957return new_value; \1958}19591960// -------------------------------------------------------------------------1961// Operation on *lhs, rhs bound by critical section1962// OP - operator (it's supposed to contain an assignment)1963// LCK_ID - lock identifier1964// Note: don't check gtid as it should always be valid1965// 1, 2-byte - expect valid parameter, other - check before this macro1966#define OP_CRITICAL_READ(OP, LCK_ID) \1967__kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \1968\1969new_value = (*loc); \1970\1971__kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid);19721973// -------------------------------------------------------------------------1974#ifdef KMP_GOMP_COMPAT1975#define OP_GOMP_CRITICAL_READ(OP, FLAG) \1976if ((FLAG) && (__kmp_atomic_mode == 2)) { \1977KMP_CHECK_GTID; \1978OP_CRITICAL_READ(OP, 0); \1979return new_value; \1980}1981#else1982#define OP_GOMP_CRITICAL_READ(OP, FLAG)1983#endif /* KMP_GOMP_COMPAT */19841985// -------------------------------------------------------------------------1986#define ATOMIC_FIXED_READ(TYPE_ID, OP_ID, TYPE, BITS, OP, GOMP_FLAG) \1987ATOMIC_BEGIN_READ(TYPE_ID, OP_ID, TYPE, TYPE) \1988TYPE new_value; \1989OP_GOMP_CRITICAL_READ(OP## =, GOMP_FLAG) \1990new_value = KMP_TEST_THEN_ADD##BITS(loc, OP 0); \1991return new_value; \1992}1993// -------------------------------------------------------------------------1994#define ATOMIC_CMPXCHG_READ(TYPE_ID, OP_ID, TYPE, BITS, OP, GOMP_FLAG) \1995ATOMIC_BEGIN_READ(TYPE_ID, OP_ID, TYPE, TYPE) \1996TYPE new_value; \1997OP_GOMP_CRITICAL_READ(OP## =, GOMP_FLAG) \1998OP_CMPXCHG_READ(TYPE, BITS, OP) \1999}2000// ------------------------------------------------------------------------2001// Routines for Extended types: long double, _Quad, complex flavours 
(use2002// critical section)2003// TYPE_ID, OP_ID, TYPE - detailed above2004// OP - operator2005// LCK_ID - lock identifier, used to possibly distinguish lock variable2006#define ATOMIC_CRITICAL_READ(TYPE_ID, OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG) \2007ATOMIC_BEGIN_READ(TYPE_ID, OP_ID, TYPE, TYPE) \2008TYPE new_value; \2009OP_GOMP_CRITICAL_READ(OP## =, GOMP_FLAG) /* send assignment */ \2010OP_CRITICAL_READ(OP, LCK_ID) /* send assignment */ \2011return new_value; \2012}20132014// ------------------------------------------------------------------------2015// Fix for cmplx4 read (CQ220361) on Windows* OS. Regular routine with return2016// value doesn't work.2017// Let's return the read value through the additional parameter.2018#if (KMP_OS_WINDOWS)20192020#define OP_CRITICAL_READ_WRK(OP, LCK_ID) \2021__kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \2022\2023(*out) = (*loc); \2024\2025__kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid);2026// ------------------------------------------------------------------------2027#ifdef KMP_GOMP_COMPAT2028#define OP_GOMP_CRITICAL_READ_WRK(OP, FLAG) \2029if ((FLAG) && (__kmp_atomic_mode == 2)) { \2030KMP_CHECK_GTID; \2031OP_CRITICAL_READ_WRK(OP, 0); \2032}2033#else2034#define OP_GOMP_CRITICAL_READ_WRK(OP, FLAG)2035#endif /* KMP_GOMP_COMPAT */2036// ------------------------------------------------------------------------2037#define ATOMIC_BEGIN_READ_WRK(TYPE_ID, OP_ID, TYPE) \2038void __kmpc_atomic_##TYPE_ID##_##OP_ID(TYPE *out, ident_t *id_ref, int gtid, \2039TYPE *loc) { \2040KMP_DEBUG_ASSERT(__kmp_init_serial); \2041KA_TRACE(100, ("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid));20422043// ------------------------------------------------------------------------2044#define ATOMIC_CRITICAL_READ_WRK(TYPE_ID, OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG) \2045ATOMIC_BEGIN_READ_WRK(TYPE_ID, OP_ID, TYPE) \2046OP_GOMP_CRITICAL_READ_WRK(OP## =, GOMP_FLAG) /* send assignment */ \2047OP_CRITICAL_READ_WRK(OP, LCK_ID) /* send assignment */ \2048}20492050#endif // KMP_OS_WINDOWS20512052// ------------------------------------------------------------------------2053// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG2054ATOMIC_FIXED_READ(fixed4, rd, kmp_int32, 32, +, 0) // __kmpc_atomic_fixed4_rd2055ATOMIC_FIXED_READ(fixed8, rd, kmp_int64, 64, +,2056KMP_ARCH_X86) // __kmpc_atomic_fixed8_rd2057ATOMIC_CMPXCHG_READ(float4, rd, kmp_real32, 32, +,2058KMP_ARCH_X86) // __kmpc_atomic_float4_rd2059ATOMIC_CMPXCHG_READ(float8, rd, kmp_real64, 64, +,2060KMP_ARCH_X86) // __kmpc_atomic_float8_rd20612062// !!! 
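// Illustrative sketch (not compiled): the two lock-free read strategies used
// above, rewritten with std::atomic. ATOMIC_FIXED_READ reads an integer by
// atomically adding zero (KMP_TEST_THEN_ADD##BITS(loc, +0)), while
// OP_CMPXCHG_READ reads a float by compare-and-swapping the location with its
// own value and returning the observed bits. Names here are hypothetical.
#if 0
#include <atomic>
#include <cstdint>
#include <cstring>

static int32_t example_fixed4_rd(std::atomic<int32_t> *loc) {
  return loc->fetch_add(0); // atomic fetch of the current value
}

static float example_float4_rd(std::atomic<uint32_t> *loc) {
  uint32_t bits = loc->load(std::memory_order_relaxed);
  // Whether the exchange succeeds or fails, 'bits' ends up holding a value
  // that was present in *loc, which is all the read needs.
  loc->compare_exchange_strong(bits, bits);
  float result;
  std::memcpy(&result, &bits, sizeof(result)); // type-pun like f_i_union
  return result;
}
#endif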
TODO: Remove lock operations for "char" since it can't be non-atomic2063ATOMIC_CMPXCHG_READ(fixed1, rd, kmp_int8, 8, +,2064KMP_ARCH_X86) // __kmpc_atomic_fixed1_rd2065ATOMIC_CMPXCHG_READ(fixed2, rd, kmp_int16, 16, +,2066KMP_ARCH_X86) // __kmpc_atomic_fixed2_rd20672068ATOMIC_CRITICAL_READ(float10, rd, long double, +, 10r,20691) // __kmpc_atomic_float10_rd2070#if KMP_HAVE_QUAD2071ATOMIC_CRITICAL_READ(float16, rd, QUAD_LEGACY, +, 16r,20721) // __kmpc_atomic_float16_rd2073#endif // KMP_HAVE_QUAD20742075// Fix for CQ220361 on Windows* OS2076#if (KMP_OS_WINDOWS)2077ATOMIC_CRITICAL_READ_WRK(cmplx4, rd, kmp_cmplx32, +, 8c,20781) // __kmpc_atomic_cmplx4_rd2079#else2080ATOMIC_CRITICAL_READ(cmplx4, rd, kmp_cmplx32, +, 8c,20811) // __kmpc_atomic_cmplx4_rd2082#endif // (KMP_OS_WINDOWS)2083ATOMIC_CRITICAL_READ(cmplx8, rd, kmp_cmplx64, +, 16c,20841) // __kmpc_atomic_cmplx8_rd2085ATOMIC_CRITICAL_READ(cmplx10, rd, kmp_cmplx80, +, 20c,20861) // __kmpc_atomic_cmplx10_rd2087#if KMP_HAVE_QUAD2088ATOMIC_CRITICAL_READ(cmplx16, rd, CPLX128_LEG, +, 32c,20891) // __kmpc_atomic_cmplx16_rd2090#if (KMP_ARCH_X86)2091ATOMIC_CRITICAL_READ(float16, a16_rd, Quad_a16_t, +, 16r,20921) // __kmpc_atomic_float16_a16_rd2093ATOMIC_CRITICAL_READ(cmplx16, a16_rd, kmp_cmplx128_a16_t, +, 32c,20941) // __kmpc_atomic_cmplx16_a16_rd2095#endif // (KMP_ARCH_X86)2096#endif // KMP_HAVE_QUAD20972098// ------------------------------------------------------------------------2099// Atomic WRITE routines21002101#define ATOMIC_XCHG_WR(TYPE_ID, OP_ID, TYPE, BITS, OP, GOMP_FLAG) \2102ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \2103OP_GOMP_CRITICAL(OP, GOMP_FLAG) \2104KMP_XCHG_FIXED##BITS(lhs, rhs); \2105}2106// ------------------------------------------------------------------------2107#define ATOMIC_XCHG_FLOAT_WR(TYPE_ID, OP_ID, TYPE, BITS, OP, GOMP_FLAG) \2108ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \2109OP_GOMP_CRITICAL(OP, GOMP_FLAG) \2110KMP_XCHG_REAL##BITS(lhs, rhs); \2111}21122113// ------------------------------------------------------------------------2114// Operation on *lhs, rhs using "compare_and_store" routine2115// TYPE - operands' type2116// BITS - size in bits, used to distinguish low level calls2117// OP - operator2118// Note: temp_val introduced in order to force the compiler to read2119// *lhs only once (w/o it the compiler reads *lhs twice)2120#define OP_CMPXCHG_WR(TYPE, BITS, OP) \2121{ \2122TYPE KMP_ATOMIC_VOLATILE temp_val; \2123TYPE old_value, new_value; \2124temp_val = *lhs; \2125old_value = temp_val; \2126new_value = rhs; \2127while (!KMP_COMPARE_AND_STORE_ACQ##BITS( \2128(kmp_int##BITS *)lhs, *VOLATILE_CAST(kmp_int##BITS *) & old_value, \2129*VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \2130temp_val = *lhs; \2131old_value = temp_val; \2132new_value = rhs; \2133} \2134}21352136// -------------------------------------------------------------------------2137#define ATOMIC_CMPXCHG_WR(TYPE_ID, OP_ID, TYPE, BITS, OP, GOMP_FLAG) \2138ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \2139OP_GOMP_CRITICAL(OP, GOMP_FLAG) \2140OP_CMPXCHG_WR(TYPE, BITS, OP) \2141}21422143// ------------------------------------------------------------------------2144// Routines for Extended types: long double, _Quad, complex flavours (use2145// critical section)2146// TYPE_ID, OP_ID, TYPE - detailed above2147// OP - operator2148// LCK_ID - lock identifier, used to possibly distinguish lock variable2149#define ATOMIC_CRITICAL_WR(TYPE_ID, OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG) \2150ATOMIC_BEGIN(TYPE_ID, OP_ID, TYPE, void) \2151OP_GOMP_CRITICAL(OP, GOMP_FLAG) /* 
send assignment */ \2152OP_CRITICAL(OP, LCK_ID) /* send assignment */ \2153}2154// -------------------------------------------------------------------------21552156ATOMIC_XCHG_WR(fixed1, wr, kmp_int8, 8, =,2157KMP_ARCH_X86) // __kmpc_atomic_fixed1_wr2158ATOMIC_XCHG_WR(fixed2, wr, kmp_int16, 16, =,2159KMP_ARCH_X86) // __kmpc_atomic_fixed2_wr2160ATOMIC_XCHG_WR(fixed4, wr, kmp_int32, 32, =,2161KMP_ARCH_X86) // __kmpc_atomic_fixed4_wr2162#if (KMP_ARCH_X86)2163ATOMIC_CMPXCHG_WR(fixed8, wr, kmp_int64, 64, =,2164KMP_ARCH_X86) // __kmpc_atomic_fixed8_wr2165#else2166ATOMIC_XCHG_WR(fixed8, wr, kmp_int64, 64, =,2167KMP_ARCH_X86) // __kmpc_atomic_fixed8_wr2168#endif // (KMP_ARCH_X86)21692170ATOMIC_XCHG_FLOAT_WR(float4, wr, kmp_real32, 32, =,2171KMP_ARCH_X86) // __kmpc_atomic_float4_wr2172#if (KMP_ARCH_X86)2173ATOMIC_CMPXCHG_WR(float8, wr, kmp_real64, 64, =,2174KMP_ARCH_X86) // __kmpc_atomic_float8_wr2175#else2176ATOMIC_XCHG_FLOAT_WR(float8, wr, kmp_real64, 64, =,2177KMP_ARCH_X86) // __kmpc_atomic_float8_wr2178#endif // (KMP_ARCH_X86)21792180ATOMIC_CRITICAL_WR(float10, wr, long double, =, 10r,21811) // __kmpc_atomic_float10_wr2182#if KMP_HAVE_QUAD2183ATOMIC_CRITICAL_WR(float16, wr, QUAD_LEGACY, =, 16r,21841) // __kmpc_atomic_float16_wr2185#endif // KMP_HAVE_QUAD2186ATOMIC_CRITICAL_WR(cmplx4, wr, kmp_cmplx32, =, 8c, 1) // __kmpc_atomic_cmplx4_wr2187ATOMIC_CRITICAL_WR(cmplx8, wr, kmp_cmplx64, =, 16c,21881) // __kmpc_atomic_cmplx8_wr2189ATOMIC_CRITICAL_WR(cmplx10, wr, kmp_cmplx80, =, 20c,21901) // __kmpc_atomic_cmplx10_wr2191#if KMP_HAVE_QUAD2192ATOMIC_CRITICAL_WR(cmplx16, wr, CPLX128_LEG, =, 32c,21931) // __kmpc_atomic_cmplx16_wr2194#if (KMP_ARCH_X86)2195ATOMIC_CRITICAL_WR(float16, a16_wr, Quad_a16_t, =, 16r,21961) // __kmpc_atomic_float16_a16_wr2197ATOMIC_CRITICAL_WR(cmplx16, a16_wr, kmp_cmplx128_a16_t, =, 32c,21981) // __kmpc_atomic_cmplx16_a16_wr2199#endif // (KMP_ARCH_X86)2200#endif // KMP_HAVE_QUAD22012202// ------------------------------------------------------------------------2203// Atomic CAPTURE routines22042205// Beginning of a definition (provides name, parameters, gebug trace)2206// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned2207// fixed)2208// OP_ID - operation identifier (add, sub, mul, ...)2209// TYPE - operands' type2210#define ATOMIC_BEGIN_CPT(TYPE_ID, OP_ID, TYPE, RET_TYPE) \2211RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID(ident_t *id_ref, int gtid, \2212TYPE *lhs, TYPE rhs, int flag) { \2213KMP_DEBUG_ASSERT(__kmp_init_serial); \2214KA_TRACE(100, ("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid));22152216// -------------------------------------------------------------------------2217// Operation on *lhs, rhs bound by critical section2218// OP - operator (it's supposed to contain an assignment)2219// LCK_ID - lock identifier2220// Note: don't check gtid as it should always be valid2221// 1, 2-byte - expect valid parameter, other - check before this macro2222#define OP_CRITICAL_CPT(OP, LCK_ID) \2223__kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \2224\2225if (flag) { \2226(*lhs) OP rhs; \2227new_value = (*lhs); \2228} else { \2229new_value = (*lhs); \2230(*lhs) OP rhs; \2231} \2232\2233__kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \2234return new_value;22352236#define OP_UPDATE_CRITICAL_CPT(TYPE, OP, LCK_ID) \2237__kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \2238\2239if (flag) { \2240(*lhs) = (TYPE)((*lhs)OP rhs); \2241new_value = (*lhs); \2242} else { \2243new_value = (*lhs); \2244(*lhs) = (TYPE)((*lhs)OP rhs); \2245} 
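// Illustrative sketch (not compiled): an atomic write is simply an atomic
// exchange whose result is discarded (ATOMIC_XCHG_WR above); where the
// operand is wider than the native exchange, e.g. 8-byte stores on IA-32, the
// write falls back to the compare-and-swap loop of ATOMIC_CMPXCHG_WR. A
// hypothetical std::atomic equivalent of __kmpc_atomic_fixed4_wr:
#if 0
#include <atomic>
#include <cstdint>

static void example_fixed4_wr(std::atomic<int32_t> *lhs, int32_t rhs) {
  lhs->exchange(rhs); // single atomic swap; the previous value is ignored
}
#endif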
\2246\2247__kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \2248return new_value;22492250// ------------------------------------------------------------------------2251#ifdef KMP_GOMP_COMPAT2252#define OP_GOMP_CRITICAL_CPT(TYPE, OP, FLAG) \2253if ((FLAG) && (__kmp_atomic_mode == 2)) { \2254KMP_CHECK_GTID; \2255OP_UPDATE_CRITICAL_CPT(TYPE, OP, 0); \2256}2257#else2258#define OP_GOMP_CRITICAL_CPT(TYPE, OP, FLAG)2259#endif /* KMP_GOMP_COMPAT */22602261// ------------------------------------------------------------------------2262// Operation on *lhs, rhs using "compare_and_store" routine2263// TYPE - operands' type2264// BITS - size in bits, used to distinguish low level calls2265// OP - operator2266// Note: temp_val introduced in order to force the compiler to read2267// *lhs only once (w/o it the compiler reads *lhs twice)2268#define OP_CMPXCHG_CPT(TYPE, BITS, OP) \2269{ \2270TYPE KMP_ATOMIC_VOLATILE temp_val; \2271TYPE old_value, new_value; \2272temp_val = *lhs; \2273old_value = temp_val; \2274new_value = (TYPE)(old_value OP rhs); \2275while (!KMP_COMPARE_AND_STORE_ACQ##BITS( \2276(kmp_int##BITS *)lhs, *VOLATILE_CAST(kmp_int##BITS *) & old_value, \2277*VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \2278temp_val = *lhs; \2279old_value = temp_val; \2280new_value = (TYPE)(old_value OP rhs); \2281} \2282if (flag) { \2283return new_value; \2284} else \2285return old_value; \2286}22872288// -------------------------------------------------------------------------2289#define ATOMIC_CMPXCHG_CPT(TYPE_ID, OP_ID, TYPE, BITS, OP, GOMP_FLAG) \2290ATOMIC_BEGIN_CPT(TYPE_ID, OP_ID, TYPE, TYPE) \2291TYPE new_value; \2292(void)new_value; \2293OP_GOMP_CRITICAL_CPT(TYPE, OP, GOMP_FLAG) \2294OP_CMPXCHG_CPT(TYPE, BITS, OP) \2295}22962297// -------------------------------------------------------------------------2298#define ATOMIC_FIXED_ADD_CPT(TYPE_ID, OP_ID, TYPE, BITS, OP, GOMP_FLAG) \2299ATOMIC_BEGIN_CPT(TYPE_ID, OP_ID, TYPE, TYPE) \2300TYPE old_value, new_value; \2301(void)new_value; \2302OP_GOMP_CRITICAL_CPT(TYPE, OP, GOMP_FLAG) \2303/* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \2304old_value = KMP_TEST_THEN_ADD##BITS(lhs, OP rhs); \2305if (flag) { \2306return old_value OP rhs; \2307} else \2308return old_value; \2309}2310// -------------------------------------------------------------------------23112312ATOMIC_FIXED_ADD_CPT(fixed4, add_cpt, kmp_int32, 32, +,23130) // __kmpc_atomic_fixed4_add_cpt2314ATOMIC_FIXED_ADD_CPT(fixed4, sub_cpt, kmp_int32, 32, -,23150) // __kmpc_atomic_fixed4_sub_cpt2316ATOMIC_FIXED_ADD_CPT(fixed8, add_cpt, kmp_int64, 64, +,2317KMP_ARCH_X86) // __kmpc_atomic_fixed8_add_cpt2318ATOMIC_FIXED_ADD_CPT(fixed8, sub_cpt, kmp_int64, 64, -,2319KMP_ARCH_X86) // __kmpc_atomic_fixed8_sub_cpt23202321ATOMIC_CMPXCHG_CPT(float4, add_cpt, kmp_real32, 32, +,2322KMP_ARCH_X86) // __kmpc_atomic_float4_add_cpt2323ATOMIC_CMPXCHG_CPT(float4, sub_cpt, kmp_real32, 32, -,2324KMP_ARCH_X86) // __kmpc_atomic_float4_sub_cpt2325ATOMIC_CMPXCHG_CPT(float8, add_cpt, kmp_real64, 64, +,2326KMP_ARCH_X86) // __kmpc_atomic_float8_add_cpt2327ATOMIC_CMPXCHG_CPT(float8, sub_cpt, kmp_real64, 64, -,2328KMP_ARCH_X86) // __kmpc_atomic_float8_sub_cpt23292330// ------------------------------------------------------------------------2331// Entries definition for integer operands2332// TYPE_ID - operands type and size (fixed4, float4)2333// OP_ID - operation identifier (add, sub, mul, ...)2334// TYPE - operand type2335// BITS - size in bits, used to distinguish low level calls2336// OP - operator (used in 
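// Illustrative sketch (not compiled): the capture semantics implemented by
// ATOMIC_FIXED_ADD_CPT / OP_CMPXCHG_CPT above, rewritten with std::atomic for
// a 32-bit add. The update itself is atomic; 'flag' only selects whether the
// value before or after the update is handed back to the caller. The function
// name is hypothetical.
#if 0
#include <atomic>
#include <cstdint>

static int32_t example_fixed4_add_cpt(std::atomic<int32_t> *lhs, int32_t rhs,
                                      int flag) {
  int32_t old_value = lhs->fetch_add(rhs); // atomic x += rhs, returns old x
  return flag ? old_value + rhs : old_value; // captured after vs. before
}
#endif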
critical section)2337// TYPE_ID,OP_ID, TYPE, BITS,OP,GOMP_FLAG2338// ------------------------------------------------------------------------2339// Routines for ATOMIC integer operands, other operators2340// ------------------------------------------------------------------------2341// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG2342ATOMIC_CMPXCHG_CPT(fixed1, add_cpt, kmp_int8, 8, +,2343KMP_ARCH_X86) // __kmpc_atomic_fixed1_add_cpt2344ATOMIC_CMPXCHG_CPT(fixed1, andb_cpt, kmp_int8, 8, &,23450) // __kmpc_atomic_fixed1_andb_cpt2346ATOMIC_CMPXCHG_CPT(fixed1, div_cpt, kmp_int8, 8, /,2347KMP_ARCH_X86) // __kmpc_atomic_fixed1_div_cpt2348ATOMIC_CMPXCHG_CPT(fixed1u, div_cpt, kmp_uint8, 8, /,2349KMP_ARCH_X86) // __kmpc_atomic_fixed1u_div_cpt2350ATOMIC_CMPXCHG_CPT(fixed1, mul_cpt, kmp_int8, 8, *,2351KMP_ARCH_X86) // __kmpc_atomic_fixed1_mul_cpt2352ATOMIC_CMPXCHG_CPT(fixed1, orb_cpt, kmp_int8, 8, |,23530) // __kmpc_atomic_fixed1_orb_cpt2354ATOMIC_CMPXCHG_CPT(fixed1, shl_cpt, kmp_int8, 8, <<,2355KMP_ARCH_X86) // __kmpc_atomic_fixed1_shl_cpt2356ATOMIC_CMPXCHG_CPT(fixed1, shr_cpt, kmp_int8, 8, >>,2357KMP_ARCH_X86) // __kmpc_atomic_fixed1_shr_cpt2358ATOMIC_CMPXCHG_CPT(fixed1u, shr_cpt, kmp_uint8, 8, >>,2359KMP_ARCH_X86) // __kmpc_atomic_fixed1u_shr_cpt2360ATOMIC_CMPXCHG_CPT(fixed1, sub_cpt, kmp_int8, 8, -,2361KMP_ARCH_X86) // __kmpc_atomic_fixed1_sub_cpt2362ATOMIC_CMPXCHG_CPT(fixed1, xor_cpt, kmp_int8, 8, ^,23630) // __kmpc_atomic_fixed1_xor_cpt2364ATOMIC_CMPXCHG_CPT(fixed2, add_cpt, kmp_int16, 16, +,2365KMP_ARCH_X86) // __kmpc_atomic_fixed2_add_cpt2366ATOMIC_CMPXCHG_CPT(fixed2, andb_cpt, kmp_int16, 16, &,23670) // __kmpc_atomic_fixed2_andb_cpt2368ATOMIC_CMPXCHG_CPT(fixed2, div_cpt, kmp_int16, 16, /,2369KMP_ARCH_X86) // __kmpc_atomic_fixed2_div_cpt2370ATOMIC_CMPXCHG_CPT(fixed2u, div_cpt, kmp_uint16, 16, /,2371KMP_ARCH_X86) // __kmpc_atomic_fixed2u_div_cpt2372ATOMIC_CMPXCHG_CPT(fixed2, mul_cpt, kmp_int16, 16, *,2373KMP_ARCH_X86) // __kmpc_atomic_fixed2_mul_cpt2374ATOMIC_CMPXCHG_CPT(fixed2, orb_cpt, kmp_int16, 16, |,23750) // __kmpc_atomic_fixed2_orb_cpt2376ATOMIC_CMPXCHG_CPT(fixed2, shl_cpt, kmp_int16, 16, <<,2377KMP_ARCH_X86) // __kmpc_atomic_fixed2_shl_cpt2378ATOMIC_CMPXCHG_CPT(fixed2, shr_cpt, kmp_int16, 16, >>,2379KMP_ARCH_X86) // __kmpc_atomic_fixed2_shr_cpt2380ATOMIC_CMPXCHG_CPT(fixed2u, shr_cpt, kmp_uint16, 16, >>,2381KMP_ARCH_X86) // __kmpc_atomic_fixed2u_shr_cpt2382ATOMIC_CMPXCHG_CPT(fixed2, sub_cpt, kmp_int16, 16, -,2383KMP_ARCH_X86) // __kmpc_atomic_fixed2_sub_cpt2384ATOMIC_CMPXCHG_CPT(fixed2, xor_cpt, kmp_int16, 16, ^,23850) // __kmpc_atomic_fixed2_xor_cpt2386ATOMIC_CMPXCHG_CPT(fixed4, andb_cpt, kmp_int32, 32, &,23870) // __kmpc_atomic_fixed4_andb_cpt2388ATOMIC_CMPXCHG_CPT(fixed4, div_cpt, kmp_int32, 32, /,2389KMP_ARCH_X86) // __kmpc_atomic_fixed4_div_cpt2390ATOMIC_CMPXCHG_CPT(fixed4u, div_cpt, kmp_uint32, 32, /,2391KMP_ARCH_X86) // __kmpc_atomic_fixed4u_div_cpt2392ATOMIC_CMPXCHG_CPT(fixed4, mul_cpt, kmp_int32, 32, *,2393KMP_ARCH_X86) // __kmpc_atomic_fixed4_mul_cpt2394ATOMIC_CMPXCHG_CPT(fixed4, orb_cpt, kmp_int32, 32, |,23950) // __kmpc_atomic_fixed4_orb_cpt2396ATOMIC_CMPXCHG_CPT(fixed4, shl_cpt, kmp_int32, 32, <<,2397KMP_ARCH_X86) // __kmpc_atomic_fixed4_shl_cpt2398ATOMIC_CMPXCHG_CPT(fixed4, shr_cpt, kmp_int32, 32, >>,2399KMP_ARCH_X86) // __kmpc_atomic_fixed4_shr_cpt2400ATOMIC_CMPXCHG_CPT(fixed4u, shr_cpt, kmp_uint32, 32, >>,2401KMP_ARCH_X86) // __kmpc_atomic_fixed4u_shr_cpt2402ATOMIC_CMPXCHG_CPT(fixed4, xor_cpt, kmp_int32, 32, ^,24030) // __kmpc_atomic_fixed4_xor_cpt2404ATOMIC_CMPXCHG_CPT(fixed8, 
andb_cpt, kmp_int64, 64, &,2405KMP_ARCH_X86) // __kmpc_atomic_fixed8_andb_cpt2406ATOMIC_CMPXCHG_CPT(fixed8, div_cpt, kmp_int64, 64, /,2407KMP_ARCH_X86) // __kmpc_atomic_fixed8_div_cpt2408ATOMIC_CMPXCHG_CPT(fixed8u, div_cpt, kmp_uint64, 64, /,2409KMP_ARCH_X86) // __kmpc_atomic_fixed8u_div_cpt2410ATOMIC_CMPXCHG_CPT(fixed8, mul_cpt, kmp_int64, 64, *,2411KMP_ARCH_X86) // __kmpc_atomic_fixed8_mul_cpt2412ATOMIC_CMPXCHG_CPT(fixed8, orb_cpt, kmp_int64, 64, |,2413KMP_ARCH_X86) // __kmpc_atomic_fixed8_orb_cpt2414ATOMIC_CMPXCHG_CPT(fixed8, shl_cpt, kmp_int64, 64, <<,2415KMP_ARCH_X86) // __kmpc_atomic_fixed8_shl_cpt2416ATOMIC_CMPXCHG_CPT(fixed8, shr_cpt, kmp_int64, 64, >>,2417KMP_ARCH_X86) // __kmpc_atomic_fixed8_shr_cpt2418ATOMIC_CMPXCHG_CPT(fixed8u, shr_cpt, kmp_uint64, 64, >>,2419KMP_ARCH_X86) // __kmpc_atomic_fixed8u_shr_cpt2420ATOMIC_CMPXCHG_CPT(fixed8, xor_cpt, kmp_int64, 64, ^,2421KMP_ARCH_X86) // __kmpc_atomic_fixed8_xor_cpt2422ATOMIC_CMPXCHG_CPT(float4, div_cpt, kmp_real32, 32, /,2423KMP_ARCH_X86) // __kmpc_atomic_float4_div_cpt2424ATOMIC_CMPXCHG_CPT(float4, mul_cpt, kmp_real32, 32, *,2425KMP_ARCH_X86) // __kmpc_atomic_float4_mul_cpt2426ATOMIC_CMPXCHG_CPT(float8, div_cpt, kmp_real64, 64, /,2427KMP_ARCH_X86) // __kmpc_atomic_float8_div_cpt2428ATOMIC_CMPXCHG_CPT(float8, mul_cpt, kmp_real64, 64, *,2429KMP_ARCH_X86) // __kmpc_atomic_float8_mul_cpt2430// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG24312432// CAPTURE routines for mixed types RHS=float162433#if KMP_HAVE_QUAD24342435// Beginning of a definition (provides name, parameters, gebug trace)2436// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned2437// fixed)2438// OP_ID - operation identifier (add, sub, mul, ...)2439// TYPE - operands' type2440#define ATOMIC_BEGIN_CPT_MIX(TYPE_ID, OP_ID, TYPE, RTYPE_ID, RTYPE) \2441TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID##_##RTYPE_ID( \2442ident_t *id_ref, int gtid, TYPE *lhs, RTYPE rhs, int flag) { \2443KMP_DEBUG_ASSERT(__kmp_init_serial); \2444KA_TRACE(100, \2445("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_" #RTYPE_ID ": T#%d\n", \2446gtid));24472448// -------------------------------------------------------------------------2449#define ATOMIC_CMPXCHG_CPT_MIX(TYPE_ID, TYPE, OP_ID, BITS, OP, RTYPE_ID, \2450RTYPE, LCK_ID, MASK, GOMP_FLAG) \2451ATOMIC_BEGIN_CPT_MIX(TYPE_ID, OP_ID, TYPE, RTYPE_ID, RTYPE) \2452TYPE new_value; \2453(void)new_value; \2454OP_GOMP_CRITICAL_CPT(TYPE, OP, GOMP_FLAG) \2455OP_CMPXCHG_CPT(TYPE, BITS, OP) \2456}24572458// -------------------------------------------------------------------------2459#define ATOMIC_CRITICAL_CPT_MIX(TYPE_ID, TYPE, OP_ID, OP, RTYPE_ID, RTYPE, \2460LCK_ID, GOMP_FLAG) \2461ATOMIC_BEGIN_CPT_MIX(TYPE_ID, OP_ID, TYPE, RTYPE_ID, RTYPE) \2462TYPE new_value; \2463(void)new_value; \2464OP_GOMP_CRITICAL_CPT(TYPE, OP, GOMP_FLAG) /* send assignment */ \2465OP_UPDATE_CRITICAL_CPT(TYPE, OP, LCK_ID) /* send assignment */ \2466}24672468ATOMIC_CMPXCHG_CPT_MIX(fixed1, char, add_cpt, 8, +, fp, _Quad, 1i, 0,2469KMP_ARCH_X86) // __kmpc_atomic_fixed1_add_cpt_fp2470ATOMIC_CMPXCHG_CPT_MIX(fixed1u, uchar, add_cpt, 8, +, fp, _Quad, 1i, 0,2471KMP_ARCH_X86) // __kmpc_atomic_fixed1u_add_cpt_fp2472ATOMIC_CMPXCHG_CPT_MIX(fixed1, char, sub_cpt, 8, -, fp, _Quad, 1i, 0,2473KMP_ARCH_X86) // __kmpc_atomic_fixed1_sub_cpt_fp2474ATOMIC_CMPXCHG_CPT_MIX(fixed1u, uchar, sub_cpt, 8, -, fp, _Quad, 1i, 0,2475KMP_ARCH_X86) // __kmpc_atomic_fixed1u_sub_cpt_fp2476ATOMIC_CMPXCHG_CPT_MIX(fixed1, char, mul_cpt, 8, *, fp, _Quad, 1i, 0,2477KMP_ARCH_X86) // 
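// Illustrative sketch (not compiled): a capture with a wider RHS, in the
// spirit of ATOMIC_CMPXCHG_CPT_MIX above (long double stands in for _Quad
// here, and the name is hypothetical). The arithmetic is done in the wide
// type, the result is cast back to the 1-byte LHS type, and 'flag' picks the
// value to return, just as in the same-type capture routines.
#if 0
#include <atomic>
#include <cstdint>

static int8_t example_fixed1_add_cpt_wide(std::atomic<int8_t> *lhs,
                                          long double rhs, int flag) {
  int8_t old_value = lhs->load(std::memory_order_relaxed);
  int8_t new_value = (int8_t)(old_value + rhs);
  while (!lhs->compare_exchange_weak(old_value, new_value)) {
    new_value = (int8_t)(old_value + rhs); // old_value reloaded; recompute
  }
  return flag ? new_value : old_value;
}
#endif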
__kmpc_atomic_fixed1_mul_cpt_fp2478ATOMIC_CMPXCHG_CPT_MIX(fixed1u, uchar, mul_cpt, 8, *, fp, _Quad, 1i, 0,2479KMP_ARCH_X86) // __kmpc_atomic_fixed1u_mul_cpt_fp2480ATOMIC_CMPXCHG_CPT_MIX(fixed1, char, div_cpt, 8, /, fp, _Quad, 1i, 0,2481KMP_ARCH_X86) // __kmpc_atomic_fixed1_div_cpt_fp2482ATOMIC_CMPXCHG_CPT_MIX(fixed1u, uchar, div_cpt, 8, /, fp, _Quad, 1i, 0,2483KMP_ARCH_X86) // __kmpc_atomic_fixed1u_div_cpt_fp24842485ATOMIC_CMPXCHG_CPT_MIX(fixed2, short, add_cpt, 16, +, fp, _Quad, 2i, 1,2486KMP_ARCH_X86) // __kmpc_atomic_fixed2_add_cpt_fp2487ATOMIC_CMPXCHG_CPT_MIX(fixed2u, ushort, add_cpt, 16, +, fp, _Quad, 2i, 1,2488KMP_ARCH_X86) // __kmpc_atomic_fixed2u_add_cpt_fp2489ATOMIC_CMPXCHG_CPT_MIX(fixed2, short, sub_cpt, 16, -, fp, _Quad, 2i, 1,2490KMP_ARCH_X86) // __kmpc_atomic_fixed2_sub_cpt_fp2491ATOMIC_CMPXCHG_CPT_MIX(fixed2u, ushort, sub_cpt, 16, -, fp, _Quad, 2i, 1,2492KMP_ARCH_X86) // __kmpc_atomic_fixed2u_sub_cpt_fp2493ATOMIC_CMPXCHG_CPT_MIX(fixed2, short, mul_cpt, 16, *, fp, _Quad, 2i, 1,2494KMP_ARCH_X86) // __kmpc_atomic_fixed2_mul_cpt_fp2495ATOMIC_CMPXCHG_CPT_MIX(fixed2u, ushort, mul_cpt, 16, *, fp, _Quad, 2i, 1,2496KMP_ARCH_X86) // __kmpc_atomic_fixed2u_mul_cpt_fp2497ATOMIC_CMPXCHG_CPT_MIX(fixed2, short, div_cpt, 16, /, fp, _Quad, 2i, 1,2498KMP_ARCH_X86) // __kmpc_atomic_fixed2_div_cpt_fp2499ATOMIC_CMPXCHG_CPT_MIX(fixed2u, ushort, div_cpt, 16, /, fp, _Quad, 2i, 1,2500KMP_ARCH_X86) // __kmpc_atomic_fixed2u_div_cpt_fp25012502ATOMIC_CMPXCHG_CPT_MIX(fixed4, kmp_int32, add_cpt, 32, +, fp, _Quad, 4i, 3,25030) // __kmpc_atomic_fixed4_add_cpt_fp2504ATOMIC_CMPXCHG_CPT_MIX(fixed4u, kmp_uint32, add_cpt, 32, +, fp, _Quad, 4i, 3,25050) // __kmpc_atomic_fixed4u_add_cpt_fp2506ATOMIC_CMPXCHG_CPT_MIX(fixed4, kmp_int32, sub_cpt, 32, -, fp, _Quad, 4i, 3,25070) // __kmpc_atomic_fixed4_sub_cpt_fp2508ATOMIC_CMPXCHG_CPT_MIX(fixed4u, kmp_uint32, sub_cpt, 32, -, fp, _Quad, 4i, 3,25090) // __kmpc_atomic_fixed4u_sub_cpt_fp2510ATOMIC_CMPXCHG_CPT_MIX(fixed4, kmp_int32, mul_cpt, 32, *, fp, _Quad, 4i, 3,25110) // __kmpc_atomic_fixed4_mul_cpt_fp2512ATOMIC_CMPXCHG_CPT_MIX(fixed4u, kmp_uint32, mul_cpt, 32, *, fp, _Quad, 4i, 3,25130) // __kmpc_atomic_fixed4u_mul_cpt_fp2514ATOMIC_CMPXCHG_CPT_MIX(fixed4, kmp_int32, div_cpt, 32, /, fp, _Quad, 4i, 3,25150) // __kmpc_atomic_fixed4_div_cpt_fp2516ATOMIC_CMPXCHG_CPT_MIX(fixed4u, kmp_uint32, div_cpt, 32, /, fp, _Quad, 4i, 3,25170) // __kmpc_atomic_fixed4u_div_cpt_fp25182519ATOMIC_CMPXCHG_CPT_MIX(fixed8, kmp_int64, add_cpt, 64, +, fp, _Quad, 8i, 7,2520KMP_ARCH_X86) // __kmpc_atomic_fixed8_add_cpt_fp2521ATOMIC_CMPXCHG_CPT_MIX(fixed8u, kmp_uint64, add_cpt, 64, +, fp, _Quad, 8i, 7,2522KMP_ARCH_X86) // __kmpc_atomic_fixed8u_add_cpt_fp2523ATOMIC_CMPXCHG_CPT_MIX(fixed8, kmp_int64, sub_cpt, 64, -, fp, _Quad, 8i, 7,2524KMP_ARCH_X86) // __kmpc_atomic_fixed8_sub_cpt_fp2525ATOMIC_CMPXCHG_CPT_MIX(fixed8u, kmp_uint64, sub_cpt, 64, -, fp, _Quad, 8i, 7,2526KMP_ARCH_X86) // __kmpc_atomic_fixed8u_sub_cpt_fp2527ATOMIC_CMPXCHG_CPT_MIX(fixed8, kmp_int64, mul_cpt, 64, *, fp, _Quad, 8i, 7,2528KMP_ARCH_X86) // __kmpc_atomic_fixed8_mul_cpt_fp2529ATOMIC_CMPXCHG_CPT_MIX(fixed8u, kmp_uint64, mul_cpt, 64, *, fp, _Quad, 8i, 7,2530KMP_ARCH_X86) // __kmpc_atomic_fixed8u_mul_cpt_fp2531ATOMIC_CMPXCHG_CPT_MIX(fixed8, kmp_int64, div_cpt, 64, /, fp, _Quad, 8i, 7,2532KMP_ARCH_X86) // __kmpc_atomic_fixed8_div_cpt_fp2533ATOMIC_CMPXCHG_CPT_MIX(fixed8u, kmp_uint64, div_cpt, 64, /, fp, _Quad, 8i, 7,2534KMP_ARCH_X86) // __kmpc_atomic_fixed8u_div_cpt_fp25352536ATOMIC_CMPXCHG_CPT_MIX(float4, kmp_real32, add_cpt, 32, +, fp, 
_Quad, 4r, 3,2537KMP_ARCH_X86) // __kmpc_atomic_float4_add_cpt_fp2538ATOMIC_CMPXCHG_CPT_MIX(float4, kmp_real32, sub_cpt, 32, -, fp, _Quad, 4r, 3,2539KMP_ARCH_X86) // __kmpc_atomic_float4_sub_cpt_fp2540ATOMIC_CMPXCHG_CPT_MIX(float4, kmp_real32, mul_cpt, 32, *, fp, _Quad, 4r, 3,2541KMP_ARCH_X86) // __kmpc_atomic_float4_mul_cpt_fp2542ATOMIC_CMPXCHG_CPT_MIX(float4, kmp_real32, div_cpt, 32, /, fp, _Quad, 4r, 3,2543KMP_ARCH_X86) // __kmpc_atomic_float4_div_cpt_fp25442545ATOMIC_CMPXCHG_CPT_MIX(float8, kmp_real64, add_cpt, 64, +, fp, _Quad, 8r, 7,2546KMP_ARCH_X86) // __kmpc_atomic_float8_add_cpt_fp2547ATOMIC_CMPXCHG_CPT_MIX(float8, kmp_real64, sub_cpt, 64, -, fp, _Quad, 8r, 7,2548KMP_ARCH_X86) // __kmpc_atomic_float8_sub_cpt_fp2549ATOMIC_CMPXCHG_CPT_MIX(float8, kmp_real64, mul_cpt, 64, *, fp, _Quad, 8r, 7,2550KMP_ARCH_X86) // __kmpc_atomic_float8_mul_cpt_fp2551ATOMIC_CMPXCHG_CPT_MIX(float8, kmp_real64, div_cpt, 64, /, fp, _Quad, 8r, 7,2552KMP_ARCH_X86) // __kmpc_atomic_float8_div_cpt_fp25532554ATOMIC_CRITICAL_CPT_MIX(float10, long double, add_cpt, +, fp, _Quad, 10r,25551) // __kmpc_atomic_float10_add_cpt_fp2556ATOMIC_CRITICAL_CPT_MIX(float10, long double, sub_cpt, -, fp, _Quad, 10r,25571) // __kmpc_atomic_float10_sub_cpt_fp2558ATOMIC_CRITICAL_CPT_MIX(float10, long double, mul_cpt, *, fp, _Quad, 10r,25591) // __kmpc_atomic_float10_mul_cpt_fp2560ATOMIC_CRITICAL_CPT_MIX(float10, long double, div_cpt, /, fp, _Quad, 10r,25611) // __kmpc_atomic_float10_div_cpt_fp25622563#endif // KMP_HAVE_QUAD25642565// ------------------------------------------------------------------------2566// Routines for C/C++ Reduction operators && and ||25672568// -------------------------------------------------------------------------2569// Operation on *lhs, rhs bound by critical section2570// OP - operator (it's supposed to contain an assignment)2571// LCK_ID - lock identifier2572// Note: don't check gtid as it should always be valid2573// 1, 2-byte - expect valid parameter, other - check before this macro2574#define OP_CRITICAL_L_CPT(OP, LCK_ID) \2575__kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \2576\2577if (flag) { \2578new_value OP rhs; \2579(*lhs) = new_value; \2580} else { \2581new_value = (*lhs); \2582(*lhs) OP rhs; \2583} \2584\2585__kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid);25862587// ------------------------------------------------------------------------2588#ifdef KMP_GOMP_COMPAT2589#define OP_GOMP_CRITICAL_L_CPT(OP, FLAG) \2590if ((FLAG) && (__kmp_atomic_mode == 2)) { \2591KMP_CHECK_GTID; \2592OP_CRITICAL_L_CPT(OP, 0); \2593return new_value; \2594}2595#else2596#define OP_GOMP_CRITICAL_L_CPT(OP, FLAG)2597#endif /* KMP_GOMP_COMPAT */25982599// ------------------------------------------------------------------------2600// Need separate macros for &&, || because there is no combined assignment2601#define ATOMIC_CMPX_L_CPT(TYPE_ID, OP_ID, TYPE, BITS, OP, GOMP_FLAG) \2602ATOMIC_BEGIN_CPT(TYPE_ID, OP_ID, TYPE, TYPE) \2603TYPE new_value; \2604(void)new_value; \2605OP_GOMP_CRITICAL_L_CPT(= *lhs OP, GOMP_FLAG) \2606OP_CMPXCHG_CPT(TYPE, BITS, OP) \2607}26082609ATOMIC_CMPX_L_CPT(fixed1, andl_cpt, char, 8, &&,2610KMP_ARCH_X86) // __kmpc_atomic_fixed1_andl_cpt2611ATOMIC_CMPX_L_CPT(fixed1, orl_cpt, char, 8, ||,2612KMP_ARCH_X86) // __kmpc_atomic_fixed1_orl_cpt2613ATOMIC_CMPX_L_CPT(fixed2, andl_cpt, short, 16, &&,2614KMP_ARCH_X86) // __kmpc_atomic_fixed2_andl_cpt2615ATOMIC_CMPX_L_CPT(fixed2, orl_cpt, short, 16, ||,2616KMP_ARCH_X86) // __kmpc_atomic_fixed2_orl_cpt2617ATOMIC_CMPX_L_CPT(fixed4, andl_cpt, 
kmp_int32, 32, &&,26180) // __kmpc_atomic_fixed4_andl_cpt2619ATOMIC_CMPX_L_CPT(fixed4, orl_cpt, kmp_int32, 32, ||,26200) // __kmpc_atomic_fixed4_orl_cpt2621ATOMIC_CMPX_L_CPT(fixed8, andl_cpt, kmp_int64, 64, &&,2622KMP_ARCH_X86) // __kmpc_atomic_fixed8_andl_cpt2623ATOMIC_CMPX_L_CPT(fixed8, orl_cpt, kmp_int64, 64, ||,2624KMP_ARCH_X86) // __kmpc_atomic_fixed8_orl_cpt26252626// -------------------------------------------------------------------------2627// Routines for Fortran operators that matched no one in C:2628// MAX, MIN, .EQV., .NEQV.2629// Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl}_cpt2630// Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor}_cpt26312632// -------------------------------------------------------------------------2633// MIN and MAX need separate macros2634// OP - operator to check if we need any actions?2635#define MIN_MAX_CRITSECT_CPT(OP, LCK_ID) \2636__kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \2637\2638if (*lhs OP rhs) { /* still need actions? */ \2639old_value = *lhs; \2640*lhs = rhs; \2641if (flag) \2642new_value = rhs; \2643else \2644new_value = old_value; \2645} else { \2646new_value = *lhs; \2647} \2648__kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \2649return new_value;26502651// -------------------------------------------------------------------------2652#ifdef KMP_GOMP_COMPAT2653#define GOMP_MIN_MAX_CRITSECT_CPT(OP, FLAG) \2654if ((FLAG) && (__kmp_atomic_mode == 2)) { \2655KMP_CHECK_GTID; \2656MIN_MAX_CRITSECT_CPT(OP, 0); \2657}2658#else2659#define GOMP_MIN_MAX_CRITSECT_CPT(OP, FLAG)2660#endif /* KMP_GOMP_COMPAT */26612662// -------------------------------------------------------------------------2663#define MIN_MAX_CMPXCHG_CPT(TYPE, BITS, OP) \2664{ \2665TYPE KMP_ATOMIC_VOLATILE temp_val; \2666/*TYPE old_value; */ \2667temp_val = *lhs; \2668old_value = temp_val; \2669while (old_value OP rhs && /* still need actions? */ \2670!KMP_COMPARE_AND_STORE_ACQ##BITS( \2671(kmp_int##BITS *)lhs, \2672*VOLATILE_CAST(kmp_int##BITS *) & old_value, \2673*VOLATILE_CAST(kmp_int##BITS *) & rhs)) { \2674temp_val = *lhs; \2675old_value = temp_val; \2676} \2677if (flag) \2678return rhs; \2679else \2680return old_value; \2681}26822683// -------------------------------------------------------------------------2684// 1-byte, 2-byte operands - use critical section2685#define MIN_MAX_CRITICAL_CPT(TYPE_ID, OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG) \2686ATOMIC_BEGIN_CPT(TYPE_ID, OP_ID, TYPE, TYPE) \2687TYPE new_value, old_value; \2688if (*lhs OP rhs) { /* need actions? 
*/ \2689GOMP_MIN_MAX_CRITSECT_CPT(OP, GOMP_FLAG) \2690MIN_MAX_CRITSECT_CPT(OP, LCK_ID) \2691} \2692return *lhs; \2693}26942695#define MIN_MAX_COMPXCHG_CPT(TYPE_ID, OP_ID, TYPE, BITS, OP, GOMP_FLAG) \2696ATOMIC_BEGIN_CPT(TYPE_ID, OP_ID, TYPE, TYPE) \2697TYPE new_value, old_value; \2698(void)new_value; \2699if (*lhs OP rhs) { \2700GOMP_MIN_MAX_CRITSECT_CPT(OP, GOMP_FLAG) \2701MIN_MAX_CMPXCHG_CPT(TYPE, BITS, OP) \2702} \2703return *lhs; \2704}27052706MIN_MAX_COMPXCHG_CPT(fixed1, max_cpt, char, 8, <,2707KMP_ARCH_X86) // __kmpc_atomic_fixed1_max_cpt2708MIN_MAX_COMPXCHG_CPT(fixed1, min_cpt, char, 8, >,2709KMP_ARCH_X86) // __kmpc_atomic_fixed1_min_cpt2710MIN_MAX_COMPXCHG_CPT(fixed2, max_cpt, short, 16, <,2711KMP_ARCH_X86) // __kmpc_atomic_fixed2_max_cpt2712MIN_MAX_COMPXCHG_CPT(fixed2, min_cpt, short, 16, >,2713KMP_ARCH_X86) // __kmpc_atomic_fixed2_min_cpt2714MIN_MAX_COMPXCHG_CPT(fixed4, max_cpt, kmp_int32, 32, <,27150) // __kmpc_atomic_fixed4_max_cpt2716MIN_MAX_COMPXCHG_CPT(fixed4, min_cpt, kmp_int32, 32, >,27170) // __kmpc_atomic_fixed4_min_cpt2718MIN_MAX_COMPXCHG_CPT(fixed8, max_cpt, kmp_int64, 64, <,2719KMP_ARCH_X86) // __kmpc_atomic_fixed8_max_cpt2720MIN_MAX_COMPXCHG_CPT(fixed8, min_cpt, kmp_int64, 64, >,2721KMP_ARCH_X86) // __kmpc_atomic_fixed8_min_cpt2722MIN_MAX_COMPXCHG_CPT(float4, max_cpt, kmp_real32, 32, <,2723KMP_ARCH_X86) // __kmpc_atomic_float4_max_cpt2724MIN_MAX_COMPXCHG_CPT(float4, min_cpt, kmp_real32, 32, >,2725KMP_ARCH_X86) // __kmpc_atomic_float4_min_cpt2726MIN_MAX_COMPXCHG_CPT(float8, max_cpt, kmp_real64, 64, <,2727KMP_ARCH_X86) // __kmpc_atomic_float8_max_cpt2728MIN_MAX_COMPXCHG_CPT(float8, min_cpt, kmp_real64, 64, >,2729KMP_ARCH_X86) // __kmpc_atomic_float8_min_cpt2730MIN_MAX_CRITICAL_CPT(float10, max_cpt, long double, <, 10r,27311) // __kmpc_atomic_float10_max_cpt2732MIN_MAX_CRITICAL_CPT(float10, min_cpt, long double, >, 10r,27331) // __kmpc_atomic_float10_min_cpt2734#if KMP_HAVE_QUAD2735MIN_MAX_CRITICAL_CPT(float16, max_cpt, QUAD_LEGACY, <, 16r,27361) // __kmpc_atomic_float16_max_cpt2737MIN_MAX_CRITICAL_CPT(float16, min_cpt, QUAD_LEGACY, >, 16r,27381) // __kmpc_atomic_float16_min_cpt2739#if (KMP_ARCH_X86)2740MIN_MAX_CRITICAL_CPT(float16, max_a16_cpt, Quad_a16_t, <, 16r,27411) // __kmpc_atomic_float16_max_a16_cpt2742MIN_MAX_CRITICAL_CPT(float16, min_a16_cpt, Quad_a16_t, >, 16r,27431) // __kmpc_atomic_float16_mix_a16_cpt2744#endif // (KMP_ARCH_X86)2745#endif // KMP_HAVE_QUAD27462747// ------------------------------------------------------------------------2748#ifdef KMP_GOMP_COMPAT2749#define OP_GOMP_CRITICAL_EQV_CPT(OP, FLAG) \2750if ((FLAG) && (__kmp_atomic_mode == 2)) { \2751KMP_CHECK_GTID; \2752OP_CRITICAL_CPT(OP, 0); \2753}2754#else2755#define OP_GOMP_CRITICAL_EQV_CPT(OP, FLAG)2756#endif /* KMP_GOMP_COMPAT */2757// ------------------------------------------------------------------------2758#define ATOMIC_CMPX_EQV_CPT(TYPE_ID, OP_ID, TYPE, BITS, OP, GOMP_FLAG) \2759ATOMIC_BEGIN_CPT(TYPE_ID, OP_ID, TYPE, TYPE) \2760TYPE new_value; \2761(void)new_value; \2762OP_GOMP_CRITICAL_EQV_CPT(^= (TYPE) ~, GOMP_FLAG) /* send assignment */ \2763OP_CMPXCHG_CPT(TYPE, BITS, OP) \2764}27652766// ------------------------------------------------------------------------27672768ATOMIC_CMPXCHG_CPT(fixed1, neqv_cpt, kmp_int8, 8, ^,2769KMP_ARCH_X86) // __kmpc_atomic_fixed1_neqv_cpt2770ATOMIC_CMPXCHG_CPT(fixed2, neqv_cpt, kmp_int16, 16, ^,2771KMP_ARCH_X86) // __kmpc_atomic_fixed2_neqv_cpt2772ATOMIC_CMPXCHG_CPT(fixed4, neqv_cpt, kmp_int32, 32, ^,2773KMP_ARCH_X86) // 
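// Illustrative sketch (not compiled): the MIN_MAX_CMPXCHG_CPT idea above for
// a 32-bit max with capture, rewritten with std::atomic (the name is
// hypothetical). The value is only replaced while it still compares smaller
// than rhs; if another thread has already stored something at least as large,
// nothing is written and the observed value is returned.
#if 0
#include <atomic>
#include <cstdint>

static int32_t example_fixed4_max_cpt(std::atomic<int32_t> *lhs, int32_t rhs,
                                      int flag) {
  int32_t old_value = lhs->load(std::memory_order_relaxed);
  while (old_value < rhs && !lhs->compare_exchange_weak(old_value, rhs)) {
    // compare_exchange_weak reloaded old_value; re-check whether rhs still wins
  }
  // Captured result: value after the update if flag != 0, value before it
  // otherwise.
  int32_t after = old_value < rhs ? rhs : old_value;
  return flag ? after : old_value;
}
#endif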
__kmpc_atomic_fixed4_neqv_cpt2774ATOMIC_CMPXCHG_CPT(fixed8, neqv_cpt, kmp_int64, 64, ^,2775KMP_ARCH_X86) // __kmpc_atomic_fixed8_neqv_cpt2776ATOMIC_CMPX_EQV_CPT(fixed1, eqv_cpt, kmp_int8, 8, ^~,2777KMP_ARCH_X86) // __kmpc_atomic_fixed1_eqv_cpt2778ATOMIC_CMPX_EQV_CPT(fixed2, eqv_cpt, kmp_int16, 16, ^~,2779KMP_ARCH_X86) // __kmpc_atomic_fixed2_eqv_cpt2780ATOMIC_CMPX_EQV_CPT(fixed4, eqv_cpt, kmp_int32, 32, ^~,2781KMP_ARCH_X86) // __kmpc_atomic_fixed4_eqv_cpt2782ATOMIC_CMPX_EQV_CPT(fixed8, eqv_cpt, kmp_int64, 64, ^~,2783KMP_ARCH_X86) // __kmpc_atomic_fixed8_eqv_cpt27842785// ------------------------------------------------------------------------2786// Routines for Extended types: long double, _Quad, complex flavours (use2787// critical section)2788// TYPE_ID, OP_ID, TYPE - detailed above2789// OP - operator2790// LCK_ID - lock identifier, used to possibly distinguish lock variable2791#define ATOMIC_CRITICAL_CPT(TYPE_ID, OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG) \2792ATOMIC_BEGIN_CPT(TYPE_ID, OP_ID, TYPE, TYPE) \2793TYPE new_value; \2794OP_GOMP_CRITICAL_CPT(TYPE, OP, GOMP_FLAG) /* send assignment */ \2795OP_UPDATE_CRITICAL_CPT(TYPE, OP, LCK_ID) /* send assignment */ \2796}27972798// ------------------------------------------------------------------------2799// Workaround for cmplx4. Regular routines with return value don't work2800// on Win_32e. Let's return captured values through the additional parameter.2801#define OP_CRITICAL_CPT_WRK(OP, LCK_ID) \2802__kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \2803\2804if (flag) { \2805(*lhs) OP rhs; \2806(*out) = (*lhs); \2807} else { \2808(*out) = (*lhs); \2809(*lhs) OP rhs; \2810} \2811\2812__kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \2813return;2814// ------------------------------------------------------------------------28152816#ifdef KMP_GOMP_COMPAT2817#define OP_GOMP_CRITICAL_CPT_WRK(OP, FLAG) \2818if ((FLAG) && (__kmp_atomic_mode == 2)) { \2819KMP_CHECK_GTID; \2820OP_CRITICAL_CPT_WRK(OP## =, 0); \2821}2822#else2823#define OP_GOMP_CRITICAL_CPT_WRK(OP, FLAG)2824#endif /* KMP_GOMP_COMPAT */2825// ------------------------------------------------------------------------28262827#define ATOMIC_BEGIN_WRK(TYPE_ID, OP_ID, TYPE) \2828void __kmpc_atomic_##TYPE_ID##_##OP_ID(ident_t *id_ref, int gtid, TYPE *lhs, \2829TYPE rhs, TYPE *out, int flag) { \2830KMP_DEBUG_ASSERT(__kmp_init_serial); \2831KA_TRACE(100, ("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid));2832// ------------------------------------------------------------------------28332834#define ATOMIC_CRITICAL_CPT_WRK(TYPE_ID, OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG) \2835ATOMIC_BEGIN_WRK(TYPE_ID, OP_ID, TYPE) \2836OP_GOMP_CRITICAL_CPT_WRK(OP, GOMP_FLAG) \2837OP_CRITICAL_CPT_WRK(OP## =, LCK_ID) \2838}2839// The end of workaround for cmplx428402841/* ------------------------------------------------------------------------- */2842// routines for long double type2843ATOMIC_CRITICAL_CPT(float10, add_cpt, long double, +, 10r,28441) // __kmpc_atomic_float10_add_cpt2845ATOMIC_CRITICAL_CPT(float10, sub_cpt, long double, -, 10r,28461) // __kmpc_atomic_float10_sub_cpt2847ATOMIC_CRITICAL_CPT(float10, mul_cpt, long double, *, 10r,28481) // __kmpc_atomic_float10_mul_cpt2849ATOMIC_CRITICAL_CPT(float10, div_cpt, long double, /, 10r,28501) // __kmpc_atomic_float10_div_cpt2851#if KMP_HAVE_QUAD2852// routines for _Quad type2853ATOMIC_CRITICAL_CPT(float16, add_cpt, QUAD_LEGACY, +, 16r,28541) // __kmpc_atomic_float16_add_cpt2855ATOMIC_CRITICAL_CPT(float16, sub_cpt, QUAD_LEGACY, -, 16r,28561) // 
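// Illustrative sketch (not compiled): why the eqv_cpt entries above pass the
// '^~' operator pair. Bitwise .EQV. is the complement of exclusive or, so the
// update becomes x = x ^ ~rhs, while .NEQV. is a plain xor. A hypothetical
// std::atomic equivalent for the 32-bit case:
#if 0
#include <atomic>
#include <cstdint>

static int32_t example_fixed4_eqv_cpt(std::atomic<int32_t> *lhs, int32_t rhs,
                                      int flag) {
  int32_t old_value = lhs->load(std::memory_order_relaxed);
  int32_t new_value = old_value ^ ~rhs; // bitwise equivalence: ~(old ^ rhs)
  while (!lhs->compare_exchange_weak(old_value, new_value)) {
    new_value = old_value ^ ~rhs; // old_value reloaded; recompute and retry
  }
  return flag ? new_value : old_value;
}
#endif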
__kmpc_atomic_float16_sub_cpt2857ATOMIC_CRITICAL_CPT(float16, mul_cpt, QUAD_LEGACY, *, 16r,28581) // __kmpc_atomic_float16_mul_cpt2859ATOMIC_CRITICAL_CPT(float16, div_cpt, QUAD_LEGACY, /, 16r,28601) // __kmpc_atomic_float16_div_cpt2861#if (KMP_ARCH_X86)2862ATOMIC_CRITICAL_CPT(float16, add_a16_cpt, Quad_a16_t, +, 16r,28631) // __kmpc_atomic_float16_add_a16_cpt2864ATOMIC_CRITICAL_CPT(float16, sub_a16_cpt, Quad_a16_t, -, 16r,28651) // __kmpc_atomic_float16_sub_a16_cpt2866ATOMIC_CRITICAL_CPT(float16, mul_a16_cpt, Quad_a16_t, *, 16r,28671) // __kmpc_atomic_float16_mul_a16_cpt2868ATOMIC_CRITICAL_CPT(float16, div_a16_cpt, Quad_a16_t, /, 16r,28691) // __kmpc_atomic_float16_div_a16_cpt2870#endif // (KMP_ARCH_X86)2871#endif // KMP_HAVE_QUAD28722873// routines for complex types28742875// cmplx4 routines to return void2876ATOMIC_CRITICAL_CPT_WRK(cmplx4, add_cpt, kmp_cmplx32, +, 8c,28771) // __kmpc_atomic_cmplx4_add_cpt2878ATOMIC_CRITICAL_CPT_WRK(cmplx4, sub_cpt, kmp_cmplx32, -, 8c,28791) // __kmpc_atomic_cmplx4_sub_cpt2880ATOMIC_CRITICAL_CPT_WRK(cmplx4, mul_cpt, kmp_cmplx32, *, 8c,28811) // __kmpc_atomic_cmplx4_mul_cpt2882ATOMIC_CRITICAL_CPT_WRK(cmplx4, div_cpt, kmp_cmplx32, /, 8c,28831) // __kmpc_atomic_cmplx4_div_cpt28842885ATOMIC_CRITICAL_CPT(cmplx8, add_cpt, kmp_cmplx64, +, 16c,28861) // __kmpc_atomic_cmplx8_add_cpt2887ATOMIC_CRITICAL_CPT(cmplx8, sub_cpt, kmp_cmplx64, -, 16c,28881) // __kmpc_atomic_cmplx8_sub_cpt2889ATOMIC_CRITICAL_CPT(cmplx8, mul_cpt, kmp_cmplx64, *, 16c,28901) // __kmpc_atomic_cmplx8_mul_cpt2891ATOMIC_CRITICAL_CPT(cmplx8, div_cpt, kmp_cmplx64, /, 16c,28921) // __kmpc_atomic_cmplx8_div_cpt2893ATOMIC_CRITICAL_CPT(cmplx10, add_cpt, kmp_cmplx80, +, 20c,28941) // __kmpc_atomic_cmplx10_add_cpt2895ATOMIC_CRITICAL_CPT(cmplx10, sub_cpt, kmp_cmplx80, -, 20c,28961) // __kmpc_atomic_cmplx10_sub_cpt2897ATOMIC_CRITICAL_CPT(cmplx10, mul_cpt, kmp_cmplx80, *, 20c,28981) // __kmpc_atomic_cmplx10_mul_cpt2899ATOMIC_CRITICAL_CPT(cmplx10, div_cpt, kmp_cmplx80, /, 20c,29001) // __kmpc_atomic_cmplx10_div_cpt2901#if KMP_HAVE_QUAD2902ATOMIC_CRITICAL_CPT(cmplx16, add_cpt, CPLX128_LEG, +, 32c,29031) // __kmpc_atomic_cmplx16_add_cpt2904ATOMIC_CRITICAL_CPT(cmplx16, sub_cpt, CPLX128_LEG, -, 32c,29051) // __kmpc_atomic_cmplx16_sub_cpt2906ATOMIC_CRITICAL_CPT(cmplx16, mul_cpt, CPLX128_LEG, *, 32c,29071) // __kmpc_atomic_cmplx16_mul_cpt2908ATOMIC_CRITICAL_CPT(cmplx16, div_cpt, CPLX128_LEG, /, 32c,29091) // __kmpc_atomic_cmplx16_div_cpt2910#if (KMP_ARCH_X86)2911ATOMIC_CRITICAL_CPT(cmplx16, add_a16_cpt, kmp_cmplx128_a16_t, +, 32c,29121) // __kmpc_atomic_cmplx16_add_a16_cpt2913ATOMIC_CRITICAL_CPT(cmplx16, sub_a16_cpt, kmp_cmplx128_a16_t, -, 32c,29141) // __kmpc_atomic_cmplx16_sub_a16_cpt2915ATOMIC_CRITICAL_CPT(cmplx16, mul_a16_cpt, kmp_cmplx128_a16_t, *, 32c,29161) // __kmpc_atomic_cmplx16_mul_a16_cpt2917ATOMIC_CRITICAL_CPT(cmplx16, div_a16_cpt, kmp_cmplx128_a16_t, /, 32c,29181) // __kmpc_atomic_cmplx16_div_a16_cpt2919#endif // (KMP_ARCH_X86)2920#endif // KMP_HAVE_QUAD29212922// OpenMP 4.0: v = x = expr binop x; { v = x; x = expr binop x; } { x = expr2923// binop x; v = x; } for non-commutative operations.2924// Supported only on IA-32 architecture and Intel(R) 6429252926#if KMP_ARCH_X86 || KMP_ARCH_X86_642927// -------------------------------------------------------------------------2928// Operation on *lhs, rhs bound by critical section2929// OP - operator (it's supposed to contain an assignment)2930// LCK_ID - lock identifier2931// Note: don't check gtid as it should always be valid2932// 1, 2-byte - expect valid 
parameter, other - check before this macro2933#define OP_CRITICAL_CPT_REV(TYPE, OP, LCK_ID) \2934__kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \2935\2936if (flag) { \2937/*temp_val = (*lhs);*/ \2938(*lhs) = (TYPE)((rhs)OP(*lhs)); \2939new_value = (*lhs); \2940} else { \2941new_value = (*lhs); \2942(*lhs) = (TYPE)((rhs)OP(*lhs)); \2943} \2944__kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \2945return new_value;29462947// ------------------------------------------------------------------------2948#ifdef KMP_GOMP_COMPAT2949#define OP_GOMP_CRITICAL_CPT_REV(TYPE, OP, FLAG) \2950if ((FLAG) && (__kmp_atomic_mode == 2)) { \2951KMP_CHECK_GTID; \2952OP_CRITICAL_CPT_REV(TYPE, OP, 0); \2953}2954#else2955#define OP_GOMP_CRITICAL_CPT_REV(TYPE, OP, FLAG)2956#endif /* KMP_GOMP_COMPAT */29572958// ------------------------------------------------------------------------2959// Operation on *lhs, rhs using "compare_and_store" routine2960// TYPE - operands' type2961// BITS - size in bits, used to distinguish low level calls2962// OP - operator2963// Note: temp_val introduced in order to force the compiler to read2964// *lhs only once (w/o it the compiler reads *lhs twice)2965#define OP_CMPXCHG_CPT_REV(TYPE, BITS, OP) \2966{ \2967TYPE KMP_ATOMIC_VOLATILE temp_val; \2968TYPE old_value, new_value; \2969temp_val = *lhs; \2970old_value = temp_val; \2971new_value = (TYPE)(rhs OP old_value); \2972while (!KMP_COMPARE_AND_STORE_ACQ##BITS( \2973(kmp_int##BITS *)lhs, *VOLATILE_CAST(kmp_int##BITS *) & old_value, \2974*VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \2975temp_val = *lhs; \2976old_value = temp_val; \2977new_value = (TYPE)(rhs OP old_value); \2978} \2979if (flag) { \2980return new_value; \2981} else \2982return old_value; \2983}29842985// -------------------------------------------------------------------------2986#define ATOMIC_CMPXCHG_CPT_REV(TYPE_ID, OP_ID, TYPE, BITS, OP, GOMP_FLAG) \2987ATOMIC_BEGIN_CPT(TYPE_ID, OP_ID, TYPE, TYPE) \2988TYPE new_value; \2989(void)new_value; \2990OP_GOMP_CRITICAL_CPT_REV(TYPE, OP, GOMP_FLAG) \2991OP_CMPXCHG_CPT_REV(TYPE, BITS, OP) \2992}29932994ATOMIC_CMPXCHG_CPT_REV(fixed1, div_cpt_rev, kmp_int8, 8, /,2995KMP_ARCH_X86) // __kmpc_atomic_fixed1_div_cpt_rev2996ATOMIC_CMPXCHG_CPT_REV(fixed1u, div_cpt_rev, kmp_uint8, 8, /,2997KMP_ARCH_X86) // __kmpc_atomic_fixed1u_div_cpt_rev2998ATOMIC_CMPXCHG_CPT_REV(fixed1, shl_cpt_rev, kmp_int8, 8, <<,2999KMP_ARCH_X86) // __kmpc_atomic_fixed1_shl_cpt_rev3000ATOMIC_CMPXCHG_CPT_REV(fixed1, shr_cpt_rev, kmp_int8, 8, >>,3001KMP_ARCH_X86) // __kmpc_atomic_fixed1_shr_cpt_rev3002ATOMIC_CMPXCHG_CPT_REV(fixed1u, shr_cpt_rev, kmp_uint8, 8, >>,3003KMP_ARCH_X86) // __kmpc_atomic_fixed1u_shr_cpt_rev3004ATOMIC_CMPXCHG_CPT_REV(fixed1, sub_cpt_rev, kmp_int8, 8, -,3005KMP_ARCH_X86) // __kmpc_atomic_fixed1_sub_cpt_rev3006ATOMIC_CMPXCHG_CPT_REV(fixed2, div_cpt_rev, kmp_int16, 16, /,3007KMP_ARCH_X86) // __kmpc_atomic_fixed2_div_cpt_rev3008ATOMIC_CMPXCHG_CPT_REV(fixed2u, div_cpt_rev, kmp_uint16, 16, /,3009KMP_ARCH_X86) // __kmpc_atomic_fixed2u_div_cpt_rev3010ATOMIC_CMPXCHG_CPT_REV(fixed2, shl_cpt_rev, kmp_int16, 16, <<,3011KMP_ARCH_X86) // __kmpc_atomic_fixed2_shl_cpt_rev3012ATOMIC_CMPXCHG_CPT_REV(fixed2, shr_cpt_rev, kmp_int16, 16, >>,3013KMP_ARCH_X86) // __kmpc_atomic_fixed2_shr_cpt_rev3014ATOMIC_CMPXCHG_CPT_REV(fixed2u, shr_cpt_rev, kmp_uint16, 16, >>,3015KMP_ARCH_X86) // __kmpc_atomic_fixed2u_shr_cpt_rev3016ATOMIC_CMPXCHG_CPT_REV(fixed2, sub_cpt_rev, kmp_int16, 16, -,3017KMP_ARCH_X86) // 
__kmpc_atomic_fixed2_sub_cpt_rev3018ATOMIC_CMPXCHG_CPT_REV(fixed4, div_cpt_rev, kmp_int32, 32, /,3019KMP_ARCH_X86) // __kmpc_atomic_fixed4_div_cpt_rev3020ATOMIC_CMPXCHG_CPT_REV(fixed4u, div_cpt_rev, kmp_uint32, 32, /,3021KMP_ARCH_X86) // __kmpc_atomic_fixed4u_div_cpt_rev3022ATOMIC_CMPXCHG_CPT_REV(fixed4, shl_cpt_rev, kmp_int32, 32, <<,3023KMP_ARCH_X86) // __kmpc_atomic_fixed4_shl_cpt_rev3024ATOMIC_CMPXCHG_CPT_REV(fixed4, shr_cpt_rev, kmp_int32, 32, >>,3025KMP_ARCH_X86) // __kmpc_atomic_fixed4_shr_cpt_rev3026ATOMIC_CMPXCHG_CPT_REV(fixed4u, shr_cpt_rev, kmp_uint32, 32, >>,3027KMP_ARCH_X86) // __kmpc_atomic_fixed4u_shr_cpt_rev3028ATOMIC_CMPXCHG_CPT_REV(fixed4, sub_cpt_rev, kmp_int32, 32, -,3029KMP_ARCH_X86) // __kmpc_atomic_fixed4_sub_cpt_rev3030ATOMIC_CMPXCHG_CPT_REV(fixed8, div_cpt_rev, kmp_int64, 64, /,3031KMP_ARCH_X86) // __kmpc_atomic_fixed8_div_cpt_rev3032ATOMIC_CMPXCHG_CPT_REV(fixed8u, div_cpt_rev, kmp_uint64, 64, /,3033KMP_ARCH_X86) // __kmpc_atomic_fixed8u_div_cpt_rev3034ATOMIC_CMPXCHG_CPT_REV(fixed8, shl_cpt_rev, kmp_int64, 64, <<,3035KMP_ARCH_X86) // __kmpc_atomic_fixed8_shl_cpt_rev3036ATOMIC_CMPXCHG_CPT_REV(fixed8, shr_cpt_rev, kmp_int64, 64, >>,3037KMP_ARCH_X86) // __kmpc_atomic_fixed8_shr_cpt_rev3038ATOMIC_CMPXCHG_CPT_REV(fixed8u, shr_cpt_rev, kmp_uint64, 64, >>,3039KMP_ARCH_X86) // __kmpc_atomic_fixed8u_shr_cpt_rev3040ATOMIC_CMPXCHG_CPT_REV(fixed8, sub_cpt_rev, kmp_int64, 64, -,3041KMP_ARCH_X86) // __kmpc_atomic_fixed8_sub_cpt_rev3042ATOMIC_CMPXCHG_CPT_REV(float4, div_cpt_rev, kmp_real32, 32, /,3043KMP_ARCH_X86) // __kmpc_atomic_float4_div_cpt_rev3044ATOMIC_CMPXCHG_CPT_REV(float4, sub_cpt_rev, kmp_real32, 32, -,3045KMP_ARCH_X86) // __kmpc_atomic_float4_sub_cpt_rev3046ATOMIC_CMPXCHG_CPT_REV(float8, div_cpt_rev, kmp_real64, 64, /,3047KMP_ARCH_X86) // __kmpc_atomic_float8_div_cpt_rev3048ATOMIC_CMPXCHG_CPT_REV(float8, sub_cpt_rev, kmp_real64, 64, -,3049KMP_ARCH_X86) // __kmpc_atomic_float8_sub_cpt_rev3050// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG30513052// ------------------------------------------------------------------------3053// Routines for Extended types: long double, _Quad, complex flavours (use3054// critical section)3055// TYPE_ID, OP_ID, TYPE - detailed above3056// OP - operator3057// LCK_ID - lock identifier, used to possibly distinguish lock variable3058#define ATOMIC_CRITICAL_CPT_REV(TYPE_ID, OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG) \3059ATOMIC_BEGIN_CPT(TYPE_ID, OP_ID, TYPE, TYPE) \3060TYPE new_value; \3061/*printf("__kmp_atomic_mode = %d\n", __kmp_atomic_mode);*/ \3062OP_GOMP_CRITICAL_CPT_REV(TYPE, OP, GOMP_FLAG) \3063OP_CRITICAL_CPT_REV(TYPE, OP, LCK_ID) \3064}30653066/* ------------------------------------------------------------------------- */3067// routines for long double type3068ATOMIC_CRITICAL_CPT_REV(float10, sub_cpt_rev, long double, -, 10r,30691) // __kmpc_atomic_float10_sub_cpt_rev3070ATOMIC_CRITICAL_CPT_REV(float10, div_cpt_rev, long double, /, 10r,30711) // __kmpc_atomic_float10_div_cpt_rev3072#if KMP_HAVE_QUAD3073// routines for _Quad type3074ATOMIC_CRITICAL_CPT_REV(float16, sub_cpt_rev, QUAD_LEGACY, -, 16r,30751) // __kmpc_atomic_float16_sub_cpt_rev3076ATOMIC_CRITICAL_CPT_REV(float16, div_cpt_rev, QUAD_LEGACY, /, 16r,30771) // __kmpc_atomic_float16_div_cpt_rev3078#if (KMP_ARCH_X86)3079ATOMIC_CRITICAL_CPT_REV(float16, sub_a16_cpt_rev, Quad_a16_t, -, 16r,30801) // __kmpc_atomic_float16_sub_a16_cpt_rev3081ATOMIC_CRITICAL_CPT_REV(float16, div_a16_cpt_rev, Quad_a16_t, /, 16r,30821) // __kmpc_atomic_float16_div_a16_cpt_rev3083#endif // (KMP_ARCH_X86)3084#endif // 
KMP_HAVE_QUAD30853086// routines for complex types30873088// ------------------------------------------------------------------------3089// Workaround for cmplx4. Regular routines with return value don't work3090// on Win_32e. Let's return captured values through the additional parameter.3091#define OP_CRITICAL_CPT_REV_WRK(OP, LCK_ID) \3092__kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \3093\3094if (flag) { \3095(*lhs) = (rhs)OP(*lhs); \3096(*out) = (*lhs); \3097} else { \3098(*out) = (*lhs); \3099(*lhs) = (rhs)OP(*lhs); \3100} \3101\3102__kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \3103return;3104// ------------------------------------------------------------------------31053106#ifdef KMP_GOMP_COMPAT3107#define OP_GOMP_CRITICAL_CPT_REV_WRK(OP, FLAG) \3108if ((FLAG) && (__kmp_atomic_mode == 2)) { \3109KMP_CHECK_GTID; \3110OP_CRITICAL_CPT_REV_WRK(OP, 0); \3111}3112#else3113#define OP_GOMP_CRITICAL_CPT_REV_WRK(OP, FLAG)3114#endif /* KMP_GOMP_COMPAT */3115// ------------------------------------------------------------------------31163117#define ATOMIC_CRITICAL_CPT_REV_WRK(TYPE_ID, OP_ID, TYPE, OP, LCK_ID, \3118GOMP_FLAG) \3119ATOMIC_BEGIN_WRK(TYPE_ID, OP_ID, TYPE) \3120OP_GOMP_CRITICAL_CPT_REV_WRK(OP, GOMP_FLAG) \3121OP_CRITICAL_CPT_REV_WRK(OP, LCK_ID) \3122}3123// The end of workaround for cmplx431243125// !!! TODO: check if we need to return void for cmplx4 routines3126// cmplx4 routines to return void3127ATOMIC_CRITICAL_CPT_REV_WRK(cmplx4, sub_cpt_rev, kmp_cmplx32, -, 8c,31281) // __kmpc_atomic_cmplx4_sub_cpt_rev3129ATOMIC_CRITICAL_CPT_REV_WRK(cmplx4, div_cpt_rev, kmp_cmplx32, /, 8c,31301) // __kmpc_atomic_cmplx4_div_cpt_rev31313132ATOMIC_CRITICAL_CPT_REV(cmplx8, sub_cpt_rev, kmp_cmplx64, -, 16c,31331) // __kmpc_atomic_cmplx8_sub_cpt_rev3134ATOMIC_CRITICAL_CPT_REV(cmplx8, div_cpt_rev, kmp_cmplx64, /, 16c,31351) // __kmpc_atomic_cmplx8_div_cpt_rev3136ATOMIC_CRITICAL_CPT_REV(cmplx10, sub_cpt_rev, kmp_cmplx80, -, 20c,31371) // __kmpc_atomic_cmplx10_sub_cpt_rev3138ATOMIC_CRITICAL_CPT_REV(cmplx10, div_cpt_rev, kmp_cmplx80, /, 20c,31391) // __kmpc_atomic_cmplx10_div_cpt_rev3140#if KMP_HAVE_QUAD3141ATOMIC_CRITICAL_CPT_REV(cmplx16, sub_cpt_rev, CPLX128_LEG, -, 32c,31421) // __kmpc_atomic_cmplx16_sub_cpt_rev3143ATOMIC_CRITICAL_CPT_REV(cmplx16, div_cpt_rev, CPLX128_LEG, /, 32c,31441) // __kmpc_atomic_cmplx16_div_cpt_rev3145#if (KMP_ARCH_X86)3146ATOMIC_CRITICAL_CPT_REV(cmplx16, sub_a16_cpt_rev, kmp_cmplx128_a16_t, -, 32c,31471) // __kmpc_atomic_cmplx16_sub_a16_cpt_rev3148ATOMIC_CRITICAL_CPT_REV(cmplx16, div_a16_cpt_rev, kmp_cmplx128_a16_t, /, 32c,31491) // __kmpc_atomic_cmplx16_div_a16_cpt_rev3150#endif // (KMP_ARCH_X86)3151#endif // KMP_HAVE_QUAD31523153// Capture reverse for mixed type: RHS=float163154#if KMP_HAVE_QUAD31553156// Beginning of a definition (provides name, parameters, gebug trace)3157// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned3158// fixed)3159// OP_ID - operation identifier (add, sub, mul, ...)3160// TYPE - operands' type3161// -------------------------------------------------------------------------3162#define ATOMIC_CMPXCHG_CPT_REV_MIX(TYPE_ID, TYPE, OP_ID, BITS, OP, RTYPE_ID, \3163RTYPE, LCK_ID, MASK, GOMP_FLAG) \3164ATOMIC_BEGIN_CPT_MIX(TYPE_ID, OP_ID, TYPE, RTYPE_ID, RTYPE) \3165TYPE new_value; \3166(void)new_value; \3167OP_GOMP_CRITICAL_CPT_REV(TYPE, OP, GOMP_FLAG) \3168OP_CMPXCHG_CPT_REV(TYPE, BITS, OP) \3169}31703171// -------------------------------------------------------------------------3172#define 
ATOMIC_CRITICAL_CPT_REV_MIX(TYPE_ID, TYPE, OP_ID, OP, RTYPE_ID, RTYPE, \3173LCK_ID, GOMP_FLAG) \3174ATOMIC_BEGIN_CPT_MIX(TYPE_ID, OP_ID, TYPE, RTYPE_ID, RTYPE) \3175TYPE new_value; \3176(void)new_value; \3177OP_GOMP_CRITICAL_CPT_REV(TYPE, OP, GOMP_FLAG) /* send assignment */ \3178OP_CRITICAL_CPT_REV(TYPE, OP, LCK_ID) /* send assignment */ \3179}31803181ATOMIC_CMPXCHG_CPT_REV_MIX(fixed1, char, sub_cpt_rev, 8, -, fp, _Quad, 1i, 0,3182KMP_ARCH_X86) // __kmpc_atomic_fixed1_sub_cpt_rev_fp3183ATOMIC_CMPXCHG_CPT_REV_MIX(fixed1u, uchar, sub_cpt_rev, 8, -, fp, _Quad, 1i, 0,3184KMP_ARCH_X86) // __kmpc_atomic_fixed1u_sub_cpt_rev_fp3185ATOMIC_CMPXCHG_CPT_REV_MIX(fixed1, char, div_cpt_rev, 8, /, fp, _Quad, 1i, 0,3186KMP_ARCH_X86) // __kmpc_atomic_fixed1_div_cpt_rev_fp3187ATOMIC_CMPXCHG_CPT_REV_MIX(fixed1u, uchar, div_cpt_rev, 8, /, fp, _Quad, 1i, 0,3188KMP_ARCH_X86) // __kmpc_atomic_fixed1u_div_cpt_rev_fp31893190ATOMIC_CMPXCHG_CPT_REV_MIX(fixed2, short, sub_cpt_rev, 16, -, fp, _Quad, 2i, 1,3191KMP_ARCH_X86) // __kmpc_atomic_fixed2_sub_cpt_rev_fp3192ATOMIC_CMPXCHG_CPT_REV_MIX(fixed2u, ushort, sub_cpt_rev, 16, -, fp, _Quad, 2i,31931,3194KMP_ARCH_X86) // __kmpc_atomic_fixed2u_sub_cpt_rev_fp3195ATOMIC_CMPXCHG_CPT_REV_MIX(fixed2, short, div_cpt_rev, 16, /, fp, _Quad, 2i, 1,3196KMP_ARCH_X86) // __kmpc_atomic_fixed2_div_cpt_rev_fp3197ATOMIC_CMPXCHG_CPT_REV_MIX(fixed2u, ushort, div_cpt_rev, 16, /, fp, _Quad, 2i,31981,3199KMP_ARCH_X86) // __kmpc_atomic_fixed2u_div_cpt_rev_fp32003201ATOMIC_CMPXCHG_CPT_REV_MIX(fixed4, kmp_int32, sub_cpt_rev, 32, -, fp, _Quad, 4i,32023, 0) // __kmpc_atomic_fixed4_sub_cpt_rev_fp3203ATOMIC_CMPXCHG_CPT_REV_MIX(fixed4u, kmp_uint32, sub_cpt_rev, 32, -, fp, _Quad,32044i, 3, 0) // __kmpc_atomic_fixed4u_sub_cpt_rev_fp3205ATOMIC_CMPXCHG_CPT_REV_MIX(fixed4, kmp_int32, div_cpt_rev, 32, /, fp, _Quad, 4i,32063, 0) // __kmpc_atomic_fixed4_div_cpt_rev_fp3207ATOMIC_CMPXCHG_CPT_REV_MIX(fixed4u, kmp_uint32, div_cpt_rev, 32, /, fp, _Quad,32084i, 3, 0) // __kmpc_atomic_fixed4u_div_cpt_rev_fp32093210ATOMIC_CMPXCHG_CPT_REV_MIX(fixed8, kmp_int64, sub_cpt_rev, 64, -, fp, _Quad, 8i,32117,3212KMP_ARCH_X86) // __kmpc_atomic_fixed8_sub_cpt_rev_fp3213ATOMIC_CMPXCHG_CPT_REV_MIX(fixed8u, kmp_uint64, sub_cpt_rev, 64, -, fp, _Quad,32148i, 7,3215KMP_ARCH_X86) // __kmpc_atomic_fixed8u_sub_cpt_rev_fp3216ATOMIC_CMPXCHG_CPT_REV_MIX(fixed8, kmp_int64, div_cpt_rev, 64, /, fp, _Quad, 8i,32177,3218KMP_ARCH_X86) // __kmpc_atomic_fixed8_div_cpt_rev_fp3219ATOMIC_CMPXCHG_CPT_REV_MIX(fixed8u, kmp_uint64, div_cpt_rev, 64, /, fp, _Quad,32208i, 7,3221KMP_ARCH_X86) // __kmpc_atomic_fixed8u_div_cpt_rev_fp32223223ATOMIC_CMPXCHG_CPT_REV_MIX(float4, kmp_real32, sub_cpt_rev, 32, -, fp, _Quad,32244r, 3,3225KMP_ARCH_X86) // __kmpc_atomic_float4_sub_cpt_rev_fp3226ATOMIC_CMPXCHG_CPT_REV_MIX(float4, kmp_real32, div_cpt_rev, 32, /, fp, _Quad,32274r, 3,3228KMP_ARCH_X86) // __kmpc_atomic_float4_div_cpt_rev_fp32293230ATOMIC_CMPXCHG_CPT_REV_MIX(float8, kmp_real64, sub_cpt_rev, 64, -, fp, _Quad,32318r, 7,3232KMP_ARCH_X86) // __kmpc_atomic_float8_sub_cpt_rev_fp3233ATOMIC_CMPXCHG_CPT_REV_MIX(float8, kmp_real64, div_cpt_rev, 64, /, fp, _Quad,32348r, 7,3235KMP_ARCH_X86) // __kmpc_atomic_float8_div_cpt_rev_fp32363237ATOMIC_CRITICAL_CPT_REV_MIX(float10, long double, sub_cpt_rev, -, fp, _Quad,323810r, 1) // __kmpc_atomic_float10_sub_cpt_rev_fp3239ATOMIC_CRITICAL_CPT_REV_MIX(float10, long double, div_cpt_rev, /, fp, _Quad,324010r, 1) // __kmpc_atomic_float10_div_cpt_rev_fp32413242#endif // KMP_HAVE_QUAD32433244// OpenMP 4.0 Capture-write (swap): {v = x; 
x = expr;}

#define ATOMIC_BEGIN_SWP(TYPE_ID, TYPE) \
  TYPE __kmpc_atomic_##TYPE_ID##_swp(ident_t *id_ref, int gtid, TYPE *lhs, \
                                     TYPE rhs) { \
    KMP_DEBUG_ASSERT(__kmp_init_serial); \
    KA_TRACE(100, ("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid));

#define CRITICAL_SWP(LCK_ID) \
  __kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \
  \
  old_value = (*lhs); \
  (*lhs) = rhs; \
  \
  __kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \
  return old_value;

// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define GOMP_CRITICAL_SWP(FLAG) \
  if ((FLAG) && (__kmp_atomic_mode == 2)) { \
    KMP_CHECK_GTID; \
    CRITICAL_SWP(0); \
  }
#else
#define GOMP_CRITICAL_SWP(FLAG)
#endif /* KMP_GOMP_COMPAT */

#define ATOMIC_XCHG_SWP(TYPE_ID, TYPE, BITS, GOMP_FLAG) \
  ATOMIC_BEGIN_SWP(TYPE_ID, TYPE) \
  TYPE old_value; \
  GOMP_CRITICAL_SWP(GOMP_FLAG) \
  old_value = KMP_XCHG_FIXED##BITS(lhs, rhs); \
  return old_value; \
  }
// ------------------------------------------------------------------------
#define ATOMIC_XCHG_FLOAT_SWP(TYPE_ID, TYPE, BITS, GOMP_FLAG) \
  ATOMIC_BEGIN_SWP(TYPE_ID, TYPE) \
  TYPE old_value; \
  GOMP_CRITICAL_SWP(GOMP_FLAG) \
  old_value = KMP_XCHG_REAL##BITS(lhs, rhs); \
  return old_value; \
  }

// ------------------------------------------------------------------------
#define CMPXCHG_SWP(TYPE, BITS) \
  { \
    TYPE KMP_ATOMIC_VOLATILE temp_val; \
    TYPE old_value, new_value; \
    temp_val = *lhs; \
    old_value = temp_val; \
    new_value = rhs; \
    while (!KMP_COMPARE_AND_STORE_ACQ##BITS( \
        (kmp_int##BITS *)lhs, *VOLATILE_CAST(kmp_int##BITS *) & old_value, \
        *VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \
      temp_val = *lhs; \
      old_value = temp_val; \
      new_value = rhs; \
    } \
    return old_value; \
  }

// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_SWP(TYPE_ID, TYPE, BITS, GOMP_FLAG) \
  ATOMIC_BEGIN_SWP(TYPE_ID, TYPE) \
  TYPE old_value; \
  (void)old_value; \
  GOMP_CRITICAL_SWP(GOMP_FLAG) \
  CMPXCHG_SWP(TYPE, BITS) \
  }

ATOMIC_XCHG_SWP(fixed1, kmp_int8, 8, KMP_ARCH_X86) // __kmpc_atomic_fixed1_swp
ATOMIC_XCHG_SWP(fixed2, kmp_int16, 16, KMP_ARCH_X86) // __kmpc_atomic_fixed2_swp
ATOMIC_XCHG_SWP(fixed4, kmp_int32, 32, KMP_ARCH_X86) // __kmpc_atomic_fixed4_swp

ATOMIC_XCHG_FLOAT_SWP(float4, kmp_real32, 32,
                      KMP_ARCH_X86) // __kmpc_atomic_float4_swp

#if (KMP_ARCH_X86)
ATOMIC_CMPXCHG_SWP(fixed8, kmp_int64, 64,
                   KMP_ARCH_X86) // __kmpc_atomic_fixed8_swp
ATOMIC_CMPXCHG_SWP(float8, kmp_real64, 64,
                   KMP_ARCH_X86) // __kmpc_atomic_float8_swp
#else
ATOMIC_XCHG_SWP(fixed8, kmp_int64, 64, KMP_ARCH_X86) // __kmpc_atomic_fixed8_swp
ATOMIC_XCHG_FLOAT_SWP(float8, kmp_real64, 64,
                      KMP_ARCH_X86) // __kmpc_atomic_float8_swp
#endif // (KMP_ARCH_X86)
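// ------------------------------------------------------------------------
// Illustrative sketch (not part of the runtime interface): one way a compiler
// might lower the OpenMP 4.0 capture-write form {v = x; x = expr;} to the
// swap entrypoints generated above. The names loc, gtid, x and v below are
// hypothetical placeholders supplied by the caller.
//
//   // #pragma omp atomic capture
//   // { v = x; x = 17; }              // x and v are kmp_int32 here
//   v = __kmpc_atomic_fixed4_swp(&loc, gtid, &x, 17);
//
// The entrypoint returns the previous contents of *lhs, which matches the
// {v = x; x = expr;} semantics noted at the start of this section.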
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use
// critical section)
#define ATOMIC_CRITICAL_SWP(TYPE_ID, TYPE, LCK_ID, GOMP_FLAG) \
  ATOMIC_BEGIN_SWP(TYPE_ID, TYPE) \
  TYPE old_value; \
  GOMP_CRITICAL_SWP(GOMP_FLAG) \
  CRITICAL_SWP(LCK_ID) \
  }

// ------------------------------------------------------------------------
// !!! TODO: check if we need to return void for cmplx4 routines
// Workaround for cmplx4. Regular routines with return value don't work
// on Win_32e. Let's return captured values through the additional parameter.

#define ATOMIC_BEGIN_SWP_WRK(TYPE_ID, TYPE) \
  void __kmpc_atomic_##TYPE_ID##_swp(ident_t *id_ref, int gtid, TYPE *lhs, \
                                     TYPE rhs, TYPE *out) { \
    KMP_DEBUG_ASSERT(__kmp_init_serial); \
    KA_TRACE(100, ("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid));

#define CRITICAL_SWP_WRK(LCK_ID) \
  __kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \
  \
  tmp = (*lhs); \
  (*lhs) = (rhs); \
  (*out) = tmp; \
  __kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \
  return;
// ------------------------------------------------------------------------

#ifdef KMP_GOMP_COMPAT
#define GOMP_CRITICAL_SWP_WRK(FLAG) \
  if ((FLAG) && (__kmp_atomic_mode == 2)) { \
    KMP_CHECK_GTID; \
    CRITICAL_SWP_WRK(0); \
  }
#else
#define GOMP_CRITICAL_SWP_WRK(FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------

#define ATOMIC_CRITICAL_SWP_WRK(TYPE_ID, TYPE, LCK_ID, GOMP_FLAG) \
  ATOMIC_BEGIN_SWP_WRK(TYPE_ID, TYPE) \
  TYPE tmp; \
  GOMP_CRITICAL_SWP_WRK(GOMP_FLAG) \
  CRITICAL_SWP_WRK(LCK_ID) \
  }
// The end of workaround for cmplx4

ATOMIC_CRITICAL_SWP(float10, long double, 10r, 1) // __kmpc_atomic_float10_swp
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_SWP(float16, QUAD_LEGACY, 16r, 1) // __kmpc_atomic_float16_swp
#endif // KMP_HAVE_QUAD
// cmplx4 routine to return void
ATOMIC_CRITICAL_SWP_WRK(cmplx4, kmp_cmplx32, 8c, 1) // __kmpc_atomic_cmplx4_swp

// ATOMIC_CRITICAL_SWP( cmplx4, kmp_cmplx32, 8c, 1 ) //
// __kmpc_atomic_cmplx4_swp

ATOMIC_CRITICAL_SWP(cmplx8, kmp_cmplx64, 16c, 1) // __kmpc_atomic_cmplx8_swp
ATOMIC_CRITICAL_SWP(cmplx10, kmp_cmplx80, 20c, 1) // __kmpc_atomic_cmplx10_swp
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_SWP(cmplx16, CPLX128_LEG, 32c, 1) // __kmpc_atomic_cmplx16_swp
#if (KMP_ARCH_X86)
ATOMIC_CRITICAL_SWP(float16_a16, Quad_a16_t, 16r,
                    1) // __kmpc_atomic_float16_a16_swp
ATOMIC_CRITICAL_SWP(cmplx16_a16, kmp_cmplx128_a16_t, 32c,
                    1) // __kmpc_atomic_cmplx16_a16_swp
#endif // (KMP_ARCH_X86)
#endif // KMP_HAVE_QUAD

// End of OpenMP 4.0 Capture

#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64

#undef OP_CRITICAL

/* ------------------------------------------------------------------------ */
/* Generic atomic routines */

void __kmpc_atomic_1(ident_t *id_ref, int gtid, void *lhs, void *rhs,
                     void (*f)(void *, void *, void *)) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);

  if (
#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)
      FALSE /* must use lock */
#else
      TRUE
#endif // KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)
  ) {
    kmp_int8 old_value, new_value;

    old_value = *(kmp_int8 *)lhs;
    (*f)(&new_value, &old_value, rhs);

    /* TODO: Should this be acquire or release?
*/3431while (!KMP_COMPARE_AND_STORE_ACQ8((kmp_int8 *)lhs, *(kmp_int8 *)&old_value,3432*(kmp_int8 *)&new_value)) {3433KMP_CPU_PAUSE();34343435old_value = *(kmp_int8 *)lhs;3436(*f)(&new_value, &old_value, rhs);3437}34383439return;3440} else {3441// All 1-byte data is of integer data type.34423443#ifdef KMP_GOMP_COMPAT3444if (__kmp_atomic_mode == 2) {3445__kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);3446} else3447#endif /* KMP_GOMP_COMPAT */3448__kmp_acquire_atomic_lock(&__kmp_atomic_lock_1i, gtid);34493450(*f)(lhs, lhs, rhs);34513452#ifdef KMP_GOMP_COMPAT3453if (__kmp_atomic_mode == 2) {3454__kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);3455} else3456#endif /* KMP_GOMP_COMPAT */3457__kmp_release_atomic_lock(&__kmp_atomic_lock_1i, gtid);3458}3459}34603461void __kmpc_atomic_2(ident_t *id_ref, int gtid, void *lhs, void *rhs,3462void (*f)(void *, void *, void *)) {3463if (3464#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)3465FALSE /* must use lock */3466#elif KMP_ARCH_X86 || KMP_ARCH_X86_643467TRUE /* no alignment problems */3468#else3469!((kmp_uintptr_t)lhs & 0x1) /* make sure address is 2-byte aligned */3470#endif // KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)3471) {3472kmp_int16 old_value, new_value;34733474old_value = *(kmp_int16 *)lhs;3475(*f)(&new_value, &old_value, rhs);34763477/* TODO: Should this be acquire or release? */3478while (!KMP_COMPARE_AND_STORE_ACQ16(3479(kmp_int16 *)lhs, *(kmp_int16 *)&old_value, *(kmp_int16 *)&new_value)) {3480KMP_CPU_PAUSE();34813482old_value = *(kmp_int16 *)lhs;3483(*f)(&new_value, &old_value, rhs);3484}34853486return;3487} else {3488// All 2-byte data is of integer data type.34893490#ifdef KMP_GOMP_COMPAT3491if (__kmp_atomic_mode == 2) {3492__kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);3493} else3494#endif /* KMP_GOMP_COMPAT */3495__kmp_acquire_atomic_lock(&__kmp_atomic_lock_2i, gtid);34963497(*f)(lhs, lhs, rhs);34983499#ifdef KMP_GOMP_COMPAT3500if (__kmp_atomic_mode == 2) {3501__kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);3502} else3503#endif /* KMP_GOMP_COMPAT */3504__kmp_release_atomic_lock(&__kmp_atomic_lock_2i, gtid);3505}3506}35073508void __kmpc_atomic_4(ident_t *id_ref, int gtid, void *lhs, void *rhs,3509void (*f)(void *, void *, void *)) {3510KMP_DEBUG_ASSERT(__kmp_init_serial);35113512if (3513// FIXME: On IA-32 architecture, gcc uses cmpxchg only for 4-byte ints.3514// Gomp compatibility is broken if this routine is called for floats.3515#if KMP_ARCH_X86 || KMP_ARCH_X86_643516TRUE /* no alignment problems */3517#else3518!((kmp_uintptr_t)lhs & 0x3) /* make sure address is 4-byte aligned */3519#endif // KMP_ARCH_X86 || KMP_ARCH_X86_643520) {3521kmp_int32 old_value, new_value;35223523old_value = *(kmp_int32 *)lhs;3524(*f)(&new_value, &old_value, rhs);35253526/* TODO: Should this be acquire or release? 
*/3527while (!KMP_COMPARE_AND_STORE_ACQ32(3528(kmp_int32 *)lhs, *(kmp_int32 *)&old_value, *(kmp_int32 *)&new_value)) {3529KMP_CPU_PAUSE();35303531old_value = *(kmp_int32 *)lhs;3532(*f)(&new_value, &old_value, rhs);3533}35343535return;3536} else {3537// Use __kmp_atomic_lock_4i for all 4-byte data,3538// even if it isn't of integer data type.35393540#ifdef KMP_GOMP_COMPAT3541if (__kmp_atomic_mode == 2) {3542__kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);3543} else3544#endif /* KMP_GOMP_COMPAT */3545__kmp_acquire_atomic_lock(&__kmp_atomic_lock_4i, gtid);35463547(*f)(lhs, lhs, rhs);35483549#ifdef KMP_GOMP_COMPAT3550if (__kmp_atomic_mode == 2) {3551__kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);3552} else3553#endif /* KMP_GOMP_COMPAT */3554__kmp_release_atomic_lock(&__kmp_atomic_lock_4i, gtid);3555}3556}35573558void __kmpc_atomic_8(ident_t *id_ref, int gtid, void *lhs, void *rhs,3559void (*f)(void *, void *, void *)) {3560KMP_DEBUG_ASSERT(__kmp_init_serial);3561if (35623563#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)3564FALSE /* must use lock */3565#elif KMP_ARCH_X86 || KMP_ARCH_X86_643566TRUE /* no alignment problems */3567#else3568!((kmp_uintptr_t)lhs & 0x7) /* make sure address is 8-byte aligned */3569#endif // KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)3570) {3571kmp_int64 old_value, new_value;35723573old_value = *(kmp_int64 *)lhs;3574(*f)(&new_value, &old_value, rhs);3575/* TODO: Should this be acquire or release? */3576while (!KMP_COMPARE_AND_STORE_ACQ64(3577(kmp_int64 *)lhs, *(kmp_int64 *)&old_value, *(kmp_int64 *)&new_value)) {3578KMP_CPU_PAUSE();35793580old_value = *(kmp_int64 *)lhs;3581(*f)(&new_value, &old_value, rhs);3582}35833584return;3585} else {3586// Use __kmp_atomic_lock_8i for all 8-byte data,3587// even if it isn't of integer data type.35883589#ifdef KMP_GOMP_COMPAT3590if (__kmp_atomic_mode == 2) {3591__kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);3592} else3593#endif /* KMP_GOMP_COMPAT */3594__kmp_acquire_atomic_lock(&__kmp_atomic_lock_8i, gtid);35953596(*f)(lhs, lhs, rhs);35973598#ifdef KMP_GOMP_COMPAT3599if (__kmp_atomic_mode == 2) {3600__kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);3601} else3602#endif /* KMP_GOMP_COMPAT */3603__kmp_release_atomic_lock(&__kmp_atomic_lock_8i, gtid);3604}3605}3606#if KMP_ARCH_X86 || KMP_ARCH_X86_643607void __kmpc_atomic_10(ident_t *id_ref, int gtid, void *lhs, void *rhs,3608void (*f)(void *, void *, void *)) {3609KMP_DEBUG_ASSERT(__kmp_init_serial);36103611#ifdef KMP_GOMP_COMPAT3612if (__kmp_atomic_mode == 2) {3613__kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);3614} else3615#endif /* KMP_GOMP_COMPAT */3616__kmp_acquire_atomic_lock(&__kmp_atomic_lock_10r, gtid);36173618(*f)(lhs, lhs, rhs);36193620#ifdef KMP_GOMP_COMPAT3621if (__kmp_atomic_mode == 2) {3622__kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);3623} else3624#endif /* KMP_GOMP_COMPAT */3625__kmp_release_atomic_lock(&__kmp_atomic_lock_10r, gtid);3626}3627#endif // KMP_ARCH_X86 || KMP_ARCH_X86_6436283629void __kmpc_atomic_16(ident_t *id_ref, int gtid, void *lhs, void *rhs,3630void (*f)(void *, void *, void *)) {3631KMP_DEBUG_ASSERT(__kmp_init_serial);36323633#ifdef KMP_GOMP_COMPAT3634if (__kmp_atomic_mode == 2) {3635__kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);3636} else3637#endif /* KMP_GOMP_COMPAT */3638__kmp_acquire_atomic_lock(&__kmp_atomic_lock_16c, gtid);36393640(*f)(lhs, lhs, rhs);36413642#ifdef KMP_GOMP_COMPAT3643if (__kmp_atomic_mode == 2) {3644__kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);3645} else3646#endif /* KMP_GOMP_COMPAT 
*/
    __kmp_release_atomic_lock(&__kmp_atomic_lock_16c, gtid);
}
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
void __kmpc_atomic_20(ident_t *id_ref, int gtid, void *lhs, void *rhs,
                      void (*f)(void *, void *, void *)) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);

#ifdef KMP_GOMP_COMPAT
  if (__kmp_atomic_mode == 2) {
    __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
  } else
#endif /* KMP_GOMP_COMPAT */
    __kmp_acquire_atomic_lock(&__kmp_atomic_lock_20c, gtid);

  (*f)(lhs, lhs, rhs);

#ifdef KMP_GOMP_COMPAT
  if (__kmp_atomic_mode == 2) {
    __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
  } else
#endif /* KMP_GOMP_COMPAT */
    __kmp_release_atomic_lock(&__kmp_atomic_lock_20c, gtid);
}
#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64
void __kmpc_atomic_32(ident_t *id_ref, int gtid, void *lhs, void *rhs,
                      void (*f)(void *, void *, void *)) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);

#ifdef KMP_GOMP_COMPAT
  if (__kmp_atomic_mode == 2) {
    __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
  } else
#endif /* KMP_GOMP_COMPAT */
    __kmp_acquire_atomic_lock(&__kmp_atomic_lock_32c, gtid);

  (*f)(lhs, lhs, rhs);

#ifdef KMP_GOMP_COMPAT
  if (__kmp_atomic_mode == 2) {
    __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
  } else
#endif /* KMP_GOMP_COMPAT */
    __kmp_release_atomic_lock(&__kmp_atomic_lock_32c, gtid);
}

// AC: same two routines as GOMP_atomic_start/end, but will be called by our
// compiler; duplicated in order to not use 3rd-party names in pure Intel code
// TODO: consider adding GTID parameter after consultation with Ernesto/Xinmin.
void __kmpc_atomic_start(void) {
  int gtid = __kmp_entry_gtid();
  KA_TRACE(20, ("__kmpc_atomic_start: T#%d\n", gtid));
  __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
}

void __kmpc_atomic_end(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("__kmpc_atomic_end: T#%d\n", gtid));
  __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
}

#if KMP_ARCH_X86 || KMP_ARCH_X86_64

// OpenMP 5.1 compare and swap

/*!
@param loc Source code location
@param gtid Global thread id
@param x Memory location to operate on
@param e Expected value
@param d Desired value
@return Result of comparison

Implements Compare And Swap atomic operation.

Sample code:
#pragma omp atomic compare update capture
{ r = x == e; if(r) { x = d; } }
*/
bool __kmpc_atomic_bool_1_cas(ident_t *loc, int gtid, char *x, char e, char d) {
  return KMP_COMPARE_AND_STORE_ACQ8(x, e, d);
}
bool __kmpc_atomic_bool_2_cas(ident_t *loc, int gtid, short *x, short e,
                              short d) {
  return KMP_COMPARE_AND_STORE_ACQ16(x, e, d);
}
bool __kmpc_atomic_bool_4_cas(ident_t *loc, int gtid, kmp_int32 *x, kmp_int32 e,
                              kmp_int32 d) {
  return KMP_COMPARE_AND_STORE_ACQ32(x, e, d);
}
bool __kmpc_atomic_bool_8_cas(ident_t *loc, int gtid, kmp_int64 *x, kmp_int64 e,
                              kmp_int64 d) {
  return KMP_COMPARE_AND_STORE_ACQ64(x, e, d);
}
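// Illustrative sketch (assumption, not part of the documented interface):
// a compiler could map the Sample code above onto the 4-byte entrypoint like
// this, with loc and gtid standing in for the usual source-location and
// thread-id arguments.
//
//   kmp_int32 x;   // the variable named in the atomic construct
//   ...
//   // #pragma omp atomic compare update capture
//   // { r = x == e; if (r) { x = d; } }
//   int r = __kmpc_atomic_bool_4_cas(&loc, gtid, &x, e, d);
//
// The return value is nonzero exactly when *x was equal to e and the store
// of d was performed.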

/*!
@param loc Source code location
@param gtid Global thread id
@param x Memory location to operate on
@param e Expected value
@param d Desired value
@return Old value of x

Implements Compare And Swap atomic operation.

Sample code:
#pragma omp atomic compare update capture
{ v = x; if (x == e) { x = d; } }
*/
char __kmpc_atomic_val_1_cas(ident_t *loc, int gtid, char *x, char e, char d) {
  return KMP_COMPARE_AND_STORE_RET8(x, e, d);
}
short __kmpc_atomic_val_2_cas(ident_t *loc, int gtid, short *x, short e,
                              short d) {
  return KMP_COMPARE_AND_STORE_RET16(x, e, d);
}
kmp_int32 __kmpc_atomic_val_4_cas(ident_t *loc, int gtid, kmp_int32 *x,
                                  kmp_int32 e, kmp_int32 d) {
  return KMP_COMPARE_AND_STORE_RET32(x, e, d);
}
kmp_int64 __kmpc_atomic_val_8_cas(ident_t *loc, int gtid, kmp_int64 *x,
                                  kmp_int64 e, kmp_int64 d) {
  return KMP_COMPARE_AND_STORE_RET64(x, e, d);
}

/*!
@param loc Source code location
@param gtid Global thread id
@param x Memory location to operate on
@param e Expected value
@param d Desired value
@param pv Captured value location
@return Result of comparison

Implements Compare And Swap + Capture atomic operation.

v gets old value of x if comparison failed, untouched otherwise.
Sample code:
#pragma omp atomic compare update capture
{ r = x == e; if(r) { x = d; } else { v = x; } }
*/
bool __kmpc_atomic_bool_1_cas_cpt(ident_t *loc, int gtid, char *x, char e,
                                  char d, char *pv) {
  char old = KMP_COMPARE_AND_STORE_RET8(x, e, d);
  if (old == e)
    return true;
  KMP_ASSERT(pv != NULL);
  *pv = old;
  return false;
}
bool __kmpc_atomic_bool_2_cas_cpt(ident_t *loc, int gtid, short *x, short e,
                                  short d, short *pv) {
  short old = KMP_COMPARE_AND_STORE_RET16(x, e, d);
  if (old == e)
    return true;
  KMP_ASSERT(pv != NULL);
  *pv = old;
  return false;
}
bool __kmpc_atomic_bool_4_cas_cpt(ident_t *loc, int gtid, kmp_int32 *x,
                                  kmp_int32 e, kmp_int32 d, kmp_int32 *pv) {
  kmp_int32 old = KMP_COMPARE_AND_STORE_RET32(x, e, d);
  if (old == e)
    return true;
  KMP_ASSERT(pv != NULL);
  *pv = old;
  return false;
}
bool __kmpc_atomic_bool_8_cas_cpt(ident_t *loc, int gtid, kmp_int64 *x,
                                  kmp_int64 e, kmp_int64 d, kmp_int64 *pv) {
  kmp_int64 old = KMP_COMPARE_AND_STORE_RET64(x, e, d);
  if (old == e)
    return true;
  KMP_ASSERT(pv != NULL);
  *pv = old;
  return false;
}
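// Illustrative sketch (assumption): usage of the capturing form above for
// the 8-byte case; loc and gtid are placeholders for the caller-provided
// location and thread-id arguments.
//
//   kmp_int64 x, v;
//   ...
//   // #pragma omp atomic compare update capture
//   // { r = x == e; if (r) { x = d; } else { v = x; } }
//   int r = __kmpc_atomic_bool_8_cas_cpt(&loc, gtid, &x, e, d, &v);
//
// On failure (r == 0) v receives the old value of x; on success v is left
// untouched, matching the comment above.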

/*!
@param loc Source code location
@param gtid Global thread id
@param x Memory location to operate on
@param e Expected value
@param d Desired value
@param pv Captured value location
@return Old value of x

Implements Compare And Swap + Capture atomic operation.

v gets new value of x.
Sample code:
#pragma omp atomic compare update capture
{ if (x == e) { x = d; }; v = x; }
*/
char __kmpc_atomic_val_1_cas_cpt(ident_t *loc, int gtid, char *x, char e,
                                 char d, char *pv) {
  char old = KMP_COMPARE_AND_STORE_RET8(x, e, d);
  KMP_ASSERT(pv != NULL);
  *pv = old == e ? d : old;
  return old;
}
short __kmpc_atomic_val_2_cas_cpt(ident_t *loc, int gtid, short *x, short e,
                                  short d, short *pv) {
  short old = KMP_COMPARE_AND_STORE_RET16(x, e, d);
  KMP_ASSERT(pv != NULL);
  *pv = old == e ? d : old;
  return old;
}
kmp_int32 __kmpc_atomic_val_4_cas_cpt(ident_t *loc, int gtid, kmp_int32 *x,
                                      kmp_int32 e, kmp_int32 d, kmp_int32 *pv) {
  kmp_int32 old = KMP_COMPARE_AND_STORE_RET32(x, e, d);
  KMP_ASSERT(pv != NULL);
  *pv = old == e ? d : old;
  return old;
}
kmp_int64 __kmpc_atomic_val_8_cas_cpt(ident_t *loc, int gtid, kmp_int64 *x,
                                      kmp_int64 e, kmp_int64 d, kmp_int64 *pv) {
  kmp_int64 old = KMP_COMPARE_AND_STORE_RET64(x, e, d);
  KMP_ASSERT(pv != NULL);
  *pv = old == e ? d : old;
  return old;
}

// End OpenMP 5.1 compare + capture
#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64

/*!
@}
*/

// end of file