/* Path: blob/21.2-virgl/src/intel/vulkan/tests/state_pool_no_free.c */
/*1* Copyright © 2015 Intel Corporation2*3* Permission is hereby granted, free of charge, to any person obtaining a4* copy of this software and associated documentation files (the "Software"),5* to deal in the Software without restriction, including without limitation6* the rights to use, copy, modify, merge, publish, distribute, sublicense,7* and/or sell copies of the Software, and to permit persons to whom the8* Software is furnished to do so, subject to the following conditions:9*10* The above copyright notice and this permission notice (including the next11* paragraph) shall be included in all copies or substantial portions of the12* Software.13*14* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR15* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,16* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL17* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER18* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING19* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS20* IN THE SOFTWARE.21*/2223#include <pthread.h>2425#include "anv_private.h"26#include "test_common.h"2728#define NUM_THREADS 1629#define STATES_PER_THREAD 102430#define NUM_RUNS 643132struct job {33pthread_t thread;34unsigned id;35struct anv_state_pool *pool;36uint32_t offsets[STATES_PER_THREAD];37} jobs[NUM_THREADS];3839pthread_barrier_t barrier;4041static void *alloc_states(void *_job)42{43struct job *job = _job;4445pthread_barrier_wait(&barrier);4647for (unsigned i = 0; i < STATES_PER_THREAD; i++) {48struct anv_state state = anv_state_pool_alloc(job->pool, 16, 16);49job->offsets[i] = state.offset;50}5152return NULL;53}5455static void run_test()56{57struct anv_physical_device physical_device = { };58struct anv_device device = {59.physical = &physical_device,60};61struct anv_state_pool state_pool;6263pthread_mutex_init(&device.mutex, 
NULL);64anv_bo_cache_init(&device.bo_cache);65anv_state_pool_init(&state_pool, &device, "test", 4096, 0, 64);6667pthread_barrier_init(&barrier, NULL, NUM_THREADS);6869for (unsigned i = 0; i < NUM_THREADS; i++) {70jobs[i].pool = &state_pool;71jobs[i].id = i;72pthread_create(&jobs[i].thread, NULL, alloc_states, &jobs[i]);73}7475for (unsigned i = 0; i < NUM_THREADS; i++)76pthread_join(jobs[i].thread, NULL);7778/* A list of indices, one per thread */79unsigned next[NUM_THREADS];80memset(next, 0, sizeof(next));8182int highest = -1;83while (true) {84/* First, we find which thread has the highest next element */85int thread_max = -1;86int max_thread_idx = -1;87for (unsigned i = 0; i < NUM_THREADS; i++) {88if (next[i] >= STATES_PER_THREAD)89continue;9091if (thread_max < jobs[i].offsets[next[i]]) {92thread_max = jobs[i].offsets[next[i]];93max_thread_idx = i;94}95}9697/* The only way this can happen is if all of the next[] values are at98* BLOCKS_PER_THREAD, in which case, we're done.99*/100if (thread_max == -1)101break;102103/* That next element had better be higher than the previous highest */104ASSERT(jobs[max_thread_idx].offsets[next[max_thread_idx]] > highest);105106highest = jobs[max_thread_idx].offsets[next[max_thread_idx]];107next[max_thread_idx]++;108}109110anv_state_pool_finish(&state_pool);111pthread_mutex_destroy(&device.mutex);112}113114int main(void)115{116for (unsigned i = 0; i < NUM_RUNS; i++)117run_test();118}119120121