Path: blob/main/sys/contrib/vchiq/interface/vchiq_arm/vchiq_arm.c
109239 views
/**1* Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.2* Copyright (c) 2010-2012 Broadcom. All rights reserved.3*4* Redistribution and use in source and binary forms, with or without5* modification, are permitted provided that the following conditions6* are met:7* 1. Redistributions of source code must retain the above copyright8* notice, this list of conditions, and the following disclaimer,9* without modification.10* 2. Redistributions in binary form must reproduce the above copyright11* notice, this list of conditions and the following disclaimer in the12* documentation and/or other materials provided with the distribution.13* 3. The names of the above-listed copyright holders may not be used14* to endorse or promote products derived from this software without15* specific prior written permission.16*17* ALTERNATIVELY, this software may be distributed under the terms of the18* GNU General Public License ("GPL") version 2, as published by the Free19* Software Foundation.20*21* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS22* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,23* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR24* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR25* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,26* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,27* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR28* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF29* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING30* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS31* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.32*/333435#include "vchiq_core.h"36#include "vchiq_ioctl.h"37#include "vchiq_arm.h"3839#define DEVICE_NAME "vchiq"4041/* Override the default prefix, which would be vchiq_arm (from the filename) */42#undef MODULE_PARAM_PREFIX43#define MODULE_PARAM_PREFIX DEVICE_NAME "."4445#define VCHIQ_MINOR 04647/* Some per-instance constants */48#define MAX_COMPLETIONS 12849#define MAX_SERVICES 6450#define MAX_ELEMENTS 851#define MSG_QUEUE_SIZE 1285253#define KEEPALIVE_VER 154#define KEEPALIVE_VER_MIN KEEPALIVE_VER5556/* Run time control of log level, based on KERN_XXX level. */57int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;58int vchiq_susp_log_level = VCHIQ_LOG_ERROR;5960#define SUSPEND_TIMER_TIMEOUT_MS 10061#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 10006263#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */64static const char *const suspend_state_names[] = {65"VC_SUSPEND_FORCE_CANCELED",66"VC_SUSPEND_REJECTED",67"VC_SUSPEND_FAILED",68"VC_SUSPEND_IDLE",69"VC_SUSPEND_REQUESTED",70"VC_SUSPEND_IN_PROGRESS",71"VC_SUSPEND_SUSPENDED"72};73#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */74static const char *const resume_state_names[] = {75"VC_RESUME_FAILED",76"VC_RESUME_IDLE",77"VC_RESUME_REQUESTED",78"VC_RESUME_IN_PROGRESS",79"VC_RESUME_RESUMED"80};81/* The number of times we allow force suspend to timeout before actually82** _forcing_ suspend. 
This is to cater for SW which fails to release vchiq83** correctly - we don't want to prevent ARM suspend indefinitely in this case.84*/85#define FORCE_SUSPEND_FAIL_MAX 88687/* The time in ms allowed for videocore to go idle when force suspend has been88* requested */89#define FORCE_SUSPEND_TIMEOUT_MS 200909192static void suspend_timer_callback(unsigned long context);93#ifdef notyet94static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance);95static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance);96#endif979899typedef struct user_service_struct {100VCHIQ_SERVICE_T *service;101void *userdata;102VCHIQ_INSTANCE_T instance;103char is_vchi;104char dequeue_pending;105char close_pending;106int message_available_pos;107int msg_insert;108int msg_remove;109struct semaphore insert_event;110struct semaphore remove_event;111struct semaphore close_event;112VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];113} USER_SERVICE_T;114115struct bulk_waiter_node {116struct bulk_waiter bulk_waiter;117int pid;118struct list_head list;119};120121struct vchiq_instance_struct {122VCHIQ_STATE_T *state;123VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];124int completion_insert;125int completion_remove;126struct semaphore insert_event;127struct semaphore remove_event;128struct mutex completion_mutex;129130int connected;131int closing;132int pid;133int mark;134int use_close_delivered;135int trace;136137struct list_head bulk_waiter_list;138struct mutex bulk_waiter_list_mutex;139140#ifdef notyet141VCHIQ_DEBUGFS_NODE_T proc_entry;142#endif143};144145typedef struct dump_context_struct {146char __user *buf;147size_t actual;148size_t space;149loff_t offset;150} DUMP_CONTEXT_T;151152static struct cdev * vchiq_cdev;153VCHIQ_STATE_T g_state;154static DEFINE_SPINLOCK(msg_queue_spinlock);155156static const char *const ioctl_names[] = {157"CONNECT",158"SHUTDOWN",159"CREATE_SERVICE",160"REMOVE_SERVICE",161"QUEUE_MESSAGE",162"QUEUE_BULK_TRANSMIT",163"QUEUE_BULK_RECEIVE",164"AWAIT_COMPLETION",165"DEQUEUE_MESSAGE",166"GET_CLIENT_ID",167"GET_CONFIG",168"CLOSE_SERVICE",169"USE_SERVICE",170"RELEASE_SERVICE",171"SET_SERVICE_OPTION",172"DUMP_PHYS_MEM",173"LIB_VERSION",174"CLOSE_DELIVERED"175};176177vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==178(VCHIQ_IOC_MAX + 1));179180static d_open_t vchiq_open;181static d_close_t vchiq_close;182static d_ioctl_t vchiq_ioctl;183184static struct cdevsw vchiq_cdevsw = {185.d_version = D_VERSION,186.d_ioctl = vchiq_ioctl,187.d_open = vchiq_open,188.d_close = vchiq_close,189.d_name = DEVICE_NAME,190};191192#if 0193static void194dump_phys_mem(void *virt_addr, uint32_t num_bytes);195#endif196197/****************************************************************************198*199* add_completion200*201***************************************************************************/202203static VCHIQ_STATUS_T204add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,205VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,206void *bulk_userdata)207{208VCHIQ_COMPLETION_DATA_T *completion;209int insert;210DEBUG_INITIALISE(g_state.local)211212insert = instance->completion_insert;213while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {214/* Out of space - wait for the client */215DEBUG_TRACE(SERVICE_CALLBACK_LINE);216vchiq_log_trace(vchiq_arm_log_level,217"add_completion - completion queue full");218DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);219220if (down_interruptible(&instance->remove_event) != 0) {221vchiq_log_info(vchiq_arm_log_level,222"service_callback 
interrupted");223return VCHIQ_RETRY;224}225226if (instance->closing) {227vchiq_log_info(vchiq_arm_log_level,228"service_callback closing");229return VCHIQ_SUCCESS;230}231DEBUG_TRACE(SERVICE_CALLBACK_LINE);232}233234completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];235236completion->header = header;237completion->reason = reason;238/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */239completion->service_userdata = user_service->service;240completion->bulk_userdata = bulk_userdata;241242if (reason == VCHIQ_SERVICE_CLOSED) {243/* Take an extra reference, to be held until244this CLOSED notification is delivered. */245lock_service(user_service->service);246if (instance->use_close_delivered)247user_service->close_pending = 1;248}249250/* A write barrier is needed here to ensure that the entire completion251record is written out before the insert point. */252wmb();253254if (reason == VCHIQ_MESSAGE_AVAILABLE)255user_service->message_available_pos = insert;256257instance->completion_insert = ++insert;258259up(&instance->insert_event);260261return VCHIQ_SUCCESS;262}263264/****************************************************************************265*266* service_callback267*268***************************************************************************/269270static VCHIQ_STATUS_T271service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,272VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)273{274/* How do we ensure the callback goes to the right client?275** The service_user data points to a USER_SERVICE_T record containing276** the original callback and the user state structure, which contains a277** circular buffer for completion records.278*/279USER_SERVICE_T *user_service;280VCHIQ_SERVICE_T *service;281VCHIQ_INSTANCE_T instance;282int skip_completion = 0;283DEBUG_INITIALISE(g_state.local)284285DEBUG_TRACE(SERVICE_CALLBACK_LINE);286287service = handle_to_service(handle);288BUG_ON(!service);289user_service = (USER_SERVICE_T *)service->base.userdata;290instance = user_service->instance;291292if (!instance || instance->closing)293return VCHIQ_SUCCESS;294295vchiq_log_trace(vchiq_arm_log_level,296"service_callback - service %lx(%d,%p), reason %d, header %lx, "297"instance %lx, bulk_userdata %lx",298(unsigned long)user_service,299service->localport, user_service->userdata,300reason, (unsigned long)header,301(unsigned long)instance, (unsigned long)bulk_userdata);302303if (header && user_service->is_vchi) {304spin_lock(&msg_queue_spinlock);305while (user_service->msg_insert ==306(user_service->msg_remove + MSG_QUEUE_SIZE)) {307spin_unlock(&msg_queue_spinlock);308DEBUG_TRACE(SERVICE_CALLBACK_LINE);309DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);310vchiq_log_trace(vchiq_arm_log_level,311"service_callback - msg queue full");312/* If there is no MESSAGE_AVAILABLE in the completion313** queue, add one314*/315if ((user_service->message_available_pos -316instance->completion_remove) < 0) {317VCHIQ_STATUS_T status;318vchiq_log_info(vchiq_arm_log_level,319"Inserting extra MESSAGE_AVAILABLE");320DEBUG_TRACE(SERVICE_CALLBACK_LINE);321status = add_completion(instance, reason,322NULL, user_service, bulk_userdata);323if (status != VCHIQ_SUCCESS) {324DEBUG_TRACE(SERVICE_CALLBACK_LINE);325return status;326}327}328329DEBUG_TRACE(SERVICE_CALLBACK_LINE);330if (down_interruptible(&user_service->remove_event)331!= 0) {332vchiq_log_info(vchiq_arm_log_level,333"service_callback interrupted");334DEBUG_TRACE(SERVICE_CALLBACK_LINE);335return VCHIQ_RETRY;336} else if (instance->closing) 
{337vchiq_log_info(vchiq_arm_log_level,338"service_callback closing");339DEBUG_TRACE(SERVICE_CALLBACK_LINE);340return VCHIQ_ERROR;341}342DEBUG_TRACE(SERVICE_CALLBACK_LINE);343spin_lock(&msg_queue_spinlock);344}345346user_service->msg_queue[user_service->msg_insert &347(MSG_QUEUE_SIZE - 1)] = header;348user_service->msg_insert++;349350/* If there is a thread waiting in DEQUEUE_MESSAGE, or if351** there is a MESSAGE_AVAILABLE in the completion queue then352** bypass the completion queue.353*/354if (((user_service->message_available_pos -355instance->completion_remove) >= 0) ||356user_service->dequeue_pending) {357user_service->dequeue_pending = 0;358skip_completion = 1;359}360361spin_unlock(&msg_queue_spinlock);362363up(&user_service->insert_event);364365header = NULL;366}367368if (skip_completion) {369DEBUG_TRACE(SERVICE_CALLBACK_LINE);370return VCHIQ_SUCCESS;371}372373DEBUG_TRACE(SERVICE_CALLBACK_LINE);374375return add_completion(instance, reason, header, user_service,376bulk_userdata);377}378379/****************************************************************************380*381* user_service_free382*383***************************************************************************/384static void385user_service_free(void *userdata)386{387USER_SERVICE_T *user_service = userdata;388389_sema_destroy(&user_service->insert_event);390_sema_destroy(&user_service->remove_event);391392kfree(user_service);393}394395/****************************************************************************396*397* close_delivered398*399***************************************************************************/400static void close_delivered(USER_SERVICE_T *user_service)401{402vchiq_log_info(vchiq_arm_log_level,403"close_delivered(handle=%x)",404user_service->service->handle);405406if (user_service->close_pending) {407/* Allow the underlying service to be culled */408unlock_service(user_service->service);409410/* Wake the user-thread blocked in close_ or remove_service */411up(&user_service->close_event);412413user_service->close_pending = 0;414}415}416417/****************************************************************************418*419* vchiq_ioctl420*421***************************************************************************/422423static int424vchiq_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,425struct thread *td)426{427VCHIQ_INSTANCE_T instance;428VCHIQ_STATUS_T status = VCHIQ_SUCCESS;429VCHIQ_SERVICE_T *service = NULL;430int ret = 0;431int i, rc;432DEBUG_INITIALISE(g_state.local)433434if ((ret = devfs_get_cdevpriv((void**)&instance))) {435printf("vchiq_ioctl: devfs_get_cdevpriv failed: error %d\n", ret);436return (ret);437}438439/* XXXBSD: HACK! 
*/440#define _IOC_NR(x) ((x) & 0xff)441#define _IOC_TYPE(x) IOCGROUP(x)442443vchiq_log_trace(vchiq_arm_log_level,444"vchiq_ioctl - instance %zx, cmd %s, arg %p",445(size_t)instance,446((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&447(_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?448ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);449450#ifdef COMPAT_FREEBSD32451/* A fork in the road to freebsd32 compatibilty */452#define _CF32_FORK(compat_c, native_c) { \453int _____dont_call_your_vars_this = 0; \454switch (cmd) { \455_CF32_CASE {_____dont_call_your_vars_this = 1;} \456break; \457} \458if (_____dont_call_your_vars_this) \459{ compat_c } \460else \461{ native_c } \462}463#else464#define _CF32_FORK(compat_c, native_c) { native_c }465#endif466switch (cmd) {467case VCHIQ_IOC_SHUTDOWN:468if (!instance->connected)469break;470471/* Remove all services */472i = 0;473while ((service = next_service_by_instance(instance->state,474instance, &i)) != NULL) {475status = vchiq_remove_service(service->handle);476unlock_service(service);477if (status != VCHIQ_SUCCESS)478break;479}480service = NULL;481482if (status == VCHIQ_SUCCESS) {483/* Wake the completion thread and ask it to exit */484instance->closing = 1;485up(&instance->insert_event);486}487488break;489490case VCHIQ_IOC_CONNECT:491if (instance->connected) {492ret = -EINVAL;493break;494}495rc = lmutex_lock_interruptible(&instance->state->mutex);496if (rc != 0) {497vchiq_log_error(vchiq_arm_log_level,498"vchiq: connect: could not lock mutex for "499"state %d: %d",500instance->state->id, rc);501ret = -EINTR;502break;503}504status = vchiq_connect_internal(instance->state, instance);505lmutex_unlock(&instance->state->mutex);506507if (status == VCHIQ_SUCCESS)508instance->connected = 1;509else510vchiq_log_error(vchiq_arm_log_level,511"vchiq: could not connect: %d", status);512break;513514#ifdef COMPAT_FREEBSD32515#define _CF32_CASE \516case VCHIQ_IOC_CREATE_SERVICE32:517_CF32_CASE518#endif519case VCHIQ_IOC_CREATE_SERVICE: {520VCHIQ_CREATE_SERVICE_T args;521USER_SERVICE_T *user_service = NULL;522void *userdata;523int srvstate;524525_CF32_FORK(526VCHIQ_CREATE_SERVICE32_T args32;527memcpy(&args32, (const void*)arg, sizeof(args32));528args.params.fourcc = args32.params.fourcc;529/* XXXMDC not actually used? 
overwritten straight away */530args.params.callback =531(VCHIQ_CALLBACK_T)(uintptr_t) args32.params.callback;532args.params.userdata = (void*)(uintptr_t)args32.params.userdata;533args.params.version = args32.params.version;534args.params.version_min = args32.params.version_min;535args.is_open = args32.is_open;536args.is_vchi = args32.is_vchi;537args.handle = args32.handle;538,539memcpy(&args, (const void*)arg, sizeof(args));540)541542user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);543if (!user_service) {544ret = -ENOMEM;545break;546}547548if (args.is_open) {549if (!instance->connected) {550ret = -ENOTCONN;551kfree(user_service);552break;553}554srvstate = VCHIQ_SRVSTATE_OPENING;555} else {556srvstate =557instance->connected ?558VCHIQ_SRVSTATE_LISTENING :559VCHIQ_SRVSTATE_HIDDEN;560}561562userdata = args.params.userdata;563args.params.callback = service_callback;564args.params.userdata = user_service;565service = vchiq_add_service_internal(566instance->state,567&args.params, srvstate,568instance, user_service_free);569570if (service != NULL) {571user_service->service = service;572user_service->userdata = userdata;573user_service->instance = instance;574user_service->is_vchi = (args.is_vchi != 0);575user_service->dequeue_pending = 0;576user_service->close_pending = 0;577user_service->message_available_pos =578instance->completion_remove - 1;579user_service->msg_insert = 0;580user_service->msg_remove = 0;581_sema_init(&user_service->insert_event, 0);582_sema_init(&user_service->remove_event, 0);583_sema_init(&user_service->close_event, 0);584585if (args.is_open) {586status = vchiq_open_service_internal587(service, instance->pid);588if (status != VCHIQ_SUCCESS) {589vchiq_remove_service(service->handle);590service = NULL;591ret = (status == VCHIQ_RETRY) ?592-EINTR : -EIO;593break;594}595}596#ifdef VCHIQ_IOCTL_DEBUG597printf("%s: [CREATE SERVICE] handle = %08x\n", __func__, service->handle);598#endif599_CF32_FORK(600memcpy((void *)601&(((VCHIQ_CREATE_SERVICE32_T*)602arg)->handle),603(const void *)&service->handle,604sizeof(service->handle));605,606memcpy((void *)607&(((VCHIQ_CREATE_SERVICE_T*)608arg)->handle),609(const void *)&service->handle,610sizeof(service->handle));611);612613service = NULL;614} else {615ret = -EEXIST;616kfree(user_service);617}618} break;619#undef _CF32_CASE620621case VCHIQ_IOC_CLOSE_SERVICE: {622VCHIQ_SERVICE_HANDLE_T handle;623624memcpy(&handle, (const void*)arg, sizeof(handle));625626#ifdef VCHIQ_IOCTL_DEBUG627printf("%s: [CLOSE SERVICE] handle = %08x\n", __func__, handle);628#endif629630service = find_service_for_instance(instance, handle);631if (service != NULL) {632USER_SERVICE_T *user_service =633(USER_SERVICE_T *)service->base.userdata;634/* close_pending is false on first entry, and when the635wait in vchiq_close_service has been interrupted. */636if (!user_service->close_pending) {637status = vchiq_close_service(service->handle);638if (status != VCHIQ_SUCCESS)639break;640}641642/* close_pending is true once the underlying service643has been closed until the client library calls the644CLOSE_DELIVERED ioctl, signalling close_event. 
*/645if (user_service->close_pending &&646down_interruptible(&user_service->close_event))647status = VCHIQ_RETRY;648}649else650ret = -EINVAL;651} break;652653case VCHIQ_IOC_REMOVE_SERVICE: {654VCHIQ_SERVICE_HANDLE_T handle;655656memcpy(&handle, (const void*)arg, sizeof(handle));657658#ifdef VCHIQ_IOCTL_DEBUG659printf("%s: [REMOVE SERVICE] handle = %08x\n", __func__, handle);660#endif661662service = find_service_for_instance(instance, handle);663if (service != NULL) {664USER_SERVICE_T *user_service =665(USER_SERVICE_T *)service->base.userdata;666/* close_pending is false on first entry, and when the667wait in vchiq_close_service has been interrupted. */668if (!user_service->close_pending) {669status = vchiq_remove_service(service->handle);670if (status != VCHIQ_SUCCESS)671break;672}673674/* close_pending is true once the underlying service675has been closed until the client library calls the676CLOSE_DELIVERED ioctl, signalling close_event. */677if (user_service->close_pending &&678down_interruptible(&user_service->close_event))679status = VCHIQ_RETRY;680}681else682ret = -EINVAL;683} break;684685case VCHIQ_IOC_USE_SERVICE:686case VCHIQ_IOC_RELEASE_SERVICE: {687VCHIQ_SERVICE_HANDLE_T handle;688689memcpy(&handle, (const void*)arg, sizeof(handle));690691#ifdef VCHIQ_IOCTL_DEBUG692printf("%s: [%s SERVICE] handle = %08x\n", __func__,693cmd == VCHIQ_IOC_USE_SERVICE ? "USE" : "RELEASE", handle);694#endif695696service = find_service_for_instance(instance, handle);697if (service != NULL) {698status = (cmd == VCHIQ_IOC_USE_SERVICE) ?699vchiq_use_service_internal(service) :700vchiq_release_service_internal(service);701if (status != VCHIQ_SUCCESS) {702vchiq_log_error(vchiq_susp_log_level,703"%s: cmd %s returned error %d for "704"service %c%c%c%c:%8x",705__func__,706(cmd == VCHIQ_IOC_USE_SERVICE) ?707"VCHIQ_IOC_USE_SERVICE" :708"VCHIQ_IOC_RELEASE_SERVICE",709status,710VCHIQ_FOURCC_AS_4CHARS(711service->base.fourcc),712service->client_id);713ret = -EINVAL;714}715} else716ret = -EINVAL;717} break;718719#ifdef COMPAT_FREEBSD32720#define _CF32_CASE \721case VCHIQ_IOC_QUEUE_MESSAGE32:722_CF32_CASE723#endif724case VCHIQ_IOC_QUEUE_MESSAGE: {725VCHIQ_QUEUE_MESSAGE_T args;726_CF32_FORK(727VCHIQ_QUEUE_MESSAGE32_T args32;728memcpy(&args32, (const void*)arg, sizeof(args32));729args.handle = args32.handle;730args.count = args32.count;731args.elements = (VCHIQ_ELEMENT_T *)(uintptr_t)args32.elements;732,733memcpy(&args, (const void*)arg, sizeof(args));734)735736#ifdef VCHIQ_IOCTL_DEBUG737printf("%s: [QUEUE MESSAGE] handle = %08x\n", __func__, args.handle);738#endif739740service = find_service_for_instance(instance, args.handle);741742if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {743/* Copy elements into kernel space */744VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];745long cp_ret;746_CF32_FORK(747VCHIQ_ELEMENT32_T elements32[MAX_ELEMENTS];748cp_ret = copy_from_user(elements32, args.elements,749args.count * sizeof(VCHIQ_ELEMENT32_T));750for(int i=0;cp_ret == 0 && i < args.count;++i){751elements[i].data =752(void *)(uintptr_t)elements32[i].data;753elements[i].size = elements32[i].size;754}755756,757cp_ret = copy_from_user(elements, args.elements,758args.count * sizeof(VCHIQ_ELEMENT_T));759)760if (cp_ret == 0)761status = vchiq_queue_message762(args.handle,763elements, args.count);764else765ret = -EFAULT;766} else {767ret = -EINVAL;768}769} break;770#undef _CF32_CASE771772#ifdef COMPAT_FREEBSD32773#define _CF32_CASE \774case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32: \775case 
VCHIQ_IOC_QUEUE_BULK_RECEIVE32:776_CF32_CASE777#endif778case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:779case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {780VCHIQ_QUEUE_BULK_TRANSFER_T args;781782struct bulk_waiter_node *waiter = NULL;783VCHIQ_BULK_DIR_T dir =784(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ||785(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32)?786VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;787788_CF32_FORK(789VCHIQ_QUEUE_BULK_TRANSFER32_T args32;790memcpy(&args32, (const void*)arg, sizeof(args32));791/* XXXMDC parens needed (macro parsing?) */792args = ((VCHIQ_QUEUE_BULK_TRANSFER_T) {793.handle = args32.handle,794.data = (void *)(uintptr_t) args32.data,795.size = args32.size,796.userdata = (void *)(uintptr_t) args32.userdata,797.mode = args32.mode,798});799,800memcpy(&args, (const void*)arg, sizeof(args));801)802803service = find_service_for_instance(instance, args.handle);804if (!service) {805ret = -EINVAL;806break;807}808809if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {810waiter = kzalloc(sizeof(struct bulk_waiter_node),811GFP_KERNEL);812if (!waiter) {813ret = -ENOMEM;814break;815}816args.userdata = &waiter->bulk_waiter;817} else if (args.mode == VCHIQ_BULK_MODE_WAITING) {818struct list_head *pos;819lmutex_lock(&instance->bulk_waiter_list_mutex);820list_for_each(pos, &instance->bulk_waiter_list) {821if (list_entry(pos, struct bulk_waiter_node,822list)->pid == current->p_pid) {823waiter = list_entry(pos,824struct bulk_waiter_node,825list);826list_del(pos);827break;828}829}830lmutex_unlock(&instance->bulk_waiter_list_mutex);831if (!waiter) {832vchiq_log_error(vchiq_arm_log_level,833"no bulk_waiter found for pid %d",834current->p_pid);835ret = -ESRCH;836break;837}838vchiq_log_info(vchiq_arm_log_level,839"found bulk_waiter %zx for pid %d",840(size_t)waiter, current->p_pid);841args.userdata = &waiter->bulk_waiter;842}843844status = vchiq_bulk_transfer845(args.handle,846VCHI_MEM_HANDLE_INVALID,847args.data, args.size,848args.userdata, args.mode,849dir);850if (!waiter)851break;852if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||853!waiter->bulk_waiter.bulk) {854if (waiter->bulk_waiter.bulk) {855/* Cancel the signal when the transfer856** completes. 
*/857spin_lock(&bulk_waiter_spinlock);858waiter->bulk_waiter.bulk->userdata = NULL;859spin_unlock(&bulk_waiter_spinlock);860}861_sema_destroy(&waiter->bulk_waiter.event);862kfree(waiter);863} else {864const VCHIQ_BULK_MODE_T mode_waiting =865VCHIQ_BULK_MODE_WAITING;866waiter->pid = current->p_pid;867lmutex_lock(&instance->bulk_waiter_list_mutex);868list_add(&waiter->list, &instance->bulk_waiter_list);869lmutex_unlock(&instance->bulk_waiter_list_mutex);870vchiq_log_info(vchiq_arm_log_level,871"saved bulk_waiter %zx for pid %d",872(size_t)waiter, current->p_pid);873874_CF32_FORK(875memcpy((void *)876&(((VCHIQ_QUEUE_BULK_TRANSFER32_T *)877arg)->mode),878(const void *)&mode_waiting,879sizeof(mode_waiting));880,881memcpy((void *)882&(((VCHIQ_QUEUE_BULK_TRANSFER_T *)883arg)->mode),884(const void *)&mode_waiting,885sizeof(mode_waiting));886)887}888} break;889#undef _CF32_CASE890891#ifdef COMPAT_FREEBSD32892#define _CF32_CASE \893case VCHIQ_IOC_AWAIT_COMPLETION32:894_CF32_CASE895#endif896case VCHIQ_IOC_AWAIT_COMPLETION: {897VCHIQ_AWAIT_COMPLETION_T args;898int count = 0;899900DEBUG_TRACE(AWAIT_COMPLETION_LINE);901if (!instance->connected) {902ret = -ENOTCONN;903break;904}905906_CF32_FORK(907VCHIQ_AWAIT_COMPLETION32_T args32;908memcpy(&args32, (const void*)arg, sizeof(args32));909args.count = args32.count;910args.buf = (VCHIQ_COMPLETION_DATA_T *)(uintptr_t)args32.buf;911args.msgbufsize = args32.msgbufsize;912args.msgbufcount = args32.msgbufcount;913args.msgbufs = (void **)(uintptr_t)args32.msgbufs;914,915memcpy(&args, (const void*)arg, sizeof(args));916)917918lmutex_lock(&instance->completion_mutex);919920DEBUG_TRACE(AWAIT_COMPLETION_LINE);921while ((instance->completion_remove ==922instance->completion_insert)923&& !instance->closing) {924925DEBUG_TRACE(AWAIT_COMPLETION_LINE);926lmutex_unlock(&instance->completion_mutex);927rc = down_interruptible(&instance->insert_event);928lmutex_lock(&instance->completion_mutex);929if (rc != 0) {930DEBUG_TRACE(AWAIT_COMPLETION_LINE);931vchiq_log_info(vchiq_arm_log_level,932"AWAIT_COMPLETION interrupted");933ret = -EINTR;934break;935}936}937DEBUG_TRACE(AWAIT_COMPLETION_LINE);938939if (ret == 0) {940int msgbufcount = args.msgbufcount;941int remove;942943remove = instance->completion_remove;944945for (count = 0; count < args.count; count++) {946VCHIQ_COMPLETION_DATA_T *completion;947VCHIQ_SERVICE_T *service1;948USER_SERVICE_T *user_service;949VCHIQ_HEADER_T *header;950951if (remove == instance->completion_insert)952break;953954completion = &instance->completions[955remove & (MAX_COMPLETIONS - 1)];956957958/* A read memory barrier is needed to prevent959** the prefetch of a stale completion record960*/961rmb();962963service1 = completion->service_userdata;964user_service = service1->base.userdata;965completion->service_userdata =966user_service->userdata;967968header = completion->header;969if (header) {970void __user *msgbuf;971int msglen;972973msglen = header->size +974sizeof(VCHIQ_HEADER_T);975/* This must be a VCHIQ-style service */976if (args.msgbufsize < msglen) {977vchiq_log_error(978vchiq_arm_log_level,979"header %zx: msgbufsize"980" %x < msglen %x",981(size_t)header,982args.msgbufsize,983msglen);984WARN(1, "invalid message "985"size\n");986if (count == 0)987ret = -EMSGSIZE;988break;989}990if (msgbufcount <= 0)991/* Stall here for lack of a992** buffer for the message. 
*/993break;994/* Get the pointer from user space */995msgbufcount--;996_CF32_FORK(997uint32_t *msgbufs32 =998(uint32_t *) args.msgbufs;999uint32_t msgbuf32 = 0;1000if (copy_from_user(&msgbuf32,1001(const uint32_t __user *)1002&msgbufs32[msgbufcount],1003sizeof(msgbuf32)) != 0) {1004if (count == 0)1005ret = -EFAULT;1006break;1007}1008msgbuf = (void __user *)(uintptr_t)msgbuf32;1009,1010if (copy_from_user(&msgbuf,1011(const void __user *)1012&args.msgbufs[msgbufcount],1013sizeof(msgbuf)) != 0) {1014if (count == 0)1015ret = -EFAULT;1016break;1017}1018)10191020/* Copy the message to user space */1021if (copy_to_user(msgbuf, header,1022msglen) != 0) {1023if (count == 0)1024ret = -EFAULT;1025break;1026}10271028/* Now it has been copied, the message1029** can be released. */1030vchiq_release_message(service1->handle,1031header);10321033/* The completion must point to the1034** msgbuf. */1035completion->header = msgbuf;1036}10371038if ((completion->reason ==1039VCHIQ_SERVICE_CLOSED) &&1040!instance->use_close_delivered)1041unlock_service(service1);1042_CF32_FORK(1043VCHIQ_COMPLETION_DATA32_T comp32 = {0};1044comp32.reason =1045(uint32_t)(size_t) completion->reason;1046comp32.service_userdata =1047(uint32_t)(size_t)1048completion->service_userdata;1049comp32.bulk_userdata =1050(uint32_t)(size_t)1051completion->bulk_userdata;1052comp32.header =1053(uint32_t)(size_t)completion->header;10541055VCHIQ_COMPLETION_DATA32_T __user *buf_loc;1056buf_loc = (VCHIQ_COMPLETION_DATA32_T __user *)1057args.buf;1058buf_loc += count;1059if (copy_to_user(1060buf_loc, &comp32, sizeof(comp32)1061) != 0) {1062if (ret == 0)1063ret = -EFAULT;1064}1065,1066if (copy_to_user((void __user *)(1067(size_t)args.buf +1068count * sizeof(VCHIQ_COMPLETION_DATA_T)),1069completion,1070sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {1071if (ret == 0)1072ret = -EFAULT;1073break;1074}1075)10761077/* Ensure that the above copy has completed1078** before advancing the remove pointer. 
*/1079mb();10801081instance->completion_remove = ++remove;1082}10831084if (msgbufcount != args.msgbufcount) {1085_CF32_FORK(1086memcpy(1087(void __user *)1088&((VCHIQ_AWAIT_COMPLETION32_T *)arg)->1089msgbufcount,1090&msgbufcount,1091sizeof(msgbufcount));1092,1093memcpy((void __user *)1094&((VCHIQ_AWAIT_COMPLETION_T *)arg)->1095msgbufcount,1096&msgbufcount,1097sizeof(msgbufcount));1098)1099}11001101if (count != args.count)1102{1103_CF32_FORK(1104memcpy((void __user *)1105&((VCHIQ_AWAIT_COMPLETION32_T *)arg)->count,1106&count, sizeof(count));1107,1108memcpy((void __user *)1109&((VCHIQ_AWAIT_COMPLETION_T *)arg)->count,1110&count, sizeof(count));1111)1112}1113}11141115if (count != 0)1116up(&instance->remove_event);11171118if ((ret == 0) && instance->closing)1119ret = -ENOTCONN;1120/*1121* XXXBSD: ioctl return codes are not negative as in linux, so1122* we can not indicate success with positive number of passed1123* messages1124*/1125if (ret > 0)1126ret = 0;11271128lmutex_unlock(&instance->completion_mutex);1129DEBUG_TRACE(AWAIT_COMPLETION_LINE);1130} break;1131#undef _CF32_CASE11321133#ifdef COMPAT_FREEBSD321134#define _CF32_CASE \1135case VCHIQ_IOC_DEQUEUE_MESSAGE32:1136_CF32_CASE1137#endif1138case VCHIQ_IOC_DEQUEUE_MESSAGE: {1139VCHIQ_DEQUEUE_MESSAGE_T args;1140USER_SERVICE_T *user_service;1141VCHIQ_HEADER_T *header;11421143DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);1144_CF32_FORK(1145VCHIQ_DEQUEUE_MESSAGE32_T args32;1146memcpy(&args32, (const void*)arg, sizeof(args32));1147args.handle = args32.handle;1148args.blocking = args32.blocking;1149args.bufsize = args32.bufsize;1150args.buf = (void *)(uintptr_t)args32.buf;1151,1152memcpy(&args, (const void*)arg, sizeof(args));1153)1154service = find_service_for_instance(instance, args.handle);1155if (!service) {1156ret = -EINVAL;1157break;1158}1159user_service = (USER_SERVICE_T *)service->base.userdata;1160if (user_service->is_vchi == 0) {1161ret = -EINVAL;1162break;1163}11641165spin_lock(&msg_queue_spinlock);1166if (user_service->msg_remove == user_service->msg_insert) {1167if (!args.blocking) {1168spin_unlock(&msg_queue_spinlock);1169DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);1170ret = -EWOULDBLOCK;1171break;1172}1173user_service->dequeue_pending = 1;1174do {1175spin_unlock(&msg_queue_spinlock);1176DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);1177if (down_interruptible(1178&user_service->insert_event) != 0) {1179vchiq_log_info(vchiq_arm_log_level,1180"DEQUEUE_MESSAGE interrupted");1181ret = -EINTR;1182break;1183}1184spin_lock(&msg_queue_spinlock);1185} while (user_service->msg_remove ==1186user_service->msg_insert);11871188if (ret)1189break;1190}11911192BUG_ON((int)(user_service->msg_insert -1193user_service->msg_remove) < 0);11941195header = user_service->msg_queue[user_service->msg_remove &1196(MSG_QUEUE_SIZE - 1)];1197user_service->msg_remove++;1198spin_unlock(&msg_queue_spinlock);11991200up(&user_service->remove_event);1201if (header == NULL)1202ret = -ENOTCONN;1203else if (header->size <= args.bufsize) {1204/* Copy to user space if msgbuf is not NULL */1205if ((args.buf == NULL) ||1206(copy_to_user((void __user *)args.buf,1207header->data,1208header->size) == 0)) {1209args.bufsize = header->size;1210_CF32_FORK(1211VCHIQ_DEQUEUE_MESSAGE32_T args32;1212args32.handle = args.handle;1213args32.blocking = args.blocking;1214args32.bufsize = args.bufsize;1215args32.buf = (uintptr_t)(void *)args.buf;12161217memcpy((void *)arg, &args32,1218sizeof(args32));1219,1220memcpy((void *)arg, &args,1221sizeof(args));1222)1223vchiq_release_message(1224service->handle,1225header);1226} 
else1227ret = -EFAULT;1228} else {1229vchiq_log_error(vchiq_arm_log_level,1230"header %zx: bufsize %x < size %x",1231(size_t)header, args.bufsize,1232header->size);1233WARN(1, "invalid size\n");1234ret = -EMSGSIZE;1235}1236DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);1237} break;1238#undef _CF32_CASE12391240case VCHIQ_IOC_GET_CLIENT_ID: {1241VCHIQ_SERVICE_HANDLE_T handle;12421243memcpy(&handle, (const void*)arg, sizeof(handle));12441245ret = vchiq_get_client_id(handle);1246} break;12471248#ifdef COMPAT_FREEBSD321249#define _CF32_CASE \1250case VCHIQ_IOC_GET_CONFIG32:1251_CF32_CASE1252#endif1253case VCHIQ_IOC_GET_CONFIG: {1254VCHIQ_GET_CONFIG_T args;1255VCHIQ_CONFIG_T config;1256_CF32_FORK(1257VCHIQ_GET_CONFIG32_T args32;12581259memcpy(&args32, (const void*)arg, sizeof(args32));1260args.config_size = args32.config_size;1261args.pconfig = (VCHIQ_CONFIG_T *)1262(uintptr_t)args32.pconfig;1263,1264memcpy(&args, (const void*)arg, sizeof(args));1265)1266if (args.config_size > sizeof(config)) {1267ret = -EINVAL;1268break;1269}1270status = vchiq_get_config(instance, args.config_size, &config);1271if (status == VCHIQ_SUCCESS) {1272if (copy_to_user((void __user *)args.pconfig,1273&config, args.config_size) != 0) {1274ret = -EFAULT;1275break;1276}1277}1278} break;1279#undef _CF32_CASE12801281case VCHIQ_IOC_SET_SERVICE_OPTION: {1282VCHIQ_SET_SERVICE_OPTION_T args;12831284memcpy(&args, (const void*)arg, sizeof(args));12851286service = find_service_for_instance(instance, args.handle);1287if (!service) {1288ret = -EINVAL;1289break;1290}12911292status = vchiq_set_service_option(1293args.handle, args.option, args.value);1294} break;12951296#ifdef COMPAT_FREEBSD321297#define _CF32_CASE \1298case VCHIQ_IOC_DUMP_PHYS_MEM32:1299_CF32_CASE1300#endif1301case VCHIQ_IOC_DUMP_PHYS_MEM: {1302VCHIQ_DUMP_MEM_T args;13031304_CF32_FORK(1305VCHIQ_DUMP_MEM32_T args32;1306memcpy(&args32, (const void*)arg, sizeof(args32));1307args.virt_addr = (void *)(uintptr_t)args32.virt_addr;1308args.num_bytes = (size_t)args32.num_bytes;1309,1310memcpy(&args, (const void*)arg, sizeof(args));1311)1312printf("IMPLEMENT ME: %s:%d\n", __FILE__, __LINE__);1313#if 01314dump_phys_mem(args.virt_addr, args.num_bytes);1315#endif1316} break;1317#undef _CF32_CASE13181319case VCHIQ_IOC_LIB_VERSION: {1320size_t lib_version = (size_t)arg;13211322if (lib_version < VCHIQ_VERSION_MIN)1323ret = -EINVAL;1324else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)1325instance->use_close_delivered = 1;1326} break;13271328case VCHIQ_IOC_CLOSE_DELIVERED: {1329VCHIQ_SERVICE_HANDLE_T handle;1330memcpy(&handle, (const void*)arg, sizeof(handle));13311332service = find_closed_service_for_instance(instance, handle);1333if (service != NULL) {1334USER_SERVICE_T *user_service =1335(USER_SERVICE_T *)service->base.userdata;1336close_delivered(user_service);1337}1338else1339ret = -EINVAL;1340} break;13411342default:1343ret = -ENOTTY;1344break;1345}1346#undef _CF32_FORK13471348if (service)1349unlock_service(service);13501351if (ret == 0) {1352if (status == VCHIQ_ERROR)1353ret = -EIO;1354else if (status == VCHIQ_RETRY)1355ret = -EINTR;1356}13571358if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&1359(ret != -EWOULDBLOCK))1360vchiq_log_info(vchiq_arm_log_level,1361" ioctl instance %lx, cmd %s -> status %d, %d",1362(unsigned long)instance,1363(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?1364ioctl_names[_IOC_NR(cmd)] :1365"<invalid>",1366status, ret);1367else1368vchiq_log_trace(vchiq_arm_log_level,1369" ioctl instance %lx, cmd %s -> status %d, %d",1370(unsigned 
long)instance,1371(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?1372ioctl_names[_IOC_NR(cmd)] :1373"<invalid>",1374status, ret);13751376/* XXXBSD: report BSD-style error to userland */1377if (ret < 0)1378ret = -ret;13791380return ret;1381}1382138313841385/****************************************************************************1386*1387* vchiq_open1388*1389***************************************************************************/1390static void instance_dtr(void *data);13911392static int1393vchiq_open(struct cdev *dev, int flags, int fmt __unused, struct thread *td)1394{1395vchiq_log_info(vchiq_arm_log_level, "vchiq_open");1396/* XXXBSD: do we really need this check? */1397if (1) {1398VCHIQ_STATE_T *state = vchiq_get_state();1399VCHIQ_INSTANCE_T instance;14001401if (!state) {1402vchiq_log_error(vchiq_arm_log_level,1403"vchiq has no connection to VideoCore");1404return -ENOTCONN;1405}14061407instance = kmalloc(sizeof(*instance), GFP_KERNEL);1408if (!instance)1409return -ENOMEM;14101411instance->state = state;1412/* XXXBSD: PID or thread ID? */1413instance->pid = td->td_proc->p_pid;14141415#ifdef notyet1416ret = vchiq_proc_add_instance(instance);1417if (ret != 0) {1418kfree(instance);1419return ret;1420}1421#endif14221423_sema_init(&instance->insert_event, 0);1424_sema_init(&instance->remove_event, 0);1425lmutex_init(&instance->completion_mutex);1426lmutex_init(&instance->bulk_waiter_list_mutex);1427INIT_LIST_HEAD(&instance->bulk_waiter_list);14281429devfs_set_cdevpriv(instance, instance_dtr);1430}1431else {1432vchiq_log_error(vchiq_arm_log_level,1433"Unknown minor device");1434return -ENXIO;1435}14361437return 0;1438}14391440/****************************************************************************1441*1442* vchiq_release1443*1444***************************************************************************/144514461447static int1448_vchiq_close_instance(VCHIQ_INSTANCE_T instance)1449{1450int ret = 0;1451VCHIQ_STATE_T *state = vchiq_get_state();1452VCHIQ_SERVICE_T *service;1453int i;14541455vchiq_log_info(vchiq_arm_log_level,1456"vchiq_release: instance=%lx",1457(unsigned long)instance);14581459if (!state) {1460ret = -EPERM;1461goto out;1462}14631464/* Ensure videocore is awake to allow termination. */1465vchiq_use_internal(instance->state, NULL,1466USE_TYPE_VCHIQ);14671468lmutex_lock(&instance->completion_mutex);14691470/* Wake the completion thread and ask it to exit */1471instance->closing = 1;1472up(&instance->insert_event);14731474lmutex_unlock(&instance->completion_mutex);14751476/* Wake the slot handler if the completion queue is full. */1477up(&instance->remove_event);14781479/* Mark all services for termination... */1480i = 0;1481while ((service = next_service_by_instance(state, instance,1482&i)) != NULL) {1483USER_SERVICE_T *user_service = service->base.userdata;14841485/* Wake the slot handler if the msg queue is full. 
*/1486up(&user_service->remove_event);14871488vchiq_terminate_service_internal(service);1489unlock_service(service);1490}14911492/* ...and wait for them to die */1493i = 0;1494while ((service = next_service_by_instance(state, instance, &i))1495!= NULL) {1496USER_SERVICE_T *user_service = service->base.userdata;14971498down(&service->remove_event);14991500BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);15011502spin_lock(&msg_queue_spinlock);15031504while (user_service->msg_remove !=1505user_service->msg_insert) {1506VCHIQ_HEADER_T *header = user_service->1507msg_queue[user_service->msg_remove &1508(MSG_QUEUE_SIZE - 1)];1509user_service->msg_remove++;1510spin_unlock(&msg_queue_spinlock);15111512if (header)1513vchiq_release_message(1514service->handle,1515header);1516spin_lock(&msg_queue_spinlock);1517}15181519spin_unlock(&msg_queue_spinlock);15201521unlock_service(service);1522}15231524/* Release any closed services */1525while (instance->completion_remove !=1526instance->completion_insert) {1527VCHIQ_COMPLETION_DATA_T *completion;1528VCHIQ_SERVICE_T *service;1529completion = &instance->completions[1530instance->completion_remove &1531(MAX_COMPLETIONS - 1)];1532service = completion->service_userdata;1533if (completion->reason == VCHIQ_SERVICE_CLOSED)1534{1535USER_SERVICE_T *user_service =1536service->base.userdata;15371538/* Wake any blocked user-thread */1539if (instance->use_close_delivered)1540up(&user_service->close_event);15411542unlock_service(service);1543}1544instance->completion_remove++;1545}15461547/* Release the PEER service count. */1548vchiq_release_internal(instance->state, NULL);15491550{1551struct list_head *pos, *next;1552list_for_each_safe(pos, next,1553&instance->bulk_waiter_list) {1554struct bulk_waiter_node *waiter;1555waiter = list_entry(pos,1556struct bulk_waiter_node,1557list);1558list_del(pos);1559vchiq_log_info(vchiq_arm_log_level,1560"bulk_waiter - cleaned up %zx "1561"for pid %d",1562(size_t)waiter, waiter->pid);1563_sema_destroy(&waiter->bulk_waiter.event);1564kfree(waiter);1565}1566}15671568out:1569return ret;15701571}15721573static void1574instance_dtr(void *data)1575{1576VCHIQ_INSTANCE_T instance = data;1577_vchiq_close_instance(instance);1578kfree(data);1579}15801581static int1582vchiq_close(struct cdev *dev, int flags __unused, int fmt __unused,1583struct thread *td)1584{15851586/* XXXMDC it's privdata that tracks opens */1587/* XXXMDC only get closes when there are no more open fds on a vnode */15881589return(0);15901591}15921593/****************************************************************************1594*1595* vchiq_dump1596*1597***************************************************************************/15981599void1600vchiq_dump(void *dump_context, const char *str, int len)1601{1602DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;16031604if (context->actual < context->space) {1605int copy_bytes;1606if (context->offset > 0) {1607int skip_bytes = min(len, (int)context->offset);1608str += skip_bytes;1609len -= skip_bytes;1610context->offset -= skip_bytes;1611if (context->offset > 0)1612return;1613}1614copy_bytes = min(len, (int)(context->space - context->actual));1615if (copy_bytes == 0)1616return;1617memcpy(context->buf + context->actual, str, copy_bytes);1618context->actual += copy_bytes;1619len -= copy_bytes;16201621/* If tne terminating NUL is included in the length, then it1622** marks the end of a line and should be replaced with a1623** carriage return. 
*/1624if ((len == 0) && (str[copy_bytes - 1] == '\0')) {1625char cr = '\n';1626memcpy(context->buf + context->actual - 1, &cr, 1);1627}1628}1629}16301631/****************************************************************************1632*1633* vchiq_dump_platform_instance_state1634*1635***************************************************************************/16361637void1638vchiq_dump_platform_instances(void *dump_context)1639{1640VCHIQ_STATE_T *state = vchiq_get_state();1641char buf[80];1642int len;1643int i;16441645/* There is no list of instances, so instead scan all services,1646marking those that have been dumped. */16471648for (i = 0; i < state->unused_service; i++) {1649VCHIQ_SERVICE_T *service = state->services[i];1650VCHIQ_INSTANCE_T instance;16511652if (service && (service->base.callback == service_callback)) {1653instance = service->instance;1654if (instance)1655instance->mark = 0;1656}1657}16581659for (i = 0; i < state->unused_service; i++) {1660VCHIQ_SERVICE_T *service = state->services[i];1661VCHIQ_INSTANCE_T instance;16621663if (service && (service->base.callback == service_callback)) {1664instance = service->instance;1665if (instance && !instance->mark) {1666len = snprintf(buf, sizeof(buf),1667"Instance %zx: pid %d,%s completions "1668"%d/%d",1669(size_t)instance, instance->pid,1670instance->connected ? " connected, " :1671"",1672instance->completion_insert -1673instance->completion_remove,1674MAX_COMPLETIONS);16751676vchiq_dump(dump_context, buf, len + 1);16771678instance->mark = 1;1679}1680}1681}1682}16831684/****************************************************************************1685*1686* vchiq_dump_platform_service_state1687*1688***************************************************************************/16891690void1691vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)1692{1693USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;1694char buf[80];1695int len;16961697len = snprintf(buf, sizeof(buf), " instance %zx",1698(size_t)service->instance);16991700if ((service->base.callback == service_callback) &&1701user_service->is_vchi) {1702len += snprintf(buf + len, sizeof(buf) - len,1703", %d/%d messages",1704user_service->msg_insert - user_service->msg_remove,1705MSG_QUEUE_SIZE);17061707if (user_service->dequeue_pending)1708len += snprintf(buf + len, sizeof(buf) - len,1709" (dequeue pending)");1710}17111712vchiq_dump(dump_context, buf, len + 1);1713}17141715#ifdef notyet1716/****************************************************************************1717*1718* dump_user_mem1719*1720***************************************************************************/17211722static void1723dump_phys_mem(void *virt_addr, uint32_t num_bytes)1724{1725int rc;1726uint8_t *end_virt_addr = virt_addr + num_bytes;1727int num_pages;1728int offset;1729int end_offset;1730int page_idx;1731int prev_idx;1732struct page *page;1733struct page **pages;1734uint8_t *kmapped_virt_ptr;17351736/* Align virtAddr and endVirtAddr to 16 byte boundaries. 
*/17371738virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);1739end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &1740~0x0fuL);17411742offset = (int)(long)virt_addr & (PAGE_SIZE - 1);1743end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);17441745num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;17461747pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);1748if (pages == NULL) {1749vchiq_log_error(vchiq_arm_log_level,1750"Unable to allocation memory for %d pages\n",1751num_pages);1752return;1753}17541755down_read(¤t->mm->mmap_sem);1756rc = get_user_pages(current, /* task */1757current->mm, /* mm */1758(unsigned long)virt_addr, /* start */1759num_pages, /* len */17600, /* write */17610, /* force */1762pages, /* pages (array of page pointers) */1763NULL); /* vmas */1764up_read(¤t->mm->mmap_sem);17651766prev_idx = -1;1767page = NULL;17681769while (offset < end_offset) {17701771int page_offset = offset % PAGE_SIZE;1772page_idx = offset / PAGE_SIZE;17731774if (page_idx != prev_idx) {17751776if (page != NULL)1777kunmap(page);1778page = pages[page_idx];1779kmapped_virt_ptr = kmap(page);17801781prev_idx = page_idx;1782}17831784if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)1785vchiq_log_dump_mem("ph",1786(uint32_t)(unsigned long)&kmapped_virt_ptr[1787page_offset],1788&kmapped_virt_ptr[page_offset], 16);17891790offset += 16;1791}1792if (page != NULL)1793kunmap(page);17941795for (page_idx = 0; page_idx < num_pages; page_idx++)1796page_cache_release(pages[page_idx]);17971798kfree(pages);1799}18001801/****************************************************************************1802*1803* vchiq_read1804*1805***************************************************************************/18061807static ssize_t1808vchiq_read(struct file *file, char __user *buf,1809size_t count, loff_t *ppos)1810{1811DUMP_CONTEXT_T context;1812context.buf = buf;1813context.actual = 0;1814context.space = count;1815context.offset = *ppos;18161817vchiq_dump_state(&context, &g_state);18181819*ppos += context.actual;18201821return context.actual;1822}1823#endif18241825VCHIQ_STATE_T *1826vchiq_get_state(void)1827{18281829if (g_state.remote == NULL)1830printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);1831else if (g_state.remote->initialised != 1)1832printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",1833__func__, g_state.remote->initialised);18341835return ((g_state.remote != NULL) &&1836(g_state.remote->initialised == 1)) ? 
&g_state : NULL;1837}18381839/*1840* Autosuspend related functionality1841*/18421843int1844vchiq_videocore_wanted(VCHIQ_STATE_T *state)1845{1846VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);1847if (!arm_state)1848/* autosuspend not supported - always return wanted */1849return 1;1850else if (arm_state->blocked_count)1851return 1;1852else if (!arm_state->videocore_use_count)1853/* usage count zero - check for override unless we're forcing */1854if (arm_state->resume_blocked)1855return 0;1856else1857return vchiq_platform_videocore_wanted(state);1858else1859/* non-zero usage count - videocore still required */1860return 1;1861}18621863static VCHIQ_STATUS_T1864vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,1865VCHIQ_HEADER_T *header,1866VCHIQ_SERVICE_HANDLE_T service_user,1867void *bulk_user)1868{1869vchiq_log_error(vchiq_susp_log_level,1870"%s callback reason %d", __func__, reason);1871return 0;1872}18731874static int1875vchiq_keepalive_thread_func(void *v)1876{1877VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;1878VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);18791880VCHIQ_STATUS_T status;1881VCHIQ_INSTANCE_T instance;1882VCHIQ_SERVICE_HANDLE_T ka_handle;18831884VCHIQ_SERVICE_PARAMS_T params = {1885.fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),1886.callback = vchiq_keepalive_vchiq_callback,1887.version = KEEPALIVE_VER,1888.version_min = KEEPALIVE_VER_MIN1889};18901891status = vchiq_initialise(&instance);1892if (status != VCHIQ_SUCCESS) {1893vchiq_log_error(vchiq_susp_log_level,1894"%s vchiq_initialise failed %d", __func__, status);1895goto exit;1896}18971898status = vchiq_connect(instance);1899if (status != VCHIQ_SUCCESS) {1900vchiq_log_error(vchiq_susp_log_level,1901"%s vchiq_connect failed %d", __func__, status);1902goto shutdown;1903}19041905status = vchiq_add_service(instance, ¶ms, &ka_handle);1906if (status != VCHIQ_SUCCESS) {1907vchiq_log_error(vchiq_susp_log_level,1908"%s vchiq_open_service failed %d", __func__, status);1909goto shutdown;1910}19111912while (1) {1913long rc = 0, uc = 0;1914if (wait_for_completion_interruptible(&arm_state->ka_evt)1915!= 0) {1916vchiq_log_error(vchiq_susp_log_level,1917"%s interrupted", __func__);1918flush_signals(current);1919continue;1920}19211922/* read and clear counters. 
Do release_count then use_count to1923* prevent getting more releases than uses */1924rc = atomic_xchg(&arm_state->ka_release_count, 0);1925uc = atomic_xchg(&arm_state->ka_use_count, 0);19261927/* Call use/release service the requisite number of times.1928* Process use before release so use counts don't go negative */1929while (uc--) {1930atomic_inc(&arm_state->ka_use_ack_count);1931status = vchiq_use_service(ka_handle);1932if (status != VCHIQ_SUCCESS) {1933vchiq_log_error(vchiq_susp_log_level,1934"%s vchiq_use_service error %d",1935__func__, status);1936}1937}1938while (rc--) {1939status = vchiq_release_service(ka_handle);1940if (status != VCHIQ_SUCCESS) {1941vchiq_log_error(vchiq_susp_log_level,1942"%s vchiq_release_service error %d",1943__func__, status);1944}1945}1946}19471948shutdown:1949vchiq_shutdown(instance);1950exit:1951return 0;1952}19531954VCHIQ_STATUS_T1955vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)1956{1957VCHIQ_STATUS_T status = VCHIQ_SUCCESS;19581959if (arm_state) {1960rwlock_init(&arm_state->susp_res_lock);19611962init_completion(&arm_state->ka_evt);1963atomic_set(&arm_state->ka_use_count, 0);1964atomic_set(&arm_state->ka_use_ack_count, 0);1965atomic_set(&arm_state->ka_release_count, 0);19661967init_completion(&arm_state->vc_suspend_complete);19681969init_completion(&arm_state->vc_resume_complete);1970/* Initialise to 'done' state. We only want to block on resume1971* completion while videocore is suspended. */1972set_resume_state(arm_state, VC_RESUME_RESUMED);19731974init_completion(&arm_state->resume_blocker);1975/* Initialise to 'done' state. We only want to block on this1976* completion while resume is blocked */1977complete_all(&arm_state->resume_blocker);19781979init_completion(&arm_state->blocked_blocker);1980/* Initialise to 'done' state. We only want to block on this1981* completion while things are waiting on the resume blocker */1982complete_all(&arm_state->blocked_blocker);19831984arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;1985arm_state->suspend_timer_running = 0;1986vchiq_init_timer(&arm_state->suspend_timer);1987arm_state->suspend_timer.data = (unsigned long)(state);1988arm_state->suspend_timer.function = suspend_timer_callback;19891990arm_state->first_connect = 0;19911992}1993return status;1994}19951996/*1997** Functions to modify the state variables;1998** set_suspend_state1999** set_resume_state2000**2001** There are more state variables than we might like, so ensure they remain in2002** step. Suspend and resume state are maintained separately, since most of2003** these state machines can operate independently. However, there are a few2004** states where state transitions in one state machine cause a reset to the2005** other state machine. In addition, there are some completion events which2006** need to occur on state machine reset and end-state(s), so these are also2007** dealt with in these functions.2008**2009** In all states we set the state variable according to the input, but in some2010** cases we perform additional steps outlined below;2011**2012** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.2013** The suspend completion is completed after any suspend2014** attempt. When we reset the state machine we also reset2015** the completion. 
This reset occurs when videocore is2016** resumed, and also if we initiate suspend after a suspend2017** failure.2018**2019** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for2020** suspend - ie from this point on we must try to suspend2021** before resuming can occur. We therefore also reset the2022** resume state machine to VC_RESUME_IDLE in this state.2023**2024** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call2025** complete_all on the suspend completion to notify2026** anything waiting for suspend to happen.2027**2028** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also2029** initiate resume, so no need to alter resume state.2030** We call complete_all on the suspend completion to notify2031** of suspend rejection.2032**2033** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the2034** suspend completion and reset the resume state machine.2035**2036** VC_RESUME_IDLE - Initialise the resume completion at the same time. The2037** resume completion is in its 'done' state whenever2038** videcore is running. Therfore, the VC_RESUME_IDLE state2039** implies that videocore is suspended.2040** Hence, any thread which needs to wait until videocore is2041** running can wait on this completion - it will only block2042** if videocore is suspended.2043**2044** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.2045** Call complete_all on the resume completion to unblock2046** any threads waiting for resume. Also reset the suspend2047** state machine to it's idle state.2048**2049** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.2050*/20512052void2053set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,2054enum vc_suspend_status new_state)2055{2056/* set the state in all cases */2057arm_state->vc_suspend_state = new_state;20582059/* state specific additional actions */2060switch (new_state) {2061case VC_SUSPEND_FORCE_CANCELED:2062complete_all(&arm_state->vc_suspend_complete);2063break;2064case VC_SUSPEND_REJECTED:2065complete_all(&arm_state->vc_suspend_complete);2066break;2067case VC_SUSPEND_FAILED:2068complete_all(&arm_state->vc_suspend_complete);2069arm_state->vc_resume_state = VC_RESUME_RESUMED;2070complete_all(&arm_state->vc_resume_complete);2071break;2072case VC_SUSPEND_IDLE:2073/* TODO: reinit_completion */2074INIT_COMPLETION(arm_state->vc_suspend_complete);2075break;2076case VC_SUSPEND_REQUESTED:2077break;2078case VC_SUSPEND_IN_PROGRESS:2079set_resume_state(arm_state, VC_RESUME_IDLE);2080break;2081case VC_SUSPEND_SUSPENDED:2082complete_all(&arm_state->vc_suspend_complete);2083break;2084default:2085BUG();2086break;2087}2088}20892090void2091set_resume_state(VCHIQ_ARM_STATE_T *arm_state,2092enum vc_resume_status new_state)2093{2094/* set the state in all cases */2095arm_state->vc_resume_state = new_state;20962097/* state specific additional actions */2098switch (new_state) {2099case VC_RESUME_FAILED:2100break;2101case VC_RESUME_IDLE:2102/* TODO: reinit_completion */2103INIT_COMPLETION(arm_state->vc_resume_complete);2104break;2105case VC_RESUME_REQUESTED:2106break;2107case VC_RESUME_IN_PROGRESS:2108break;2109case VC_RESUME_RESUMED:2110complete_all(&arm_state->vc_resume_complete);2111set_suspend_state(arm_state, VC_SUSPEND_IDLE);2112break;2113default:2114BUG();2115break;2116}2117}211821192120/* should be called with the write lock held */2121inline void2122start_suspend_timer(VCHIQ_ARM_STATE_T 
*arm_state)2123{2124vchiq_del_timer(&arm_state->suspend_timer);2125arm_state->suspend_timer.expires = jiffies +2126msecs_to_jiffies(arm_state->2127suspend_timer_timeout);2128vchiq_add_timer(&arm_state->suspend_timer);2129arm_state->suspend_timer_running = 1;2130}21312132/* should be called with the write lock held */2133static inline void2134stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)2135{2136if (arm_state->suspend_timer_running) {2137vchiq_del_timer(&arm_state->suspend_timer);2138arm_state->suspend_timer_running = 0;2139}2140}21412142static inline int2143need_resume(VCHIQ_STATE_T *state)2144{2145VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);2146return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&2147(arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&2148vchiq_videocore_wanted(state);2149}21502151static int2152block_resume(VCHIQ_ARM_STATE_T *arm_state)2153{2154int status = VCHIQ_SUCCESS;2155const unsigned long timeout_val =2156msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);2157int resume_count = 0;21582159/* Allow any threads which were blocked by the last force suspend to2160* complete if they haven't already. Only give this one shot; if2161* blocked_count is incremented after blocked_blocker is completed2162* (which only happens when blocked_count hits 0) then those threads2163* will have to wait until next time around */2164if (arm_state->blocked_count) {2165/* TODO: reinit_completion */2166INIT_COMPLETION(arm_state->blocked_blocker);2167write_unlock_bh(&arm_state->susp_res_lock);2168vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "2169"blocked clients", __func__);2170if (wait_for_completion_interruptible_timeout(2171&arm_state->blocked_blocker, timeout_val)2172<= 0) {2173vchiq_log_error(vchiq_susp_log_level, "%s wait for "2174"previously blocked clients failed" , __func__);2175status = VCHIQ_ERROR;2176write_lock_bh(&arm_state->susp_res_lock);2177goto out;2178}2179vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "2180"clients resumed", __func__);2181write_lock_bh(&arm_state->susp_res_lock);2182}21832184/* We need to wait for resume to complete if it's in process */2185while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&2186arm_state->vc_resume_state > VC_RESUME_IDLE) {2187if (resume_count > 1) {2188status = VCHIQ_ERROR;2189vchiq_log_error(vchiq_susp_log_level, "%s waited too "2190"many times for resume" , __func__);2191goto out;2192}2193write_unlock_bh(&arm_state->susp_res_lock);2194vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",2195__func__);2196if (wait_for_completion_interruptible_timeout(2197&arm_state->vc_resume_complete, timeout_val)2198<= 0) {2199vchiq_log_error(vchiq_susp_log_level, "%s wait for "2200"resume failed (%s)", __func__,2201resume_state_names[arm_state->vc_resume_state +2202VC_RESUME_NUM_OFFSET]);2203status = VCHIQ_ERROR;2204write_lock_bh(&arm_state->susp_res_lock);2205goto out;2206}2207vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);2208write_lock_bh(&arm_state->susp_res_lock);2209resume_count++;2210}2211/* TODO: reinit_completion */2212INIT_COMPLETION(arm_state->resume_blocker);2213arm_state->resume_blocked = 1;22142215out:2216return status;2217}22182219static inline void2220unblock_resume(VCHIQ_ARM_STATE_T *arm_state)2221{2222complete_all(&arm_state->resume_blocker);2223arm_state->resume_blocked = 0;2224}22252226/* Initiate suspend via slot handler. 
Should be called with the write lock2227* held */2228VCHIQ_STATUS_T2229vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)2230{2231VCHIQ_STATUS_T status = VCHIQ_ERROR;2232VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);22332234if (!arm_state)2235goto out;22362237vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);2238status = VCHIQ_SUCCESS;223922402241switch (arm_state->vc_suspend_state) {2242case VC_SUSPEND_REQUESTED:2243vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "2244"requested", __func__);2245break;2246case VC_SUSPEND_IN_PROGRESS:2247vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "2248"progress", __func__);2249break;22502251default:2252/* We don't expect to be in other states, so log but continue2253* anyway */2254vchiq_log_error(vchiq_susp_log_level,2255"%s unexpected suspend state %s", __func__,2256suspend_state_names[arm_state->vc_suspend_state +2257VC_SUSPEND_NUM_OFFSET]);2258/* fall through */2259case VC_SUSPEND_REJECTED:2260case VC_SUSPEND_FAILED:2261/* Ensure any idle state actions have been run */2262set_suspend_state(arm_state, VC_SUSPEND_IDLE);2263/* fall through */2264case VC_SUSPEND_IDLE:2265vchiq_log_info(vchiq_susp_log_level,2266"%s: suspending", __func__);2267set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);2268/* kick the slot handler thread to initiate suspend */2269request_poll(state, NULL, 0);2270break;2271}22722273out:2274vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);2275return status;2276}22772278void2279vchiq_platform_check_suspend(VCHIQ_STATE_T *state)2280{2281VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);2282int susp = 0;22832284if (!arm_state)2285goto out;22862287vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);22882289write_lock_bh(&arm_state->susp_res_lock);2290if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&2291arm_state->vc_resume_state == VC_RESUME_RESUMED) {2292set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);2293susp = 1;2294}2295write_unlock_bh(&arm_state->susp_res_lock);22962297if (susp)2298vchiq_platform_suspend(state);22992300out:2301vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);2302return;2303}230423052306static void2307output_timeout_error(VCHIQ_STATE_T *state)2308{2309VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);2310char service_err[50] = "";2311int vc_use_count = arm_state->videocore_use_count;2312int active_services = state->unused_service;2313int i;23142315if (!arm_state->videocore_use_count) {2316snprintf(service_err, 50, " Videocore usecount is 0");2317goto output_msg;2318}2319for (i = 0; i < active_services; i++) {2320VCHIQ_SERVICE_T *service_ptr = state->services[i];2321if (service_ptr && service_ptr->service_use_count &&2322(service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {2323snprintf(service_err, 50, " %c%c%c%c(%8x) service has "2324"use count %d%s", VCHIQ_FOURCC_AS_4CHARS(2325service_ptr->base.fourcc),2326service_ptr->client_id,2327service_ptr->service_use_count,2328service_ptr->service_use_count ==2329vc_use_count ? "" : " (+ more)");2330break;2331}2332}23332334output_msg:2335vchiq_log_error(vchiq_susp_log_level,2336"timed out waiting for vc suspend (%d).%s",2337arm_state->autosuspend_override, service_err);23382339}23402341/* Try to get videocore into suspended state, regardless of autosuspend state.2342** We don't actually force suspend, since videocore may get into a bad state2343** if we force suspend at a bad time. Instead, we wait for autosuspend to2344** determine a good point to suspend. 
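** (While waiting, new users are held off via block_resume(); every failure
** path below re-enables them with unblock_resume() before returning.)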
If this doesn't happen within 100ms we2345** report failure.2346**2347** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if2348** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.2349*/2350VCHIQ_STATUS_T2351vchiq_arm_force_suspend(VCHIQ_STATE_T *state)2352{2353VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);2354VCHIQ_STATUS_T status = VCHIQ_ERROR;2355long rc = 0;2356int repeat = -1;23572358if (!arm_state)2359goto out;23602361vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);23622363write_lock_bh(&arm_state->susp_res_lock);23642365status = block_resume(arm_state);2366if (status != VCHIQ_SUCCESS)2367goto unlock;2368if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {2369/* Already suspended - just block resume and exit */2370vchiq_log_info(vchiq_susp_log_level, "%s already suspended",2371__func__);2372status = VCHIQ_SUCCESS;2373goto unlock;2374} else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {2375/* initiate suspend immediately in the case that we're waiting2376* for the timeout */2377stop_suspend_timer(arm_state);2378if (!vchiq_videocore_wanted(state)) {2379vchiq_log_info(vchiq_susp_log_level, "%s videocore "2380"idle, initiating suspend", __func__);2381status = vchiq_arm_vcsuspend(state);2382} else if (arm_state->autosuspend_override <2383FORCE_SUSPEND_FAIL_MAX) {2384vchiq_log_info(vchiq_susp_log_level, "%s letting "2385"videocore go idle", __func__);2386status = VCHIQ_SUCCESS;2387} else {2388vchiq_log_warning(vchiq_susp_log_level, "%s failed too "2389"many times - attempting suspend", __func__);2390status = vchiq_arm_vcsuspend(state);2391}2392} else {2393vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "2394"in progress - wait for completion", __func__);2395status = VCHIQ_SUCCESS;2396}23972398/* Wait for suspend to happen due to system idle (not forced..) */2399if (status != VCHIQ_SUCCESS)2400goto unblock_resume;24012402do {2403write_unlock_bh(&arm_state->susp_res_lock);24042405rc = wait_for_completion_interruptible_timeout(2406&arm_state->vc_suspend_complete,2407msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));24082409write_lock_bh(&arm_state->susp_res_lock);2410if (rc < 0) {2411vchiq_log_warning(vchiq_susp_log_level, "%s "2412"interrupted waiting for suspend", __func__);2413status = VCHIQ_ERROR;2414goto unblock_resume;2415} else if (rc == 0) {2416if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {2417/* Repeat timeout once if in progress */2418if (repeat < 0) {2419repeat = 1;2420continue;2421}2422}2423arm_state->autosuspend_override++;2424output_timeout_error(state);24252426status = VCHIQ_RETRY;2427goto unblock_resume;2428}2429} while (0 < (repeat--));24302431/* Check and report state in case we need to abort ARM suspend */2432if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {2433status = VCHIQ_RETRY;2434vchiq_log_error(vchiq_susp_log_level,2435"%s videocore suspend failed (state %s)", __func__,2436suspend_state_names[arm_state->vc_suspend_state +2437VC_SUSPEND_NUM_OFFSET]);2438/* Reset the state only if it's still in an error state.2439* Something could have already initiated another suspend. 
*/2440if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)2441set_suspend_state(arm_state, VC_SUSPEND_IDLE);24422443goto unblock_resume;2444}24452446/* successfully suspended - unlock and exit */2447goto unlock;24482449unblock_resume:2450/* all error states need to unblock resume before exit */2451unblock_resume(arm_state);24522453unlock:2454write_unlock_bh(&arm_state->susp_res_lock);24552456out:2457vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);2458return status;2459}24602461void2462vchiq_check_suspend(VCHIQ_STATE_T *state)2463{2464VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);24652466if (!arm_state)2467goto out;24682469vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);24702471write_lock_bh(&arm_state->susp_res_lock);2472if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&2473arm_state->first_connect &&2474!vchiq_videocore_wanted(state)) {2475vchiq_arm_vcsuspend(state);2476}2477write_unlock_bh(&arm_state->susp_res_lock);24782479out:2480vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);2481return;2482}248324842485int2486vchiq_arm_allow_resume(VCHIQ_STATE_T *state)2487{2488VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);2489int resume = 0;2490int ret = -1;24912492if (!arm_state)2493goto out;24942495vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);24962497write_lock_bh(&arm_state->susp_res_lock);2498unblock_resume(arm_state);2499resume = vchiq_check_resume(state);2500write_unlock_bh(&arm_state->susp_res_lock);25012502if (resume) {2503if (wait_for_completion_interruptible(2504&arm_state->vc_resume_complete) < 0) {2505vchiq_log_error(vchiq_susp_log_level,2506"%s interrupted", __func__);2507/* failed, cannot accurately derive suspend2508* state, so exit early. */2509goto out;2510}2511}25122513read_lock_bh(&arm_state->susp_res_lock);2514if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {2515vchiq_log_info(vchiq_susp_log_level,2516"%s: Videocore remains suspended", __func__);2517} else {2518vchiq_log_info(vchiq_susp_log_level,2519"%s: Videocore resumed", __func__);2520ret = 0;2521}2522read_unlock_bh(&arm_state->susp_res_lock);2523out:2524vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);2525return ret;2526}25272528/* This function should be called with the write lock held */2529int2530vchiq_check_resume(VCHIQ_STATE_T *state)2531{2532VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);2533int resume = 0;25342535if (!arm_state)2536goto out;25372538vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);25392540if (need_resume(state)) {2541set_resume_state(arm_state, VC_RESUME_REQUESTED);2542request_poll(state, NULL, 0);2543resume = 1;2544}25452546out:2547vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);2548return resume;2549}25502551#ifdef notyet2552void2553vchiq_platform_check_resume(VCHIQ_STATE_T *state)2554{2555VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);2556int res = 0;25572558if (!arm_state)2559goto out;25602561vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);25622563write_lock_bh(&arm_state->susp_res_lock);2564if (arm_state->wake_address == 0) {2565vchiq_log_info(vchiq_susp_log_level,2566"%s: already awake", __func__);2567goto unlock;2568}2569if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {2570vchiq_log_info(vchiq_susp_log_level,2571"%s: already resuming", __func__);2572goto unlock;2573}25742575if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {2576set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);2577res = 1;2578} 
else2579vchiq_log_trace(vchiq_susp_log_level,2580"%s: not resuming (resume state %s)", __func__,2581resume_state_names[arm_state->vc_resume_state +2582VC_RESUME_NUM_OFFSET]);25832584unlock:2585write_unlock_bh(&arm_state->susp_res_lock);25862587if (res)2588vchiq_platform_resume(state);25892590out:2591vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);2592return;25932594}2595#endif2596259725982599VCHIQ_STATUS_T2600vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,2601enum USE_TYPE_E use_type)2602{2603VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);2604VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;2605char entity[16];2606int *entity_uc;2607int local_uc, local_entity_uc;26082609if (!arm_state)2610goto out;26112612vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);26132614if (use_type == USE_TYPE_VCHIQ) {2615snprintf(entity, sizeof(entity), "VCHIQ: ");2616entity_uc = &arm_state->peer_use_count;2617} else if (service) {2618snprintf(entity, sizeof(entity), "%c%c%c%c:%8x",2619VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),2620service->client_id);2621entity_uc = &service->service_use_count;2622} else {2623vchiq_log_error(vchiq_susp_log_level, "%s null service "2624"ptr", __func__);2625ret = VCHIQ_ERROR;2626goto out;2627}26282629write_lock_bh(&arm_state->susp_res_lock);2630while (arm_state->resume_blocked) {2631/* If we call 'use' while force suspend is waiting for suspend,2632* then we're about to block the thread which the force is2633* waiting to complete, so we're bound to just time out. In this2634* case, set the suspend state such that the wait will be2635* canceled, so we can complete as quickly as possible. */2636if (arm_state->resume_blocked && arm_state->vc_suspend_state ==2637VC_SUSPEND_IDLE) {2638set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);2639break;2640}2641/* If suspend is already in progress then we need to block */2642if (!try_wait_for_completion(&arm_state->resume_blocker)) {2643/* Indicate that there are threads waiting on the resume2644* blocker. These need to be allowed to complete before2645* a _second_ call to force suspend can complete,2646* otherwise low priority threads might never actually2647* continue */2648arm_state->blocked_count++;2649write_unlock_bh(&arm_state->susp_res_lock);2650vchiq_log_info(vchiq_susp_log_level, "%s %s resume "2651"blocked - waiting...", __func__, entity);2652if (wait_for_completion_killable(2653&arm_state->resume_blocker) != 0) {2654vchiq_log_error(vchiq_susp_log_level, "%s %s "2655"wait for resume blocker interrupted",2656__func__, entity);2657ret = VCHIQ_ERROR;2658write_lock_bh(&arm_state->susp_res_lock);2659arm_state->blocked_count--;2660write_unlock_bh(&arm_state->susp_res_lock);2661goto out;2662}2663vchiq_log_info(vchiq_susp_log_level, "%s %s resume "2664"unblocked", __func__, entity);2665write_lock_bh(&arm_state->susp_res_lock);2666if (--arm_state->blocked_count == 0)2667complete_all(&arm_state->blocked_blocker);2668}2669}26702671stop_suspend_timer(arm_state);26722673local_uc = ++arm_state->videocore_use_count;2674local_entity_uc = ++(*entity_uc);26752676/* If there's a pending request which hasn't yet been serviced then2677* just clear it. 
If we're past VC_SUSPEND_REQUESTED state then2678* vc_resume_complete will block until we either resume or fail to2679* suspend */2680if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)2681set_suspend_state(arm_state, VC_SUSPEND_IDLE);26822683if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {2684set_resume_state(arm_state, VC_RESUME_REQUESTED);2685vchiq_log_info(vchiq_susp_log_level,2686"%s %s count %d, state count %d",2687__func__, entity, local_entity_uc, local_uc);2688request_poll(state, NULL, 0);2689} else2690vchiq_log_trace(vchiq_susp_log_level,2691"%s %s count %d, state count %d",2692__func__, entity, *entity_uc, local_uc);269326942695write_unlock_bh(&arm_state->susp_res_lock);26962697/* Completion is in a done state when we're not suspended, so this won't2698* block for the non-suspended case. */2699if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {2700vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",2701__func__, entity);2702if (wait_for_completion_killable(2703&arm_state->vc_resume_complete) != 0) {2704vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "2705"resume interrupted", __func__, entity);2706ret = VCHIQ_ERROR;2707goto out;2708}2709vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,2710entity);2711}27122713if (ret == VCHIQ_SUCCESS) {2714VCHIQ_STATUS_T status = VCHIQ_SUCCESS;2715long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);2716while (ack_cnt && (status == VCHIQ_SUCCESS)) {2717/* Send the use notify to videocore */2718status = vchiq_send_remote_use_active(state);2719if (status == VCHIQ_SUCCESS)2720ack_cnt--;2721else2722atomic_add(ack_cnt,2723&arm_state->ka_use_ack_count);2724}2725}27262727out:2728vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);2729return ret;2730}27312732VCHIQ_STATUS_T2733vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)2734{2735VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);2736VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;2737char entity[16];2738int *entity_uc;27392740if (!arm_state)2741goto out;27422743vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);27442745if (service) {2746snprintf(entity, sizeof(entity), "%c%c%c%c:%8x",2747VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),2748service->client_id);2749entity_uc = &service->service_use_count;2750} else {2751snprintf(entity, sizeof(entity), "PEER: ");2752entity_uc = &arm_state->peer_use_count;2753}27542755write_lock_bh(&arm_state->susp_res_lock);2756if (!arm_state->videocore_use_count || !(*entity_uc)) {2757/* Don't use BUG_ON - don't allow user thread to crash kernel */2758WARN_ON(!arm_state->videocore_use_count);2759WARN_ON(!(*entity_uc));2760ret = VCHIQ_ERROR;2761goto unlock;2762}2763--arm_state->videocore_use_count;2764--(*entity_uc);27652766if (!vchiq_videocore_wanted(state)) {2767if (vchiq_platform_use_suspend_timer() &&2768!arm_state->resume_blocked) {2769/* Only use the timer if we're not trying to force2770* suspend (=> resume_blocked) */2771start_suspend_timer(arm_state);2772} else {2773vchiq_log_info(vchiq_susp_log_level,2774"%s %s count %d, state count %d - suspending",2775__func__, entity, *entity_uc,2776arm_state->videocore_use_count);2777vchiq_arm_vcsuspend(state);2778}2779} else2780vchiq_log_trace(vchiq_susp_log_level,2781"%s %s count %d, state count %d",2782__func__, entity, *entity_uc,2783arm_state->videocore_use_count);27842785unlock:2786write_unlock_bh(&arm_state->susp_res_lock);27872788out:2789vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, 
ret);2790return ret;2791}27922793void2794vchiq_on_remote_use(VCHIQ_STATE_T *state)2795{2796VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);2797vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);2798atomic_inc(&arm_state->ka_use_count);2799complete(&arm_state->ka_evt);2800}28012802void2803vchiq_on_remote_release(VCHIQ_STATE_T *state)2804{2805VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);2806vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);2807atomic_inc(&arm_state->ka_release_count);2808complete(&arm_state->ka_evt);2809}28102811VCHIQ_STATUS_T2812vchiq_use_service_internal(VCHIQ_SERVICE_T *service)2813{2814return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);2815}28162817VCHIQ_STATUS_T2818vchiq_release_service_internal(VCHIQ_SERVICE_T *service)2819{2820return vchiq_release_internal(service->state, service);2821}28222823static void suspend_timer_callback(unsigned long context)2824{2825VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;2826VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);2827if (!arm_state)2828goto out;2829vchiq_log_info(vchiq_susp_log_level,2830"%s - suspend timer expired - check suspend", __func__);2831vchiq_check_suspend(state);2832out:2833return;2834}28352836VCHIQ_STATUS_T2837vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)2838{2839VCHIQ_STATUS_T ret = VCHIQ_ERROR;2840VCHIQ_SERVICE_T *service = find_service_by_handle(handle);2841if (service) {2842ret = vchiq_use_internal(service->state, service,2843USE_TYPE_SERVICE_NO_RESUME);2844unlock_service(service);2845}2846return ret;2847}28482849VCHIQ_STATUS_T2850vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)2851{2852VCHIQ_STATUS_T ret = VCHIQ_ERROR;2853VCHIQ_SERVICE_T *service = find_service_by_handle(handle);2854if (service) {2855ret = vchiq_use_internal(service->state, service,2856USE_TYPE_SERVICE);2857unlock_service(service);2858}2859return ret;2860}28612862VCHIQ_STATUS_T2863vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)2864{2865VCHIQ_STATUS_T ret = VCHIQ_ERROR;2866VCHIQ_SERVICE_T *service = find_service_by_handle(handle);2867if (service) {2868ret = vchiq_release_internal(service->state, service);2869unlock_service(service);2870}2871return ret;2872}28732874void2875vchiq_dump_service_use_state(VCHIQ_STATE_T *state)2876{2877VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);2878int i, j = 0;2879/* Only dump 64 services */2880static const int local_max_services = 64;2881/* If there's more than 64 services, only dump ones with2882* non-zero counts */2883int only_nonzero = 0;2884static const char *nz = "<-- preventing suspend";28852886enum vc_suspend_status vc_suspend_state;2887enum vc_resume_status vc_resume_state;2888int peer_count;2889int vc_use_count;2890int active_services;2891struct service_data_struct {2892int fourcc;2893int clientid;2894int use_count;2895} service_data[local_max_services];28962897if (!arm_state)2898return;28992900read_lock_bh(&arm_state->susp_res_lock);2901vc_suspend_state = arm_state->vc_suspend_state;2902vc_resume_state = arm_state->vc_resume_state;2903peer_count = arm_state->peer_use_count;2904vc_use_count = arm_state->videocore_use_count;2905active_services = state->unused_service;2906if (active_services > local_max_services)2907only_nonzero = 1;29082909for (i = 0; (i < active_services) && (j < local_max_services); i++) {2910VCHIQ_SERVICE_T *service_ptr = state->services[i];2911if (!service_ptr)2912continue;29132914if (only_nonzero && !service_ptr->service_use_count)2915continue;29162917if 
(service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {2918service_data[j].fourcc = service_ptr->base.fourcc;2919service_data[j].clientid = service_ptr->client_id;2920service_data[j++].use_count = service_ptr->2921service_use_count;2922}2923}29242925read_unlock_bh(&arm_state->susp_res_lock);29262927vchiq_log_warning(vchiq_susp_log_level,2928"-- Videcore suspend state: %s --",2929suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);2930vchiq_log_warning(vchiq_susp_log_level,2931"-- Videcore resume state: %s --",2932resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);29332934if (only_nonzero)2935vchiq_log_warning(vchiq_susp_log_level, "Too many active "2936"services (%d). Only dumping up to first %d services "2937"with non-zero use-count", active_services,2938local_max_services);29392940for (i = 0; i < j; i++) {2941vchiq_log_warning(vchiq_susp_log_level,2942"----- %c%c%c%c:%d service count %d %s",2943VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),2944service_data[i].clientid,2945service_data[i].use_count,2946service_data[i].use_count ? nz : "");2947}2948vchiq_log_warning(vchiq_susp_log_level,2949"----- VCHIQ use count count %d", peer_count);2950vchiq_log_warning(vchiq_susp_log_level,2951"--- Overall vchiq instance use count %d", vc_use_count);29522953vchiq_dump_platform_use_state(state);2954}29552956VCHIQ_STATUS_T2957vchiq_check_service(VCHIQ_SERVICE_T *service)2958{2959VCHIQ_ARM_STATE_T *arm_state;2960VCHIQ_STATUS_T ret = VCHIQ_ERROR;29612962if (!service || !service->state)2963goto out;29642965vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);29662967arm_state = vchiq_platform_get_arm_state(service->state);29682969read_lock_bh(&arm_state->susp_res_lock);2970if (service->service_use_count)2971ret = VCHIQ_SUCCESS;2972read_unlock_bh(&arm_state->susp_res_lock);29732974if (ret == VCHIQ_ERROR) {2975vchiq_log_error(vchiq_susp_log_level,2976"%s ERROR - %c%c%c%c:%8x service count %d, "2977"state count %d, videocore suspend state %s", __func__,2978VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),2979service->client_id, service->service_use_count,2980arm_state->videocore_use_count,2981suspend_state_names[arm_state->vc_suspend_state +2982VC_SUSPEND_NUM_OFFSET]);2983vchiq_dump_service_use_state(service->state);2984}2985out:2986return ret;2987}29882989/* stub functions */2990void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)2991{2992(void)state;2993}29942995void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,2996VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)2997{2998VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);2999vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,3000get_conn_state_name(oldstate), get_conn_state_name(newstate));3001if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {3002write_lock_bh(&arm_state->susp_res_lock);3003if (!arm_state->first_connect) {3004char threadname[10];3005arm_state->first_connect = 1;3006write_unlock_bh(&arm_state->susp_res_lock);3007snprintf(threadname, sizeof(threadname), "VCHIQka-%d",3008state->id);3009arm_state->ka_thread = vchiq_thread_create(3010&vchiq_keepalive_thread_func,3011(void *)state,3012threadname);3013if (arm_state->ka_thread == NULL) {3014vchiq_log_error(vchiq_susp_log_level,3015"vchiq: FATAL: couldn't create thread %s",3016threadname);3017} else {3018wake_up_process(arm_state->ka_thread);3019}3020} else3021write_unlock_bh(&arm_state->susp_res_lock);3022}3023}30243025/****************************************************************************3026*3027* vchiq_init - called when the module 
is loaded.3028*3029***************************************************************************/30303031int __init vchiq_init(void);3032int __init3033vchiq_init(void)3034{3035int err;30363037#ifdef notyet3038/* create proc entries */3039err = vchiq_proc_init();3040if (err != 0)3041goto failed_proc_init;3042#endif30433044vchiq_cdev = make_dev(&vchiq_cdevsw, 0,3045UID_ROOT, GID_WHEEL, 0600, "vchiq");3046if (!vchiq_cdev) {3047printf("Failed to create /dev/vchiq");3048return (-ENXIO);3049}30503051spin_lock_init(&msg_queue_spinlock);30523053err = vchiq_platform_init(&g_state);3054if (err != 0)3055goto failed_platform_init;30563057vchiq_log_info(vchiq_arm_log_level,3058"vchiq: initialised - version %d (min %d)",3059VCHIQ_VERSION, VCHIQ_VERSION_MIN);30603061return 0;30623063failed_platform_init:3064if (vchiq_cdev) {3065destroy_dev(vchiq_cdev);3066vchiq_cdev = NULL;3067}3068vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");3069return err;3070}30713072#ifdef notyet3073static int vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)3074{3075VCHIQ_SERVICE_T *service;3076int use_count = 0, i;3077i = 0;3078while ((service = next_service_by_instance(instance->state,3079instance, &i)) != NULL) {3080use_count += service->service_use_count;3081unlock_service(service);3082}3083return use_count;3084}30853086/* read the per-process use-count */3087static int proc_read_use_count(char *page, char **start,3088off_t off, int count,3089int *eof, void *data)3090{3091VCHIQ_INSTANCE_T instance = data;3092int len, use_count;30933094use_count = vchiq_instance_get_use_count(instance);3095len = snprintf(page+off, count, "%d\n", use_count);30963097return len;3098}30993100/* add an instance (process) to the proc entries */3101static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance)3102{3103char pidstr[32];3104struct proc_dir_entry *top, *use_count;3105struct proc_dir_entry *clients = vchiq_clients_top();3106int pid = instance->pid;31073108snprintf(pidstr, sizeof(pidstr), "%d", pid);3109top = proc_mkdir(pidstr, clients);3110if (!top)3111goto fail_top;31123113use_count = create_proc_read_entry("use_count",31140444, top,3115proc_read_use_count,3116instance);3117if (!use_count)3118goto fail_use_count;31193120instance->proc_entry = top;31213122return 0;31233124fail_use_count:3125remove_proc_entry(top->name, clients);3126fail_top:3127return -ENOMEM;3128}31293130static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance)3131{3132struct proc_dir_entry *clients = vchiq_clients_top();3133remove_proc_entry("use_count", instance->proc_entry);3134remove_proc_entry(instance->proc_entry->name, clients);3135}31363137#endif31383139/****************************************************************************3140*3141* vchiq_exit - called when the module is unloaded.3142*3143***************************************************************************/31443145void vchiq_exit(void);3146void3147vchiq_exit(void)3148{31493150vchiq_platform_exit(&g_state);3151if (vchiq_cdev) {3152destroy_dev(vchiq_cdev);3153vchiq_cdev = NULL;3154}3155}315631573158
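/*
 * Editor's sketch (illustrative, not normative): the common suspend/resume
 * cycle driven by set_suspend_state()/set_resume_state() and the use counts
 * above. The transitions to VC_SUSPEND_SUSPENDED and VC_RESUME_RESUMED are
 * assumed to be made by the platform layer once vchiq_platform_suspend()/
 * vchiq_platform_resume() complete.
 *
 *   videocore_use_count -> 0 ........ start_suspend_timer()
 *   timer expires ................... VC_SUSPEND_REQUESTED, slot handler polled
 *   vchiq_platform_check_suspend() .. VC_SUSPEND_IN_PROGRESS (resume -> IDLE)
 *   platform suspend completes ...... VC_SUSPEND_SUSPENDED (completion signalled)
 *   vchiq_use_*() while suspended ... VC_RESUME_REQUESTED, slot handler polled
 *   platform resume completes ....... VC_RESUME_RESUMED (suspend -> IDLE)
 */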
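/*
 * Illustrative sketch only (not part of this driver): a kernel-side consumer
 * is expected to bracket work that needs VideoCore awake with
 * vchiq_use_service()/vchiq_release_service(). The function name and the
 * handle passed in are hypothetical.
 */
#if 0
static VCHIQ_STATUS_T
example_do_work(VCHIQ_SERVICE_HANDLE_T handle)
{
	VCHIQ_STATUS_T status;

	/* Take a use count so autosuspend cannot suspend VideoCore while the
	 * work is outstanding. */
	status = vchiq_use_service(handle);
	if (status != VCHIQ_SUCCESS)
		return status;

	/* ... queue messages / bulk transfers on the service here ... */

	/* Drop the use count; if nothing else holds VideoCore, the suspend
	 * timer is (re)started by vchiq_release_internal(). */
	return vchiq_release_service(handle);
}
#endif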