/*
 * Created: Fri Jan 19 10:48:35 2001 by [email protected]
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author Rickard E. (Rik) Faith <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/bitops.h>
#include <linux/cgroup_dmem.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sprintf.h>
#include <linux/srcu.h>
#include <linux/xarray.h>

#include <drm/drm_accel.h>
#include <drm/drm_bridge.h>
#include <drm/drm_cache.h>
#include <drm/drm_client_event.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_panic.h>
#include <drm/drm_print.h>
#include <drm/drm_privacy_screen_machine.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"

MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");

DEFINE_XARRAY_ALLOC(drm_minors_xa);

/*
 * If the drm core fails to init for whatever reason,
 * we should prevent any drivers from registering with it.
 * It's best to check this at drm_dev_init(), as some drivers
 * prefer to embed struct drm_device into their own device
 * structure and call drm_dev_init() themselves.
 */
static bool drm_core_init_complete;

DEFINE_STATIC_SRCU(drm_unplug_srcu);

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means DRM minors have the same life-time as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
 */

static struct xarray *drm_minor_get_xa(enum drm_minor_type type)
{
	if (type == DRM_MINOR_PRIMARY || type == DRM_MINOR_RENDER)
		return &drm_minors_xa;
#if IS_ENABLED(CONFIG_DRM_ACCEL)
	else if (type == DRM_MINOR_ACCEL)
		return &accel_minors_xa;
#endif
	else
		return ERR_PTR(-EOPNOTSUPP);
}

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
					     enum drm_minor_type type)
{
	switch (type) {
	case DRM_MINOR_PRIMARY:
		return &dev->primary;
	case DRM_MINOR_RENDER:
		return &dev->render;
	case DRM_MINOR_ACCEL:
		return &dev->accel;
	default:
		BUG();
	}
}

static void drm_minor_alloc_release(struct drm_device *dev, void *data)
{
	struct drm_minor *minor = data;

	WARN_ON(dev != minor->dev);

	put_device(minor->kdev);

	xa_erase(drm_minor_get_xa(minor->type), minor->index);
}

/*
 * DRM used to support 64 devices; for backwards compatibility we need to
 * maintain the minor allocation scheme where minors 0-63 are primary nodes,
 * 64-127 are control nodes, and 128-191 are render nodes.
 * After reaching the limit, minors are allocated dynamically - first-come,
 * first-served. Accel nodes use a distinct major, so their minors are
 * allocated in a contiguous 0-MAX range.
 */
#define DRM_MINOR_LIMIT(t) ({ \
	typeof(t) _t = (t); \
	_t == DRM_MINOR_ACCEL ? XA_LIMIT(0, ACCEL_MAX_MINORS) : XA_LIMIT(64 * _t, 64 * _t + 63); \
})
#define DRM_EXTENDED_MINOR_LIMIT XA_LIMIT(192, (1 << MINORBITS) - 1)
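
/*
 * Worked example of the scheme above (illustration only; the numbers follow
 * from the documented legacy ranges, nothing new is asserted here): a primary
 * node draws its index from 0-63, a render node from 128-191 and an accel
 * node from 0-ACCEL_MAX_MINORS on the accel major. Once a legacy range is
 * exhausted, drm_minor_alloc() falls back to DRM_EXTENDED_MINOR_LIMIT and
 * hands out 192 through (1 << MINORBITS) - 1 first-come, first-served.
 */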

static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
{
	struct drm_minor *minor;
	int r;

	minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	r = xa_alloc(drm_minor_get_xa(type), &minor->index,
		     NULL, DRM_MINOR_LIMIT(type), GFP_KERNEL);
	if (r == -EBUSY && (type == DRM_MINOR_PRIMARY || type == DRM_MINOR_RENDER))
		r = xa_alloc(&drm_minors_xa, &minor->index,
			     NULL, DRM_EXTENDED_MINOR_LIMIT, GFP_KERNEL);
	if (r < 0)
		return r;

	r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
	if (r)
		return r;

	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev))
		return PTR_ERR(minor->kdev);

	*drm_minor_get_slot(dev, type) = minor;
	return 0;
}

static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
{
	struct drm_minor *minor;
	void *entry;
	int ret;

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

	if (minor->type != DRM_MINOR_ACCEL) {
		ret = drm_debugfs_register(minor, minor->index);
		if (ret) {
			DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
			goto err_debugfs;
		}
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;

	/* replace NULL with @minor so lookups will succeed from now on */
	entry = xa_store(drm_minor_get_xa(type), minor->index, minor, GFP_KERNEL);
	if (xa_is_err(entry)) {
		ret = xa_err(entry);
		goto err_debugfs;
	}
	WARN_ON(entry);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

err_debugfs:
	drm_debugfs_unregister(minor);
	return ret;
}

static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type)
{
	struct drm_minor *minor;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !device_is_registered(minor->kdev))
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	xa_store(drm_minor_get_xa(type), minor->index, NULL, GFP_KERNEL);

	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_unregister(minor);
}

/*
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged and
 * unregistered while you hold the minor.
 */
struct drm_minor *drm_minor_acquire(struct xarray *minor_xa, unsigned int minor_id)
{
	struct drm_minor *minor;

	xa_lock(minor_xa);
	minor = xa_load(minor_xa, minor_id);
	if (minor)
		drm_dev_get(minor->dev);
	xa_unlock(minor_xa);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_dev_is_unplugged(minor->dev)) {
		drm_dev_put(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}

void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_put(minor->dev);
}
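
/*
 * A minimal sketch of the lookup contract documented above (illustration
 * only, not built as part of this file; it mirrors drm_stub_open() at the
 * bottom of this file):
 *
 *	struct drm_minor *minor;
 *
 *	minor = drm_minor_acquire(&drm_minors_xa, minor_id);
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *
 *	// use minor and minor->dev; both stay valid while the minor is held
 *
 *	drm_minor_release(minor);
 */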

/**
 * DOC: driver instance overview
 *
 * A device instance for a drm driver is represented by &struct drm_device. This
 * is allocated and initialized with devm_drm_dev_alloc(), usually from
 * bus-specific ->probe() callbacks implemented by the driver. The driver then
 * needs to initialize all the various subsystems for the drm device like memory
 * management, vblank handling, modesetting support and initial output
 * configuration plus obviously initialize all the corresponding hardware bits.
 * Finally when everything is up and running and ready for userspace the device
 * instance can be published using drm_dev_register().
 *
 * There is also deprecated support for initializing device instances using
 * bus-specific helpers and the &drm_driver.load callback. But due to
 * backwards-compatibility needs the device instance has to be published too
 * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
 *
 * When cleaning up a device instance everything needs to be done in reverse:
 * First unpublish the device instance with drm_dev_unregister(). Then clean up
 * any other resources allocated at device initialization and drop the driver's
 * reference to &drm_device using drm_dev_put().
 *
 * Note that any allocation or resource which is visible to userspace must be
 * released only when the final drm_dev_put() is called, and not when the
 * driver is unbound from the underlying physical struct &device. Best to use
 * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
 * related functions.
 *
 * devres managed resources like devm_kmalloc() can only be used for resources
 * directly related to the underlying hardware device, and only used in code
 * paths fully protected by drm_dev_enter() and drm_dev_exit().
 *
 * Display driver example
 * ~~~~~~~~~~~~~~~~~~~~~~
 *
 * The following example shows a typical structure of a DRM display driver.
 * The example focuses on the probe() function and the other functions that
 * are almost always present, and serves as a demonstration of
 * devm_drm_dev_alloc().
 *
 * .. code-block:: c
 *
 *	struct driver_device {
 *		struct drm_device drm;
 *		void *userspace_facing;
 *		struct clk *pclk;
 *	};
 *
 *	static const struct drm_driver driver_drm_driver = {
 *		[...]
 *	};
 *
 *	static int driver_probe(struct platform_device *pdev)
 *	{
 *		struct driver_device *priv;
 *		struct drm_device *drm;
 *		int ret;
 *
 *		priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
 *					  struct driver_device, drm);
 *		if (IS_ERR(priv))
 *			return PTR_ERR(priv);
 *		drm = &priv->drm;
 *
 *		ret = drmm_mode_config_init(drm);
 *		if (ret)
 *			return ret;
 *
 *		priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
 *		if (!priv->userspace_facing)
 *			return -ENOMEM;
 *
 *		priv->pclk = devm_clk_get(&pdev->dev, "PCLK");
 *		if (IS_ERR(priv->pclk))
 *			return PTR_ERR(priv->pclk);
 *
 *		// Further setup, display pipeline etc
 *
 *		platform_set_drvdata(pdev, drm);
 *
 *		drm_mode_config_reset(drm);
 *
 *		ret = drm_dev_register(drm, 0);
 *		if (ret)
 *			return ret;
 *
 *		drm_fbdev_{...}_setup(drm, 32);
 *
 *		return 0;
 *	}
 *
 *	// This function is called before the devm_ resources are released
 *	static int driver_remove(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *		drm_dev_unregister(drm);
 *		drm_atomic_helper_shutdown(drm);
 *
 *		return 0;
 *	}
 *
 *	// This function is called on kernel restart and shutdown
 *	static void driver_shutdown(struct platform_device *pdev)
 *	{
 *		drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
 *	}
 *
 *	static int __maybe_unused driver_pm_suspend(struct device *dev)
 *	{
 *		return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int __maybe_unused driver_pm_resume(struct device *dev)
 *	{
 *		drm_mode_config_helper_resume(dev_get_drvdata(dev));
 *
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops driver_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
 *	};
 *
 *	static struct platform_driver driver_driver = {
 *		.driver = {
 *			[...]
 *			.pm = &driver_pm_ops,
 *		},
 *		.probe = driver_probe,
 *		.remove = driver_remove,
 *		.shutdown = driver_shutdown,
 *	};
 *	module_platform_driver(driver_driver);
 *
 * Drivers that want to support device unplugging (USB, DT overlay unload) should
 * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
 * regions that access device resources to prevent use after they're
 * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one
 * shortcoming however: drm_dev_unplug() marks the drm_device as unplugged before
 * drm_atomic_helper_shutdown() is called. This means that if the disable code
 * paths are protected, they will not run on regular driver module unload,
 * possibly leaving the hardware enabled.
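 *
 * A minimal sketch of such a protected section, reusing the driver_device
 * structure from the example above (the function name is made up for
 * illustration):
 *
 * .. code-block:: c
 *
 *	static void driver_hw_disable(struct driver_device *priv)
 *	{
 *		int idx;
 *
 *		if (!drm_dev_enter(&priv->drm, &idx))
 *			return;
 *
 *		// touch hardware only inside the drm_dev_enter/exit section
 *
 *		drm_dev_exit(idx);
 *	}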
 */

/**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
 *
 * Called at module unload time or when a PCI device is unplugged.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 *
 * Note: Use of this function is deprecated. It will eventually go away
 * completely. Please use drm_dev_unregister() and drm_dev_put() explicitly
 * instead to make sure that the device isn't userspace accessible any more
 * while teardown is in progress, ensuring that userspace can't access an
 * inconsistent state.
 */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_dev_unregister(dev);
	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_put_dev);

/**
 * drm_dev_enter - Enter device critical section
 * @dev: DRM device
 * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
 *
 * This function marks and protects the beginning of a section that should not
 * be entered after the device has been unplugged. The section end is marked
 * with drm_dev_exit(). Calls to this function can be nested.
 *
 * Returns:
 * True if it is OK to enter the section, false otherwise.
 */
bool drm_dev_enter(struct drm_device *dev, int *idx)
{
	*idx = srcu_read_lock(&drm_unplug_srcu);

	if (dev->unplugged) {
		srcu_read_unlock(&drm_unplug_srcu, *idx);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(drm_dev_enter);

/**
 * drm_dev_exit - Exit device critical section
 * @idx: index returned from drm_dev_enter()
 *
 * This function marks the end of a section that should not be entered after
 * the device has been unplugged.
 */
void drm_dev_exit(int idx)
{
	srcu_read_unlock(&drm_unplug_srcu, idx);
}
EXPORT_SYMBOL(drm_dev_exit);

/**
 * drm_dev_unplug - unplug a DRM device
 * @dev: DRM device
 *
 * This unplugs a hotpluggable DRM device, which makes it inaccessible to
 * userspace operations. Entry-points can use drm_dev_enter() and
 * drm_dev_exit() to protect device resources in a race free manner. This
 * essentially unregisters the device like drm_dev_unregister(), but can be
 * called while there are still open users of @dev.
 */
void drm_dev_unplug(struct drm_device *dev)
{
	/*
	 * After synchronizing any critical read section is guaranteed to see
	 * the new value of ->unplugged, and any critical section which might
	 * still have seen the old value of ->unplugged is guaranteed to have
	 * finished.
	 */
	dev->unplugged = true;
	synchronize_srcu(&drm_unplug_srcu);

	drm_dev_unregister(dev);

	/* Clear all CPU mappings pointing to this device */
	unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1);
}
EXPORT_SYMBOL(drm_dev_unplug);
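
/*
 * A minimal sketch of a hot-unpluggable caller, assuming a hypothetical USB
 * driver (illustration only, not built as part of this file):
 *
 *	static void driver_usb_disconnect(struct usb_interface *intf)
 *	{
 *		struct drm_device *drm = usb_get_intfdata(intf);
 *
 *		drm_dev_unplug(drm);
 *		drm_atomic_helper_shutdown(drm);
 *	}
 *
 * Any further hardware access in the driver is then skipped by the
 * drm_dev_enter()/drm_dev_exit() sections described above.
 */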

/**
 * drm_dev_set_dma_dev - set the DMA device for a DRM device
 * @dev: DRM device
 * @dma_dev: DMA device or NULL
 *
 * Sets the DMA device of the given DRM device. Only required if
 * the DMA device is different from the DRM device's parent. After
 * calling this function, the DRM device holds a reference on
 * @dma_dev. Pass NULL to clear the DMA device.
 */
void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev)
{
	dma_dev = get_device(dma_dev);

	put_device(dev->dma_dev);
	dev->dma_dev = dma_dev;
}
EXPORT_SYMBOL(drm_dev_set_dma_dev);

/*
 * Available recovery methods for wedged device. To be sent along with device
 * wedged uevent.
 */
static const char *drm_get_wedge_recovery(unsigned int opt)
{
	switch (BIT(opt)) {
	case DRM_WEDGE_RECOVERY_NONE:
		return "none";
	case DRM_WEDGE_RECOVERY_REBIND:
		return "rebind";
	case DRM_WEDGE_RECOVERY_BUS_RESET:
		return "bus-reset";
	default:
		return NULL;
	}
}

#define WEDGE_STR_LEN	32
#define PID_STR_LEN	15
#define COMM_STR_LEN	(TASK_COMM_LEN + 5)

/**
 * drm_dev_wedged_event - generate a device wedged uevent
 * @dev: DRM device
 * @method: method(s) to be used for recovery
 * @info: optional information about the guilty task
 *
 * This generates a device wedged uevent for the DRM device specified by @dev.
 * Recovery @method\(s) of choice will be sent in the uevent environment as
 * ``WEDGED=<method1>[,..,<methodN>]`` in order of less to more side-effects.
 * If caller is unsure about recovery or @method is unknown (0),
 * ``WEDGED=unknown`` will be sent instead.
 *
 * Refer to "Device Wedging" chapter in Documentation/gpu/drm-uapi.rst for more
 * details.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int drm_dev_wedged_event(struct drm_device *dev, unsigned long method,
			 struct drm_wedge_task_info *info)
{
	char event_string[WEDGE_STR_LEN], pid_string[PID_STR_LEN], comm_string[COMM_STR_LEN];
	char *envp[] = { event_string, NULL, NULL, NULL };
	const char *recovery = NULL;
	unsigned int len, opt;

	len = scnprintf(event_string, sizeof(event_string), "%s", "WEDGED=");

	for_each_set_bit(opt, &method, BITS_PER_TYPE(method)) {
		recovery = drm_get_wedge_recovery(opt);
		if (drm_WARN_ONCE(dev, !recovery, "invalid recovery method %u\n", opt))
			break;

		len += scnprintf(event_string + len, sizeof(event_string) - len, "%s,", recovery);
	}

	if (recovery)
		/* Get rid of trailing comma */
		event_string[len - 1] = '\0';
	else
		/* Caller is unsure about recovery, do the best we can at this point. */
		snprintf(event_string, sizeof(event_string), "%s", "WEDGED=unknown");

	drm_info(dev, "device wedged, %s\n", method == DRM_WEDGE_RECOVERY_NONE ?
		 "but recovered through reset" : "needs recovery");

	if (info && (info->comm[0] != '\0') && (info->pid >= 0)) {
		snprintf(pid_string, sizeof(pid_string), "PID=%u", info->pid);
		snprintf(comm_string, sizeof(comm_string), "TASK=%s", info->comm);
		envp[1] = pid_string;
		envp[2] = comm_string;
	}

	return kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(drm_dev_wedged_event);
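
/*
 * A minimal sketch of a caller, assuming a hypothetical driver path that has
 * just failed to reset the device (illustration only, not built as part of
 * this file):
 *
 *	if (reset_failed)
 *		drm_dev_wedged_event(drm,
 *				     DRM_WEDGE_RECOVERY_REBIND |
 *				     DRM_WEDGE_RECOVERY_BUS_RESET,
 *				     NULL);
 *
 * which results in a ``WEDGED=rebind,bus-reset`` uevent for userspace to act
 * on.
 */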

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
 * stand-alone address_space objects, so we need an underlying inode. As there
 * is no way to allocate an independent inode easily, we need a fake internal
 * VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references to
 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
 * drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */

static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static int drm_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
}

static struct file_system_type drm_fs_type = {
	.name = "drm",
	.owner = THIS_MODULE,
	.init_fs_context = drm_fs_init_fs_context,
	.kill_sb = kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}

/**
 * DOC: component helper usage recommendations
 *
 * DRM drivers that drive hardware where a logical device consists of a pile of
 * independent hardware blocks are recommended to use the :ref:`component helper
 * library<component>`. For consistency and better options for code reuse the
 * following guidelines apply:
 *
 * - The entire device initialization procedure should be run from the
 *   &component_master_ops.master_bind callback, starting with
 *   devm_drm_dev_alloc(), then binding all components with
 *   component_bind_all() and finishing with drm_dev_register(), as sketched
 *   below.
 *
 * - The opaque pointer passed to all components through component_bind_all()
 *   should point at &struct drm_device of the device instance, not some driver
 *   specific private structure.
 *
 * - The component helper fills the niche where further standardization of
 *   interfaces is not practical. When there already is, or will be, a
 *   standardized interface like &drm_bridge or &drm_panel, providing its own
 *   functions to find such components at driver load time, like
 *   drm_of_find_panel_or_bridge(), then the component helper should not be
 *   used.
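 *
 * A minimal sketch of such a bind callback (the driver-specific foo_* names
 * are hypothetical, for illustration only):
 *
 * .. code-block:: c
 *
 *	static int foo_master_bind(struct device *dev)
 *	{
 *		struct foo_device *priv;
 *		struct drm_device *drm;
 *		int ret;
 *
 *		priv = devm_drm_dev_alloc(dev, &foo_drm_driver,
 *					  struct foo_device, drm);
 *		if (IS_ERR(priv))
 *			return PTR_ERR(priv);
 *		drm = &priv->drm;
 *
 *		ret = component_bind_all(dev, drm);
 *		if (ret)
 *			return ret;
 *
 *		// further initialization using the bound components
 *
 *		return drm_dev_register(drm, 0);
 *	}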
 */

static void drm_dev_init_release(struct drm_device *dev, void *res)
{
	drm_fs_inode_free(dev->anon_inode);

	put_device(dev->dma_dev);
	dev->dma_dev = NULL;
	put_device(dev->dev);
	/* Prevent use-after-free in drm_managed_release when debugging is
	 * enabled. Slightly awkward, but can't really be helped. */
	dev->dev = NULL;
	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->clientlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
}

static int drm_dev_init(struct drm_device *dev,
			const struct drm_driver *driver,
			struct device *parent)
{
	struct inode *inode;
	int ret;

	if (!drm_core_init_complete) {
		DRM_ERROR("DRM core is not initialized\n");
		return -ENODEV;
	}

	if (WARN_ON(!parent))
		return -EINVAL;

	kref_init(&dev->ref);
	dev->dev = get_device(parent);
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->managed.resources);
	spin_lock_init(&dev->managed.lock);

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL) &&
	    (drm_core_check_feature(dev, DRIVER_RENDER) ||
	     drm_core_check_feature(dev, DRIVER_MODESET))) {
		DRM_ERROR("DRM driver can't be both a compute acceleration and graphics driver\n");
		return -EINVAL;
	}

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->filelist_mutex);
	mutex_init(&dev->clientlist_mutex);
	mutex_init(&dev->master_mutex);
	raw_spin_lock_init(&dev->mode_config.panic_lock);

	ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
	if (ret)
		return ret;

	inode = drm_fs_inode_new();
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err;
	}

	dev->anon_inode = inode;

	if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_ACCEL);
		if (ret)
			goto err;
	} else {
		if (drm_core_check_feature(dev, DRIVER_RENDER)) {
			ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
			if (ret)
				goto err;
		}

		ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
		if (ret)
			goto err;
	}

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err;
		}
	}

	dev->unique = drmm_kstrdup(dev, dev_name(parent), GFP_KERNEL);
	if (!dev->unique) {
		ret = -ENOMEM;
		goto err;
	}

	drm_debugfs_dev_init(dev);

	return 0;

err:
	drm_managed_release(dev);

	return ret;
}

static void devm_drm_dev_init_release(void *data)
{
	drm_dev_put(data);
}

static int devm_drm_dev_init(struct device *parent,
			     struct drm_device *dev,
			     const struct drm_driver *driver)
{
	int ret;

	ret = drm_dev_init(dev, driver, parent);
	if (ret)
		return ret;

	return devm_add_action_or_reset(parent,
					devm_drm_dev_init_release, dev);
}

void *__devm_drm_dev_alloc(struct device *parent,
			   const struct drm_driver *driver,
			   size_t size, size_t offset)
{
	void *container;
	struct drm_device *drm;
	int ret;

	container = kzalloc(size, GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	drm = container + offset;
	ret = devm_drm_dev_init(parent, drm, driver);
	if (ret) {
		kfree(container);
		return ERR_PTR(ret);
	}
	drmm_add_final_kfree(drm, container);

	return container;
}
EXPORT_SYMBOL(__devm_drm_dev_alloc);

/**
 * __drm_dev_alloc - Allocation of a &drm_device instance
 * @parent: Parent device object
 * @driver: DRM driver
 * @size: the size of the struct which contains struct drm_device
 * @offset: the offset of the &drm_device within the container.
 *
 * This should *NOT* be used by any drivers; it is a dedicated interface for
 * the corresponding Rust abstraction.
 *
 * This is the same as devm_drm_dev_alloc(), but without the corresponding
 * resource management through the parent device. It is not the same as
 * drm_dev_alloc(), since the latter is the deprecated version, which does not
 * support subclassing.
 *
 * Returns: A pointer to new DRM device, or an ERR_PTR on failure.
 */
void *__drm_dev_alloc(struct device *parent,
		      const struct drm_driver *driver,
		      size_t size, size_t offset)
{
	void *container;
	struct drm_device *drm;
	int ret;

	container = kzalloc(size, GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	drm = container + offset;
	ret = drm_dev_init(drm, driver, parent);
	if (ret) {
		kfree(container);
		return ERR_PTR(ret);
	}
	drmm_add_final_kfree(drm, container);

	return container;
}
EXPORT_SYMBOL(__drm_dev_alloc);

/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * This is the deprecated version of devm_drm_dev_alloc(), which does not support
 * subclassing through embedding the struct &drm_device in a driver private
 * structure, and which does not support automatic cleanup through devres.
 *
 * RETURNS:
 * Pointer to new DRM device, or ERR_PTR on failure.
 */
struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
				 struct device *parent)
{
	return __drm_dev_alloc(parent, driver, sizeof(struct drm_device), 0);
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	/* Just in case register/unregister was never called */
	drm_debugfs_dev_fini(dev);

	if (dev->driver->release)
		dev->driver->release(dev);

	drm_managed_release(dev);

	kfree(dev->managed.final_kfree);
}

/**
 * drm_dev_get - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_put() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_get(struct drm_device *dev)
{
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_get);

/**
 * drm_dev_put - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_put(struct drm_device *dev)
{
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_put);

static void drmm_cg_unregister_region(struct drm_device *dev, void *arg)
{
	dmem_cgroup_unregister_region(arg);
}

/**
 * drmm_cgroup_register_region - Register a region of a DRM device to cgroups
 * @dev: device for region
 * @region_name: Region name for registering
 * @size: Size of region in bytes
 *
 * This registers a dmem cgroup region for @dev under the given name and ties
 * its lifetime to the DRM device: the region is automatically unregistered
 * when the device is released.
 *
 * Returns: The registered region on success, or an ERR_PTR on failure.
 */
struct dmem_cgroup_region *drmm_cgroup_register_region(struct drm_device *dev, const char *region_name, u64 size)
{
	struct dmem_cgroup_region *region;
	int ret;

	region = dmem_cgroup_register_region(size, "drm/%s/%s", dev->unique, region_name);
	if (IS_ERR_OR_NULL(region))
		return region;

	ret = drmm_add_action_or_reset(dev, drmm_cg_unregister_region, region);
	if (ret)
		return ERR_PTR(ret);

	return region;
}
EXPORT_SYMBOL_GPL(drmm_cgroup_register_region);
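
/*
 * A minimal sketch of a caller, assuming a hypothetical driver with dedicated
 * VRAM (illustration only, not built as part of this file):
 *
 *	struct dmem_cgroup_region *region;
 *
 *	region = drmm_cgroup_register_region(drm, "vram0", vram_size);
 *	if (IS_ERR(region))
 *		return PTR_ERR(region);
 *
 * Allocations can then be charged against the returned region through the
 * dmem cgroup controller.
 */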

static int create_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return 0;

	/*
	 * Some existing userspace out there uses the existence of the controlD*
	 * sysfs files to figure out whether it's a modeset driver. It only does
	 * readdir, hence a symlink is sufficient (and the least confusing
	 * option). Otherwise controlD* is entirely unused.
	 *
	 * Old controlD chardevs have been allocated in the range
	 * 64-127.
	 */
	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return -ENOMEM;

	ret = sysfs_create_link(minor->kdev->kobj.parent,
				&minor->kdev->kobj,
				name);

	kfree(name);

	return ret;
}

static void remove_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return;

	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return;

	sysfs_remove_link(minor->kdev->kobj.parent, name);

	kfree(name);
}

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's .load() function
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must be initialized via drm_dev_init()
 * previously.
 *
 * Never call this twice on any device!
 *
 * NOTE: To ensure backward compatibility with existing drivers, this function
 * calls the &drm_driver.load method after registering the device nodes,
 * creating race conditions. Usage of the &drm_driver.load method is therefore
 * deprecated; drivers must perform all initialization before calling
 * drm_dev_register().
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	const struct drm_driver *driver = dev->driver;
	int ret;

	if (!driver->load)
		drm_mode_config_validate(dev);

	WARN_ON(!dev->managed.final_kfree);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
		accel_debugfs_register(dev);
	else
		drm_debugfs_dev_register(dev);

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_ACCEL);
	if (ret)
		goto err_minors;

	ret = create_compat_control_link(dev);
	if (ret)
		goto err_minors;

	dev->registered = true;

	if (driver->load) {
		ret = driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_modeset_register_all(dev);
		if (ret)
			goto err_unload;
	}
	drm_panic_register(dev);

	DRM_INFO("Initialized %s %d.%d.%d for %s on minor %d\n",
		 driver->name, driver->major, driver->minor,
		 driver->patchlevel,
		 dev->dev ? dev_name(dev->dev) : "virtual device",
		 dev->primary ? dev->primary->index : dev->accel->index);

	goto out_unlock;

err_unload:
	if (dev->driver->unload)
		dev->driver->unload(dev);
err_minors:
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_ACCEL);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);

/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_put() to drop their final reference, unless it is managed with devres
 * (as devices allocated with devm_drm_dev_alloc() are), in which case there is
 * already an unwind action registered.
 *
 * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
 * which can be called while there are still open users of @dev.
 *
 * This should be called first in the device teardown code to make sure
 * userspace can't access the device instance any more.
 */
void drm_dev_unregister(struct drm_device *dev)
{
	dev->registered = false;

	drm_panic_unregister(dev);

	drm_client_dev_unregister(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_unregister_all(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_ACCEL);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_debugfs_dev_fini(dev);
}
EXPORT_SYMBOL(drm_dev_unregister);

/*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once set up, drivers can probe their respective
 * devices.
 * Currently, core management includes:
 *  - The "DRM-Global" key/value database
 *  - Global ID management for connectors
 *  - DRM major number allocation
 *  - DRM minor management
 *  - DRM sysfs class
 *  - DRM debugfs root
 *
 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
 * interface registered on a DRM device, you can request minor numbers from DRM
 * core. DRM core takes care of major-number management and char-dev
 * registration. A stub ->open() callback forwards any open() requests to the
 * registered minor.
 */

static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	minor = drm_minor_acquire(&drm_minors_xa, iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out;
	}

	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out:
	drm_minor_release(minor);

	return err;
}

static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};

static void drm_core_exit(void)
{
	drm_privacy_screen_lookup_exit();
	drm_panic_exit();
	accel_core_exit();
	unregister_chrdev(DRM_MAJOR, "drm");
	drm_debugfs_remove_root();
	drm_sysfs_destroy();
	WARN_ON(!xa_empty(&drm_minors_xa));
	drm_connector_ida_destroy();
}

static int __init drm_core_init(void)
{
	int ret;

	drm_connector_ida_init();
	drm_memcpy_init_early();

	ret = drm_sysfs_init();
	if (ret < 0) {
		DRM_ERROR("Cannot create DRM class: %d\n", ret);
		goto error;
	}

	drm_debugfs_init_root();
	drm_debugfs_bridge_params();

	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
	if (ret < 0)
		goto error;

	ret = accel_core_init();
	if (ret < 0)
		goto error;

	drm_panic_init();

	drm_privacy_screen_lookup_init();

	drm_core_init_complete = true;

	DRM_DEBUG("Initialized\n");
	return 0;

error:
	drm_core_exit();
	return ret;
}

module_init(drm_core_init);
module_exit(drm_core_exit);