Path: drivers/gpu/drm/display/drm_dp_tunnel.c

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/export.h>
#include <linux/ref_tracker.h>
#include <linux/types.h>

#include <drm/drm_atomic_state_helper.h>

#include <drm/drm_atomic.h>
#include <drm/drm_print.h>
#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_tunnel.h>

#define to_group(__private_obj) \
	container_of(__private_obj, struct drm_dp_tunnel_group, base)

#define to_group_state(__private_state) \
	container_of(__private_state, struct drm_dp_tunnel_group_state, base)

#define is_dp_tunnel_private_obj(__obj) \
	((__obj)->funcs == &tunnel_group_funcs)

#define for_each_new_group_in_state(__state, __new_group_state, __i) \
	for ((__i) = 0; \
	     (__i) < (__state)->num_private_objs; \
	     (__i)++) \
		for_each_if ((__state)->private_objs[__i].ptr && \
			     is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
			     ((__new_group_state) = \
				to_group_state((__state)->private_objs[__i].new_state), 1))

#define for_each_old_group_in_state(__state, __old_group_state, __i) \
	for ((__i) = 0; \
	     (__i) < (__state)->num_private_objs; \
	     (__i)++) \
		for_each_if ((__state)->private_objs[__i].ptr && \
			     is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
			     ((__old_group_state) = \
				to_group_state((__state)->private_objs[__i].old_state), 1))

#define for_each_tunnel_in_group(__group, __tunnel) \
	list_for_each_entry(__tunnel, &(__group)->tunnels, node)

#define for_each_tunnel_state(__group_state, __tunnel_state) \
	list_for_each_entry(__tunnel_state, &(__group_state)->tunnel_states, node)

#define for_each_tunnel_state_safe(__group_state, __tunnel_state, __tunnel_state_tmp) \
	list_for_each_entry_safe(__tunnel_state, __tunnel_state_tmp, \
				 &(__group_state)->tunnel_states, node)

#define kbytes_to_mbits(__kbytes) \
	DIV_ROUND_UP((__kbytes) * 8, 1000)

#define DPTUN_BW_ARG(__bw) ((__bw) < 0 ? (__bw) : kbytes_to_mbits(__bw))

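/*
 * Example: BW values are tracked internally in kB/s units and printed as
 * Mb/s by the debug macros below, so a value of 62500 kB/s is printed via
 * DPTUN_BW_ARG() as DIV_ROUND_UP(62500 * 8, 1000) == 500 Mb/s. Negative
 * (undefined/invalid) BW values are passed through unchanged.
 */
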
#define __tun_prn(__tunnel, __level, __type, __fmt, ...) \
	drm_##__level##__type((__tunnel)->group->mgr->dev, \
			      "[DPTUN %s][%s] " __fmt, \
			      drm_dp_tunnel_name(__tunnel), \
			      (__tunnel)->aux->name, ## \
			      __VA_ARGS__)

#define tun_dbg(__tunnel, __fmt, ...) \
	__tun_prn(__tunnel, dbg, _kms, __fmt, ## __VA_ARGS__)

#define tun_dbg_stat(__tunnel, __err, __fmt, ...) do { \
	if (__err) \
		__tun_prn(__tunnel, dbg, _kms, __fmt " (Failed, err: %pe)\n", \
			  ## __VA_ARGS__, ERR_PTR(__err)); \
	else \
		__tun_prn(__tunnel, dbg, _kms, __fmt " (Ok)\n", \
			  ## __VA_ARGS__); \
} while (0)

#define tun_dbg_atomic(__tunnel, __fmt, ...) \
	__tun_prn(__tunnel, dbg, _atomic, __fmt, ## __VA_ARGS__)

#define tun_grp_dbg(__group, __fmt, ...) \
	drm_dbg_kms((__group)->mgr->dev, \
		    "[DPTUN %s] " __fmt, \
		    drm_dp_tunnel_group_name(__group), ## \
		    __VA_ARGS__)

#define DP_TUNNELING_BASE DP_TUNNELING_OUI

#define __DPTUN_REG_RANGE(__start, __size) \
	GENMASK_ULL((__start) + (__size) - 1, (__start))

#define DPTUN_REG_RANGE(__addr, __size) \
	__DPTUN_REG_RANGE((__addr) - DP_TUNNELING_BASE, (__size))

#define DPTUN_REG(__addr) DPTUN_REG_RANGE(__addr, 1)

#define DPTUN_INFO_REG_MASK ( \
	DPTUN_REG_RANGE(DP_TUNNELING_OUI, DP_TUNNELING_OUI_BYTES) | \
	DPTUN_REG_RANGE(DP_TUNNELING_DEV_ID, DP_TUNNELING_DEV_ID_BYTES) | \
	DPTUN_REG(DP_TUNNELING_HW_REV) | \
	DPTUN_REG(DP_TUNNELING_SW_REV_MAJOR) | \
	DPTUN_REG(DP_TUNNELING_SW_REV_MINOR) | \
	DPTUN_REG(DP_TUNNELING_CAPABILITIES) | \
	DPTUN_REG(DP_IN_ADAPTER_INFO) | \
	DPTUN_REG(DP_USB4_DRIVER_ID) | \
	DPTUN_REG(DP_USB4_DRIVER_BW_CAPABILITY) | \
	DPTUN_REG(DP_IN_ADAPTER_TUNNEL_INFORMATION) | \
	DPTUN_REG(DP_BW_GRANULARITY) | \
	DPTUN_REG(DP_ESTIMATED_BW) | \
	DPTUN_REG(DP_ALLOCATED_BW) | \
	DPTUN_REG(DP_TUNNELING_MAX_LINK_RATE) | \
	DPTUN_REG(DP_TUNNELING_MAX_LANE_COUNT) | \
	DPTUN_REG(DP_DPTX_BW_ALLOCATION_MODE_CONTROL))

static const DECLARE_BITMAP(dptun_info_regs, 64) = {
	DPTUN_INFO_REG_MASK & -1UL,
#if BITS_PER_LONG == 32
	DPTUN_INFO_REG_MASK >> 32,
#endif
};

struct drm_dp_tunnel_regs {
	u8 buf[HWEIGHT64(DPTUN_INFO_REG_MASK)];
};

struct drm_dp_tunnel_group;

struct drm_dp_tunnel {
	struct drm_dp_tunnel_group *group;

	struct list_head node;

	struct kref kref;
	struct ref_tracker *tracker;
	struct drm_dp_aux *aux;
	char name[8];

	int bw_granularity;
	int estimated_bw;
	int allocated_bw;

	int max_dprx_rate;
	u8 max_dprx_lane_count;

	u8 adapter_id;

	bool bw_alloc_supported:1;
	bool bw_alloc_enabled:1;
	bool has_io_error:1;
	bool destroyed:1;
};

struct drm_dp_tunnel_group_state;

struct drm_dp_tunnel_state {
	struct drm_dp_tunnel_group_state *group_state;

	struct drm_dp_tunnel_ref tunnel_ref;

	struct list_head node;

	u32 stream_mask;
	int *stream_bw;
};

struct drm_dp_tunnel_group_state {
	struct drm_private_state base;

	struct list_head tunnel_states;
};

struct drm_dp_tunnel_group {
	struct drm_private_obj base;
	struct drm_dp_tunnel_mgr *mgr;

	struct list_head tunnels;

	/* available BW including the allocated_bw of all tunnels in the group */
	int available_bw;

	u8 drv_group_id;
	char name[8];

	bool active:1;
};

struct drm_dp_tunnel_mgr {
	struct drm_device *dev;

	int group_count;
	struct drm_dp_tunnel_group *groups;
	wait_queue_head_t bw_req_queue;

#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
	struct ref_tracker_dir ref_tracker;
#endif
};

/*
 * The following helpers provide a way to read out the tunneling DPCD
 * registers with a minimal amount of AUX transfers (1 transfer per contiguous
 * range, as permitted by the 16 byte per transfer AUX limit), not accessing
 * other registers to avoid any read side-effects.
 */
static int next_reg_area(int *offset)
{
	*offset = find_next_bit(dptun_info_regs, 64, *offset);

	return find_next_zero_bit(dptun_info_regs, 64, *offset + 1) - *offset;
}

#define tunnel_reg_ptr(__regs, __address) ({ \
	WARN_ON(!test_bit((__address) - DP_TUNNELING_BASE, dptun_info_regs)); \
	&(__regs)->buf[bitmap_weight(dptun_info_regs, (__address) - DP_TUNNELING_BASE)]; \
})

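/*
 * The buffer offset of a register in struct drm_dp_tunnel_regs is the number
 * of tracked registers at lower DPCD addresses, hence the bitmap_weight()
 * lookup in tunnel_reg_ptr() above. The expected access pattern is a single
 * read_tunnel_regs() call (below) to snapshot all the registers of interest,
 * followed by tunnel_reg()/tunnel_reg_ptr() lookups on that snapshot.
 */
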
offset = 0;220int len;221222while ((len = next_reg_area(&offset))) {223int address = DP_TUNNELING_BASE + offset;224225if (drm_dp_dpcd_read_data(aux, address, tunnel_reg_ptr(regs, address), len) < 0)226return -EIO;227228offset += len;229}230231return 0;232}233234static u8 tunnel_reg(const struct drm_dp_tunnel_regs *regs, int address)235{236return *tunnel_reg_ptr(regs, address);237}238239static u8 tunnel_reg_drv_group_id(const struct drm_dp_tunnel_regs *regs)240{241u8 drv_id = tunnel_reg(regs, DP_USB4_DRIVER_ID) & DP_USB4_DRIVER_ID_MASK;242u8 group_id = tunnel_reg(regs, DP_IN_ADAPTER_TUNNEL_INFORMATION) & DP_GROUP_ID_MASK;243244if (!group_id)245return 0;246247return (drv_id << DP_GROUP_ID_BITS) | group_id;248}249250/* Return granularity in kB/s units */251static int tunnel_reg_bw_granularity(const struct drm_dp_tunnel_regs *regs)252{253int gr = tunnel_reg(regs, DP_BW_GRANULARITY) & DP_BW_GRANULARITY_MASK;254255if (gr > 2)256return -1;257258return (250000 << gr) / 8;259}260261static int tunnel_reg_max_dprx_rate(const struct drm_dp_tunnel_regs *regs)262{263u8 bw_code = tunnel_reg(regs, DP_TUNNELING_MAX_LINK_RATE);264265return drm_dp_bw_code_to_link_rate(bw_code);266}267268static int tunnel_reg_max_dprx_lane_count(const struct drm_dp_tunnel_regs *regs)269{270return tunnel_reg(regs, DP_TUNNELING_MAX_LANE_COUNT) &271DP_TUNNELING_MAX_LANE_COUNT_MASK;272}273274static bool tunnel_reg_bw_alloc_supported(const struct drm_dp_tunnel_regs *regs)275{276u8 cap_mask = DP_TUNNELING_SUPPORT | DP_IN_BW_ALLOCATION_MODE_SUPPORT;277278if ((tunnel_reg(regs, DP_TUNNELING_CAPABILITIES) & cap_mask) != cap_mask)279return false;280281return tunnel_reg(regs, DP_USB4_DRIVER_BW_CAPABILITY) &282DP_USB4_DRIVER_BW_ALLOCATION_MODE_SUPPORT;283}284285static bool tunnel_reg_bw_alloc_enabled(const struct drm_dp_tunnel_regs *regs)286{287return tunnel_reg(regs, DP_DPTX_BW_ALLOCATION_MODE_CONTROL) &288DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE;289}290291static u8 tunnel_group_drv_id(u8 drv_group_id)292{293return drv_group_id >> DP_GROUP_ID_BITS;294}295296static u8 tunnel_group_id(u8 drv_group_id)297{298return drv_group_id & DP_GROUP_ID_MASK;299}300301const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel)302{303return tunnel->name;304}305EXPORT_SYMBOL(drm_dp_tunnel_name);306307static const char *drm_dp_tunnel_group_name(const struct drm_dp_tunnel_group *group)308{309return group->name;310}311312static struct drm_dp_tunnel_group *313lookup_or_alloc_group(struct drm_dp_tunnel_mgr *mgr, u8 drv_group_id)314{315struct drm_dp_tunnel_group *group = NULL;316int i;317318for (i = 0; i < mgr->group_count; i++) {319/*320* A tunnel group with 0 group ID shouldn't have more than one321* tunnels.322*/323if (tunnel_group_id(drv_group_id) &&324mgr->groups[i].drv_group_id == drv_group_id)325return &mgr->groups[i];326327if (!group && !mgr->groups[i].active)328group = &mgr->groups[i];329}330331if (!group) {332drm_dbg_kms(mgr->dev,333"DPTUN: Can't allocate more tunnel groups\n");334return NULL;335}336337group->drv_group_id = drv_group_id;338group->active = true;339340/*341* The group name format here and elsewhere: Driver-ID:Group-ID:*342* (* standing for all DP-Adapters/tunnels in the group).343*/344snprintf(group->name, sizeof(group->name), "%d:%d:*",345tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),346tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1));347348return group;349}350351static void free_group(struct drm_dp_tunnel_group *group)352{353struct drm_dp_tunnel_mgr *mgr = group->mgr;354355if 
static struct drm_dp_tunnel_group *
lookup_or_alloc_group(struct drm_dp_tunnel_mgr *mgr, u8 drv_group_id)
{
	struct drm_dp_tunnel_group *group = NULL;
	int i;

	for (i = 0; i < mgr->group_count; i++) {
		/*
		 * A tunnel group with a 0 group ID shouldn't have more than
		 * one tunnel.
		 */
		if (tunnel_group_id(drv_group_id) &&
		    mgr->groups[i].drv_group_id == drv_group_id)
			return &mgr->groups[i];

		if (!group && !mgr->groups[i].active)
			group = &mgr->groups[i];
	}

	if (!group) {
		drm_dbg_kms(mgr->dev,
			    "DPTUN: Can't allocate more tunnel groups\n");
		return NULL;
	}

	group->drv_group_id = drv_group_id;
	group->active = true;

	/*
	 * The group name format here and elsewhere: Driver-ID:Group-ID:*
	 * (* standing for all DP-Adapters/tunnels in the group).
	 */
	snprintf(group->name, sizeof(group->name), "%d:%d:*",
		 tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),
		 tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1));

	return group;
}

static void free_group(struct drm_dp_tunnel_group *group)
{
	struct drm_dp_tunnel_mgr *mgr = group->mgr;

	if (drm_WARN_ON(mgr->dev, !list_empty(&group->tunnels)))
		return;

	group->drv_group_id = 0;
	group->available_bw = -1;
	group->active = false;
}

static struct drm_dp_tunnel *
tunnel_get(struct drm_dp_tunnel *tunnel)
{
	kref_get(&tunnel->kref);

	return tunnel;
}

static void free_tunnel(struct kref *kref)
{
	struct drm_dp_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
	struct drm_dp_tunnel_group *group = tunnel->group;

	list_del(&tunnel->node);
	if (list_empty(&group->tunnels))
		free_group(group);

	kfree(tunnel);
}

static void tunnel_put(struct drm_dp_tunnel *tunnel)
{
	kref_put(&tunnel->kref, free_tunnel);
}

#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
			     struct ref_tracker **tracker)
{
	ref_tracker_alloc(&tunnel->group->mgr->ref_tracker,
			  tracker, GFP_KERNEL);
}

static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
			       struct ref_tracker **tracker)
{
	ref_tracker_free(&tunnel->group->mgr->ref_tracker,
			 tracker);
}
#else
static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
			     struct ref_tracker **tracker)
{
}

static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
			       struct ref_tracker **tracker)
{
}
#endif

/**
 * drm_dp_tunnel_get - Get a reference for a DP tunnel
 * @tunnel: Tunnel object
 * @tracker: Debug tracker for the reference
 *
 * Get a reference for @tunnel, along with a debug tracker to help locate the
 * source of a reference leak, a double reference put and similar issues.
 *
 * The reference must be dropped after use by calling drm_dp_tunnel_put(),
 * passing @tunnel and *@tracker returned from here.
 *
 * Returns @tunnel - as a convenience - along with *@tracker.
 */
struct drm_dp_tunnel *
drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel,
		  struct ref_tracker **tracker)
{
	track_tunnel_ref(tunnel, tracker);

	return tunnel_get(tunnel);
}
EXPORT_SYMBOL(drm_dp_tunnel_get);

/**
 * drm_dp_tunnel_put - Put a reference for a DP tunnel
 * @tunnel: Tunnel object
 * @tracker: Debug tracker for the reference
 *
 * Put a reference for @tunnel along with its debug *@tracker, which
 * was obtained with drm_dp_tunnel_get().
 */
void drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel,
		       struct ref_tracker **tracker)
{
	untrack_tunnel_ref(tunnel, tracker);

	tunnel_put(tunnel);
}
EXPORT_SYMBOL(drm_dp_tunnel_put);

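/*
 * A minimal usage sketch (illustrative only): a driver holding on to a tunnel
 * across some asynchronous work keeps a tracked reference for it:
 *
 *	struct ref_tracker *tracker;
 *
 *	tunnel = drm_dp_tunnel_get(tunnel, &tracker);
 *	... use tunnel ...
 *	drm_dp_tunnel_put(tunnel, &tracker);
 *
 * With CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG enabled the tracker records
 * where the reference was taken, helping to debug leaks and double puts.
 */
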
static bool add_tunnel_to_group(struct drm_dp_tunnel_mgr *mgr,
				u8 drv_group_id,
				struct drm_dp_tunnel *tunnel)
{
	struct drm_dp_tunnel_group *group;

	group = lookup_or_alloc_group(mgr, drv_group_id);
	if (!group)
		return false;

	tunnel->group = group;
	list_add(&tunnel->node, &group->tunnels);

	return true;
}

static struct drm_dp_tunnel *
create_tunnel(struct drm_dp_tunnel_mgr *mgr,
	      struct drm_dp_aux *aux,
	      const struct drm_dp_tunnel_regs *regs)
{
	u8 drv_group_id = tunnel_reg_drv_group_id(regs);
	struct drm_dp_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	INIT_LIST_HEAD(&tunnel->node);

	kref_init(&tunnel->kref);

	tunnel->aux = aux;

	tunnel->adapter_id = tunnel_reg(regs, DP_IN_ADAPTER_INFO) & DP_IN_ADAPTER_NUMBER_MASK;

	snprintf(tunnel->name, sizeof(tunnel->name), "%d:%d:%d",
		 tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),
		 tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1),
		 tunnel->adapter_id & ((1 << DP_IN_ADAPTER_NUMBER_BITS) - 1));

	tunnel->bw_granularity = tunnel_reg_bw_granularity(regs);
	tunnel->allocated_bw = tunnel_reg(regs, DP_ALLOCATED_BW) *
			       tunnel->bw_granularity;
	/*
	 * An initial allocated BW of 0 indicates an undefined state: the
	 * actual allocation is determined by the TBT CM, usually following a
	 * legacy allocation policy (based on the max DPRX caps). From the
	 * driver's POV the state becomes defined only after the first
	 * allocation request.
	 */
	if (!tunnel->allocated_bw)
		tunnel->allocated_bw = -1;

	tunnel->bw_alloc_supported = tunnel_reg_bw_alloc_supported(regs);
	tunnel->bw_alloc_enabled = tunnel_reg_bw_alloc_enabled(regs);

	if (!add_tunnel_to_group(mgr, drv_group_id, tunnel)) {
		kfree(tunnel);

		return NULL;
	}

	track_tunnel_ref(tunnel, &tunnel->tracker);

	return tunnel;
}

static void destroy_tunnel(struct drm_dp_tunnel *tunnel)
{
	untrack_tunnel_ref(tunnel, &tunnel->tracker);
	tunnel_put(tunnel);
}

/**
 * drm_dp_tunnel_set_io_error - Set the IO error flag for a DP tunnel
 * @tunnel: Tunnel object
 *
 * Set the IO error flag for @tunnel. Drivers can call this function upon
 * detecting a failure that affects the tunnel functionality, for instance
 * after a DP AUX transfer failure on the port @tunnel is connected to.
 *
 * This disables further management of @tunnel, including any related
 * AUX accesses for tunneling DPCD registers, returning an error to the
 * initiators of these accesses. The driver is supposed to drop this tunnel
 * and - optionally - recreate it.
 */
void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel)
{
	tunnel->has_io_error = true;
}
EXPORT_SYMBOL(drm_dp_tunnel_set_io_error);

#define SKIP_DPRX_CAPS_CHECK		BIT(0)
#define ALLOW_ALLOCATED_BW_CHANGE	BIT(1)
static bool tunnel_regs_are_valid(struct drm_dp_tunnel_mgr *mgr,
				  const struct drm_dp_tunnel_regs *regs,
				  unsigned int flags)
{
	u8 drv_group_id = tunnel_reg_drv_group_id(regs);
	bool check_dprx = !(flags & SKIP_DPRX_CAPS_CHECK);
	bool ret = true;

	if (!tunnel_reg_bw_alloc_supported(regs)) {
		if (tunnel_group_id(drv_group_id)) {
			drm_dbg_kms(mgr->dev,
				    "DPTUN: A non-zero group ID is only allowed with BWA support\n");
			ret = false;
		}

		if (tunnel_reg(regs, DP_ALLOCATED_BW)) {
			drm_dbg_kms(mgr->dev,
				    "DPTUN: BW is allocated without BWA support\n");
			ret = false;
		}

		return ret;
	}

	if (!tunnel_group_id(drv_group_id)) {
		drm_dbg_kms(mgr->dev,
			    "DPTUN: BWA support requires a non-zero group ID\n");
		ret = false;
	}

	if (check_dprx && hweight8(tunnel_reg_max_dprx_lane_count(regs)) != 1) {
		drm_dbg_kms(mgr->dev,
			    "DPTUN: Invalid DPRX lane count: %d\n",
			    tunnel_reg_max_dprx_lane_count(regs));

		ret = false;
	}

	if (check_dprx && !tunnel_reg_max_dprx_rate(regs)) {
		drm_dbg_kms(mgr->dev,
			    "DPTUN: DPRX rate is 0\n");

		ret = false;
	}

	if (tunnel_reg_bw_granularity(regs) < 0) {
		drm_dbg_kms(mgr->dev,
			    "DPTUN: Invalid BW granularity\n");

		ret = false;
	}

	if (tunnel_reg(regs, DP_ALLOCATED_BW) > tunnel_reg(regs, DP_ESTIMATED_BW)) {
		drm_dbg_kms(mgr->dev,
			    "DPTUN: Allocated BW %d > estimated BW %d Mb/s\n",
			    DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) *
					 tunnel_reg_bw_granularity(regs)),
			    DPTUN_BW_ARG(tunnel_reg(regs, DP_ESTIMATED_BW) *
					 tunnel_reg_bw_granularity(regs)));

		ret = false;
	}

	return ret;
}

static int tunnel_allocated_bw(const struct drm_dp_tunnel *tunnel)
{
	return max(tunnel->allocated_bw, 0);
}

static bool tunnel_info_changes_are_valid(struct drm_dp_tunnel *tunnel,
					  const struct drm_dp_tunnel_regs *regs,
					  unsigned int flags)
{
	u8 new_drv_group_id = tunnel_reg_drv_group_id(regs);
	bool ret = true;

	if (tunnel->bw_alloc_supported != tunnel_reg_bw_alloc_supported(regs)) {
		tun_dbg(tunnel,
			"BW alloc support has changed %s -> %s\n",
			str_yes_no(tunnel->bw_alloc_supported),
			str_yes_no(tunnel_reg_bw_alloc_supported(regs)));

		ret = false;
	}

	if (tunnel->group->drv_group_id != new_drv_group_id) {
		tun_dbg(tunnel,
			"Driver/group ID has changed %d:%d:* -> %d:%d:*\n",
			tunnel_group_drv_id(tunnel->group->drv_group_id),
			tunnel_group_id(tunnel->group->drv_group_id),
			tunnel_group_drv_id(new_drv_group_id),
			tunnel_group_id(new_drv_group_id));

		ret = false;
	}

	if (!tunnel->bw_alloc_supported)
		return ret;

	if (tunnel->bw_granularity != tunnel_reg_bw_granularity(regs)) {
		tun_dbg(tunnel,
			"BW granularity has changed: %d -> %d Mb/s\n",
			DPTUN_BW_ARG(tunnel->bw_granularity),
			DPTUN_BW_ARG(tunnel_reg_bw_granularity(regs)));

		ret = false;
	}

	/*
	 * On some devices at least the BW alloc mode enabled status is always
	 * reported as 0, so skip checking that here.
	 */

	if (!(flags & ALLOW_ALLOCATED_BW_CHANGE) &&
	    tunnel_allocated_bw(tunnel) !=
	    tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity) {
		tun_dbg(tunnel,
			"Allocated BW has changed: %d -> %d Mb/s\n",
			DPTUN_BW_ARG(tunnel->allocated_bw),
			DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity));

		ret = false;
	}

	return ret;
}

static int
read_and_verify_tunnel_regs(struct drm_dp_tunnel *tunnel,
			    struct drm_dp_tunnel_regs *regs,
			    unsigned int flags)
{
	int err;

	err = read_tunnel_regs(tunnel->aux, regs);
	if (err < 0) {
		drm_dp_tunnel_set_io_error(tunnel);

		return err;
	}

	if (!tunnel_regs_are_valid(tunnel->group->mgr, regs, flags))
		return -EINVAL;

	if (!tunnel_info_changes_are_valid(tunnel, regs, flags))
		return -EINVAL;

	return 0;
}

static bool update_dprx_caps(struct drm_dp_tunnel *tunnel, const struct drm_dp_tunnel_regs *regs)
{
	bool changed = false;

	if (tunnel_reg_max_dprx_rate(regs) != tunnel->max_dprx_rate) {
		tunnel->max_dprx_rate = tunnel_reg_max_dprx_rate(regs);
		changed = true;
	}

	if (tunnel_reg_max_dprx_lane_count(regs) != tunnel->max_dprx_lane_count) {
		tunnel->max_dprx_lane_count = tunnel_reg_max_dprx_lane_count(regs);
		changed = true;
	}

	return changed;
}

static int dev_id_len(const u8 *dev_id, int max_len)
{
	while (max_len && dev_id[max_len - 1] == '\0')
		max_len--;

	return max_len;
}

static int get_max_dprx_bw(const struct drm_dp_tunnel *tunnel)
{
	int max_dprx_bw = drm_dp_max_dprx_data_rate(tunnel->max_dprx_rate,
						    tunnel->max_dprx_lane_count);

	/*
	 * A BW request of roundup(max_dprx_bw, tunnel->bw_granularity) results in
	 * an allocation of max_dprx_bw. A BW request above this rounded-up
	 * value will fail.
	 */
	return min(roundup(max_dprx_bw, tunnel->bw_granularity),
		   MAX_DP_REQUEST_BW * tunnel->bw_granularity);
}

static int get_max_tunnel_bw(const struct drm_dp_tunnel *tunnel)
{
	return min(get_max_dprx_bw(tunnel), tunnel->group->available_bw);
}

/**
 * drm_dp_tunnel_detect - Detect DP tunnel on the link
 * @mgr: Tunnel manager
 * @aux: DP AUX on which the tunnel will be detected
 *
 * Detect if there is any DP tunnel on the link and add it to the tunnel
 * group's tunnel list.
 *
 * Returns a pointer to a tunnel on success, or an ERR_PTR() error on
 * failure.
 */
struct drm_dp_tunnel *
drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
		     struct drm_dp_aux *aux)
{
	struct drm_dp_tunnel_regs regs;
	struct drm_dp_tunnel *tunnel;
	int err;

	err = read_tunnel_regs(aux, &regs);
	if (err)
		return ERR_PTR(err);

	if (!(tunnel_reg(&regs, DP_TUNNELING_CAPABILITIES) &
	      DP_TUNNELING_SUPPORT))
		return ERR_PTR(-ENODEV);

	/* The DPRX caps are valid only after enabling BW alloc mode. */
	if (!tunnel_regs_are_valid(mgr, &regs, SKIP_DPRX_CAPS_CHECK))
		return ERR_PTR(-EINVAL);

	tunnel = create_tunnel(mgr, aux, &regs);
	if (!tunnel)
		return ERR_PTR(-ENOMEM);

	tun_dbg(tunnel,
		"OUI:%*phD DevID:%*pE Rev-HW:%d.%d SW:%d.%d PR-Sup:%s BWA-Sup:%s BWA-En:%s\n",
		DP_TUNNELING_OUI_BYTES,
		tunnel_reg_ptr(&regs, DP_TUNNELING_OUI),
		dev_id_len(tunnel_reg_ptr(&regs, DP_TUNNELING_DEV_ID), DP_TUNNELING_DEV_ID_BYTES),
		tunnel_reg_ptr(&regs, DP_TUNNELING_DEV_ID),
		(tunnel_reg(&regs, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MAJOR_MASK) >>
			DP_TUNNELING_HW_REV_MAJOR_SHIFT,
		(tunnel_reg(&regs, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MINOR_MASK) >>
			DP_TUNNELING_HW_REV_MINOR_SHIFT,
		tunnel_reg(&regs, DP_TUNNELING_SW_REV_MAJOR),
		tunnel_reg(&regs, DP_TUNNELING_SW_REV_MINOR),
		str_yes_no(tunnel_reg(&regs, DP_TUNNELING_CAPABILITIES) &
			   DP_PANEL_REPLAY_OPTIMIZATION_SUPPORT),
		str_yes_no(tunnel->bw_alloc_supported),
		str_yes_no(tunnel->bw_alloc_enabled));

	return tunnel;
}
EXPORT_SYMBOL(drm_dp_tunnel_detect);

/**
 * drm_dp_tunnel_destroy - Destroy tunnel object
 * @tunnel: Tunnel object
 *
 * Remove the tunnel from the tunnel topology and destroy it.
 *
 * Returns 0 on success, -ENODEV if the tunnel has been destroyed already.
 */
int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel)
{
	if (!tunnel)
		return 0;

	if (drm_WARN_ON(tunnel->group->mgr->dev, tunnel->destroyed))
		return -ENODEV;

	tun_dbg(tunnel, "destroying\n");

	tunnel->destroyed = true;
	destroy_tunnel(tunnel);

	return 0;
}
EXPORT_SYMBOL(drm_dp_tunnel_destroy);

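/*
 * A minimal detection sketch (illustrative only): a driver typically probes
 * for a tunnel when a sink gets connected and drops the tunnel on disconnect
 * or after an unrecoverable error:
 *
 *	tunnel = drm_dp_tunnel_detect(mgr, aux);
 *	if (IS_ERR(tunnel))
 *		return PTR_ERR(tunnel);		// -ENODEV: no tunnel on this AUX
 *	...
 *	drm_dp_tunnel_destroy(tunnel);
 */
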
static int check_tunnel(const struct drm_dp_tunnel *tunnel)
{
	if (tunnel->destroyed)
		return -ENODEV;

	if (tunnel->has_io_error)
		return -EIO;

	return 0;
}

static int group_allocated_bw(struct drm_dp_tunnel_group *group)
{
	struct drm_dp_tunnel *tunnel;
	int group_allocated_bw = 0;

	for_each_tunnel_in_group(group, tunnel) {
		if (check_tunnel(tunnel) == 0 &&
		    tunnel->bw_alloc_enabled)
			group_allocated_bw += tunnel_allocated_bw(tunnel);
	}

	return group_allocated_bw;
}

/*
 * The estimated BW reported by the TBT Connection Manager for each tunnel in
 * a group includes the BW already allocated for the given tunnel and the
 * unallocated BW which is free to be used by any tunnel in the group.
 */
static int group_free_bw(const struct drm_dp_tunnel *tunnel)
{
	return tunnel->estimated_bw - tunnel_allocated_bw(tunnel);
}

static int calc_group_available_bw(const struct drm_dp_tunnel *tunnel)
{
	return group_allocated_bw(tunnel->group) +
	       group_free_bw(tunnel);
}

static int update_group_available_bw(struct drm_dp_tunnel *tunnel,
				     const struct drm_dp_tunnel_regs *regs)
{
	struct drm_dp_tunnel *tunnel_iter;
	int group_available_bw;
	bool changed;

	tunnel->estimated_bw = tunnel_reg(regs, DP_ESTIMATED_BW) * tunnel->bw_granularity;

	if (calc_group_available_bw(tunnel) == tunnel->group->available_bw)
		return 0;

	for_each_tunnel_in_group(tunnel->group, tunnel_iter) {
		int err;

		if (tunnel_iter == tunnel)
			continue;

		if (check_tunnel(tunnel_iter) != 0 ||
		    !tunnel_iter->bw_alloc_enabled)
			continue;

		err = drm_dp_dpcd_probe(tunnel_iter->aux, DP_DPCD_REV);
		if (err) {
			tun_dbg(tunnel_iter,
				"Probe failed, assume disconnected (err %pe)\n",
				ERR_PTR(err));
			drm_dp_tunnel_set_io_error(tunnel_iter);
		}
	}

	group_available_bw = calc_group_available_bw(tunnel);

	tun_dbg(tunnel, "Updated group available BW: %d->%d\n",
		DPTUN_BW_ARG(tunnel->group->available_bw),
		DPTUN_BW_ARG(group_available_bw));

	changed = tunnel->group->available_bw != group_available_bw;

	tunnel->group->available_bw = group_available_bw;

	return changed ? 1 : 0;
}

static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
{
	u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | DP_UNMASK_BW_ALLOCATION_IRQ;
	u8 val;

	if (drm_dp_dpcd_read_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
		goto out_err;

	if (enable)
		val |= mask;
	else
		val &= ~mask;

	if (drm_dp_dpcd_write_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
		goto out_err;

	tunnel->bw_alloc_enabled = enable;

	return 0;

out_err:
	drm_dp_tunnel_set_io_error(tunnel);

	return -EIO;
}

/**
 * drm_dp_tunnel_enable_bw_alloc - Enable DP tunnel BW allocation mode
 * @tunnel: Tunnel object
 *
 * Enable the DP tunnel BW allocation mode on @tunnel if it supports it.
 *
 * Returns 0 in case of success, negative error code otherwise.
 */
int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel)
{
	struct drm_dp_tunnel_regs regs;
	int err;

	err = check_tunnel(tunnel);
	if (err)
		return err;

	if (!tunnel->bw_alloc_supported)
		return -EOPNOTSUPP;

	if (!tunnel_group_id(tunnel->group->drv_group_id))
		return -EINVAL;

	err = set_bw_alloc_mode(tunnel, true);
	if (err)
		goto out;

	/*
	 * After a BWA disable/re-enable sequence the allocated BW can either
	 * stay at its last requested value or, for instance after system
	 * suspend/resume, TBT CM can reset back the allocation to the amount
	 * allocated in the legacy/non-BWA mode. Accordingly allow for the
	 * allocation to change wrt. the last SW state.
	 */
	err = read_and_verify_tunnel_regs(tunnel, &regs,
					  ALLOW_ALLOCATED_BW_CHANGE);
	if (err) {
		set_bw_alloc_mode(tunnel, false);

		goto out;
	}

	if (!tunnel->max_dprx_rate)
		update_dprx_caps(tunnel, &regs);

	if (tunnel->group->available_bw == -1) {
		err = update_group_available_bw(tunnel, &regs);
		if (err > 0)
			err = 0;
	}
out:
	tun_dbg_stat(tunnel, err,
		     "Enabling BW alloc mode: DPRX:%dx%d Group alloc:%d/%d Mb/s",
		     tunnel->max_dprx_rate / 100, tunnel->max_dprx_lane_count,
		     DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
		     DPTUN_BW_ARG(tunnel->group->available_bw));

	return err;
}
EXPORT_SYMBOL(drm_dp_tunnel_enable_bw_alloc);

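/*
 * A sketch of the expected enabling sequence (illustrative only): after a
 * successful drm_dp_tunnel_detect() the driver tries to switch the tunnel to
 * the BW allocation mode and falls back to the legacy, CM-managed allocation
 * if the mode isn't supported:
 *
 *	err = drm_dp_tunnel_enable_bw_alloc(tunnel);
 *	if (err == -EOPNOTSUPP)
 *		err = 0;	// keep using the driver-transparent allocation
 *
 * The mode is typically re-enabled the same way after a system resume.
 */
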
/**
 * drm_dp_tunnel_disable_bw_alloc - Disable DP tunnel BW allocation mode
 * @tunnel: Tunnel object
 *
 * Disable the DP tunnel BW allocation mode on @tunnel.
 *
 * Returns 0 in case of success, negative error code otherwise.
 */
int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel)
{
	int err;

	err = check_tunnel(tunnel);
	if (err)
		return err;

	tunnel->allocated_bw = -1;

	err = set_bw_alloc_mode(tunnel, false);

	tun_dbg_stat(tunnel, err, "Disabling BW alloc mode");

	return err;
}
EXPORT_SYMBOL(drm_dp_tunnel_disable_bw_alloc);

/**
 * drm_dp_tunnel_bw_alloc_is_enabled - Query the BW allocation mode enabled state
 * @tunnel: Tunnel object
 *
 * Query if the BW allocation mode is enabled for @tunnel.
 *
 * Returns %true if the BW allocation mode is enabled for @tunnel.
 */
bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel)
{
	return tunnel && tunnel->bw_alloc_enabled;
}
EXPORT_SYMBOL(drm_dp_tunnel_bw_alloc_is_enabled);

static int clear_bw_req_state(struct drm_dp_aux *aux)
{
	u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;

	if (drm_dp_dpcd_write_byte(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
		return -EIO;

	return 0;
}

static int bw_req_complete(struct drm_dp_aux *aux, bool *status_changed)
{
	u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;
	u8 status_change_mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
	u8 val;
	int err;

	if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0)
		return -EIO;

	*status_changed = val & status_change_mask;

	val &= bw_req_mask;

	if (!val)
		return -EAGAIN;

	err = clear_bw_req_state(aux);
	if (err < 0)
		return err;

	return val == DP_BW_REQUEST_SUCCEEDED ? 0 : -ENOSPC;
}

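/*
 * BW request handshake implemented by allocate_tunnel_bw() below: the request
 * is written to DP_REQUEST_BW in bw_granularity units, after which the DP-IN
 * adapter signals the result by setting either DP_BW_REQUEST_SUCCEEDED or
 * DP_BW_REQUEST_FAILED in DP_TUNNELING_STATUS and raising a tunneling IRQ.
 * The driver's IRQ handling (see drm_dp_tunnel_handle_irq()) wakes up the
 * waiter on bw_req_queue, which then completes or retries the request; the
 * wait is abandoned after a 3 second timeout.
 */
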
static int allocate_tunnel_bw(struct drm_dp_tunnel *tunnel, int bw)
{
	struct drm_dp_tunnel_mgr *mgr = tunnel->group->mgr;
	int request_bw = DIV_ROUND_UP(bw, tunnel->bw_granularity);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	long timeout;
	int err;

	if (bw < 0) {
		err = -EINVAL;
		goto out;
	}

	if (request_bw * tunnel->bw_granularity == tunnel->allocated_bw)
		return 0;

	/* Atomic check should prevent the following. */
	if (drm_WARN_ON(mgr->dev, request_bw > MAX_DP_REQUEST_BW)) {
		err = -EINVAL;
		goto out;
	}

	err = clear_bw_req_state(tunnel->aux);
	if (err)
		goto out;

	if (drm_dp_dpcd_write_byte(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
		err = -EIO;
		goto out;
	}

	timeout = msecs_to_jiffies(3000);
	add_wait_queue(&mgr->bw_req_queue, &wait);

	for (;;) {
		bool status_changed;

		err = bw_req_complete(tunnel->aux, &status_changed);
		if (err != -EAGAIN)
			break;

		if (status_changed) {
			struct drm_dp_tunnel_regs regs;

			err = read_and_verify_tunnel_regs(tunnel, &regs,
							  ALLOW_ALLOCATED_BW_CHANGE);
			if (err)
				break;
		}

		if (!timeout) {
			err = -ETIMEDOUT;
			break;
		}

		timeout = wait_woken(&wait, TASK_UNINTERRUPTIBLE, timeout);
	}

	remove_wait_queue(&mgr->bw_req_queue, &wait);

	if (err)
		goto out;

	tunnel->allocated_bw = request_bw * tunnel->bw_granularity;

out:
	tun_dbg_stat(tunnel, err, "Allocating %d/%d Mb/s for tunnel: Group alloc:%d/%d Mb/s",
		     DPTUN_BW_ARG(request_bw * tunnel->bw_granularity),
		     DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)),
		     DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
		     DPTUN_BW_ARG(tunnel->group->available_bw));

	if (err == -EIO)
		drm_dp_tunnel_set_io_error(tunnel);

	return err;
}

/**
 * drm_dp_tunnel_alloc_bw - Allocate BW for a DP tunnel
 * @tunnel: Tunnel object
 * @bw: BW in kB/s units
 *
 * Allocate @bw kB/s for @tunnel. The allocated BW must be freed after use by
 * calling this function for the same tunnel setting @bw to 0.
 *
 * Returns 0 in case of success, a negative error code otherwise.
 */
int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw)
{
	int err;

	err = check_tunnel(tunnel);
	if (err)
		return err;

	return allocate_tunnel_bw(tunnel, bw);
}
EXPORT_SYMBOL(drm_dp_tunnel_alloc_bw);

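/*
 * A minimal allocation sketch (illustrative only), sizing the tunnel for the
 * BW required by the enabled streams and releasing it when the output gets
 * disabled:
 *
 *	// required_bw in kB/s, e.g. from drm_dp_tunnel_atomic_get_required_bw()
 *	err = drm_dp_tunnel_alloc_bw(tunnel, required_bw);
 *	...
 *	err = drm_dp_tunnel_alloc_bw(tunnel, 0);	// free the allocation
 */
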
/**
 * drm_dp_tunnel_get_allocated_bw - Get the BW allocated for a DP tunnel
 * @tunnel: Tunnel object
 *
 * Get the current BW allocated for @tunnel. After the tunnel is created /
 * resumed and the BW allocation mode is enabled for it, the allocation
 * becomes determined only after the first allocation request by the driver
 * calling drm_dp_tunnel_alloc_bw().
 *
 * Return the BW allocated for the tunnel, or -1 if the allocation is
 * undetermined.
 */
int drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel)
{
	return tunnel->allocated_bw;
}
EXPORT_SYMBOL(drm_dp_tunnel_get_allocated_bw);

/*
 * Return 0 if the status hasn't changed, 1 if the status has changed, a
 * negative error code in case of an I/O failure.
 */
static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
{
	u8 mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
	u8 val;

	if (drm_dp_dpcd_read_byte(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
		goto out_err;

	val &= mask;

	if (val) {
		if (drm_dp_dpcd_write_byte(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
			goto out_err;

		return 1;
	}

	if (!drm_dp_tunnel_bw_alloc_is_enabled(tunnel))
		return 0;

	/*
	 * Check for estimated BW changes explicitly to account for lost
	 * BW change notifications.
	 */
	if (drm_dp_dpcd_read_byte(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
		goto out_err;

	if (val * tunnel->bw_granularity != tunnel->estimated_bw)
		return 1;

	return 0;

out_err:
	drm_dp_tunnel_set_io_error(tunnel);

	return -EIO;
}

/**
 * drm_dp_tunnel_update_state - Update DP tunnel SW state with the HW state
 * @tunnel: Tunnel object
 *
 * Update the SW state of @tunnel with the HW state.
 *
 * Returns 0 if the state has not changed, 1 if it has changed and got updated
 * successfully and a negative error code otherwise.
 */
int drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel)
{
	struct drm_dp_tunnel_regs regs;
	bool changed = false;
	int ret;

	ret = check_tunnel(tunnel);
	if (ret < 0)
		return ret;

	ret = check_and_clear_status_change(tunnel);
	if (ret < 0)
		goto out;

	if (!ret)
		return 0;

	ret = read_and_verify_tunnel_regs(tunnel, &regs, 0);
	if (ret)
		goto out;

	if (update_dprx_caps(tunnel, &regs))
		changed = true;

	ret = update_group_available_bw(tunnel, &regs);
	if (ret == 1)
		changed = true;

out:
	tun_dbg_stat(tunnel, ret < 0 ? ret : 0,
		     "State update: Changed:%s DPRX:%dx%d Tunnel alloc:%d/%d Group alloc:%d/%d Mb/s",
		     str_yes_no(changed),
		     tunnel->max_dprx_rate / 100, tunnel->max_dprx_lane_count,
		     DPTUN_BW_ARG(tunnel->allocated_bw),
		     DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)),
		     DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
		     DPTUN_BW_ARG(tunnel->group->available_bw));

	if (ret < 0)
		return ret;

	if (changed)
		return 1;

	return 0;
}
EXPORT_SYMBOL(drm_dp_tunnel_update_state);

/*
 * drm_dp_tunnel_handle_irq - Handle DP tunnel IRQs
 *
 * Handle any pending DP tunnel IRQs, waking up waiters for a completion
 * event.
 *
 * Returns 1 if the state of the tunnel has changed which requires calling
 * drm_dp_tunnel_update_state(), a negative error code in case of a failure,
 * 0 otherwise.
 */
int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *aux)
{
	u8 val;

	if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0)
		return -EIO;

	if (val & (DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED))
		wake_up_all(&mgr->bw_req_queue);

	if (val & (DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED))
		return 1;

	return 0;
}
EXPORT_SYMBOL(drm_dp_tunnel_handle_irq);

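/*
 * A sketch of the expected IRQ handling flow (illustrative only): the driver
 * calls drm_dp_tunnel_handle_irq() from its DP IRQ/HPD handling for the port
 * and follows up with a state update when the tunnel state changed:
 *
 *	ret = drm_dp_tunnel_handle_irq(mgr, aux);
 *	if (ret == 1)
 *		ret = drm_dp_tunnel_update_state(tunnel);
 *	if (ret == 1) {
 *		// the available/estimated BW has changed: recheck the BW
 *		// limits, for instance by scheduling a new atomic commit
 *	}
 */
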
/**
 * drm_dp_tunnel_max_dprx_rate - Query the maximum rate of the tunnel's DPRX
 * @tunnel: Tunnel object
 *
 * The function is used to query the maximum link rate of the DPRX connected
 * to @tunnel. Note that this rate will not be limited by the BW limit of the
 * tunnel, as opposed to the standard and extended DP_MAX_LINK_RATE DPCD
 * registers.
 *
 * Returns the maximum link rate in 10 kbit/s units.
 */
int drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel)
{
	return tunnel->max_dprx_rate;
}
EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_rate);

/**
 * drm_dp_tunnel_max_dprx_lane_count - Query the maximum lane count of the tunnel's DPRX
 * @tunnel: Tunnel object
 *
 * The function is used to query the maximum lane count of the DPRX connected
 * to @tunnel. Note that this lane count will not be limited by the BW limit of
 * the tunnel, as opposed to the standard and extended DP_MAX_LANE_COUNT DPCD
 * registers.
 *
 * Returns the maximum lane count.
 */
int drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel)
{
	return tunnel->max_dprx_lane_count;
}
EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_lane_count);

/**
 * drm_dp_tunnel_available_bw - Query the estimated total available BW of the tunnel
 * @tunnel: Tunnel object
 *
 * This function is used to query the estimated total available BW of the
 * tunnel. This includes the currently allocated and free BW for all the
 * tunnels in @tunnel's group. The available BW is valid only after the BW
 * allocation mode has been enabled for the tunnel and its state has been
 * updated by calling drm_dp_tunnel_update_state().
 *
 * Returns the @tunnel group's estimated total available bandwidth in kB/s
 * units, or -1 if the available BW isn't valid (the BW allocation mode is
 * not enabled or the tunnel's state hasn't been updated).
 */
int drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel)
{
	return tunnel->group->available_bw;
}
EXPORT_SYMBOL(drm_dp_tunnel_available_bw);

static struct drm_dp_tunnel_group_state *
drm_dp_tunnel_atomic_get_group_state(struct drm_atomic_state *state,
				     const struct drm_dp_tunnel *tunnel)
{
	return (struct drm_dp_tunnel_group_state *)
		drm_atomic_get_private_obj_state(state,
						 &tunnel->group->base);
}

static struct drm_dp_tunnel_state *
add_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
		 struct drm_dp_tunnel *tunnel)
{
	struct drm_dp_tunnel_state *tunnel_state;

	tun_dbg_atomic(tunnel,
		       "Adding state for tunnel %p to group state %p\n",
		       tunnel, group_state);

	tunnel_state = kzalloc(sizeof(*tunnel_state), GFP_KERNEL);
	if (!tunnel_state)
		return NULL;

	tunnel_state->group_state = group_state;

	drm_dp_tunnel_ref_get(tunnel, &tunnel_state->tunnel_ref);

	INIT_LIST_HEAD(&tunnel_state->node);
	list_add(&tunnel_state->node, &group_state->tunnel_states);

	return tunnel_state;
}

static void free_tunnel_state(struct drm_dp_tunnel_state *tunnel_state)
{
	tun_dbg_atomic(tunnel_state->tunnel_ref.tunnel,
		       "Freeing state for tunnel %p\n",
		       tunnel_state->tunnel_ref.tunnel);

	list_del(&tunnel_state->node);

	kfree(tunnel_state->stream_bw);
	drm_dp_tunnel_ref_put(&tunnel_state->tunnel_ref);

	kfree(tunnel_state);
}

static void free_group_state(struct drm_dp_tunnel_group_state *group_state)
{
	struct drm_dp_tunnel_state *tunnel_state;
	struct drm_dp_tunnel_state *tunnel_state_tmp;

	for_each_tunnel_state_safe(group_state, tunnel_state, tunnel_state_tmp)
		free_tunnel_state(tunnel_state);

	kfree(group_state);
}

static struct drm_dp_tunnel_state *
get_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
		 const struct drm_dp_tunnel *tunnel)
{
	struct drm_dp_tunnel_state *tunnel_state;

	for_each_tunnel_state(group_state, tunnel_state)
		if (tunnel_state->tunnel_ref.tunnel == tunnel)
			return tunnel_state;

	return NULL;
}

static struct drm_dp_tunnel_state *
get_or_add_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
			struct drm_dp_tunnel *tunnel)
{
	struct drm_dp_tunnel_state *tunnel_state;

	tunnel_state = get_tunnel_state(group_state, tunnel);
	if (tunnel_state)
		return tunnel_state;

	return add_tunnel_state(group_state, tunnel);
}

static struct drm_private_state *
tunnel_group_duplicate_state(struct drm_private_obj *obj)
{
	struct drm_dp_tunnel_group_state *group_state;
	struct drm_dp_tunnel_state *tunnel_state;

	group_state = kzalloc(sizeof(*group_state), GFP_KERNEL);
	if (!group_state)
		return NULL;

	INIT_LIST_HEAD(&group_state->tunnel_states);

	__drm_atomic_helper_private_obj_duplicate_state(obj, &group_state->base);

	for_each_tunnel_state(to_group_state(obj->state), tunnel_state) {
		struct drm_dp_tunnel_state *new_tunnel_state;

		new_tunnel_state = get_or_add_tunnel_state(group_state,
							   tunnel_state->tunnel_ref.tunnel);
		if (!new_tunnel_state)
			goto out_free_state;

		new_tunnel_state->stream_mask = tunnel_state->stream_mask;
		new_tunnel_state->stream_bw = kmemdup(tunnel_state->stream_bw,
						      sizeof(*tunnel_state->stream_bw) *
							hweight32(tunnel_state->stream_mask),
						      GFP_KERNEL);

		if (!new_tunnel_state->stream_bw)
			goto out_free_state;
	}

	return &group_state->base;

out_free_state:
	free_group_state(group_state);

	return NULL;
}

static void tunnel_group_destroy_state(struct drm_private_obj *obj, struct drm_private_state *state)
{
	free_group_state(to_group_state(state));
}

static const struct drm_private_state_funcs tunnel_group_funcs = {
	.atomic_duplicate_state = tunnel_group_duplicate_state,
	.atomic_destroy_state = tunnel_group_destroy_state,
};

/**
 * drm_dp_tunnel_atomic_get_state - get/allocate the new atomic state for a tunnel
 * @state: Atomic state
 * @tunnel: Tunnel to get the state for
 *
 * Get the new atomic state for @tunnel, duplicating it from the old tunnel
 * state if not yet allocated.
 *
 * Return the state or an ERR_PTR() error on failure.
 */
struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state,
			       struct drm_dp_tunnel *tunnel)
{
	struct drm_dp_tunnel_group_state *group_state;
	struct drm_dp_tunnel_state *tunnel_state;

	group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
	if (IS_ERR(group_state))
		return ERR_CAST(group_state);

	tunnel_state = get_or_add_tunnel_state(group_state, tunnel);
	if (!tunnel_state)
		return ERR_PTR(-ENOMEM);

	return tunnel_state;
}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_state);

/**
 * drm_dp_tunnel_atomic_get_old_state - get the old atomic state for a tunnel
 * @state: Atomic state
 * @tunnel: Tunnel to get the state for
 *
 * Get the old atomic state for @tunnel.
 *
 * Return the old state or NULL if the tunnel's atomic state is not in @state.
 */
struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_old_state(struct drm_atomic_state *state,
				   const struct drm_dp_tunnel *tunnel)
{
	struct drm_dp_tunnel_group_state *old_group_state;
	int i;

	for_each_old_group_in_state(state, old_group_state, i)
		if (to_group(old_group_state->base.obj) == tunnel->group)
			return get_tunnel_state(old_group_state, tunnel);

	return NULL;
}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_old_state);

/**
 * drm_dp_tunnel_atomic_get_new_state - get the new atomic state for a tunnel
 * @state: Atomic state
 * @tunnel: Tunnel to get the state for
 *
 * Get the new atomic state for @tunnel.
 *
 * Return the new state or NULL if the tunnel's atomic state is not in @state.
 */
struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state,
				   const struct drm_dp_tunnel *tunnel)
{
	struct drm_dp_tunnel_group_state *new_group_state;
	int i;

	for_each_new_group_in_state(state, new_group_state, i)
		if (to_group(new_group_state->base.obj) == tunnel->group)
			return get_tunnel_state(new_group_state, tunnel);

	return NULL;
}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_new_state);

static bool init_group(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_tunnel_group *group)
{
	struct drm_dp_tunnel_group_state *group_state;

	group_state = kzalloc(sizeof(*group_state), GFP_KERNEL);
	if (!group_state)
		return false;

	INIT_LIST_HEAD(&group_state->tunnel_states);

	group->mgr = mgr;
	group->available_bw = -1;
	INIT_LIST_HEAD(&group->tunnels);

	drm_atomic_private_obj_init(mgr->dev, &group->base, &group_state->base,
				    &tunnel_group_funcs);

	return true;
}

static void cleanup_group(struct drm_dp_tunnel_group *group)
{
	drm_atomic_private_obj_fini(&group->base);
}

#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state)
{
	const struct drm_dp_tunnel_state *tunnel_state;
	u32 stream_mask = 0;

	for_each_tunnel_state(group_state, tunnel_state) {
		drm_WARN(to_group(group_state->base.obj)->mgr->dev,
			 tunnel_state->stream_mask & stream_mask,
			 "[DPTUN %s]: conflicting stream IDs %x (IDs in other tunnels %x)\n",
			 tunnel_state->tunnel_ref.tunnel->name,
			 tunnel_state->stream_mask,
			 stream_mask);

		stream_mask |= tunnel_state->stream_mask;
	}
}
#else
static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state)
{
}
#endif

static int stream_id_to_idx(u32 stream_mask, u8 stream_id)
{
	return hweight32(stream_mask & (BIT(stream_id) - 1));
}

static int resize_bw_array(struct drm_dp_tunnel_state *tunnel_state,
			   unsigned long old_mask, unsigned long new_mask)
{
	unsigned long move_mask = old_mask & new_mask;
	int *new_bws = NULL;
	int id;

	WARN_ON(!new_mask);

	if (old_mask == new_mask)
		return 0;

	new_bws = kcalloc(hweight32(new_mask), sizeof(*new_bws), GFP_KERNEL);
	if (!new_bws)
		return -ENOMEM;

	for_each_set_bit(id, &move_mask, BITS_PER_TYPE(move_mask))
		new_bws[stream_id_to_idx(new_mask, id)] =
			tunnel_state->stream_bw[stream_id_to_idx(old_mask, id)];

	kfree(tunnel_state->stream_bw);
	tunnel_state->stream_bw = new_bws;
	tunnel_state->stream_mask = new_mask;

	return 0;
}

static int set_stream_bw(struct drm_dp_tunnel_state *tunnel_state,
			 u8 stream_id, int bw)
{
	int err;

	err = resize_bw_array(tunnel_state,
			      tunnel_state->stream_mask,
			      tunnel_state->stream_mask | BIT(stream_id));
	if (err)
		return err;

	tunnel_state->stream_bw[stream_id_to_idx(tunnel_state->stream_mask, stream_id)] = bw;

	return 0;
}

static int clear_stream_bw(struct drm_dp_tunnel_state *tunnel_state,
			   u8 stream_id)
{
	if (!(tunnel_state->stream_mask & ~BIT(stream_id))) {
		free_tunnel_state(tunnel_state);
		return 0;
	}

	return resize_bw_array(tunnel_state,
			       tunnel_state->stream_mask,
			       tunnel_state->stream_mask & ~BIT(stream_id));
}

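/*
 * The stream_bw array above is kept compact: the BW of stream ID n is stored
 * at the index given by the number of lower stream IDs present in
 * stream_mask. For example with stream_mask == 0b1010 the BW of stream 3 is
 * at index stream_id_to_idx(0b1010, 3) == hweight32(0b0010) == 1.
 */
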
/**
 * drm_dp_tunnel_atomic_set_stream_bw - Set the BW for a DP tunnel stream
 * @state: Atomic state
 * @tunnel: DP tunnel containing the stream
 * @stream_id: Stream ID
 * @bw: BW of the stream
 *
 * Set a DP tunnel stream's required BW in the atomic state.
 *
 * Returns 0 in case of success, a negative error code otherwise.
 */
int drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state,
				       struct drm_dp_tunnel *tunnel,
				       u8 stream_id, int bw)
{
	struct drm_dp_tunnel_group_state *new_group_state;
	struct drm_dp_tunnel_state *tunnel_state;
	int err;

	if (drm_WARN_ON(tunnel->group->mgr->dev,
			stream_id > BITS_PER_TYPE(tunnel_state->stream_mask)))
		return -EINVAL;

	tun_dbg(tunnel,
		"Setting %d Mb/s for stream %d\n",
		DPTUN_BW_ARG(bw), stream_id);

	new_group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
	if (IS_ERR(new_group_state))
		return PTR_ERR(new_group_state);

	if (bw == 0) {
		tunnel_state = get_tunnel_state(new_group_state, tunnel);
		if (!tunnel_state)
			return 0;

		return clear_stream_bw(tunnel_state, stream_id);
	}

	tunnel_state = get_or_add_tunnel_state(new_group_state, tunnel);
	if (drm_WARN_ON(state->dev, !tunnel_state))
		return -EINVAL;

	err = set_stream_bw(tunnel_state, stream_id, bw);
	if (err)
		return err;

	check_unique_stream_ids(new_group_state);

	return 0;
}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_set_stream_bw);

/**
 * drm_dp_tunnel_atomic_get_required_bw - Get the BW required by a DP tunnel
 * @tunnel_state: Atomic state of the queried tunnel
 *
 * Calculate the BW required by a tunnel adding up the required BW of all
 * the streams in the tunnel.
 *
 * Return the total BW required by the tunnel.
 */
int drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state)
{
	int tunnel_bw = 0;
	int i;

	if (!tunnel_state || !tunnel_state->stream_mask)
		return 0;

	for (i = 0; i < hweight32(tunnel_state->stream_mask); i++)
		tunnel_bw += tunnel_state->stream_bw[i];

	return tunnel_bw;
}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_required_bw);

/**
 * drm_dp_tunnel_atomic_get_group_streams_in_state - Get mask of stream IDs in a group
 * @state: Atomic state
 * @tunnel: Tunnel object
 * @stream_mask: Mask of streams in @tunnel's group
 *
 * Get the mask of all the stream IDs in the tunnel group of @tunnel.
 *
 * Return 0 in case of success - with the stream IDs in @stream_mask - or a
 * negative error code in case of failure.
 */
int drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state,
						    const struct drm_dp_tunnel *tunnel,
						    u32 *stream_mask)
{
	struct drm_dp_tunnel_group_state *group_state;
	struct drm_dp_tunnel_state *tunnel_state;

	group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
	if (IS_ERR(group_state))
		return PTR_ERR(group_state);

	*stream_mask = 0;
	for_each_tunnel_state(group_state, tunnel_state)
		*stream_mask |= tunnel_state->stream_mask;

	return 0;
}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_group_streams_in_state);

static int
drm_dp_tunnel_atomic_check_group_bw(struct drm_dp_tunnel_group_state *new_group_state,
				    u32 *failed_stream_mask)
{
	struct drm_dp_tunnel_group *group = to_group(new_group_state->base.obj);
	struct drm_dp_tunnel_state *new_tunnel_state;
	u32 group_stream_mask = 0;
	int group_bw = 0;

	for_each_tunnel_state(new_group_state, new_tunnel_state) {
		struct drm_dp_tunnel *tunnel = new_tunnel_state->tunnel_ref.tunnel;
		int max_dprx_bw = get_max_dprx_bw(tunnel);
		int tunnel_bw = drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state);

		tun_dbg(tunnel,
			"%sRequired %d/%d Mb/s total for tunnel.\n",
			tunnel_bw > max_dprx_bw ? "Not enough BW: " : "",
			DPTUN_BW_ARG(tunnel_bw),
			DPTUN_BW_ARG(max_dprx_bw));

		if (tunnel_bw > max_dprx_bw) {
			*failed_stream_mask = new_tunnel_state->stream_mask;
			return -ENOSPC;
		}

		group_bw += min(roundup(tunnel_bw, tunnel->bw_granularity),
				max_dprx_bw);
		group_stream_mask |= new_tunnel_state->stream_mask;
	}

	tun_grp_dbg(group,
		    "%sRequired %d/%d Mb/s total for tunnel group.\n",
		    group_bw > group->available_bw ? "Not enough BW: " : "",
		    DPTUN_BW_ARG(group_bw),
		    DPTUN_BW_ARG(group->available_bw));

	if (group_bw > group->available_bw) {
		*failed_stream_mask = group_stream_mask;
		return -ENOSPC;
	}

	return 0;
}

/**
 * drm_dp_tunnel_atomic_check_stream_bws - Check BW limit for all streams in state
 * @state: Atomic state
 * @failed_stream_mask: Mask of stream IDs with a BW limit failure
 *
 * Check the required BW of each DP tunnel in @state against both the DPRX BW
 * limit of the tunnel and the BW limit of the tunnel group. Return a mask of
 * stream IDs in @failed_stream_mask once a check fails. The mask will contain
 * either all the streams in a tunnel (in case a DPRX BW limit check failed) or
 * all the streams in a tunnel group (in case a group BW limit check failed).
 *
 * Return 0 if all the BW limit checks passed, -ENOSPC in case a BW limit
 * check failed - with @failed_stream_mask containing the streams failing the
 * check - or a negative error code otherwise.
 */
int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
					  u32 *failed_stream_mask)
{
	struct drm_dp_tunnel_group_state *new_group_state;
	int i;

	for_each_new_group_in_state(state, new_group_state, i) {
		int ret;

		ret = drm_dp_tunnel_atomic_check_group_bw(new_group_state,
							  failed_stream_mask);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_check_stream_bws);

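/*
 * A sketch of the intended atomic check flow (illustrative only): while
 * computing an encoder/CRTC state the driver records the BW each stream needs
 * through its tunnel, then verifies all the limits once at the end of its
 * atomic_check:
 *
 *	// per stream, with its tunnel and required link BW in kB/s
 *	err = drm_dp_tunnel_atomic_set_stream_bw(state, tunnel, stream_id, bw);
 *	...
 *	// once per atomic state
 *	err = drm_dp_tunnel_atomic_check_stream_bws(state, &failed_streams);
 *	// on -ENOSPC, reduce the BW of the streams in failed_streams (for
 *	// instance by enabling compression) or fail the commit
 */
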
"Not enough BW: " : "",1830DPTUN_BW_ARG(group_bw),1831DPTUN_BW_ARG(group->available_bw));18321833if (group_bw > group->available_bw) {1834*failed_stream_mask = group_stream_mask;1835return -ENOSPC;1836}18371838return 0;1839}18401841/**1842* drm_dp_tunnel_atomic_check_stream_bws - Check BW limit for all streams in state1843* @state: Atomic state1844* @failed_stream_mask: Mask of stream IDs with a BW limit failure1845*1846* Check the required BW of each DP tunnel in @state against both the DPRX BW1847* limit of the tunnel and the BW limit of the tunnel group. Return a mask of1848* stream IDs in @failed_stream_mask once a check fails. The mask will contain1849* either all the streams in a tunnel (in case a DPRX BW limit check failed) or1850* all the streams in a tunnel group (in case a group BW limit check failed).1851*1852* Return 0 if all the BW limit checks passed, -ENOSPC in case a BW limit1853* check failed - with @failed_stream_mask containing the streams failing the1854* check - or a negative error code otherwise.1855*/1856int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,1857u32 *failed_stream_mask)1858{1859struct drm_dp_tunnel_group_state *new_group_state;1860int i;18611862for_each_new_group_in_state(state, new_group_state, i) {1863int ret;18641865ret = drm_dp_tunnel_atomic_check_group_bw(new_group_state,1866failed_stream_mask);1867if (ret)1868return ret;1869}18701871return 0;1872}1873EXPORT_SYMBOL(drm_dp_tunnel_atomic_check_stream_bws);18741875static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)1876{1877int i;18781879for (i = 0; i < mgr->group_count; i++) {1880cleanup_group(&mgr->groups[i]);1881drm_WARN_ON(mgr->dev, !list_empty(&mgr->groups[i].tunnels));1882}18831884#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG1885ref_tracker_dir_exit(&mgr->ref_tracker);1886#endif18871888kfree(mgr->groups);1889kfree(mgr);1890}18911892/**1893* drm_dp_tunnel_mgr_create - Create a DP tunnel manager1894* @dev: DRM device object1895* @max_group_count: Maximum number of tunnel groups1896*1897* Creates a DP tunnel manager for @dev.1898*1899* Returns a pointer to the tunnel manager if created successfully or error1900* pointer in case of failure.1901*/1902struct drm_dp_tunnel_mgr *1903drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)1904{1905struct drm_dp_tunnel_mgr *mgr;1906int i;19071908mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);1909if (!mgr)1910return ERR_PTR(-ENOMEM);19111912mgr->dev = dev;1913init_waitqueue_head(&mgr->bw_req_queue);19141915mgr->groups = kcalloc(max_group_count, sizeof(*mgr->groups), GFP_KERNEL);1916if (!mgr->groups) {1917kfree(mgr);19181919return ERR_PTR(-ENOMEM);1920}19211922#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG1923ref_tracker_dir_init(&mgr->ref_tracker, 16, "drm_dptun");1924#endif19251926for (i = 0; i < max_group_count; i++) {1927if (!init_group(mgr, &mgr->groups[i])) {1928destroy_mgr(mgr);19291930return ERR_PTR(-ENOMEM);1931}19321933mgr->group_count++;1934}19351936return mgr;1937}1938EXPORT_SYMBOL(drm_dp_tunnel_mgr_create);19391940/**1941* drm_dp_tunnel_mgr_destroy - Destroy DP tunnel manager1942* @mgr: Tunnel manager object1943*1944* Destroy the tunnel manager.1945*/1946void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr)1947{1948destroy_mgr(mgr);1949}1950EXPORT_SYMBOL(drm_dp_tunnel_mgr_destroy);195119521953