Path: blob/master/include/drm/display/drm_dp_mst_helper.h
26285 views
/*1* Copyright © 2014 Red Hat.2*3* Permission to use, copy, modify, distribute, and sell this software and its4* documentation for any purpose is hereby granted without fee, provided that5* the above copyright notice appear in all copies and that both that copyright6* notice and this permission notice appear in supporting documentation, and7* that the name of the copyright holders not be used in advertising or8* publicity pertaining to distribution of the software without specific,9* written prior permission. The copyright holders make no representations10* about the suitability of this software for any purpose. It is provided "as11* is" without express or implied warranty.12*13* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,14* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO15* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR16* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,17* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER18* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE19* OF THIS SOFTWARE.20*/21#ifndef _DRM_DP_MST_HELPER_H_22#define _DRM_DP_MST_HELPER_H_2324#include <linux/types.h>25#include <drm/display/drm_dp_helper.h>26#include <drm/drm_atomic.h>27#include <drm/drm_fixed.h>2829#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)30#include <linux/stackdepot.h>31#include <linux/timekeeping.h>3233enum drm_dp_mst_topology_ref_type {34DRM_DP_MST_TOPOLOGY_REF_GET,35DRM_DP_MST_TOPOLOGY_REF_PUT,36};3738struct drm_dp_mst_topology_ref_history {39struct drm_dp_mst_topology_ref_entry {40enum drm_dp_mst_topology_ref_type type;41int count;42ktime_t ts_nsec;43depot_stack_handle_t backtrace;44} *entries;45int len;46};47#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */4849enum drm_dp_mst_payload_allocation {50DRM_DP_MST_PAYLOAD_ALLOCATION_NONE,51DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL,52DRM_DP_MST_PAYLOAD_ALLOCATION_DFP,53DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE,54};5556struct drm_dp_mst_branch;5758/**59* struct drm_dp_mst_port - MST port60* @port_num: port number61* @input: if this port is an input port. Protected by62* &drm_dp_mst_topology_mgr.base.lock.63* @mcs: message capability status - DP 1.2 spec. Protected by64* &drm_dp_mst_topology_mgr.base.lock.65* @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by66* &drm_dp_mst_topology_mgr.base.lock.67* @pdt: Peer Device Type. Protected by68* &drm_dp_mst_topology_mgr.base.lock.69* @ldps: Legacy Device Plug Status. Protected by70* &drm_dp_mst_topology_mgr.base.lock.71* @dpcd_rev: DPCD revision of device on this port. Protected by72* &drm_dp_mst_topology_mgr.base.lock.73* @num_sdp_streams: Number of simultaneous streams. Protected by74* &drm_dp_mst_topology_mgr.base.lock.75* @num_sdp_stream_sinks: Number of stream sinks. Protected by76* &drm_dp_mst_topology_mgr.base.lock.77* @full_pbn: Max possible bandwidth for this port. Protected by78* &drm_dp_mst_topology_mgr.base.lock.79* @next: link to next port on this branch device80* @aux: i2c aux transport to talk to device connected to this port, protected81* by &drm_dp_mst_topology_mgr.base.lock.82* @passthrough_aux: parent aux to which DSC pass-through requests should be83* sent, only set if DSC pass-through is possible.84* @parent: branch device parent of this port85* @connector: DRM connector this port is connected to. Protected by86* &drm_dp_mst_topology_mgr.base.lock.87* @mgr: topology manager this port lives under.88*89* This structure represents an MST port endpoint on a device somewhere90* in the MST topology.91*/92struct drm_dp_mst_port {93/**94* @topology_kref: refcount for this port's lifetime in the topology,95* only the DP MST helpers should need to touch this96*/97struct kref topology_kref;9899/**100* @malloc_kref: refcount for the memory allocation containing this101* structure. See drm_dp_mst_get_port_malloc() and102* drm_dp_mst_put_port_malloc().103*/104struct kref malloc_kref;105106#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)107/**108* @topology_ref_history: A history of each topology109* reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.110*/111struct drm_dp_mst_topology_ref_history topology_ref_history;112#endif113114u8 port_num;115bool input;116bool mcs;117bool ddps;118u8 pdt;119bool ldps;120u8 dpcd_rev;121u8 num_sdp_streams;122u8 num_sdp_stream_sinks;123uint16_t full_pbn;124struct list_head next;125/**126* @mstb: the branch device connected to this port, if there is one.127* This should be considered protected for reading by128* &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:129* &drm_dp_mst_topology_mgr.up_req_work and130* &drm_dp_mst_topology_mgr.work, which do not grab131* &drm_dp_mst_topology_mgr.lock during reads but are the only132* updaters of this list and are protected from writing concurrently133* by &drm_dp_mst_topology_mgr.probe_lock.134*/135struct drm_dp_mst_branch *mstb;136struct drm_dp_aux aux; /* i2c bus for this port? */137struct drm_dp_aux *passthrough_aux;138struct drm_dp_mst_branch *parent;139140struct drm_connector *connector;141struct drm_dp_mst_topology_mgr *mgr;142143/**144* @cached_edid: for DP logical ports - make tiling work by ensuring145* that the EDID for all connectors is read immediately.146*/147const struct drm_edid *cached_edid;148149/**150* @fec_capable: bool indicating if FEC can be supported up to that151* point in the MST topology.152*/153bool fec_capable;154};155156/* sideband msg header - not bit struct */157struct drm_dp_sideband_msg_hdr {158u8 lct;159u8 lcr;160u8 rad[8];161bool broadcast;162bool path_msg;163u8 msg_len;164bool somt;165bool eomt;166bool seqno;167};168169struct drm_dp_sideband_msg_rx {170u8 chunk[48];171u8 msg[256];172u8 curchunk_len;173u8 curchunk_idx; /* chunk we are parsing now */174u8 curchunk_hdrlen;175u8 curlen; /* total length of the msg */176bool have_somt;177bool have_eomt;178struct drm_dp_sideband_msg_hdr initial_hdr;179};180181/**182* struct drm_dp_mst_branch - MST branch device.183* @rad: Relative Address to talk to this branch device.184* @lct: Link count total to talk to this branch device.185* @num_ports: number of ports on the branch.186* @port_parent: pointer to the port parent, NULL if toplevel.187* @mgr: topology manager for this branch device.188* @link_address_sent: if a link address message has been sent to this device yet.189* @guid: guid for DP 1.2 branch device. port under this branch can be190* identified by port #.191*192* This structure represents an MST branch device, there is one193* primary branch device at the root, along with any other branches connected194* to downstream port of parent branches.195*/196struct drm_dp_mst_branch {197/**198* @topology_kref: refcount for this branch device's lifetime in the199* topology, only the DP MST helpers should need to touch this200*/201struct kref topology_kref;202203/**204* @malloc_kref: refcount for the memory allocation containing this205* structure. See drm_dp_mst_get_mstb_malloc() and206* drm_dp_mst_put_mstb_malloc().207*/208struct kref malloc_kref;209210#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)211/**212* @topology_ref_history: A history of each topology213* reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.214*/215struct drm_dp_mst_topology_ref_history topology_ref_history;216#endif217218/**219* @destroy_next: linked-list entry used by220* drm_dp_delayed_destroy_work()221*/222struct list_head destroy_next;223224/**225* @rad: Relative Address of the MST branch.226* For &drm_dp_mst_topology_mgr.mst_primary, it's rad[8] are all 0,227* unset and unused. For MST branches connected after mst_primary,228* in each element of rad[] the nibbles are ordered by the most229* signifcant 4 bits first and the least significant 4 bits second.230*/231u8 rad[8];232u8 lct;233int num_ports;234235/**236* @ports: the list of ports on this branch device. This should be237* considered protected for reading by &drm_dp_mst_topology_mgr.lock.238* There are two exceptions to this:239* &drm_dp_mst_topology_mgr.up_req_work and240* &drm_dp_mst_topology_mgr.work, which do not grab241* &drm_dp_mst_topology_mgr.lock during reads but are the only242* updaters of this list and are protected from updating the list243* concurrently by @drm_dp_mst_topology_mgr.probe_lock244*/245struct list_head ports;246247struct drm_dp_mst_port *port_parent;248struct drm_dp_mst_topology_mgr *mgr;249250bool link_address_sent;251252/* global unique identifier to identify branch devices */253guid_t guid;254};255256257struct drm_dp_nak_reply {258guid_t guid;259u8 reason;260u8 nak_data;261};262263struct drm_dp_link_address_ack_reply {264guid_t guid;265u8 nports;266struct drm_dp_link_addr_reply_port {267bool input_port;268u8 peer_device_type;269u8 port_number;270bool mcs;271bool ddps;272bool legacy_device_plug_status;273u8 dpcd_revision;274guid_t peer_guid;275u8 num_sdp_streams;276u8 num_sdp_stream_sinks;277} ports[16];278};279280struct drm_dp_remote_dpcd_read_ack_reply {281u8 port_number;282u8 num_bytes;283u8 bytes[255];284};285286struct drm_dp_remote_dpcd_write_ack_reply {287u8 port_number;288};289290struct drm_dp_remote_dpcd_write_nak_reply {291u8 port_number;292u8 reason;293u8 bytes_written_before_failure;294};295296struct drm_dp_remote_i2c_read_ack_reply {297u8 port_number;298u8 num_bytes;299u8 bytes[255];300};301302struct drm_dp_remote_i2c_read_nak_reply {303u8 port_number;304u8 nak_reason;305u8 i2c_nak_transaction;306};307308struct drm_dp_remote_i2c_write_ack_reply {309u8 port_number;310};311312struct drm_dp_query_stream_enc_status_ack_reply {313/* Bit[23:16]- Stream Id */314u8 stream_id;315316/* Bit[15]- Signed */317bool reply_signed;318319/* Bit[10:8]- Stream Output Sink Type */320bool unauthorizable_device_present;321bool legacy_device_present;322bool query_capable_device_present;323324/* Bit[12:11]- Stream Output CP Type */325bool hdcp_1x_device_present;326bool hdcp_2x_device_present;327328/* Bit[4]- Stream Authentication */329bool auth_completed;330331/* Bit[3]- Stream Encryption */332bool encryption_enabled;333334/* Bit[2]- Stream Repeater Function Present */335bool repeater_present;336337/* Bit[1:0]- Stream State */338u8 state;339};340341#define DRM_DP_MAX_SDP_STREAMS 16342struct drm_dp_allocate_payload {343u8 port_number;344u8 number_sdp_streams;345u8 vcpi;346u16 pbn;347u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];348};349350struct drm_dp_allocate_payload_ack_reply {351u8 port_number;352u8 vcpi;353u16 allocated_pbn;354};355356struct drm_dp_connection_status_notify {357guid_t guid;358u8 port_number;359bool legacy_device_plug_status;360bool displayport_device_plug_status;361bool message_capability_status;362bool input_port;363u8 peer_device_type;364};365366struct drm_dp_remote_dpcd_read {367u8 port_number;368u32 dpcd_address;369u8 num_bytes;370};371372struct drm_dp_remote_dpcd_write {373u8 port_number;374u32 dpcd_address;375u8 num_bytes;376u8 *bytes;377};378379#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4380struct drm_dp_remote_i2c_read {381u8 num_transactions;382u8 port_number;383struct drm_dp_remote_i2c_read_tx {384u8 i2c_dev_id;385u8 num_bytes;386u8 *bytes;387u8 no_stop_bit;388u8 i2c_transaction_delay;389} transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];390u8 read_i2c_device_id;391u8 num_bytes_read;392};393394struct drm_dp_remote_i2c_write {395u8 port_number;396u8 write_i2c_device_id;397u8 num_bytes;398u8 *bytes;399};400401struct drm_dp_query_stream_enc_status {402u8 stream_id;403u8 client_id[7]; /* 56-bit nonce */404u8 stream_event;405bool valid_stream_event;406u8 stream_behavior;407u8 valid_stream_behavior;408};409410/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */411struct drm_dp_port_number_req {412u8 port_number;413};414415struct drm_dp_enum_path_resources_ack_reply {416u8 port_number;417bool fec_capable;418u16 full_payload_bw_number;419u16 avail_payload_bw_number;420};421422/* covers POWER_DOWN_PHY, POWER_UP_PHY */423struct drm_dp_port_number_rep {424u8 port_number;425};426427struct drm_dp_query_payload {428u8 port_number;429u8 vcpi;430};431432struct drm_dp_resource_status_notify {433u8 port_number;434guid_t guid;435u16 available_pbn;436};437438struct drm_dp_query_payload_ack_reply {439u8 port_number;440u16 allocated_pbn;441};442443struct drm_dp_sideband_msg_req_body {444u8 req_type;445union ack_req {446struct drm_dp_connection_status_notify conn_stat;447struct drm_dp_port_number_req port_num;448struct drm_dp_resource_status_notify resource_stat;449450struct drm_dp_query_payload query_payload;451struct drm_dp_allocate_payload allocate_payload;452453struct drm_dp_remote_dpcd_read dpcd_read;454struct drm_dp_remote_dpcd_write dpcd_write;455456struct drm_dp_remote_i2c_read i2c_read;457struct drm_dp_remote_i2c_write i2c_write;458459struct drm_dp_query_stream_enc_status enc_status;460} u;461};462463struct drm_dp_sideband_msg_reply_body {464u8 reply_type;465u8 req_type;466union ack_replies {467struct drm_dp_nak_reply nak;468struct drm_dp_link_address_ack_reply link_addr;469struct drm_dp_port_number_rep port_number;470471struct drm_dp_enum_path_resources_ack_reply path_resources;472struct drm_dp_allocate_payload_ack_reply allocate_payload;473struct drm_dp_query_payload_ack_reply query_payload;474475struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;476struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;477struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;478479struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;480struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;481struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;482483struct drm_dp_query_stream_enc_status_ack_reply enc_status;484} u;485};486487/* msg is queued to be put into a slot */488#define DRM_DP_SIDEBAND_TX_QUEUED 0489/* msg has started transmitting on a slot - still on msgq */490#define DRM_DP_SIDEBAND_TX_START_SEND 1491/* msg has finished transmitting on a slot - removed from msgq only in slot */492#define DRM_DP_SIDEBAND_TX_SENT 2493/* msg has received a response - removed from slot */494#define DRM_DP_SIDEBAND_TX_RX 3495#define DRM_DP_SIDEBAND_TX_TIMEOUT 4496497struct drm_dp_sideband_msg_tx {498u8 msg[256];499u8 chunk[48];500u8 cur_offset;501u8 cur_len;502struct drm_dp_mst_branch *dst;503struct list_head next;504int seqno;505int state;506bool path_msg;507struct drm_dp_sideband_msg_reply_body reply;508};509510/* sideband msg handler */511struct drm_dp_mst_topology_mgr;512struct drm_dp_mst_topology_cbs {513/* create a connector for a port */514struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);515/*516* Checks for any pending MST interrupts, passing them to MST core for517* processing, the same way an HPD IRQ pulse handler would do this.518* If provided MST core calls this callback from a poll-waiting loop519* when waiting for MST down message replies. The driver is expected520* to guard against a race between this callback and the driver's HPD521* IRQ pulse handler.522*/523void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);524};525526#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)527528/**529* struct drm_dp_mst_atomic_payload - Atomic state struct for an MST payload530*531* The primary atomic state structure for a given MST payload. Stores information like current532* bandwidth allocation, intended action for this payload, etc.533*/534struct drm_dp_mst_atomic_payload {535/** @port: The MST port assigned to this payload */536struct drm_dp_mst_port *port;537538/**539* @vc_start_slot: The time slot that this payload starts on. Because payload start slots540* can't be determined ahead of time, the contents of this value are UNDEFINED at atomic541* check time. This shouldn't usually matter, as the start slot should never be relevant for542* atomic state computations.543*544* Since this value is determined at commit time instead of check time, this value is545* protected by the MST helpers ensuring that async commits operating on the given topology546* never run in parallel. In the event that a driver does need to read this value (e.g. to547* inform hardware of the starting timeslot for a payload), the driver may either:548*549* * Read this field during the atomic commit after550* drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the551* previous MST states payload start slots have been copied over to the new state. Note552* that a new start slot won't be assigned/removed from this payload until553* drm_dp_add_payload_part1()/drm_dp_remove_payload_part2() have been called.554* * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to555* get committed to hardware by calling drm_crtc_commit_wait() on each of the556* &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps.557*558* If neither of the two above solutions suffice (e.g. the driver needs to read the start559* slot in the middle of an atomic commit without waiting for some reason), then drivers560* should cache this value themselves after changing payloads.561*/562s8 vc_start_slot;563564/** @vcpi: The Virtual Channel Payload Identifier */565u8 vcpi;566/**567* @time_slots:568* The number of timeslots allocated to this payload from the source DP Tx to569* the immediate downstream DP Rx570*/571int time_slots;572/** @pbn: The payload bandwidth for this payload */573int pbn;574575/** @delete: Whether or not we intend to delete this payload during this atomic commit */576bool delete : 1;577/** @dsc_enabled: Whether or not this payload has DSC enabled */578bool dsc_enabled : 1;579580/** @payload_allocation_status: The allocation status of this payload */581enum drm_dp_mst_payload_allocation payload_allocation_status;582583/** @next: The list node for this payload */584struct list_head next;585};586587/**588* struct drm_dp_mst_topology_state - DisplayPort MST topology atomic state589*590* This struct represents the atomic state of the toplevel DisplayPort MST manager591*/592struct drm_dp_mst_topology_state {593/** @base: Base private state for atomic */594struct drm_private_state base;595596/** @mgr: The topology manager */597struct drm_dp_mst_topology_mgr *mgr;598599/**600* @pending_crtc_mask: A bitmask of all CRTCs this topology state touches, drivers may601* modify this to add additional dependencies if needed.602*/603u32 pending_crtc_mask;604/**605* @commit_deps: A list of all CRTC commits affecting this topology, this field isn't606* populated until drm_dp_mst_atomic_wait_for_dependencies() is called.607*/608struct drm_crtc_commit **commit_deps;609/** @num_commit_deps: The number of CRTC commits in @commit_deps */610size_t num_commit_deps;611612/** @payload_mask: A bitmask of allocated VCPIs, used for VCPI assignments */613u32 payload_mask;614/** @payloads: The list of payloads being created/destroyed in this state */615struct list_head payloads;616617/** @total_avail_slots: The total number of slots this topology can handle (63 or 64) */618u8 total_avail_slots;619/** @start_slot: The first usable time slot in this topology (1 or 0) */620u8 start_slot;621622/**623* @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this624* out itself.625*/626fixed20_12 pbn_div;627};628629#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)630631/**632* struct drm_dp_mst_topology_mgr - DisplayPort MST manager633*634* This struct represents the toplevel displayport MST topology manager.635* There should be one instance of this for every MST capable DP connector636* on the GPU.637*/638struct drm_dp_mst_topology_mgr {639/**640* @base: Base private object for atomic641*/642struct drm_private_obj base;643644/**645* @dev: device pointer for adding i2c devices etc.646*/647struct drm_device *dev;648/**649* @cbs: callbacks for connector addition and destruction.650*/651const struct drm_dp_mst_topology_cbs *cbs;652/**653* @max_dpcd_transaction_bytes: maximum number of bytes to read/write654* in one go.655*/656int max_dpcd_transaction_bytes;657/**658* @aux: AUX channel for the DP MST connector this topolgy mgr is659* controlling.660*/661struct drm_dp_aux *aux;662/**663* @max_payloads: maximum number of payloads the GPU can generate.664*/665int max_payloads;666/**667* @conn_base_id: DRM connector ID this mgr is connected to. Only used668* to build the MST connector path value.669*/670int conn_base_id;671672/**673* @up_req_recv: Message receiver state for up requests.674*/675struct drm_dp_sideband_msg_rx up_req_recv;676677/**678* @down_rep_recv: Message receiver state for replies to down679* requests.680*/681struct drm_dp_sideband_msg_rx down_rep_recv;682683/**684* @lock: protects @mst_state, @mst_primary, @dpcd, and685* @payload_id_table_cleared.686*/687struct mutex lock;688689/**690* @probe_lock: Prevents @work and @up_req_work, the only writers of691* &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing692* while they update the topology.693*/694struct mutex probe_lock;695696/**697* @mst_state: If this manager is enabled for an MST capable port. False698* if no MST sink/branch devices is connected.699*/700bool mst_state : 1;701702/**703* @payload_id_table_cleared: Whether or not we've cleared the payload704* ID table for @mst_primary. Protected by @lock.705*/706bool payload_id_table_cleared : 1;707708/**709* @reset_rx_state: The down request's reply and up request message710* receiver state must be reset, after the topology manager got711* removed. Protected by @lock.712*/713bool reset_rx_state : 1;714715/**716* @payload_count: The number of currently active payloads in hardware. This value is only717* intended to be used internally by MST helpers for payload tracking, and is only safe to718* read/write from the atomic commit (not check) context.719*/720u8 payload_count;721722/**723* @next_start_slot: The starting timeslot to use for new VC payloads. This value is used724* internally by MST helpers for payload tracking, and is only safe to read/write from the725* atomic commit (not check) context.726*/727u8 next_start_slot;728729/**730* @mst_primary: Pointer to the primary/first branch device.731*/732struct drm_dp_mst_branch *mst_primary;733734/**735* @dpcd: Cache of DPCD for primary port.736*/737u8 dpcd[DP_RECEIVER_CAP_SIZE];738/**739* @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.740*/741u8 sink_count;742743/**744* @funcs: Atomic helper callbacks745*/746const struct drm_private_state_funcs *funcs;747748/**749* @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state750*/751struct mutex qlock;752753/**754* @tx_msg_downq: List of pending down requests755*/756struct list_head tx_msg_downq;757758/**759* @tx_waitq: Wait to queue stall for the tx worker.760*/761wait_queue_head_t tx_waitq;762/**763* @work: Probe work.764*/765struct work_struct work;766/**767* @tx_work: Sideband transmit worker. This can nest within the main768* @work worker for each transaction @work launches.769*/770struct work_struct tx_work;771772/**773* @destroy_port_list: List of to be destroyed connectors.774*/775struct list_head destroy_port_list;776/**777* @destroy_branch_device_list: List of to be destroyed branch778* devices.779*/780struct list_head destroy_branch_device_list;781/**782* @delayed_destroy_lock: Protects @destroy_port_list and783* @destroy_branch_device_list.784*/785struct mutex delayed_destroy_lock;786787/**788* @delayed_destroy_wq: Workqueue used for delayed_destroy_work items.789* A dedicated WQ makes it possible to drain any requeued work items790* on it.791*/792struct workqueue_struct *delayed_destroy_wq;793794/**795* @delayed_destroy_work: Work item to destroy MST port and branch796* devices, needed to avoid locking inversion.797*/798struct work_struct delayed_destroy_work;799800/**801* @up_req_list: List of pending up requests from the topology that802* need to be processed, in chronological order.803*/804struct list_head up_req_list;805/**806* @up_req_lock: Protects @up_req_list807*/808struct mutex up_req_lock;809/**810* @up_req_work: Work item to process up requests received from the811* topology. Needed to avoid blocking hotplug handling and sideband812* transmissions.813*/814struct work_struct up_req_work;815816#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)817/**818* @topology_ref_history_lock: protects819* &drm_dp_mst_port.topology_ref_history and820* &drm_dp_mst_branch.topology_ref_history.821*/822struct mutex topology_ref_history_lock;823#endif824};825826int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,827struct drm_device *dev, struct drm_dp_aux *aux,828int max_dpcd_transaction_bytes,829int max_payloads, int conn_base_id);830831void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);832833/**834* enum drm_dp_mst_mode - sink's MST mode capability835*/836enum drm_dp_mst_mode {837/**838* @DRM_DP_SST: The sink does not support MST nor single stream sideband839* messaging.840*/841DRM_DP_SST,842/**843* @DRM_DP_MST: Sink supports MST, more than one stream and single844* stream sideband messaging.845*/846DRM_DP_MST,847/**848* @DRM_DP_SST_SIDEBAND_MSG: Sink supports only one stream and single849* stream sideband messaging.850*/851DRM_DP_SST_SIDEBAND_MSG,852};853854enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);855int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);856857int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr,858const u8 *esi,859u8 *ack,860bool *handled);861void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr);862863int864drm_dp_mst_detect_port(struct drm_connector *connector,865struct drm_modeset_acquire_ctx *ctx,866struct drm_dp_mst_topology_mgr *mgr,867struct drm_dp_mst_port *port);868869const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector,870struct drm_dp_mst_topology_mgr *mgr,871struct drm_dp_mst_port *port);872struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,873struct drm_dp_mst_topology_mgr *mgr,874struct drm_dp_mst_port *port);875876fixed20_12 drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count);877878int drm_dp_calc_pbn_mode(int clock, int bpp);879880void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);881882int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,883struct drm_dp_mst_topology_state *mst_state,884struct drm_dp_mst_atomic_payload *payload);885int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,886struct drm_dp_mst_atomic_payload *payload);887void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr,888struct drm_dp_mst_topology_state *mst_state,889struct drm_dp_mst_atomic_payload *payload);890void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr,891struct drm_dp_mst_topology_state *mst_state,892const struct drm_dp_mst_atomic_payload *old_payload,893struct drm_dp_mst_atomic_payload *new_payload);894895int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);896897void drm_dp_mst_dump_topology(struct seq_file *m,898struct drm_dp_mst_topology_mgr *mgr);899900void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr);901902void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);903int __must_check904drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,905bool sync);906907ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,908unsigned int offset, void *buffer, size_t size);909ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,910unsigned int offset, void *buffer, size_t size);911912int drm_dp_mst_connector_late_register(struct drm_connector *connector,913struct drm_dp_mst_port *port);914void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,915struct drm_dp_mst_port *port);916917struct drm_dp_mst_topology_state *918drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,919struct drm_dp_mst_topology_mgr *mgr);920struct drm_dp_mst_topology_state *921drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,922struct drm_dp_mst_topology_mgr *mgr);923struct drm_dp_mst_topology_state *924drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,925struct drm_dp_mst_topology_mgr *mgr);926struct drm_dp_mst_atomic_payload *927drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,928struct drm_dp_mst_port *port);929bool drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr,930struct drm_dp_mst_port *port,931struct drm_dp_mst_port *parent);932int __must_check933drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,934struct drm_dp_mst_topology_mgr *mgr,935struct drm_dp_mst_port *port, int pbn);936int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,937struct drm_dp_mst_port *port,938int pbn, bool enable);939int __must_check940drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,941struct drm_dp_mst_topology_mgr *mgr);942int __must_check943drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,944struct drm_dp_mst_topology_mgr *mgr,945struct drm_dp_mst_port *port);946void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state);947int __must_check drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state);948int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,949struct drm_dp_mst_port *port, bool power_up);950int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,951struct drm_dp_mst_port *port,952struct drm_dp_query_stream_enc_status_ack_reply *status);953int __must_check drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state,954struct drm_dp_mst_topology_mgr *mgr,955struct drm_dp_mst_topology_state *mst_state,956struct drm_dp_mst_port **failing_port);957int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);958int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,959struct drm_dp_mst_topology_mgr *mgr);960961void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);962void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);963964static inline965bool drm_dp_mst_port_is_logical(struct drm_dp_mst_port *port)966{967return port->port_num >= DP_MST_LOGICAL_PORT_0;968}969970struct drm_dp_aux *drm_dp_mst_aux_for_parent(struct drm_dp_mst_port *port);971struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);972973static inline struct drm_dp_mst_topology_state *974to_drm_dp_mst_topology_state(struct drm_private_state *state)975{976return container_of(state, struct drm_dp_mst_topology_state, base);977}978979extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;980981/**982* __drm_dp_mst_state_iter_get - private atomic state iterator function for983* macro-internal use984* @state: &struct drm_atomic_state pointer985* @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor986* @old_state: optional pointer to the old &struct drm_dp_mst_topology_state987* iteration cursor988* @new_state: optional pointer to the new &struct drm_dp_mst_topology_state989* iteration cursor990* @i: int iteration cursor, for macro-internal use991*992* Used by for_each_oldnew_mst_mgr_in_state(),993* for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't994* call this directly.995*996* Returns:997* True if the current &struct drm_private_obj is a &struct998* drm_dp_mst_topology_mgr, false otherwise.999*/1000static inline bool1001__drm_dp_mst_state_iter_get(struct drm_atomic_state *state,1002struct drm_dp_mst_topology_mgr **mgr,1003struct drm_dp_mst_topology_state **old_state,1004struct drm_dp_mst_topology_state **new_state,1005int i)1006{1007struct __drm_private_objs_state *objs_state = &state->private_objs[i];10081009if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)1010return false;10111012*mgr = to_dp_mst_topology_mgr(objs_state->ptr);1013if (old_state)1014*old_state = to_dp_mst_topology_state(objs_state->old_state);1015if (new_state)1016*new_state = to_dp_mst_topology_state(objs_state->new_state);10171018return true;1019}10201021/**1022* for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology1023* managers in an atomic update1024* @__state: &struct drm_atomic_state pointer1025* @mgr: &struct drm_dp_mst_topology_mgr iteration cursor1026* @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old1027* state1028* @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new1029* state1030* @__i: int iteration cursor, for macro-internal use1031*1032* This iterates over all DRM DP MST topology managers in an atomic update,1033* tracking both old and new state. This is useful in places where the state1034* delta needs to be considered, for example in atomic check functions.1035*/1036#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \1037for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \1038for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))10391040/**1041* for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers1042* in an atomic update1043* @__state: &struct drm_atomic_state pointer1044* @mgr: &struct drm_dp_mst_topology_mgr iteration cursor1045* @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old1046* state1047* @__i: int iteration cursor, for macro-internal use1048*1049* This iterates over all DRM DP MST topology managers in an atomic update,1050* tracking only the old state. This is useful in disable functions, where we1051* need the old state the hardware is still in.1052*/1053#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \1054for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \1055for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))10561057/**1058* for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers1059* in an atomic update1060* @__state: &struct drm_atomic_state pointer1061* @mgr: &struct drm_dp_mst_topology_mgr iteration cursor1062* @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new1063* state1064* @__i: int iteration cursor, for macro-internal use1065*1066* This iterates over all DRM DP MST topology managers in an atomic update,1067* tracking only the new state. This is useful in enable functions, where we1068* need the new state the hardware should be in when the atomic commit1069* operation has completed.1070*/1071#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \1072for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \1073for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))10741075#endif107610771078