/*
 * Path: blob/master/drivers/gpu/drm/display/drm_dp_mst_topology.c
 * (26493 views)
 */
/*1* Copyright © 2014 Red Hat2*3* Permission to use, copy, modify, distribute, and sell this software and its4* documentation for any purpose is hereby granted without fee, provided that5* the above copyright notice appear in all copies and that both that copyright6* notice and this permission notice appear in supporting documentation, and7* that the name of the copyright holders not be used in advertising or8* publicity pertaining to distribution of the software without specific,9* written prior permission. The copyright holders make no representations10* about the suitability of this software for any purpose. It is provided "as11* is" without express or implied warranty.12*13* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,14* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO15* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR16* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,17* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER18* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE19* OF THIS SOFTWARE.20*/2122#include <linux/bitfield.h>23#include <linux/delay.h>24#include <linux/errno.h>25#include <linux/export.h>26#include <linux/i2c.h>27#include <linux/init.h>28#include <linux/kernel.h>29#include <linux/random.h>30#include <linux/sched.h>31#include <linux/seq_file.h>3233#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)34#include <linux/stacktrace.h>35#include <linux/sort.h>36#include <linux/timekeeping.h>37#include <linux/math64.h>38#endif3940#include <drm/display/drm_dp_mst_helper.h>41#include <drm/drm_atomic.h>42#include <drm/drm_atomic_helper.h>43#include <drm/drm_drv.h>44#include <drm/drm_edid.h>45#include <drm/drm_fixed.h>46#include <drm/drm_print.h>47#include <drm/drm_probe_helper.h>4849#include "drm_dp_helper_internal.h"50#include "drm_dp_mst_topology_internal.h"5152/**53* DOC: dp mst helper54*55* These 
functions contain parts of the DisplayPort 1.2a MultiStream Transport56* protocol. The helpers contain a topology manager and bandwidth manager.57* The helpers encapsulate the sending and received of sideband msgs.58*/59struct drm_dp_pending_up_req {60struct drm_dp_sideband_msg_hdr hdr;61struct drm_dp_sideband_msg_req_body msg;62struct list_head next;63};6465static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,66char *buf);6768static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);6970static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,71struct drm_dp_mst_port *port,72int offset, int size, u8 *bytes);73static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,74struct drm_dp_mst_port *port,75int offset, int size, u8 *bytes);7677static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,78struct drm_dp_mst_branch *mstb);7980static void81drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,82struct drm_dp_mst_branch *mstb);8384static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,85struct drm_dp_mst_branch *mstb,86struct drm_dp_mst_port *port);87static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,88guid_t *guid);8990static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);91static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);92static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);9394static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,95struct drm_dp_mst_branch *branch);9697#define DBG_PREFIX "[dp_mst]"9899#define DP_STR(x) [DP_ ## x] = #x100101static const char *drm_dp_mst_req_type_str(u8 req_type)102{103static const char * const req_type_str[] = 
{104DP_STR(GET_MSG_TRANSACTION_VERSION),105DP_STR(LINK_ADDRESS),106DP_STR(CONNECTION_STATUS_NOTIFY),107DP_STR(ENUM_PATH_RESOURCES),108DP_STR(ALLOCATE_PAYLOAD),109DP_STR(QUERY_PAYLOAD),110DP_STR(RESOURCE_STATUS_NOTIFY),111DP_STR(CLEAR_PAYLOAD_ID_TABLE),112DP_STR(REMOTE_DPCD_READ),113DP_STR(REMOTE_DPCD_WRITE),114DP_STR(REMOTE_I2C_READ),115DP_STR(REMOTE_I2C_WRITE),116DP_STR(POWER_UP_PHY),117DP_STR(POWER_DOWN_PHY),118DP_STR(SINK_EVENT_NOTIFY),119DP_STR(QUERY_STREAM_ENC_STATUS),120};121122if (req_type >= ARRAY_SIZE(req_type_str) ||123!req_type_str[req_type])124return "unknown";125126return req_type_str[req_type];127}128129#undef DP_STR130#define DP_STR(x) [DP_NAK_ ## x] = #x131132static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)133{134static const char * const nak_reason_str[] = {135DP_STR(WRITE_FAILURE),136DP_STR(INVALID_READ),137DP_STR(CRC_FAILURE),138DP_STR(BAD_PARAM),139DP_STR(DEFER),140DP_STR(LINK_FAILURE),141DP_STR(NO_RESOURCES),142DP_STR(DPCD_FAIL),143DP_STR(I2C_NAK),144DP_STR(ALLOCATE_FAIL),145};146147if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||148!nak_reason_str[nak_reason])149return "unknown";150151return nak_reason_str[nak_reason];152}153154#undef DP_STR155#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x156157static const char *drm_dp_mst_sideband_tx_state_str(int state)158{159static const char * const sideband_reason_str[] = {160DP_STR(QUEUED),161DP_STR(START_SEND),162DP_STR(SENT),163DP_STR(RX),164DP_STR(TIMEOUT),165};166167if (state >= ARRAY_SIZE(sideband_reason_str) ||168!sideband_reason_str[state])169return "unknown";170171return sideband_reason_str[state];172}173174static inline u8175drm_dp_mst_get_ufp_num_at_lct_from_rad(u8 lct, const u8 *rad)176{177int idx = (lct / 2) - 1;178int shift = (lct % 2) ? 
0 : 4;179u8 ufp_num;180181/* mst_primary, it's rad is unset*/182if (lct == 1)183return 0;184185ufp_num = (rad[idx] >> shift) & 0xf;186187return ufp_num;188}189190static int191drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)192{193int i;194u8 unpacked_rad[16] = {};195196for (i = 0; i < lct; i++)197unpacked_rad[i] = drm_dp_mst_get_ufp_num_at_lct_from_rad(i + 1, rad);198199/* TODO: Eventually add something to printk so we can format the rad200* like this: 1.2.3201*/202return snprintf(out, len, "%*phC", lct, unpacked_rad);203}204205/* sideband msg handling */206static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)207{208u8 bitmask = 0x80;209u8 bitshift = 7;210u8 array_index = 0;211int number_of_bits = num_nibbles * 4;212u8 remainder = 0;213214while (number_of_bits != 0) {215number_of_bits--;216remainder <<= 1;217remainder |= (data[array_index] & bitmask) >> bitshift;218bitmask >>= 1;219bitshift--;220if (bitmask == 0) {221bitmask = 0x80;222bitshift = 7;223array_index++;224}225if ((remainder & 0x10) == 0x10)226remainder ^= 0x13;227}228229number_of_bits = 4;230while (number_of_bits != 0) {231number_of_bits--;232remainder <<= 1;233if ((remainder & 0x10) != 0)234remainder ^= 0x13;235}236237return remainder;238}239240static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)241{242u8 bitmask = 0x80;243u8 bitshift = 7;244u8 array_index = 0;245int number_of_bits = number_of_bytes * 8;246u16 remainder = 0;247248while (number_of_bits != 0) {249number_of_bits--;250remainder <<= 1;251remainder |= (data[array_index] & bitmask) >> bitshift;252bitmask >>= 1;253bitshift--;254if (bitmask == 0) {255bitmask = 0x80;256bitshift = 7;257array_index++;258}259if ((remainder & 0x100) == 0x100)260remainder ^= 0xd5;261}262263number_of_bits = 8;264while (number_of_bits != 0) {265number_of_bits--;266remainder <<= 1;267if ((remainder & 0x100) != 0)268remainder ^= 0xd5;269}270271return remainder & 0xff;272}273static inline u8 
drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)274{275u8 size = 3;276277size += (hdr->lct / 2);278return size;279}280281static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,282u8 *buf, int *len)283{284int idx = 0;285int i;286u8 crc4;287288buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);289for (i = 0; i < (hdr->lct / 2); i++)290buf[idx++] = hdr->rad[i];291buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |292(hdr->msg_len & 0x3f);293buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);294295crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);296buf[idx - 1] |= (crc4 & 0xf);297298*len = idx;299}300301static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr *mgr,302struct drm_dp_sideband_msg_hdr *hdr,303u8 *buf, int buflen, u8 *hdrlen)304{305u8 crc4;306u8 len;307int i;308u8 idx;309310if (buf[0] == 0)311return false;312len = 3;313len += ((buf[0] & 0xf0) >> 4) / 2;314if (len > buflen)315return false;316crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);317318if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {319drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);320return false;321}322323hdr->lct = (buf[0] & 0xf0) >> 4;324hdr->lcr = (buf[0] & 0xf);325idx = 1;326for (i = 0; i < (hdr->lct / 2); i++)327hdr->rad[i] = buf[idx++];328hdr->broadcast = (buf[idx] >> 7) & 0x1;329hdr->path_msg = (buf[idx] >> 6) & 0x1;330hdr->msg_len = buf[idx] & 0x3f;331if (hdr->msg_len < 1) /* min space for body CRC */332return false;333334idx++;335hdr->somt = (buf[idx] >> 7) & 0x1;336hdr->eomt = (buf[idx] >> 6) & 0x1;337hdr->seqno = (buf[idx] >> 4) & 0x1;338idx++;339*hdrlen = idx;340return true;341}342343void344drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,345struct drm_dp_sideband_msg_tx *raw)346{347int idx = 0;348int i;349u8 *buf = raw->msg;350351buf[idx++] = req->req_type & 0x7f;352353switch (req->req_type) {354case DP_ENUM_PATH_RESOURCES:355case 
DP_POWER_DOWN_PHY:356case DP_POWER_UP_PHY:357buf[idx] = (req->u.port_num.port_number & 0xf) << 4;358idx++;359break;360case DP_ALLOCATE_PAYLOAD:361buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |362(req->u.allocate_payload.number_sdp_streams & 0xf);363idx++;364buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);365idx++;366buf[idx] = (req->u.allocate_payload.pbn >> 8);367idx++;368buf[idx] = (req->u.allocate_payload.pbn & 0xff);369idx++;370for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {371buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |372(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);373idx++;374}375if (req->u.allocate_payload.number_sdp_streams & 1) {376i = req->u.allocate_payload.number_sdp_streams - 1;377buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;378idx++;379}380break;381case DP_QUERY_PAYLOAD:382buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;383idx++;384buf[idx] = (req->u.query_payload.vcpi & 0x7f);385idx++;386break;387case DP_REMOTE_DPCD_READ:388buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;389buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;390idx++;391buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;392idx++;393buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);394idx++;395buf[idx] = (req->u.dpcd_read.num_bytes);396idx++;397break;398399case DP_REMOTE_DPCD_WRITE:400buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;401buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;402idx++;403buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;404idx++;405buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);406idx++;407buf[idx] = (req->u.dpcd_write.num_bytes);408idx++;409memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);410idx += req->u.dpcd_write.num_bytes;411break;412case DP_REMOTE_I2C_READ:413buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;414buf[idx] |= 
(req->u.i2c_read.num_transactions & 0x3);415idx++;416for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {417buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;418idx++;419buf[idx] = req->u.i2c_read.transactions[i].num_bytes;420idx++;421memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);422idx += req->u.i2c_read.transactions[i].num_bytes;423424buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;425buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);426idx++;427}428buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;429idx++;430buf[idx] = (req->u.i2c_read.num_bytes_read);431idx++;432break;433434case DP_REMOTE_I2C_WRITE:435buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;436idx++;437buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;438idx++;439buf[idx] = (req->u.i2c_write.num_bytes);440idx++;441memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);442idx += req->u.i2c_write.num_bytes;443break;444case DP_QUERY_STREAM_ENC_STATUS: {445const struct drm_dp_query_stream_enc_status *msg;446447msg = &req->u.enc_status;448buf[idx] = msg->stream_id;449idx++;450memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));451idx += sizeof(msg->client_id);452buf[idx] = 0;453buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);454buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;455buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);456buf[idx] |= msg->valid_stream_behavior ? 
BIT(5) : 0;457idx++;458}459break;460}461raw->cur_len = idx;462}463EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);464465/* Decode a sideband request we've encoded, mainly used for debugging */466int467drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,468struct drm_dp_sideband_msg_req_body *req)469{470const u8 *buf = raw->msg;471int i, idx = 0;472473req->req_type = buf[idx++] & 0x7f;474switch (req->req_type) {475case DP_ENUM_PATH_RESOURCES:476case DP_POWER_DOWN_PHY:477case DP_POWER_UP_PHY:478req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;479break;480case DP_ALLOCATE_PAYLOAD:481{482struct drm_dp_allocate_payload *a =483&req->u.allocate_payload;484485a->number_sdp_streams = buf[idx] & 0xf;486a->port_number = (buf[idx] >> 4) & 0xf;487488WARN_ON(buf[++idx] & 0x80);489a->vcpi = buf[idx] & 0x7f;490491a->pbn = buf[++idx] << 8;492a->pbn |= buf[++idx];493494idx++;495for (i = 0; i < a->number_sdp_streams; i++) {496a->sdp_stream_sink[i] =497(buf[idx + (i / 2)] >> ((i % 2) ? 
0 : 4)) & 0xf;498}499}500break;501case DP_QUERY_PAYLOAD:502req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;503WARN_ON(buf[++idx] & 0x80);504req->u.query_payload.vcpi = buf[idx] & 0x7f;505break;506case DP_REMOTE_DPCD_READ:507{508struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;509510r->port_number = (buf[idx] >> 4) & 0xf;511512r->dpcd_address = (buf[idx] << 16) & 0xf0000;513r->dpcd_address |= (buf[++idx] << 8) & 0xff00;514r->dpcd_address |= buf[++idx] & 0xff;515516r->num_bytes = buf[++idx];517}518break;519case DP_REMOTE_DPCD_WRITE:520{521struct drm_dp_remote_dpcd_write *w =522&req->u.dpcd_write;523524w->port_number = (buf[idx] >> 4) & 0xf;525526w->dpcd_address = (buf[idx] << 16) & 0xf0000;527w->dpcd_address |= (buf[++idx] << 8) & 0xff00;528w->dpcd_address |= buf[++idx] & 0xff;529530w->num_bytes = buf[++idx];531532w->bytes = kmemdup(&buf[++idx], w->num_bytes,533GFP_KERNEL);534if (!w->bytes)535return -ENOMEM;536}537break;538case DP_REMOTE_I2C_READ:539{540struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;541struct drm_dp_remote_i2c_read_tx *tx;542bool failed = false;543544r->num_transactions = buf[idx] & 0x3;545r->port_number = (buf[idx] >> 4) & 0xf;546for (i = 0; i < r->num_transactions; i++) {547tx = &r->transactions[i];548549tx->i2c_dev_id = buf[++idx] & 0x7f;550tx->num_bytes = buf[++idx];551tx->bytes = kmemdup(&buf[++idx],552tx->num_bytes,553GFP_KERNEL);554if (!tx->bytes) {555failed = true;556break;557}558idx += tx->num_bytes;559tx->no_stop_bit = (buf[idx] >> 5) & 0x1;560tx->i2c_transaction_delay = buf[idx] & 0xf;561}562563if (failed) {564for (i = 0; i < r->num_transactions; i++) {565tx = &r->transactions[i];566kfree(tx->bytes);567}568return -ENOMEM;569}570571r->read_i2c_device_id = buf[++idx] & 0x7f;572r->num_bytes_read = buf[++idx];573}574break;575case DP_REMOTE_I2C_WRITE:576{577struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;578579w->port_number = (buf[idx] >> 4) & 0xf;580w->write_i2c_device_id = buf[++idx] & 0x7f;581w->num_bytes = 
buf[++idx];582w->bytes = kmemdup(&buf[++idx], w->num_bytes,583GFP_KERNEL);584if (!w->bytes)585return -ENOMEM;586}587break;588case DP_QUERY_STREAM_ENC_STATUS:589req->u.enc_status.stream_id = buf[idx++];590for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)591req->u.enc_status.client_id[i] = buf[idx++];592593req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),594buf[idx]);595req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),596buf[idx]);597req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),598buf[idx]);599req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),600buf[idx]);601break;602}603604return 0;605}606EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);607608void609drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,610int indent, struct drm_printer *printer)611{612int i;613614#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)615if (req->req_type == DP_LINK_ADDRESS) {616/* No contents to print */617P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));618return;619}620621P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));622indent++;623624switch (req->req_type) {625case DP_ENUM_PATH_RESOURCES:626case DP_POWER_DOWN_PHY:627case DP_POWER_UP_PHY:628P("port=%d\n", req->u.port_num.port_number);629break;630case DP_ALLOCATE_PAYLOAD:631P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",632req->u.allocate_payload.port_number,633req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,634req->u.allocate_payload.number_sdp_streams,635req->u.allocate_payload.number_sdp_streams,636req->u.allocate_payload.sdp_stream_sink);637break;638case DP_QUERY_PAYLOAD:639P("port=%d vcpi=%d\n",640req->u.query_payload.port_number,641req->u.query_payload.vcpi);642break;643case DP_REMOTE_DPCD_READ:644P("port=%d dpcd_addr=%05x len=%d\n",645req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,646req->u.dpcd_read.num_bytes);647break;648case DP_REMOTE_DPCD_WRITE:649P("port=%d addr=%05x len=%d: 
%*ph\n",650req->u.dpcd_write.port_number,651req->u.dpcd_write.dpcd_address,652req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,653req->u.dpcd_write.bytes);654break;655case DP_REMOTE_I2C_READ:656P("port=%d num_tx=%d id=%d size=%d:\n",657req->u.i2c_read.port_number,658req->u.i2c_read.num_transactions,659req->u.i2c_read.read_i2c_device_id,660req->u.i2c_read.num_bytes_read);661662indent++;663for (i = 0; i < req->u.i2c_read.num_transactions; i++) {664const struct drm_dp_remote_i2c_read_tx *rtx =665&req->u.i2c_read.transactions[i];666667P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",668i, rtx->i2c_dev_id, rtx->num_bytes,669rtx->no_stop_bit, rtx->i2c_transaction_delay,670rtx->num_bytes, rtx->bytes);671}672break;673case DP_REMOTE_I2C_WRITE:674P("port=%d id=%d size=%d: %*ph\n",675req->u.i2c_write.port_number,676req->u.i2c_write.write_i2c_device_id,677req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,678req->u.i2c_write.bytes);679break;680case DP_QUERY_STREAM_ENC_STATUS:681P("stream_id=%u client_id=%*ph stream_event=%x "682"valid_event=%d stream_behavior=%x valid_behavior=%d",683req->u.enc_status.stream_id,684(int)ARRAY_SIZE(req->u.enc_status.client_id),685req->u.enc_status.client_id, req->u.enc_status.stream_event,686req->u.enc_status.valid_stream_event,687req->u.enc_status.stream_behavior,688req->u.enc_status.valid_stream_behavior);689break;690default:691P("???\n");692break;693}694#undef P695}696EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);697698static inline void699drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,700const struct drm_dp_sideband_msg_tx *txmsg)701{702struct drm_dp_sideband_msg_req_body req;703char buf[64];704int ret;705int i;706707drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,708sizeof(buf));709drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",710txmsg->cur_offset, txmsg->cur_len, 
txmsg->seqno,711drm_dp_mst_sideband_tx_state_str(txmsg->state),712txmsg->path_msg, buf);713714ret = drm_dp_decode_sideband_req(txmsg, &req);715if (ret) {716drm_printf(p, "<failed to decode sideband req: %d>\n", ret);717return;718}719drm_dp_dump_sideband_msg_req_body(&req, 1, p);720721switch (req.req_type) {722case DP_REMOTE_DPCD_WRITE:723kfree(req.u.dpcd_write.bytes);724break;725case DP_REMOTE_I2C_READ:726for (i = 0; i < req.u.i2c_read.num_transactions; i++)727kfree(req.u.i2c_read.transactions[i].bytes);728break;729case DP_REMOTE_I2C_WRITE:730kfree(req.u.i2c_write.bytes);731break;732}733}734735static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)736{737u8 crc4;738739crc4 = drm_dp_msg_data_crc4(msg, len);740msg[len] = crc4;741}742743static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,744struct drm_dp_sideband_msg_tx *raw)745{746int idx = 0;747u8 *buf = raw->msg;748749buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);750751raw->cur_len = idx;752}753754static int drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,755struct drm_dp_sideband_msg_hdr *hdr,756u8 hdrlen)757{758/*759* ignore out-of-order messages or messages that are part of a760* failed transaction761*/762if (!hdr->somt && !msg->have_somt)763return false;764765/* get length contained in this portion */766msg->curchunk_idx = 0;767msg->curchunk_len = hdr->msg_len;768msg->curchunk_hdrlen = hdrlen;769770/* we have already gotten an somt - don't bother parsing */771if (hdr->somt && msg->have_somt)772return false;773774if (hdr->somt) {775memcpy(&msg->initial_hdr, hdr,776sizeof(struct drm_dp_sideband_msg_hdr));777msg->have_somt = true;778}779if (hdr->eomt)780msg->have_eomt = true;781782return true;783}784785/* this adds a chunk of msg to the builder to get the final msg */786static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,787u8 *replybuf, u8 replybuflen)788{789u8 crc4;790791memcpy(&msg->chunk[msg->curchunk_idx], 
replybuf, replybuflen);792msg->curchunk_idx += replybuflen;793794if (msg->curchunk_idx >= msg->curchunk_len) {795/* do CRC */796crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);797if (crc4 != msg->chunk[msg->curchunk_len - 1])798print_hex_dump(KERN_DEBUG, "wrong crc",799DUMP_PREFIX_NONE, 16, 1,800msg->chunk, msg->curchunk_len, false);801/* copy chunk into bigger msg */802memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);803msg->curlen += msg->curchunk_len - 1;804}805return true;806}807808static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_mgr *mgr,809struct drm_dp_sideband_msg_rx *raw,810struct drm_dp_sideband_msg_reply_body *repmsg)811{812int idx = 1;813int i;814815import_guid(&repmsg->u.link_addr.guid, &raw->msg[idx]);816idx += 16;817repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;818idx++;819if (idx > raw->curlen)820goto fail_len;821for (i = 0; i < repmsg->u.link_addr.nports; i++) {822if (raw->msg[idx] & 0x80)823repmsg->u.link_addr.ports[i].input_port = 1;824825repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;826repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);827828idx++;829if (idx > raw->curlen)830goto fail_len;831repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;832repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;833if (repmsg->u.link_addr.ports[i].input_port == 0)834repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;835idx++;836if (idx > raw->curlen)837goto fail_len;838if (repmsg->u.link_addr.ports[i].input_port == 0) {839repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);840idx++;841if (idx > raw->curlen)842goto fail_len;843import_guid(&repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx]);844idx += 16;845if (idx > raw->curlen)846goto fail_len;847repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;848repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] 
& 0xf);849idx++;850851}852if (idx > raw->curlen)853goto fail_len;854}855856return true;857fail_len:858DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);859return false;860}861862static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,863struct drm_dp_sideband_msg_reply_body *repmsg)864{865int idx = 1;866867repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;868idx++;869if (idx > raw->curlen)870goto fail_len;871repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];872idx++;873if (idx > raw->curlen)874goto fail_len;875876memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);877return true;878fail_len:879DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);880return false;881}882883static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,884struct drm_dp_sideband_msg_reply_body *repmsg)885{886int idx = 1;887888repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;889idx++;890if (idx > raw->curlen)891goto fail_len;892return true;893fail_len:894DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);895return false;896}897898static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,899struct drm_dp_sideband_msg_reply_body *repmsg)900{901int idx = 1;902903repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);904idx++;905if (idx > raw->curlen)906goto fail_len;907repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];908idx++;909/* TODO check */910memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);911return true;912fail_len:913DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);914return false;915}916917static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,918struct drm_dp_sideband_msg_reply_body *repmsg)919{920int idx = 
1;921922repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;923repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;924idx++;925if (idx > raw->curlen)926goto fail_len;927repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);928idx += 2;929if (idx > raw->curlen)930goto fail_len;931repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);932idx += 2;933if (idx > raw->curlen)934goto fail_len;935return true;936fail_len:937DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);938return false;939}940941static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,942struct drm_dp_sideband_msg_reply_body *repmsg)943{944int idx = 1;945946repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;947idx++;948if (idx > raw->curlen)949goto fail_len;950repmsg->u.allocate_payload.vcpi = raw->msg[idx];951idx++;952if (idx > raw->curlen)953goto fail_len;954repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);955idx += 2;956if (idx > raw->curlen)957goto fail_len;958return true;959fail_len:960DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);961return false;962}963964static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,965struct drm_dp_sideband_msg_reply_body *repmsg)966{967int idx = 1;968969repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;970idx++;971if (idx > raw->curlen)972goto fail_len;973repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);974idx += 2;975if (idx > raw->curlen)976goto fail_len;977return true;978fail_len:979DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);980return false;981}982983static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,984struct drm_dp_sideband_msg_reply_body *repmsg)985{986int idx = 
1;987988repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;989idx++;990if (idx > raw->curlen) {991DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",992idx, raw->curlen);993return false;994}995return true;996}997998static bool999drm_dp_sideband_parse_query_stream_enc_status(1000struct drm_dp_sideband_msg_rx *raw,1001struct drm_dp_sideband_msg_reply_body *repmsg)1002{1003struct drm_dp_query_stream_enc_status_ack_reply *reply;10041005reply = &repmsg->u.enc_status;10061007reply->stream_id = raw->msg[3];10081009reply->reply_signed = raw->msg[2] & BIT(0);10101011/*1012* NOTE: It's my impression from reading the spec that the below parsing1013* is correct. However I noticed while testing with an HDCP 1.4 display1014* through an HDCP 2.2 hub that only bit 3 was set. In that case, I1015* would expect both bits to be set. So keep the parsing following the1016* spec, but beware reality might not match the spec (at least for some1017* configurations).1018*/1019reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);1020reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);10211022reply->query_capable_device_present = raw->msg[2] & BIT(5);1023reply->legacy_device_present = raw->msg[2] & BIT(6);1024reply->unauthorizable_device_present = raw->msg[2] & BIT(7);10251026reply->auth_completed = !!(raw->msg[1] & BIT(3));1027reply->encryption_enabled = !!(raw->msg[1] & BIT(4));1028reply->repeater_present = !!(raw->msg[1] & BIT(5));1029reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;10301031return true;1032}10331034static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mgr,1035struct drm_dp_sideband_msg_rx *raw,1036struct drm_dp_sideband_msg_reply_body *msg)1037{1038memset(msg, 0, sizeof(*msg));1039msg->reply_type = (raw->msg[0] & 0x80) >> 7;1040msg->req_type = (raw->msg[0] & 0x7f);10411042if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {1043import_guid(&msg->u.nak.guid, &raw->msg[1]);1044msg->u.nak.reason = raw->msg[17];1045msg->u.nak.nak_data = 
raw->msg[18];1046return false;1047}10481049switch (msg->req_type) {1050case DP_LINK_ADDRESS:1051return drm_dp_sideband_parse_link_address(mgr, raw, msg);1052case DP_QUERY_PAYLOAD:1053return drm_dp_sideband_parse_query_payload_ack(raw, msg);1054case DP_REMOTE_DPCD_READ:1055return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);1056case DP_REMOTE_DPCD_WRITE:1057return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);1058case DP_REMOTE_I2C_READ:1059return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);1060case DP_REMOTE_I2C_WRITE:1061return true; /* since there's nothing to parse */1062case DP_ENUM_PATH_RESOURCES:1063return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);1064case DP_ALLOCATE_PAYLOAD:1065return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);1066case DP_POWER_DOWN_PHY:1067case DP_POWER_UP_PHY:1068return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);1069case DP_CLEAR_PAYLOAD_ID_TABLE:1070return true; /* since there's nothing to parse */1071case DP_QUERY_STREAM_ENC_STATUS:1072return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);1073default:1074drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n",1075msg->req_type, drm_dp_mst_req_type_str(msg->req_type));1076return false;1077}1078}10791080static bool1081drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_mgr *mgr,1082struct drm_dp_sideband_msg_rx *raw,1083struct drm_dp_sideband_msg_req_body *msg)1084{1085int idx = 1;10861087msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;1088idx++;1089if (idx > raw->curlen)1090goto fail_len;10911092import_guid(&msg->u.conn_stat.guid, &raw->msg[idx]);1093idx += 16;1094if (idx > raw->curlen)1095goto fail_len;10961097msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;1098msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;1099msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;1100msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 
0x1;1101msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);1102idx++;1103return true;1104fail_len:1105drm_dbg_kms(mgr->dev, "connection status reply parse length fail %d %d\n",1106idx, raw->curlen);1107return false;1108}11091110static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst_topology_mgr *mgr,1111struct drm_dp_sideband_msg_rx *raw,1112struct drm_dp_sideband_msg_req_body *msg)1113{1114int idx = 1;11151116msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;1117idx++;1118if (idx > raw->curlen)1119goto fail_len;11201121import_guid(&msg->u.resource_stat.guid, &raw->msg[idx]);1122idx += 16;1123if (idx > raw->curlen)1124goto fail_len;11251126msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);1127idx++;1128return true;1129fail_len:1130drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen);1131return false;1132}11331134static bool drm_dp_sideband_parse_req(const struct drm_dp_mst_topology_mgr *mgr,1135struct drm_dp_sideband_msg_rx *raw,1136struct drm_dp_sideband_msg_req_body *msg)1137{1138memset(msg, 0, sizeof(*msg));1139msg->req_type = (raw->msg[0] & 0x7f);11401141switch (msg->req_type) {1142case DP_CONNECTION_STATUS_NOTIFY:1143return drm_dp_sideband_parse_connection_status_notify(mgr, raw, msg);1144case DP_RESOURCE_STATUS_NOTIFY:1145return drm_dp_sideband_parse_resource_status_notify(mgr, raw, msg);1146default:1147drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n",1148msg->req_type, drm_dp_mst_req_type_str(msg->req_type));1149return false;1150}1151}11521153static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,1154u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)1155{1156struct drm_dp_sideband_msg_req_body req;11571158req.req_type = DP_REMOTE_DPCD_WRITE;1159req.u.dpcd_write.port_number = port_num;1160req.u.dpcd_write.dpcd_address = offset;1161req.u.dpcd_write.num_bytes = num_bytes;1162req.u.dpcd_write.bytes = 
bytes;
	drm_dp_encode_sideband_req(&req, msg);
}

/* Encode a LINK_ADDRESS request (no payload beyond the request type) */
static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
}

/* Encode a CLEAR_PAYLOAD_ID_TABLE request; transmitted as a path message */
static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

/* Encode an ENUM_PATH_RESOURCES request for @port_num; always returns 0 */
static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
				     int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

/* Encode an ALLOCATE_PAYLOAD request for @vcpi/@pbn on @port_num */
static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
				   int port_num,
				   u8 vcpi, uint16_t pbn,
				   u8 number_sdp_streams,
				   u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

/*
 * Encode a POWER_UP_PHY or POWER_DOWN_PHY request for @port_num.
 * NOTE(review): @req is not zero-initialized here (unlike
 * build_allocate_payload()); presumably the encoder only reads req_type and
 * port_number for these request types — confirm.
 */
static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				   int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

/* Encode a QUERY_STREAM_ENC_STATUS request; always returns 0 */
static int
build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
			      u8 *q_id)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_QUERY_STREAM_ENC_STATUS;
	req.u.enc_status.stream_id = stream_id;
	memcpy(req.u.enc_status.client_id, q_id,
	       sizeof(req.u.enc_status.client_id));
	req.u.enc_status.stream_event = 0;
	req.u.enc_status.valid_stream_event = false;
	req.u.enc_status.stream_behavior = 0;
	req.u.enc_status.valid_stream_behavior = false;

	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

/* True once @txmsg reached a terminal state (reply received, or timed out) */
static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

/*
 * Wait (up to 4 seconds) for the reply to @txmsg. Returns the >0 result of
 * the successful wait, or -EIO on timeout; a timed-out message still queued
 * for (or in the middle of) transmission is unlinked from the tx queue under
 * mgr->qlock before returning.
 */
static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	unsigned long wait_timeout = msecs_to_jiffies(4000);
	unsigned long wait_expires = jiffies + wait_timeout;
	int ret;

	for (;;) {
		/*
		 * If the driver provides a way for this, change to
		 * poll-waiting for the MST reply interrupt if we didn't receive
		 * it for 50 msec. This would cater for cases where the HPD
		 * pulse signal got lost somewhere, even though the sink raised
		 * the corresponding MST interrupt correctly. One example is the
		 * Club 3D CAC-1557 TypeC -> DP adapter which for some reason
		 * filters out short pulses with a duration less than ~540 usec.
		 *
		 * The poll period is 50 msec to avoid missing an interrupt
		 * after the sink has cleared it (after a 110msec timeout
		 * since it raised the interrupt).
		 */
		ret = wait_event_timeout(mgr->tx_waitq,
					 check_txmsg_state(mgr, txmsg),
					 mgr->cbs->poll_hpd_irq ?
						msecs_to_jiffies(50) :
						wait_timeout);

		if (ret || !mgr->cbs->poll_hpd_irq ||
		    time_after(jiffies, wait_expires))
			break;

		mgr->cbs->poll_hpd_irq(mgr);
	}

	mutex_lock(&mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		drm_dbg_kms(mgr->dev, "timedout msg send %p %d %d\n",
			    txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
			list_del(&txmsg->next);
	}
out:
	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP,
						       DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}
	mutex_unlock(&mgr->qlock);

	drm_dp_mst_kick_tx(mgr);
	return ret;
}

/* Allocate a branch device with @lct/@rad; both refcounts start at 1 */
static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);	/* one nibble of RAD per hop */
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

/* Final release of a branch device once its malloc refcount hits zero */
static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	/* Drop the malloc ref this branch held on its parent port */
	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's refcount reaches 0.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * branch's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of writing this documentation, there are no drivers that
 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
 * helpers. Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver usecase for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 * An example of topology and malloc refs in a DP MST topology with two
 * active payloads. Topology refcount increments are indicated by solid
 * lines, and malloc refcount increments are indicated by dashed lines.
 * Each starts from the branch which incremented the refcount, and ends at
 * the branch to which the refcount belongs to, i.e. the arrow points the
 * same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 * Ports and branch devices which have been released from memory are
 * colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory.
 * But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology as
 * shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
 */

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

/* Final release of a port once its malloc refcount hits zero */
static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	/* Drop the malloc ref the port held on its parent branch device */
	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
 * ensure that they grab at least one main malloc reference to their MST ports
 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);

/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * See also: drm_dp_mst_get_port_malloc()
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)

#define STACK_DEPTH 8

/*
 * Record one topology ref get/put in @history, keyed by the caller's stack
 * trace: bumps the count on an existing entry with the same backtrace, or
 * appends a new one. Silently drops the record on allocation failure.
 */
static noinline void
__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
		    struct drm_dp_mst_topology_ref_history *history,
		    enum drm_dp_mst_topology_ref_type type)
{
	struct drm_dp_mst_topology_ref_entry *entry = NULL;
	depot_stack_handle_t backtrace;
	ulong stack_entries[STACK_DEPTH];
	uint n;
	int i;

	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
	if (!backtrace)
		return;

	/* Try to find an existing entry for this backtrace */
	for (i = 0; i < history->len; i++) {
		if (history->entries[i].backtrace == backtrace) {
			entry = &history->entries[i];
			break;
		}
	}

	/* Otherwise add one */
	if (!entry) {
		struct drm_dp_mst_topology_ref_entry *new;
		int new_len = history->len + 1;

		new = krealloc(history->entries, sizeof(*new) * new_len,
			       GFP_KERNEL);
		if (!new)
			return;

		entry = &new[history->len];
		history->len = new_len;
		history->entries = new;

		entry->backtrace = backtrace;
		entry->type = type;
		entry->count = 0;
	}
	entry->count++;
	entry->ts_nsec = ktime_get_ns();
}

/* Sort comparator: order ref history entries by timestamp, oldest first */
static int
topology_ref_history_cmp(const void *a, const void *b)
{
	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;

	if (entry_a->ts_nsec > entry_b->ts_nsec)
		return 1;
	else if (entry_a->ts_nsec < entry_b->ts_nsec)
		return -1;
	else
		return 0;
}

static inline const char *
topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
{
	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
		return "get";
	else
		return "put";
}

/*
 * Print @history (sorted oldest-first, with decoded backtraces) through the
 * DRM debug printer, then free the history entries.
 */
static void
__dump_topology_ref_history(struct drm_device *drm,
			    struct drm_dp_mst_topology_ref_history *history,
			    void *ptr, const char *type_str)
{
	struct drm_printer p = drm_dbg_printer(drm, DRM_UT_DP, DBG_PREFIX);
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int i;

	if (!buf)
		return;

	if (!history->len)
		goto out;

	/* First, sort the list so that it goes from oldest to newest
	 * reference entry
	 */
	sort(history->entries, history->len, sizeof(*history->entries),
	     topology_ref_history_cmp, NULL);

	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
		   type_str, ptr);

	for (i = 0; i < history->len; i++) {
		const struct drm_dp_mst_topology_ref_entry *entry =
			&history->entries[i];
		u64 ts_nsec = entry->ts_nsec;
		u32 rem_nsec = do_div(ts_nsec, 1000000000);

		stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4);

		drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s",
			   entry->count,
			   topology_ref_type_to_str(entry->type),
			   ts_nsec, rem_nsec / 1000, buf);
	}

	/* Now free the history, since this is the only time we expose
it */
	kfree(history->entries);
out:
	kfree(buf);
}

/* Dump (and free) the saved topology ref history of @mstb */
static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
	__dump_topology_ref_history(mstb->mgr->dev, &mstb->topology_ref_history,
				    mstb, "MSTB");
}

/* Dump (and free) the saved topology ref history of @port */
static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
	__dump_topology_ref_history(port->mgr->dev, &port->topology_ref_history,
				    port, "Port");
}

/* Record a get/put against @mstb's topology refcount */
static __always_inline void
save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
}

/* Record a get/put against @port's topology refcount */
static __always_inline void
save_port_topology_ref(struct drm_dp_mst_port *port,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
}

static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->topology_ref_history_lock);
}

static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_unlock(&mgr->topology_ref_history_lock);
}
#else
/* No-op stubs for when topology ref debugging is compiled out */
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
static inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
#define save_mstb_topology_ref(mstb, type)
#define save_port_topology_ref(port, type)
#endif

/*
 * Find the atomic payload in @state belonging to @port, or NULL if @port
 * has no payload in this topology state.
 */
struct drm_dp_mst_atomic_payload *
drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
				 struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_atomic_payload *payload;

	list_for_each_entry(payload, &state->payloads, next)
		if (payload->port ==
port)
			return payload;

	return NULL;
}
EXPORT_SYMBOL(drm_atomic_get_mst_payload_state);

/* Topology refcount hit zero: queue the branch for delayed destruction */
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;

	drm_dp_mst_dump_mstb_topology_history(mstb);

	INIT_LIST_HEAD(&mstb->destroy_next);

	/*
	 * This can get called under mgr->mutex, so we need to perform the
	 * actual destruction of the mstb in another worker
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @mstb as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @mstb. If you already have a topology reference to @mstb, you
 * should use drm_dp_mst_topology_get_mstb() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret;

	topology_ref_history_lock(mstb->mgr);
	ret = kref_get_unless_zero(&mstb->topology_kref);
	if (ret) {
		drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(mstb->mgr);

	return ret;
}

/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Increments &drm_dp_mst_branch.topology_kref without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));

	topology_ref_history_unlock(mstb->mgr);
}

/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
 *
 * Releases a topology reference from @mstb by decrementing
 * &drm_dp_mst_branch.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_get_mstb()
 */
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1);
	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);

	topology_ref_history_unlock(mstb->mgr);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}

/* Topology refcount hit zero: tear down or queue destruction of @port */
static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	drm_dp_mst_dump_port_topology_history(port);

	/* There's nothing that needs locking to destroy an input port yet */
	if (port->input) {
		drm_dp_mst_put_port_malloc(port);
		return;
	}

	drm_edid_free(port->cached_edid);

	/*
	 * we can't destroy the connector here, as we might be holding the
	 * mode_config.mutex from an EDID
retrieval
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&port->next, &mgr->destroy_port_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @port, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
 * 0). Holding a topology reference implies that a malloc reference will be
 * held to @port as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @port. If you already have a topology reference to @port, you
 * should use drm_dp_mst_topology_get_port() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_port()
 * drm_dp_mst_topology_put_port()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @port is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret;

	topology_ref_history_lock(port->mgr);
	ret = kref_get_unless_zero(&port->topology_kref);
	if (ret) {
		drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(port->mgr);
	return ret;
}

/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Increments &drm_dp_mst_port.topology_kref without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_put_port()
 */
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
	topology_ref_history_lock(port->mgr);

	WARN_ON(kref_read(&port->topology_kref) == 0);
	kref_get(&port->topology_kref);
	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);

	topology_ref_history_unlock(port->mgr);
}

/**
 * drm_dp_mst_topology_put_port() - release a topology reference to a port
 * @port: The &struct drm_dp_mst_port to release the topology reference from
 *
 * Releases a topology reference from @port by decrementing
 * &drm_dp_mst_port.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_get_port()
 */
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
	topology_ref_history_lock(port->mgr);

	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1);
	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);

	topology_ref_history_unlock(port->mgr);
	kref_put(&port->topology_kref, drm_dp_destroy_port);
}

/*
 * Depth-first search of the topology under @mstb for @to_find; caller must
 * hold mgr->lock so the port lists can't change underneath us.
 */
static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;

	if (to_find == mstb)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
			    port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

/*
 * Validate that @mstb is still reachable from the primary branch device and
 * grab a topology reference on it; returns NULL if it's gone.
 */
static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
		    mgr->mst_primary, mstb);

		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
			rmstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}

/*
 * Depth-first search of the topology under @mstb for port @to_find; caller
 * must hold mgr->lock.
 */
static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find)
			return port;

		if (port->mstb) {
			mport = drm_dp_mst_topology_get_port_validated_locked(
			    port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

/*
 * Validate that @port is still reachable from the primary branch device and
 * grab a topology reference on it; returns NULL if it's gone.
 */
static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rport = drm_dp_mst_topology_get_port_validated_locked(
		    mgr->mst_primary, port);

		if (rport && !drm_dp_mst_topology_try_get_port(rport))
			rport = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rport;
}

/* Find the port numbered @port_num on @mstb and try to grab a topology ref */
static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;
	int ret;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			ret = drm_dp_mst_topology_try_get_port(port);
			return ret ?
port : NULL;
		}
	}

	return NULL;
}

/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;

	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		/* odd LCTs land in the high nibble, even ones in the low */
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;	/* new device sits one hop further down */
}

/*
 * True when (@pdt, @mcs) describes an end device (SST sink, legacy
 * converter, or a non-message-capable branching unit) rather than a real
 * MST branching device.
 */
static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
{
	switch (pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		return true;
	case DP_PEER_DEVICE_MST_BRANCHING:
		/* For sst branch device */
		if (!mcs)
			return true;

		return false;
	}
	return true;
}

/*
 * Switch @port over to a new peer device type / message capability. Tears
 * down the old i2c bus or child MSTB as needed and sets up the new one.
 * Returns 1 if the caller should send a LINK_ADDRESS for the new child
 * branch, 0 if nothing more is needed, or a negative error code (in which
 * case the pdt is reset to DP_PEER_DEVICE_NONE).
 */
static int
drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
		    bool new_mcs)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	struct drm_dp_mst_branch *mstb;
	u8 rad[8], lct;
	int ret = 0;

	if (port->pdt == new_pdt && port->mcs == new_mcs)
		return 0;

	/* Teardown the old pdt, if there is one */
	if (port->pdt != DP_PEER_DEVICE_NONE) {
		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
			/*
			 * If the new PDT would also have an i2c bus,
			 * don't bother with reregistering it
			 */
			if (new_pdt != DP_PEER_DEVICE_NONE &&
			    drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
				port->pdt = new_pdt;
				port->mcs = new_mcs;
				return 0;
			}

			/* remove i2c over sideband */
			drm_dp_mst_unregister_i2c_bus(port);
		} else {
			mutex_lock(&mgr->lock);
			drm_dp_mst_topology_put_mstb(port->mstb);
			port->mstb = NULL;
			mutex_unlock(&mgr->lock);
		}
	}

	port->pdt = new_pdt;
	port->mcs = new_mcs;

	if (port->pdt != DP_PEER_DEVICE_NONE) {
		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
			/* add i2c over sideband */
			ret = drm_dp_mst_register_i2c_bus(port);
		} else {
			lct = drm_dp_calculate_rad(port, rad);
			mstb = drm_dp_add_mst_branch_device(lct, rad);
			if (!mstb) {
				ret = -ENOMEM;
				drm_err(mgr->dev, "Failed to create MSTB for port %p", port);
				goto out;
			}

			mutex_lock(&mgr->lock);
			port->mstb = mstb;
			mstb->mgr = port->mgr;
			mstb->port_parent = port;

			/*
			 * Make sure this port's memory allocation stays
			 * around until its child MSTB releases it
			 */
			drm_dp_mst_get_port_malloc(port);
			mutex_unlock(&mgr->lock);

			/* And make sure we send a link address for this */
			ret = 1;
		}
	}

out:
	if (ret < 0)
		port->pdt = DP_PEER_DEVICE_NONE;
	return ret;
}

/**
 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to read
 * @buffer: buffer to store the register values
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_read() does for local
 * devices via actual AUX CH.
 *
 * Return: Number of bytes read, or negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_read(port->mgr, port,
				     offset, size, buffer);
}

/**
 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to write
 * @buffer: buffer containing the values to write
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_write() does for local
 * devices via
actual AUX CH.
 *
 * Return: number of bytes written on success, negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
			      unsigned int offset, void *buffer, size_t size)
{
	/* @aux is embedded in the port; recover the owning port */
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_write(port->mgr, port,
				     offset, size, buffer);
}

/*
 * Record @guid as @mstb's GUID. If the device reported a null GUID, a fresh
 * one is generated (by drm_dp_validate_guid()) and written back to the
 * device's DP_GUID DPCD registers via its parent port's aux (or the mgr's
 * aux for the primary branch).
 */
static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid)
{
	int ret = 0;

	guid_copy(&mstb->guid, guid);

	if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) {
		struct drm_dp_aux *aux;
		u8 buf[UUID_SIZE];

		export_guid(buf, &mstb->guid);

		if (mstb->port_parent)
			aux = &mstb->port_parent->aux;
		else
			aux = mstb->mgr->aux;

		ret = drm_dp_dpcd_write_data(aux, DP_GUID, buf, sizeof(buf));
	}

	return ret;
}

/*
 * Build the connector property path "mst:<conn_base_id>-<port>-...-<pnum>"
 * by walking @mstb's RAD one nibble (one hop) at a time and appending the
 * final port number @pnum.
 */
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];

	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		/* even hops live in the high nibble, odd hops in the low */
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;

		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}

/**
 * drm_dp_mst_connector_late_register() - Late MST connector registration
 * @connector: The MST connector
 * @port: The MST port for this connector
 *
 * Helper to register the remote aux device for this MST port. Drivers should
 * call this from their mst connector's late_register hook to enable MST aux
 * devices.
 *
 * Return: 0 on success, negative error code on failure.
 */
int drm_dp_mst_connector_late_register(struct drm_connector *connector,
				       struct drm_dp_mst_port *port)
{
	drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n",
		    port->aux.name, connector->kdev->kobj.name);

	port->aux.dev = connector->kdev;
	return drm_dp_aux_register_devnode(&port->aux);
}
EXPORT_SYMBOL(drm_dp_mst_connector_late_register);

/**
 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
 * @connector: The MST connector
 * @port: The MST port for this connector
 *
 * Helper to unregister the remote aux device for this MST port, registered by
 * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
 * connector's early_unregister hook.
 */
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
					   struct drm_dp_mst_port *port)
{
	drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n",
		    port->aux.name, connector->kdev->kobj.name);
	drm_dp_aux_unregister_devnode(&port->aux);
}
EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);

/*
 * Create and register the DRM connector for @port via the driver's
 * add_connector callback, caching the EDID up front for logical ports on
 * end devices.
 */
static void
drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
			      struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	char proppath[255];
	int ret;

	build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
	port->connector = mgr->cbs->add_connector(mgr, port, proppath);
	if (!port->connector) {
		ret = -ENOMEM;
		goto error;
	}

	if (port->pdt != DP_PEER_DEVICE_NONE &&
	    drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
	    drm_dp_mst_port_is_logical(port))
		port->cached_edid = 
drm_edid_read_ddc(port->connector,2290&port->aux.ddc);22912292drm_connector_dynamic_register(port->connector);2293return;22942295error:2296drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret);2297}22982299/*2300* Drop a topology reference, and unlink the port from the in-memory topology2301* layout2302*/2303static void2304drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,2305struct drm_dp_mst_port *port)2306{2307mutex_lock(&mgr->lock);2308port->parent->num_ports--;2309list_del(&port->next);2310mutex_unlock(&mgr->lock);2311drm_dp_mst_topology_put_port(port);2312}23132314static struct drm_dp_mst_port *2315drm_dp_mst_add_port(struct drm_device *dev,2316struct drm_dp_mst_topology_mgr *mgr,2317struct drm_dp_mst_branch *mstb, u8 port_number)2318{2319struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);23202321if (!port)2322return NULL;23232324kref_init(&port->topology_kref);2325kref_init(&port->malloc_kref);2326port->parent = mstb;2327port->port_num = port_number;2328port->mgr = mgr;2329port->aux.name = "DPMST";2330port->aux.dev = dev->dev;2331port->aux.is_remote = true;23322333/* initialize the MST downstream port's AUX crc work queue */2334port->aux.drm_dev = dev;2335drm_dp_remote_aux_init(&port->aux);23362337/*2338* Make sure the memory allocation for our parent branch stays2339* around until our own memory allocation is released2340*/2341drm_dp_mst_get_mstb_malloc(mstb);23422343return port;2344}23452346static int2347drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,2348struct drm_device *dev,2349struct drm_dp_link_addr_reply_port *port_msg)2350{2351struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;2352struct drm_dp_mst_port *port;2353int ret;2354u8 new_pdt = DP_PEER_DEVICE_NONE;2355bool new_mcs = 0;2356bool created = false, send_link_addr = false, changed = false;23572358port = drm_dp_get_port(mstb, port_msg->port_number);2359if (!port) {2360port = drm_dp_mst_add_port(dev, mgr, 
mstb,2361port_msg->port_number);2362if (!port)2363return -ENOMEM;2364created = true;2365changed = true;2366} else if (!port->input && port_msg->input_port && port->connector) {2367/* Since port->connector can't be changed here, we create a2368* new port if input_port changes from 0 to 12369*/2370drm_dp_mst_topology_unlink_port(mgr, port);2371drm_dp_mst_topology_put_port(port);2372port = drm_dp_mst_add_port(dev, mgr, mstb,2373port_msg->port_number);2374if (!port)2375return -ENOMEM;2376changed = true;2377created = true;2378} else if (port->input && !port_msg->input_port) {2379changed = true;2380} else if (port->connector) {2381/* We're updating a port that's exposed to userspace, so do it2382* under lock2383*/2384drm_modeset_lock(&mgr->base.lock, NULL);23852386changed = port->ddps != port_msg->ddps ||2387(port->ddps &&2388(port->ldps != port_msg->legacy_device_plug_status ||2389port->dpcd_rev != port_msg->dpcd_revision ||2390port->mcs != port_msg->mcs ||2391port->pdt != port_msg->peer_device_type ||2392port->num_sdp_stream_sinks !=2393port_msg->num_sdp_stream_sinks));2394}23952396port->input = port_msg->input_port;2397if (!port->input)2398new_pdt = port_msg->peer_device_type;2399new_mcs = port_msg->mcs;2400port->ddps = port_msg->ddps;2401port->ldps = port_msg->legacy_device_plug_status;2402port->dpcd_rev = port_msg->dpcd_revision;2403port->num_sdp_streams = port_msg->num_sdp_streams;2404port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;24052406/* manage mstb port lists with mgr lock - take a reference2407for this list */2408if (created) {2409mutex_lock(&mgr->lock);2410drm_dp_mst_topology_get_port(port);2411list_add(&port->next, &mstb->ports);2412mstb->num_ports++;2413mutex_unlock(&mgr->lock);2414}24152416/*2417* Reprobe PBN caps on both hotplug, and when re-probing the link2418* for our parent mstb2419*/2420if (port->ddps && !port->input) {2421ret = drm_dp_send_enum_path_resources(mgr, mstb,2422port);2423if (ret == 1)2424changed = true;2425} else 
{2426port->full_pbn = 0;2427}24282429ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);2430if (ret == 1) {2431send_link_addr = true;2432} else if (ret < 0) {2433drm_err(dev, "Failed to change PDT on port %p: %d\n", port, ret);2434goto fail;2435}24362437/*2438* If this port wasn't just created, then we're reprobing because2439* we're coming out of suspend. In this case, always resend the link2440* address if there's an MSTB on this port2441*/2442if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&2443port->mcs)2444send_link_addr = true;24452446if (port->connector)2447drm_modeset_unlock(&mgr->base.lock);2448else if (!port->input)2449drm_dp_mst_port_add_connector(mstb, port);24502451if (send_link_addr && port->mstb) {2452ret = drm_dp_send_link_address(mgr, port->mstb);2453if (ret == 1) /* MSTB below us changed */2454changed = true;2455else if (ret < 0)2456goto fail_put;2457}24582459/* put reference to this port */2460drm_dp_mst_topology_put_port(port);2461return changed;24622463fail:2464drm_dp_mst_topology_unlink_port(mgr, port);2465if (port->connector)2466drm_modeset_unlock(&mgr->base.lock);2467fail_put:2468drm_dp_mst_topology_put_port(port);2469return ret;2470}24712472static int2473drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,2474struct drm_dp_connection_status_notify *conn_stat)2475{2476struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;2477struct drm_dp_mst_port *port;2478int old_ddps, ret;2479u8 new_pdt;2480bool new_mcs;2481bool dowork = false, create_connector = false;24822483port = drm_dp_get_port(mstb, conn_stat->port_number);2484if (!port)2485return 0;24862487if (port->connector) {2488if (!port->input && conn_stat->input_port) {2489/*2490* We can't remove a connector from an already exposed2491* port, so just throw the port out and make sure we2492* reprobe the link address of it's parent MSTB2493*/2494drm_dp_mst_topology_unlink_port(mgr, port);2495mstb->link_address_sent = false;2496dowork = true;2497goto out;2498}24992500/* Locking is 
only needed if the port's exposed to userspace */
		drm_modeset_lock(&mgr->base.lock, NULL);
	} else if (port->input && !conn_stat->input_port) {
		create_connector = true;
		/* Reprobe link address so we get num_sdp_streams */
		mstb->link_address_sent = false;
		dowork = true;
	}

	old_ddps = port->ddps;
	port->input = conn_stat->input_port;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	/* plug state flipped: reprobe PBN on plug, clear it on unplug */
	if (old_ddps != port->ddps) {
		if (port->ddps && !port->input)
			drm_dp_send_enum_path_resources(mgr, mstb, port);
		else
			port->full_pbn = 0;
	}

	new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
	new_mcs = conn_stat->message_capability_status;
	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
	if (ret == 1) {
		dowork = true;
	} else if (ret < 0) {
		drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret);
		dowork = false;
	}

	if (port->connector)
		drm_modeset_unlock(&mgr->base.lock);
	else if (create_connector)
		drm_dp_mst_port_add_connector(mstb, port);

out:
	drm_dp_mst_topology_put_port(port);
	return dowork;
}

/*
 * Walk down from the primary branch following @rad for @lct hops and return
 * the MSTB at that address with a topology reference held, or NULL if it no
 * longer exists (or its reference could not be taken).
 */
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							      u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;
	int i, ret;
	/* find the port by iterating down */

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;

	if (!mstb)
		goto out;

	for (i = 1; i < lct; i++) {
		int port_num = drm_dp_mst_get_ufp_num_at_lct_from_rad(i + 1, rad);

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				mstb = port->mstb;
				if (!mstb) {
					drm_err(mgr->dev,
						"failed to lookup MSTB with lct %d, rad %02x\n",
						lct, rad[0]);
					goto out;
				}

				break;
			}
		}
	}
	/* hand the caller a topology reference, if one can still be taken */
	ret = drm_dp_mst_topology_try_get_mstb(mstb);
	if (!ret)
		mstb = NULL;
out:
	mutex_unlock(&mgr->lock);
	return mstb;
}

/*
 * Depth-first search for the MSTB whose GUID matches @guid; no reference is
 * taken (caller must hold mgr->lock and take its own reference).
 */
static struct drm_dp_mst_branch *
get_mst_branch_device_by_guid_helper(struct drm_dp_mst_branch *mstb,
				     const guid_t *guid)
{
	struct drm_dp_mst_branch *found_mstb;
	struct drm_dp_mst_port *port;

	if (!mstb)
		return NULL;

	if (guid_equal(&mstb->guid, guid))
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);

		if (found_mstb)
			return found_mstb;
	}

	return NULL;
}

/*
 * Look up an MSTB by GUID and return it with a topology reference held, or
 * NULL if not found / no reference could be taken.
 */
static struct drm_dp_mst_branch *
drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
				     const guid_t *guid)
{
	struct drm_dp_mst_branch *mstb;
	int ret;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);

	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
	if (mstb) {
		ret = drm_dp_mst_topology_try_get_mstb(mstb);
		if (!ret)
			mstb = NULL;
	}

	mutex_unlock(&mgr->lock);
	return mstb;
}

/*
 * Recursively send LINK_ADDRESS to @mstb (if not yet sent) and to every
 * child MSTB below it.
 *
 * Returns 1 if anything in the subtree changed, 0 if not, negative errno on
 * failure.
 */
static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					      struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	int ret;
	bool changed = false;

	if (!mstb->link_address_sent) {
		ret = drm_dp_send_link_address(mgr, mstb);
		if (ret == 1)
			changed = true;
		else if (ret < 0)
			return ret;
	}

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->input || !port->ddps || !port->mstb)
			continue;

		ret = drm_dp_check_and_send_link_address(mgr, port->mstb);
		if (ret == 1)
			changed = true;
		else if (ret < 0)
			return ret;
	}

	return changed;
}

/*
 * Deferred-work entry point that (re)probes the whole topology from the
 * primary MSTB and fires a hotplug event if anything changed.
 */
static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr =
		container_of(work, struct drm_dp_mst_topology_mgr, work);
	struct drm_device *dev = mgr->dev;
	struct drm_dp_mst_branch *mstb;
	int ret;
	bool 
clear_payload_id_table;

	mutex_lock(&mgr->probe_lock);

	mutex_lock(&mgr->lock);
	/* only clear the payload ID table once per MST session */
	clear_payload_id_table = !mgr->payload_id_table_cleared;
	mgr->payload_id_table_cleared = true;

	mstb = mgr->mst_primary;
	if (mstb) {
		ret = drm_dp_mst_topology_try_get_mstb(mstb);
		if (!ret)
			mstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	if (!mstb) {
		mutex_unlock(&mgr->probe_lock);
		return;
	}

	/*
	 * Certain branch devices seem to incorrectly report an available_pbn
	 * of 0 on downstream sinks, even after clearing the
	 * DP_PAYLOAD_ALLOCATE_* registers in
	 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
	 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
	 * things work again.
	 */
	if (clear_payload_id_table) {
		drm_dbg_kms(dev, "Clearing payload ID table\n");
		drm_dp_send_clear_payload_id_table(mgr, mstb);
	}

	ret = drm_dp_check_and_send_link_address(mgr, mstb);
	drm_dp_mst_topology_put_mstb(mstb);

	mutex_unlock(&mgr->probe_lock);
	if (ret > 0)
		drm_kms_helper_hotplug_event(dev);
}

/* Kick the topology probe work off on the long-running system workqueue. */
static void drm_dp_mst_queue_probe_work(struct drm_dp_mst_topology_mgr *mgr)
{
	queue_work(system_long_wq, &mgr->work);
}

/*
 * Returns true if @guid is already valid (non-null); otherwise generates a
 * fresh GUID into @guid and returns false so the caller knows to write it
 * back to the device.
 */
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 guid_t *guid)
{
	if (!guid_is_null(guid))
		return true;

	guid_gen(guid);

	return false;
}

/* Encode a REMOTE_DPCD_READ sideband request into @msg. */
static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
			    u8 port_num, u32 offset, u8 num_bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);
}

/*
 * Write a raw sideband message chunk into the DOWN_REQ (or UP_REP, if @up)
 * DPCD mailbox in transaction-size pieces, restarting the whole chunk up to
 * five times on -EIO.
 *
 * Returns 0 on success, -EIO on persistent failure.
 */
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
{
	int ret;
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;
	int retries = 0;

retry:
	total = len;
	offset = 0;
	do {
		/* per-transfer cap: device limit, but never more than 16 bytes */
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write_data(mgr->aux, regbase + offset,
					     &msg[offset],
					     tosend);
		if (ret == -EIO && retries < 5) {
			retries++;
			goto retry;
		} else if (ret < 0) {
			drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);

			return -EIO;
		}
		offset += tosend;
		total -= tosend;
	} while (total > 0);
	return 0;
}

/*
 * Fill in a sideband message header from the destination MSTB of @txmsg.
 * Broadcast request types go to LCT 1 with the full 6-hop relay count;
 * everything else is addressed with the MSTB's own LCT/RAD.
 */
static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_branch *mstb = txmsg->dst;
	u8 req_type;

	req_type = txmsg->msg[0] & 0x7f;
	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
	    req_type == DP_RESOURCE_STATUS_NOTIFY ||
	    req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
		hdr->broadcast = 1;
	else
		hdr->broadcast = 0;
	hdr->path_msg = txmsg->path_msg;
	if (hdr->broadcast) {
		hdr->lct = 1;
		hdr->lcr = 6;
	} else {
		hdr->lct = mstb->lct;
		hdr->lcr = mstb->lct - 1;
	}

	memcpy(hdr->rad, mstb->rad, hdr->lct / 2);

	return 0;
}
/*
 * process a single block of the next message in the sideband queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
{
	u8 chunk[48];
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;
	int ret;

	if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
		return 0;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;

	/* make hdr from dst mst */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
	if (ret < 0)
		return ret;

	/* amount left to send in this message */
	len = txmsg->cur_len - 
txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	/* SOMT on the first chunk of a message, EOMT on the last */
	if (len == txmsg->cur_len)
		hdr.somt = 1;
	if (space >= len)
		hdr.eomt = 1;


	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
	idx += tosend + 1;

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
	if (ret) {
		if (drm_debug_enabled(DRM_UT_DP)) {
			struct drm_printer p = drm_dbg_printer(mgr->dev,
							       DRM_UT_DP,
							       DBG_PREFIX);

			drm_printf(&p, "sideband msg failed to send\n");
			drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
		}
		return ret;
	}

	txmsg->cur_offset += tosend;
	/* return 1 once the whole message has been transmitted */
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
		return 1;
	}
	return 0;
}

/*
 * Transmit the next chunk of the message at the head of the down-request
 * queue; on failure the message is dropped from the queue and its waiters
 * are woken with TX_TIMEOUT. Caller must hold mgr->qlock.
 */
static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq))
		return;

	txmsg = list_first_entry(&mgr->tx_msg_downq,
				 struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret < 0) {
		drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		wake_up_all(&mgr->tx_waitq);
	}
}

/*
 * Queue @txmsg on the down-request queue, and start transmitting immediately
 * if the queue was empty (otherwise the reply path drives the queue).
 */
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);

	if (drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP,
						       DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}

	if (list_is_singular(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

/* Debug-log every port entry of a LINK_ADDRESS reply. */
static void
drm_dp_dump_link_address(const struct drm_dp_mst_topology_mgr *mgr,
			 struct drm_dp_link_address_ack_reply *reply)
{
	struct drm_dp_link_addr_reply_port *port_reply;
	int i;

	for (i = 0; i < reply->nports; i++) {
		port_reply = &reply->ports[i];
		drm_dbg_kms(mgr->dev,
			    "port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
			    i,
			    port_reply->input_port,
			    port_reply->peer_device_type,
			    port_reply->port_number,
			    port_reply->dpcd_revision,
			    port_reply->mcs,
			    port_reply->ddps,
			    port_reply->legacy_device_plug_status,
			    port_reply->num_sdp_streams,
			    port_reply->num_sdp_stream_sinks);
	}
}

/*
 * Send a LINK_ADDRESS request to @mstb, handle every port in the reply, and
 * prune in-memory ports that the device no longer reports.
 *
 * Returns 1 if the topology changed, 0 if not, negative errno on failure
 * (in which case link_address_sent is cleared so a later probe retries).
 */
static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_link_address_ack_reply *reply;
	struct drm_dp_mst_port *port, *tmp;
	int i, ret, port_mask = 0;
	bool changed = false;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	build_link_address(txmsg);

	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	/* FIXME: Actually do some real error handling here */
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret < 0) {
		drm_err(mgr->dev, "Sending link address failed with %d\n", ret);
		goto out;
	}
	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
		drm_err(mgr->dev, "link address NAK received\n");
		ret = -EIO;
		goto out;
	}

	reply = &txmsg->reply.u.link_addr;
	drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports);
	drm_dp_dump_link_address(mgr, reply);

	ret = drm_dp_check_mstb_guid(mstb, &reply->guid);
	if (ret) {
		char buf[64];

		drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
		drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret);
		goto out;
	}

	for (i = 0; i < reply->nports; i++) {
		port_mask |= BIT(reply->ports[i].port_number);
		ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
							  &reply->ports[i]);
		if (ret == 1)
			changed = true;
		else if (ret < 0)
			goto out;
	}

	/* Prune any ports that are currently a part of mstb in our in-memory
	 * topology, but were not seen in this link address. Usually this
	 * means that they were removed while the topology was out of sync,
	 * e.g. during suspend/resume
	 */
	mutex_lock(&mgr->lock);
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		if (port_mask & BIT(port->port_num))
			continue;

		drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n",
			    port->port_num);
		list_del(&port->next);
		drm_dp_mst_topology_put_port(port);
		changed = true;
	}
	mutex_unlock(&mgr->lock);

out:
	if (ret < 0)
		mstb->link_address_sent = false;
	kfree(txmsg);
	return ret < 0 ? ret : changed;
}

/*
 * Broadcast a CLEAR_PAYLOAD_ID_TABLE message. Best-effort: failures (and
 * NAKs) are only logged.
 */
static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return;

	txmsg->dst = mstb;
	build_clear_payload_id_table(txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
		drm_dbg_kms(mgr->dev, "clear payload table id nak received\n");

	kfree(txmsg);
}

/*
 * Query @port's path resources (full/available PBN, FEC capability) via an
 * ENUM_PATH_RESOURCES message and cache the result on the port.
 *
 * Returns 1 if the cached values changed (hotplug needed), 0 if not,
 * negative errno on failure.
 */
static int
drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_branch *mstb,
				struct drm_dp_mst_port *port)
{
	struct drm_dp_enum_path_resources_ack_reply *path_res;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		ret = 0;
		path_res = &txmsg->reply.u.path_resources;

		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			drm_dbg_kms(mgr->dev, "enum path resources nak received\n");
		} else {
			if (port->port_num != path_res->port_number)
				DRM_ERROR("got incorrect port in response\n");

			drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n",
				    path_res->port_number,
				    path_res->full_payload_bw_number,
				    path_res->avail_payload_bw_number);

			/*
			 * If something changed, make sure we send a
			 * hotplug
			 */
			if (port->full_pbn != path_res->full_payload_bw_number ||
			    port->fec_capable != path_res->fec_capable)
				ret = 1;

			port->full_pbn = path_res->full_payload_bw_number;
			port->fec_capable = path_res->fec_capable;
		}
	}

	kfree(txmsg);
	return ret;
}

static struct 
drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
{
	/* walk up until we find a port whose child MSTB is not @mstb's chain */
	if (!mstb->port_parent)
		return NULL;

	if (mstb->port_parent->mstb != mstb)
		return mstb->port_parent;

	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
}

/*
 * Searches upwards in the topology starting from mstb to try to find the
 * closest available parent of mstb that's still connected to the rest of the
 * topology. This can be used in order to perform operations like releasing
 * payloads, where the branch device which owned the payload may no longer be
 * around and thus would require that the payload on the last living relative
 * be freed instead.
 */
static struct drm_dp_mst_branch *
drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_branch *mstb,
					int *port_num)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	struct drm_dp_mst_port *found_port;

	mutex_lock(&mgr->lock);
	if (!mgr->mst_primary)
		goto out;

	do {
		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
		if (!found_port)
			break;

		if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
			rmstb = found_port->parent;
			*port_num = found_port->port_num;
		} else {
			/* Search again, starting from this parent */
			mstb = found_port->parent;
		}
	} while (!rmstb);
out:
	mutex_unlock(&mgr->lock);
	return rmstb;
}

/*
 * Send an ALLOCATE_PAYLOAD message for VCPI @id with @pbn bandwidth to the
 * MSTB owning @port (or, if that MSTB is gone, to its last connected
 * ancestor).
 *
 * Returns 0 on success, -EINVAL on NAK or if no live MSTB can be found,
 * other negative errno on failure.
 */
static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
				   int id,
				   int pbn)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	int ret, port_num;
	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
	int i;

	port_num = port->port_num;
	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb) {
		mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
							       port->parent,
							       &port_num);

		if (!mstb)
			return -EINVAL;
	}

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	for (i = 0; i < port->num_sdp_streams; i++)
		sinks[i] = i;

	txmsg->dst = mstb;
	build_allocate_payload(txmsg, port_num,
			       id,
			       pbn, port->num_sdp_streams, sinks);

	drm_dp_queue_down_tx(mgr, txmsg);

	/*
	 * FIXME: there is a small chance that between getting the last
	 * connected mstb and sending the payload message, the last connected
	 * mstb could also be removed from the topology. In the future, this
	 * needs to be fixed by restarting the
	 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
	 * timeout if the topology is still connected to the system.
	 */
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			ret = -EINVAL;
		else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}

/*
 * Send a POWER_UP_PHY or POWER_DOWN_PHY message for @port.
 *
 * Returns 0 on success, -EINVAL on NAK or if the port is no longer in the
 * topology, other negative errno on failure.
 */
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port, bool power_up)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		drm_dp_mst_topology_put_port(port);
		return -ENOMEM;
	}

	txmsg->dst = port->parent;
	build_power_updown_phy(txmsg, port->port_num, power_up);
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			ret = -EINVAL;
		else
			ret = 0;
	}
	kfree(txmsg);
	drm_dp_mst_topology_put_port(port);

	return ret;
}
EXPORT_SYMBOL(drm_dp_send_power_updown_phy);

int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
		struct drm_dp_mst_port *port,
		struct 
drm_dp_query_stream_enc_status_ack_reply *status)
{
	struct drm_dp_mst_topology_state *state;
	struct drm_dp_mst_atomic_payload *payload;
	struct drm_dp_sideband_msg_tx *txmsg;
	u8 nonce[7];
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port) {
		ret = -EINVAL;
		goto out_get_port;
	}

	/* random nonce required by the QUERY_STREAM_ENCRYPTION_STATUS request */
	get_random_bytes(nonce, sizeof(nonce));

	drm_modeset_lock(&mgr->base.lock, NULL);
	state = to_drm_dp_mst_topology_state(mgr->base.state);
	payload = drm_atomic_get_mst_payload_state(state, port);

	/*
	 * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
	 *  transaction at the MST Branch device directly connected to the
	 *  Source"
	 */
	txmsg->dst = mgr->mst_primary;

	build_query_stream_enc_status(txmsg, payload->vcpi, nonce);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
	if (ret < 0) {
		goto out;
	} else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
		drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
		ret = -ENXIO;
		goto out;
	}

	ret = 0;
	memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));

out:
	drm_modeset_unlock(&mgr->base.lock);
	drm_dp_mst_topology_put_port(port);
out_get_port:
	kfree(txmsg);
	return ret;
}
EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);

/* Program @payload's VCPI/slots into the local DPRX payload table. */
static int drm_dp_create_payload_at_dfp(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_atomic_payload *payload)
{
	return drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot,
					 payload->time_slots);
}

/*
 * Allocate @payload's bandwidth at the remote branch devices via an
 * ALLOCATE_PAYLOAD sideband message.
 */
static int drm_dp_create_payload_to_remote(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_atomic_payload *payload)
{
	int ret;
	struct drm_dp_mst_port *port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);

	if (!port)
		return -EIO;

	ret = drm_dp_payload_send_msg(mgr, port, payload->vcpi, payload->pbn);
	drm_dp_mst_topology_put_port(port);
	return ret;
}

/*
 * Tear down @payload in reverse allocation order: zero the remote
 * allocation first (if it got that far), then clear the local DPRX payload
 * table entry.
 */
static void drm_dp_destroy_payload_at_remote_and_dfp(struct drm_dp_mst_topology_mgr *mgr,
						     struct drm_dp_mst_topology_state *mst_state,
						     struct drm_dp_mst_atomic_payload *payload)
{
	drm_dbg_kms(mgr->dev, "\n");

	/* it's okay for these to fail */
	if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE) {
		drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0);
		payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_DFP;
	}

	if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_DFP)
		drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot, 0);
}

/**
 * drm_dp_add_payload_part1() - Execute payload update part 1
 * @mgr: Manager to use.
 * @mst_state: The MST atomic state
 * @payload: The payload to write
 *
 * Determines the starting time slot for the given payload, and programs the VCPI for this payload
 * into the DPCD of DPRX. After calling this, the driver should generate ACT and payload packets.
 *
 * Returns: 0 on success, error code on failure.
 */
int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
			     struct drm_dp_mst_topology_state *mst_state,
			     struct drm_dp_mst_atomic_payload *payload)
{
	struct drm_dp_mst_port *port;
	int ret;

	/* Update mst mgr info */
	if (mgr->payload_count == 0)
		mgr->next_start_slot = mst_state->start_slot;

	payload->vc_start_slot = mgr->next_start_slot;

	mgr->payload_count++;
	mgr->next_start_slot += payload->time_slots;

	payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL;

	/* Allocate payload to immediate downstream facing port */
	port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
	if (!port) {
		drm_dbg_kms(mgr->dev,
			    "VCPI %d for port %p not in topology, not creating a payload to remote\n",
			    payload->vcpi, payload->port);
		return -EIO;
	}

	ret = drm_dp_create_payload_at_dfp(mgr, payload);
	if (ret < 0) {
		drm_dbg_kms(mgr->dev, "Failed to create MST payload for port %p: %d\n",
			    payload->port, ret);
		goto put_port;
	}

	payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_DFP;

put_port:
	drm_dp_mst_topology_put_port(port);

	return ret;
}
EXPORT_SYMBOL(drm_dp_add_payload_part1);

/**
 * drm_dp_remove_payload_part1() - Remove an MST payload along the virtual channel
 * @mgr: Manager to use.
 * @mst_state: The MST atomic state
 * @payload: The payload to remove
 *
 * Removes a payload along the virtual channel if it was successfully allocated.
 * After calling this, the driver should set HW to generate ACT and then switch to new
 * payload allocation state.
 */
void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_topology_state *mst_state,
				 struct drm_dp_mst_atomic_payload *payload)
{
	/* Remove remote payload 
allocation */3375bool send_remove = false;33763377mutex_lock(&mgr->lock);3378send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary);3379mutex_unlock(&mgr->lock);33803381if (send_remove)3382drm_dp_destroy_payload_at_remote_and_dfp(mgr, mst_state, payload);3383else3384drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",3385payload->vcpi);33863387payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL;3388}3389EXPORT_SYMBOL(drm_dp_remove_payload_part1);33903391/**3392* drm_dp_remove_payload_part2() - Remove an MST payload locally3393* @mgr: Manager to use.3394* @mst_state: The MST atomic state3395* @old_payload: The payload with its old state3396* @new_payload: The payload with its latest state3397*3398* Updates the starting time slots of all other payloads which would have been shifted towards3399* the start of the payload ID table as a result of removing a payload. Driver should call this3400* function whenever it removes a payload in its HW. 
It's independent to the result of payload3401* allocation/deallocation at branch devices along the virtual channel.3402*/3403void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr,3404struct drm_dp_mst_topology_state *mst_state,3405const struct drm_dp_mst_atomic_payload *old_payload,3406struct drm_dp_mst_atomic_payload *new_payload)3407{3408struct drm_dp_mst_atomic_payload *pos;34093410/* Remove local payload allocation */3411list_for_each_entry(pos, &mst_state->payloads, next) {3412if (pos != new_payload && pos->vc_start_slot > new_payload->vc_start_slot)3413pos->vc_start_slot -= old_payload->time_slots;3414}3415new_payload->vc_start_slot = -1;34163417mgr->payload_count--;3418mgr->next_start_slot -= old_payload->time_slots;34193420if (new_payload->delete)3421drm_dp_mst_put_port_malloc(new_payload->port);34223423new_payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_NONE;3424}3425EXPORT_SYMBOL(drm_dp_remove_payload_part2);3426/**3427* drm_dp_add_payload_part2() - Execute payload update part 23428* @mgr: Manager to use.3429* @payload: The payload to update3430*3431* If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this3432* function will send the sideband messages to finish allocating this payload.3433*3434* Returns: 0 on success, negative error code on failure.3435*/3436int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,3437struct drm_dp_mst_atomic_payload *payload)3438{3439int ret = 0;34403441/* Skip failed payloads */3442if (payload->payload_allocation_status != DRM_DP_MST_PAYLOAD_ALLOCATION_DFP) {3443drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",3444payload->port->connector->name);3445return -EIO;3446}34473448/* Allocate payload to remote end */3449ret = drm_dp_create_payload_to_remote(mgr, payload);3450if (ret < 0)3451drm_err(mgr->dev, "Step 2 of creating MST payload for %p failed: %d\n",3452payload->port, 
ret);3453else3454payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE;34553456return ret;3457}3458EXPORT_SYMBOL(drm_dp_add_payload_part2);34593460static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,3461struct drm_dp_mst_port *port,3462int offset, int size, u8 *bytes)3463{3464int ret = 0;3465struct drm_dp_sideband_msg_tx *txmsg;3466struct drm_dp_mst_branch *mstb;34673468mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);3469if (!mstb)3470return -EINVAL;34713472txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);3473if (!txmsg) {3474ret = -ENOMEM;3475goto fail_put;3476}34773478build_dpcd_read(txmsg, port->port_num, offset, size);3479txmsg->dst = port->parent;34803481drm_dp_queue_down_tx(mgr, txmsg);34823483ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);3484if (ret < 0)3485goto fail_free;34863487if (txmsg->reply.reply_type == 1) {3488drm_dbg_kms(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",3489mstb, port->port_num, offset, size);3490ret = -EIO;3491goto fail_free;3492}34933494if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {3495ret = -EPROTO;3496goto fail_free;3497}34983499ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,3500size);3501memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);35023503fail_free:3504kfree(txmsg);3505fail_put:3506drm_dp_mst_topology_put_mstb(mstb);35073508return ret;3509}35103511static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,3512struct drm_dp_mst_port *port,3513int offset, int size, u8 *bytes)3514{3515int ret;3516struct drm_dp_sideband_msg_tx *txmsg;3517struct drm_dp_mst_branch *mstb;35183519mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);3520if (!mstb)3521return -EINVAL;35223523txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);3524if (!txmsg) {3525ret = -ENOMEM;3526goto fail_put;3527}35283529build_dpcd_write(txmsg, port->port_num, offset, size, bytes);3530txmsg->dst = mstb;35313532drm_dp_queue_down_tx(mgr, 
txmsg);35333534ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);3535if (ret > 0) {3536if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)3537ret = -EIO;3538else3539ret = size;3540}35413542kfree(txmsg);3543fail_put:3544drm_dp_mst_topology_put_mstb(mstb);3545return ret;3546}35473548static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)3549{3550struct drm_dp_sideband_msg_reply_body reply;35513552reply.reply_type = DP_SIDEBAND_REPLY_ACK;3553reply.req_type = req_type;3554drm_dp_encode_sideband_reply(&reply, msg);3555return 0;3556}35573558static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,3559struct drm_dp_mst_branch *mstb,3560int req_type, bool broadcast)3561{3562struct drm_dp_sideband_msg_tx *txmsg;35633564txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);3565if (!txmsg)3566return -ENOMEM;35673568txmsg->dst = mstb;3569drm_dp_encode_up_ack_reply(txmsg, req_type);35703571mutex_lock(&mgr->qlock);3572/* construct a chunk from the first msg in the tx_msg queue */3573process_single_tx_qlock(mgr, txmsg, true);3574mutex_unlock(&mgr->qlock);35753576kfree(txmsg);3577return 0;3578}35793580/**3581* drm_dp_get_vc_payload_bw - get the VC payload BW for an MTP link3582* @link_rate: link rate in 10kbits/s units3583* @link_lane_count: lane count3584*3585* Calculate the total bandwidth of a MultiStream Transport link. The returned3586* value is in units of PBNs/(timeslots/1 MTP). 
/**
 * drm_dp_get_vc_payload_bw - get the VC payload BW for an MTP link
 * @link_rate: link rate in 10kbits/s units
 * @link_lane_count: lane count
 *
 * Calculate the total bandwidth of a MultiStream Transport link. The returned
 * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
 * convert the number of PBNs required for a given stream to the number of
 * timeslots this stream requires in each MTP.
 *
 * Returns the BW / timeslot value in 20.12 fixed point format.
 */
fixed20_12 drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
{
	int ch_coding_efficiency =
		drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(link_rate));
	fixed20_12 ret;

	/* See DP v2.0 2.6.4.2, 2.7.6.3 VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
	ret.full = DIV_ROUND_DOWN_ULL(mul_u32_u32(link_rate * link_lane_count,
						  ch_coding_efficiency),
				      (1000000ULL * 8 * 5400) >> 12);

	return ret;
}
EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);

/**
 * drm_dp_read_mst_cap() - Read the sink's MST mode capability
 * @aux: The DP AUX channel to use
 * @dpcd: A cached copy of the DPCD capabilities for this sink
 *
 * Returns: enum drm_dp_mst_mode to indicate MST mode capability
 */
enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux,
					 const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	u8 mstm_cap;

	/* MST requires DPCD rev 1.2+; any read failure degrades to plain SST */
	if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
		return DRM_DP_SST;

	if (drm_dp_dpcd_read_byte(aux, DP_MSTM_CAP, &mstm_cap) < 0)
		return DRM_DP_SST;

	if (mstm_cap & DP_MST_CAP)
		return DRM_DP_MST;

	if (mstm_cap & DP_SINGLE_STREAM_SIDEBAND_MSG)
		return DRM_DP_SST_SIDEBAND_MSG;

	return DRM_DP_SST;
}
EXPORT_SYMBOL(drm_dp_read_mst_cap);

/**
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
 * @mgr: manager to set state for
 * @mst_state: true to enable MST on this connector - false to disable.
 *
 * This is called by the driver when it detects an MST capable device plugged
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 */
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
	int ret = 0;
	struct drm_dp_mst_branch *mstb = NULL;

	mutex_lock(&mgr->lock);
	if (mst_state == mgr->mst_state)
		goto out_unlock;

	mgr->mst_state = mst_state;
	/* set the device into MST mode */
	if (mst_state) {
		WARN_ON(mgr->mst_primary);

		/* get dpcd info */
		ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
		if (ret < 0) {
			drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",
				    mgr->aux->name, ret);
			goto out_unlock;
		}

		/* add initial branch device at LCT 1 */
		mstb = drm_dp_add_mst_branch_device(1, NULL);
		if (mstb == NULL) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		mstb->mgr = mgr;

		/* give this the main reference */
		mgr->mst_primary = mstb;
		drm_dp_mst_topology_get_mstb(mgr->mst_primary);

		ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
					     DP_MST_EN |
					     DP_UP_REQ_EN |
					     DP_UPSTREAM_IS_SRC);
		if (ret < 0)
			goto out_unlock;

		/* Write reset payload */
		drm_dp_dpcd_clear_payload(mgr->aux);

		drm_dp_mst_queue_probe_work(mgr);

		ret = 0;
	} else {
		/* disable MST on the device */
		mstb = mgr->mst_primary;
		mgr->mst_primary = NULL;
		/* this can fail if the device is gone */
		drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, 0);
		ret = 0;
		mgr->payload_id_table_cleared = false;

		mgr->reset_rx_state = true;
	}

out_unlock:
	mutex_unlock(&mgr->lock);
	/* Drop the primary's reference outside the lock (enable error paths too) */
	if (mstb)
		drm_dp_mst_topology_put_mstb(mstb);
	return ret;

}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);

/* Recursively mark every branch in the topology as needing a fresh link address probe. */
static void
drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;

	/* The link address will need to be re-sent on resume */
	mstb->link_address_sent = false;

	list_for_each_entry(port, &mstb->ports, next)
		if (port->mstb)
			drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
}
/**
 * drm_dp_mst_topology_queue_probe - Queue a topology probe
 * @mgr: manager to probe
 *
 * Queue a work to probe the MST topology. Driver's should call this only to
 * sync the topology's HW->SW state after the MST link's parameters have
 * changed in a way the state could've become out-of-sync. This is the case
 * for instance when the link rate between the source and first downstream
 * branch device has switched between UHBR and non-UHBR rates. Except of those
 * cases - for instance when a sink gets plugged/unplugged to a port - the SW
 * state will get updated automatically via MST UP message notifications.
 */
void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->lock);

	/* Probing only makes sense with MST enabled and a primary branch present */
	if (drm_WARN_ON(mgr->dev, !mgr->mst_state || !mgr->mst_primary))
		goto out_unlock;

	drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
	drm_dp_mst_queue_probe_work(mgr);

out_unlock:
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe);
/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
	/* Clear DP_UP_REQ_EN so the hub stops generating UP requests */
	mutex_lock(&mgr->lock);
	drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
			       DP_MST_EN | DP_UPSTREAM_IS_SRC);
	mutex_unlock(&mgr->lock);
	/* Drain all pending work before invalidating the topology */
	flush_work(&mgr->up_req_work);
	flush_work(&mgr->work);
	flush_work(&mgr->delayed_destroy_work);

	mutex_lock(&mgr->lock);
	if (mgr->mst_state && mgr->mst_primary)
		drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 * @sync: whether or not to perform topology reprobing synchronously
 *
 * This will fetch DPCD and see if the device is still there,
 * if it is, it will rewrite the MSTM control bits, and return.
 *
 * If the device fails this returns -1, and the driver should do
 * a full MST reprobe, in case we were undocked.
 *
 * During system resume (where it is assumed that the driver will be calling
 * drm_atomic_helper_resume()) this function should be called beforehand with
 * @sync set to true. In contexts like runtime resume where the driver is not
 * expected to be calling drm_atomic_helper_resume(), this function should be
 * called with @sync set to false in order to avoid deadlocking.
 *
 * Returns: -1 if the MST topology was removed while we were suspended, 0
 * otherwise.
 */
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
				   bool sync)
{
	u8 buf[UUID_SIZE];
	guid_t guid;
	int ret;

	mutex_lock(&mgr->lock);
	if (!mgr->mst_primary)
		goto out_fail;

	if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
		goto out_fail;
	}

	ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
				     DP_MST_EN |
				     DP_UP_REQ_EN |
				     DP_UPSTREAM_IS_SRC);
	if (ret < 0) {
		drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
		goto out_fail;
	}

	/* Some hubs forget their guids after they resume */
	ret = drm_dp_dpcd_read_data(mgr->aux, DP_GUID, buf, sizeof(buf));
	if (ret < 0) {
		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
		goto out_fail;
	}

	import_guid(&guid, buf);

	ret = drm_dp_check_mstb_guid(mgr->mst_primary, &guid);
	if (ret) {
		drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n");
		goto out_fail;
	}

	/*
	 * For the final step of resuming the topology, we need to bring the
	 * state of our in-memory topology back into sync with reality. So,
	 * restart the probing process as if we're probing a new hub
	 */
	drm_dp_mst_queue_probe_work(mgr);
	mutex_unlock(&mgr->lock);

	if (sync) {
		drm_dbg_kms(mgr->dev,
			    "Waiting for link probe work to finish re-syncing topology...\n");
		flush_work(&mgr->work);
	}

	return 0;

out_fail:
	mutex_unlock(&mgr->lock);
	return -1;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);

/* Reset a sideband RX buffer to its pristine (empty) state. */
static void reset_msg_rx_state(struct drm_dp_sideband_msg_rx *msg)
{
	memset(msg, 0, sizeof(*msg));
}

/*
 * Read one complete sideband message (down-reply or up-request) out of the
 * DPCD message mailbox, chunk by chunk. For down-replies, *mstb receives a
 * topology reference to the originating branch that the caller must put.
 */
static bool
drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
		      struct drm_dp_mst_branch **mstb)
{
	int len;
	u8 replyblock[32];
	int replylen, curreply;
	int ret;
	u8 hdrlen;
	struct drm_dp_sideband_msg_hdr hdr;
	struct drm_dp_sideband_msg_rx *msg =
		up ? &mgr->up_req_recv : &mgr->down_rep_recv;
	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
			   DP_SIDEBAND_MSG_DOWN_REP_BASE;

	if (!up)
		*mstb = NULL;

	len = min(mgr->max_dpcd_transaction_bytes, 16);
	ret = drm_dp_dpcd_read_data(mgr->aux, basereg, replyblock, len);
	if (ret < 0) {
		drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
		return false;
	}

	ret = drm_dp_decode_sideband_msg_hdr(mgr, &hdr, replyblock, len, &hdrlen);
	if (ret == false) {
		print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
			       1, replyblock, len, false);
		drm_dbg_kms(mgr->dev, "ERROR: failed header\n");
		return false;
	}

	if (!up) {
		/* Caller is responsible for giving back this reference */
		*mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
		if (!*mstb) {
			drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct);
			return false;
		}
	}

	if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
		drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0]);
		return false;
	}

	/* Append whatever payload followed the header in the first read */
	replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
	ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
	if (!ret) {
		drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0]);
		return false;
	}

	/* Read the remaining chunks of the message, bounded per AUX transaction */
	replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
	curreply = len;
	while (replylen > 0) {
		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
		ret = drm_dp_dpcd_read_data(mgr->aux, basereg + curreply,
					    replyblock, len);
		if (ret < 0) {
			drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
				    len, ret);
			return false;
		}

		ret = drm_dp_sideband_append_payload(msg, replyblock, len);
		if (!ret) {
			drm_dbg_kms(mgr->dev, "failed to build sideband msg\n");
			return false;
		}

		curreply += len;
		replylen -= len;
	}
	return true;
}

/* The low 7 bits of a sideband message's first byte hold the request type. */
static int get_msg_request_type(u8 data)
{
	return data & 0x7f;
}

/* Check that a received reply's request type matches the request we sent. */
static bool verify_rx_request_type(struct drm_dp_mst_topology_mgr *mgr,
				   const struct drm_dp_sideband_msg_tx *txmsg,
				   const struct drm_dp_sideband_msg_rx *rxmsg)
{
	const struct drm_dp_sideband_msg_hdr *hdr = &rxmsg->initial_hdr;
	const struct drm_dp_mst_branch *mstb = txmsg->dst;
	int tx_req_type = get_msg_request_type(txmsg->msg[0]);
	int rx_req_type = get_msg_request_type(rxmsg->msg[0]);
	char rad_str[64];

	if (tx_req_type == rx_req_type)
		return true;

	drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, rad_str, sizeof(rad_str));
	drm_dbg_kms(mgr->dev,
		    "Got unexpected MST reply, mstb: %p seqno: %d lct: %d rad: %s rx_req_type: %s (%02x) != tx_req_type: %s (%02x)\n",
		    mstb, hdr->seqno, mstb->lct, rad_str,
		    drm_dp_mst_req_type_str(rx_req_type), rx_req_type,
		    drm_dp_mst_req_type_str(tx_req_type), tx_req_type);

	return false;
}

/* Process a down-reply from the mailbox and match it to the queued TX message. */
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb = NULL;
	struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;

	if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
		goto out_clear_reply;

	/* Multi-packet message transmission, don't clear the reply */
	if (!msg->have_eomt)
		goto out;

	/* find the message */
	mutex_lock(&mgr->qlock);

	txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
					 struct drm_dp_sideband_msg_tx, next);

	/* Were we actually expecting a response, and from this mstb? */
	if (!txmsg || txmsg->dst != mstb) {
		struct drm_dp_sideband_msg_hdr *hdr;

		hdr = &msg->initial_hdr;
		drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n",
			    mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]);

		mutex_unlock(&mgr->qlock);

		goto out_clear_reply;
	}

	if (!verify_rx_request_type(mgr, txmsg, msg)) {
		mutex_unlock(&mgr->qlock);

		goto out_clear_reply;
	}

	drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply);

	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
		drm_dbg_kms(mgr->dev,
			    "Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
			    txmsg->reply.req_type,
			    drm_dp_mst_req_type_str(txmsg->reply.req_type),
			    txmsg->reply.u.nak.reason,
			    drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
			    txmsg->reply.u.nak.nak_data);
	}

	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
	list_del(&txmsg->next);

	mutex_unlock(&mgr->qlock);

	wake_up_all(&mgr->tx_waitq);

out_clear_reply:
	reset_msg_rx_state(msg);
out:
	if (mstb)
		drm_dp_mst_topology_put_mstb(mstb);

	return 0;
}

/* Has the initial link-address probe of the primary branch completed? */
static bool primary_mstb_probing_is_done(struct drm_dp_mst_topology_mgr *mgr)
{
	bool probing_done = false;

	mutex_lock(&mgr->lock);

	if (mgr->mst_primary && drm_dp_mst_topology_try_get_mstb(mgr->mst_primary)) {
		probing_done = mgr->mst_primary->link_address_sent;
		drm_dp_mst_topology_put_mstb(mgr->mst_primary);
	}

	mutex_unlock(&mgr->lock);

	return probing_done;
}

/*
 * Dispatch one queued UP request (CSN/RSN) to the matching branch device.
 * Returns true if the event should trigger a hotplug notification.
 */
static inline bool
drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
			  struct drm_dp_pending_up_req *up_req)
{
	struct drm_dp_mst_branch *mstb = NULL;
	struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
	struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
	bool hotplug = false, dowork = false;

	if (hdr->broadcast) {
		/* Broadcast messages identify the branch by GUID, not by RAD */
		const guid_t *guid = NULL;

		if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
			guid = &msg->u.conn_stat.guid;
		else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
			guid = &msg->u.resource_stat.guid;

		if (guid)
			mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
	} else {
		mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
	}

	if (!mstb) {
		drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr->lct);
		return false;
	}

	/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
	if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
		if (!primary_mstb_probing_is_done(mgr)) {
			drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.\n");
		} else {
			dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
			hotplug = true;
		}
	}

	drm_dp_mst_topology_put_mstb(mstb);

	if (dowork)
		queue_work(system_long_wq, &mgr->work);
	return hotplug;
}

/* Worker: drain the pending UP request list and emit one hotplug event if needed. */
static void drm_dp_mst_up_req_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr =
		container_of(work, struct drm_dp_mst_topology_mgr,
			     up_req_work);
	struct drm_dp_pending_up_req *up_req;
	bool send_hotplug = false;

	mutex_lock(&mgr->probe_lock);
	while (true) {
		mutex_lock(&mgr->up_req_lock);
		up_req = list_first_entry_or_null(&mgr->up_req_list,
						  struct drm_dp_pending_up_req,
						  next);
		if (up_req)
			list_del(&up_req->next);
		mutex_unlock(&mgr->up_req_lock);

		if (!up_req)
			break;

		send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
		kfree(up_req);
	}
	mutex_unlock(&mgr->probe_lock);

	if (send_hotplug)
		drm_kms_helper_hotplug_event(mgr->dev);
}

/* Receive one UP request from the mailbox, ACK it, and queue it for the worker. */
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_pending_up_req *up_req;
	struct drm_dp_mst_branch *mst_primary;
	int ret = 0;

	if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
		goto out_clear_reply;

	/* Not the final chunk yet; keep accumulated RX state */
	if (!mgr->up_req_recv.have_eomt)
		return 0;

	up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
	if (!up_req) {
		ret = -ENOMEM;
		goto out_clear_reply;
	}

	INIT_LIST_HEAD(&up_req->next);

	drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg);

	if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
	    up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
		drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
			    up_req->msg.req_type);
		kfree(up_req);
		goto out_clear_reply;
	}

	/* Hold a reference on the primary across the ACK so it can't vanish */
	mutex_lock(&mgr->lock);
	mst_primary = mgr->mst_primary;
	if (!mst_primary || !drm_dp_mst_topology_try_get_mstb(mst_primary)) {
		mutex_unlock(&mgr->lock);
		kfree(up_req);
		goto out_clear_reply;
	}
	mutex_unlock(&mgr->lock);

	drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type,
				 false);

	drm_dp_mst_topology_put_mstb(mst_primary);

	if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
		const struct drm_dp_connection_status_notify *conn_stat =
			&up_req->msg.u.conn_stat;

		drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
			    conn_stat->port_number,
			    conn_stat->legacy_device_plug_status,
			    conn_stat->displayport_device_plug_status,
			    conn_stat->message_capability_status,
			    conn_stat->input_port,
			    conn_stat->peer_device_type);
	} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
		const struct drm_dp_resource_status_notify *res_stat =
			&up_req->msg.u.resource_stat;

		drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n",
			    res_stat->port_number,
			    res_stat->available_pbn);
	}

	up_req->hdr = mgr->up_req_recv.initial_hdr;
	mutex_lock(&mgr->up_req_lock);
	list_add_tail(&up_req->next, &mgr->up_req_list);
	mutex_unlock(&mgr->up_req_lock);
	queue_work(system_long_wq, &mgr->up_req_work);
out_clear_reply:
	reset_msg_rx_state(&mgr->up_req_recv);
	return ret;
}

/* Apply a deferred RX-state reset requested while MST was being disabled. */
static void update_msg_rx_state(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->lock);
	if (mgr->reset_rx_state) {
		mgr->reset_rx_state = false;
		reset_msg_rx_state(&mgr->down_rep_recv);
		reset_msg_rx_state(&mgr->up_req_recv);
	}
	mutex_unlock(&mgr->lock);
}
/**
 * drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
 * @mgr: manager to notify irq for.
 * @esi: 4 bytes from SINK_COUNT_ESI
 * @ack: 4 bytes used to ack events starting from SINK_COUNT_ESI
 * @handled: whether the hpd interrupt was consumed or not
 *
 * This should be called from the driver when it detects a HPD IRQ,
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
 * topology manager will process the sideband messages received
 * as indicated in the DEVICE_SERVICE_IRQ_VECTOR_ESI0 and set the
 * corresponding flags that Driver has to ack the DP receiver later.
 *
 * Note that driver shall also call
 * drm_dp_mst_hpd_irq_send_new_request() if the 'handled' is set
 * after calling this function, to try to kick off a new request in
 * the queue if the previous message transaction is completed.
 *
 * See also:
 * drm_dp_mst_hpd_irq_send_new_request()
 */
int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u8 *esi,
				    u8 *ack, bool *handled)
{
	int ret = 0;
	int sc;
	*handled = false;
	sc = DP_GET_SINK_COUNT(esi[0]);

	/* A sink-count change alone is enough to consume the IRQ */
	if (sc != mgr->sink_count) {
		mgr->sink_count = sc;
		*handled = true;
	}

	update_msg_rx_state(mgr);

	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
		ret = drm_dp_mst_handle_down_rep(mgr);
		*handled = true;
		ack[1] |= DP_DOWN_REP_MSG_RDY;
	}

	if (esi[1] & DP_UP_REQ_MSG_RDY) {
		ret |= drm_dp_mst_handle_up_req(mgr);
		*handled = true;
		ack[1] |= DP_UP_REQ_MSG_RDY;
	}

	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq_handle_event);
/**
 * drm_dp_mst_hpd_irq_send_new_request() - MST hotplug IRQ kick off new request
 * @mgr: manager to notify irq for.
 *
 * This should be called from the driver when mst irq event is handled
 * and acked. Note that new down request should only be sent when
 * previous message transaction is completed. Source is not supposed to generate
 * interleaved message transactions.
 */
void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	bool kick = true;

	mutex_lock(&mgr->qlock);
	txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
					 struct drm_dp_sideband_msg_tx, next);
	/* If last transaction is not completed yet*/
	if (!txmsg ||
	    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
	    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
		kick = false;
	mutex_unlock(&mgr->qlock);

	if (kick)
		drm_dp_mst_kick_tx(mgr);
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq_send_new_request);
/**
 * drm_dp_mst_detect_port() - get connection status for an MST port
 * @connector: DRM connector for this port
 * @ctx: The acquisition context to use for grabbing locks
 * @mgr: manager for this port
 * @port: pointer to a port
 *
 * This returns the current connection state for a port.
 */
int
drm_dp_mst_detect_port(struct drm_connector *connector,
		       struct drm_modeset_acquire_ctx *ctx,
		       struct drm_dp_mst_topology_mgr *mgr,
		       struct drm_dp_mst_port *port)
{
	int ret;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return connector_status_disconnected;

	ret = drm_modeset_lock(&mgr->base.lock, ctx);
	if (ret)
		goto out;

	ret = connector_status_disconnected;

	if (!port->ddps)
		goto out;

	switch (port->pdt) {
	case DP_PEER_DEVICE_NONE:
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		/* A branch with message capability is not itself a sink */
		if (!port->mcs)
			ret = connector_status_connected;
		break;

	case DP_PEER_DEVICE_SST_SINK:
		ret = connector_status_connected;
		/* for logical ports - cache the EDID */
		if (drm_dp_mst_port_is_logical(port) && !port->cached_edid)
			port->cached_edid = drm_edid_read_ddc(connector, &port->aux.ddc);
		break;
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
		if (port->ldps)
			ret = connector_status_connected;
		break;
	}
out:
	drm_dp_mst_topology_put_port(port);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);

/**
 * drm_dp_mst_edid_read() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns an EDID for the port connected to a connector,
 * It validates the pointer still exists so the caller doesn't require a
 * reference.
 */
const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector,
					    struct drm_dp_mst_topology_mgr *mgr,
					    struct drm_dp_mst_port *port)
{
	const struct drm_edid *drm_edid;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return NULL;

	if (port->cached_edid)
		drm_edid = drm_edid_dup(port->cached_edid);
	else
		drm_edid = drm_edid_read_ddc(connector, &port->aux.ddc);

	drm_dp_mst_topology_put_port(port);

	return drm_edid;
}
EXPORT_SYMBOL(drm_dp_mst_edid_read);

/**
 * drm_dp_mst_get_edid() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This function is deprecated; please use drm_dp_mst_edid_read() instead.
 *
 * This returns an EDID for the port connected to a connector,
 * It validates the pointer still exists so the caller doesn't require a
 * reference.
 */
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port)
{
	const struct drm_edid *drm_edid;
	struct edid *edid;

	drm_edid = drm_dp_mst_edid_read(connector, mgr, port);

	edid = drm_edid_duplicate(drm_edid_raw(drm_edid));

	drm_edid_free(drm_edid);

	return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);

/**
 * drm_dp_atomic_find_time_slots() - Find and add time slots to the state
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: port to find time slots for
 * @pbn: bandwidth required for the mode in PBN
 *
 * Allocates time slots to @port, replacing any previous time slot allocations it may
 * have had. Any atomic drivers which support MST must call this function in
 * their &drm_encoder_helper_funcs.atomic_check() callback unconditionally to
 * change the current time slot allocation for the new state, and ensure the MST
 * atomic state is added whenever the state of payloads in the topology changes.
 *
 * Allocations set by this function are not checked against the bandwidth
 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
 *
 * Additionally, it is OK to call this function multiple times on the same
 * @port as needed. It is not OK however, to call this function and
 * drm_dp_atomic_release_time_slots() in the same atomic check phase.
 *
 * See also:
 * drm_dp_atomic_release_time_slots()
 * drm_dp_mst_atomic_check()
 *
 * Returns:
 * Total slots in the atomic state assigned for this port, or a negative error
 * code if the port no longer exists
 */
int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port, int pbn)
{
	struct drm_dp_mst_topology_state *topology_state;
	struct drm_dp_mst_atomic_payload *payload = NULL;
	struct drm_connector_state *conn_state;
	int prev_slots = 0, prev_bw = 0, req_slots;

	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(topology_state))
		return PTR_ERR(topology_state);

	conn_state = drm_atomic_get_new_connector_state(state, port->connector);
	topology_state->pending_crtc_mask |= drm_crtc_mask(conn_state->crtc);

	/* Find the current allocation for this port, if any */
	payload = drm_atomic_get_mst_payload_state(topology_state, port);
	if (payload) {
		prev_slots = payload->time_slots;
		prev_bw = payload->pbn;

		/*
		 * This should never happen, unless the driver tries
		 * releasing and allocating the same timeslot allocation,
		 * which is an error
		 */
		if (drm_WARN_ON(mgr->dev, payload->delete)) {
			drm_err(mgr->dev,
				"cannot allocate and release time slots on [MST PORT:%p] in the same state\n",
				port);
			return -EINVAL;
		}
	}

	req_slots = DIV_ROUND_UP(dfixed_const(pbn), topology_state->pbn_div.full);

	drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n",
		       port->connector->base.id, port->connector->name,
		       port, prev_slots, req_slots);
	drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
		       port->connector->base.id, port->connector->name,
		       port, prev_bw, pbn);

	/* Add the new allocation to the state, note the VCPI isn't assigned until the end */
	if (!payload) {
		payload = kzalloc(sizeof(*payload), GFP_KERNEL);
		if (!payload)
			return -ENOMEM;

		drm_dp_mst_get_port_malloc(port);
		payload->port = port;
		payload->vc_start_slot = -1;
		payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_NONE;
		list_add(&payload->next, &topology_state->payloads);
	}
	payload->time_slots = req_slots;
	payload->pbn = pbn;

	return req_slots;
}
EXPORT_SYMBOL(drm_dp_atomic_find_time_slots);
Any atomic drivers which support MST must call this function4506* unconditionally in their &drm_connector_helper_funcs.atomic_check() callback.4507* This helper will check whether time slots would be released by the new state and4508* respond accordingly, along with ensuring the MST state is always added to the4509* atomic state whenever a new state would modify the state of payloads on the4510* topology.4511*4512* It is OK to call this even if @port has been removed from the system.4513* Additionally, it is OK to call this function multiple times on the same4514* @port as needed. It is not OK however, to call this function and4515* drm_dp_atomic_find_time_slots() on the same @port in a single atomic check4516* phase.4517*4518* See also:4519* drm_dp_atomic_find_time_slots()4520* drm_dp_mst_atomic_check()4521*4522* Returns:4523* 0 on success, negative error code otherwise4524*/4525int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,4526struct drm_dp_mst_topology_mgr *mgr,4527struct drm_dp_mst_port *port)4528{4529struct drm_dp_mst_topology_state *topology_state;4530struct drm_dp_mst_atomic_payload *payload;4531struct drm_connector_state *old_conn_state, *new_conn_state;4532bool update_payload = true;45334534old_conn_state = drm_atomic_get_old_connector_state(state, port->connector);4535if (!old_conn_state->crtc)4536return 0;45374538/* If the CRTC isn't disabled by this state, don't release it's payload */4539new_conn_state = drm_atomic_get_new_connector_state(state, port->connector);4540if (new_conn_state->crtc) {4541struct drm_crtc_state *crtc_state =4542drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);45434544/* No modeset means no payload changes, so it's safe to not pull in the MST state */4545if (!crtc_state || !drm_atomic_crtc_needs_modeset(crtc_state))4546return 0;45474548if (!crtc_state->mode_changed && !crtc_state->connectors_changed)4549update_payload = false;4550}45514552topology_state = drm_atomic_get_mst_topology_state(state, 
mgr);4553if (IS_ERR(topology_state))4554return PTR_ERR(topology_state);45554556topology_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);4557if (!update_payload)4558return 0;45594560payload = drm_atomic_get_mst_payload_state(topology_state, port);4561if (WARN_ON(!payload)) {4562drm_err(mgr->dev, "No payload for [MST PORT:%p] found in mst state %p\n",4563port, &topology_state->base);4564return -EINVAL;4565}45664567if (new_conn_state->crtc)4568return 0;45694570drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);4571if (!payload->delete) {4572payload->pbn = 0;4573payload->delete = true;4574topology_state->payload_mask &= ~BIT(payload->vcpi - 1);4575}45764577return 0;4578}4579EXPORT_SYMBOL(drm_dp_atomic_release_time_slots);45804581/**4582* drm_dp_mst_atomic_setup_commit() - setup_commit hook for MST helpers4583* @state: global atomic state4584*4585* This function saves all of the &drm_crtc_commit structs in an atomic state that touch any CRTCs4586* currently assigned to an MST topology. 
Drivers must call this hook from their4587* &drm_mode_config_helper_funcs.atomic_commit_setup hook.4588*4589* Returns:4590* 0 if all CRTC commits were retrieved successfully, negative error code otherwise4591*/4592int drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state)4593{4594struct drm_dp_mst_topology_mgr *mgr;4595struct drm_dp_mst_topology_state *mst_state;4596struct drm_crtc *crtc;4597struct drm_crtc_state *crtc_state;4598int i, j, commit_idx, num_commit_deps;45994600for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {4601if (!mst_state->pending_crtc_mask)4602continue;46034604num_commit_deps = hweight32(mst_state->pending_crtc_mask);4605mst_state->commit_deps = kmalloc_array(num_commit_deps,4606sizeof(*mst_state->commit_deps), GFP_KERNEL);4607if (!mst_state->commit_deps)4608return -ENOMEM;4609mst_state->num_commit_deps = num_commit_deps;46104611commit_idx = 0;4612for_each_new_crtc_in_state(state, crtc, crtc_state, j) {4613if (mst_state->pending_crtc_mask & drm_crtc_mask(crtc)) {4614mst_state->commit_deps[commit_idx++] =4615drm_crtc_commit_get(crtc_state->commit);4616}4617}4618}46194620return 0;4621}4622EXPORT_SYMBOL(drm_dp_mst_atomic_setup_commit);46234624/**4625* drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies,4626* prepare new MST state for commit4627* @state: global atomic state4628*4629* Goes through any MST topologies in this atomic state, and waits for any pending commits which4630* touched CRTCs that were/are on an MST topology to be programmed to hardware and flipped to before4631* returning. 
This is to prevent multiple non-blocking commits affecting an MST topology from racing4632* with eachother by forcing them to be executed sequentially in situations where the only resources4633* the modeset objects in these commits share are an MST topology.4634*4635* This function also prepares the new MST state for commit by performing some state preparation4636* which can't be done until this point, such as reading back the final VC start slots (which are4637* determined at commit-time) from the previous state.4638*4639* All MST drivers must call this function after calling drm_atomic_helper_wait_for_dependencies(),4640* or whatever their equivalent of that is.4641*/4642void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state)4643{4644struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;4645struct drm_dp_mst_topology_mgr *mgr;4646struct drm_dp_mst_atomic_payload *old_payload, *new_payload;4647int i, j, ret;46484649for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state, new_mst_state, i) {4650for (j = 0; j < old_mst_state->num_commit_deps; j++) {4651ret = drm_crtc_commit_wait(old_mst_state->commit_deps[j]);4652if (ret < 0)4653drm_err(state->dev, "Failed to wait for %s: %d\n",4654old_mst_state->commit_deps[j]->crtc->name, ret);4655}46564657/* Now that previous state is committed, it's safe to copy over the start slot4658* and allocation status assignments4659*/4660list_for_each_entry(old_payload, &old_mst_state->payloads, next) {4661if (old_payload->delete)4662continue;46634664new_payload = drm_atomic_get_mst_payload_state(new_mst_state,4665old_payload->port);4666new_payload->vc_start_slot = old_payload->vc_start_slot;4667new_payload->payload_allocation_status =4668old_payload->payload_allocation_status;4669}4670}4671}4672EXPORT_SYMBOL(drm_dp_mst_atomic_wait_for_dependencies);46734674/**4675* drm_dp_mst_root_conn_atomic_check() - Serialize CRTC commits on MST-capable connectors operating4676* in SST mode4677* @new_conn_state: The 
new connector state of the &drm_connector4678* @mgr: The MST topology manager for the &drm_connector4679*4680* Since MST uses fake &drm_encoder structs, the generic atomic modesetting code isn't able to4681* serialize non-blocking commits happening on the real DP connector of an MST topology switching4682* into/away from MST mode - as the CRTC on the real DP connector and the CRTCs on the connector's4683* MST topology will never share the same &drm_encoder.4684*4685* This function takes care of this serialization issue, by checking a root MST connector's atomic4686* state to determine if it is about to have a modeset - and then pulling in the MST topology state4687* if so, along with adding any relevant CRTCs to &drm_dp_mst_topology_state.pending_crtc_mask.4688*4689* Drivers implementing MST must call this function from the4690* &drm_connector_helper_funcs.atomic_check hook of any physical DP &drm_connector capable of4691* driving MST sinks.4692*4693* Returns:4694* 0 on success, negative error code otherwise4695*/4696int drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,4697struct drm_dp_mst_topology_mgr *mgr)4698{4699struct drm_atomic_state *state = new_conn_state->state;4700struct drm_connector_state *old_conn_state =4701drm_atomic_get_old_connector_state(state, new_conn_state->connector);4702struct drm_crtc_state *crtc_state;4703struct drm_dp_mst_topology_state *mst_state = NULL;47044705if (new_conn_state->crtc) {4706crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);4707if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {4708mst_state = drm_atomic_get_mst_topology_state(state, mgr);4709if (IS_ERR(mst_state))4710return PTR_ERR(mst_state);47114712mst_state->pending_crtc_mask |= drm_crtc_mask(new_conn_state->crtc);4713}4714}47154716if (old_conn_state->crtc) {4717crtc_state = drm_atomic_get_new_crtc_state(state, old_conn_state->crtc);4718if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {4719if 
(!mst_state) {4720mst_state = drm_atomic_get_mst_topology_state(state, mgr);4721if (IS_ERR(mst_state))4722return PTR_ERR(mst_state);4723}47244725mst_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);4726}4727}47284729return 0;4730}4731EXPORT_SYMBOL(drm_dp_mst_root_conn_atomic_check);47324733/**4734* drm_dp_mst_update_slots() - updates the slot info depending on the DP ecoding format4735* @mst_state: mst_state to update4736* @link_encoding_cap: the ecoding format on the link4737*/4738void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)4739{4740if (link_encoding_cap == DP_CAP_ANSI_128B132B) {4741mst_state->total_avail_slots = 64;4742mst_state->start_slot = 0;4743} else {4744mst_state->total_avail_slots = 63;4745mst_state->start_slot = 1;4746}47474748DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",4749(link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b":"8b/10b",4750mst_state);4751}4752EXPORT_SYMBOL(drm_dp_mst_update_slots);47534754/**4755* drm_dp_check_act_status() - Polls for ACT handled status.4756* @mgr: manager to use4757*4758* Tries waiting for the MST hub to finish updating it's payload table by4759* polling for the ACT handled bit for up to 3 seconds (yes-some hubs really4760* take that long).4761*4762* Returns:4763* 0 if the ACT was handled in time, negative error code on failure.4764*/4765int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)4766{4767/*4768* There doesn't seem to be any recommended retry count or timeout in4769* the MST specification. 
Since some hubs have been observed to take4770* over 1 second to update their payload allocations under certain4771* conditions, we use a rather large timeout value of 3 seconds.4772*/4773return drm_dp_dpcd_poll_act_handled(mgr->aux, 3000);4774}4775EXPORT_SYMBOL(drm_dp_check_act_status);47764777/**4778* drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.4779* @clock: dot clock4780* @bpp: bpp as .4 binary fixed point4781*4782* This uses the formula in the spec to calculate the PBN value for a mode.4783*/4784int drm_dp_calc_pbn_mode(int clock, int bpp)4785{4786/*4787* The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on4788* common multiplier to render an integer PBN for all link rate/lane4789* counts combinations4790* calculate4791* peak_kbps = clock * bpp / 164792* peak_kbps *= SSC overhead / 10000004793* peak_kbps /= 8 convert to Kbytes4794* peak_kBps *= (64/54) / 1000 convert to PBN4795*/4796/*4797* TODO: Use the actual link and mode parameters to calculate4798* the overhead. For now it's assumed that these are4799* 4 link lanes, 4096 hactive pixels, which don't add any4800* significant data padding overhead and that there is no DSC4801* or FEC overhead.4802*/4803int overhead = drm_dp_bw_overhead(4, 4096, 0, bpp,4804DRM_DP_BW_OVERHEAD_MST |4805DRM_DP_BW_OVERHEAD_SSC_REF_CLK);48064807return DIV64_U64_ROUND_UP(mul_u32_u32(clock * bpp, 64 * overhead >> 4),48081000000ULL * 8 * 54 * 1000);4809}4810EXPORT_SYMBOL(drm_dp_calc_pbn_mode);48114812/* we want to kick the TX after we've ack the up/down IRQs. 
*/4813static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)4814{4815queue_work(system_long_wq, &mgr->tx_work);4816}48174818/*4819* Helper function for parsing DP device types into convenient strings4820* for use with dp_mst_topology4821*/4822static const char *pdt_to_string(u8 pdt)4823{4824switch (pdt) {4825case DP_PEER_DEVICE_NONE:4826return "NONE";4827case DP_PEER_DEVICE_SOURCE_OR_SST:4828return "SOURCE OR SST";4829case DP_PEER_DEVICE_MST_BRANCHING:4830return "MST BRANCHING";4831case DP_PEER_DEVICE_SST_SINK:4832return "SST SINK";4833case DP_PEER_DEVICE_DP_LEGACY_CONV:4834return "DP LEGACY CONV";4835default:4836return "ERR";4837}4838}48394840static void drm_dp_mst_dump_mstb(struct seq_file *m,4841struct drm_dp_mst_branch *mstb)4842{4843struct drm_dp_mst_port *port;4844int tabs = mstb->lct;4845char prefix[10];4846int i;48474848for (i = 0; i < tabs; i++)4849prefix[i] = '\t';4850prefix[i] = '\0';48514852seq_printf(m, "%smstb - [%p]: num_ports: %d\n", prefix, mstb, mstb->num_ports);4853list_for_each_entry(port, &mstb->ports, next) {4854seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n",4855prefix,4856port->port_num,4857port,4858port->input ? "input" : "output",4859pdt_to_string(port->pdt),4860port->ddps,4861port->ldps,4862port->num_sdp_streams,4863port->num_sdp_stream_sinks,4864port->fec_capable ? 
"true" : "false",4865port->connector);4866if (port->mstb)4867drm_dp_mst_dump_mstb(m, port->mstb);4868}4869}48704871#define DP_PAYLOAD_TABLE_SIZE 6448724873static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,4874char *buf)4875{4876int i;48774878for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {4879if (drm_dp_dpcd_read_data(mgr->aux,4880DP_PAYLOAD_TABLE_UPDATE_STATUS + i,4881&buf[i], 16) < 0)4882return false;4883}4884return true;4885}48864887static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,4888struct drm_dp_mst_port *port, char *name,4889int namelen)4890{4891struct edid *mst_edid;48924893mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);4894drm_edid_get_monitor_name(mst_edid, name, namelen);4895kfree(mst_edid);4896}48974898/**4899* drm_dp_mst_dump_topology(): dump topology to seq file.4900* @m: seq_file to dump output to4901* @mgr: manager to dump current topology for.4902*4903* helper to dump MST topology to a seq file for debugfs.4904*/4905void drm_dp_mst_dump_topology(struct seq_file *m,4906struct drm_dp_mst_topology_mgr *mgr)4907{4908struct drm_dp_mst_topology_state *state;4909struct drm_dp_mst_atomic_payload *payload;4910int i, ret;49114912static const char *const status[] = {4913"None",4914"Local",4915"DFP",4916"Remote",4917};49184919mutex_lock(&mgr->lock);4920if (mgr->mst_primary)4921drm_dp_mst_dump_mstb(m, mgr->mst_primary);49224923/* dump VCPIs */4924mutex_unlock(&mgr->lock);49254926ret = drm_modeset_lock_single_interruptible(&mgr->base.lock);4927if (ret < 0)4928return;49294930state = to_drm_dp_mst_topology_state(mgr->base.state);4931seq_printf(m, "\n*** Atomic state info ***\n");4932seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n",4933state->payload_mask, mgr->max_payloads, state->start_slot,4934dfixed_trunc(state->pbn_div));49354936seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | status | sink name |\n");4937for (i = 0; i < mgr->max_payloads; i++) 
{4938list_for_each_entry(payload, &state->payloads, next) {4939char name[14];49404941if (payload->vcpi != i || payload->delete)4942continue;49434944fetch_monitor_name(mgr, payload->port, name, sizeof(name));4945seq_printf(m, " %5d %6d %6d %02d - %02d %5d %5s %8s %19s\n",4946i,4947payload->port->port_num,4948payload->vcpi,4949payload->vc_start_slot,4950payload->vc_start_slot + payload->time_slots - 1,4951payload->pbn,4952payload->dsc_enabled ? "Y" : "N",4953status[payload->payload_allocation_status],4954(*name != 0) ? name : "Unknown");4955}4956}49574958seq_printf(m, "\n*** DPCD Info ***\n");4959mutex_lock(&mgr->lock);4960if (mgr->mst_primary) {4961u8 buf[DP_PAYLOAD_TABLE_SIZE];4962int ret;49634964if (drm_dp_read_dpcd_caps(mgr->aux, buf) < 0) {4965seq_printf(m, "dpcd read failed\n");4966goto out;4967}4968seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);49694970ret = drm_dp_dpcd_read_data(mgr->aux, DP_FAUX_CAP, buf, 2);4971if (ret < 0) {4972seq_printf(m, "faux/mst read failed\n");4973goto out;4974}4975seq_printf(m, "faux/mst: %*ph\n", 2, buf);49764977ret = drm_dp_dpcd_read_data(mgr->aux, DP_MSTM_CTRL, buf, 1);4978if (ret < 0) {4979seq_printf(m, "mst ctrl read failed\n");4980goto out;4981}4982seq_printf(m, "mst ctrl: %*ph\n", 1, buf);49834984/* dump the standard OUI branch header */4985ret = drm_dp_dpcd_read_data(mgr->aux, DP_BRANCH_OUI, buf,4986DP_BRANCH_OUI_HEADER_SIZE);4987if (ret < 0) {4988seq_printf(m, "branch oui read failed\n");4989goto out;4990}4991seq_printf(m, "branch oui: %*phN devid: ", 3, buf);49924993for (i = 0x3; i < 0x8 && buf[i]; i++)4994seq_putc(m, buf[i]);4995seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",4996buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);4997if (dump_dp_payload_table(mgr, buf))4998seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);4999}50005001out:5002mutex_unlock(&mgr->lock);5003drm_modeset_unlock(&mgr->base.lock);5004}5005EXPORT_SYMBOL(drm_dp_mst_dump_topology);50065007static void 
drm_dp_tx_work(struct work_struct *work)5008{5009struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);50105011mutex_lock(&mgr->qlock);5012if (!list_empty(&mgr->tx_msg_downq))5013process_single_down_tx_qlock(mgr);5014mutex_unlock(&mgr->qlock);5015}50165017static inline void5018drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)5019{5020drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);50215022if (port->connector) {5023drm_connector_unregister(port->connector);5024drm_connector_put(port->connector);5025}50265027drm_dp_mst_put_port_malloc(port);5028}50295030static inline void5031drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)5032{5033struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;5034struct drm_dp_mst_port *port, *port_tmp;5035struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;5036bool wake_tx = false;50375038mutex_lock(&mgr->lock);5039list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {5040list_del(&port->next);5041drm_dp_mst_topology_put_port(port);5042}5043mutex_unlock(&mgr->lock);50445045/* drop any tx slot msg */5046mutex_lock(&mstb->mgr->qlock);5047list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {5048if (txmsg->dst != mstb)5049continue;50505051txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;5052list_del(&txmsg->next);5053wake_tx = true;5054}5055mutex_unlock(&mstb->mgr->qlock);50565057if (wake_tx)5058wake_up_all(&mstb->mgr->tx_waitq);50595060drm_dp_mst_put_mstb_malloc(mstb);5061}50625063static void drm_dp_delayed_destroy_work(struct work_struct *work)5064{5065struct drm_dp_mst_topology_mgr *mgr =5066container_of(work, struct drm_dp_mst_topology_mgr,5067delayed_destroy_work);5068bool send_hotplug = false, go_again;50695070/*5071* Not a regular list traverse as we have to drop the destroy5072* connector lock before destroying the mstb/port, to avoid AB->BA5073* ordering between this lock and the config mutex.5074*/5075do {5076go_again = false;50775078for (;;) {5079struct 
drm_dp_mst_branch *mstb;50805081mutex_lock(&mgr->delayed_destroy_lock);5082mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,5083struct drm_dp_mst_branch,5084destroy_next);5085if (mstb)5086list_del(&mstb->destroy_next);5087mutex_unlock(&mgr->delayed_destroy_lock);50885089if (!mstb)5090break;50915092drm_dp_delayed_destroy_mstb(mstb);5093go_again = true;5094}50955096for (;;) {5097struct drm_dp_mst_port *port;50985099mutex_lock(&mgr->delayed_destroy_lock);5100port = list_first_entry_or_null(&mgr->destroy_port_list,5101struct drm_dp_mst_port,5102next);5103if (port)5104list_del(&port->next);5105mutex_unlock(&mgr->delayed_destroy_lock);51065107if (!port)5108break;51095110drm_dp_delayed_destroy_port(port);5111send_hotplug = true;5112go_again = true;5113}5114} while (go_again);51155116if (send_hotplug)5117drm_kms_helper_hotplug_event(mgr->dev);5118}51195120static struct drm_private_state *5121drm_dp_mst_duplicate_state(struct drm_private_obj *obj)5122{5123struct drm_dp_mst_topology_state *state, *old_state =5124to_dp_mst_topology_state(obj->state);5125struct drm_dp_mst_atomic_payload *pos, *payload;51265127state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);5128if (!state)5129return NULL;51305131__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);51325133INIT_LIST_HEAD(&state->payloads);5134state->commit_deps = NULL;5135state->num_commit_deps = 0;5136state->pending_crtc_mask = 0;51375138list_for_each_entry(pos, &old_state->payloads, next) {5139/* Prune leftover freed timeslot allocations */5140if (pos->delete)5141continue;51425143payload = kmemdup(pos, sizeof(*payload), GFP_KERNEL);5144if (!payload)5145goto fail;51465147drm_dp_mst_get_port_malloc(payload->port);5148list_add(&payload->next, &state->payloads);5149}51505151return &state->base;51525153fail:5154list_for_each_entry_safe(pos, payload, &state->payloads, next) {5155drm_dp_mst_put_port_malloc(pos->port);5156kfree(pos);5157}5158kfree(state);51595160return NULL;5161}51625163static void 
drm_dp_mst_destroy_state(struct drm_private_obj *obj,5164struct drm_private_state *state)5165{5166struct drm_dp_mst_topology_state *mst_state =5167to_dp_mst_topology_state(state);5168struct drm_dp_mst_atomic_payload *pos, *tmp;5169int i;51705171list_for_each_entry_safe(pos, tmp, &mst_state->payloads, next) {5172/* We only keep references to ports with active payloads */5173if (!pos->delete)5174drm_dp_mst_put_port_malloc(pos->port);5175kfree(pos);5176}51775178for (i = 0; i < mst_state->num_commit_deps; i++)5179drm_crtc_commit_put(mst_state->commit_deps[i]);51805181kfree(mst_state->commit_deps);5182kfree(mst_state);5183}51845185static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,5186struct drm_dp_mst_branch *branch)5187{5188while (port->parent) {5189if (port->parent == branch)5190return true;51915192if (port->parent->port_parent)5193port = port->parent->port_parent;5194else5195break;5196}5197return false;5198}51995200static bool5201drm_dp_mst_port_downstream_of_parent_locked(struct drm_dp_mst_topology_mgr *mgr,5202struct drm_dp_mst_port *port,5203struct drm_dp_mst_port *parent)5204{5205if (!mgr->mst_primary)5206return false;52075208port = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary,5209port);5210if (!port)5211return false;52125213if (!parent)5214return true;52155216parent = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary,5217parent);5218if (!parent)5219return false;52205221if (!parent->mstb)5222return false;52235224return drm_dp_mst_port_downstream_of_branch(port, parent->mstb);5225}52265227/**5228* drm_dp_mst_port_downstream_of_parent - check if a port is downstream of a parent port5229* @mgr: MST topology manager5230* @port: the port being looked up5231* @parent: the parent port5232*5233* The function returns %true if @port is downstream of @parent. 
If @parent is5234* %NULL - denoting the root port - the function returns %true if @port is in5235* @mgr's topology.5236*/5237bool5238drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr,5239struct drm_dp_mst_port *port,5240struct drm_dp_mst_port *parent)5241{5242bool ret;52435244mutex_lock(&mgr->lock);5245ret = drm_dp_mst_port_downstream_of_parent_locked(mgr, port, parent);5246mutex_unlock(&mgr->lock);52475248return ret;5249}5250EXPORT_SYMBOL(drm_dp_mst_port_downstream_of_parent);52515252static int5253drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,5254struct drm_dp_mst_topology_state *state,5255struct drm_dp_mst_port **failing_port);52565257static int5258drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,5259struct drm_dp_mst_topology_state *state,5260struct drm_dp_mst_port **failing_port)5261{5262struct drm_dp_mst_atomic_payload *payload;5263struct drm_dp_mst_port *port;5264int pbn_used = 0, ret;5265bool found = false;52665267/* Check that we have at least one port in our state that's downstream5268* of this branch, otherwise we can skip this branch5269*/5270list_for_each_entry(payload, &state->payloads, next) {5271if (!payload->pbn ||5272!drm_dp_mst_port_downstream_of_branch(payload->port, mstb))5273continue;52745275found = true;5276break;5277}5278if (!found)5279return 0;52805281if (mstb->port_parent)5282drm_dbg_atomic(mstb->mgr->dev,5283"[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",5284mstb->port_parent->parent, mstb->port_parent, mstb);5285else5286drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb);52875288list_for_each_entry(port, &mstb->ports, next) {5289ret = drm_dp_mst_atomic_check_port_bw_limit(port, state, failing_port);5290if (ret < 0)5291return ret;52925293pbn_used += ret;5294}52955296return pbn_used;5297}52985299static int5300drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,5301struct drm_dp_mst_topology_state *state,5302struct 
drm_dp_mst_port **failing_port)5303{5304struct drm_dp_mst_atomic_payload *payload;5305int pbn_used = 0;53065307if (port->pdt == DP_PEER_DEVICE_NONE)5308return 0;53095310if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {5311payload = drm_atomic_get_mst_payload_state(state, port);5312if (!payload)5313return 0;53145315/*5316* This could happen if the sink deasserted its HPD line, but5317* the branch device still reports it as attached (PDT != NONE).5318*/5319if (!port->full_pbn) {5320drm_dbg_atomic(port->mgr->dev,5321"[MSTB:%p] [MST PORT:%p] no BW available for the port\n",5322port->parent, port);5323*failing_port = port;5324return -EINVAL;5325}53265327pbn_used = payload->pbn;5328} else {5329pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,5330state,5331failing_port);5332if (pbn_used <= 0)5333return pbn_used;5334}53355336if (pbn_used > port->full_pbn) {5337drm_dbg_atomic(port->mgr->dev,5338"[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",5339port->parent, port, pbn_used, port->full_pbn);5340*failing_port = port;5341return -ENOSPC;5342}53435344drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",5345port->parent, port, pbn_used, port->full_pbn);53465347return pbn_used;5348}53495350static inline int5351drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr,5352struct drm_dp_mst_topology_state *mst_state)5353{5354struct drm_dp_mst_atomic_payload *payload;5355int avail_slots = mst_state->total_avail_slots, payload_count = 0;53565357list_for_each_entry(payload, &mst_state->payloads, next) {5358/* Releasing payloads is always OK-even if the port is gone */5359if (payload->delete) {5360drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all time slots\n",5361payload->port);5362continue;5363}53645365drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d time slots\n",5366payload->port, payload->time_slots);53675368avail_slots -= payload->time_slots;5369if (avail_slots < 0) 
{
			drm_dbg_atomic(mgr->dev,
				       "[MST PORT:%p] not enough time slots in mst state %p (avail=%d)\n",
				       payload->port, mst_state, avail_slots + payload->time_slots);
			return -ENOSPC;
		}

		if (++payload_count > mgr->max_payloads) {
			drm_dbg_atomic(mgr->dev,
				       "[MST MGR:%p] state %p has too many payloads (max=%d)\n",
				       mgr, mst_state, mgr->max_payloads);
			return -EINVAL;
		}

		/* Assign a VCPI */
		if (!payload->vcpi) {
			/* ffz() finds the lowest clear bit, i.e. the smallest free VCPI (1-based) */
			payload->vcpi = ffz(mst_state->payload_mask) + 1;
			drm_dbg_atomic(mgr->dev, "[MST PORT:%p] assigned VCPI #%d\n",
				       payload->port, payload->vcpi);
			mst_state->payload_mask |= BIT(payload->vcpi - 1);
		}
	}

	/* No payloads counted in this state: reset the PBN divider */
	if (!payload_count)
		mst_state->pbn_div.full = dfixed_const(0);

	drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n",
		       mgr, mst_state, dfixed_trunc(mst_state->pbn_div), avail_slots,
		       mst_state->total_avail_slots - avail_slots);

	return 0;
}

/**
 * drm_dp_mst_add_affected_dsc_crtcs
 * @state: Pointer to the new struct drm_dp_mst_topology_state
 * @mgr: MST topology manager
 *
 * Whenever there is a change in mst topology
 * DSC configuration would have to be recalculated
 * therefore we need to trigger modeset on all affected
 * CRTCs in that topology
 *
 * See also:
 * drm_dp_mst_atomic_enable_dsc()
 *
 * Returns:
 * 0 on success, -EINVAL if a payload has no connector, or the error from
 * pulling the connector/CRTC state into the atomic state.
 */
int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_atomic_payload *pos;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;

	mst_state = drm_atomic_get_mst_topology_state(state, mgr);

	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	list_for_each_entry(pos, &mst_state->payloads, next) {

		connector = pos->port->connector;

		if (!connector)
			return -EINVAL;

		conn_state = drm_atomic_get_connector_state(state, connector);

		if (IS_ERR(conn_state))
			return PTR_ERR(conn_state);

		crtc = conn_state->crtc;

		/* Payload not currently driven by a CRTC: nothing to modeset */
		if (!crtc)
			continue;

		/* Only CRTCs whose port can actually do DSC are affected */
		if (!drm_dp_mst_dsc_aux_for_port(pos->port))
			continue;

		crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);

		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
			       mgr, crtc);

		crtc_state->mode_changed = true;
	}
	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);

/**
 * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
 * @state: Pointer to the new drm_atomic_state
 * @port: Pointer to the affected MST Port
 * @pbn: Newly recalculated bw required for link with DSC enabled
 * @enable: Boolean flag to enable or disable DSC on the port
 *
 * This function enables DSC on the given Port
 * by recalculating its vcpi from pbn provided
 * and sets dsc_enable flag to keep track of which
 * ports have DSC enabled
 *
 * Returns:
 * The number of time slots allocated for the port (0 when disabling), or a
 * negative error code on failure.
 */
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
				 struct drm_dp_mst_port *port,
				 int pbn, bool enable)
{
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_atomic_payload *payload;
	int time_slots = 0;

	mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	payload = drm_atomic_get_mst_payload_state(mst_state, port);
	if (!payload) {
		drm_dbg_atomic(state->dev,
			       "[MST PORT:%p] Couldn't find payload in mst state %p\n",
			       port, mst_state);
		return -EINVAL;
	}

	/* Flag already in the requested state: report the current allocation */
	if (payload->dsc_enabled == enable) {
		drm_dbg_atomic(state->dev,
			       "[MST PORT:%p] DSC flag is already set to %d, returning %d time slots\n",
			       port, enable, payload->time_slots);
		time_slots = payload->time_slots;
	}

	if (enable) {
		time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn);
		drm_dbg_atomic(state->dev,
			       "[MST PORT:%p] Enabling DSC flag, reallocating %d time slots on the port\n",
			       port, time_slots);
		if (time_slots < 0)
			return -EINVAL;
	}

	payload->dsc_enabled = enable;

	return time_slots;
}
EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);

/**
 * drm_dp_mst_atomic_check_mgr - Check the atomic state of an MST topology manager
 * @state: The global atomic state
 * @mgr: Manager to check
 * @mst_state: The MST atomic state for @mgr
 * @failing_port: Returns the port with a BW limitation
 *
 * Checks the given MST manager's topology state for an atomic update to ensure
 * that it's valid. This includes checking whether there's enough bandwidth to
 * support the new timeslot allocations in the atomic update.
 *
 * Any atomic drivers supporting DP MST must make sure to call this or
 * the drm_dp_mst_atomic_check() function after checking the rest of their state
 * in their &drm_mode_config_funcs.atomic_check() callback.
 *
 * See also:
 * drm_dp_mst_atomic_check()
 * drm_dp_atomic_find_time_slots()
 * drm_dp_atomic_release_time_slots()
 *
 * Returns:
 * - 0 if the new state is valid
 * - %-ENOSPC, if the new state is invalid, because of BW limitation
 *   @failing_port is set to:
 *
 *   - The non-root port where a BW limit check failed
 *     with all the ports downstream of @failing_port passing
 *     the BW limit check.
 *     The returned port pointer is valid until at least
 *     one payload downstream of it exists.
 *   - %NULL if the BW limit check failed at the root port
 *     with all the ports downstream of the root port passing
 *     the BW limit check.
 *
 * - %-EINVAL, if the new state is invalid, because the root port has
 *   too many payloads.
 */
int drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state,
				struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_topology_state *mst_state,
				struct drm_dp_mst_port **failing_port)
{
	int ret;

	*failing_port = NULL;

	/* MST not enabled on this manager: nothing to validate */
	if (!mgr->mst_state)
		return 0;

	/* Walk the topology from the primary branch under the manager lock */
	mutex_lock(&mgr->lock);
	ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
						    mst_state,
						    failing_port);
	mutex_unlock(&mgr->lock);

	if (ret < 0)
		return ret;

	return drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state);
}
EXPORT_SYMBOL(drm_dp_mst_atomic_check_mgr);

/**
 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
 * atomic update is valid
 * @state: Pointer to the new &struct drm_dp_mst_topology_state
 *
 * Checks the given topology state for an atomic update to ensure that it's
 * valid, calling drm_dp_mst_atomic_check_mgr() for all MST manager in the
 * atomic state. This includes checking whether there's enough bandwidth to
 * support the new timeslot allocations in the atomic update.
 *
 * Any atomic drivers supporting DP MST must make sure to call this after
 * checking the rest of their state in their
 * &drm_mode_config_funcs.atomic_check() callback.
 *
 * See also:
 * drm_dp_mst_atomic_check_mgr()
 * drm_dp_atomic_find_time_slots()
 * drm_dp_atomic_release_time_slots()
 *
 * Returns:
 * 0 if the new state is valid, negative error code otherwise.
 */
int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *mst_state;
	int i, ret = 0;

	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		struct drm_dp_mst_port *tmp_port;

		ret = drm_dp_mst_atomic_check_mgr(state, mgr, mst_state, &tmp_port);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_atomic_check);

const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
	.atomic_destroy_state = drm_dp_mst_destroy_state,
};
EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);

/**
 * drm_atomic_get_mst_topology_state: get MST topology state
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
 * state vtable so that the private object state returned is that of a MST
 * topology object.
 *
 * RETURNS:
 * The MST topology state or error pointer.
 */
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
								    struct drm_dp_mst_topology_mgr *mgr)
{
	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);

/**
 * drm_atomic_get_old_mst_topology_state: get old MST topology state in atomic state, if any
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_old_private_obj_state() passing in the MST atomic
 * state vtable so that the private object state returned is that of a MST
 * topology object.
 *
 * Returns:
 * The old MST topology state, or NULL if there's no topology state for this MST mgr
 * in the global atomic state
 */
struct drm_dp_mst_topology_state *
drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
				      struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_private_state *old_priv_state =
		drm_atomic_get_old_private_obj_state(state, &mgr->base);

	return old_priv_state ?
to_dp_mst_topology_state(old_priv_state) : NULL;5666}5667EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state);56685669/**5670* drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any5671* @state: global atomic state5672* @mgr: MST topology manager, also the private object in this case5673*5674* This function wraps drm_atomic_get_new_private_obj_state() passing in the MST atomic5675* state vtable so that the private object state returned is that of a MST5676* topology object.5677*5678* Returns:5679* The new MST topology state, or NULL if there's no topology state for this MST mgr5680* in the global atomic state5681*/5682struct drm_dp_mst_topology_state *5683drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,5684struct drm_dp_mst_topology_mgr *mgr)5685{5686struct drm_private_state *new_priv_state =5687drm_atomic_get_new_private_obj_state(state, &mgr->base);56885689return new_priv_state ? to_dp_mst_topology_state(new_priv_state) : NULL;5690}5691EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);56925693/**5694* drm_dp_mst_topology_mgr_init - initialise a topology manager5695* @mgr: manager struct to initialise5696* @dev: device providing this structure - for i2c addition.5697* @aux: DP helper aux channel to talk to this device5698* @max_dpcd_transaction_bytes: hw specific DPCD transaction limit5699* @max_payloads: maximum number of payloads this GPU can source5700* @conn_base_id: the connector object ID the MST device is connected to.5701*5702* Return 0 for success, or negative error code on failure5703*/5704int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,5705struct drm_device *dev, struct drm_dp_aux *aux,5706int max_dpcd_transaction_bytes, int max_payloads,5707int conn_base_id)5708{5709struct drm_dp_mst_topology_state 
*mst_state;57105711mutex_init(&mgr->lock);5712mutex_init(&mgr->qlock);5713mutex_init(&mgr->delayed_destroy_lock);5714mutex_init(&mgr->up_req_lock);5715mutex_init(&mgr->probe_lock);5716#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)5717mutex_init(&mgr->topology_ref_history_lock);5718stack_depot_init();5719#endif5720INIT_LIST_HEAD(&mgr->tx_msg_downq);5721INIT_LIST_HEAD(&mgr->destroy_port_list);5722INIT_LIST_HEAD(&mgr->destroy_branch_device_list);5723INIT_LIST_HEAD(&mgr->up_req_list);57245725/*5726* delayed_destroy_work will be queued on a dedicated WQ, so that any5727* requeuing will be also flushed when deiniting the topology manager.5728*/5729mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);5730if (mgr->delayed_destroy_wq == NULL)5731return -ENOMEM;57325733INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);5734INIT_WORK(&mgr->tx_work, drm_dp_tx_work);5735INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);5736INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);5737init_waitqueue_head(&mgr->tx_waitq);5738mgr->dev = dev;5739mgr->aux = aux;5740mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;5741mgr->max_payloads = max_payloads;5742mgr->conn_base_id = conn_base_id;57435744mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);5745if (mst_state == NULL)5746return -ENOMEM;57475748mst_state->total_avail_slots = 63;5749mst_state->start_slot = 1;57505751mst_state->mgr = mgr;5752INIT_LIST_HEAD(&mst_state->payloads);57535754drm_atomic_private_obj_init(dev, &mgr->base,5755&mst_state->base,5756&drm_dp_mst_topology_state_funcs);57575758return 0;5759}5760EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);57615762/**5763* drm_dp_mst_topology_mgr_destroy() - destroy topology manager.5764* @mgr: manager to destroy5765*/5766void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)5767{5768drm_dp_mst_topology_mgr_set_mst(mgr, false);5769flush_work(&mgr->work);5770/* The following will also drain any requeued work on the WQ. 
*/
	if (mgr->delayed_destroy_wq) {
		destroy_workqueue(mgr->delayed_destroy_wq);
		mgr->delayed_destroy_wq = NULL;
	}
	mgr->dev = NULL;
	mgr->aux = NULL;
	drm_atomic_private_obj_fini(&mgr->base);
	mgr->funcs = NULL;

	mutex_destroy(&mgr->delayed_destroy_lock);
	mutex_destroy(&mgr->qlock);
	mutex_destroy(&mgr->lock);
	mutex_destroy(&mgr->up_req_lock);
	mutex_destroy(&mgr->probe_lock);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	mutex_destroy(&mgr->topology_ref_history_lock);
#endif
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);

/*
 * Check whether @msgs can be carried by a single REMOTE_I2C_READ sideband
 * request: up to DP_REMOTE_I2C_READ_MAX_TRANSACTIONS non-read messages of at
 * most 0xff bytes each, followed by exactly one read of at most 0xff bytes.
 */
static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
{
	int i;

	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
		return false;

	for (i = 0; i < num - 1; i++) {
		if (msgs[i].flags & I2C_M_RD ||
		    msgs[i].len > 0xff)
			return false;
	}

	return msgs[num - 1].flags & I2C_M_RD &&
		msgs[num - 1].len <= 0xff;
}

/*
 * Check whether @msgs can be carried as REMOTE_I2C_WRITE sideband requests:
 * all messages must be writes of at most 0xff bytes, and every message except
 * the last must have I2C_M_STOP set.
 */
static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
{
	int i;

	for (i = 0; i < num - 1; i++) {
		if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
		    msgs[i].len > 0xff)
			return false;
	}

	return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
}

/*
 * Issue a single REMOTE_I2C_READ sideband transaction for @msgs (num - 1
 * writes plus a final read) and copy the reply bytes into the read buffer.
 * Returns @num on success or a negative error code.
 */
static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_mst_port *port,
			       struct i2c_msg *msgs, int num)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {

		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			ret = -EREMOTEIO;
			goto out;
		}
		/* Short read from the remote side is treated as an I/O error */
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	return ret;
}

/*
 * Issue one REMOTE_I2C_WRITE sideband transaction per message in @msgs,
 * reusing a single tx buffer. Returns @num on success or a negative error
 * code from the first failing transfer.
 */
static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
				struct drm_dp_mst_port *port,
				struct i2c_msg *msgs, int num)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}
	for (i = 0; i < num; i++) {
		memset(&msg, 0, sizeof(msg));
		msg.req_type = DP_REMOTE_I2C_WRITE;
		msg.u.i2c_write.port_number = port->port_num;
		msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
		msg.u.i2c_write.num_bytes = msgs[i].len;
		msg.u.i2c_write.bytes = msgs[i].buf;

		memset(txmsg, 0, sizeof(*txmsg));
		txmsg->dst = mstb;

		drm_dp_encode_sideband_req(&msg, txmsg);
		drm_dp_queue_down_tx(mgr, txmsg);

		ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
		if (ret > 0) {
			if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
				ret = -EREMOTEIO;
				goto out;
			}
		} else {
			goto out;
		}
	}
	ret = num;
out:
	kfree(txmsg);
	return ret;
}

/* I2C
device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
			       struct i2c_msg *msgs, int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port =
		container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	int ret;

	/* Pin the parent branch device for the duration of the transfer */
	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	/* Route the transfer to the sideband helper matching its shape */
	if (remote_i2c_read_ok(msgs, num)) {
		ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
	} else if (remote_i2c_write_ok(msgs, num)) {
		ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
	} else {
		drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n");
		ret = -EIO;
	}

	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}

static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};

/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @port: The port to add the I2C bus on
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
{
	struct drm_dp_aux *aux = &port->aux;
	struct device *parent_dev = port->mgr->dev->dev;

	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.owner = THIS_MODULE;
	/* FIXME: set the kdev of the port's connector as parent */
	aux->ddc.dev.parent = parent_dev;
	aux->ddc.dev.of_node = parent_dev->of_node;

	strscpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}

/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @port: The port to remove the I2C bus from
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
{
	i2c_del_adapter(&port->aux.ddc);
}

/**
 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
 * @port: The port to check
 *
 * A single physical MST hub object can be represented in the topology
 * by multiple branches, with virtual ports between those branches.
 *
 * As of DP1.4, An MST hub with internal (virtual) ports must expose
 * certain DPCD registers over those ports. See sections 2.6.1.1.1
 * and 2.6.1.1.2 of Display Port specification v1.4 for details.
 *
 * May acquire mgr->lock
 *
 * Returns:
 * true if the port is a virtual DP peer device, false otherwise
 */
static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *downstream_port;

	if (!port || port->dpcd_rev < DP_DPCD_REV_14)
		return false;

	/* Virtual DP Sink (Internal Display Panel) */
	if (drm_dp_mst_port_is_logical(port))
		return true;

	/* DP-to-HDMI Protocol Converter */
	if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
	    !port->mcs &&
	    port->ldps)
		return true;

	/* DP-to-DP */
	mutex_lock(&port->mgr->lock);
	if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
	    port->mstb &&
	    port->mstb->num_ports == 2) {
		list_for_each_entry(downstream_port, &port->mstb->ports, next) {
			if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
			    !downstream_port->input) {
				mutex_unlock(&port->mgr->lock);
				return true;
			}
		}
	}
	mutex_unlock(&port->mgr->lock);

	return false;
}

/**
 * drm_dp_mst_aux_for_parent() - Get the AUX device for an MST port's parent
 * @port: MST port whose parent's AUX device is
returned
 *
 * Return the AUX device for @port's parent or NULL if port's parent is the
 * root port.
 */
struct drm_dp_aux *drm_dp_mst_aux_for_parent(struct drm_dp_mst_port *port)
{
	if (!port->parent || !port->parent->port_parent)
		return NULL;

	return &port->parent->port_parent->aux;
}
EXPORT_SYMBOL(drm_dp_mst_aux_for_parent);

/**
 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
 * @port: The port to check. A leaf of the MST tree with an attached display.
 *
 * Depending on the situation, DSC may be enabled via the endpoint aux,
 * the immediately upstream aux, or the connector's physical aux.
 *
 * This is both the correct aux to read DSC_CAPABILITY and the
 * correct aux to write DSC_ENABLED.
 *
 * This operation can be expensive (up to four aux reads), so
 * the caller should cache the return.
 *
 * Returns:
 * NULL if DSC cannot be enabled on this port, otherwise the aux device
 */
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *immediate_upstream_port;
	struct drm_dp_aux *immediate_upstream_aux;
	struct drm_dp_mst_port *fec_port;
	struct drm_dp_desc desc = {};
	u8 upstream_dsc;
	u8 endpoint_fec;
	u8 endpoint_dsc;

	if (!port)
		return NULL;

	if (port->parent->port_parent)
		immediate_upstream_port = port->parent->port_parent;
	else
		immediate_upstream_port = NULL;

	fec_port = immediate_upstream_port;
	while (fec_port) {
		/*
		 * Each physical link (i.e. not a virtual port) between the
		 * output and the primary device must support FEC
		 */
		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
		    !fec_port->fec_capable)
			return NULL;

		fec_port = fec_port->parent->port_parent;
	}

	/* DP-to-DP peer device */
	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
		if (drm_dp_dpcd_read_data(&port->aux,
					  DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
			return NULL;
		if (drm_dp_dpcd_read_data(&port->aux,
					  DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
			return NULL;
		if (drm_dp_dpcd_read_data(&immediate_upstream_port->aux,
					  DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
			return NULL;

		/* Endpoint decompression with DP-to-DP peer device */
		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
		    (endpoint_fec & DP_FEC_CAPABLE) &&
		    (upstream_dsc & DP_DSC_PASSTHROUGH_IS_SUPPORTED)) {
			port->passthrough_aux = &immediate_upstream_port->aux;
			return &port->aux;
		}

		/* Virtual DPCD decompression with DP-to-DP peer device */
		return &immediate_upstream_port->aux;
	}

	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
	if (drm_dp_mst_is_virtual_dpcd(port))
		return &port->aux;

	/*
	 * Synaptics quirk
	 * Applies to ports for which:
	 * - Physical aux has Synaptics OUI
	 * - DPv1.4 or higher
	 * - Port is on primary branch device
	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
	 */
	if (immediate_upstream_port)
		immediate_upstream_aux = &immediate_upstream_port->aux;
	else
		immediate_upstream_aux = port->mgr->aux;

	if (drm_dp_read_desc(immediate_upstream_aux, &desc, true))
		return NULL;

	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
		u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];

		if (drm_dp_dpcd_read_data(immediate_upstream_aux,
					  DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
			return NULL;

		if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
			return NULL;

		if (drm_dp_read_dpcd_caps(immediate_upstream_aux, dpcd_ext) < 0)
			return NULL;

		/* Quirk applies only to DP1.4+ with a non-VGA downstream port */
		if (dpcd_ext[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
		    ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
		     ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK)
		      != DP_DWN_STRM_PORT_TYPE_ANALOG)))
			return immediate_upstream_aux;
	}

	/*
	 * The check below verifies if the MST sink
	 * connected to the GPU is capable of DSC -
	 * therefore the endpoint needs to be
	 * both DSC and FEC capable.
	 */
	if (drm_dp_dpcd_read_data(&port->aux,
				  DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
		return NULL;
	if (drm_dp_dpcd_read_data(&port->aux,
				  DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
		return NULL;
	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
	    (endpoint_fec & DP_FEC_CAPABLE))
		return &port->aux;

	return NULL;
}
EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);