/* drivers/infiniband/core/sa_query.c */
/*1* Copyright (c) 2004 Topspin Communications. All rights reserved.2* Copyright (c) 2005 Voltaire, Inc. All rights reserved.3* Copyright (c) 2006 Intel Corporation. All rights reserved.4*5* This software is available to you under a choice of one of two6* licenses. You may choose to be licensed under the terms of the GNU7* General Public License (GPL) Version 2, available from the file8* COPYING in the main directory of this source tree, or the9* OpenIB.org BSD license below:10*11* Redistribution and use in source and binary forms, with or12* without modification, are permitted provided that the following13* conditions are met:14*15* - Redistributions of source code must retain the above16* copyright notice, this list of conditions and the following17* disclaimer.18*19* - Redistributions in binary form must reproduce the above20* copyright notice, this list of conditions and the following21* disclaimer in the documentation and/or other materials22* provided with the distribution.23*24* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,25* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF26* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND27* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS28* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN29* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN30* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE31* SOFTWARE.32*/3334#include <linux/module.h>35#include <linux/init.h>36#include <linux/err.h>37#include <linux/random.h>38#include <linux/spinlock.h>39#include <linux/slab.h>40#include <linux/dma-mapping.h>41#include <linux/kref.h>42#include <linux/idr.h>43#include <linux/workqueue.h>4445#include <rdma/ib_pack.h>46#include <rdma/ib_cache.h>47#include "sa.h"4849MODULE_AUTHOR("Roland Dreier");50MODULE_DESCRIPTION("InfiniBand subnet administration query support");51MODULE_LICENSE("Dual BSD/GPL");5253struct ib_sa_sm_ah {54struct ib_ah *ah;55struct kref ref;56u16 pkey_index;57u8 src_path_mask;58};5960struct ib_sa_port {61struct ib_mad_agent *agent;62struct ib_sa_sm_ah *sm_ah;63struct work_struct update_task;64spinlock_t ah_lock;65u8 port_num;66};6768struct ib_sa_device {69int start_port, end_port;70struct ib_event_handler event_handler;71struct ib_sa_port port[0];72};7374struct ib_sa_query {75void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);76void (*release)(struct ib_sa_query *);77struct ib_sa_client *client;78struct ib_sa_port *port;79struct ib_mad_send_buf *mad_buf;80struct ib_sa_sm_ah *sm_ah;81int id;82};8384struct ib_sa_service_query {85void (*callback)(int, struct ib_sa_service_rec *, void *);86void *context;87struct ib_sa_query sa_query;88};8990struct ib_sa_path_query {91void (*callback)(int, struct ib_sa_path_rec *, void *);92void *context;93struct ib_sa_query sa_query;94};9596struct ib_sa_mcmember_query {97void (*callback)(int, struct ib_sa_mcmember_rec *, void *);98void *context;99struct ib_sa_query sa_query;100};101102static void ib_sa_add_one(struct ib_device *device);103static void ib_sa_remove_one(struct ib_device *device);104105static struct ib_client sa_client = {106.name = 
"sa",107.add = ib_sa_add_one,108.remove = ib_sa_remove_one109};110111static DEFINE_SPINLOCK(idr_lock);112static DEFINE_IDR(query_idr);113114static DEFINE_SPINLOCK(tid_lock);115static u32 tid;116117#define PATH_REC_FIELD(field) \118.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field), \119.struct_size_bytes = sizeof ((struct ib_sa_path_rec *) 0)->field, \120.field_name = "sa_path_rec:" #field121122static const struct ib_field path_rec_table[] = {123{ PATH_REC_FIELD(service_id),124.offset_words = 0,125.offset_bits = 0,126.size_bits = 64 },127{ PATH_REC_FIELD(dgid),128.offset_words = 2,129.offset_bits = 0,130.size_bits = 128 },131{ PATH_REC_FIELD(sgid),132.offset_words = 6,133.offset_bits = 0,134.size_bits = 128 },135{ PATH_REC_FIELD(dlid),136.offset_words = 10,137.offset_bits = 0,138.size_bits = 16 },139{ PATH_REC_FIELD(slid),140.offset_words = 10,141.offset_bits = 16,142.size_bits = 16 },143{ PATH_REC_FIELD(raw_traffic),144.offset_words = 11,145.offset_bits = 0,146.size_bits = 1 },147{ RESERVED,148.offset_words = 11,149.offset_bits = 1,150.size_bits = 3 },151{ PATH_REC_FIELD(flow_label),152.offset_words = 11,153.offset_bits = 4,154.size_bits = 20 },155{ PATH_REC_FIELD(hop_limit),156.offset_words = 11,157.offset_bits = 24,158.size_bits = 8 },159{ PATH_REC_FIELD(traffic_class),160.offset_words = 12,161.offset_bits = 0,162.size_bits = 8 },163{ PATH_REC_FIELD(reversible),164.offset_words = 12,165.offset_bits = 8,166.size_bits = 1 },167{ PATH_REC_FIELD(numb_path),168.offset_words = 12,169.offset_bits = 9,170.size_bits = 7 },171{ PATH_REC_FIELD(pkey),172.offset_words = 12,173.offset_bits = 16,174.size_bits = 16 },175{ PATH_REC_FIELD(qos_class),176.offset_words = 13,177.offset_bits = 0,178.size_bits = 12 },179{ PATH_REC_FIELD(sl),180.offset_words = 13,181.offset_bits = 12,182.size_bits = 4 },183{ PATH_REC_FIELD(mtu_selector),184.offset_words = 13,185.offset_bits = 16,186.size_bits = 2 },187{ PATH_REC_FIELD(mtu),188.offset_words = 13,189.offset_bits = 
18,190.size_bits = 6 },191{ PATH_REC_FIELD(rate_selector),192.offset_words = 13,193.offset_bits = 24,194.size_bits = 2 },195{ PATH_REC_FIELD(rate),196.offset_words = 13,197.offset_bits = 26,198.size_bits = 6 },199{ PATH_REC_FIELD(packet_life_time_selector),200.offset_words = 14,201.offset_bits = 0,202.size_bits = 2 },203{ PATH_REC_FIELD(packet_life_time),204.offset_words = 14,205.offset_bits = 2,206.size_bits = 6 },207{ PATH_REC_FIELD(preference),208.offset_words = 14,209.offset_bits = 8,210.size_bits = 8 },211{ RESERVED,212.offset_words = 14,213.offset_bits = 16,214.size_bits = 48 },215};216217#define MCMEMBER_REC_FIELD(field) \218.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \219.struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \220.field_name = "sa_mcmember_rec:" #field221222static const struct ib_field mcmember_rec_table[] = {223{ MCMEMBER_REC_FIELD(mgid),224.offset_words = 0,225.offset_bits = 0,226.size_bits = 128 },227{ MCMEMBER_REC_FIELD(port_gid),228.offset_words = 4,229.offset_bits = 0,230.size_bits = 128 },231{ MCMEMBER_REC_FIELD(qkey),232.offset_words = 8,233.offset_bits = 0,234.size_bits = 32 },235{ MCMEMBER_REC_FIELD(mlid),236.offset_words = 9,237.offset_bits = 0,238.size_bits = 16 },239{ MCMEMBER_REC_FIELD(mtu_selector),240.offset_words = 9,241.offset_bits = 16,242.size_bits = 2 },243{ MCMEMBER_REC_FIELD(mtu),244.offset_words = 9,245.offset_bits = 18,246.size_bits = 6 },247{ MCMEMBER_REC_FIELD(traffic_class),248.offset_words = 9,249.offset_bits = 24,250.size_bits = 8 },251{ MCMEMBER_REC_FIELD(pkey),252.offset_words = 10,253.offset_bits = 0,254.size_bits = 16 },255{ MCMEMBER_REC_FIELD(rate_selector),256.offset_words = 10,257.offset_bits = 16,258.size_bits = 2 },259{ MCMEMBER_REC_FIELD(rate),260.offset_words = 10,261.offset_bits = 18,262.size_bits = 6 },263{ MCMEMBER_REC_FIELD(packet_life_time_selector),264.offset_words = 10,265.offset_bits = 24,266.size_bits = 2 },267{ 
MCMEMBER_REC_FIELD(packet_life_time),268.offset_words = 10,269.offset_bits = 26,270.size_bits = 6 },271{ MCMEMBER_REC_FIELD(sl),272.offset_words = 11,273.offset_bits = 0,274.size_bits = 4 },275{ MCMEMBER_REC_FIELD(flow_label),276.offset_words = 11,277.offset_bits = 4,278.size_bits = 20 },279{ MCMEMBER_REC_FIELD(hop_limit),280.offset_words = 11,281.offset_bits = 24,282.size_bits = 8 },283{ MCMEMBER_REC_FIELD(scope),284.offset_words = 12,285.offset_bits = 0,286.size_bits = 4 },287{ MCMEMBER_REC_FIELD(join_state),288.offset_words = 12,289.offset_bits = 4,290.size_bits = 4 },291{ MCMEMBER_REC_FIELD(proxy_join),292.offset_words = 12,293.offset_bits = 8,294.size_bits = 1 },295{ RESERVED,296.offset_words = 12,297.offset_bits = 9,298.size_bits = 23 },299};300301#define SERVICE_REC_FIELD(field) \302.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \303.struct_size_bytes = sizeof ((struct ib_sa_service_rec *) 0)->field, \304.field_name = "sa_service_rec:" #field305306static const struct ib_field service_rec_table[] = {307{ SERVICE_REC_FIELD(id),308.offset_words = 0,309.offset_bits = 0,310.size_bits = 64 },311{ SERVICE_REC_FIELD(gid),312.offset_words = 2,313.offset_bits = 0,314.size_bits = 128 },315{ SERVICE_REC_FIELD(pkey),316.offset_words = 6,317.offset_bits = 0,318.size_bits = 16 },319{ SERVICE_REC_FIELD(lease),320.offset_words = 7,321.offset_bits = 0,322.size_bits = 32 },323{ SERVICE_REC_FIELD(key),324.offset_words = 8,325.offset_bits = 0,326.size_bits = 128 },327{ SERVICE_REC_FIELD(name),328.offset_words = 12,329.offset_bits = 0,330.size_bits = 64*8 },331{ SERVICE_REC_FIELD(data8),332.offset_words = 28,333.offset_bits = 0,334.size_bits = 16*8 },335{ SERVICE_REC_FIELD(data16),336.offset_words = 32,337.offset_bits = 0,338.size_bits = 8*16 },339{ SERVICE_REC_FIELD(data32),340.offset_words = 36,341.offset_bits = 0,342.size_bits = 4*32 },343{ SERVICE_REC_FIELD(data64),344.offset_words = 40,345.offset_bits = 0,346.size_bits = 2*64 },347};348349static void 
free_sm_ah(struct kref *kref)350{351struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);352353ib_destroy_ah(sm_ah->ah);354kfree(sm_ah);355}356357static void update_sm_ah(struct work_struct *work)358{359struct ib_sa_port *port =360container_of(work, struct ib_sa_port, update_task);361struct ib_sa_sm_ah *new_ah;362struct ib_port_attr port_attr;363struct ib_ah_attr ah_attr;364365if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {366printk(KERN_WARNING "Couldn't query port\n");367return;368}369370new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);371if (!new_ah) {372printk(KERN_WARNING "Couldn't allocate new SM AH\n");373return;374}375376kref_init(&new_ah->ref);377new_ah->src_path_mask = (1 << port_attr.lmc) - 1;378379new_ah->pkey_index = 0;380if (ib_find_pkey(port->agent->device, port->port_num,381IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))382printk(KERN_ERR "Couldn't find index for default PKey\n");383384memset(&ah_attr, 0, sizeof ah_attr);385ah_attr.dlid = port_attr.sm_lid;386ah_attr.sl = port_attr.sm_sl;387ah_attr.port_num = port->port_num;388389new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);390if (IS_ERR(new_ah->ah)) {391printk(KERN_WARNING "Couldn't create new SM AH\n");392kfree(new_ah);393return;394}395396spin_lock_irq(&port->ah_lock);397if (port->sm_ah)398kref_put(&port->sm_ah->ref, free_sm_ah);399port->sm_ah = new_ah;400spin_unlock_irq(&port->ah_lock);401402}403404static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)405{406if (event->event == IB_EVENT_PORT_ERR ||407event->event == IB_EVENT_PORT_ACTIVE ||408event->event == IB_EVENT_LID_CHANGE ||409event->event == IB_EVENT_PKEY_CHANGE ||410event->event == IB_EVENT_SM_CHANGE ||411event->event == IB_EVENT_CLIENT_REREGISTER) {412unsigned long flags;413struct ib_sa_device *sa_dev =414container_of(handler, typeof(*sa_dev), event_handler);415struct ib_sa_port *port =416&sa_dev->port[event->element.port_num - sa_dev->start_port];417418if 
(rdma_port_get_link_layer(handler->device, port->port_num) != IB_LINK_LAYER_INFINIBAND)419return;420421spin_lock_irqsave(&port->ah_lock, flags);422if (port->sm_ah)423kref_put(&port->sm_ah->ref, free_sm_ah);424port->sm_ah = NULL;425spin_unlock_irqrestore(&port->ah_lock, flags);426427queue_work(ib_wq, &sa_dev->port[event->element.port_num -428sa_dev->start_port].update_task);429}430}431432void ib_sa_register_client(struct ib_sa_client *client)433{434atomic_set(&client->users, 1);435init_completion(&client->comp);436}437EXPORT_SYMBOL(ib_sa_register_client);438439void ib_sa_unregister_client(struct ib_sa_client *client)440{441ib_sa_client_put(client);442wait_for_completion(&client->comp);443}444EXPORT_SYMBOL(ib_sa_unregister_client);445446/**447* ib_sa_cancel_query - try to cancel an SA query448* @id:ID of query to cancel449* @query:query pointer to cancel450*451* Try to cancel an SA query. If the id and query don't match up or452* the query has already completed, nothing is done. Otherwise the453* query is canceled and will complete with a status of -EINTR.454*/455void ib_sa_cancel_query(int id, struct ib_sa_query *query)456{457unsigned long flags;458struct ib_mad_agent *agent;459struct ib_mad_send_buf *mad_buf;460461spin_lock_irqsave(&idr_lock, flags);462if (idr_find(&query_idr, id) != query) {463spin_unlock_irqrestore(&idr_lock, flags);464return;465}466agent = query->port->agent;467mad_buf = query->mad_buf;468spin_unlock_irqrestore(&idr_lock, flags);469470ib_cancel_mad(agent, mad_buf);471}472EXPORT_SYMBOL(ib_sa_cancel_query);473474static u8 get_src_path_mask(struct ib_device *device, u8 port_num)475{476struct ib_sa_device *sa_dev;477struct ib_sa_port *port;478unsigned long flags;479u8 src_path_mask;480481sa_dev = ib_get_client_data(device, &sa_client);482if (!sa_dev)483return 0x7f;484485port = &sa_dev->port[port_num - sa_dev->start_port];486spin_lock_irqsave(&port->ah_lock, flags);487src_path_mask = port->sm_ah ? 
port->sm_ah->src_path_mask : 0x7f;488spin_unlock_irqrestore(&port->ah_lock, flags);489490return src_path_mask;491}492493int ib_init_ah_from_path(struct ib_device *device, u8 port_num,494struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)495{496int ret;497u16 gid_index;498int force_grh;499500memset(ah_attr, 0, sizeof *ah_attr);501ah_attr->dlid = be16_to_cpu(rec->dlid);502ah_attr->sl = rec->sl;503ah_attr->src_path_bits = be16_to_cpu(rec->slid) &504get_src_path_mask(device, port_num);505ah_attr->port_num = port_num;506ah_attr->static_rate = rec->rate;507508force_grh = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_ETHERNET;509510if (rec->hop_limit > 1 || force_grh) {511ah_attr->ah_flags = IB_AH_GRH;512ah_attr->grh.dgid = rec->dgid;513514ret = ib_find_cached_gid(device, &rec->sgid, &port_num,515&gid_index);516if (ret)517return ret;518519ah_attr->grh.sgid_index = gid_index;520ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);521ah_attr->grh.hop_limit = rec->hop_limit;522ah_attr->grh.traffic_class = rec->traffic_class;523}524return 0;525}526EXPORT_SYMBOL(ib_init_ah_from_path);527528static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)529{530unsigned long flags;531532spin_lock_irqsave(&query->port->ah_lock, flags);533if (!query->port->sm_ah) {534spin_unlock_irqrestore(&query->port->ah_lock, flags);535return -EAGAIN;536}537kref_get(&query->port->sm_ah->ref);538query->sm_ah = query->port->sm_ah;539spin_unlock_irqrestore(&query->port->ah_lock, flags);540541query->mad_buf = ib_create_send_mad(query->port->agent, 1,542query->sm_ah->pkey_index,5430, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,544gfp_mask);545if (IS_ERR(query->mad_buf)) {546kref_put(&query->sm_ah->ref, free_sm_ah);547return -ENOMEM;548}549550query->mad_buf->ah = query->sm_ah->ah;551552return 0;553}554555static void free_mad(struct ib_sa_query *query)556{557ib_free_send_mad(query->mad_buf);558kref_put(&query->sm_ah->ref, free_sm_ah);559}560561static void init_mad(struct ib_sa_mad *mad, struct 
ib_mad_agent *agent)562{563unsigned long flags;564565memset(mad, 0, sizeof *mad);566567mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;568mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;569mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;570571spin_lock_irqsave(&tid_lock, flags);572mad->mad_hdr.tid =573cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);574spin_unlock_irqrestore(&tid_lock, flags);575}576577static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)578{579unsigned long flags;580int ret, id;581582retry:583if (!idr_pre_get(&query_idr, gfp_mask))584return -ENOMEM;585spin_lock_irqsave(&idr_lock, flags);586ret = idr_get_new(&query_idr, query, &id);587spin_unlock_irqrestore(&idr_lock, flags);588if (ret == -EAGAIN)589goto retry;590if (ret)591return ret;592593query->mad_buf->timeout_ms = timeout_ms;594query->mad_buf->context[0] = query;595query->id = id;596597ret = ib_post_send_mad(query->mad_buf, NULL);598if (ret) {599spin_lock_irqsave(&idr_lock, flags);600idr_remove(&query_idr, id);601spin_unlock_irqrestore(&idr_lock, flags);602}603604/*605* It's not safe to dereference query any more, because the606* send may already have completed and freed the query in607* another context.608*/609return ret ? 
ret : id;610}611612void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)613{614ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);615}616EXPORT_SYMBOL(ib_sa_unpack_path);617618static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,619int status,620struct ib_sa_mad *mad)621{622struct ib_sa_path_query *query =623container_of(sa_query, struct ib_sa_path_query, sa_query);624625if (mad) {626struct ib_sa_path_rec rec;627628ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),629mad->data, &rec);630query->callback(status, &rec, query->context);631} else632query->callback(status, NULL, query->context);633}634635static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)636{637kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));638}639640/**641* ib_sa_path_rec_get - Start a Path get query642* @client:SA client643* @device:device to send query on644* @port_num: port number to send query on645* @rec:Path Record to send in query646* @comp_mask:component mask to send in query647* @timeout_ms:time to wait for response648* @gfp_mask:GFP mask to use for internal allocations649* @callback:function called when query completes, times out or is650* canceled651* @context:opaque user context passed to callback652* @sa_query:query context, used to cancel query653*654* Send a Path Record Get query to the SA to look up a path. The655* callback function will be called when the query completes (or656* fails); status is 0 for a successful response, -EINTR if the query657* is canceled, -ETIMEDOUT is the query timed out, or -EIO if an error658* occurred sending the query. The resp parameter of the callback is659* only valid if status is 0.660*661* If the return value of ib_sa_path_rec_get() is negative, it is an662* error code. 
Otherwise it is a query ID that can be used to cancel663* the query.664*/665int ib_sa_path_rec_get(struct ib_sa_client *client,666struct ib_device *device, u8 port_num,667struct ib_sa_path_rec *rec,668ib_sa_comp_mask comp_mask,669int timeout_ms, gfp_t gfp_mask,670void (*callback)(int status,671struct ib_sa_path_rec *resp,672void *context),673void *context,674struct ib_sa_query **sa_query)675{676struct ib_sa_path_query *query;677struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);678struct ib_sa_port *port;679struct ib_mad_agent *agent;680struct ib_sa_mad *mad;681int ret;682683if (!sa_dev)684return -ENODEV;685686port = &sa_dev->port[port_num - sa_dev->start_port];687agent = port->agent;688689query = kmalloc(sizeof *query, gfp_mask);690if (!query)691return -ENOMEM;692693query->sa_query.port = port;694ret = alloc_mad(&query->sa_query, gfp_mask);695if (ret)696goto err1;697698ib_sa_client_get(client);699query->sa_query.client = client;700query->callback = callback;701query->context = context;702703mad = query->sa_query.mad_buf->mad;704init_mad(mad, agent);705706query->sa_query.callback = callback ? 
ib_sa_path_rec_callback : NULL;707query->sa_query.release = ib_sa_path_rec_release;708mad->mad_hdr.method = IB_MGMT_METHOD_GET;709mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);710mad->sa_hdr.comp_mask = comp_mask;711712ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);713714*sa_query = &query->sa_query;715716ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);717if (ret < 0)718goto err2;719720return ret;721722err2:723*sa_query = NULL;724ib_sa_client_put(query->sa_query.client);725free_mad(&query->sa_query);726727err1:728kfree(query);729return ret;730}731EXPORT_SYMBOL(ib_sa_path_rec_get);732733static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,734int status,735struct ib_sa_mad *mad)736{737struct ib_sa_service_query *query =738container_of(sa_query, struct ib_sa_service_query, sa_query);739740if (mad) {741struct ib_sa_service_rec rec;742743ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),744mad->data, &rec);745query->callback(status, &rec, query->context);746} else747query->callback(status, NULL, query->context);748}749750static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)751{752kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));753}754755/**756* ib_sa_service_rec_query - Start Service Record operation757* @client:SA client758* @device:device to send request on759* @port_num: port number to send request on760* @method:SA method - should be get, set, or delete761* @rec:Service Record to send in request762* @comp_mask:component mask to send in request763* @timeout_ms:time to wait for response764* @gfp_mask:GFP mask to use for internal allocations765* @callback:function called when request completes, times out or is766* canceled767* @context:opaque user context passed to callback768* @sa_query:request context, used to cancel request769*770* Send a Service Record set/get/delete to the SA to register,771* unregister or query a service record.772* The callback function will be called 
when the request completes (or773* fails); status is 0 for a successful response, -EINTR if the query774* is canceled, -ETIMEDOUT is the query timed out, or -EIO if an error775* occurred sending the query. The resp parameter of the callback is776* only valid if status is 0.777*778* If the return value of ib_sa_service_rec_query() is negative, it is an779* error code. Otherwise it is a request ID that can be used to cancel780* the query.781*/782int ib_sa_service_rec_query(struct ib_sa_client *client,783struct ib_device *device, u8 port_num, u8 method,784struct ib_sa_service_rec *rec,785ib_sa_comp_mask comp_mask,786int timeout_ms, gfp_t gfp_mask,787void (*callback)(int status,788struct ib_sa_service_rec *resp,789void *context),790void *context,791struct ib_sa_query **sa_query)792{793struct ib_sa_service_query *query;794struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);795struct ib_sa_port *port;796struct ib_mad_agent *agent;797struct ib_sa_mad *mad;798int ret;799800if (!sa_dev)801return -ENODEV;802803port = &sa_dev->port[port_num - sa_dev->start_port];804agent = port->agent;805806if (method != IB_MGMT_METHOD_GET &&807method != IB_MGMT_METHOD_SET &&808method != IB_SA_METHOD_DELETE)809return -EINVAL;810811query = kmalloc(sizeof *query, gfp_mask);812if (!query)813return -ENOMEM;814815query->sa_query.port = port;816ret = alloc_mad(&query->sa_query, gfp_mask);817if (ret)818goto err1;819820ib_sa_client_get(client);821query->sa_query.client = client;822query->callback = callback;823query->context = context;824825mad = query->sa_query.mad_buf->mad;826init_mad(mad, agent);827828query->sa_query.callback = callback ? 
ib_sa_service_rec_callback : NULL;829query->sa_query.release = ib_sa_service_rec_release;830mad->mad_hdr.method = method;831mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);832mad->sa_hdr.comp_mask = comp_mask;833834ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),835rec, mad->data);836837*sa_query = &query->sa_query;838839ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);840if (ret < 0)841goto err2;842843return ret;844845err2:846*sa_query = NULL;847ib_sa_client_put(query->sa_query.client);848free_mad(&query->sa_query);849850err1:851kfree(query);852return ret;853}854EXPORT_SYMBOL(ib_sa_service_rec_query);855856static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,857int status,858struct ib_sa_mad *mad)859{860struct ib_sa_mcmember_query *query =861container_of(sa_query, struct ib_sa_mcmember_query, sa_query);862863if (mad) {864struct ib_sa_mcmember_rec rec;865866ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),867mad->data, &rec);868query->callback(status, &rec, query->context);869} else870query->callback(status, NULL, query->context);871}872873static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)874{875kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));876}877878int ib_sa_mcmember_rec_query(struct ib_sa_client *client,879struct ib_device *device, u8 port_num,880u8 method,881struct ib_sa_mcmember_rec *rec,882ib_sa_comp_mask comp_mask,883int timeout_ms, gfp_t gfp_mask,884void (*callback)(int status,885struct ib_sa_mcmember_rec *resp,886void *context),887void *context,888struct ib_sa_query **sa_query)889{890struct ib_sa_mcmember_query *query;891struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);892struct ib_sa_port *port;893struct ib_mad_agent *agent;894struct ib_sa_mad *mad;895int ret;896897if (!sa_dev)898return -ENODEV;899900port = &sa_dev->port[port_num - sa_dev->start_port];901agent = port->agent;902903query = kmalloc(sizeof *query, gfp_mask);904if 
(!query)905return -ENOMEM;906907query->sa_query.port = port;908ret = alloc_mad(&query->sa_query, gfp_mask);909if (ret)910goto err1;911912ib_sa_client_get(client);913query->sa_query.client = client;914query->callback = callback;915query->context = context;916917mad = query->sa_query.mad_buf->mad;918init_mad(mad, agent);919920query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;921query->sa_query.release = ib_sa_mcmember_rec_release;922mad->mad_hdr.method = method;923mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);924mad->sa_hdr.comp_mask = comp_mask;925926ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),927rec, mad->data);928929*sa_query = &query->sa_query;930931ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);932if (ret < 0)933goto err2;934935return ret;936937err2:938*sa_query = NULL;939ib_sa_client_put(query->sa_query.client);940free_mad(&query->sa_query);941942err1:943kfree(query);944return ret;945}946947static void send_handler(struct ib_mad_agent *agent,948struct ib_mad_send_wc *mad_send_wc)949{950struct ib_sa_query *query = mad_send_wc->send_buf->context[0];951unsigned long flags;952953if (query->callback)954switch (mad_send_wc->status) {955case IB_WC_SUCCESS:956/* No callback -- already got recv */957break;958case IB_WC_RESP_TIMEOUT_ERR:959query->callback(query, -ETIMEDOUT, NULL);960break;961case IB_WC_WR_FLUSH_ERR:962query->callback(query, -EINTR, NULL);963break;964default:965query->callback(query, -EIO, NULL);966break;967}968969spin_lock_irqsave(&idr_lock, flags);970idr_remove(&query_idr, query->id);971spin_unlock_irqrestore(&idr_lock, flags);972973free_mad(query);974ib_sa_client_put(query->client);975query->release(query);976}977978static void recv_handler(struct ib_mad_agent *mad_agent,979struct ib_mad_recv_wc *mad_recv_wc)980{981struct ib_sa_query *query;982struct ib_mad_send_buf *mad_buf;983984mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;985query = mad_buf->context[0];986987if 
(query->callback) {988if (mad_recv_wc->wc->status == IB_WC_SUCCESS)989query->callback(query,990mad_recv_wc->recv_buf.mad->mad_hdr.status ?991-EINVAL : 0,992(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);993else994query->callback(query, -EIO, NULL);995}996997ib_free_recv_mad(mad_recv_wc);998}9991000static void ib_sa_add_one(struct ib_device *device)1001{1002struct ib_sa_device *sa_dev;1003int s, e, i;10041005if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)1006return;10071008if (device->node_type == RDMA_NODE_IB_SWITCH)1009s = e = 0;1010else {1011s = 1;1012e = device->phys_port_cnt;1013}10141015sa_dev = kzalloc(sizeof *sa_dev +1016(e - s + 1) * sizeof (struct ib_sa_port),1017GFP_KERNEL);1018if (!sa_dev)1019return;10201021sa_dev->start_port = s;1022sa_dev->end_port = e;10231024for (i = 0; i <= e - s; ++i) {1025spin_lock_init(&sa_dev->port[i].ah_lock);1026if (rdma_port_get_link_layer(device, i + 1) != IB_LINK_LAYER_INFINIBAND)1027continue;10281029sa_dev->port[i].sm_ah = NULL;1030sa_dev->port[i].port_num = i + s;10311032sa_dev->port[i].agent =1033ib_register_mad_agent(device, i + s, IB_QPT_GSI,1034NULL, 0, send_handler,1035recv_handler, sa_dev);1036if (IS_ERR(sa_dev->port[i].agent))1037goto err;10381039INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);1040}10411042ib_set_client_data(device, &sa_client, sa_dev);10431044/*1045* We register our event handler after everything is set up,1046* and then update our cached info after the event handler is1047* registered to avoid any problems if a port changes state1048* during our initialization.1049*/10501051INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);1052if (ib_register_event_handler(&sa_dev->event_handler))1053goto err;10541055for (i = 0; i <= e - s; ++i)1056if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)1057update_sm_ah(&sa_dev->port[i].update_task);10581059return;10601061err:1062while (--i >= 0)1063if (rdma_port_get_link_layer(device, i + 1) == 
IB_LINK_LAYER_INFINIBAND)1064ib_unregister_mad_agent(sa_dev->port[i].agent);10651066kfree(sa_dev);10671068return;1069}10701071static void ib_sa_remove_one(struct ib_device *device)1072{1073struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);1074int i;10751076if (!sa_dev)1077return;10781079ib_unregister_event_handler(&sa_dev->event_handler);10801081flush_workqueue(ib_wq);10821083for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {1084if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {1085ib_unregister_mad_agent(sa_dev->port[i].agent);1086if (sa_dev->port[i].sm_ah)1087kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);1088}10891090}10911092kfree(sa_dev);1093}10941095static int __init ib_sa_init(void)1096{1097int ret;10981099get_random_bytes(&tid, sizeof tid);11001101ret = ib_register_client(&sa_client);1102if (ret) {1103printk(KERN_ERR "Couldn't register ib_sa client\n");1104goto err1;1105}11061107ret = mcast_init();1108if (ret) {1109printk(KERN_ERR "Couldn't initialize multicast handling\n");1110goto err2;1111}11121113return 0;1114err2:1115ib_unregister_client(&sa_client);1116err1:1117return ret;1118}11191120static void __exit ib_sa_cleanup(void)1121{1122mcast_cleanup();1123ib_unregister_client(&sa_client);1124idr_destroy(&query_idr);1125}11261127module_init(ib_sa_init);1128module_exit(ib_sa_cleanup);112911301131