Path: drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/rculist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "ipath_verbs.h"

/*
 * Global table of GID to attached QPs.
 * The table is global to all ipath devices since a send from one QP/device
 * needs to be locally routed to any locally attached QPs on the same
 * or different device.
 */
static struct rb_root mcast_tree;
static DEFINE_SPINLOCK(mcast_lock);
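
/*
 * Locking and lifetime summary: mcast_lock protects the rb-tree and all
 * modifications of a group's qp_list; readers of the list (e.g. the local
 * receive/loopback path) walk it under RCU instead of taking the lock.
 * Each ipath_mcast_qp holds a reference on its QP so the QP cannot be
 * destroyed while it is attached, and each lookup holds a reference on
 * the ipath_mcast so detach can wait until the group is idle before
 * freeing anything.
 */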

/**
 * ipath_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
 * @qp: the QP to link
 */
static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)
{
        struct ipath_mcast_qp *mqp;

        mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
        if (!mqp)
                goto bail;

        mqp->qp = qp;
        atomic_inc(&qp->refcount);

bail:
        return mqp;
}

static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)
{
        struct ipath_qp *qp = mqp->qp;

        /* Notify ipath_destroy_qp() if it is waiting. */
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);

        kfree(mqp);
}

/**
 * ipath_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 *
 * A list of QPs will be attached to this structure.
 */
static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
{
        struct ipath_mcast *mcast;

        mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
        if (!mcast)
                goto bail;

        mcast->mgid = *mgid;
        INIT_LIST_HEAD(&mcast->qp_list);
        init_waitqueue_head(&mcast->wait);
        atomic_set(&mcast->refcount, 0);
        mcast->n_attached = 0;

bail:
        return mcast;
}

static void ipath_mcast_free(struct ipath_mcast *mcast)
{
        struct ipath_mcast_qp *p, *tmp;

        list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
                ipath_mcast_qp_free(p);

        kfree(mcast);
}

/**
 * ipath_mcast_find - search the global table for the given multicast GID
 * @mgid: the multicast GID to search for
 *
 * Returns NULL if not found.
 *
 * The caller is responsible for decrementing the reference count if found.
 */
struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid)
{
        struct rb_node *n;
        unsigned long flags;
        struct ipath_mcast *mcast;

        spin_lock_irqsave(&mcast_lock, flags);
        n = mcast_tree.rb_node;
        while (n) {
                int ret;

                mcast = rb_entry(n, struct ipath_mcast, rb_node);

                ret = memcmp(mgid->raw, mcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else {
                        atomic_inc(&mcast->refcount);
                        spin_unlock_irqrestore(&mcast_lock, flags);
                        goto bail;
                }
        }
        spin_unlock_irqrestore(&mcast_lock, flags);

        mcast = NULL;

bail:
        return mcast;
}
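
/*
 * Illustrative caller pattern for ipath_mcast_find() (a sketch only; the
 * real lookup sits on the packet receive path in ipath_verbs.c):
 *
 *	mcast = ipath_mcast_find(&dgid);
 *	if (mcast != NULL) {
 *		list_for_each_entry_rcu(p, &mcast->qp_list, list)
 *			deliver the packet to p->qp;
 *		if (atomic_dec_return(&mcast->refcount) <= 1)
 *			wake_up(&mcast->wait);
 *	}
 *
 * The reference taken here is what ipath_multicast_detach() waits on
 * before freeing a group or a list entry.
 */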

/**
 * ipath_mcast_add - insert mcast GID into table and attach QP struct
 * @mcast: the mcast GID table
 * @mqp: the QP to attach
 *
 * Return zero if both were added.  Return EEXIST if the GID was already in
 * the table but the QP was added.  Return ESRCH if the QP was already
 * attached and neither structure was added.
 */
static int ipath_mcast_add(struct ipath_ibdev *dev,
                           struct ipath_mcast *mcast,
                           struct ipath_mcast_qp *mqp)
{
        struct rb_node **n = &mcast_tree.rb_node;
        struct rb_node *pn = NULL;
        int ret;

        spin_lock_irq(&mcast_lock);

        while (*n) {
                struct ipath_mcast *tmcast;
                struct ipath_mcast_qp *p;

                pn = *n;
                tmcast = rb_entry(pn, struct ipath_mcast, rb_node);

                ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0) {
                        n = &pn->rb_left;
                        continue;
                }
                if (ret > 0) {
                        n = &pn->rb_right;
                        continue;
                }

                /* Search the QP list to see if this is already there. */
                list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
                        if (p->qp == mqp->qp) {
                                ret = ESRCH;
                                goto bail;
                        }
                }
                if (tmcast->n_attached == ib_ipath_max_mcast_qp_attached) {
                        ret = ENOMEM;
                        goto bail;
                }

                tmcast->n_attached++;

                list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
                ret = EEXIST;
                goto bail;
        }

        spin_lock(&dev->n_mcast_grps_lock);
        if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) {
                spin_unlock(&dev->n_mcast_grps_lock);
                ret = ENOMEM;
                goto bail;
        }

        dev->n_mcast_grps_allocated++;
        spin_unlock(&dev->n_mcast_grps_lock);

        mcast->n_attached++;

        list_add_tail_rcu(&mqp->list, &mcast->qp_list);

        atomic_inc(&mcast->refcount);
        rb_link_node(&mcast->rb_node, pn, n);
        rb_insert_color(&mcast->rb_node, &mcast_tree);

        ret = 0;

bail:
        spin_unlock_irq(&mcast_lock);

        return ret;
}
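
/*
 * ipath_mcast_add() returns positive status codes (0, EEXIST, ESRCH,
 * ENOMEM) rather than negative errnos; ipath_multicast_attach() below
 * maps them to the values handed back to the verbs layer (0, 0, -EINVAL
 * and -ENOMEM respectively).  ENOMEM covers both the per-group attach
 * limit (ib_ipath_max_mcast_qp_attached) and the per-device group limit
 * (ib_ipath_max_mcast_grps).
 */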

int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        struct ipath_ibdev *dev = to_idev(ibqp->device);
        struct ipath_mcast *mcast;
        struct ipath_mcast_qp *mqp;
        int ret;

        /*
         * Allocate data structures since it's better to do this outside of
         * spin locks and it will most likely be needed.
         */
        mcast = ipath_mcast_alloc(gid);
        if (mcast == NULL) {
                ret = -ENOMEM;
                goto bail;
        }
        mqp = ipath_mcast_qp_alloc(qp);
        if (mqp == NULL) {
                ipath_mcast_free(mcast);
                ret = -ENOMEM;
                goto bail;
        }
        switch (ipath_mcast_add(dev, mcast, mqp)) {
        case ESRCH:
                /* Neither was used: can't attach the same QP twice. */
                ipath_mcast_qp_free(mqp);
                ipath_mcast_free(mcast);
                ret = -EINVAL;
                goto bail;
        case EEXIST:            /* The mcast wasn't used */
                ipath_mcast_free(mcast);
                break;
        case ENOMEM:
                /* Exceeded the maximum number of mcast groups or attached QPs. */
                ipath_mcast_qp_free(mqp);
                ipath_mcast_free(mcast);
                ret = -ENOMEM;
                goto bail;
        default:
                break;
        }

        ret = 0;

bail:
        return ret;
}

int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        struct ipath_ibdev *dev = to_idev(ibqp->device);
        struct ipath_mcast *mcast = NULL;
        struct ipath_mcast_qp *p, *tmp;
        struct ipath_mcast_qp *delp = NULL;
        struct rb_node *n;
        int last = 0;
        int ret;

        spin_lock_irq(&mcast_lock);

        /* Find the GID in the mcast table. */
        n = mcast_tree.rb_node;
        while (1) {
                if (n == NULL) {
                        spin_unlock_irq(&mcast_lock);
                        ret = -EINVAL;
                        goto bail;
                }

                mcast = rb_entry(n, struct ipath_mcast, rb_node);
                ret = memcmp(gid->raw, mcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        break;
        }

        /* Search the QP list. */
        list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
                if (p->qp != qp)
                        continue;
                /*
                 * We found it, so remove it, but don't poison the forward
                 * link until we are sure there are no list walkers.
                 */
                list_del_rcu(&p->list);
                mcast->n_attached--;
                delp = p;

                /* If this was the last attached QP, remove the GID too. */
                if (list_empty(&mcast->qp_list)) {
                        rb_erase(&mcast->rb_node, &mcast_tree);
                        last = 1;
                }
                break;
        }

        spin_unlock_irq(&mcast_lock);

        /* The QP was never attached to this GID; don't free a stale entry. */
        if (!delp) {
                ret = -EINVAL;
                goto bail;
        }

        /*
         * Wait for any list walkers to finish before freeing the
         * list element.
         */
        wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
        ipath_mcast_qp_free(delp);

        if (last) {
                atomic_dec(&mcast->refcount);
                wait_event(mcast->wait, !atomic_read(&mcast->refcount));
                ipath_mcast_free(mcast);
                spin_lock_irq(&dev->n_mcast_grps_lock);
                dev->n_mcast_grps_allocated--;
                spin_unlock_irq(&dev->n_mcast_grps_lock);
        }

        ret = 0;

bail:
        return ret;
}

int ipath_mcast_tree_empty(void)
{
        return mcast_tree.rb_node == NULL;
}
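
/*
 * How these entry points are reached (a rough sketch; the registration
 * itself lives in ipath_verbs.c, not here): ipath_register_ib_device()
 * plugs the two verbs handlers into the ib_device, so a consumer calling
 * ib_attach_mcast(qp, &mgid, lid) on a UD QP lands in
 * ipath_multicast_attach() above.  Roughly:
 *
 *	dev->attach_mcast = ipath_multicast_attach;
 *	dev->detach_mcast = ipath_multicast_detach;
 *
 * ipath_mcast_find() serves the receive path, and ipath_mcast_tree_empty()
 * lets the unload path check that no multicast state has been leaked.
 */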