author     Or Gerlitz <ogerlitz@voltaire.com>   2006-06-17 20:37:37 -0700
committer  Roland Dreier <rolandd@cisco.com>    2006-06-17 20:37:37 -0700
commit     6c8c1aa25d213a288df381f431ce5b6a155146ec (patch)
tree       379670131076d3ca0af68f9e91f6f038e86e0e96 /drivers/infiniband/core
parent     d4cb0784fd1ea99ef3d20526811bd5608146fe60 (diff)
IB/fmr: Use device's max_map_per_fmr attribute in FMR pool.
When creating an FMR pool, query the IB device and use the returned max_map_per_fmr attribute as the maximum number of FMR remaps. If the device does not support querying this attribute, use the original IB_FMR_MAX_REMAPS (32) default.

Signed-off-by: Or Gerlitz <ogerlitz@voltaire.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
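For context, here is a minimal sketch of how a consumer (for example a storage ULP) might create and destroy an FMR pool through this API. The function name example_fmr_pool_usage, the parameter values, and the access flags are illustrative assumptions, not part of this patch; only the calls themselves (ib_create_fmr_pool, ib_destroy_fmr_pool) and the ib_fmr_pool_param fields come from the existing API. With this change the pool's per-FMR remap limit is derived internally from the device's max_map_per_fmr attribute instead of the IB_FMR_MAX_REMAPS constant.

#include <linux/err.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>

/* Illustrative only: create an FMR pool on an existing PD and tear it
 * down again.  ib_create_fmr_pool() now derives the per-FMR remap limit
 * from the device's max_map_per_fmr attribute (or IB_FMR_MAX_REMAPS if
 * the device does not report one). */
static int example_fmr_pool_usage(struct ib_pd *pd)
{
	struct ib_fmr_pool_param params = {
		.max_pages_per_fmr = 64,		/* illustrative value */
		.page_shift        = PAGE_SHIFT,
		.access            = IB_ACCESS_LOCAL_WRITE |
				     IB_ACCESS_REMOTE_READ,
		.pool_size         = 32,		/* illustrative value */
		.dirty_watermark   = 8,			/* illustrative value */
		.cache             = 1,
	};
	struct ib_fmr_pool *pool;

	pool = ib_create_fmr_pool(pd, &params);
	if (IS_ERR(pool))
		return PTR_ERR(pool);	/* e.g. -ENOSYS or -ENOMEM from the core */

	/* ... map and unmap regions with ib_fmr_pool_map_phys() and
	 *     ib_fmr_pool_unmap() ... */

	ib_destroy_fmr_pool(pool);
	return 0;
}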
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--  drivers/infiniband/core/fmr_pool.c  30
1 file changed, 27 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 838bf54458d..615fe9cc6c5 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -54,7 +54,7 @@ enum {
/*
* If an FMR is not in use, then the list member will point to either
* its pool's free_list (if the FMR can be mapped again; that is,
- * remap_count < IB_FMR_MAX_REMAPS) or its pool's dirty_list (if the
+ * remap_count < pool->max_remaps) or its pool's dirty_list (if the
* FMR needs to be unmapped before being remapped). In either of
* these cases it is a bug if the ref_count is not 0. In other words,
* if ref_count is > 0, then the list member must not be linked into
@@ -84,6 +84,7 @@ struct ib_fmr_pool {
int pool_size;
int max_pages;
+ int max_remaps;
int dirty_watermark;
int dirty_len;
struct list_head free_list;
@@ -214,8 +215,10 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
{
struct ib_device *device;
struct ib_fmr_pool *pool;
+ struct ib_device_attr *attr;
int i;
int ret;
+ int max_remaps;
if (!params)
return ERR_PTR(-EINVAL);
@@ -228,6 +231,26 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
return ERR_PTR(-ENOSYS);
}
+ attr = kmalloc(sizeof *attr, GFP_KERNEL);
+ if (!attr) {
+ printk(KERN_WARNING "couldn't allocate device attr struct");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = ib_query_device(device, attr);
+ if (ret) {
+ printk(KERN_WARNING "couldn't query device");
+ kfree(attr);
+ return ERR_PTR(ret);
+ }
+
+ if (!attr->max_map_per_fmr)
+ max_remaps = IB_FMR_MAX_REMAPS;
+ else
+ max_remaps = attr->max_map_per_fmr;
+
+ kfree(attr);
+
pool = kmalloc(sizeof *pool, GFP_KERNEL);
if (!pool) {
printk(KERN_WARNING "couldn't allocate pool struct");
@@ -258,6 +281,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
pool->pool_size = 0;
pool->max_pages = params->max_pages_per_fmr;
+ pool->max_remaps = max_remaps;
pool->dirty_watermark = params->dirty_watermark;
pool->dirty_len = 0;
spin_lock_init(&pool->pool_lock);
@@ -279,7 +303,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
struct ib_pool_fmr *fmr;
struct ib_fmr_attr attr = {
.max_pages = params->max_pages_per_fmr,
- .max_maps = IB_FMR_MAX_REMAPS,
+ .max_maps = pool->max_remaps,
.page_shift = params->page_shift
};
@@ -489,7 +513,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
--fmr->ref_count;
if (!fmr->ref_count) {
- if (fmr->remap_count < IB_FMR_MAX_REMAPS) {
+ if (fmr->remap_count < pool->max_remaps) {
list_add_tail(&fmr->list, &pool->free_list);
} else {
list_add_tail(&fmr->list, &pool->dirty_list);
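The fallback in ib_create_fmr_pool() only triggers when a device leaves max_map_per_fmr at zero. As an illustration of the other side of that contract, below is a sketch of how a low-level driver's query_device routine might report the attribute; the function name example_query_device and the 5-bit counter derivation are assumptions for illustration, not taken from this patch or from any specific driver.

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Sketch only: a driver-side query_device filling in max_map_per_fmr.
 * A device that cannot express a remap limit simply leaves the field 0,
 * and the FMR pool then falls back to IB_FMR_MAX_REMAPS (32). */
static int example_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	memset(props, 0, sizeof *props);

	/* Assumed for illustration: the hardware tracks remaps in a 5-bit
	 * counter, so at most 2^5 - 1 remaps are possible before the FMR
	 * must be fully unmapped again. */
	props->max_map_per_fmr = (1 << 5) - 1;

	/* ... fill in the remaining attributes from the hardware ... */
	return 0;
}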