* [PATCHv2 1/1] IB/core: create struct ib_port_cache
@ 2017-01-05 16:41 Jinpu Wang
       [not found] ` <CAMGffEn-fOOe7s2rniqwcaocoV+jxkeJDHZ6WYJkkXi+CBQbEA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Jinpu Wang @ 2017-01-05 16:41 UTC (permalink / raw)
  To: Doug Ledford, Jason Gunthorpe, Hefty, Sean, Hal Rosenstock,
	linux-rdma-u79uwXL29TY76Z2rM5mHXA, Michael Wang

[-- Attachment #1: Type: text/plain, Size: 16631 bytes --]

Hi Folks,

This is a follow-up to http://www.spinics.net/lists/linux-rdma/msg44536.html.
As Jason suggested, I'm moving the 4 per-port arrays into a separate
ib_port_cache structure, and changing the alloc/free paths accordingly.
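
At a glance, the layout change looks like this (condensed from the attached
patch; lookups like cache.gid_cache[p] simply become cache.ports[p].gid):

    /* before: four parallel per-port arrays in struct ib_cache */
    struct ib_cache {
            rwlock_t                lock;
            struct ib_event_handler event_handler;
            struct ib_pkey_cache  **pkey_cache;
            struct ib_gid_table   **gid_cache;
            u8                     *lmc_cache;
            enum ib_port_state     *port_state_cache;
    };

    /* after: one array with a per-port entry holding all four elements */
    struct ib_port_cache {
            struct ib_pkey_cache  *pkey;
            struct ib_gid_table   *gid;
            u8                     lmc;
            enum ib_port_state     port_state;
    };

    struct ib_cache {
            rwlock_t                lock;
            struct ib_event_handler event_handler;
            struct ib_port_cache   *ports; /* indexed by port - rdma_start_port() */
    };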

Please apply after the first 4 patches.

Comments and review are welcome!

Thanks,



-- 
Jinpu Wang
Linux Kernel Developer

ProfitBricks GmbH
Greifswalder Str. 207
D - 10405 Berlin

Tel:       +49 30 577 008  042
Fax:      +49 30 577 008 299
Email:    jinpu.wang-EIkl63zCoXaH+58JC4qpiA@public.gmane.org
URL:      https://www.profitbricks.de

Registered office: Berlin
Commercial register: Amtsgericht Charlottenburg, HRB 125506 B
Managing Director: Achim Weiss

[-- Attachment #2: 0001-IB-core-create-struct-ib_port_cache.patch --]
[-- Type: text/x-patch, Size: 14160 bytes --]

From cd8828de6cae70e4ad8b019f19112ce69db83c86 Mon Sep 17 00:00:00 2001
From: Jack Wang <jinpu.wang@profitbricks.com>
Date: Wed, 4 Jan 2017 14:09:05 +0100
Subject: [PATCH] IB/core: create struct ib_port_cache

As Jason suggested, we have 4 per-port arrays,
so it's better to have a separate structure to represent them.

It simplifies the code a bit: 30+ fewer lines of code :)

Signed-off-by: Jack Wang <jinpu.wang@profitbricks.com>
Reviewed-by: Michael Wang <yun.wang@profitbricks.com>
---
 drivers/infiniband/core/cache.c | 134 +++++++++++++++-------------------------
 include/rdma/ib_verbs.h         |  12 ++--
 2 files changed, 59 insertions(+), 87 deletions(-)

diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index f91886b..2e52021 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -314,14 +314,13 @@ static void make_default_gid(struct  net_device *dev, union ib_gid *gid)
 int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
 		     union ib_gid *gid, struct ib_gid_attr *attr)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	int ix;
 	int ret = 0;
 	struct net_device *idev;
 	int empty;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	if (!memcmp(gid, &zgid, sizeof(*gid)))
 		return -EINVAL;
@@ -369,11 +368,10 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
 int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
 		     union ib_gid *gid, struct ib_gid_attr *attr)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	int ix;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	mutex_lock(&table->lock);
 	write_lock_irq(&table->rwlock);
@@ -399,12 +397,11 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
 int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
 				     struct net_device *ndev)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	int ix;
 	bool deleted = false;
 
-	table  = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	mutex_lock(&table->lock);
 	write_lock_irq(&table->rwlock);
@@ -428,10 +425,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
 static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
 			      union ib_gid *gid, struct ib_gid_attr *attr)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	if (index < 0 || index >= table->sz)
 		return -EINVAL;
@@ -455,14 +451,13 @@ static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
 				    unsigned long mask,
 				    u8 *port, u16 *index)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	u8 p;
 	int local_index;
 	unsigned long flags;
 
 	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
-		table = ports_table[p];
+		table = ib_dev->cache.ports[p].gid;
 		read_lock_irqsave(&table->rwlock, flags);
 		local_index = find_gid(table, gid, val, false, mask, NULL);
 		if (local_index >= 0) {
@@ -503,7 +498,6 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
 			       u16 *index)
 {
 	int local_index;
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
 			     GID_ATTR_FIND_MASK_GID_TYPE;
@@ -514,7 +508,7 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
 	    port > rdma_end_port(ib_dev))
 		return -ENOENT;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	if (ndev)
 		mask |= GID_ATTR_FIND_MASK_NETDEV;
@@ -562,21 +556,18 @@ static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
 				       void *context,
 				       u16 *index)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	unsigned int i;
 	unsigned long flags;
 	bool found = false;
 
-	if (!ports_table)
-		return -EOPNOTSUPP;
 
 	if (port < rdma_start_port(ib_dev) ||
 	    port > rdma_end_port(ib_dev) ||
 	    !rdma_protocol_roce(ib_dev, port))
 		return -EPROTONOSUPPORT;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	read_lock_irqsave(&table->rwlock, flags);
 	for (i = 0; i < table->sz; i++) {
@@ -668,14 +659,13 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
 				  unsigned long gid_type_mask,
 				  enum ib_cache_gid_default_mode mode)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	union ib_gid gid;
 	struct ib_gid_attr gid_attr;
 	struct ib_gid_attr zattr_type = zattr;
 	struct ib_gid_table *table;
 	unsigned int gid_type;
 
-	table  = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	make_default_gid(ndev, &gid);
 	memset(&gid_attr, 0, sizeof(gid_attr));
@@ -766,71 +756,64 @@ static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
 static int _gid_table_setup_one(struct ib_device *ib_dev)
 {
 	u8 port;
-	struct ib_gid_table **table;
+	struct ib_gid_table *table;
 	int err = 0;
 
-	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
-	if (!table)
-		return -ENOMEM;
-
 	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
 		u8 rdma_port = port + rdma_start_port(ib_dev);
 
-		table[port] =
+		table =
 			alloc_gid_table(
 				ib_dev->port_immutable[rdma_port].gid_tbl_len);
-		if (!table[port]) {
+		if (!table) {
 			err = -ENOMEM;
 			goto rollback_table_setup;
 		}
 
 		err = gid_table_reserve_default(ib_dev,
 						port + rdma_start_port(ib_dev),
-						table[port]);
+						table);
 		if (err)
 			goto rollback_table_setup;
+		ib_dev->cache.ports[port].gid = table;
 	}
 
-	ib_dev->cache.gid_cache = table;
 	return 0;
 
 rollback_table_setup:
 	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+		table = ib_dev->cache.ports[port].gid;
+
 		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
-				       table[port]);
-		release_gid_table(table[port]);
+				       table);
+		release_gid_table(table);
 	}
 
-	kfree(table);
 	return err;
 }
 
 static void gid_table_release_one(struct ib_device *ib_dev)
 {
-	struct ib_gid_table **table = ib_dev->cache.gid_cache;
+	struct ib_gid_table *table;
 	u8 port;
 
-	if (!table)
-		return;
-
-	for (port = 0; port < ib_dev->phys_port_cnt; port++)
-		release_gid_table(table[port]);
-
-	kfree(table);
-	ib_dev->cache.gid_cache = NULL;
+	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+		table = ib_dev->cache.ports[port].gid;
+		release_gid_table(table);
+		ib_dev->cache.ports[port].gid = NULL;
+	}
 }
 
 static void gid_table_cleanup_one(struct ib_device *ib_dev)
 {
-	struct ib_gid_table **table = ib_dev->cache.gid_cache;
+	struct ib_gid_table *table;
 	u8 port;
 
-	if (!table)
-		return;
-
-	for (port = 0; port < ib_dev->phys_port_cnt; port++)
+	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+		table = ib_dev->cache.ports[port].gid;
 		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
-				       table[port]);
+				       table);
+	}
 }
 
 static int gid_table_setup_one(struct ib_device *ib_dev)
@@ -860,12 +843,12 @@ int ib_get_cached_gid(struct ib_device *device,
 {
 	int res;
 	unsigned long flags;
-	struct ib_gid_table **ports_table = device->cache.gid_cache;
-	struct ib_gid_table *table = ports_table[port_num - rdma_start_port(device)];
+	struct ib_gid_table *table;
 
 	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 		return -EINVAL;
 
+	table = device->cache.ports[port_num - rdma_start_port(device)].gid;
 	read_lock_irqsave(&table->rwlock, flags);
 	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
 	read_unlock_irqrestore(&table->rwlock, flags);
@@ -917,7 +900,7 @@ int ib_get_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
 
 	if (index < 0 || index >= cache->table_len)
 		ret = -EINVAL;
@@ -946,7 +929,7 @@ int ib_find_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
 
 	*index = -1;
 
@@ -986,7 +969,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
 
 	*index = -1;
 
@@ -1014,7 +997,7 @@ int ib_get_cached_lmc(struct ib_device *device,
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
-	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
+	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return ret;
@@ -1032,7 +1015,8 @@ int ib_get_cached_port_state(struct ib_device   *device,
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
-	*port_state = device->cache.port_state_cache[port_num - rdma_start_port(device)];
+	*port_state = device->cache.ports[port_num
+		- rdma_start_port(device)].port_state;
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return ret;
@@ -1051,14 +1035,13 @@ static void ib_cache_update(struct ib_device *device,
 	int                        i;
 	int                        ret;
 	struct ib_gid_table	  *table;
-	struct ib_gid_table	 **ports_table = device->cache.gid_cache;
 	bool			   use_roce_gid_table =
 					rdma_cap_roce_gid_table(device, port);
 
 	if (port < rdma_start_port(device) || port > rdma_end_port(device))
 		return;
 
-	table = ports_table[port - rdma_start_port(device)];
+	table = device->cache.ports[port - rdma_start_port(device)].gid;
 
 	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
 	if (!tprops)
@@ -1110,9 +1093,10 @@ static void ib_cache_update(struct ib_device *device,
 
 	write_lock_irq(&device->cache.lock);
 
-	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
+	old_pkey_cache = device->cache.ports[port -
+		rdma_start_port(device)].pkey;
 
-	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
+	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
 	if (!use_roce_gid_table) {
 		write_lock(&table->rwlock);
 		for (i = 0; i < gid_cache->table_len; i++) {
@@ -1122,8 +1106,8 @@ static void ib_cache_update(struct ib_device *device,
 		write_unlock(&table->rwlock);
 	}
 
-	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
-	device->cache.port_state_cache[port - rdma_start_port(device)] =
+	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
+	device->cache.ports[port - rdma_start_port(device)].port_state =
 		tprops->state;
 
 	write_unlock_irq(&device->cache.lock);
@@ -1177,26 +1161,17 @@ int ib_cache_setup_one(struct ib_device *device)
 
 	rwlock_init(&device->cache.lock);
 
-	device->cache.pkey_cache =
-		kzalloc(sizeof *device->cache.pkey_cache *
+	device->cache.ports =
+		kzalloc(sizeof(*device->cache.ports) *
 			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
-	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
-					  (rdma_end_port(device) -
-					   rdma_start_port(device) + 1),
-					  GFP_KERNEL);
-	device->cache.port_state_cache = kmalloc(sizeof *device->cache.port_state_cache *
-					  (rdma_end_port(device) -
-					   rdma_start_port(device) + 1),
-					  GFP_KERNEL);
-	if (!device->cache.pkey_cache || !device->cache.port_state_cache ||
-	    !device->cache.lmc_cache) {
+	if (!device->cache.ports) {
 		err = -ENOMEM;
-		goto free;
+		goto out;
 	}
 
 	err = gid_table_setup_one(device);
 	if (err)
-		goto free;
+		goto out;
 
 	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
 		ib_cache_update(device, p + rdma_start_port(device));
@@ -1211,10 +1186,7 @@ int ib_cache_setup_one(struct ib_device *device)
 
 err:
 	gid_table_cleanup_one(device);
-free:
-	kfree(device->cache.pkey_cache);
-	kfree(device->cache.lmc_cache);
-	kfree(device->cache.port_state_cache);
+out:
 	return err;
 }
 
@@ -1228,15 +1200,11 @@ void ib_cache_release_one(struct ib_device *device)
 	 * all the device's resources when the cache could no
 	 * longer be accessed.
 	 */
-	if (device->cache.pkey_cache)
-		for (p = 0;
-		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
-			kfree(device->cache.pkey_cache[p]);
+	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
+		kfree(device->cache.ports[p].pkey);
 
 	gid_table_release_one(device);
-	kfree(device->cache.pkey_cache);
-	kfree(device->cache.lmc_cache);
-	kfree(device->cache.port_state_cache);
+	kfree(device->cache.ports);
 }
 
 void ib_cache_cleanup_one(struct ib_device *device)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fafa988..e55afec 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1775,13 +1775,17 @@ enum ib_mad_result {
 
 #define IB_DEVICE_NAME_MAX 64
 
+struct ib_port_cache {
+	struct ib_pkey_cache  *pkey;
+	struct ib_gid_table   *gid;
+	u8                     lmc;
+	enum ib_port_state     port_state;
+};
+
 struct ib_cache {
 	rwlock_t                lock;
 	struct ib_event_handler event_handler;
-	struct ib_pkey_cache  **pkey_cache;
-	struct ib_gid_table   **gid_cache;
-	u8                     *lmc_cache;
-	enum ib_port_state     *port_state_cache;
+	struct ib_port_cache   *ports;
 };
 
 struct ib_dma_mapping_ops {
-- 
2.7.4



* Re: [PATCHv2 1/1] IB/core: create struct ib_port_cache
       [not found] ` <CAMGffEn-fOOe7s2rniqwcaocoV+jxkeJDHZ6WYJkkXi+CBQbEA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2017-01-13 13:12   ` Jack Wang
       [not found]     ` <b1fdf011-dc25-ad63-b223-0700f6e90bc6-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Jack Wang @ 2017-01-13 13:12 UTC (permalink / raw)
  To: Doug Ledford, Jason Gunthorpe, Hefty, Sean, Hal Rosenstock,
	linux-rdma-u79uwXL29TY76Z2rM5mHXA, Michael Wang

Hi Doug, hi Jason and others,

As Doug has already merged the first 4 patches to his tree, I rebased this patch
on his latest for-4.11 branch (commit 102c5ce082), with a minor tweak to the commit message.
I also configured my Thunderbird to keep the patch format as expected :), so here we go!

Cheers,
Jack

>From dc199e8a8afedc3ece9e6f03a5b085e41c391983 Mon Sep 17 00:00:00 2001
From: Jack Wang <jinpu.wang-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
Date: Wed, 4 Jan 2017 14:09:05 +0100
Subject: [PATCH] RDMA/core: create struct ib_port_cache

As Jason suggested, we have 4 per-port arrays,
so it's better to have a separate structure to represent them.

It simplifies the code a bit: ~30 fewer lines of code :)

Signed-off-by: Jack Wang <jinpu.wang-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
Reviewed-by: Michael Wang <yun.wang-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
---
 drivers/infiniband/core/cache.c | 134 +++++++++++++++-------------------------
 include/rdma/ib_verbs.h         |  12 ++--
 2 files changed, 59 insertions(+), 87 deletions(-)

diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index f91886b..2e52021 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -314,14 +314,13 @@ static void make_default_gid(struct  net_device *dev, union ib_gid *gid)
 int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
 		     union ib_gid *gid, struct ib_gid_attr *attr)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	int ix;
 	int ret = 0;
 	struct net_device *idev;
 	int empty;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	if (!memcmp(gid, &zgid, sizeof(*gid)))
 		return -EINVAL;
@@ -369,11 +368,10 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
 int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
 		     union ib_gid *gid, struct ib_gid_attr *attr)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	int ix;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	mutex_lock(&table->lock);
 	write_lock_irq(&table->rwlock);
@@ -399,12 +397,11 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
 int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
 				     struct net_device *ndev)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	int ix;
 	bool deleted = false;
 
-	table  = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	mutex_lock(&table->lock);
 	write_lock_irq(&table->rwlock);
@@ -428,10 +425,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
 static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
 			      union ib_gid *gid, struct ib_gid_attr *attr)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	if (index < 0 || index >= table->sz)
 		return -EINVAL;
@@ -455,14 +451,13 @@ static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
 				    unsigned long mask,
 				    u8 *port, u16 *index)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	u8 p;
 	int local_index;
 	unsigned long flags;
 
 	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
-		table = ports_table[p];
+		table = ib_dev->cache.ports[p].gid;
 		read_lock_irqsave(&table->rwlock, flags);
 		local_index = find_gid(table, gid, val, false, mask, NULL);
 		if (local_index >= 0) {
@@ -503,7 +498,6 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
 			       u16 *index)
 {
 	int local_index;
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
 			     GID_ATTR_FIND_MASK_GID_TYPE;
@@ -514,7 +508,7 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
 	    port > rdma_end_port(ib_dev))
 		return -ENOENT;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	if (ndev)
 		mask |= GID_ATTR_FIND_MASK_NETDEV;
@@ -562,21 +556,18 @@ static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
 				       void *context,
 				       u16 *index)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	unsigned int i;
 	unsigned long flags;
 	bool found = false;
 
-	if (!ports_table)
-		return -EOPNOTSUPP;
 
 	if (port < rdma_start_port(ib_dev) ||
 	    port > rdma_end_port(ib_dev) ||
 	    !rdma_protocol_roce(ib_dev, port))
 		return -EPROTONOSUPPORT;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	read_lock_irqsave(&table->rwlock, flags);
 	for (i = 0; i < table->sz; i++) {
@@ -668,14 +659,13 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
 				  unsigned long gid_type_mask,
 				  enum ib_cache_gid_default_mode mode)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	union ib_gid gid;
 	struct ib_gid_attr gid_attr;
 	struct ib_gid_attr zattr_type = zattr;
 	struct ib_gid_table *table;
 	unsigned int gid_type;
 
-	table  = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	make_default_gid(ndev, &gid);
 	memset(&gid_attr, 0, sizeof(gid_attr));
@@ -766,71 +756,64 @@ static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
 static int _gid_table_setup_one(struct ib_device *ib_dev)
 {
 	u8 port;
-	struct ib_gid_table **table;
+	struct ib_gid_table *table;
 	int err = 0;
 
-	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
-	if (!table)
-		return -ENOMEM;
-
 	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
 		u8 rdma_port = port + rdma_start_port(ib_dev);
 
-		table[port] =
+		table =
 			alloc_gid_table(
 				ib_dev->port_immutable[rdma_port].gid_tbl_len);
-		if (!table[port]) {
+		if (!table) {
 			err = -ENOMEM;
 			goto rollback_table_setup;
 		}
 
 		err = gid_table_reserve_default(ib_dev,
 						port + rdma_start_port(ib_dev),
-						table[port]);
+						table);
 		if (err)
 			goto rollback_table_setup;
+		ib_dev->cache.ports[port].gid = table;
 	}
 
-	ib_dev->cache.gid_cache = table;
 	return 0;
 
 rollback_table_setup:
 	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+		table = ib_dev->cache.ports[port].gid;
+
 		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
-				       table[port]);
-		release_gid_table(table[port]);
+				       table);
+		release_gid_table(table);
 	}
 
-	kfree(table);
 	return err;
 }
 
 static void gid_table_release_one(struct ib_device *ib_dev)
 {
-	struct ib_gid_table **table = ib_dev->cache.gid_cache;
+	struct ib_gid_table *table;
 	u8 port;
 
-	if (!table)
-		return;
-
-	for (port = 0; port < ib_dev->phys_port_cnt; port++)
-		release_gid_table(table[port]);
-
-	kfree(table);
-	ib_dev->cache.gid_cache = NULL;
+	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+		table = ib_dev->cache.ports[port].gid;
+		release_gid_table(table);
+		ib_dev->cache.ports[port].gid = NULL;
+	}
 }
 
 static void gid_table_cleanup_one(struct ib_device *ib_dev)
 {
-	struct ib_gid_table **table = ib_dev->cache.gid_cache;
+	struct ib_gid_table *table;
 	u8 port;
 
-	if (!table)
-		return;
-
-	for (port = 0; port < ib_dev->phys_port_cnt; port++)
+	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+		table = ib_dev->cache.ports[port].gid;
 		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
-				       table[port]);
+				       table);
+	}
 }
 
 static int gid_table_setup_one(struct ib_device *ib_dev)
@@ -860,12 +843,12 @@ int ib_get_cached_gid(struct ib_device *device,
 {
 	int res;
 	unsigned long flags;
-	struct ib_gid_table **ports_table = device->cache.gid_cache;
-	struct ib_gid_table *table = ports_table[port_num - rdma_start_port(device)];
+	struct ib_gid_table *table;
 
 	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 		return -EINVAL;
 
+	table = device->cache.ports[port_num - rdma_start_port(device)].gid;
 	read_lock_irqsave(&table->rwlock, flags);
 	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
 	read_unlock_irqrestore(&table->rwlock, flags);
@@ -917,7 +900,7 @@ int ib_get_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
 
 	if (index < 0 || index >= cache->table_len)
 		ret = -EINVAL;
@@ -946,7 +929,7 @@ int ib_find_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
 
 	*index = -1;
 
@@ -986,7 +969,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
 
 	*index = -1;
 
@@ -1014,7 +997,7 @@ int ib_get_cached_lmc(struct ib_device *device,
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
-	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
+	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return ret;
@@ -1032,7 +1015,8 @@ int ib_get_cached_port_state(struct ib_device   *device,
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
-	*port_state = device->cache.port_state_cache[port_num - rdma_start_port(device)];
+	*port_state = device->cache.ports[port_num
+		- rdma_start_port(device)].port_state;
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return ret;
@@ -1051,14 +1035,13 @@ static void ib_cache_update(struct ib_device *device,
 	int                        i;
 	int                        ret;
 	struct ib_gid_table	  *table;
-	struct ib_gid_table	 **ports_table = device->cache.gid_cache;
 	bool			   use_roce_gid_table =
 					rdma_cap_roce_gid_table(device, port);
 
 	if (port < rdma_start_port(device) || port > rdma_end_port(device))
 		return;
 
-	table = ports_table[port - rdma_start_port(device)];
+	table = device->cache.ports[port - rdma_start_port(device)].gid;
 
 	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
 	if (!tprops)
@@ -1110,9 +1093,10 @@ static void ib_cache_update(struct ib_device *device,
 
 	write_lock_irq(&device->cache.lock);
 
-	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
+	old_pkey_cache = device->cache.ports[port -
+		rdma_start_port(device)].pkey;
 
-	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
+	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
 	if (!use_roce_gid_table) {
 		write_lock(&table->rwlock);
 		for (i = 0; i < gid_cache->table_len; i++) {
@@ -1122,8 +1106,8 @@ static void ib_cache_update(struct ib_device *device,
 		write_unlock(&table->rwlock);
 	}
 
-	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
-	device->cache.port_state_cache[port - rdma_start_port(device)] =
+	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
+	device->cache.ports[port - rdma_start_port(device)].port_state =
 		tprops->state;
 
 	write_unlock_irq(&device->cache.lock);
@@ -1177,26 +1161,17 @@ int ib_cache_setup_one(struct ib_device *device)
 
 	rwlock_init(&device->cache.lock);
 
-	device->cache.pkey_cache =
-		kzalloc(sizeof *device->cache.pkey_cache *
+	device->cache.ports =
+		kzalloc(sizeof(*device->cache.ports) *
 			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
-	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
-					  (rdma_end_port(device) -
-					   rdma_start_port(device) + 1),
-					  GFP_KERNEL);
-	device->cache.port_state_cache = kmalloc(sizeof *device->cache.port_state_cache *
-					  (rdma_end_port(device) -
-					   rdma_start_port(device) + 1),
-					  GFP_KERNEL);
-	if (!device->cache.pkey_cache || !device->cache.port_state_cache ||
-	    !device->cache.lmc_cache) {
+	if (!device->cache.ports) {
 		err = -ENOMEM;
-		goto free;
+		goto out;
 	}
 
 	err = gid_table_setup_one(device);
 	if (err)
-		goto free;
+		goto out;
 
 	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
 		ib_cache_update(device, p + rdma_start_port(device));
@@ -1211,10 +1186,7 @@ int ib_cache_setup_one(struct ib_device *device)
 
 err:
 	gid_table_cleanup_one(device);
-free:
-	kfree(device->cache.pkey_cache);
-	kfree(device->cache.lmc_cache);
-	kfree(device->cache.port_state_cache);
+out:
 	return err;
 }
 
@@ -1228,15 +1200,11 @@ void ib_cache_release_one(struct ib_device *device)
 	 * all the device's resources when the cache could no
 	 * longer be accessed.
 	 */
-	if (device->cache.pkey_cache)
-		for (p = 0;
-		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
-			kfree(device->cache.pkey_cache[p]);
+	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
+		kfree(device->cache.ports[p].pkey);
 
 	gid_table_release_one(device);
-	kfree(device->cache.pkey_cache);
-	kfree(device->cache.lmc_cache);
-	kfree(device->cache.port_state_cache);
+	kfree(device->cache.ports);
 }
 
 void ib_cache_cleanup_one(struct ib_device *device)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fafa988..e55afec 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1775,13 +1775,17 @@ enum ib_mad_result {
 
 #define IB_DEVICE_NAME_MAX 64
 
+struct ib_port_cache {
+	struct ib_pkey_cache  *pkey;
+	struct ib_gid_table   *gid;
+	u8                     lmc;
+	enum ib_port_state     port_state;
+};
+
 struct ib_cache {
 	rwlock_t                lock;
 	struct ib_event_handler event_handler;
-	struct ib_pkey_cache  **pkey_cache;
-	struct ib_gid_table   **gid_cache;
-	u8                     *lmc_cache;
-	enum ib_port_state     *port_state_cache;
+	struct ib_port_cache   *ports;
 };
 
 struct ib_dma_mapping_ops {
-- 
2.7.4



* Re: [PATCHv2 1/1] IB/core: create struct ib_port_cache
       [not found]     ` <b1fdf011-dc25-ad63-b223-0700f6e90bc6-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
@ 2017-01-13 15:01       ` Leon Romanovsky
       [not found]         ` <20170113150103.GP20392-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Leon Romanovsky @ 2017-01-13 15:01 UTC (permalink / raw)
  To: Jack Wang
  Cc: Doug Ledford, Jason Gunthorpe, Hefty, Sean, Hal Rosenstock,
	linux-rdma-u79uwXL29TY76Z2rM5mHXA, Michael Wang

[-- Attachment #1: Type: text/plain, Size: 619 bytes --]

On Fri, Jan 13, 2017 at 02:12:19PM +0100, Jack Wang wrote:
> Hi Doug, hi Jason and others,
>
> As Doug has already merged the first 4 patches to his tree, I rebased this patch
> on his latest for-4.11 branch (commit 102c5ce082), with a minor tweak to the commit message.
> I also configured my Thunderbird to keep the patch format as expected :), so here we go!

Hi Jack,
I don't know what you configured, but git send-email is the preferred
way to send Linux kernel patches.
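
For reference, a typical flow looks roughly like this (the reroll count and
patch file name below are only illustrative):

    git format-patch -v3 -1
    git send-email --to=linux-rdma@vger.kernel.org \
        v3-0001-RDMA-core-create-struct-ib_port_cache.patch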

Your attached patches appear incorrectly (wrong subject and mbox garbage) in
patchwork: https://patchwork.kernel.org/patch/9515651/

Thanks

[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 833 bytes --]


* Re: [PATCHv2 1/1] IB/core: create struct ib_port_cache
       [not found]         ` <20170113150103.GP20392-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
@ 2017-01-13 15:35           ` Jack Wang
       [not found]             ` <2617abae-686e-c8c6-799a-8742156fcd08-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Jack Wang @ 2017-01-13 15:35 UTC (permalink / raw)
  To: Leon Romanovsky
  Cc: Doug Ledford, Jason Gunthorpe, Hefty, Sean, Hal Rosenstock,
	linux-rdma-u79uwXL29TY76Z2rM5mHXA, Michael Wang



On 13.01.2017 16:01, Leon Romanovsky wrote:
> On Fri, Jan 13, 2017 at 02:12:19PM +0100, Jack Wang wrote:
>> Hi Doug, hi Jason and others,
>>
>> As Doug has already merged the first 4 patches to his tree, I rebased this patch
>> on his latest for-4.11 branch (commit 102c5ce082), with a minor tweak to the commit message.
>> I also configured my Thunderbird to keep the patch format as expected :), so here we go!
> 
> Hi Jack,
> I don't know what you configured, but git send-email is the preferred
> way to send Linux kernel patches.
> 
> Your attached patches appear incorrectly (wrong subject and mbox garbage) in
> patchwork: https://patchwork.kernel.org/patch/9515651/
> 
> Thanks
> 
Hi Leon,

Thanks, I understand. I changed the subject in the patch because I noticed Doug is using such naming.
But I don't follow "mbox garbage"; what do you mean here?

About git send-email: I used to send patches successfully with the tool, but somehow they now always get rejected
by gmail.

I'm not familiar with patchwork; will replying to the same thread with the proper subject fix the problem, or do I have to do it in a new thread?

Thanks,
Jack


* Re: [PATCHv2 1/1] IB/core: create struct ib_port_cache
       [not found]             ` <2617abae-686e-c8c6-799a-8742156fcd08-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
@ 2017-01-15  8:50               ` Leon Romanovsky
       [not found]                 ` <20170115085014.GB20392-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Leon Romanovsky @ 2017-01-15  8:50 UTC (permalink / raw)
  To: Jack Wang
  Cc: Doug Ledford, Jason Gunthorpe, Hefty, Sean, Hal Rosenstock,
	linux-rdma-u79uwXL29TY76Z2rM5mHXA, Michael Wang

[-- Attachment #1: Type: text/plain, Size: 1974 bytes --]

On Fri, Jan 13, 2017 at 04:35:54PM +0100, Jack Wang wrote:
>
>
> On 13.01.2017 16:01, Leon Romanovsky wrote:
> > On Fri, Jan 13, 2017 at 02:12:19PM +0100, Jack Wang wrote:
> >> Hi Doug, hi Jason and others,
> >>
> >> As Doug has already merged the first 4 patches to his tree, I rebased this patch
> >> on his latest for-4.11 branch (commit 102c5ce082), with a minor tweak to the commit message.
> >> I also configured my Thunderbird to keep the patch format as expected :), so here we go!
> >
> > Hi Jack,
> > I don't know what did you configure, but git send-email is a preferred
> > way to send linux kernel patches.
> >
> > Your attached patches are appearing wrongly (wrong subject and mbox garbage) in
> > the patchworks https://patchwork.kernel.org/patch/9515651/
> >
> > Thanks
> >
> Hi Leon,
>
> Thanks, I understand. I changed the subject in the patch because I noticed Doug is using such naming.

You changed the name of the patch, which is fine, but you used the old thread,
and it was registered under that name in the system.

Take a look at the subject of your patch:
"[PATCH] RDMA/core: create struct ib_port_cache"
and the name in the system:
"[PATCHv2,1/1] IB/core: create struct ib_port_cache"

https://patchwork.kernel.org/patch/9515651/

> But I don't follow "mbox garbage", what do you mean here?

Your patches contain all the extra wording at the beginning.
Please download yours from https://patchwork.kernel.org/patch/9515651/mbox/
and compare it with one in the proper format: https://patchwork.kernel.org/patch/9514391/mbox/

>
> About git send-email: I used to send patches successfully with the tool, but somehow they now always get rejected
> by gmail.

According to the email headers, gmail and git send-email work fine for most participants here.
>
> I'm not familiar with patchwork; will replying to the same thread with the proper subject fix the problem, or do I have to do it in a new thread?

No, please fix the setup first and submit it properly.
Thanks.

>
> Thanks,
> Jack

[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 833 bytes --]


* [PATCHv3 1/1] RDMA/core: create struct ib_port_cache
       [not found]                 ` <20170115085014.GB20392-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
@ 2017-01-17  9:11                   ` Jack Wang
       [not found]                     ` <f08926e7-54c1-fa48-2105-308f82ddcc23-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Jack Wang @ 2017-01-17  9:11 UTC (permalink / raw)
  To: Leon Romanovsky, Doug Ledford, Jason Gunthorpe
  Cc: Hefty, Sean, Hal Rosenstock, linux-rdma-u79uwXL29TY76Z2rM5mHXA,
	Michael Wang



As Jason suggested, we have 4 per-port arrays,
so it's better to have a separate structure to represent them.

It simplifies the code a bit: ~30 fewer lines of code :)

Signed-off-by: Jack Wang <jinpu.wang-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
Reviewed-by: Michael Wang <yun.wang-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
---
v2 -> v3:
- rebase to Doug's for-4.11 branch
- change subject to use the RDMA prefix instead of the IB prefix

 drivers/infiniband/core/cache.c | 134 +++++++++++++++-------------------------
 include/rdma/ib_verbs.h         |  12 ++--
 2 files changed, 59 insertions(+), 87 deletions(-)

diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index f91886b..2e52021 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -314,14 +314,13 @@ static void make_default_gid(struct  net_device *dev, union ib_gid *gid)
 int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
 		     union ib_gid *gid, struct ib_gid_attr *attr)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	int ix;
 	int ret = 0;
 	struct net_device *idev;
 	int empty;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	if (!memcmp(gid, &zgid, sizeof(*gid)))
 		return -EINVAL;
@@ -369,11 +368,10 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
 int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
 		     union ib_gid *gid, struct ib_gid_attr *attr)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	int ix;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	mutex_lock(&table->lock);
 	write_lock_irq(&table->rwlock);
@@ -399,12 +397,11 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
 int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
 				     struct net_device *ndev)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	int ix;
 	bool deleted = false;
 
-	table  = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	mutex_lock(&table->lock);
 	write_lock_irq(&table->rwlock);
@@ -428,10 +425,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
 static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
 			      union ib_gid *gid, struct ib_gid_attr *attr)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	if (index < 0 || index >= table->sz)
 		return -EINVAL;
@@ -455,14 +451,13 @@ static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
 				    unsigned long mask,
 				    u8 *port, u16 *index)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	u8 p;
 	int local_index;
 	unsigned long flags;
 
 	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
-		table = ports_table[p];
+		table = ib_dev->cache.ports[p].gid;
 		read_lock_irqsave(&table->rwlock, flags);
 		local_index = find_gid(table, gid, val, false, mask, NULL);
 		if (local_index >= 0) {
@@ -503,7 +498,6 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
 			       u16 *index)
 {
 	int local_index;
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
 			     GID_ATTR_FIND_MASK_GID_TYPE;
@@ -514,7 +508,7 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
 	    port > rdma_end_port(ib_dev))
 		return -ENOENT;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	if (ndev)
 		mask |= GID_ATTR_FIND_MASK_NETDEV;
@@ -562,21 +556,18 @@ static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
 				       void *context,
 				       u16 *index)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	unsigned int i;
 	unsigned long flags;
 	bool found = false;
 
-	if (!ports_table)
-		return -EOPNOTSUPP;
 
 	if (port < rdma_start_port(ib_dev) ||
 	    port > rdma_end_port(ib_dev) ||
 	    !rdma_protocol_roce(ib_dev, port))
 		return -EPROTONOSUPPORT;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	read_lock_irqsave(&table->rwlock, flags);
 	for (i = 0; i < table->sz; i++) {
@@ -668,14 +659,13 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
 				  unsigned long gid_type_mask,
 				  enum ib_cache_gid_default_mode mode)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	union ib_gid gid;
 	struct ib_gid_attr gid_attr;
 	struct ib_gid_attr zattr_type = zattr;
 	struct ib_gid_table *table;
 	unsigned int gid_type;
 
-	table  = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	make_default_gid(ndev, &gid);
 	memset(&gid_attr, 0, sizeof(gid_attr));
@@ -766,71 +756,64 @@ static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
 static int _gid_table_setup_one(struct ib_device *ib_dev)
 {
 	u8 port;
-	struct ib_gid_table **table;
+	struct ib_gid_table *table;
 	int err = 0;
 
-	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
-	if (!table)
-		return -ENOMEM;
-
 	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
 		u8 rdma_port = port + rdma_start_port(ib_dev);
 
-		table[port] =
+		table =
 			alloc_gid_table(
 				ib_dev->port_immutable[rdma_port].gid_tbl_len);
-		if (!table[port]) {
+		if (!table) {
 			err = -ENOMEM;
 			goto rollback_table_setup;
 		}
 
 		err = gid_table_reserve_default(ib_dev,
 						port + rdma_start_port(ib_dev),
-						table[port]);
+						table);
 		if (err)
 			goto rollback_table_setup;
+		ib_dev->cache.ports[port].gid = table;
 	}
 
-	ib_dev->cache.gid_cache = table;
 	return 0;
 
 rollback_table_setup:
 	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+		table = ib_dev->cache.ports[port].gid;
+
 		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
-				       table[port]);
-		release_gid_table(table[port]);
+				       table);
+		release_gid_table(table);
 	}
 
-	kfree(table);
 	return err;
 }
 
 static void gid_table_release_one(struct ib_device *ib_dev)
 {
-	struct ib_gid_table **table = ib_dev->cache.gid_cache;
+	struct ib_gid_table *table;
 	u8 port;
 
-	if (!table)
-		return;
-
-	for (port = 0; port < ib_dev->phys_port_cnt; port++)
-		release_gid_table(table[port]);
-
-	kfree(table);
-	ib_dev->cache.gid_cache = NULL;
+	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+		table = ib_dev->cache.ports[port].gid;
+		release_gid_table(table);
+		ib_dev->cache.ports[port].gid = NULL;
+	}
 }
 
 static void gid_table_cleanup_one(struct ib_device *ib_dev)
 {
-	struct ib_gid_table **table = ib_dev->cache.gid_cache;
+	struct ib_gid_table *table;
 	u8 port;
 
-	if (!table)
-		return;
-
-	for (port = 0; port < ib_dev->phys_port_cnt; port++)
+	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+		table = ib_dev->cache.ports[port].gid;
 		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
-				       table[port]);
+				       table);
+	}
 }
 
 static int gid_table_setup_one(struct ib_device *ib_dev)
@@ -860,12 +843,12 @@ int ib_get_cached_gid(struct ib_device *device,
 {
 	int res;
 	unsigned long flags;
-	struct ib_gid_table **ports_table = device->cache.gid_cache;
-	struct ib_gid_table *table = ports_table[port_num - rdma_start_port(device)];
+	struct ib_gid_table *table;
 
 	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 		return -EINVAL;
 
+	table = device->cache.ports[port_num - rdma_start_port(device)].gid;
 	read_lock_irqsave(&table->rwlock, flags);
 	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
 	read_unlock_irqrestore(&table->rwlock, flags);
@@ -917,7 +900,7 @@ int ib_get_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
 
 	if (index < 0 || index >= cache->table_len)
 		ret = -EINVAL;
@@ -946,7 +929,7 @@ int ib_find_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
 
 	*index = -1;
 
@@ -986,7 +969,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
 
 	*index = -1;
 
@@ -1014,7 +997,7 @@ int ib_get_cached_lmc(struct ib_device *device,
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
-	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
+	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return ret;
@@ -1032,7 +1015,8 @@ int ib_get_cached_port_state(struct ib_device   *device,
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
-	*port_state = device->cache.port_state_cache[port_num - rdma_start_port(device)];
+	*port_state = device->cache.ports[port_num
+		- rdma_start_port(device)].port_state;
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return ret;
@@ -1051,14 +1035,13 @@ static void ib_cache_update(struct ib_device *device,
 	int                        i;
 	int                        ret;
 	struct ib_gid_table	  *table;
-	struct ib_gid_table	 **ports_table = device->cache.gid_cache;
 	bool			   use_roce_gid_table =
 					rdma_cap_roce_gid_table(device, port);
 
 	if (port < rdma_start_port(device) || port > rdma_end_port(device))
 		return;
 
-	table = ports_table[port - rdma_start_port(device)];
+	table = device->cache.ports[port - rdma_start_port(device)].gid;
 
 	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
 	if (!tprops)
@@ -1110,9 +1093,10 @@ static void ib_cache_update(struct ib_device *device,
 
 	write_lock_irq(&device->cache.lock);
 
-	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
+	old_pkey_cache = device->cache.ports[port -
+		rdma_start_port(device)].pkey;
 
-	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
+	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
 	if (!use_roce_gid_table) {
 		write_lock(&table->rwlock);
 		for (i = 0; i < gid_cache->table_len; i++) {
@@ -1122,8 +1106,8 @@ static void ib_cache_update(struct ib_device *device,
 		write_unlock(&table->rwlock);
 	}
 
-	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
-	device->cache.port_state_cache[port - rdma_start_port(device)] =
+	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
+	device->cache.ports[port - rdma_start_port(device)].port_state =
 		tprops->state;
 
 	write_unlock_irq(&device->cache.lock);
@@ -1177,26 +1161,17 @@ int ib_cache_setup_one(struct ib_device *device)
 
 	rwlock_init(&device->cache.lock);
 
-	device->cache.pkey_cache =
-		kzalloc(sizeof *device->cache.pkey_cache *
+	device->cache.ports =
+		kzalloc(sizeof(*device->cache.ports) *
 			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
-	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
-					  (rdma_end_port(device) -
-					   rdma_start_port(device) + 1),
-					  GFP_KERNEL);
-	device->cache.port_state_cache = kmalloc(sizeof *device->cache.port_state_cache *
-					  (rdma_end_port(device) -
-					   rdma_start_port(device) + 1),
-					  GFP_KERNEL);
-	if (!device->cache.pkey_cache || !device->cache.port_state_cache ||
-	    !device->cache.lmc_cache) {
+	if (!device->cache.ports) {
 		err = -ENOMEM;
-		goto free;
+		goto out;
 	}
 
 	err = gid_table_setup_one(device);
 	if (err)
-		goto free;
+		goto out;
 
 	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
 		ib_cache_update(device, p + rdma_start_port(device));
@@ -1211,10 +1186,7 @@ int ib_cache_setup_one(struct ib_device *device)
 
 err:
 	gid_table_cleanup_one(device);
-free:
-	kfree(device->cache.pkey_cache);
-	kfree(device->cache.lmc_cache);
-	kfree(device->cache.port_state_cache);
+out:
 	return err;
 }
 
@@ -1228,15 +1200,11 @@ void ib_cache_release_one(struct ib_device *device)
 	 * all the device's resources when the cache could no
 	 * longer be accessed.
 	 */
-	if (device->cache.pkey_cache)
-		for (p = 0;
-		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
-			kfree(device->cache.pkey_cache[p]);
+	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
+		kfree(device->cache.ports[p].pkey);
 
 	gid_table_release_one(device);
-	kfree(device->cache.pkey_cache);
-	kfree(device->cache.lmc_cache);
-	kfree(device->cache.port_state_cache);
+	kfree(device->cache.ports);
 }
 
 void ib_cache_cleanup_one(struct ib_device *device)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fafa988..e55afec 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1775,13 +1775,17 @@ enum ib_mad_result {
 
 #define IB_DEVICE_NAME_MAX 64
 
+struct ib_port_cache {
+	struct ib_pkey_cache  *pkey;
+	struct ib_gid_table   *gid;
+	u8                     lmc;
+	enum ib_port_state     port_state;
+};
+
 struct ib_cache {
 	rwlock_t                lock;
 	struct ib_event_handler event_handler;
-	struct ib_pkey_cache  **pkey_cache;
-	struct ib_gid_table   **gid_cache;
-	u8                     *lmc_cache;
-	enum ib_port_state     *port_state_cache;
+	struct ib_port_cache   *ports;
 };
 
 struct ib_dma_mapping_ops {
-- 
2.7.4
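
For readers who want to see the shape of the refactor in isolation: below is a
minimal userspace sketch, not part of the patch, that models it with stand-in
types. The four separately allocated per-port arrays collapse into a single
array of per-port entries, indexed by port - rdma_start_port(), and released
with one free instead of three.

/*
 * Simplified model of struct ib_port_cache (not kernel code).  The real
 * pkey and gid members point to struct ib_pkey_cache and struct
 * ib_gid_table; plain stand-ins are used so the example compiles on its own.
 */
#include <stdio.h>
#include <stdlib.h>

enum port_state { PORT_DOWN, PORT_ACTIVE };

struct port_cache {
	int             *pkey;       /* stand-in for struct ib_pkey_cache * */
	void            *gid;        /* stand-in for struct ib_gid_table *  */
	unsigned char    lmc;
	enum port_state  port_state;
};

struct dev_cache {
	struct port_cache *ports;      /* one array replaces four */
	unsigned char      start_port; /* first valid port number, as rdma_start_port() */
	unsigned char      port_cnt;
};

/* Every per-port lookup uses the same index: port - start_port. */
static struct port_cache *port_entry(struct dev_cache *c, unsigned char port)
{
	return &c->ports[port - c->start_port];
}

int main(void)
{
	struct dev_cache cache = { .start_port = 1, .port_cnt = 2 };

	cache.ports = calloc(cache.port_cnt, sizeof(*cache.ports));
	if (!cache.ports)
		return 1;

	port_entry(&cache, 1)->lmc = 3;
	port_entry(&cache, 2)->port_state = PORT_ACTIVE;

	printf("port 1 lmc=%d, port 2 state=%d\n",
	       port_entry(&cache, 1)->lmc,
	       port_entry(&cache, 2)->port_state);

	free(cache.ports);  /* one free instead of three kfree() calls */
	return 0;
}

The kernel version additionally keeps the GID table pointer in the same entry
and guards readers with device->cache.lock (or the per-table rwlock for GIDs);
the sketch leaves locking out.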

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCHv3 1/1] RDMA/core: create struct ib_port_cache
       [not found]                     ` <f08926e7-54c1-fa48-2105-308f82ddcc23-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
@ 2017-01-17 11:24                       ` Leon Romanovsky
       [not found]                         ` <20170117112426.GL32481-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
  2017-01-24 21:32                       ` Doug Ledford
  1 sibling, 1 reply; 10+ messages in thread
From: Leon Romanovsky @ 2017-01-17 11:24 UTC (permalink / raw)
  To: Jack Wang
  Cc: Doug Ledford, Jason Gunthorpe, Hefty, Sean, Hal Rosenstock,
	linux-rdma-u79uwXL29TY76Z2rM5mHXA, Michael Wang

[-- Attachment #1: Type: text/plain, Size: 15556 bytes --]

On Tue, Jan 17, 2017 at 10:11:12AM +0100, Jack Wang wrote:
>
>
> As Jason suggested, we have 4 elements for per port arrays,
> it's better to have a separate structure to represent them.
>
> It simplifies code a bit, ~ 30 lines of code less :)

The commit message should be descriptive.
You really NEED to read SubmittingPatches before sending patches.
http://lxr.free-electrons.com/source/Documentation/SubmittingPatches#L106

106 2) Describe your changes
107 ------------------------
108
109 Describe your problem.  Whether your patch is a one-line bug fix or
110 5000 lines of a new feature, there must be an underlying problem that
111 motivated you to do this work.  Convince the reviewer that there is a
112 problem worth fixing and that it makes sense for them to read past the
113 first paragraph.

And if I read email headers correctly, you still didn't use "git send-email" to send the patch.

Thanks

>
> Signed-off-by: Jack Wang <jinpu.wang-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
> Reviewed-by: Michael Wang <yun.wang-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
> ---
> v3..v2:
> - rebase to Doug's for-4.11 branch
> - change subject with RDMA prefix instead of IB prefix
>
>  drivers/infiniband/core/cache.c | 134 +++++++++++++++-------------------------
>  include/rdma/ib_verbs.h         |  12 ++--
>  2 files changed, 59 insertions(+), 87 deletions(-)
>
> diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
> index f91886b..2e52021 100644
> --- a/drivers/infiniband/core/cache.c
> +++ b/drivers/infiniband/core/cache.c
> @@ -314,14 +314,13 @@ static void make_default_gid(struct  net_device *dev, union ib_gid *gid)
>  int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
>  		     union ib_gid *gid, struct ib_gid_attr *attr)
>  {
> -	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
>  	struct ib_gid_table *table;
>  	int ix;
>  	int ret = 0;
>  	struct net_device *idev;
>  	int empty;
>
> -	table = ports_table[port - rdma_start_port(ib_dev)];
> +	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
>
>  	if (!memcmp(gid, &zgid, sizeof(*gid)))
>  		return -EINVAL;
> @@ -369,11 +368,10 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
>  int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
>  		     union ib_gid *gid, struct ib_gid_attr *attr)
>  {
> -	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
>  	struct ib_gid_table *table;
>  	int ix;
>
> -	table = ports_table[port - rdma_start_port(ib_dev)];
> +	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
>
>  	mutex_lock(&table->lock);
>  	write_lock_irq(&table->rwlock);
> @@ -399,12 +397,11 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
>  int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
>  				     struct net_device *ndev)
>  {
> -	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
>  	struct ib_gid_table *table;
>  	int ix;
>  	bool deleted = false;
>
> -	table  = ports_table[port - rdma_start_port(ib_dev)];
> +	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
>
>  	mutex_lock(&table->lock);
>  	write_lock_irq(&table->rwlock);
> @@ -428,10 +425,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
>  static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
>  			      union ib_gid *gid, struct ib_gid_attr *attr)
>  {
> -	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
>  	struct ib_gid_table *table;
>
> -	table = ports_table[port - rdma_start_port(ib_dev)];
> +	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
>
>  	if (index < 0 || index >= table->sz)
>  		return -EINVAL;
> @@ -455,14 +451,13 @@ static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
>  				    unsigned long mask,
>  				    u8 *port, u16 *index)
>  {
> -	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
>  	struct ib_gid_table *table;
>  	u8 p;
>  	int local_index;
>  	unsigned long flags;
>
>  	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
> -		table = ports_table[p];
> +		table = ib_dev->cache.ports[p].gid;
>  		read_lock_irqsave(&table->rwlock, flags);
>  		local_index = find_gid(table, gid, val, false, mask, NULL);
>  		if (local_index >= 0) {
> @@ -503,7 +498,6 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
>  			       u16 *index)
>  {
>  	int local_index;
> -	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
>  	struct ib_gid_table *table;
>  	unsigned long mask = GID_ATTR_FIND_MASK_GID |
>  			     GID_ATTR_FIND_MASK_GID_TYPE;
> @@ -514,7 +508,7 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
>  	    port > rdma_end_port(ib_dev))
>  		return -ENOENT;
>
> -	table = ports_table[port - rdma_start_port(ib_dev)];
> +	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
>
>  	if (ndev)
>  		mask |= GID_ATTR_FIND_MASK_NETDEV;
> @@ -562,21 +556,18 @@ static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
>  				       void *context,
>  				       u16 *index)
>  {
> -	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
>  	struct ib_gid_table *table;
>  	unsigned int i;
>  	unsigned long flags;
>  	bool found = false;
>
> -	if (!ports_table)
> -		return -EOPNOTSUPP;
>
>  	if (port < rdma_start_port(ib_dev) ||
>  	    port > rdma_end_port(ib_dev) ||
>  	    !rdma_protocol_roce(ib_dev, port))
>  		return -EPROTONOSUPPORT;
>
> -	table = ports_table[port - rdma_start_port(ib_dev)];
> +	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
>
>  	read_lock_irqsave(&table->rwlock, flags);
>  	for (i = 0; i < table->sz; i++) {
> @@ -668,14 +659,13 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
>  				  unsigned long gid_type_mask,
>  				  enum ib_cache_gid_default_mode mode)
>  {
> -	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
>  	union ib_gid gid;
>  	struct ib_gid_attr gid_attr;
>  	struct ib_gid_attr zattr_type = zattr;
>  	struct ib_gid_table *table;
>  	unsigned int gid_type;
>
> -	table  = ports_table[port - rdma_start_port(ib_dev)];
> +	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
>
>  	make_default_gid(ndev, &gid);
>  	memset(&gid_attr, 0, sizeof(gid_attr));
> @@ -766,71 +756,64 @@ static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
>  static int _gid_table_setup_one(struct ib_device *ib_dev)
>  {
>  	u8 port;
> -	struct ib_gid_table **table;
> +	struct ib_gid_table *table;
>  	int err = 0;
>
> -	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
> -	if (!table)
> -		return -ENOMEM;
> -
>  	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
>  		u8 rdma_port = port + rdma_start_port(ib_dev);
>
> -		table[port] =
> +		table =
>  			alloc_gid_table(
>  				ib_dev->port_immutable[rdma_port].gid_tbl_len);
> -		if (!table[port]) {
> +		if (!table) {
>  			err = -ENOMEM;
>  			goto rollback_table_setup;
>  		}
>
>  		err = gid_table_reserve_default(ib_dev,
>  						port + rdma_start_port(ib_dev),
> -						table[port]);
> +						table);
>  		if (err)
>  			goto rollback_table_setup;
> +		ib_dev->cache.ports[port].gid = table;
>  	}
>
> -	ib_dev->cache.gid_cache = table;
>  	return 0;
>
>  rollback_table_setup:
>  	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
> +		table = ib_dev->cache.ports[port].gid;
> +
>  		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
> -				       table[port]);
> -		release_gid_table(table[port]);
> +				       table);
> +		release_gid_table(table);
>  	}
>
> -	kfree(table);
>  	return err;
>  }
>
>  static void gid_table_release_one(struct ib_device *ib_dev)
>  {
> -	struct ib_gid_table **table = ib_dev->cache.gid_cache;
> +	struct ib_gid_table *table;
>  	u8 port;
>
> -	if (!table)
> -		return;
> -
> -	for (port = 0; port < ib_dev->phys_port_cnt; port++)
> -		release_gid_table(table[port]);
> -
> -	kfree(table);
> -	ib_dev->cache.gid_cache = NULL;
> +	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
> +		table = ib_dev->cache.ports[port].gid;
> +		release_gid_table(table);
> +		ib_dev->cache.ports[port].gid = NULL;
> +	}
>  }
>
>  static void gid_table_cleanup_one(struct ib_device *ib_dev)
>  {
> -	struct ib_gid_table **table = ib_dev->cache.gid_cache;
> +	struct ib_gid_table *table;
>  	u8 port;
>
> -	if (!table)
> -		return;
> -
> -	for (port = 0; port < ib_dev->phys_port_cnt; port++)
> +	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
> +		table = ib_dev->cache.ports[port].gid;
>  		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
> -				       table[port]);
> +				       table);
> +	}
>  }
>
>  static int gid_table_setup_one(struct ib_device *ib_dev)
> @@ -860,12 +843,12 @@ int ib_get_cached_gid(struct ib_device *device,
>  {
>  	int res;
>  	unsigned long flags;
> -	struct ib_gid_table **ports_table = device->cache.gid_cache;
> -	struct ib_gid_table *table = ports_table[port_num - rdma_start_port(device)];
> +	struct ib_gid_table *table;
>
>  	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
>  		return -EINVAL;
>
> +	table = device->cache.ports[port_num - rdma_start_port(device)].gid;
>  	read_lock_irqsave(&table->rwlock, flags);
>  	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
>  	read_unlock_irqrestore(&table->rwlock, flags);
> @@ -917,7 +900,7 @@ int ib_get_cached_pkey(struct ib_device *device,
>
>  	read_lock_irqsave(&device->cache.lock, flags);
>
> -	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
> +	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
>
>  	if (index < 0 || index >= cache->table_len)
>  		ret = -EINVAL;
> @@ -946,7 +929,7 @@ int ib_find_cached_pkey(struct ib_device *device,
>
>  	read_lock_irqsave(&device->cache.lock, flags);
>
> -	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
> +	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
>
>  	*index = -1;
>
> @@ -986,7 +969,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
>
>  	read_lock_irqsave(&device->cache.lock, flags);
>
> -	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
> +	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
>
>  	*index = -1;
>
> @@ -1014,7 +997,7 @@ int ib_get_cached_lmc(struct ib_device *device,
>  		return -EINVAL;
>
>  	read_lock_irqsave(&device->cache.lock, flags);
> -	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
> +	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
>  	read_unlock_irqrestore(&device->cache.lock, flags);
>
>  	return ret;
> @@ -1032,7 +1015,8 @@ int ib_get_cached_port_state(struct ib_device   *device,
>  		return -EINVAL;
>
>  	read_lock_irqsave(&device->cache.lock, flags);
> -	*port_state = device->cache.port_state_cache[port_num - rdma_start_port(device)];
> +	*port_state = device->cache.ports[port_num
> +		- rdma_start_port(device)].port_state;
>  	read_unlock_irqrestore(&device->cache.lock, flags);
>
>  	return ret;
> @@ -1051,14 +1035,13 @@ static void ib_cache_update(struct ib_device *device,
>  	int                        i;
>  	int                        ret;
>  	struct ib_gid_table	  *table;
> -	struct ib_gid_table	 **ports_table = device->cache.gid_cache;
>  	bool			   use_roce_gid_table =
>  					rdma_cap_roce_gid_table(device, port);
>
>  	if (port < rdma_start_port(device) || port > rdma_end_port(device))
>  		return;
>
> -	table = ports_table[port - rdma_start_port(device)];
> +	table = device->cache.ports[port - rdma_start_port(device)].gid;
>
>  	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
>  	if (!tprops)
> @@ -1110,9 +1093,10 @@ static void ib_cache_update(struct ib_device *device,
>
>  	write_lock_irq(&device->cache.lock);
>
> -	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
> +	old_pkey_cache = device->cache.ports[port -
> +		rdma_start_port(device)].pkey;
>
> -	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
> +	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
>  	if (!use_roce_gid_table) {
>  		write_lock(&table->rwlock);
>  		for (i = 0; i < gid_cache->table_len; i++) {
> @@ -1122,8 +1106,8 @@ static void ib_cache_update(struct ib_device *device,
>  		write_unlock(&table->rwlock);
>  	}
>
> -	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
> -	device->cache.port_state_cache[port - rdma_start_port(device)] =
> +	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
> +	device->cache.ports[port - rdma_start_port(device)].port_state =
>  		tprops->state;
>
>  	write_unlock_irq(&device->cache.lock);
> @@ -1177,26 +1161,17 @@ int ib_cache_setup_one(struct ib_device *device)
>
>  	rwlock_init(&device->cache.lock);
>
> -	device->cache.pkey_cache =
> -		kzalloc(sizeof *device->cache.pkey_cache *
> +	device->cache.ports =
> +		kzalloc(sizeof(*device->cache.ports) *
>  			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
> -	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
> -					  (rdma_end_port(device) -
> -					   rdma_start_port(device) + 1),
> -					  GFP_KERNEL);
> -	device->cache.port_state_cache = kmalloc(sizeof *device->cache.port_state_cache *
> -					  (rdma_end_port(device) -
> -					   rdma_start_port(device) + 1),
> -					  GFP_KERNEL);
> -	if (!device->cache.pkey_cache || !device->cache.port_state_cache ||
> -	    !device->cache.lmc_cache) {
> +	if (!device->cache.ports) {
>  		err = -ENOMEM;
> -		goto free;
> +		goto out;
>  	}
>
>  	err = gid_table_setup_one(device);
>  	if (err)
> -		goto free;
> +		goto out;
>
>  	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
>  		ib_cache_update(device, p + rdma_start_port(device));
> @@ -1211,10 +1186,7 @@ int ib_cache_setup_one(struct ib_device *device)
>
>  err:
>  	gid_table_cleanup_one(device);
> -free:
> -	kfree(device->cache.pkey_cache);
> -	kfree(device->cache.lmc_cache);
> -	kfree(device->cache.port_state_cache);
> +out:
>  	return err;
>  }
>
> @@ -1228,15 +1200,11 @@ void ib_cache_release_one(struct ib_device *device)
>  	 * all the device's resources when the cache could no
>  	 * longer be accessed.
>  	 */
> -	if (device->cache.pkey_cache)
> -		for (p = 0;
> -		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
> -			kfree(device->cache.pkey_cache[p]);
> +	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
> +		kfree(device->cache.ports[p].pkey);
>
>  	gid_table_release_one(device);
> -	kfree(device->cache.pkey_cache);
> -	kfree(device->cache.lmc_cache);
> -	kfree(device->cache.port_state_cache);
> +	kfree(device->cache.ports);
>  }
>
>  void ib_cache_cleanup_one(struct ib_device *device)
> diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
> index fafa988..e55afec 100644
> --- a/include/rdma/ib_verbs.h
> +++ b/include/rdma/ib_verbs.h
> @@ -1775,13 +1775,17 @@ enum ib_mad_result {
>
>  #define IB_DEVICE_NAME_MAX 64
>
> +struct ib_port_cache {
> +	struct ib_pkey_cache  *pkey;
> +	struct ib_gid_table   *gid;
> +	u8                     lmc;
> +	enum ib_port_state     port_state;
> +};
> +
>  struct ib_cache {
>  	rwlock_t                lock;
>  	struct ib_event_handler event_handler;
> -	struct ib_pkey_cache  **pkey_cache;
> -	struct ib_gid_table   **gid_cache;
> -	u8                     *lmc_cache;
> -	enum ib_port_state     *port_state_cache;
> +	struct ib_port_cache   *ports;
>  };
>
>  struct ib_dma_mapping_ops {
> --
> 2.7.4


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCHv3 1/1] RDMA/core: create struct ib_port_cache
       [not found]                         ` <20170117112426.GL32481-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
@ 2017-01-17 11:34                           ` Jack Wang
       [not found]                             ` <a68939e6-62d3-0d83-16f4-e7784c6fedad-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Jack Wang @ 2017-01-17 11:34 UTC (permalink / raw)
  To: Leon Romanovsky
  Cc: Doug Ledford, Jason Gunthorpe, Hefty, Sean, Hal Rosenstock,
	linux-rdma-u79uwXL29TY76Z2rM5mHXA, Michael Wang

Hi Leon,

On 17.01.2017 12:24, Leon Romanovsky wrote:
> On Tue, Jan 17, 2017 at 10:11:12AM +0100, Jack Wang wrote:
>>
>>
>> As Jason suggested, we have 4 elements for per port arrays,
>> it's better to have a separate structure to represent them.
>>
>> It simplifies code a bit, ~ 30 lines of code less :)
> 
> The commit message should be descriptive.
> You really NEED to read SubmittingPatches before sending patches.
> http://lxr.free-electrons.com/source/Documentation/SubmittingPatches#L106
Okay, I can improve the commit message.

> 
> 106 2) Describe your changes
> 107 ------------------------
> 108
> 109 Describe your problem.  Whether your patch is a one-line bug fix or
> 110 5000 lines of a new feature, there must be an underlying problem that
> 111 motivated you to do this work.  Convince the reviewer that there is a
> 112 problem worth fixing and that it makes sense for them to read past the
> 113 first paragraph.
> 
> And if I read email headers correctly, you still didn't use "git send-email" to send the patch.

Yes, you're right. But I thought I could follow my own preference here, or is only git send-email allowed?

Thanks,

> 
> Thanks
> 


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCHv3 1/1] RDMA/core: create struct ib_port_cache
       [not found]                             ` <a68939e6-62d3-0d83-16f4-e7784c6fedad-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
@ 2017-01-17 11:51                               ` Leon Romanovsky
  0 siblings, 0 replies; 10+ messages in thread
From: Leon Romanovsky @ 2017-01-17 11:51 UTC (permalink / raw)
  To: Jack Wang
  Cc: Doug Ledford, Jason Gunthorpe, Hefty, Sean, Hal Rosenstock,
	linux-rdma-u79uwXL29TY76Z2rM5mHXA, Michael Wang

[-- Attachment #1: Type: text/plain, Size: 1815 bytes --]

On Tue, Jan 17, 2017 at 12:34:18PM +0100, Jack Wang wrote:
> Hi Leon,
>
> On 17.01.2017 12:24, Leon Romanovsky wrote:
> > On Tue, Jan 17, 2017 at 10:11:12AM +0100, Jack Wang wrote:
> >>
> >>
> >> As Jason suggested, we have 4 elements for per port arrays,
> >> it's better to have a separate structure to represent them.
> >>
> >> It simplifies code a bit, ~ 30 lines of code less :)
> >
> > The commit message should be descriptive.
> > You really NEED to read SubmittingPatches before sending patches.
> > http://lxr.free-electrons.com/source/Documentation/SubmittingPatches#L106
> Okay, I can improve the commit message.
>
> >
> > 106 2) Describe your changes
> > 107 ------------------------
> > 108
> > 109 Describe your problem.  Whether your patch is a one-line bug fix or
> > 110 5000 lines of a new feature, there must be an underlying problem that
> > 111 motivated you to do this work.  Convince the reviewer that there is a
> > 112 problem worth fixing and that it makes sense for them to read past the
> > 113 first paragraph.
> >
> > And if I read email headers correctly, you still didn't use "git send-email" to send the patch.
>
> Yes, you're right. But I thought I could follow my own preference here, or is only git send-email allowed?

It is a convenient way to send patches to the Linux kernel. In your specific
case, your mail client set the wrong email headers, which caused your patch
to be attached to the wrong thread.

So you can send patches with any tool you want, as long as you know what
you are doing, and I'm not sure that is the case here.

Thanks

>
> Thanks,
>
> >
> > Thanks
> >
>


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCHv3 1/1] RDMA/core: create struct ib_port_cache
       [not found]                     ` <f08926e7-54c1-fa48-2105-308f82ddcc23-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
  2017-01-17 11:24                       ` Leon Romanovsky
@ 2017-01-24 21:32                       ` Doug Ledford
  1 sibling, 0 replies; 10+ messages in thread
From: Doug Ledford @ 2017-01-24 21:32 UTC (permalink / raw)
  To: Jack Wang, Leon Romanovsky, Jason Gunthorpe
  Cc: Hefty, Sean, Hal Rosenstock, linux-rdma-u79uwXL29TY76Z2rM5mHXA,
	Michael Wang

[-- Attachment #1: Type: text/plain, Size: 630 bytes --]

On Tue, 2017-01-17 at 10:11 +0100, Jack Wang wrote:
> As Jason suggested, we have 4 elements for per port arrays,
> it's better to have a separate structure to represent them.
> 
> It simplifies code a bit, ~ 30 lines of code less :)
> 
> Signed-off-by: Jack Wang <jinpu.wang-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
> Reviewed-by: Michael Wang <yun.wang-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>

v3 of your patch was applied, thanks.

-- 
Doug Ledford <dledford-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
    GPG KeyID: B826A3330E572FDD
   
Key fingerprint = AE6B 1BDA 122B 23B4 265B  1274 B826 A333 0E57 2FDD


^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2017-01-24 21:32 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-01-05 16:41 [PATCHv2 1/1] IB/core: create struct ib_port_cache Jinpu Wang
     [not found] ` <CAMGffEn-fOOe7s2rniqwcaocoV+jxkeJDHZ6WYJkkXi+CBQbEA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2017-01-13 13:12   ` Jack Wang
     [not found]     ` <b1fdf011-dc25-ad63-b223-0700f6e90bc6-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
2017-01-13 15:01       ` Leon Romanovsky
     [not found]         ` <20170113150103.GP20392-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
2017-01-13 15:35           ` Jack Wang
     [not found]             ` <2617abae-686e-c8c6-799a-8742156fcd08-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
2017-01-15  8:50               ` Leon Romanovsky
     [not found]                 ` <20170115085014.GB20392-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
2017-01-17  9:11                   ` [PATCHv3 1/1] RDMA/core: " Jack Wang
     [not found]                     ` <f08926e7-54c1-fa48-2105-308f82ddcc23-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
2017-01-17 11:24                       ` Leon Romanovsky
     [not found]                         ` <20170117112426.GL32481-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
2017-01-17 11:34                           ` Jack Wang
     [not found]                             ` <a68939e6-62d3-0d83-16f4-e7784c6fedad-EIkl63zCoXaH+58JC4qpiA@public.gmane.org>
2017-01-17 11:51                               ` Leon Romanovsky
2017-01-24 21:32                       ` Doug Ledford
