From: Ido Schimmel <idosch@idosch.org>
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, kuba@kernel.org, jiri@mellanox.com,
	mlxsw@mellanox.com, Ido Schimmel <idosch@mellanox.com>
Subject: [PATCH net-next 8/9] mlxsw: spectrum_cnt: Expose devlink resource occupancy for counters
Date: Wed, 18 Mar 2020 15:48:56 +0200
Message-ID: <20200318134857.1003018-9-idosch@idosch.org>
In-Reply-To: <20200318134857.1003018-1-idosch@idosch.org>

From: Jiri Pirko <jiri@mellanox.com>

Implement occupancy counting for counters and expose it over the devlink
resource API, both per sub-pool and for the pool as a whole.
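
For illustration, with this patch applied the occupancy should become
visible from user space via the devlink tool, next to the sizes exposed
by the earlier patches in this series. A rough sketch of the expected
(abbreviated) output, where the device name, sizes and occupancy values
are placeholders and the resource names follow the names registered by
the driver ("counters" with "flow" and "rif" sub-resources):

  $ devlink resource show pci/0000:03:00.0
  ...
  name counters size 79872 occ 24 unit entry
    resources:
      name flow size 77822 occ 12 unit entry
      name rif size 2050 occ 12 unit entry

Since each allocation adds the sub-pool's entry size to both the
sub-pool and the pool counters, the "counters" occupancy is expected to
equal the sum of its child resources.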

Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
---
 .../ethernet/mellanox/mlxsw/spectrum_cnt.c    | 63 ++++++++++++++++++-
 1 file changed, 62 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index 417c512bc7a2..0268f0a6662a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -15,12 +15,14 @@ struct mlxsw_sp_counter_sub_pool {
 	u64 resource_id; /* devlink resource id */
 	unsigned int entry_size;
 	unsigned int bank_count;
+	atomic_t active_entries_count;
 };
 
 struct mlxsw_sp_counter_pool {
 	u64 pool_size;
 	unsigned long *usage; /* Usage bitmap */
 	spinlock_t counter_pool_lock; /* Protects counter pool allocations */
+	atomic_t active_entries_count;
 	unsigned int sub_pools_count;
 	struct mlxsw_sp_counter_sub_pool sub_pools[];
 };
@@ -40,6 +42,13 @@ static const struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
 	}
 };
 
+static u64 mlxsw_sp_counter_sub_pool_occ_get(void *priv)
+{
+	const struct mlxsw_sp_counter_sub_pool *sub_pool = priv;
+
+	return atomic_read(&sub_pool->active_entries_count);
+}
+
 static int mlxsw_sp_counter_sub_pools_init(struct mlxsw_sp *mlxsw_sp)
 {
 	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
@@ -62,12 +71,50 @@ static int mlxsw_sp_counter_sub_pools_init(struct mlxsw_sp *mlxsw_sp)
 						sub_pool->resource_id,
 						&sub_pool->size);
 		if (err)
-			return err;
+			goto err_resource_size_get;
+
+		devlink_resource_occ_get_register(devlink,
+						  sub_pool->resource_id,
+						  mlxsw_sp_counter_sub_pool_occ_get,
+						  sub_pool);
 
 		sub_pool->base_index = base_index;
 		base_index += sub_pool->size;
+		atomic_set(&sub_pool->active_entries_count, 0);
 	}
 	return 0;
+
+err_resource_size_get:
+	for (i--; i >= 0; i--) {
+		sub_pool = &pool->sub_pools[i];
+
+		devlink_resource_occ_get_unregister(devlink,
+						    sub_pool->resource_id);
+	}
+	return err;
+}
+
+static void mlxsw_sp_counter_sub_pools_fini(struct mlxsw_sp *mlxsw_sp)
+{
+	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+	struct mlxsw_sp_counter_sub_pool *sub_pool;
+	int i;
+
+	for (i = 0; i < pool->sub_pools_count; i++) {
+		sub_pool = &pool->sub_pools[i];
+
+		WARN_ON(atomic_read(&sub_pool->active_entries_count));
+		devlink_resource_occ_get_unregister(devlink,
+						    sub_pool->resource_id);
+	}
+}
+
+static u64 mlxsw_sp_counter_pool_occ_get(void *priv)
+{
+	const struct mlxsw_sp_counter_pool *pool = priv;
+
+	return atomic_read(&pool->active_entries_count);
 }
 
 int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
@@ -88,11 +135,14 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
 	       sub_pools_count * sizeof(*sub_pool));
 	pool->sub_pools_count = sub_pools_count;
 	spin_lock_init(&pool->counter_pool_lock);
+	atomic_set(&pool->active_entries_count, 0);
 
 	err = devlink_resource_size_get(devlink, MLXSW_SP_RESOURCE_COUNTERS,
 					&pool->pool_size);
 	if (err)
 		goto err_pool_resource_size_get;
+	devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
+					  mlxsw_sp_counter_pool_occ_get, pool);
 
 	map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
 
@@ -111,6 +161,8 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
 err_sub_pools_init:
 	kfree(pool->usage);
 err_usage_alloc:
+	devlink_resource_occ_get_unregister(devlink,
+					    MLXSW_SP_RESOURCE_COUNTERS);
 err_pool_resource_size_get:
 	kfree(pool);
 	return err;
@@ -119,10 +171,15 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
 void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
 {
 	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
 
+	mlxsw_sp_counter_sub_pools_fini(mlxsw_sp);
 	WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
 			       pool->pool_size);
+	WARN_ON(atomic_read(&pool->active_entries_count));
 	kfree(pool->usage);
+	devlink_resource_occ_get_unregister(devlink,
+					    MLXSW_SP_RESOURCE_COUNTERS);
 	kfree(pool);
 }
 
@@ -158,6 +215,8 @@ int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
 	spin_unlock(&pool->counter_pool_lock);
 
 	*p_counter_index = entry_index;
+	atomic_add(sub_pool->entry_size, &sub_pool->active_entries_count);
+	atomic_add(sub_pool->entry_size, &pool->active_entries_count);
 	return 0;
 
 err_alloc:
@@ -180,6 +239,8 @@ void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
 	for (i = 0; i < sub_pool->entry_size; i++)
 		__clear_bit(counter_index + i, pool->usage);
 	spin_unlock(&pool->counter_pool_lock);
+	atomic_sub(sub_pool->entry_size, &sub_pool->active_entries_count);
+	atomic_sub(sub_pool->entry_size, &pool->active_entries_count);
 }
 
 int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core)
-- 
2.24.1


