From: Ido Schimmel <idosch@nvidia.com>
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, kuba@kernel.org, pabeni@redhat.com,
	jiri@nvidia.com, vadimp@nvidia.com, petrm@nvidia.com,
	andrew@lunn.ch, dsahern@gmail.com, mlxsw@nvidia.com,
	Ido Schimmel <idosch@nvidia.com>
Subject: [PATCH net-next 09/17] mlxsw: spectrum: Introduce port mapping change event processing
Date: Mon, 18 Apr 2022 09:42:33 +0300	[thread overview]
Message-ID: <20220418064241.2925668-10-idosch@nvidia.com> (raw)
In-Reply-To: <20220418064241.2925668-1-idosch@nvidia.com>

From: Jiri Pirko <jiri@nvidia.com>

Register the PMLPE trap and process the port mapping changes delivered
by it by creating the related ports. Note that this happens after
provisioning: the line card INI is processed and merged by the firmware,
and a PMLPE is generated for each port. Process each such mapping
change.

The layout of PMLPE is the same as the layout of PMLP.
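
For reference, the event handling added here follows the usual kernel
pattern of queueing events from atomic context (the trap/listener
callback) and handling them later from a work item in process context,
where the devlink lock may be taken and ports created. Below is only a
minimal, generic sketch of that pattern, not the driver code itself;
all identifiers (struct ev, ev_enqueue(), and so on) are made up for
illustration:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>

struct ev {
	struct list_head list;
	char payload[64];	/* copy of the event register payload */
};

static LIST_HEAD(ev_queue);
static DEFINE_SPINLOCK(ev_lock);	/* protects ev_queue */

static void ev_work_fn(struct work_struct *work)
{
	struct ev *ev, *tmp;
	LIST_HEAD(queue);

	/* Splice the whole queue at once, then process it unlocked. */
	spin_lock_bh(&ev_lock);
	list_splice_init(&ev_queue, &queue);
	spin_unlock_bh(&ev_lock);

	list_for_each_entry_safe(ev, tmp, &queue, list) {
		/* ... parse ev->payload and create the port here ... */
		list_del(&ev->list);
		kfree(ev);
	}
}
static DECLARE_WORK(ev_work, ev_work_fn);

/* Called from the event/trap handler, which runs in atomic context. */
static void ev_enqueue(const char *payload)
{
	struct ev *ev = kmalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return;
	memcpy(ev->payload, payload, sizeof(ev->payload));
	spin_lock(&ev_lock);
	list_add_tail(&ev->list, &ev_queue);
	spin_unlock(&ev_lock);
	schedule_work(&ev_work);
}

In the driver, the producer side of this flow is implemented by
mlxsw_sp_port_mapping_listener_func() and the consumer side by
mlxsw_sp_port_mapping_events_work(); on teardown the work is cancelled
with cancel_work_sync() and any still-queued events are freed.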

Signed-off-by: Jiri Pirko <jiri@nvidia.com>
Signed-off-by: Ido Schimmel <idosch@nvidia.com>
---
 .../net/ethernet/mellanox/mlxsw/spectrum.c    | 166 +++++++++++++++++-
 .../net/ethernet/mellanox/mlxsw/spectrum.h    |   7 +
 drivers/net/ethernet/mellanox/mlxsw/trap.h    |   2 +
 3 files changed, 166 insertions(+), 9 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index c26c160744d0..c3457a216642 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -481,21 +481,16 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
 }
 
 static int
-mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
-			      struct mlxsw_sp_port_mapping *port_mapping)
+mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
+				u16 local_port, char *pmlp_pl,
+				struct mlxsw_sp_port_mapping *port_mapping)
 {
-	char pmlp_pl[MLXSW_REG_PMLP_LEN];
 	bool separate_rxtx;
 	u8 first_lane;
 	u8 module;
 	u8 width;
-	int err;
 	int i;
 
-	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
-	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
-	if (err)
-		return err;
 	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
 	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
 	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
@@ -534,6 +529,21 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
 	return 0;
 }
 
+static int
+mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
+			      struct mlxsw_sp_port_mapping *port_mapping)
+{
+	char pmlp_pl[MLXSW_REG_PMLP_LEN];
+	int err;
+
+	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+	if (err)
+		return err;
+	return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
+					       pmlp_pl, port_mapping);
+}
+
 static int
 mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
 			 const struct mlxsw_sp_port_mapping *port_mapping)
@@ -1861,13 +1871,121 @@ static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
 	return mlxsw_sp->ports[local_port] != NULL;
 }
 
+static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
+					   u16 local_port, bool enable)
+{
+	char pmecr_pl[MLXSW_REG_PMECR_LEN];
+
+	mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
+			     enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
+				      MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
+}
+
+struct mlxsw_sp_port_mapping_event {
+	struct list_head list;
+	char pmlp_pl[MLXSW_REG_PMLP_LEN];
+};
+
+static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
+{
+	struct mlxsw_sp_port_mapping_event *event, *next_event;
+	struct mlxsw_sp_port_mapping_events *events;
+	struct mlxsw_sp_port_mapping port_mapping;
+	struct mlxsw_sp *mlxsw_sp;
+	struct devlink *devlink;
+	LIST_HEAD(event_queue);
+	u16 local_port;
+	int err;
+
+	events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
+	mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
+	devlink = priv_to_devlink(mlxsw_sp->core);
+
+	spin_lock_bh(&events->queue_lock);
+	list_splice_init(&events->queue, &event_queue);
+	spin_unlock_bh(&events->queue_lock);
+
+	list_for_each_entry_safe(event, next_event, &event_queue, list) {
+		local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
+		err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
+						      event->pmlp_pl, &port_mapping);
+		if (err)
+			goto out;
+
+		if (WARN_ON_ONCE(!port_mapping.width))
+			goto out;
+
+		devl_lock(devlink);
+
+		if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
+			mlxsw_sp_port_create(mlxsw_sp, local_port,
+					     false, &port_mapping);
+		else
+			WARN_ON_ONCE(1);
+
+		devl_unlock(devlink);
+
+		mlxsw_sp->port_mapping[local_port] = port_mapping;
+
+out:
+		kfree(event);
+	}
+}
+
+static void
+mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
+				    char *pmlp_pl, void *priv)
+{
+	struct mlxsw_sp_port_mapping_events *events;
+	struct mlxsw_sp_port_mapping_event *event;
+	struct mlxsw_sp *mlxsw_sp = priv;
+	u16 local_port;
+
+	local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
+	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
+		return;
+
+	events = &mlxsw_sp->port_mapping_events;
+	event = kmalloc(sizeof(*event), GFP_ATOMIC);
+	if (!event)
+		return;
+	memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
+	spin_lock(&events->queue_lock);
+	list_add_tail(&event->list, &events->queue);
+	spin_unlock(&events->queue_lock);
+	mlxsw_core_schedule_work(&events->work);
+}
+
+static void
+__mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
+{
+	struct mlxsw_sp_port_mapping_event *event, *next_event;
+	struct mlxsw_sp_port_mapping_events *events;
+
+	events = &mlxsw_sp->port_mapping_events;
+
+	/* Caller needs to make sure that no new event is going to appear. */
+	cancel_work_sync(&events->work);
+	list_for_each_entry_safe(event, next_event, &events->queue, list) {
+		list_del(&event->list);
+		kfree(event);
+	}
+}
+
 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
 {
+	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
 	int i;
 
+	for (i = 1; i < max_ports; i++)
+		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
+	/* Make sure all scheduled events are processed */
+	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
+
 	devl_lock(devlink);
-	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
+	for (i = 1; i < max_ports; i++)
 		if (mlxsw_sp_port_created(mlxsw_sp, i))
 			mlxsw_sp_port_remove(mlxsw_sp, i);
 	mlxsw_sp_cpu_port_remove(mlxsw_sp);
@@ -1880,6 +1998,7 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
 {
 	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+	struct mlxsw_sp_port_mapping_events *events;
 	struct mlxsw_sp_port_mapping *port_mapping;
 	size_t alloc_size;
 	int i;
@@ -1890,6 +2009,17 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
 	if (!mlxsw_sp->ports)
 		return -ENOMEM;
 
+	events = &mlxsw_sp->port_mapping_events;
+	INIT_LIST_HEAD(&events->queue);
+	spin_lock_init(&events->queue_lock);
+	INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);
+
+	for (i = 1; i < max_ports; i++) {
+		err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
+		if (err)
+			goto err_event_enable;
+	}
+
 	devl_lock(devlink);
 	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
 	if (err)
@@ -1910,9 +2040,15 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
 	for (i--; i >= 1; i--)
 		if (mlxsw_sp_port_created(mlxsw_sp, i))
 			mlxsw_sp_port_remove(mlxsw_sp, i);
+	i = max_ports;
 	mlxsw_sp_cpu_port_remove(mlxsw_sp);
 err_cpu_port_create:
 	devl_unlock(devlink);
+err_event_enable:
+	for (i--; i >= 1; i--)
+		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
+	/* Make sure all scheduled events are processed */
+	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
 	kfree(mlxsw_sp->ports);
 	mlxsw_sp->ports = NULL;
 	return err;
@@ -2074,6 +2210,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
 
 err_port_split_create:
 	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
+
 	return err;
 }
 
@@ -2294,6 +2431,11 @@ static const struct mlxsw_listener mlxsw_sp1_listener[] = {
 	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
 };
 
+static const struct mlxsw_listener mlxsw_sp2_listener[] = {
+	/* Events */
+	MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
+};
+
 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -3085,6 +3227,8 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
 	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
 	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
 	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+	mlxsw_sp->listeners = mlxsw_sp2_listener;
+	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
 	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
 
 	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -3115,6 +3259,8 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
 	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
 	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
 	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+	mlxsw_sp->listeners = mlxsw_sp2_listener;
+	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
 	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
 
 	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -3145,6 +3291,8 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
 	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
 	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
 	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+	mlxsw_sp->listeners = mlxsw_sp2_listener;
+	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
 	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
 
 	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 68f71e77b5c7..928c3a63b6b6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -150,6 +150,12 @@ struct mlxsw_sp_port_mapping {
 	u8 lane;
 };
 
+struct mlxsw_sp_port_mapping_events {
+	struct list_head queue;
+	spinlock_t queue_lock; /* protects queue */
+	struct work_struct work;
+};
+
 struct mlxsw_sp_parsing {
 	refcount_t parsing_depth_ref;
 	u16 parsing_depth;
@@ -165,6 +171,7 @@ struct mlxsw_sp {
 	const unsigned char *mac_mask;
 	struct mlxsw_sp_upper *lags;
 	struct mlxsw_sp_port_mapping *port_mapping;
+	struct mlxsw_sp_port_mapping_events port_mapping_events;
 	struct rhashtable sample_trigger_ht;
 	struct mlxsw_sp_sb *sb;
 	struct mlxsw_sp_bridge *bridge;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 9e070ab3ed76..7405c400f09b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -133,6 +133,8 @@ enum mlxsw_event_trap_id {
 	MLXSW_TRAP_ID_PTP_ING_FIFO = 0x2D,
 	/* PTP Egress FIFO has a new entry */
 	MLXSW_TRAP_ID_PTP_EGR_FIFO = 0x2E,
+	/* Port mapping change */
+	MLXSW_TRAP_ID_PMLPE = 0x32E,
 };
 
 #endif /* _MLXSW_TRAP_H */
-- 
2.33.1


Thread overview: 22+ messages
2022-04-18  6:42 [PATCH net-next 00/17] Introduce line card support for modular switch Ido Schimmel
2022-04-18  6:42 ` [PATCH net-next 01/17] devlink: add support to create line card and expose to user Ido Schimmel
2022-04-18  6:42 ` [PATCH net-next 02/17] devlink: implement line card provisioning Ido Schimmel
2022-04-18  6:42 ` [PATCH net-next 03/17] devlink: implement line card active state Ido Schimmel
2022-04-18  6:42 ` [PATCH net-next 04/17] devlink: add port to line card relationship set Ido Schimmel
2022-04-18  6:42 ` [PATCH net-next 05/17] mlxsw: spectrum: Allow lane to start from non-zero index Ido Schimmel
2022-04-18  6:42 ` [PATCH net-next 06/17] mlxsw: spectrum: Allocate port mapping array of structs instead of pointers Ido Schimmel
2022-04-18  6:42 ` [PATCH net-next 07/17] mlxsw: reg: Add Ports Mapping Event Configuration Register Ido Schimmel
2022-04-18  6:42 ` [PATCH net-next 08/17] mlxsw: Narrow the critical section of devl_lock during ports creation/removal Ido Schimmel
2022-04-18  6:42 ` [PATCH net-next 09/17] mlxsw: spectrum: Introduce port mapping change event processing Ido Schimmel [this message]
2022-04-18  6:42 ` [PATCH net-next 10/17] mlxsw: reg: Add Management DownStream Device Query Register Ido Schimmel
2022-04-18  6:42 ` [PATCH net-next 11/17] mlxsw: reg: Add Management DownStream Device Control Register Ido Schimmel
2022-04-18  6:42 ` [PATCH net-next 12/17] mlxsw: reg: Add Management Binary Code Transfer Register Ido Schimmel
2022-04-18  6:42 ` [PATCH net-next 13/17] mlxsw: core_linecards: Add line card objects and implement provisioning Ido Schimmel
2022-04-18  6:42 ` [PATCH net-next 14/17] mlxsw: core_linecards: Implement line card activation process Ido Schimmel
2022-04-18  6:42 ` [PATCH net-next 15/17] mlxsw: core: Extend driver ops by remove selected ports op Ido Schimmel
2022-04-18  6:42 ` [PATCH net-next 16/17] mlxsw: spectrum: Add port to linecard mapping Ido Schimmel
2022-04-18  6:42 ` [PATCH net-next 17/17] selftests: mlxsw: Introduce devlink line card provision/unprovision/activation tests Ido Schimmel
2022-04-18 10:10 ` [PATCH net-next 00/17] Introduce line card support for modular switch patchwork-bot+netdevbpf
2022-04-18 14:31 ` David Ahern
2022-04-19 11:55   ` Jiri Pirko
2022-04-19 12:58     ` Ido Schimmel
