From: Jonathan Cameron <Jonathan.Cameron@Huawei.com>
To: Ben Widawsky <ben.widawsky@intel.com>
Cc: <linux-cxl@vger.kernel.org>,
	Alison Schofield <alison.schofield@intel.com>,
	Dan Williams <dan.j.williams@intel.com>,
	"Ira Weiny" <ira.weiny@intel.com>,
	Vishal Verma <vishal.l.verma@intel.com>
Subject: Re: [PATCH 06/13] cxl/mem: Introduce cxl_mem driver
Date: Fri, 3 Sep 2021 15:52:49 +0100
Message-ID: <20210903155249.00002b2e@Huawei.com>
In-Reply-To: <20210902195017.2516472-7-ben.widawsky@intel.com>

On Thu, 2 Sep 2021 12:50:10 -0700
Ben Widawsky <ben.widawsky@intel.com> wrote:

> CXL endpoints that participate in the CXL.mem protocol require extra
> control to ensure architectural constraints are met for device
> management. The most straightforward way to achieve control of these
> endpoints is with a new driver that can bind to such devices. This
> driver will also be responsible for enumerating the switches that
> connect the endpoint to the host bridge.
> 
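For anyone reading the archive without the earlier patches to hand: the
binding model here leans on the existing cxl bus match, where
cxl_device_id() maps a device_type to an integer id and each cxl_driver
declares the single id it services. Roughly (paraphrasing
drivers/cxl/core/bus.c from memory rather than quoting the tree):

	static int cxl_bus_match(struct device *dev, struct device_driver *drv)
	{
		struct cxl_driver *cxl_drv = to_cxl_drv(drv);

		/* cxl_device_id() returns CXL_DEVICE_ENDPOINT for a
		 * memdev after this patch, so cxl_mem_driver, which
		 * sets .id = CXL_DEVICE_ENDPOINT, is probed for every
		 * registered memdev.
		 */
		return cxl_device_id(dev) == cxl_drv->id;
	}

so no per-driver match callback is needed.
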
> cxl_core already understands the concept of a memdev, but the core [by
> design] does not comprehend all the topological constraints.
> 
> Signed-off-by: Ben Widawsky <ben.widawsky@intel.com>

Looks fairly standard... FWIW

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>

> ---
>  .../driver-api/cxl/memory-devices.rst         |  3 ++
>  drivers/cxl/Makefile                          |  3 +-
>  drivers/cxl/core/bus.c                        |  2 +
>  drivers/cxl/core/core.h                       |  1 +
>  drivers/cxl/core/memdev.c                     |  2 +-
>  drivers/cxl/cxl.h                             |  1 +
>  drivers/cxl/mem.c                             | 49 +++++++++++++++++++
>  7 files changed, 59 insertions(+), 2 deletions(-)
>  create mode 100644 drivers/cxl/mem.c
> 
> diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/memory-devices.rst
> index a18175bae7a6..00d141071570 100644
> --- a/Documentation/driver-api/cxl/memory-devices.rst
> +++ b/Documentation/driver-api/cxl/memory-devices.rst
> @@ -28,6 +28,9 @@ CXL Memory Device
>  .. kernel-doc:: drivers/cxl/pci.c
>     :internal:
>  
> +.. kernel-doc:: drivers/cxl/mem.c
> +   :doc: cxl mem
> +
>  CXL Core
>  --------
>  .. kernel-doc:: drivers/cxl/cxl.h
> diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile
> index d1aaabc940f3..d912ac4e3f0c 100644
> --- a/drivers/cxl/Makefile
> +++ b/drivers/cxl/Makefile
> @@ -1,9 +1,10 @@
>  # SPDX-License-Identifier: GPL-2.0
>  obj-$(CONFIG_CXL_BUS) += core/
> -obj-$(CONFIG_CXL_MEM) += cxl_pci.o
> +obj-$(CONFIG_CXL_MEM) += cxl_mem.o cxl_pci.o
>  obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o
>  obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o
>  
> +cxl_mem-y := mem.o
>  cxl_pci-y := pci.o
>  cxl_acpi-y := acpi.o
>  cxl_pmem-y := pmem.o
> diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c
> index 6202ce5a5ac2..256e55dc2a3b 100644
> --- a/drivers/cxl/core/bus.c
> +++ b/drivers/cxl/core/bus.c
> @@ -641,6 +641,8 @@ static int cxl_device_id(struct device *dev)
>  		return CXL_DEVICE_NVDIMM_BRIDGE;
>  	if (dev->type == &cxl_nvdimm_type)
>  		return CXL_DEVICE_NVDIMM;
> +	if (dev->type == &cxl_memdev_type)
> +		return CXL_DEVICE_ENDPOINT;
>  	return 0;
>  }
>  
> diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
> index e0c9aacc4e9c..dea246cb7c58 100644
> --- a/drivers/cxl/core/core.h
> +++ b/drivers/cxl/core/core.h
> @@ -6,6 +6,7 @@
>  
>  extern const struct device_type cxl_nvdimm_bridge_type;
>  extern const struct device_type cxl_nvdimm_type;
> +extern const struct device_type cxl_memdev_type;
>  
>  extern struct attribute_group cxl_base_attribute_group;
>  
> diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
> index ee61202c7aab..c9dd054bd813 100644
> --- a/drivers/cxl/core/memdev.c
> +++ b/drivers/cxl/core/memdev.c
> @@ -127,7 +127,7 @@ static const struct attribute_group *cxl_memdev_attribute_groups[] = {
>  	NULL,
>  };
>  
> -static const struct device_type cxl_memdev_type = {
> +const struct device_type cxl_memdev_type = {
>  	.name = "cxl_memdev",
>  	.release = cxl_memdev_release,
>  	.devnode = cxl_memdev_devnode,
> diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
> index 708bfe92b596..b48bdbefd949 100644
> --- a/drivers/cxl/cxl.h
> +++ b/drivers/cxl/cxl.h
> @@ -315,6 +315,7 @@ void cxl_driver_unregister(struct cxl_driver *cxl_drv);
>  
>  #define CXL_DEVICE_NVDIMM_BRIDGE	1
>  #define CXL_DEVICE_NVDIMM		2
> +#define CXL_DEVICE_ENDPOINT		3
>  
>  #define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*")
>  #define CXL_MODALIAS_FMT "cxl:t%d"
> diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
> new file mode 100644
> index 000000000000..978a54b0a51a
> --- /dev/null
> +++ b/drivers/cxl/mem.c
> @@ -0,0 +1,49 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
> +#include <linux/device.h>
> +#include <linux/module.h>
> +
> +#include "cxlmem.h"
> +
> +/**
> + * DOC: cxl mem
> + *
> + * CXL memory endpoint devices and switches are CXL capable devices that are
> + * participating in CXL.mem protocol. Their functionality builds on top of the
> + * CXL.io protocol that allows enumerating and configuring components via
> + * standard PCI mechanisms.
> + *
> + * The cxl_mem driver implements enumeration and control over these CXL
> + * components.
> + */
> +
> +static int cxl_mem_probe(struct device *dev)
> +{
> +	return -EOPNOTSUPP;
> +}
> +
> +static void cxl_mem_remove(struct device *dev)
> +{
> +}
> +
> +static struct cxl_driver cxl_mem_driver = {
> +	.name = "cxl_mem",
> +	.probe = cxl_mem_probe,
> +	.remove = cxl_mem_remove,
> +	.id = CXL_DEVICE_ENDPOINT,
> +};
> +
> +static __init int cxl_mem_init(void)
> +{
> +	return cxl_driver_register(&cxl_mem_driver);
> +}
> +
> +static __exit void cxl_mem_exit(void)
> +{
> +	cxl_driver_unregister(&cxl_mem_driver);
> +}
> +
> +MODULE_LICENSE("GPL v2");
> +module_init(cxl_mem_init);
> +module_exit(cxl_mem_exit);
> +MODULE_IMPORT_NS(CXL);
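
One incidental note, mostly for the archive: with CXL_DEVICE_ENDPOINT
defined as 3, autoloading would ride on the modalias string "cxl:t3".
As I read the existing core, the bus uevent side is roughly the
following (again a paraphrase, not a quote of the tree):

	static int cxl_bus_uevent(struct device *dev,
				  struct kobj_uevent_env *env)
	{
		/* Emits MODALIAS=cxl:t3 for a memdev; a
		 * MODULE_ALIAS_CXL(CXL_DEVICE_ENDPOINT) in the driver
		 * is what would match it for module autoload.
		 */
		return add_uevent_var(env, CXL_MODALIAS_FMT,
				      cxl_device_id(dev));
	}

Nothing there affects the R-b above.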

