From mboxrd@z Thu Jan 1 00:00:00 1970
From: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
To: linux-arm-kernel@lists.infradead.org, linux-pci@vger.kernel.org,
	linux-acpi@vger.kernel.org, devicetree@vger.kernel.org,
	iommu@lists.linux-foundation.org
Cc: joro@8bytes.org, robh+dt@kernel.org, mark.rutland@arm.com,
	catalin.marinas@arm.com, will.deacon@arm.com, lorenzo.pieralisi@arm.com,
	hanjun.guo@linaro.org, sudeep.holla@arm.com, rjw@rjwysocki.net,
	lenb@kernel.org, robin.murphy@arm.com, bhelgaas@google.com,
	alex.williamson@redhat.com, tn@semihalf.com, liubo95@huawei.com,
	thunder.leizhen@huawei.com, xieyisheng1@huawei.com,
	gabriele.paoloni@huawei.com, nwatters@codeaurora.org,
	okaya@codeaurora.org, rfranz@cavium.com, dwmw2@infradead.org,
	jacob.jun.pan@linux.intel.com, yi.l.liu@intel.com, ashok.raj@intel.com,
	robdclark@gmail.com
Subject: [RFCv2 PATCH 28/36] iommu/arm-smmu-v3: Maintain a SID->device structure
Date: Fri, 6 Oct 2017 14:31:55 +0100
Message-Id: <20171006133203.22803-29-jean-philippe.brucker@arm.com>
In-Reply-To: <20171006133203.22803-1-jean-philippe.brucker@arm.com>
References: <20171006133203.22803-1-jean-philippe.brucker@arm.com>

When handling faults from the event or PRI queue, we need to find the
struct device associated with a SID. Add an rb_tree to keep track of
SIDs.
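For example, once the fault queues are wired up, the event queue handler
will be able to resolve the faulting device through the new helper. A
rough sketch follows (not part of this patch; the SID extraction from
the event record is a placeholder, not the driver's real EVTQ field
parsing, and the function name is made up for illustration):

	/*
	 * Illustrative only: look up the device that caused a fault event.
	 * arm_smmu_find_master() takes streams_mutex, so this must run in
	 * thread context (e.g. the threaded event queue handler).
	 */
	static void arm_smmu_handle_evt_sketch(struct arm_smmu_device *smmu,
					       u64 *evt)
	{
		u32 sid = evt[0] >> 32;	/* placeholder for the EVTQ SID field */
		struct arm_smmu_master_data *master;

		master = arm_smmu_find_master(smmu, sid);
		if (!master) {
			/* SID was never added by arm_smmu_insert_master() */
			dev_warn(smmu->dev, "event for unknown SID 0x%x\n", sid);
			return;
		}

		/* master->dev is the struct device behind the fault */
		dev_dbg(master->dev, "handling fault event\n");
	}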
Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
---
 drivers/iommu/arm-smmu-v3.c | 104 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 104 insertions(+)

diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 7d68c6aecb14..4e915e649643 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -721,10 +721,19 @@ struct arm_smmu_device {
 	/* IOMMU core code handle */
 	struct iommu_device		iommu;
 
+	struct rb_root			streams;
+	struct mutex			streams_mutex;
+
 	/* Notifier for the fault queue */
 	struct notifier_block		faultq_nb;
 };
 
+struct arm_smmu_stream {
+	u32				id;
+	struct arm_smmu_master_data	*master;
+	struct rb_node			node;
+};
+
 /* SMMU private data for each master */
 struct arm_smmu_master_data {
 	struct arm_smmu_device		*smmu;
@@ -732,6 +741,7 @@ struct arm_smmu_master_data {
 
 	struct arm_smmu_domain		*domain;
 	struct list_head		list; /* domain->devices */
+	struct arm_smmu_stream		*streams;
 
 	struct device			*dev;
 
@@ -1571,6 +1581,31 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
 	return 0;
 }
 
+static struct arm_smmu_master_data *
+arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
+{
+	struct rb_node *node;
+	struct arm_smmu_stream *stream;
+	struct arm_smmu_master_data *master = NULL;
+
+	mutex_lock(&smmu->streams_mutex);
+	node = smmu->streams.rb_node;
+	while (node) {
+		stream = rb_entry(node, struct arm_smmu_stream, node);
+		if (stream->id < sid) {
+			node = node->rb_right;
+		} else if (stream->id > sid) {
+			node = node->rb_left;
+		} else {
+			master = stream->master;
+			break;
+		}
+	}
+	mutex_unlock(&smmu->streams_mutex);
+
+	return master;
+}
+
 /* IRQ and event handlers */
 static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
 {
@@ -2555,6 +2590,71 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
 	return sid < limit;
 }
 
+static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
+				  struct arm_smmu_master_data *master)
+{
+	int i;
+	int ret = 0;
+	struct arm_smmu_stream *new_stream, *cur_stream;
+	struct rb_node **new_node, *parent_node = NULL;
+	struct iommu_fwspec *fwspec = master->dev->iommu_fwspec;
+
+	master->streams = kcalloc(fwspec->num_ids,
+				  sizeof(struct arm_smmu_stream), GFP_KERNEL);
+	if (!master->streams)
+		return -ENOMEM;
+
+	mutex_lock(&smmu->streams_mutex);
+	for (i = 0; i < fwspec->num_ids && !ret; i++) {
+		new_stream = &master->streams[i];
+		new_stream->id = fwspec->ids[i];
+		new_stream->master = master;
+
+		new_node = &(smmu->streams.rb_node);
+		while (*new_node) {
+			cur_stream = rb_entry(*new_node, struct arm_smmu_stream,
+					      node);
+			parent_node = *new_node;
+			if (cur_stream->id > new_stream->id) {
+				new_node = &((*new_node)->rb_left);
+			} else if (cur_stream->id < new_stream->id) {
+				new_node = &((*new_node)->rb_right);
+			} else {
+				dev_warn(master->dev,
+					 "stream %u already in tree\n",
+					 cur_stream->id);
+				ret = -EINVAL;
+				break;
+			}
+		}
+
+		if (!ret) {
+			rb_link_node(&new_stream->node, parent_node, new_node);
+			rb_insert_color(&new_stream->node, &smmu->streams);
+		}
+	}
+	mutex_unlock(&smmu->streams_mutex);
+
+	return ret;
+}
+
+static void arm_smmu_remove_master(struct arm_smmu_device *smmu,
+				   struct arm_smmu_master_data *master)
+{
+	int i;
+	struct iommu_fwspec *fwspec = master->dev->iommu_fwspec;
+
+	if (!master->streams)
+		return;
+
+	mutex_lock(&smmu->streams_mutex);
+	for (i = 0; i < fwspec->num_ids; i++)
+		rb_erase(&master->streams[i].node, &smmu->streams);
+	mutex_unlock(&smmu->streams_mutex);
+
+	kfree(master->streams);
+}
+
 static struct iommu_ops arm_smmu_ops;
 
 static int arm_smmu_add_device(struct device *dev)
@@ -2609,6 +2709,7 @@ static int arm_smmu_add_device(struct device *dev)
 	group = iommu_group_get_for_dev(dev);
 	if (!IS_ERR(group)) {
+		arm_smmu_insert_master(smmu, master);
 		iommu_group_put(group);
 		iommu_device_link(&smmu->iommu, dev);
 	}
 
@@ -2629,6 +2730,7 @@ static void arm_smmu_remove_device(struct device *dev)
 	smmu = master->smmu;
 	if (master && master->ste.assigned)
 		arm_smmu_detach_dev(dev);
+	arm_smmu_remove_master(smmu, master);
 	iommu_group_remove_device(dev);
 	iommu_device_unlink(&smmu->iommu, dev);
 	kfree(master);
@@ -2936,6 +3038,8 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
 	spin_lock_init(&smmu->asid_lock);
 	idr_init(&smmu->asid_idr);
+	mutex_init(&smmu->streams_mutex);
+	smmu->streams = RB_ROOT;
 
 	ret = arm_smmu_init_queues(smmu);
 	if (ret)
-- 
2.13.3