From: Robin Murphy <robin.murphy@arm.com> To: will.deacon@arm.com, joro@8bytes.org Cc: salil.mehta@huawei.com, sunil.goutham@cavium.com, iommu@lists.linux-foundation.org, ray.jui@broadcom.com, linux-arm-kernel@lists.infradead.org, linu.cherian@cavium.com, nwatters@codeaurora.org Subject: [PATCH 2/8] iommu/io-pgtable-arm: Improve split_blk_unmap Date: Thu, 8 Jun 2017 12:52:01 +0100 [thread overview] Message-ID: <ea6da7c87d1d4cba570dd00d1de12a1cf489aecf.1496921366.git.robin.murphy@arm.com> (raw) In-Reply-To: <cover.1496921366.git.robin.murphy@arm.com> The current split_blk_unmap implementation suffers from some inscrutable pointer trickery for creating the tables to replace the block entry, but more than that it also suffers from hideous inefficiency. For example, the most pathological case of unmapping a level 3 page from a level 1 block will allocate 513 lower-level tables to remap the entire block at page granularity, when only 2 are actually needed (the rest can be covered by level 2 block entries). Also, we would like to be able to relax the spinlock requirement in future, for which the roll-back-and-try-again logic for race resolution would be pretty hideous under the current paradigm. Both issues can be resolved most neatly by turning things sideways: instead of repeatedly recursing into __arm_lpae_map() to build up an entire new sub-table depth-first, we can directly replace the block entry with a next-level table of block/page entries, then repeat by unmapping at the next level if necessary. With a little refactoring of some helper functions, the code ends up not much bigger than before, but considerably easier to follow and to adapt in future. 
Signed-off-by: Robin Murphy <robin.murphy@arm.com> --- drivers/iommu/io-pgtable-arm.c | 116 ++++++++++++++++++++++++----------------- 1 file changed, 68 insertions(+), 48 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 6e5df5e0a3bd..97d039952367 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -264,13 +264,32 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, unsigned long iova, size_t size, int lvl, arm_lpae_iopte *ptep); +static arm_lpae_iopte __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, + arm_lpae_iopte prot, int lvl, + phys_addr_t paddr) +{ + arm_lpae_iopte pte = prot; + + if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) + pte |= ARM_LPAE_PTE_NS; + + if (lvl == ARM_LPAE_MAX_LEVELS - 1) + pte |= ARM_LPAE_PTE_TYPE_PAGE; + else + pte |= ARM_LPAE_PTE_TYPE_BLOCK; + + pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS; + pte |= pfn_to_iopte(paddr >> data->pg_shift, data); + + return pte; +} + static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, unsigned long iova, phys_addr_t paddr, arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep) { - arm_lpae_iopte pte = prot; - struct io_pgtable_cfg *cfg = &data->iop.cfg; + arm_lpae_iopte pte; if (iopte_leaf(*ptep, lvl)) { /* We require an unmap first */ @@ -289,21 +308,25 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, return -EINVAL; } - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) - pte |= ARM_LPAE_PTE_NS; - - if (lvl == ARM_LPAE_MAX_LEVELS - 1) - pte |= ARM_LPAE_PTE_TYPE_PAGE; - else - pte |= ARM_LPAE_PTE_TYPE_BLOCK; - - pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS; - pte |= pfn_to_iopte(paddr >> data->pg_shift, data); - - __arm_lpae_set_pte(ptep, pte, cfg); + pte = __arm_lpae_init_pte(data, prot, lvl, paddr); + __arm_lpae_set_pte(ptep, pte, &data->iop.cfg); return 0; } +static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, + arm_lpae_iopte *ptep, + struct io_pgtable_cfg *cfg) +{ + 
arm_lpae_iopte new; + + new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE; + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) + new |= ARM_LPAE_PTE_NSTABLE; + + __arm_lpae_set_pte(ptep, new, cfg); + return new; +} + static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, phys_addr_t paddr, size_t size, arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep) @@ -331,10 +354,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, if (!cptep) return -ENOMEM; - pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE; - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) - pte |= ARM_LPAE_PTE_NSTABLE; - __arm_lpae_set_pte(ptep, pte, cfg); + arm_lpae_install_table(cptep, ptep, cfg); } else if (!iopte_leaf(pte, lvl)) { cptep = iopte_deref(pte, data); } else { @@ -452,40 +472,42 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop) static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, unsigned long iova, size_t size, - arm_lpae_iopte prot, int lvl, - arm_lpae_iopte *ptep, size_t blk_size) + arm_lpae_iopte blk_pte, int lvl, + arm_lpae_iopte *ptep) { - unsigned long blk_start, blk_end; - phys_addr_t blk_paddr; - arm_lpae_iopte table = 0; + struct io_pgtable_cfg *cfg = &data->iop.cfg; + arm_lpae_iopte pte, *tablep; + size_t tablesz = ARM_LPAE_GRANULE(data); + size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data); + int i, unmap_idx = -1; - blk_start = iova & ~(blk_size - 1); - blk_end = blk_start + blk_size; - blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift; + if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) + return 0; - for (; blk_start < blk_end; blk_start += size, blk_paddr += size) { - arm_lpae_iopte *tablep; + tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg); + if (!tablep) + return 0; /* Bytes unmapped */ + if (size == split_sz) + unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data); + + pte = __arm_lpae_init_pte(data, iopte_prot(blk_pte), lvl, + iopte_to_pfn(blk_pte, data) << data->pg_shift); + + for (i = 0; i < tablesz / 
sizeof(pte); pte += split_sz, i++) { /* Unmap! */ - if (blk_start == iova) + if (i == unmap_idx) continue; - /* __arm_lpae_map expects a pointer to the start of the table */ - tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data); - if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl, - tablep) < 0) { - if (table) { - /* Free the table we allocated */ - tablep = iopte_deref(table, data); - __arm_lpae_free_pgtable(data, lvl + 1, tablep); - } - return 0; /* Bytes unmapped */ - } + __arm_lpae_set_pte(&tablep[i], pte, cfg); } - __arm_lpae_set_pte(ptep, table, &data->iop.cfg); - iova &= ~(blk_size - 1); - io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true); + arm_lpae_install_table(tablep, ptep, cfg); + + if (unmap_idx < 0) + return __arm_lpae_unmap(data, iova, size, lvl, tablep); + + io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); return size; } @@ -495,7 +517,6 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, { arm_lpae_iopte pte; struct io_pgtable *iop = &data->iop; - size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data); /* Something went horribly wrong and we ran out of page table */ if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) @@ -507,7 +528,7 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, return 0; /* If the size matches this level, we're in the right place */ - if (size == blk_size) { + if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) { __arm_lpae_set_pte(ptep, 0, &iop->cfg); if (!iopte_leaf(pte, lvl)) { @@ -527,9 +548,8 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, * Insert a table at the next level to map the old region, * minus the part we want to unmap */ - return arm_lpae_split_blk_unmap(data, iova, size, - iopte_prot(pte), lvl, ptep, - blk_size); + return arm_lpae_split_blk_unmap(data, iova, size, pte, + lvl + 1, ptep); } /* Keep on walkin' */ -- 2.12.2.dirty
WARNING: multiple messages have this Message-ID (diff)
From: robin.murphy@arm.com (Robin Murphy) To: linux-arm-kernel@lists.infradead.org Subject: [PATCH 2/8] iommu/io-pgtable-arm: Improve split_blk_unmap Date: Thu, 8 Jun 2017 12:52:01 +0100 [thread overview] Message-ID: <ea6da7c87d1d4cba570dd00d1de12a1cf489aecf.1496921366.git.robin.murphy@arm.com> (raw) In-Reply-To: <cover.1496921366.git.robin.murphy@arm.com> The current split_blk_unmap implementation suffers from some inscrutable pointer trickery for creating the tables to replace the block entry, but more than that it also suffers from hideous inefficiency. For example, the most pathological case of unmapping a level 3 page from a level 1 block will allocate 513 lower-level tables to remap the entire block at page granularity, when only 2 are actually needed (the rest can be covered by level 2 block entries). Also, we would like to be able to relax the spinlock requirement in future, for which the roll-back-and-try-again logic for race resolution would be pretty hideous under the current paradigm. Both issues can be resolved most neatly by turning things sideways: instead of repeatedly recursing into __arm_lpae_map() to build up an entire new sub-table depth-first, we can directly replace the block entry with a next-level table of block/page entries, then repeat by unmapping at the next level if necessary. With a little refactoring of some helper functions, the code ends up not much bigger than before, but considerably easier to follow and to adapt in future. 
Signed-off-by: Robin Murphy <robin.murphy@arm.com> --- drivers/iommu/io-pgtable-arm.c | 116 ++++++++++++++++++++++++----------------- 1 file changed, 68 insertions(+), 48 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 6e5df5e0a3bd..97d039952367 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -264,13 +264,32 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, unsigned long iova, size_t size, int lvl, arm_lpae_iopte *ptep); +static arm_lpae_iopte __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, + arm_lpae_iopte prot, int lvl, + phys_addr_t paddr) +{ + arm_lpae_iopte pte = prot; + + if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) + pte |= ARM_LPAE_PTE_NS; + + if (lvl == ARM_LPAE_MAX_LEVELS - 1) + pte |= ARM_LPAE_PTE_TYPE_PAGE; + else + pte |= ARM_LPAE_PTE_TYPE_BLOCK; + + pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS; + pte |= pfn_to_iopte(paddr >> data->pg_shift, data); + + return pte; +} + static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, unsigned long iova, phys_addr_t paddr, arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep) { - arm_lpae_iopte pte = prot; - struct io_pgtable_cfg *cfg = &data->iop.cfg; + arm_lpae_iopte pte; if (iopte_leaf(*ptep, lvl)) { /* We require an unmap first */ @@ -289,21 +308,25 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, return -EINVAL; } - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) - pte |= ARM_LPAE_PTE_NS; - - if (lvl == ARM_LPAE_MAX_LEVELS - 1) - pte |= ARM_LPAE_PTE_TYPE_PAGE; - else - pte |= ARM_LPAE_PTE_TYPE_BLOCK; - - pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS; - pte |= pfn_to_iopte(paddr >> data->pg_shift, data); - - __arm_lpae_set_pte(ptep, pte, cfg); + pte = __arm_lpae_init_pte(data, prot, lvl, paddr); + __arm_lpae_set_pte(ptep, pte, &data->iop.cfg); return 0; } +static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, + arm_lpae_iopte *ptep, + struct io_pgtable_cfg *cfg) +{ + 
arm_lpae_iopte new; + + new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE; + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) + new |= ARM_LPAE_PTE_NSTABLE; + + __arm_lpae_set_pte(ptep, new, cfg); + return new; +} + static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, phys_addr_t paddr, size_t size, arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep) @@ -331,10 +354,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, if (!cptep) return -ENOMEM; - pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE; - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) - pte |= ARM_LPAE_PTE_NSTABLE; - __arm_lpae_set_pte(ptep, pte, cfg); + arm_lpae_install_table(cptep, ptep, cfg); } else if (!iopte_leaf(pte, lvl)) { cptep = iopte_deref(pte, data); } else { @@ -452,40 +472,42 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop) static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, unsigned long iova, size_t size, - arm_lpae_iopte prot, int lvl, - arm_lpae_iopte *ptep, size_t blk_size) + arm_lpae_iopte blk_pte, int lvl, + arm_lpae_iopte *ptep) { - unsigned long blk_start, blk_end; - phys_addr_t blk_paddr; - arm_lpae_iopte table = 0; + struct io_pgtable_cfg *cfg = &data->iop.cfg; + arm_lpae_iopte pte, *tablep; + size_t tablesz = ARM_LPAE_GRANULE(data); + size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data); + int i, unmap_idx = -1; - blk_start = iova & ~(blk_size - 1); - blk_end = blk_start + blk_size; - blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift; + if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) + return 0; - for (; blk_start < blk_end; blk_start += size, blk_paddr += size) { - arm_lpae_iopte *tablep; + tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg); + if (!tablep) + return 0; /* Bytes unmapped */ + if (size == split_sz) + unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data); + + pte = __arm_lpae_init_pte(data, iopte_prot(blk_pte), lvl, + iopte_to_pfn(blk_pte, data) << data->pg_shift); + + for (i = 0; i < tablesz / 
sizeof(pte); pte += split_sz, i++) { /* Unmap! */ - if (blk_start == iova) + if (i == unmap_idx) continue; - /* __arm_lpae_map expects a pointer to the start of the table */ - tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data); - if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl, - tablep) < 0) { - if (table) { - /* Free the table we allocated */ - tablep = iopte_deref(table, data); - __arm_lpae_free_pgtable(data, lvl + 1, tablep); - } - return 0; /* Bytes unmapped */ - } + __arm_lpae_set_pte(&tablep[i], pte, cfg); } - __arm_lpae_set_pte(ptep, table, &data->iop.cfg); - iova &= ~(blk_size - 1); - io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true); + arm_lpae_install_table(tablep, ptep, cfg); + + if (unmap_idx < 0) + return __arm_lpae_unmap(data, iova, size, lvl, tablep); + + io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); return size; } @@ -495,7 +517,6 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, { arm_lpae_iopte pte; struct io_pgtable *iop = &data->iop; - size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data); /* Something went horribly wrong and we ran out of page table */ if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) @@ -507,7 +528,7 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, return 0; /* If the size matches this level, we're in the right place */ - if (size == blk_size) { + if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) { __arm_lpae_set_pte(ptep, 0, &iop->cfg); if (!iopte_leaf(pte, lvl)) { @@ -527,9 +548,8 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, * Insert a table at the next level to map the old region, * minus the part we want to unmap */ - return arm_lpae_split_blk_unmap(data, iova, size, - iopte_prot(pte), lvl, ptep, - blk_size); + return arm_lpae_split_blk_unmap(data, iova, size, pte, + lvl + 1, ptep); } /* Keep on walkin' */ -- 2.12.2.dirty
next prev parent reply other threads:[~2017-06-08 11:52 UTC|newest] Thread overview: 50+ messages / expand[flat|nested] mbox.gz Atom feed top 2017-06-08 11:51 [PATCH 0/8] io-pgtable lock removal Robin Murphy 2017-06-08 11:51 ` Robin Murphy 2017-06-08 11:52 ` [PATCH 1/8] iommu/io-pgtable-arm-v7s: Check table PTEs more precisely Robin Murphy 2017-06-08 11:52 ` Robin Murphy 2017-06-08 11:52 ` Robin Murphy [this message] 2017-06-08 11:52 ` [PATCH 2/8] iommu/io-pgtable-arm: Improve split_blk_unmap Robin Murphy 2017-06-08 11:52 ` [PATCH 3/8] iommu/io-pgtable-arm-v7s: Refactor split_blk_unmap Robin Murphy 2017-06-08 11:52 ` Robin Murphy 2017-06-08 11:52 ` [PATCH 4/8] iommu/io-pgtable: Introduce explicit coherency Robin Murphy 2017-06-08 11:52 ` Robin Murphy 2017-06-08 11:52 ` [PATCH 5/8] iommu/io-pgtable-arm: Support lockless operation Robin Murphy 2017-06-08 11:52 ` Robin Murphy 2017-06-08 11:52 ` [PATCH 6/8] iommu/io-pgtable-arm-v7s: " Robin Murphy 2017-06-08 11:52 ` Robin Murphy 2017-06-08 11:52 ` [PATCH 7/8] iommu/arm-smmu: Remove io-pgtable spinlock Robin Murphy 2017-06-08 11:52 ` Robin Murphy 2017-06-08 11:52 ` [PATCH 8/8] iommu/arm-smmu-v3: " Robin Murphy 2017-06-08 11:52 ` Robin Murphy [not found] ` <cover.1496921366.git.robin.murphy-5wv7dgnIgG8@public.gmane.org> 2017-06-09 19:28 ` [PATCH 0/8] io-pgtable lock removal Nate Watterson 2017-06-09 19:28 ` Nate Watterson [not found] ` <458ad41d-6679-eeca-3c0f-13ccb6c933b6-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org> 2017-06-15 0:40 ` Ray Jui via iommu 2017-06-15 0:40 ` Ray Jui [not found] ` <b7830be1-9a78-e29f-a29c-4798aaa28c0a-dY08KVG/lbpWk0Htik3J/w@public.gmane.org> 2017-06-15 12:25 ` John Garry 2017-06-15 12:25 ` John Garry 2017-06-20 13:37 ` Robin Murphy 2017-06-20 13:37 ` Robin Murphy [not found] ` <cdc1799b-f142-09ed-a7e5-d7fd2e70268f-5wv7dgnIgG8@public.gmane.org> 2017-06-27 16:43 ` Ray Jui via iommu 2017-06-27 16:43 ` Ray Jui [not found] ` <e43ba1fe-696e-fabb-a800-52fadaa2fa93-dY08KVG/lbpWk0Htik3J/w@public.gmane.org> 
2017-06-28 11:46 ` Will Deacon 2017-06-28 11:46 ` Will Deacon [not found] ` <20170628114609.GD11053-5wv7dgnIgG8@public.gmane.org> 2017-06-28 17:02 ` Ray Jui via iommu 2017-06-28 17:02 ` Ray Jui [not found] ` <87d53115-3d80-5a3d-6632-c31986cb7018-dY08KVG/lbpWk0Htik3J/w@public.gmane.org> 2017-07-04 17:31 ` Will Deacon 2017-07-04 17:31 ` Will Deacon [not found] ` <20170704173155.GN22175-5wv7dgnIgG8@public.gmane.org> 2017-07-04 17:39 ` Ray Jui via iommu 2017-07-04 17:39 ` Ray Jui [not found] ` <6814b246-22f0-bfaa-5002-a269b2735116-dY08KVG/lbpWk0Htik3J/w@public.gmane.org> 2017-07-05 1:45 ` Ray Jui via iommu 2017-07-05 1:45 ` Ray Jui [not found] ` <2d5f5ef3-32b1-76c6-6869-ff980557f8e8-dY08KVG/lbpWk0Htik3J/w@public.gmane.org> 2017-07-05 8:41 ` Will Deacon 2017-07-05 8:41 ` Will Deacon [not found] ` <20170705084143.GA9378-5wv7dgnIgG8@public.gmane.org> 2017-07-05 23:24 ` Ray Jui via iommu 2017-07-05 23:24 ` Ray Jui [not found] ` <5149280b-a214-249c-c5e2-3712b1f941d2-dY08KVG/lbpWk0Htik3J/w@public.gmane.org> 2017-07-06 15:08 ` Will Deacon 2017-07-06 15:08 ` Will Deacon [not found] ` <20170706150838.GB15574-5wv7dgnIgG8@public.gmane.org> 2017-07-06 18:14 ` Ray Jui via iommu 2017-07-06 18:14 ` Ray Jui [not found] ` <94ba5d4a-0dae-9394-79ef-90da86e49c86-dY08KVG/lbpWk0Htik3J/w@public.gmane.org> 2017-07-07 12:46 ` Will Deacon 2017-07-07 12:46 ` Will Deacon 2017-06-21 15:47 ` Joerg Roedel 2017-06-21 15:47 ` Joerg Roedel
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=ea6da7c87d1d4cba570dd00d1de12a1cf489aecf.1496921366.git.robin.murphy@arm.com \ --to=robin.murphy@arm.com \ --cc=iommu@lists.linux-foundation.org \ --cc=joro@8bytes.org \ --cc=linu.cherian@cavium.com \ --cc=linux-arm-kernel@lists.infradead.org \ --cc=nwatters@codeaurora.org \ --cc=ray.jui@broadcom.com \ --cc=salil.mehta@huawei.com \ --cc=sunil.goutham@cavium.com \ --cc=will.deacon@arm.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.