From: Anshuman Khandual
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Cc: mhocko@suse.com, vbabka@suse.cz, mgorman@suse.de, minchan@kernel.org,
	aneesh.kumar@linux.vnet.ibm.com, bsingharora@gmail.com,
	srikar@linux.vnet.ibm.com, haren@linux.vnet.ibm.com,
	jglisse@redhat.com, dave.hansen@intel.com, dan.j.williams@intel.com,
	zi.yan@cs.rutgers.edu
Subject: [PATCH 2/6] mm/migrate: Make migrate_mode types non-exclusive
Date: Fri, 17 Feb 2017 16:54:49 +0530
Message-Id: <20170217112453.307-3-khandual@linux.vnet.ibm.com>
In-Reply-To: <20170217112453.307-1-khandual@linux.vnet.ibm.com>
References: <20170217112453.307-1-khandual@linux.vnet.ibm.com>
X-Mailer: git-send-email 2.9.3

From: Zi Yan

This changes the migrate_mode enum declaration from sequential numbers
to bit positions, so that the modes can be used in combination, which
was not possible earlier. No functional change intended.
Signed-off-by: Zi Yan
Signed-off-by: Anshuman Khandual
---
 include/linux/migrate_mode.h |  8 ++++----
 mm/compaction.c              | 20 ++++++++++----------
 mm/migrate.c                 | 14 +++++++-------
 3 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h
index b3b9acb..89c1700 100644
--- a/include/linux/migrate_mode.h
+++ b/include/linux/migrate_mode.h
@@ -8,10 +8,10 @@
  * MIGRATE_SYNC will block when migrating pages
  */
 enum migrate_mode {
-	MIGRATE_ASYNC,
-	MIGRATE_SYNC_LIGHT,
-	MIGRATE_SYNC,
-	MIGRATE_ST
+	MIGRATE_ASYNC		= 1<<0,
+	MIGRATE_SYNC_LIGHT	= 1<<1,
+	MIGRATE_SYNC		= 1<<2,
+	MIGRATE_ST		= 1<<3,
 };
 
 #endif		/* MIGRATE_MODE_H_INCLUDED */
diff --git a/mm/compaction.c b/mm/compaction.c
index 949198d..1a481af 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -296,7 +296,7 @@ static void update_pageblock_skip(struct compact_control *cc,
 	if (migrate_scanner) {
 		if (pfn > zone->compact_cached_migrate_pfn[0])
 			zone->compact_cached_migrate_pfn[0] = pfn;
-		if (cc->mode != MIGRATE_ASYNC &&
+		if (!(cc->mode & MIGRATE_ASYNC) &&
 		    pfn > zone->compact_cached_migrate_pfn[1])
 			zone->compact_cached_migrate_pfn[1] = pfn;
 	} else {
@@ -329,7 +329,7 @@ static void update_pageblock_skip(struct compact_control *cc,
 static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
 						struct compact_control *cc)
 {
-	if (cc->mode == MIGRATE_ASYNC) {
+	if (cc->mode & MIGRATE_ASYNC) {
 		if (!spin_trylock_irqsave(lock, *flags)) {
 			cc->contended = true;
 			return false;
@@ -370,7 +370,7 @@ static bool compact_unlock_should_abort(spinlock_t *lock,
 	}
 
 	if (need_resched()) {
-		if (cc->mode == MIGRATE_ASYNC) {
+		if (cc->mode & MIGRATE_ASYNC) {
 			cc->contended = true;
 			return true;
 		}
@@ -393,7 +393,7 @@ static inline bool compact_should_abort(struct compact_control *cc)
 {
 	/* async compaction aborts if contended */
 	if (need_resched()) {
-		if (cc->mode == MIGRATE_ASYNC) {
+		if (cc->mode & MIGRATE_ASYNC) {
 			cc->contended = true;
 			return true;
 		}
@@ -688,7 +688,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 	 */
 	while (unlikely(too_many_isolated(zone))) {
 		/* async migration should just abort */
-		if (cc->mode == MIGRATE_ASYNC)
+		if (cc->mode & MIGRATE_ASYNC)
 			return 0;
 
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -700,7 +700,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 	if (compact_should_abort(cc))
 		return 0;
 
-	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
+	if (cc->direct_compaction && (cc->mode & MIGRATE_ASYNC)) {
 		skip_on_failure = true;
 		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
 	}
@@ -1195,7 +1195,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	struct page *page;
 	const isolate_mode_t isolate_mode =
 		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
-		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
+		(!(cc->mode & MIGRATE_SYNC) ? ISOLATE_ASYNC_MIGRATE : 0);
 
 	/*
 	 * Start at where we last stopped, or beginning of the zone as
@@ -1241,7 +1241,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 		 * Async compaction is optimistic to see if the minimum amount
 		 * of work satisfies the allocation.
 		 */
-		if (cc->mode == MIGRATE_ASYNC &&
+		if ((cc->mode & MIGRATE_ASYNC) &&
 		    !migrate_async_suitable(get_pageblock_migratetype(page)))
 			continue;
 
@@ -1481,7 +1481,7 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
 	unsigned long start_pfn = zone->zone_start_pfn;
 	unsigned long end_pfn = zone_end_pfn(zone);
 	const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
-	const bool sync = cc->mode != MIGRATE_ASYNC;
+	const bool sync = !(cc->mode & MIGRATE_ASYNC);
 
 	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
 							cc->classzone_idx);
@@ -1577,7 +1577,7 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
 			 * order-aligned block, so skip the rest of it.
 			 */
 			if (cc->direct_compaction &&
-						(cc->mode == MIGRATE_ASYNC)) {
+						(cc->mode & MIGRATE_ASYNC)) {
 				cc->migrate_pfn = block_end_pfn(
 						cc->migrate_pfn - 1, cc->order);
 				/* Draining pcplists is useless in this case */
diff --git a/mm/migrate.c b/mm/migrate.c
index 13fa938..63c3682 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -352,7 +352,7 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
 	struct buffer_head *bh = head;
 
 	/* Simple case, sync compaction */
-	if (mode != MIGRATE_ASYNC) {
+	if (!(mode & MIGRATE_ASYNC)) {
 		do {
 			get_bh(bh);
 			lock_buffer(bh);
@@ -453,7 +453,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 * the mapping back due to an elevated page count, we would have to
 	 * block waiting on other references to be dropped.
 	 */
-	if (mode == MIGRATE_ASYNC && head &&
+	if ((mode & MIGRATE_ASYNC) && head &&
 			!buffer_migrate_lock_buffers(head, mode)) {
 		page_ref_unfreeze(page, expected_count);
 		spin_unlock_irq(&mapping->tree_lock);
@@ -739,7 +739,7 @@ int buffer_migrate_page(struct address_space *mapping,
 	 * with an IRQ-safe spinlock held. In the sync case, the buffers
 	 * need to be locked now
 	 */
-	if (mode != MIGRATE_ASYNC)
+	if (!(mode & MIGRATE_ASYNC))
 		BUG_ON(!buffer_migrate_lock_buffers(head, mode));
 
 	ClearPagePrivate(page);
@@ -821,7 +821,7 @@ static int fallback_migrate_page(struct address_space *mapping,
 {
 	if (PageDirty(page)) {
 		/* Only writeback pages in full synchronous migration */
-		if (mode != MIGRATE_SYNC)
+		if (!(mode & MIGRATE_SYNC))
 			return -EBUSY;
 		return writeout(mapping, page);
 	}
@@ -930,7 +930,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	bool is_lru = !__PageMovable(page);
 
 	if (!trylock_page(page)) {
-		if (!force || mode == MIGRATE_ASYNC)
+		if (!force || (mode & MIGRATE_ASYNC))
 			goto out;
 
 		/*
@@ -959,7 +959,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		 * the retry loop is too short and in the sync-light case,
 		 * the overhead of stalling is too much
 		 */
-		if (mode != MIGRATE_SYNC) {
+		if (!(mode & MIGRATE_SYNC)) {
 			rc = -EBUSY;
 			goto out_unlock;
 		}
@@ -1229,7 +1229,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		return -ENOMEM;
 
 	if (!trylock_page(hpage)) {
-		if (!force || mode != MIGRATE_SYNC)
+		if (!force || !(mode & MIGRATE_SYNC))
 			goto out;
 		lock_page(hpage);
 	}
-- 
2.9.3
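
[Illustration, not part of the patch: with the old sequential values,
MIGRATE_ASYNC was 0, so a bitwise test like (mode & MIGRATE_ASYNC)
could never be true and only equality comparisons worked; the modes
were mutually exclusive. With each mode in its own bit position,
callers can request combinations such as MIGRATE_SYNC | MIGRATE_ST
(MIGRATE_ST presumably being added earlier in this series), and each
site tests its bit with &, as the conversions above do. A minimal
standalone user-space sketch of the new semantics, mirroring only the
enum from this patch:

	#include <stdio.h>

	/* Same bit-position encoding as the patched migrate_mode.h */
	enum migrate_mode {
		MIGRATE_ASYNC		= 1<<0,
		MIGRATE_SYNC_LIGHT	= 1<<1,
		MIGRATE_SYNC		= 1<<2,
		MIGRATE_ST		= 1<<3,
	};

	int main(void)
	{
		/* Modes can now be OR-ed together, e.g. a sync
		 * migration that also carries the ST hint. */
		enum migrate_mode mode = MIGRATE_SYNC | MIGRATE_ST;

		/* Bit tests replace the old equality checks. */
		if (mode & MIGRATE_SYNC)	/* true */
			printf("sync requested\n");
		if (!(mode & MIGRATE_ASYNC))	/* true: async bit unset */
			printf("not async\n");

		return 0;
	}
]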