From: Waiman Long <longman@redhat.com>
To: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>, Will Deacon <will.deacon@arm.com>,
	linux-kernel@vger.kernel.org,
	Bart Van Assche <bvanassche@acm.org>
Subject: Re: [PATCH 4/5] locking/lockdep: Reuse free chain_hlocks entries
Date: Fri, 13 Dec 2019 10:02:26 -0500
Message-ID: <d639c80c-d60f-d5ac-8559-7942798dc92e@redhat.com>
In-Reply-To: <20191213102525.GA2844@hirez.programming.kicks-ass.net>

On 12/13/19 5:25 AM, Peter Zijlstra wrote:
> On Thu, Dec 12, 2019 at 05:35:24PM -0500, Waiman Long wrote:
>
>> diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
>> index 4c23ab7d27c2..999cd714e0d1 100644
>> --- a/kernel/locking/lockdep_internals.h
>> +++ b/kernel/locking/lockdep_internals.h
>> @@ -107,8 +107,15 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
>>  #endif
>>  
>>  #define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
>> -
>>  #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
>> +#define MAX_CHAIN_HLOCKS_BLOCKS	1024
>> +#define CHAIN_HLOCKS_HASH	8
>> +
>> +struct chain_hlocks_block {
>> +	unsigned int		   depth: 8,
>> +				   base :24;
>> +	struct chain_hlocks_block *next;
>> +};
>>  
>>  extern struct list_head all_lock_classes;
>>  extern struct lock_chain lock_chains[];
> This doesn't need to be in the header, there are no users outside of the
> stuff you wrote below.
>
That is true.
>> +#ifdef CONFIG_PROVE_LOCKING
>> +static struct chain_hlocks_block chain_hlocks_blocks[MAX_CHAIN_HLOCKS_BLOCKS];
>> +static struct chain_hlocks_block *chain_hlocks_block_hash[CHAIN_HLOCKS_HASH];
>> +static struct chain_hlocks_block *free_chain_hlocks_blocks;
>> +
>> +/*
>> + * Graph lock must be held before calling the chain_hlocks_block functions.
>> + * Chain hlocks of depth 1-(CHAIN_HLOCKS_HASH-1) is mapped directly to
>> + * chain_hlocks_block_hash[1-(CHAIN_HLOCKS_HASH-1)]. All other sizes
>> + * are mapped to chain_hlocks_block_hash[0].
>> + */
>> +static inline struct chain_hlocks_block *alloc_chain_hlocks_block(void)
>> +{
>> +	struct chain_hlocks_block *block = free_chain_hlocks_blocks;
>> +
>> +	WARN_ONCE(!debug_locks_silent && !block,
>> +		  "Running out of chain_hlocks_block\n");
>> +	free_chain_hlocks_blocks = block ? block->next : NULL;
>> +	return block;
>> +}
>> +
>> +static inline void free_chain_hlocks_block(struct chain_hlocks_block *block)
>> +{
>> +	block->next = free_chain_hlocks_blocks;
>> +	free_chain_hlocks_blocks = block;
>> +}
>> +
>> +static inline void push_chain_hlocks_block(struct chain_hlocks_block *block)
>> +{
>> +	int hash, depth = block->depth;
>> +
>> +	hash = (depth >= CHAIN_HLOCKS_HASH) ? 0 : depth;
>> +	block->next = chain_hlocks_block_hash[hash];
>> +	chain_hlocks_block_hash[hash] = block;
>> +	nr_free_chain_hlocks += depth;
>> +}
> I would argue this is not a hash, these are buckets. You're doing
> regular size buckets.

I think I used the wrong terminology here. Bucket is a better description.
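In other words, the array is indexed directly by the block size, with index 0
acting as a catch-all for anything too large to get its own slot, so there is
no hash function or collision handling involved at all. A minimal sketch of
that indexing (the names here are made up for illustration, they are not in
the patch):

/* hypothetical direct size-indexed buckets, index 0 = mixed sizes */
#define NR_SIZE_BUCKETS		8

static struct chain_hlocks_block *size_buckets[NR_SIZE_BUCKETS];

static inline int size_to_bucket(int size)
{
	return (size >= NR_SIZE_BUCKETS) ? 0 : size;
}

With only a handful of fixed-size buckets plus the catch-all, the common
shallow-chain sizes are still found in O(1).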


>
>> +static inline struct chain_hlocks_block *pop_chain_hlocks_block(int depth)
>> +{
>> +	struct chain_hlocks_block *curr, **pprev;
>> +
>> +	if (!nr_free_chain_hlocks)
>> +		return NULL;
>> +
>> +	if (depth < CHAIN_HLOCKS_HASH) {
>> +		curr = chain_hlocks_block_hash[depth];
>> +		if (curr) {
>> +			chain_hlocks_block_hash[depth] = curr->next;
>> +			nr_free_chain_hlocks -= depth;
>> +		}
>> +		return curr;
>> +	}
>> +
>> +	/*
>> +	 * For depth >= CHAIN_HLOCKS_HASH, it is not really a pop operation.
>> +	 * Instead, the first entry with the right size is returned.
>> +	 */
>> +	curr  = chain_hlocks_block_hash[0];
>> +	pprev = chain_hlocks_block_hash;
>> +
>> +	while (curr) {
>> +		if (curr->depth == depth)
>> +			break;
>> +		pprev = &(curr->next);
>> +		curr = curr->next;
>> +	}
>> +
>> +	if (curr) {
>> +		*pprev = curr->next;
>> +		nr_free_chain_hlocks -= depth;
>> +	}
>> +	return curr;
>> +}
>> +#else
>> +static inline void free_chain_hlocks_block(struct chain_hlocks_block *block) { }
>> +static inline struct chain_hlocks_block *pop_chain_hlocks_block(int depth)
>> +{
>> +	return NULL;
>> +}
>> +#endif /* CONFIG_PROVE_LOCKING */
> You've made a bit of a mess of things though. Why is that push/pop crud
> exposed? Why didn't you build a self-contained allocator?
>
> All you really want is:
>
> u16 *alloc_chain(int size);
> void free_chain(int size, u16 *chain);
>
>
> An implementation would be something along these lines (completely
> untested, fresh from the keyboard):
>
> struct chain_block {
> 	int size;
> 	int base;
> 	struct chain_block *next;
> };
>
> struct chain_block chain_blocks[MAX_BLOCKS];
> struct chain_block *free_blocks;
>
>
> struct chain_block init_block = {
> 	.size = MAX_LOCKDEP_CHAIN_HLOCKS,
> 	.base = 0,
> 	.next = NULL,
> };
>
> struct chain_block *free_list[MAX_BUCKETS] = {
> 	&init_block, /* 0 */
> };
>
>
> void free_block(struct chain_block *b)
> {
> 	b->next = free_blocks;
> 	free_blocks = b;
> }
>
> struct chain_block *alloc_block(void)
> {
> 	struct chain_block *block = free_blocks;
>
> 	if (!block)
> 		return NULL;
> 	free_blocks = block->next;
> 	return block;
> }
>
> struct chain_block *pop_block(struct chain_block **bp)
> {
> 	struct chain_block *b = *bp;
> 	if (!b)
> 		return NULL;
> 	*bp = b->next;
> 	return b;
> }
>
> void push_block(struct chain_block **bp, struct chain_block *b)
> {
> 	b->next = *bp;
> 	*bp = b;
> }
>
> /* could contemplate ilog2() buckets */
> int size2bucket(int size)
> {
> 	return size >= MAX_BUCKETS ? 0 : size;
> }
>
> /* bucket[0] is mixed size */
> struct chain_block *pop_block_0(struct chain_block **bp, int size)
> {
> 	struct chain_block **p = bp, *b = *bp;
> 	if (!b)
> 		return NULL;
>
> 	while (b && b->size < size) {
> 		p = &b->next;
> 		b = b->next;
> 	}
> 	if (!b)
> 		return NULL;
>
> 	*p = b->next;
> 	return b;
> }
>
> u16 *alloc_chain(int size)
> {
> 	int i, bucket = size2bucket(size);
> 	struct chain_block *b;
> 	u16 *chain;
>
> 	if (!bucket) {
> 		b = pop_block_0(&free_list[0], size);
> 		if (b)
> 			goto got_block;
> 	} else {
> 		b = pop_block(&free_list[bucket]);
> 		if (b)
> 			goto got_block;
>
> 		/* pop a large block, hope the fragment is still useful */
> 		b = pop_block_0(&free_list[0], size);
> 		if (b)
> 			goto got_block;
>
> 		for (i = MAX_BUCKETS-1; i > bucket; i--) {
> 			b = pop_block(&free_list[i]);
> 			if (b)
> 				goto got_block;
> 		}
> 	}
> 	return NULL;
>
> got_block:
>
> 	chain = chain_hlocks + b->base;
> 	b->base += size;
> 	b->size -= size;
>
> 	if (b->size) {
> 		/* return the fragment */
> 		bucket = size2bucket(b->size);
> 		push_block(&free_list[bucket], b);
> 	} else {
> 		free_block(b);
> 	}
>
> 	return chain;
> }
>
> void free_chain(int size, u16 *chain)
> {
> 	struct chain_block *b = alloc_block();
> 	int bucket = size2bucket(size);
>
> 	if (!b) {
> 		// leak stuff;
> 		return;
> 	}
>
> 	b->size = size;
> 	b->base = chain - chain_hlocks;
>
> 	push_block(&free_list[bucket], b);
> }
>
> void init_blocks(void)
> {
> 	int i;
>
> 	for (i = 0; i < MAX_BLOCKS; i++)
> 		free_block(&chain_blocks[i]);
> }
>
Thanks for the suggestion. I will incorporate your idea in the next
version of the patch.
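
For my own understanding, the way I would wire it up is roughly the following
(the helper names below are hypothetical, just to sketch the call sites, they
are not existing lockdep functions):

/* hypothetical call site when a lock chain is zapped */
static void reclaim_chain_hlocks(struct lock_chain *chain)
{
	/* give the chain's slice of chain_hlocks[] back to the free lists */
	free_chain(chain->depth, chain_hlocks + chain->base);
}

/* hypothetical allocation side, instead of always carving new entries
 * off the end of chain_hlocks[]
 */
static int assign_chain_hlocks(struct lock_chain *chain, int depth)
{
	u16 *entries = alloc_chain(depth);

	if (!entries)
		return 0;	/* table exhausted, lockdep gives up */

	chain->base  = entries - chain_hlocks;
	chain->depth = depth;
	return 1;
}

That also keeps all of the free-list bookkeeping private to the allocator,
which addresses the push/pop exposure you pointed out.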

Cheers,
Longman


Thread overview: 16+ messages
2019-12-12 22:35 [PATCH 0/5] locking/lockdep: Reuse zapped chain_hlocks entries Waiman Long
2019-12-12 22:35 ` [PATCH 1/5] locking/lockdep: Track number of zapped classes Waiman Long
2019-12-12 22:35 ` [PATCH 2/5] locking/lockdep: Track leaked chain_hlocks entries Waiman Long
2019-12-12 22:35 ` [PATCH 3/5] locking/lockdep: Track number of zapped lock chains Waiman Long
2019-12-12 22:35 ` [PATCH 4/5] locking/lockdep: Reuse free chain_hlocks entries Waiman Long
2019-12-13 10:25   ` Peter Zijlstra
2019-12-13 10:50     ` Peter Zijlstra
2019-12-13 16:02       ` Waiman Long
2019-12-13 16:05         ` Waiman Long
2019-12-13 18:12         ` Peter Zijlstra
     [not found]           ` <7ca26a9a-003f-6f24-08e4-f01b80e3e962@redhat.com>
2019-12-13 18:47             ` Peter Zijlstra
2019-12-13 20:08               ` Waiman Long
2019-12-15 17:06                 ` Waiman Long
2019-12-13 15:02     ` Waiman Long [this message]
2019-12-13 18:43     ` Peter Zijlstra
2019-12-12 22:35 ` [PATCH 5/5] locking/lockdep: Decrement irq context counters when removing lock chain Waiman Long
