mm-commits.vger.kernel.org archive mirror
From: Andrew Morton <akpm@linux-foundation.org>
To: akpm@linux-foundation.org, laniel_francis@privacyrequired.com,
	linux-mm@kvack.org, mm-commits@vger.kernel.org,
	torvalds@linux-foundation.org
Subject: [patch 093/118] string.h: move fortified functions definitions in a dedicated header.
Date: Thu, 25 Feb 2021 17:21:20 -0800	[thread overview]
Message-ID: <20210226012120.4bh5TuJm0%akpm@linux-foundation.org> (raw)
In-Reply-To: <20210225171452.713967e96554bb6a53e44a19@linux-foundation.org>

From: Francis Laniel <laniel_francis@privacyrequired.com>
Subject: string.h: move fortified functions definitions in a dedicated header.

This patch adds fortify-string.h to contain the fortified function
definitions.  Thus, the code is better separated and compile time is
approximately 1% faster for people who do not set CONFIG_FORTIFY_SOURCE.
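
To illustrate what the fortified wrappers catch, here is a minimal
sketch (not taken from this patch; fortify_demo, buf, src and
runtime_len are made-up names):

	#include <linux/string.h>	/* pulls in fortify-string.h when CONFIG_FORTIFY_SOURCE=y */

	static void fortify_demo(const void *src, size_t runtime_len)
	{
		char buf[8];

		/*
		 * Constant size larger than the destination object: the
		 * fortified memcpy() hits __write_overflow() and the build
		 * fails (assuming optimization is enabled and the call is
		 * actually emitted).
		 */
		memcpy(buf, src, 16);

		/*
		 * Non-constant size: checked at runtime instead;
		 * fortify_panic() fires if it exceeds the object size the
		 * compiler could determine.
		 */
		memcpy(buf, src, runtime_len);
	}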

Link: https://lkml.kernel.org/r/20210111092141.22946-1-laniel_francis@privacyrequired.com
Link: https://lkml.kernel.org/r/20210111092141.22946-2-laniel_francis@privacyrequired.com
Signed-off-by: Francis Laniel <laniel_francis@privacyrequired.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 include/linux/fortify-string.h |  302 +++++++++++++++++++++++++++++++
 include/linux/string.h         |  282 ----------------------------
 2 files changed, 303 insertions(+), 281 deletions(-)

--- /dev/null
+++ a/include/linux/fortify-string.h
@@ -0,0 +1,302 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FORTIFY_STRING_H_
+#define _LINUX_FORTIFY_STRING_H_
+
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
+extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
+extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
+extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
+extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
+extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
+extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
+extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
+extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
+extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
+#else
+#define __underlying_memchr	__builtin_memchr
+#define __underlying_memcmp	__builtin_memcmp
+#define __underlying_memcpy	__builtin_memcpy
+#define __underlying_memmove	__builtin_memmove
+#define __underlying_memset	__builtin_memset
+#define __underlying_strcat	__builtin_strcat
+#define __underlying_strcpy	__builtin_strcpy
+#define __underlying_strlen	__builtin_strlen
+#define __underlying_strncat	__builtin_strncat
+#define __underlying_strncpy	__builtin_strncpy
+#endif
+
+__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
+{
+	size_t p_size = __builtin_object_size(p, 1);
+
+	if (__builtin_constant_p(size) && p_size < size)
+		__write_overflow();
+	if (p_size < size)
+		fortify_panic(__func__);
+	return __underlying_strncpy(p, q, size);
+}
+
+__FORTIFY_INLINE char *strcat(char *p, const char *q)
+{
+	size_t p_size = __builtin_object_size(p, 1);
+
+	if (p_size == (size_t)-1)
+		return __underlying_strcat(p, q);
+	if (strlcat(p, q, p_size) >= p_size)
+		fortify_panic(__func__);
+	return p;
+}
+
+__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
+{
+	__kernel_size_t ret;
+	size_t p_size = __builtin_object_size(p, 1);
+
+	/* Work around gcc excess stack consumption issue */
+	if (p_size == (size_t)-1 ||
+		(__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
+		return __underlying_strlen(p);
+	ret = strnlen(p, p_size);
+	if (p_size <= ret)
+		fortify_panic(__func__);
+	return ret;
+}
+
+extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
+__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
+{
+	size_t p_size = __builtin_object_size(p, 1);
+	__kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
+
+	if (p_size <= ret && maxlen != ret)
+		fortify_panic(__func__);
+	return ret;
+}
+
+/* defined after fortified strlen to reuse it */
+extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
+__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
+{
+	size_t ret;
+	size_t p_size = __builtin_object_size(p, 1);
+	size_t q_size = __builtin_object_size(q, 1);
+
+	if (p_size == (size_t)-1 && q_size == (size_t)-1)
+		return __real_strlcpy(p, q, size);
+	ret = strlen(q);
+	if (size) {
+		size_t len = (ret >= size) ? size - 1 : ret;
+
+		if (__builtin_constant_p(len) && len >= p_size)
+			__write_overflow();
+		if (len >= p_size)
+			fortify_panic(__func__);
+		__underlying_memcpy(p, q, len);
+		p[len] = '\0';
+	}
+	return ret;
+}
+
+/* defined after fortified strnlen to reuse it */
+extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
+__FORTIFY_INLINE ssize_t strscpy(char *p, const char *q, size_t size)
+{
+	size_t len;
+	/* Use string size rather than possible enclosing struct size. */
+	size_t p_size = __builtin_object_size(p, 1);
+	size_t q_size = __builtin_object_size(q, 1);
+
+	/* If we cannot get the sizes of p and q, default to calling strscpy. */
+	if (p_size == (size_t) -1 && q_size == (size_t) -1)
+		return __real_strscpy(p, q, size);
+
+	/*
+	 * If size can be known at compile time and is greater than
+	 * p_size, generate a compile time write overflow error.
+	 */
+	if (__builtin_constant_p(size) && size > p_size)
+		__write_overflow();
+
+	/*
+	 * This call protects from read overflow, because len will default to q's
+	 * length if it is smaller than size.
+	 */
+	len = strnlen(q, size);
+	/*
+	 * If len equals size, we will copy only size bytes which leads to
+	 * -E2BIG being returned.
+	 * Otherwise we will copy len + 1 because of the final '\0'.
+	 */
+	len = len == size ? size : len + 1;
+
+	/*
+	 * Generate a runtime write overflow error if len is greater than
+	 * p_size.
+	 */
+	if (len > p_size)
+		fortify_panic(__func__);
+
+	/*
+	 * We can now safely call vanilla strscpy because we are protected from:
+	 * 1. Read overflow thanks to call to strnlen().
+	 * 2. Write overflow thanks to above ifs.
+	 */
+	return __real_strscpy(p, q, len);
+}
+
+/* defined after fortified strlen and strnlen to reuse them */
+__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
+{
+	size_t p_len, copy_len;
+	size_t p_size = __builtin_object_size(p, 1);
+	size_t q_size = __builtin_object_size(q, 1);
+
+	if (p_size == (size_t)-1 && q_size == (size_t)-1)
+		return __underlying_strncat(p, q, count);
+	p_len = strlen(p);
+	copy_len = strnlen(q, count);
+	if (p_size < p_len + copy_len + 1)
+		fortify_panic(__func__);
+	__underlying_memcpy(p + p_len, q, copy_len);
+	p[p_len + copy_len] = '\0';
+	return p;
+}
+
+__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
+{
+	size_t p_size = __builtin_object_size(p, 0);
+
+	if (__builtin_constant_p(size) && p_size < size)
+		__write_overflow();
+	if (p_size < size)
+		fortify_panic(__func__);
+	return __underlying_memset(p, c, size);
+}
+
+__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
+{
+	size_t p_size = __builtin_object_size(p, 0);
+	size_t q_size = __builtin_object_size(q, 0);
+
+	if (__builtin_constant_p(size)) {
+		if (p_size < size)
+			__write_overflow();
+		if (q_size < size)
+			__read_overflow2();
+	}
+	if (p_size < size || q_size < size)
+		fortify_panic(__func__);
+	return __underlying_memcpy(p, q, size);
+}
+
+__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
+{
+	size_t p_size = __builtin_object_size(p, 0);
+	size_t q_size = __builtin_object_size(q, 0);
+
+	if (__builtin_constant_p(size)) {
+		if (p_size < size)
+			__write_overflow();
+		if (q_size < size)
+			__read_overflow2();
+	}
+	if (p_size < size || q_size < size)
+		fortify_panic(__func__);
+	return __underlying_memmove(p, q, size);
+}
+
+extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
+__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size)
+{
+	size_t p_size = __builtin_object_size(p, 0);
+
+	if (__builtin_constant_p(size) && p_size < size)
+		__read_overflow();
+	if (p_size < size)
+		fortify_panic(__func__);
+	return __real_memscan(p, c, size);
+}
+
+__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
+{
+	size_t p_size = __builtin_object_size(p, 0);
+	size_t q_size = __builtin_object_size(q, 0);
+
+	if (__builtin_constant_p(size)) {
+		if (p_size < size)
+			__read_overflow();
+		if (q_size < size)
+			__read_overflow2();
+	}
+	if (p_size < size || q_size < size)
+		fortify_panic(__func__);
+	return __underlying_memcmp(p, q, size);
+}
+
+__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
+{
+	size_t p_size = __builtin_object_size(p, 0);
+
+	if (__builtin_constant_p(size) && p_size < size)
+		__read_overflow();
+	if (p_size < size)
+		fortify_panic(__func__);
+	return __underlying_memchr(p, c, size);
+}
+
+void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
+__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size)
+{
+	size_t p_size = __builtin_object_size(p, 0);
+
+	if (__builtin_constant_p(size) && p_size < size)
+		__read_overflow();
+	if (p_size < size)
+		fortify_panic(__func__);
+	return __real_memchr_inv(p, c, size);
+}
+
+extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
+__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
+{
+	size_t p_size = __builtin_object_size(p, 0);
+
+	if (__builtin_constant_p(size) && p_size < size)
+		__read_overflow();
+	if (p_size < size)
+		fortify_panic(__func__);
+	return __real_kmemdup(p, size, gfp);
+}
+
+/* defined after fortified strlen and memcpy to reuse them */
+__FORTIFY_INLINE char *strcpy(char *p, const char *q)
+{
+	size_t p_size = __builtin_object_size(p, 1);
+	size_t q_size = __builtin_object_size(q, 1);
+	size_t size;
+
+	if (p_size == (size_t)-1 && q_size == (size_t)-1)
+		return __underlying_strcpy(p, q);
+	size = strlen(q) + 1;
+	/* test here to use the more stringent object size */
+	if (p_size < size)
+		fortify_panic(__func__);
+	memcpy(p, q, size);
+	return p;
+}
+
+/* Don't use these outside the FORTIFY_SOURCE implementation */
+#undef __underlying_memchr
+#undef __underlying_memcmp
+#undef __underlying_memcpy
+#undef __underlying_memmove
+#undef __underlying_memset
+#undef __underlying_strcat
+#undef __underlying_strcpy
+#undef __underlying_strlen
+#undef __underlying_strncat
+#undef __underlying_strncpy
+
+#endif /* _LINUX_FORTIFY_STRING_H_ */
--- a/include/linux/string.h~stringh-move-fortified-functions-definitions-in-a-dedicated-header
+++ a/include/linux/string.h
@@ -266,287 +266,7 @@ void __read_overflow3(void) __compiletim
 void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
 
 #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
-
-#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
-extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
-extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
-extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
-extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
-extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
-extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
-extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
-extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
-extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
-extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
-#else
-#define __underlying_memchr	__builtin_memchr
-#define __underlying_memcmp	__builtin_memcmp
-#define __underlying_memcpy	__builtin_memcpy
-#define __underlying_memmove	__builtin_memmove
-#define __underlying_memset	__builtin_memset
-#define __underlying_strcat	__builtin_strcat
-#define __underlying_strcpy	__builtin_strcpy
-#define __underlying_strlen	__builtin_strlen
-#define __underlying_strncat	__builtin_strncat
-#define __underlying_strncpy	__builtin_strncpy
-#endif
-
-__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
-{
-	size_t p_size = __builtin_object_size(p, 1);
-	if (__builtin_constant_p(size) && p_size < size)
-		__write_overflow();
-	if (p_size < size)
-		fortify_panic(__func__);
-	return __underlying_strncpy(p, q, size);
-}
-
-__FORTIFY_INLINE char *strcat(char *p, const char *q)
-{
-	size_t p_size = __builtin_object_size(p, 1);
-	if (p_size == (size_t)-1)
-		return __underlying_strcat(p, q);
-	if (strlcat(p, q, p_size) >= p_size)
-		fortify_panic(__func__);
-	return p;
-}
-
-__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
-{
-	__kernel_size_t ret;
-	size_t p_size = __builtin_object_size(p, 1);
-
-	/* Work around gcc excess stack consumption issue */
-	if (p_size == (size_t)-1 ||
-	    (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
-		return __underlying_strlen(p);
-	ret = strnlen(p, p_size);
-	if (p_size <= ret)
-		fortify_panic(__func__);
-	return ret;
-}
-
-extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
-__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
-{
-	size_t p_size = __builtin_object_size(p, 1);
-	__kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
-	if (p_size <= ret && maxlen != ret)
-		fortify_panic(__func__);
-	return ret;
-}
-
-/* defined after fortified strlen to reuse it */
-extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
-__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
-{
-	size_t ret;
-	size_t p_size = __builtin_object_size(p, 1);
-	size_t q_size = __builtin_object_size(q, 1);
-	if (p_size == (size_t)-1 && q_size == (size_t)-1)
-		return __real_strlcpy(p, q, size);
-	ret = strlen(q);
-	if (size) {
-		size_t len = (ret >= size) ? size - 1 : ret;
-		if (__builtin_constant_p(len) && len >= p_size)
-			__write_overflow();
-		if (len >= p_size)
-			fortify_panic(__func__);
-		__underlying_memcpy(p, q, len);
-		p[len] = '\0';
-	}
-	return ret;
-}
-
-/* defined after fortified strnlen to reuse it */
-extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
-__FORTIFY_INLINE ssize_t strscpy(char *p, const char *q, size_t size)
-{
-	size_t len;
-	/* Use string size rather than possible enclosing struct size. */
-	size_t p_size = __builtin_object_size(p, 1);
-	size_t q_size = __builtin_object_size(q, 1);
-
-	/* If we cannot get size of p and q default to call strscpy. */
-	if (p_size == (size_t) -1 && q_size == (size_t) -1)
-		return __real_strscpy(p, q, size);
-
-	/*
-	 * If size can be known at compile time and is greater than
-	 * p_size, generate a compile time write overflow error.
-	 */
-	if (__builtin_constant_p(size) && size > p_size)
-		__write_overflow();
-
-	/*
-	 * This call protects from read overflow, because len will default to q
-	 * length if it smaller than size.
-	 */
-	len = strnlen(q, size);
-	/*
-	 * If len equals size, we will copy only size bytes which leads to
-	 * -E2BIG being returned.
-	 * Otherwise we will copy len + 1 because of the final '\O'.
-	 */
-	len = len == size ? size : len + 1;
-
-	/*
-	 * Generate a runtime write overflow error if len is greater than
-	 * p_size.
-	 */
-	if (len > p_size)
-		fortify_panic(__func__);
-
-	/*
-	 * We can now safely call vanilla strscpy because we are protected from:
-	 * 1. Read overflow thanks to call to strnlen().
-	 * 2. Write overflow thanks to above ifs.
-	 */
-	return __real_strscpy(p, q, len);
-}
-
-/* defined after fortified strlen and strnlen to reuse them */
-__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
-{
-	size_t p_len, copy_len;
-	size_t p_size = __builtin_object_size(p, 1);
-	size_t q_size = __builtin_object_size(q, 1);
-	if (p_size == (size_t)-1 && q_size == (size_t)-1)
-		return __underlying_strncat(p, q, count);
-	p_len = strlen(p);
-	copy_len = strnlen(q, count);
-	if (p_size < p_len + copy_len + 1)
-		fortify_panic(__func__);
-	__underlying_memcpy(p + p_len, q, copy_len);
-	p[p_len + copy_len] = '\0';
-	return p;
-}
-
-__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
-{
-	size_t p_size = __builtin_object_size(p, 0);
-	if (__builtin_constant_p(size) && p_size < size)
-		__write_overflow();
-	if (p_size < size)
-		fortify_panic(__func__);
-	return __underlying_memset(p, c, size);
-}
-
-__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
-{
-	size_t p_size = __builtin_object_size(p, 0);
-	size_t q_size = __builtin_object_size(q, 0);
-	if (__builtin_constant_p(size)) {
-		if (p_size < size)
-			__write_overflow();
-		if (q_size < size)
-			__read_overflow2();
-	}
-	if (p_size < size || q_size < size)
-		fortify_panic(__func__);
-	return __underlying_memcpy(p, q, size);
-}
-
-__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
-{
-	size_t p_size = __builtin_object_size(p, 0);
-	size_t q_size = __builtin_object_size(q, 0);
-	if (__builtin_constant_p(size)) {
-		if (p_size < size)
-			__write_overflow();
-		if (q_size < size)
-			__read_overflow2();
-	}
-	if (p_size < size || q_size < size)
-		fortify_panic(__func__);
-	return __underlying_memmove(p, q, size);
-}
-
-extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
-__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size)
-{
-	size_t p_size = __builtin_object_size(p, 0);
-	if (__builtin_constant_p(size) && p_size < size)
-		__read_overflow();
-	if (p_size < size)
-		fortify_panic(__func__);
-	return __real_memscan(p, c, size);
-}
-
-__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
-{
-	size_t p_size = __builtin_object_size(p, 0);
-	size_t q_size = __builtin_object_size(q, 0);
-	if (__builtin_constant_p(size)) {
-		if (p_size < size)
-			__read_overflow();
-		if (q_size < size)
-			__read_overflow2();
-	}
-	if (p_size < size || q_size < size)
-		fortify_panic(__func__);
-	return __underlying_memcmp(p, q, size);
-}
-
-__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
-{
-	size_t p_size = __builtin_object_size(p, 0);
-	if (__builtin_constant_p(size) && p_size < size)
-		__read_overflow();
-	if (p_size < size)
-		fortify_panic(__func__);
-	return __underlying_memchr(p, c, size);
-}
-
-void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
-__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size)
-{
-	size_t p_size = __builtin_object_size(p, 0);
-	if (__builtin_constant_p(size) && p_size < size)
-		__read_overflow();
-	if (p_size < size)
-		fortify_panic(__func__);
-	return __real_memchr_inv(p, c, size);
-}
-
-extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
-__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
-{
-	size_t p_size = __builtin_object_size(p, 0);
-	if (__builtin_constant_p(size) && p_size < size)
-		__read_overflow();
-	if (p_size < size)
-		fortify_panic(__func__);
-	return __real_kmemdup(p, size, gfp);
-}
-
-/* defined after fortified strlen and memcpy to reuse them */
-__FORTIFY_INLINE char *strcpy(char *p, const char *q)
-{
-	size_t p_size = __builtin_object_size(p, 1);
-	size_t q_size = __builtin_object_size(q, 1);
-	size_t size;
-	if (p_size == (size_t)-1 && q_size == (size_t)-1)
-		return __underlying_strcpy(p, q);
-	size = strlen(q) + 1;
-	/* test here to use the more stringent object size */
-	if (p_size < size)
-		fortify_panic(__func__);
-	memcpy(p, q, size);
-	return p;
-}
-
-/* Don't use these outside the FORITFY_SOURCE implementation */
-#undef __underlying_memchr
-#undef __underlying_memcmp
-#undef __underlying_memcpy
-#undef __underlying_memmove
-#undef __underlying_memset
-#undef __underlying_strcat
-#undef __underlying_strcpy
-#undef __underlying_strlen
-#undef __underlying_strncat
-#undef __underlying_strncpy
+#include <linux/fortify-string.h>
 #endif
 
 /**
_


Thread overview: 123+ messages
2021-02-26  1:14 incoming Andrew Morton
2021-02-26  1:15 ` [patch 001/118] mm: make pagecache tagged lookups return only head pages Andrew Morton
2021-02-26  1:15 ` [patch 002/118] mm/shmem: use pagevec_lookup in shmem_unlock_mapping Andrew Morton
2021-02-26  1:15 ` [patch 003/118] mm/swap: optimise get_shadow_from_swap_cache Andrew Morton
2021-02-26  1:15 ` [patch 004/118] mm: add FGP_ENTRY Andrew Morton
2021-02-26  1:15 ` [patch 005/118] mm/filemap: rename find_get_entry to mapping_get_entry Andrew Morton
2021-02-26  1:15 ` [patch 006/118] mm/filemap: add helper for finding pages Andrew Morton
2021-02-26  1:15 ` [patch 007/118] mm/filemap: add mapping_seek_hole_data Andrew Morton
2021-02-26  1:15 ` [patch 008/118] iomap: use mapping_seek_hole_data Andrew Morton
2021-02-26  1:15 ` [patch 009/118] mm: add and use find_lock_entries Andrew Morton
2021-02-26  1:16 ` [patch 010/118] mm: add an 'end' parameter to find_get_entries Andrew Morton
2021-02-26  1:16 ` [patch 011/118] mm: add an 'end' parameter to pagevec_lookup_entries Andrew Morton
2021-02-26  1:16 ` [patch 012/118] mm: remove nr_entries parameter from pagevec_lookup_entries Andrew Morton
2021-02-26  1:16 ` [patch 013/118] mm: pass pvec directly to find_get_entries Andrew Morton
2021-02-26  1:16 ` [patch 014/118] mm: remove pagevec_lookup_entries Andrew Morton
2021-02-26  1:16 ` [patch 015/118] mm,thp,shmem: limit shmem THP alloc gfp_mask Andrew Morton
2021-02-26  1:16 ` [patch 016/118] mm,thp,shm: limit gfp mask to no more than specified Andrew Morton
2021-02-26  1:16 ` [patch 017/118] mm,thp,shmem: make khugepaged obey tmpfs mount flags Andrew Morton
2021-02-26  1:16 ` [patch 018/118] mm,shmem,thp: limit shmem THP allocations to requested zones Andrew Morton
2021-02-26  1:16 ` [patch 019/118] mm: cma: allocate cma areas bottom-up Andrew Morton
2021-02-26  1:16 ` [patch 020/118] mm/cma: expose all pages to the buddy if activation of an area fails Andrew Morton
2021-02-26  1:16 ` [patch 021/118] mm/page_alloc: count CMA pages per zone and print them in /proc/zoneinfo Andrew Morton
2021-02-26  1:16 ` [patch 022/118] mm: cma: print region name on failure Andrew Morton
2021-02-26  1:16 ` [patch 023/118] mm: vmstat: fix NOHZ wakeups for node stat changes Andrew Morton
2021-02-26  1:16 ` [patch 024/118] mm: vmstat: add some comments on internal storage of byte items Andrew Morton
2021-02-26  1:16 ` [patch 025/118] mm/vmstat.c: erase latency in vmstat_shepherd Andrew Morton
2021-02-26  1:16 ` [patch 026/118] mm: move pfn_to_online_page() out of line Andrew Morton
2021-02-26  1:17 ` [patch 027/118] mm: teach pfn_to_online_page() to consider subsection validity Andrew Morton
2021-02-26  1:17 ` [patch 028/118] mm: teach pfn_to_online_page() about ZONE_DEVICE section collisions Andrew Morton
2021-02-26  1:17 ` [patch 029/118] mm: fix memory_failure() handling of dax-namespace metadata Andrew Morton
2021-02-26  1:17 ` [patch 030/118] mm/memory_hotplug: rename all existing 'memhp' into 'mhp' Andrew Morton
2021-02-26  1:17 ` [patch 031/118] mm/memory_hotplug: MEMHP_MERGE_RESOURCE -> MHP_MERGE_RESOURCE Andrew Morton
2021-02-26  1:17 ` [patch 032/118] mm/memory_hotplug: use helper function zone_end_pfn() to get end_pfn Andrew Morton
2021-02-26  1:17 ` [patch 033/118] drivers/base/memory: don't store phys_device in memory blocks Andrew Morton
2021-02-26  1:17 ` [patch 034/118] Documentation: sysfs/memory: clarify some memory block device properties Andrew Morton
2021-02-26  1:17 ` [patch 035/118] mm/memory_hotplug: prevalidate the address range being added with platform Andrew Morton
2021-02-26  1:17 ` [patch 036/118] arm64/mm: define arch_get_mappable_range() Andrew Morton
2021-02-26  1:17 ` [patch 037/118] s390/mm: " Andrew Morton
2021-02-26  1:17 ` [patch 038/118] virtio-mem: check against mhp_get_pluggable_range() which memory we can hotplug Andrew Morton
2021-02-26  1:17 ` [patch 039/118] mm/mlock: stop counting mlocked pages when none vma is found Andrew Morton
2021-02-26  1:17 ` [patch 040/118] mm/rmap: correct some obsolete comments of anon_vma Andrew Morton
2021-02-26  1:17 ` [patch 041/118] mm/rmap: remove unneeded semicolon in page_not_mapped() Andrew Morton
2021-02-26  1:17 ` [patch 042/118] mm/rmap: fix obsolete comment in __page_check_anon_rmap() Andrew Morton
2021-02-26  1:18 ` [patch 043/118] mm/rmap: use page_not_mapped in try_to_unmap() Andrew Morton
2021-02-26  1:18 ` [patch 044/118] mm/rmap: correct obsolete comment of page_get_anon_vma() Andrew Morton
2021-02-26  1:18 ` [patch 045/118] mm/rmap: fix potential pte_unmap on an not mapped pte Andrew Morton
2021-02-26  1:18 ` [patch 046/118] mm: zswap: clean up confusing comment Andrew Morton
2021-02-26  1:18 ` [patch 047/118] mm/zswap: add the flag can_sleep_mapped Andrew Morton
2021-02-26  1:18 ` [patch 048/118] mm: set the sleep_mapped to true for zbud and z3fold Andrew Morton
2021-02-26  1:18 ` [patch 049/118] mm/zsmalloc.c: convert to use kmem_cache_zalloc in cache_alloc_zspage() Andrew Morton
2021-02-26  1:18 ` [patch 050/118] zsmalloc: account the number of compacted pages correctly Andrew Morton
2021-02-26  1:18 ` [patch 051/118] mm/zsmalloc.c: use page_private() to access page->private Andrew Morton
2021-02-26  1:18 ` [patch 052/118] mm: page-flags.h: Typo fix (It -> If) Andrew Morton
2021-02-26  1:18 ` [patch 053/118] mm/dmapool: use might_alloc() Andrew Morton
2021-02-26  1:18 ` [patch 054/118] mm/backing-dev.c: " Andrew Morton
2021-02-26  1:18 ` [patch 055/118] mm/early_ioremap.c: use __func__ instead of function name Andrew Morton
2021-02-26  1:18 ` [patch 056/118] mm: add Kernel Electric-Fence infrastructure Andrew Morton
2021-02-26  1:18 ` [patch 057/118] x86, kfence: enable KFENCE for x86 Andrew Morton
2021-02-26  1:19 ` [patch 058/118] arm64, kfence: enable KFENCE for ARM64 Andrew Morton
2021-02-26  1:19 ` [patch 059/118] kfence: use pt_regs to generate stack trace on faults Andrew Morton
2021-02-26  1:19 ` [patch 060/118] mm, kfence: insert KFENCE hooks for SLAB Andrew Morton
2021-02-26  1:19 ` [patch 061/118] mm, kfence: insert KFENCE hooks for SLUB Andrew Morton
2021-02-26  1:19 ` [patch 062/118] kfence, kasan: make KFENCE compatible with KASAN Andrew Morton
2021-02-26  1:19 ` [patch 063/118] kfence, Documentation: add KFENCE documentation Andrew Morton
2021-02-26  1:19 ` [patch 064/118] kfence: add test suite Andrew Morton
2021-02-26  1:19 ` [patch 065/118] MAINTAINERS: add entry for KFENCE Andrew Morton
2021-02-26  1:19 ` [patch 066/118] kfence: report sensitive information based on no_hash_pointers Andrew Morton
2021-02-26  1:19 ` [patch 067/118] tracing: add error_report_end trace point Andrew Morton
2021-02-26 14:03   ` Steven Rostedt
2021-02-26  1:19 ` [patch 068/118] kfence: use error_report_end tracepoint Andrew Morton
2021-02-26  1:19 ` [patch 069/118] kasan: " Andrew Morton
2021-02-26  1:19 ` [patch 070/118] kasan, mm: don't save alloc stacks twice Andrew Morton
2021-02-26  1:19 ` [patch 071/118] kasan, mm: optimize kmalloc poisoning Andrew Morton
2021-02-26  1:20 ` [patch 072/118] kasan: optimize large " Andrew Morton
2021-02-26  1:20 ` [patch 073/118] kasan: clean up setting free info in kasan_slab_free Andrew Morton
2021-02-26  1:20 ` [patch 074/118] kasan: unify large kfree checks Andrew Morton
2021-02-26  1:20 ` [patch 075/118] kasan: rework krealloc tests Andrew Morton
2021-02-26  1:20 ` [patch 076/118] kasan, mm: fail krealloc on freed objects Andrew Morton
2021-02-26  1:20 ` [patch 077/118] kasan, mm: optimize krealloc poisoning Andrew Morton
2021-02-26  1:20 ` [patch 078/118] kasan: ensure poisoning size alignment Andrew Morton
2021-02-26  1:20 ` [patch 079/118] arm64: kasan: simplify and inline MTE functions Andrew Morton
2021-02-26  1:20 ` [patch 080/118] kasan: inline HW_TAGS helper functions Andrew Morton
2021-02-26  1:20 ` [patch 081/118] kasan: clarify that only first bug is reported in HW_TAGS Andrew Morton
2021-02-26  1:20 ` [patch 082/118] alpha: remove CONFIG_EXPERIMENTAL from defconfigs Andrew Morton
2021-02-26  1:20 ` [patch 083/118] proc/wchan: use printk format instead of lookup_symbol_name() Andrew Morton
2021-02-26  1:20 ` [patch 084/118] proc: use kvzalloc for our kernel buffer Andrew Morton
2021-02-26  1:20 ` [patch 085/118] sysctl.c: fix underflow value setting risk in vm_table Andrew Morton
2021-02-26  1:20 ` [patch 086/118] include/linux: remove repeated words Andrew Morton
2021-02-26  1:21 ` [patch 087/118] treewide: Miguel has moved Andrew Morton
2021-02-26  1:21 ` [patch 088/118] groups: use flexible-array member in struct group_info Andrew Morton
2021-02-26  1:21 ` [patch 089/118] groups: simplify struct group_info allocation Andrew Morton
2021-02-26  1:21 ` [patch 090/118] kernel: delete repeated words in comments Andrew Morton
2021-02-26  1:21 ` [patch 091/118] MAINTAINERS: add uapi directories to API/ABI section Andrew Morton
2021-02-27 19:18   ` Michael Kerrisk (man-pages)
2021-02-26  1:21 ` [patch 092/118] lib/genalloc.c: change return type to unsigned long for bitmap_set_ll Andrew Morton
2021-02-26  1:21 ` Andrew Morton [this message]
2021-02-26  1:21 ` [patch 094/118] lib: stackdepot: add support to configure STACK_HASH_SIZE Andrew Morton
2021-02-26  1:21 ` [patch 095/118] lib: stackdepot: add support to disable stack depot Andrew Morton
2021-02-26  1:21 ` [patch 096/118] lib: stackdepot: fix ignoring return value warning Andrew Morton
2021-02-26  1:21 ` [patch 097/118] lib/cmdline: remove an unneeded local variable in next_arg() Andrew Morton
2021-02-26  1:21 ` [patch 098/118] include/linux/bitops.h: spelling s/synomyn/synonym/ Andrew Morton
2021-02-26  1:21 ` [patch 099/118] checkpatch: improve blank line after declaration test Andrew Morton
2021-02-26  1:21 ` [patch 100/118] checkpatch: ignore warning designated initializers using NR_CPUS Andrew Morton
2021-02-26  1:21 ` [patch 101/118] checkpatch: trivial style fixes Andrew Morton
2021-02-26  1:21 ` [patch 102/118] checkpatch: prefer ftrace over function entry/exit printks Andrew Morton
2021-02-26  1:21 ` [patch 103/118] checkpatch: improve TYPECAST_INT_CONSTANT test message Andrew Morton
2021-02-26  1:21 ` [patch 104/118] checkpatch: add warning for avoiding .L prefix symbols in assembly files Andrew Morton
2021-02-26  1:22 ` [patch 105/118] checkpatch: add kmalloc_array_node to unnecessary OOM message check Andrew Morton
2021-02-26  1:22 ` [patch 106/118] checkpatch: don't warn about colon termination in linker scripts Andrew Morton
2021-02-26  1:22 ` [patch 107/118] checkpatch: do not apply "initialise globals to 0" check to BPF progs Andrew Morton
2021-02-26  1:22 ` [patch 108/118] init/version.c: remove Version_<LINUX_VERSION_CODE> symbol Andrew Morton
2021-02-26  1:22 ` [patch 109/118] init: clean up early_param_on_off() macro Andrew Morton
2021-02-26  1:22 ` [patch 110/118] init/Kconfig: fix a typo in CC_VERSION_TEXT help text Andrew Morton
2021-02-26  1:22 ` [patch 111/118] fs/coredump: use kmap_local_page() Andrew Morton
2021-02-26  1:22 ` [patch 112/118] seq_file: document how per-entry resources are managed Andrew Morton
2021-02-26  1:22 ` [patch 113/118] x86: fix seq_file iteration for pat/memtype.c Andrew Morton
2021-02-26  1:22 ` [patch 114/118] scripts/gdb: fix list_for_each Andrew Morton
2021-02-26  1:22 ` [patch 115/118] kgdb: fix to kill breakpoints on initmem after boot Andrew Morton
2021-02-26  1:22 ` [patch 116/118] ubsan: remove overflow checks Andrew Morton
2021-02-26  1:22 ` [patch 117/118] initramfs: panic with memory information Andrew Morton
2021-02-26  1:22 ` [patch 118/118] MIPS: make userspace mapping young by default Andrew Morton
2021-02-26 17:55 ` incoming Linus Torvalds
2021-02-26 19:16   ` incoming Andrew Morton

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save this message as an mbox file, import it into your mail client,
  and reply-to-all from there.

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210226012120.4bh5TuJm0%akpm@linux-foundation.org \
    --to=akpm@linux-foundation.org \
    --cc=laniel_francis@privacyrequired.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=mm-commits@vger.kernel.org \
    --cc=torvalds@linux-foundation.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

Be sure your reply has a Subject: header at the top and a blank line before the message body.