diff --git a/config/kernel-kmem.m4 b/config/kernel-kmem.m4
new file mode 100644
index 0000000..43f9e72
--- /dev/null
+++ b/config/kernel-kmem.m4
@@ -0,0 +1,108 @@
+dnl #
+dnl # Disabled by default, this provides a minimal level of memory tracking.
+dnl # A total count of bytes allocated is kept for each alloc and free.
+dnl # Then at module unload time a report will be printed to the console
+dnl # if memory was leaked.
+dnl #
+AC_DEFUN([SPL_AC_DEBUG_KMEM], [
+	AC_ARG_ENABLE([debug-kmem],
+		[AS_HELP_STRING([--enable-debug-kmem],
+		[Enable basic kmem accounting @<:@default=no@:>@])],
+		[],
+		[enable_debug_kmem=no])
+
+	AS_IF([test "x$enable_debug_kmem" = xyes],
+	[
+		KERNELCPPFLAGS="${KERNELCPPFLAGS} -DDEBUG_KMEM"
+		DEBUG_KMEM="_with_debug_kmem"
+		AC_DEFINE([DEBUG_KMEM], [1],
+		[Define to 1 to enable basic kmem accounting])
+	], [
+		DEBUG_KMEM="_without_debug_kmem"
+	])
+
+	AC_SUBST(DEBUG_KMEM)
+	AC_MSG_CHECKING([whether basic kmem accounting is enabled])
+	AC_MSG_RESULT([$enable_debug_kmem])
+])
+
+dnl #
+dnl # Disabled by default, this provides detailed memory tracking.  This
+dnl # feature also requires --enable-debug-kmem to be set.  When enabled
+dnl # not only will total bytes be tracked but also the location of every
+dnl # alloc and free.  When the SPL module is unloaded a list of all leaked
+dnl # addresses and where they were allocated will be dumped to the console.
+dnl # Enabling this feature has a significant impact on performance but it
+dnl # makes finding memory leaks straightforward.
+dnl #
+AC_DEFUN([SPL_AC_DEBUG_KMEM_TRACKING], [
+	AC_ARG_ENABLE([debug-kmem-tracking],
+		[AS_HELP_STRING([--enable-debug-kmem-tracking],
+		[Enable detailed kmem tracking @<:@default=no@:>@])],
+		[],
+		[enable_debug_kmem_tracking=no])
+
+	AS_IF([test "x$enable_debug_kmem_tracking" = xyes],
+	[
+		KERNELCPPFLAGS="${KERNELCPPFLAGS} -DDEBUG_KMEM_TRACKING"
+		DEBUG_KMEM_TRACKING="_with_debug_kmem_tracking"
+		AC_DEFINE([DEBUG_KMEM_TRACKING], [1],
+		[Define to 1 to enable detailed kmem tracking])
+	], [
+		DEBUG_KMEM_TRACKING="_without_debug_kmem_tracking"
+	])
+
+	AC_SUBST(DEBUG_KMEM_TRACKING)
+	AC_MSG_CHECKING([whether detailed kmem tracking is enabled])
+	AC_MSG_RESULT([$enable_debug_kmem_tracking])
+])
+
+dnl #
+dnl # 4.12 API,
+dnl # Added kvmalloc allocation strategy
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_KVMALLOC], [
+	ZFS_LINUX_TEST_SRC([kvmalloc], [
+		#include <linux/mm.h>
+	],[
+		void *p __attribute__ ((unused));
+
+		p = kvmalloc(0, GFP_KERNEL);
+	])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_KVMALLOC], [
+	AC_MSG_CHECKING([whether kvmalloc(size, flags) is available])
+	ZFS_LINUX_TEST_RESULT([kvmalloc], [
+		AC_MSG_RESULT(yes)
+		AC_DEFINE(HAVE_KVMALLOC, 1, [kvmalloc exists])
+	],[
+		AC_MSG_RESULT(no)
+	])
+])
+
+dnl #
+dnl # 5.8 API,
+dnl # __vmalloc PAGE_KERNEL removal
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL], [
+	ZFS_LINUX_TEST_SRC([__vmalloc], [
+		#include <linux/mm.h>
+		#include <linux/vmalloc.h>
+	],[
+		void *p __attribute__ ((unused));
+
+		p = __vmalloc(0, GFP_KERNEL, PAGE_KERNEL);
+	])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL], [
+	AC_MSG_CHECKING([whether __vmalloc(size, flags, pageflags) is available])
+	ZFS_LINUX_TEST_RESULT([__vmalloc], [
+		AC_MSG_RESULT(yes)
+		AC_DEFINE(HAVE_VMALLOC_PAGE_KERNEL, 1, [__vmalloc page flags exists])
+	],[
+		AC_MSG_RESULT(no)
+	])
+])
\ No newline at end of file
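For context (not part of the patch): the ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL probe works because a test program like the one below only compiles against kernels where __vmalloc() still takes a pgprot_t argument. On 5.8+ kernels the extra PAGE_KERNEL argument breaks the build, so HAVE_VMALLOC_PAGE_KERNEL stays undefined. This is a sketch of what the generated conftest body amounts to, reconstructed from the macro above; the function name is mine, not something ZFS_LINUX_TEST_SRC emits literally.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Compiles only when __vmalloc() still accepts (size, gfp_mask, prot),
 * i.e. on kernels older than 5.8.
 */
static void __attribute__ ((unused))
vmalloc_page_kernel_test(void)
{
	void *p __attribute__ ((unused));

	p = __vmalloc(0, GFP_KERNEL, PAGE_KERNEL);
}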
diff --git a/config/kernel.m4 b/config/kernel.m4
index b67fcef..23edfdc 100644
--- a/config/kernel.m4
+++ b/config/kernel.m4
@@ -45,6 +45,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
 	ZFS_AC_KERNEL_SRC_SCHED
 	ZFS_AC_KERNEL_SRC_USLEEP_RANGE
 	ZFS_AC_KERNEL_SRC_KMEM_CACHE
+	ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL
 	ZFS_AC_KERNEL_SRC_WAIT
 	ZFS_AC_KERNEL_SRC_INODE_TIMES
 	ZFS_AC_KERNEL_SRC_INODE_LOCK
@@ -163,6 +164,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
 	ZFS_AC_KERNEL_SCHED
 	ZFS_AC_KERNEL_USLEEP_RANGE
 	ZFS_AC_KERNEL_KMEM_CACHE
+	ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL
 	ZFS_AC_KERNEL_WAIT
 	ZFS_AC_KERNEL_INODE_TIMES
 	ZFS_AC_KERNEL_INODE_LOCK
diff --git a/include/spl/sys/kmem.h b/include/spl/sys/kmem.h
index 72d3a77..62bb36e 100644
--- a/include/spl/sys/kmem.h
+++ b/include/spl/sys/kmem.h
@@ -169,6 +169,15 @@ extern void *spl_kmem_alloc(size_t sz, int fl, const char *func, int line);
 extern void *spl_kmem_zalloc(size_t sz, int fl, const char *func, int line);
 extern void spl_kmem_free(const void *ptr, size_t sz);
 
+/*
+ * 5.8 API change, pgprot_t argument removed.
+ */
+#ifdef HAVE_VMALLOC_PAGE_KERNEL
+#define	spl_vmalloc(size, flags)	__vmalloc(size, flags, PAGE_KERNEL)
+#else
+#define	spl_vmalloc(size, flags)	__vmalloc(size, flags)
+#endif
+
 /*
  * The following functions are only available for internal use.
  */
diff --git a/module/spl/spl-kmem-cache.c b/module/spl/spl-kmem-cache.c
index d71b4b3..4866b29 100644
--- a/module/spl/spl-kmem-cache.c
+++ b/module/spl/spl-kmem-cache.c
@@ -203,7 +203,7 @@ kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
 		ASSERT(ISP2(size));
 		ptr = (void *)__get_free_pages(lflags, get_order(size));
 	} else {
-		ptr = __vmalloc(size, lflags | __GFP_HIGHMEM, PAGE_KERNEL);
+		ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
 	}
 
 	/* Resulting allocated memory will be page aligned */
@@ -1242,7 +1242,7 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
 	 * allocation.
 	 *
 	 * However, this can't be applied to KVM_VMEM due to a bug that
-	 * __vmalloc() doesn't honor gfp flags in page table allocation.
+	 * spl_vmalloc() doesn't honor gfp flags in page table allocation.
 	 */
 	if (!(skc->skc_flags & KMC_VMEM)) {
 		rc = __spl_cache_grow(skc, flags | KM_NOSLEEP);
diff --git a/module/spl/spl-kmem.c b/module/spl/spl-kmem.c
index cee69ad..c1ddb06 100644
--- a/module/spl/spl-kmem.c
+++ b/module/spl/spl-kmem.c
@@ -172,7 +172,7 @@ spl_kmem_alloc_impl(size_t size, int flags, int node)
 	 * kmem_zalloc() callers.
 	 *
 	 * For vmem_alloc() and vmem_zalloc() callers it is permissible
-	 * to use __vmalloc().  However, in general use of __vmalloc()
+	 * to use spl_vmalloc().  However, in general use of spl_vmalloc()
 	 * is strongly discouraged because a global lock must be
 	 * acquired.  Contention on this lock can significantly
 	 * impact performance so frequently manipulating the virtual
@@ -180,8 +180,7 @@ spl_kmem_alloc_impl(size_t size, int flags, int node)
 	 */
 	if ((size > spl_kmem_alloc_max) || use_vmem) {
 		if (flags & KM_VMEM) {
-			ptr = __vmalloc(size, lflags | __GFP_HIGHMEM,
-			    PAGE_KERNEL);
+			ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
 		} else {
 			return (NULL);
 		}
@@ -194,7 +193,7 @@ spl_kmem_alloc_impl(size_t size, int flags, int node)
 
 	/*
 	 * For vmem_alloc() and vmem_zalloc() callers retry immediately
-	 * using __vmalloc() which is unlikely to fail.
+	 * using spl_vmalloc() which is unlikely to fail.
 	 */
 	if ((flags & KM_VMEM) && (use_vmem == 0)) {
 		use_vmem = 1;
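As a usage illustration (a hypothetical caller, not taken from the patch): with the spl_vmalloc() wrapper added to include/spl/sys/kmem.h, an allocation path like the converted one in kv_alloc() or spl_kmem_alloc_impl() builds unchanged on both pre-5.8 and 5.8+ kernels; only the macro selects which __vmalloc() signature is used at compile time. The macro below is copied from the patch, while example_vmem_alloc() and its flags are merely a sketch mirroring the converted callers.

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>

/*
 * Wrapper as introduced by this patch; HAVE_VMALLOC_PAGE_KERNEL is set by
 * the configure probe in config/kernel-kmem.m4.
 */
#ifdef HAVE_VMALLOC_PAGE_KERNEL
#define	spl_vmalloc(size, flags)	__vmalloc(size, flags, PAGE_KERNEL)
#else
#define	spl_vmalloc(size, flags)	__vmalloc(size, flags)
#endif

/* Hypothetical caller; __GFP_HIGHMEM mirrors the lflags used by kv_alloc(). */
static void *
example_vmem_alloc(size_t size)
{
	return (spl_vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM));
}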