# SPDX-License-Identifier: GPL-2.0-only
menu "Memory Management options"

config SELECT_MEMORY_MODEL
	def_bool y
	depends on ARCH_SELECT_MEMORY_MODEL

choice
	prompt "Memory model"
	depends on SELECT_MEMORY_MODEL
	default DISCONTIGMEM_MANUAL if ARCH_DISCONTIGMEM_DEFAULT
	default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
	default FLATMEM_MANUAL
	help
	  This option allows you to change some of the ways that
	  Linux manages its memory internally. Most users will
	  only have one option here selected by the architecture
	  configuration. This is normal.

config FLATMEM_MANUAL
	bool "Flat Memory"
	depends on !(ARCH_DISCONTIGMEM_ENABLE || ARCH_SPARSEMEM_ENABLE) || ARCH_FLATMEM_ENABLE
	help
	  This option is best suited for non-NUMA systems with
	  flat address space. FLATMEM is the most efficient memory
	  model in terms of performance and resource consumption,
	  and it is the best option for smaller systems.

	  For systems that have holes in their physical address
	  spaces and for features like NUMA and memory hotplug,
	  choose "Sparse Memory".

	  If unsure, choose this option (Flat Memory) over any other.

config DISCONTIGMEM_MANUAL
	bool "Discontiguous Memory"
	depends on ARCH_DISCONTIGMEM_ENABLE
	help
	  This option provides enhanced support for discontiguous
	  memory systems, over FLATMEM. These systems have holes
	  in their physical address spaces, and this option provides
	  more efficient handling of these holes.

	  Although "Discontiguous Memory" is still used by several
	  architectures, it is considered deprecated in favor of
	  "Sparse Memory".

	  If unsure, choose "Sparse Memory" over this option.

config SPARSEMEM_MANUAL
	bool "Sparse Memory"
	depends on ARCH_SPARSEMEM_ENABLE
	help
	  This will be the only option for some systems, including
	  memory hot-plug systems. This is normal.

	  This option provides efficient support for systems with
	  holes in their physical address space and allows memory
	  hot-plug and hot-remove.

	  If unsure, choose "Flat Memory" over this option.

endchoice

config DISCONTIGMEM
	def_bool y
	depends on (!SELECT_MEMORY_MODEL && ARCH_DISCONTIGMEM_ENABLE) || DISCONTIGMEM_MANUAL

config SPARSEMEM
	def_bool y
	depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL

config FLATMEM
	def_bool y
	depends on (!DISCONTIGMEM && !SPARSEMEM) || FLATMEM_MANUAL

config FLAT_NODE_MEM_MAP
	def_bool y
	depends on !SPARSEMEM

#
# Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
# to represent different areas of memory. This variable allows
# those dependencies to exist individually.
#
config NEED_MULTIPLE_NODES
	def_bool y
	depends on DISCONTIGMEM || NUMA

config HAVE_MEMORY_PRESENT
	def_bool y
	depends on ARCH_HAVE_MEMORY_PRESENT || SPARSEMEM

#
# SPARSEMEM_EXTREME (which is the default) does some bootmem
# allocations when memory_present() is called. If this cannot
# be done on your architecture, select this option. However,
# statically allocating the mem_section[] array can potentially
# consume vast quantities of .bss, so be careful.
#
# This option will also potentially produce smaller runtime code
# with gcc 3.4 and later.
#
config SPARSEMEM_STATIC
	bool

#
# Architecture platforms which require a two level mem_section in SPARSEMEM
# must select this option. This is usually for architecture platforms with
# an extremely sparse physical address space.
#
config SPARSEMEM_EXTREME
	def_bool y
	depends on SPARSEMEM && !SPARSEMEM_STATIC

config SPARSEMEM_VMEMMAP_ENABLE
	bool

config SPARSEMEM_VMEMMAP
	bool "Sparse Memory virtual memmap"
	depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
	default y
	help
	  SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
	  pfn_to_page and page_to_pfn operations. This is the most
	  efficient option when sufficient kernel resources are available.

config HAVE_MEMBLOCK_NODE_MAP
	bool

config HAVE_MEMBLOCK_PHYS_MAP
	bool

config HAVE_FAST_GUP
	depends on MMU
	bool

config ARCH_KEEP_MEMBLOCK
	bool

config MEMORY_ISOLATION
	bool

#
# Only to be set on architectures that have completely implemented the
# memory hotplug feature. If you are not sure, don't touch it.
#
config HAVE_BOOTMEM_INFO_NODE
	def_bool n

# eventually, we can have this option just 'select SPARSEMEM'
config MEMORY_HOTPLUG
	bool "Allow for memory hot-add"
	depends on SPARSEMEM || X86_64_ACPI_NUMA
	depends on ARCH_ENABLE_MEMORY_HOTPLUG

config MEMORY_HOTPLUG_SPARSE
	def_bool y
	depends on SPARSEMEM && MEMORY_HOTPLUG

config MEMORY_HOTPLUG_DEFAULT_ONLINE
	bool "Online the newly added memory blocks by default"
	depends on MEMORY_HOTPLUG
	help
	  This option sets the default value of the memory hotplug onlining
	  policy (/sys/devices/system/memory/auto_online_blocks), which
	  determines what happens to newly added memory regions. The policy
	  can always be changed at runtime.
	  See Documentation/admin-guide/mm/memory-hotplug.rst for more information.

	  Say Y here if you want all hot-plugged memory blocks to appear in
	  'online' state by default.
	  Say N here if you want the default policy to keep all hot-plugged
	  memory blocks in 'offline' state.
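
#
# Example (sketch): whichever default is chosen above, the onlining policy can
# be changed at runtime through the sysfs file named in the help text, e.g.
# as root:
#
#   echo online > /sys/devices/system/memory/auto_online_blocks
#
# Valid values are listed in Documentation/admin-guide/mm/memory-hotplug.rst.
#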

config MEMORY_HOTREMOVE
	bool "Allow for memory hot remove"
	select MEMORY_ISOLATION
	select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
	depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
	depends on MIGRATION

# Heavily threaded applications may benefit from splitting the mm-wide
# page_table_lock, so that faults on different parts of the user address
# space can be handled with less contention: split it at this NR_CPUS.
# Default to 4 for wider testing, though 8 might be more appropriate.
# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
#
config SPLIT_PTLOCK_CPUS
	int
	default "999999" if !MMU
	default "999999" if ARM && !CPU_CACHE_VIPT
	default "999999" if PARISC && !PA20
	default "4"

config ARCH_ENABLE_SPLIT_PMD_PTLOCK
	bool

#
# support for memory balloon
config MEMORY_BALLOON
	bool

#
# support for memory balloon compaction
config BALLOON_COMPACTION
	bool "Allow for balloon memory compaction/migration"
	def_bool y
	depends on COMPACTION && MEMORY_BALLOON
	help
	  Memory fragmentation introduced by ballooning might reduce
	  significantly the number of 2MB contiguous memory blocks that can be
	  used within a guest, thus imposing performance penalties associated
	  with the reduced number of transparent huge pages that could be used
	  by the guest workload. Allowing the compaction & migration of memory
	  pages enlisted as being part of memory balloon devices avoids the
	  aforementioned scenario and helps improve memory defragmentation.

#
# support for memory compaction
config COMPACTION
	bool "Allow for memory compaction"
	def_bool y
	select MIGRATION
	depends on MMU
	help
	  Compaction is the only memory management component to form
	  high order (larger physically contiguous) memory blocks
	  reliably. The page allocator relies on compaction heavily and
	  the lack of the feature can lead to unexpected OOM killer
	  invocations for high order memory requests. You shouldn't
	  disable this option unless there really is a strong reason for
	  it and then we would be really interested to hear about that at
	  linux-mm@kvack.org.
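
#
# Example (sketch, assuming COMPACTION=y and sysctl support): compaction of all
# zones can also be triggered manually at runtime, which is useful when
# experimenting with high-order allocations:
#
#   echo 1 > /proc/sys/vm/compact_memory
#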

#
# support for page migration
#
config MIGRATION
	bool "Page migration"
	def_bool y
	depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
	help
	  Allows the migration of the physical location of pages of processes
	  while the virtual addresses are not changed. This is useful in
	  two situations. The first is on NUMA systems to put pages nearer
	  to the processors accessing them. The second is when allocating huge
	  pages as migration can relocate pages to satisfy a huge page
	  allocation instead of reclaiming.

config ARCH_ENABLE_HUGEPAGE_MIGRATION
	bool

config ARCH_ENABLE_THP_MIGRATION
	bool

config CONTIG_ALLOC
	def_bool (MEMORY_ISOLATION && COMPACTION) || CMA

config PHYS_ADDR_T_64BIT
	def_bool 64BIT

config BOUNCE
	bool "Enable bounce buffers"
	default y
	depends on BLOCK && MMU && (ZONE_DMA || HIGHMEM)
	help
	  Enable bounce buffers for devices that cannot access
	  the full range of memory available to the CPU. Enabled
	  by default when ZONE_DMA or HIGHMEM is selected, but you
	  may say n to override this.

config VIRT_TO_BUS
	bool
	help
	  An architecture should select this if it implements the
	  deprecated interface virt_to_bus(). All new architectures
	  should probably not select this.

config MMU_NOTIFIER
	bool
	select SRCU

config KSM
	bool "Enable KSM for page merging"
	depends on MMU
	select XXHASH
	help
	  Enable Kernel Samepage Merging: KSM periodically scans those areas
	  of an application's address space that an app has advised may be
	  mergeable. When it finds pages of identical content, it replaces
	  the many instances by a single page with that content, so
	  saving memory until one or another app needs to modify the content.
	  Recommended for use with KVM, or with other duplicative applications.
	  See Documentation/vm/ksm.rst for more information: KSM is inactive
	  until a program has madvised that an area is MADV_MERGEABLE, and
	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
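
#
# Example (sketch): with KSM=y, an application opts a range in with
# madvise(addr, length, MADV_MERGEABLE), and root then starts the scanner:
#
#   echo 1 > /sys/kernel/mm/ksm/run
#
# The KSM documentation referenced above describes the remaining sysfs knobs.
#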

config DEFAULT_MMAP_MIN_ADDR
	int "Low address space to protect from user allocation"
	depends on MMU
	default 4096
	help
	  This is the portion of low virtual memory which should be protected
	  from userspace allocation. Keeping a user from writing to low pages
	  can help reduce the impact of kernel NULL pointer bugs.

	  For most ia64, ppc64 and x86 users with lots of address space
	  a value of 65536 is reasonable and should cause no problems.
	  On arm and other archs it should not be higher than 32768.
	  Programs which use vm86 functionality or have some need to map
	  this low address space will need CAP_SYS_RAWIO or disable this
	  protection by setting the value to 0.

	  This value can be changed after boot using the
	  /proc/sys/vm/mmap_min_addr tunable.
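
#
# Example (sketch): raising the limit at runtime on a machine with plenty of
# address space, as root:
#
#   echo 65536 > /proc/sys/vm/mmap_min_addr
#
# (equivalently: sysctl -w vm.mmap_min_addr=65536)
#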

config ARCH_SUPPORTS_MEMORY_FAILURE
	bool

config MEMORY_FAILURE
	depends on MMU
	depends on ARCH_SUPPORTS_MEMORY_FAILURE
	bool "Enable recovery from hardware memory errors"
	select MEMORY_ISOLATION
	select RAS
	help
	  Enables code to recover from some memory failures on systems
	  with MCA recovery. This allows a system to continue running
	  even when some of its memory has uncorrected errors. This requires
	  special hardware support and typically ECC memory.

config HWPOISON_INJECT
	tristate "HWPoison pages injector"
	depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
	select PROC_PAGE_MONITOR

config NOMMU_INITIAL_TRIM_EXCESS
	int "Turn on mmap() excess space trimming before booting"
	depends on !MMU
	default 1
	help
	  The NOMMU mmap() frequently needs to allocate large contiguous chunks
	  of memory on which to store mappings, but it can only ask the system
	  allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
	  more than it requires. To deal with this, mmap() is able to trim off
	  the excess and return it to the allocator.

	  If trimming is enabled, the excess is trimmed off and returned to the
	  system allocator, which can cause extra fragmentation, particularly
	  if there are a lot of transient processes.

	  If trimming is disabled, the excess is kept, but not used, which for
	  long-term mappings means that the space is wasted.

	  Trimming can be dynamically controlled through a sysctl option
	  (/proc/sys/vm/nr_trim_pages) which specifies the minimum number of
	  excess pages there must be before trimming should occur, or zero if
	  no trimming is to occur.

	  This option specifies the initial value of the nr_trim_pages sysctl.
	  The default of 1 says that all excess pages should be trimmed.

	  See Documentation/nommu-mmap.txt for more information.
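
#
# Example (sketch, NOMMU kernels with procfs): trimming can be disabled at
# runtime, trading memory for less fragmentation:
#
#   echo 0 > /proc/sys/vm/nr_trim_pages
#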

config TRANSPARENT_HUGEPAGE
	bool "Transparent Hugepage Support"
	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
	select COMPACTION
	select XARRAY_MULTI
	help
	  Transparent Hugepages allows the kernel to use huge pages and
	  huge tlb transparently to the applications whenever possible.
	  This feature can improve computing performance to certain
	  applications by speeding up page faults during memory
	  allocation, by reducing the number of tlb misses and by speeding
	  up the pagetable walking.

	  If memory is constrained on an embedded system, you may want to say N.

choice
	prompt "Transparent Hugepage Support sysfs defaults"
	depends on TRANSPARENT_HUGEPAGE
	default TRANSPARENT_HUGEPAGE_ALWAYS
	help
	  Selects the sysfs defaults for Transparent Hugepage Support.

	config TRANSPARENT_HUGEPAGE_ALWAYS
		bool "always"
		help
		  Enabling Transparent Hugepage always can increase the
		  memory footprint of applications without a guaranteed
		  benefit, but it will work automatically for all applications.

	config TRANSPARENT_HUGEPAGE_MADVISE
		bool "madvise"
		help
		  Enabling Transparent Hugepage madvise will only provide a
		  performance benefit to the applications using
		  madvise(MADV_HUGEPAGE), but it won't risk increasing the
		  memory footprint of applications without a guaranteed
		  benefit.
endchoice
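
#
# Example (sketch, assuming TRANSPARENT_HUGEPAGE=y and sysfs): the default
# chosen above can be overridden at runtime:
#
#   echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
#   cat /sys/kernel/mm/transparent_hugepage/enabled    # e.g. "always [madvise] never"
#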

config ARCH_WANTS_THP_SWAP
	def_bool n

config THP_SWAP
	def_bool y
	depends on TRANSPARENT_HUGEPAGE && ARCH_WANTS_THP_SWAP && SWAP
	help
	  Swap transparent huge pages in one piece, without splitting.
	  XXX: For now, swap cluster backing transparent huge page
	  will be split after swapout.

	  For selection by architectures with reasonable THP sizes.

config TRANSPARENT_HUGE_PAGECACHE
	def_bool y
	depends on TRANSPARENT_HUGEPAGE

#
# UP and nommu archs use km based percpu allocator
#
config NEED_PER_CPU_KM
	depends on !SMP
	bool
	default y

config CLEANCACHE
	bool "Enable cleancache driver to cache clean pages if tmem is present"
	help
	  Cleancache can be thought of as a page-granularity victim cache
	  for clean pages that the kernel's pageframe replacement algorithm
	  (PFRA) would like to keep around, but can't since there isn't enough
	  memory. So when the PFRA "evicts" a page, it first attempts to use
	  cleancache code to put the data contained in that page into
	  "transcendent memory", memory that is not directly accessible or
	  addressable by the kernel and is of unknown and possibly
	  time-varying size. And when a cleancache-enabled
	  filesystem wishes to access a page in a file on disk, it first
	  checks cleancache to see if it already contains it; if it does,
	  the page is copied into the kernel and a disk access is avoided.
	  When a transcendent memory driver is available (such as zcache or
	  Xen transcendent memory), a significant I/O reduction
	  may be achieved. When none is available, all cleancache calls
	  are reduced to a single pointer-compare-against-NULL resulting
	  in a negligible performance hit.

	  If unsure, say Y to enable cleancache.

config FRONTSWAP
	bool "Enable frontswap to cache swap pages if tmem is present"
	depends on SWAP
	help
	  Frontswap is so named because it can be thought of as the opposite
	  of a "backing" store for a swap device. The data is stored into
	  "transcendent memory", memory that is not directly accessible or
	  addressable by the kernel and is of unknown and possibly
	  time-varying size. When space in transcendent memory is available,
	  a significant swap I/O reduction may be achieved. When none is
	  available, all frontswap calls are reduced to a single pointer-
	  compare-against-NULL resulting in a negligible performance hit
	  and swap data is stored as normal on the matching swap device.

	  If unsure, say Y to enable frontswap.

config CMA
	bool "Contiguous Memory Allocator"
	depends on MMU
	select MIGRATION
	select MEMORY_ISOLATION
	help
	  This enables the Contiguous Memory Allocator which allows other
	  subsystems to allocate big physically-contiguous blocks of memory.
	  CMA reserves a region of memory and allows only movable pages to
	  be allocated from it. This way, the kernel can use the memory for
	  pagecache, and when a subsystem requests a contiguous area, the
	  allocated pages are migrated away to serve the contiguous request.

	  If unsure, say "n".
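
#
# Example (sketch): when the default CMA area used by the DMA subsystem is
# also enabled (DMA_CMA on most architectures), its size can be set on the
# kernel command line, e.g.:
#
#   cma=128M
#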

config CMA_DEBUG
	bool "CMA debug messages (DEVELOPMENT)"
	depends on DEBUG_KERNEL && CMA
	help
	  Turns on debug messages in CMA. This produces KERN_DEBUG
	  messages for every CMA call as well as various messages while
	  processing calls such as dma_alloc_from_contiguous().
	  This option does not affect warning and error messages.

config CMA_DEBUGFS
	bool "CMA debugfs interface"
	depends on CMA && DEBUG_FS
	help
	  Turns on the DebugFS interface for CMA.

config CMA_AREAS
	int "Maximum count of the CMA areas"
	depends on CMA
	default 7
	help
	  CMA allows creating CMA areas for a particular purpose, mainly
	  used as device-private areas. This parameter sets the maximum
	  number of CMA areas in the system.

	  If unsure, leave the default value "7".

config MEM_SOFT_DIRTY
	bool "Track memory changes"
	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
	select PROC_PAGE_MONITOR
	help
	  This option enables memory changes tracking by introducing a
	  soft-dirty bit on pte-s. This bit is set when someone writes
	  into a page, just like the regular dirty bit, but unlike the
	  latter it can be cleared by hand.

	  See Documentation/admin-guide/mm/soft-dirty.rst for more details.
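
#
# Example (sketch, following soft-dirty.rst): a checkpoint tool clears the
# soft-dirty bits for a task and later reads them back from pagemap (bit 55):
#
#   echo 4 > /proc/<pid>/clear_refs
#   ... let the task run ...
#   read /proc/<pid>/pagemap and test bit 55 of each entry
#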

config ZSWAP
	bool "Compressed cache for swap pages (EXPERIMENTAL)"
	depends on FRONTSWAP && CRYPTO=y
	select CRYPTO_LZO
	select ZPOOL
	help
	  A lightweight compressed cache for swap pages. It takes
	  pages that are in the process of being swapped out and attempts to
	  compress them into a dynamically allocated RAM-based memory pool.
	  This can result in a significant I/O reduction on swap device and,
	  in the case where decompressing from RAM is faster than swap device
	  reads, can also improve workload performance.

	  This is marked experimental because it is a new feature (as of
	  v3.11) that interacts heavily with memory reclaim. While these
	  interactions don't cause any known issues on simple memory setups,
	  they have not been fully explored on the large set of potential
	  configurations and workloads that exist.
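
#
# Example (sketch): zswap typically has to be enabled explicitly, either on the
# kernel command line or through its module parameter at runtime:
#
#   zswap.enabled=1                                  # boot parameter
#   echo 1 > /sys/module/zswap/parameters/enabled    # at runtime
#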

config ZPOOL
	tristate "Common API for compressed memory storage"
	help
	  Compressed memory storage API. This allows using either zbud or
	  zsmalloc.

config ZBUD
	tristate "Low (Up to 2x) density storage for compressed pages"
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to two compressed pages per physical
	  page. While this design limits storage density, it has simple and
	  deterministic reclaim properties that make it preferable to a higher
	  density approach when reclaim will be used.

config Z3FOLD
	tristate "Up to 3x density storage for compressed pages"
	depends on ZPOOL
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to three compressed pages per physical
	  page. It is a ZBUD derivative so the simplicity and determinism are
	  still there.

config ZSMALLOC
	tristate "Memory allocator for compressed pages"
	depends on MMU
	help
	  zsmalloc is a slab-based memory allocator designed to store
	  compressed RAM pages. zsmalloc uses virtual memory mapping
	  in order to reduce fragmentation. However, this results in a
	  non-standard allocator interface where a handle, not a pointer, is
	  returned by an alloc(). This handle must be mapped in order to
	  access the allocated space.

config PGTABLE_MAPPING
	bool "Use page table mapping to access object in zsmalloc"
	depends on ZSMALLOC
	help
	  By default, zsmalloc uses a copy-based object mapping method to
	  access allocations that span two pages. However, if a particular
	  architecture (ex, ARM) performs VM mapping faster than copying,
	  then you should select this. This causes zsmalloc to use page table
	  mapping rather than copying for object mapping.

	  You can check speed with zsmalloc benchmark:
	  https://github.com/spartacus06/zsmapbench

config ZSMALLOC_STAT
	bool "Export zsmalloc statistics"
	depends on ZSMALLOC
	select DEBUG_FS
	help
	  This option enables code in the zsmalloc to collect various
	  statistics about what's happening in zsmalloc and exports that
	  information to userspace via debugfs.

	  If unsure, say N.
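
#
# Example (sketch, assuming debugfs is mounted at /sys/kernel/debug): per-pool
# statistics appear under a zsmalloc directory, e.g. for a zram device:
#
#   cat /sys/kernel/debug/zsmalloc/zram0/classes
#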

config GENERIC_EARLY_IOREMAP
	bool

config MAX_STACK_SIZE_MB
	int "Maximum user stack size for 32-bit processes (MB)"
	default 80
	range 8 2048
	depends on STACK_GROWSUP && (!64BIT || COMPAT)
	help
	  This is the maximum stack size in Megabytes in the VM layout of 32-bit
	  user processes when the stack grows upwards (currently only on parisc
	  arch). The stack will be located at the highest memory address minus
	  the given value, unless the RLIMIT_STACK hard limit is changed to a
	  smaller value in which case that is used.

	  A sane initial value is 80 MB.

config DEFERRED_STRUCT_PAGE_INIT
	bool "Defer initialisation of struct pages to kthreads"
	depends on SPARSEMEM
	depends on !NEED_PER_CPU_KM
	depends on 64BIT
	help
	  Ordinarily all struct pages are initialised during early boot in a
	  single thread. On very large machines this can take a considerable
	  amount of time. If this option is set, large machines will bring up
	  a subset of memmap at boot and then initialise the rest in parallel
	  by starting one-off "pgdatinitX" kernel thread for each node X. This
	  has a potential performance impact on processes running early in the
	  lifetime of the system until these kthreads finish the
	  initialisation.

config IDLE_PAGE_TRACKING
	bool "Enable idle page tracking"
	depends on SYSFS && MMU
	select PAGE_EXTENSION if !64BIT
	help
	  This feature allows estimating the amount of user pages that have
	  not been touched during a given period of time. This information can
	  be useful to tune memory cgroup limits and/or for job placement
	  within a compute cluster.

	  See Documentation/admin-guide/mm/idle_page_tracking.rst for
	  more details.
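
#
# Example (sketch, following idle_page_tracking.rst): userspace marks pages
# idle by writing set bits to the bitmap, waits a while, and then reads the
# bitmap back; pages whose bit is still set were not accessed in between:
#
#   write set bits to /sys/kernel/mm/page_idle/bitmap    # mark pages idle
#   ... workload runs ...
#   read /sys/kernel/mm/page_idle/bitmap                 # still-set bits = idle
#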

config ARCH_HAS_PTE_DEVMAP
	bool

config ZONE_DEVICE
	bool "Device memory (pmem, HMM, etc...) hotplug support"
	depends on MEMORY_HOTPLUG
	depends on MEMORY_HOTREMOVE
	depends on SPARSEMEM_VMEMMAP
	depends on ARCH_HAS_PTE_DEVMAP
	select XARRAY_MULTI
	help
	  Device memory hotplug support allows for establishing pmem,
	  or other device driver discovered memory regions, in the
	  memmap. This allows pfn_to_page() lookups of otherwise
	  "device-physical" addresses which is needed for using a DAX
	  mapping in an O_DIRECT operation, among other things.

	  If FS_DAX is enabled, then say Y.

config DEV_PAGEMAP_OPS
	bool

#
# Helpers to mirror range of the CPU page tables of a process into device page
# tables.
#
config HMM_MIRROR
	bool
	depends on MMU
	depends on MMU_NOTIFIER

config DEVICE_PRIVATE
	bool "Unaddressable device memory (GPU memory, ...)"
	depends on ZONE_DEVICE
	select DEV_PAGEMAP_OPS
	help
	  Allows creation of struct pages to represent unaddressable device
	  memory; i.e., memory that is only accessible from the device (or
	  group of devices). You likely also want to select HMM_MIRROR.

config FRAME_VECTOR
	bool

config ARCH_USES_HIGH_VMA_FLAGS
	bool

config ARCH_HAS_PKEYS
	bool

config PERCPU_STATS
	bool "Collect percpu memory statistics"
	help
	  This feature collects and exposes statistics via debugfs. The
	  information includes global and per chunk statistics, which can
	  be used to help understand percpu memory usage.

config GUP_BENCHMARK
	bool "Enable infrastructure for get_user_pages_fast() benchmarking"
	help
	  Provides /sys/kernel/debug/gup_benchmark that helps with testing
	  performance of get_user_pages_fast().

	  See tools/testing/selftests/vm/gup_benchmark.c
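
#
# Example (sketch): the debugfs file is driven by the selftest mentioned above,
# which can typically be built and run from the kernel source tree:
#
#   make -C tools/testing/selftests/vm
#   ./tools/testing/selftests/vm/gup_benchmark
#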

config GUP_GET_PTE_LOW_HIGH
	bool

config READ_ONLY_THP_FOR_FS
	bool "Read-only THP for filesystems (EXPERIMENTAL)"
	depends on TRANSPARENT_HUGE_PAGECACHE && SHMEM
	help
	  Allow khugepaged to put read-only file-backed pages in THP.

	  This is marked experimental because it is a new feature. Write
	  support of file THPs will be developed in the next few release
	  cycles.

config ARCH_HAS_PTE_SPECIAL
	bool

#
# Some architectures require a special hugepage directory format that is
# required to support multiple hugepage sizes. For example a4fe3ce76
# "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
# introduced it on powerpc. This allows for more flexible hugepage
# pagetable layouts.
#
config ARCH_HAS_HUGEPD
	bool

endmenu