[External] [RFC PATCH v1 6/6] arch/x86/mm: create page table mapping for DRAM and NVDIMM both
by Huaisheng HS1 Ye
Create page table mappings at the PTE, PMD, PUD and P4D levels for the
physical addresses of both DRAM and NVDIMM. Here E820_TYPE_PMEM represents
the NVDIMM regions in the e820 table.
Signed-off-by: Huaisheng Ye <yehs1(a)lenovo.com>
Signed-off-by: Ocean He <hehy1(a)lenovo.com>
---
arch/x86/mm/init_64.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index af11a28..c03c2091 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -420,6 +420,10 @@ void __init cleanup_highmap(void)
if (!after_bootmem &&
!e820__mapped_any(paddr & PAGE_MASK, paddr_next,
E820_TYPE_RAM) &&
+#ifdef CONFIG_ZONE_NVM
+ !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
+ E820_TYPE_PMEM) &&
+#endif
!e820__mapped_any(paddr & PAGE_MASK, paddr_next,
E820_TYPE_RESERVED_KERN))
set_pte(pte, __pte(0));
@@ -475,6 +479,10 @@ void __init cleanup_highmap(void)
if (!after_bootmem &&
!e820__mapped_any(paddr & PMD_MASK, paddr_next,
E820_TYPE_RAM) &&
+#ifdef CONFIG_ZONE_NVM
+		!e820__mapped_any(paddr & PMD_MASK, paddr_next,
+ E820_TYPE_PMEM) &&
+#endif
!e820__mapped_any(paddr & PMD_MASK, paddr_next,
E820_TYPE_RESERVED_KERN))
set_pmd(pmd, __pmd(0));
@@ -561,6 +569,10 @@ void __init cleanup_highmap(void)
if (!after_bootmem &&
!e820__mapped_any(paddr & PUD_MASK, paddr_next,
E820_TYPE_RAM) &&
+#ifdef CONFIG_ZONE_NVM
+		!e820__mapped_any(paddr & PUD_MASK, paddr_next,
+ E820_TYPE_PMEM) &&
+#endif
!e820__mapped_any(paddr & PUD_MASK, paddr_next,
E820_TYPE_RESERVED_KERN))
set_pud(pud, __pud(0));
@@ -647,6 +659,10 @@ void __init cleanup_highmap(void)
if (!after_bootmem &&
!e820__mapped_any(paddr & P4D_MASK, paddr_next,
E820_TYPE_RAM) &&
+#ifdef CONFIG_ZONE_NVM
+		!e820__mapped_any(paddr & P4D_MASK, paddr_next,
+ E820_TYPE_PMEM) &&
+#endif
!e820__mapped_any(paddr & P4D_MASK, paddr_next,
E820_TYPE_RESERVED_KERN))
set_p4d(p4d, __p4d(0));
--
1.8.3.1
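Each hunk above extends the same test: an entry in the kernel direct mapping is cleared only when the physical range it covers is claimed by none of E820_TYPE_RAM, E820_TYPE_PMEM (with CONFIG_ZONE_NVM set), or E820_TYPE_RESERVED_KERN. The following is a minimal userspace sketch of that decision, not kernel code: the range table and the mapped_any()/should_clear_entry() helpers are made-up stand-ins for the e820 table and e820__mapped_any().

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the e820 table: a few typed physical ranges. */
enum range_type { TYPE_RAM, TYPE_PMEM, TYPE_RESERVED_KERN };

struct phys_range {
	uint64_t start, end;		/* [start, end) */
	enum range_type type;
};

static const struct phys_range table[] = {
	{ 0x0000000, 0x8000000, TYPE_RAM  },	/* 128 MiB of DRAM   */
	{ 0x8000000, 0xc000000, TYPE_PMEM },	/*  64 MiB of NVDIMM */
};

/* Rough model of e820__mapped_any(): does any range of @type overlap [start, end)? */
static bool mapped_any(uint64_t start, uint64_t end, enum range_type type)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].type == type && table[i].start < end && start < table[i].end)
			return true;
	return false;
}

/* Mirrors the patched condition: clear the entry only if nothing claims the range. */
static bool should_clear_entry(uint64_t paddr, uint64_t paddr_next)
{
	return !mapped_any(paddr, paddr_next, TYPE_RAM) &&
	       !mapped_any(paddr, paddr_next, TYPE_PMEM) &&		/* the new check */
	       !mapped_any(paddr, paddr_next, TYPE_RESERVED_KERN);
}

int main(void)
{
	/* A 2 MiB chunk inside the NVDIMM range is now kept mapped... */
	printf("%d\n", should_clear_entry(0x8000000, 0x8200000));	/* 0: keep  */
	/* ...while a hole beyond both ranges is still cleared. */
	printf("%d\n", should_clear_entry(0xd000000, 0xd200000));	/* 1: clear */
	return 0;
}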
[External] [RFC PATCH v1 5/6] mm: get zone spanned pages separately for DRAM and NVDIMM
by Huaisheng HS1 Ye
DRAM and NVDIMM are divided into separate zones, so the NVM zone
is dedicated to NVDIMMs.
In zone_spanned_pages_in_node(), the spanned pages of each zone are
calculated separately for DRAM and NVDIMM, using the flags
MEMBLOCK_NONE and MEMBLOCK_NVDIMM respectively.
Signed-off-by: Huaisheng Ye <yehs1(a)lenovo.com>
Signed-off-by: Ocean He <hehy1(a)lenovo.com>
---
mm/nobootmem.c | 5 +++--
mm/page_alloc.c | 40 ++++++++++++++++++++++++++++++++++++++++
2 files changed, 43 insertions(+), 2 deletions(-)
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 9b02fda..19b5291 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -143,8 +143,9 @@ static unsigned long __init free_low_memory_core_early(void)
* because in some case like Node0 doesn't have RAM installed
* low ram will be on Node1
*/
- for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
- NULL)
+ for_each_free_mem_range(i, NUMA_NO_NODE,
+ MEMBLOCK_NONE | MEMBLOCK_NVDIMM,
+ &start, &end, NULL)
count += __free_memory_core(start, end);
return count;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d8bd20d..3fd0d95 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4221,6 +4221,11 @@ static inline void finalise_ac(gfp_t gfp_mask,
* also used as the starting point for the zonelist iterator. It
* may get reset for allocations that ignore memory policies.
*/
+#ifdef CONFIG_ZONE_NVM
+	/* Bypass ZONE_NVM for normal allocations */
+ if (ac->high_zoneidx > ZONE_NVM)
+ ac->high_zoneidx = ZONE_NORMAL;
+#endif
ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
ac->high_zoneidx, ac->nodemask);
}
@@ -5808,6 +5813,10 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
unsigned long *zone_end_pfn,
unsigned long *ignored)
{
+#ifdef CONFIG_ZONE_NVM
+ unsigned long start_pfn, end_pfn;
+#endif
+
/* When hotadd a new node from cpu_up(), the node should be empty */
if (!node_start_pfn && !node_end_pfn)
return 0;
@@ -5815,6 +5824,26 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
/* Get the start and end of the zone */
*zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
*zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
+
+#ifdef CONFIG_ZONE_NVM
+ /*
+	 * Clip the zone range again according to zone_type.
+ */
+ if (zone_type == ZONE_NVM) {
+ get_pfn_range_for_nid_with_flags(nid, &start_pfn, &end_pfn,
+ MEMBLOCK_NVDIMM);
+ } else {
+ get_pfn_range_for_nid_with_flags(nid, &start_pfn, &end_pfn,
+ MEMBLOCK_NONE);
+ }
+
+ if (*zone_end_pfn < start_pfn || *zone_start_pfn > end_pfn)
+ return 0;
+	/* Move the zone boundaries inside the possible pfn range if necessary */
+ *zone_end_pfn = min(*zone_end_pfn, end_pfn);
+ *zone_start_pfn = max(*zone_start_pfn, start_pfn);
+#endif
+
adjust_zone_range_for_zone_movable(nid, zone_type,
node_start_pfn, node_end_pfn,
zone_start_pfn, zone_end_pfn);
@@ -6680,6 +6709,17 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
start_pfn = end_pfn;
}
+#ifdef CONFIG_ZONE_NVM
+ /*
+	 * Set the NVM zone boundaries from the NVDIMM regions inside the normal zone range
+ */
+ get_pfn_range_for_nid_with_flags(MAX_NUMNODES, &start_pfn, &end_pfn,
+ MEMBLOCK_NVDIMM);
+
+ arch_zone_lowest_possible_pfn[ZONE_NVM] = start_pfn;
+ arch_zone_highest_possible_pfn[ZONE_NVM] = end_pfn;
+#endif
+
/* Find the PFNs that ZONE_MOVABLE begins at in each node */
memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
find_zone_movable_pfns_for_nodes();
--
1.8.3.1
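The heart of the zone_spanned_pages_in_node() hunk is an interval intersection: the architecture-provided zone boundaries are clipped to the pfn span of the memblock regions matching the zone (MEMBLOCK_NVDIMM for ZONE_NVM, MEMBLOCK_NONE otherwise), and the zone spans zero pages when the two ranges do not overlap. The finalise_ac() hunk is separate and simply caps ac->high_zoneidx at ZONE_NORMAL so ordinary allocations never reach ZONE_NVM. Below is a standalone sketch of just the clamp; clamp_zone_to_range() is a made-up name, not a kernel helper.

#include <stdbool.h>
#include <stdio.h>

/*
 * Clip [*zone_start, *zone_end) to [range_start, range_end).
 * Returns false (zero spanned pages) when the ranges are disjoint,
 * mirroring the early "return 0" added to zone_spanned_pages_in_node().
 */
static bool clamp_zone_to_range(unsigned long *zone_start, unsigned long *zone_end,
				unsigned long range_start, unsigned long range_end)
{
	if (*zone_end < range_start || *zone_start > range_end)
		return false;
	if (*zone_end > range_end)
		*zone_end = range_end;
	if (*zone_start < range_start)
		*zone_start = range_start;
	return true;
}

int main(void)
{
	/* Possible zone pfns 0x0..0x100000, NVDIMM regions at pfns 0x80000..0xc0000. */
	unsigned long start = 0x0, end = 0x100000;

	if (clamp_zone_to_range(&start, &end, 0x80000, 0xc0000))
		printf("zone spans pfns [%#lx, %#lx)\n", start, end);
	else
		printf("zone spans 0 pages\n");
	return 0;
}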
[External] [RFC PATCH v1 2/6] mm/page_alloc.c: get pfn range with flags of memblock
by Huaisheng HS1 Ye
Expand the interface of get_pfn_range_for_nid() with memblock flags,
so mm can get the pfn range of regions carrying specific flags.
Signed-off-by: Huaisheng Ye <yehs1(a)lenovo.com>
Signed-off-by: Ocean He <hehy1(a)lenovo.com>
---
include/linux/mm.h | 4 ++++
mm/page_alloc.c | 17 ++++++++++++++++-
2 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ad06d42..8abf9c9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2046,6 +2046,10 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn);
+extern void get_pfn_range_for_nid_with_flags(unsigned int nid,
+ unsigned long *start_pfn,
+ unsigned long *end_pfn,
+ unsigned long flags);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
unsigned long max_low_pfn);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1741dd2..266c065 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5705,13 +5705,28 @@ void __init sparse_memory_present_with_active_regions(int nid)
void __meminit get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn)
{
+ get_pfn_range_for_nid_with_flags(nid, start_pfn, end_pfn,
+ MEMBLOCK_MAX_TYPE);
+}
+
+/*
+ * If nid is MAX_NUMNODES, include memory regions from all nodes.
+ * If flags is MEMBLOCK_MAX_TYPE, include all memory regions with or without flags.
+ */
+
+void __meminit get_pfn_range_for_nid_with_flags(unsigned int nid,
+ unsigned long *start_pfn,
+ unsigned long *end_pfn,
+ unsigned long flags)
+{
unsigned long this_start_pfn, this_end_pfn;
int i;
*start_pfn = -1UL;
*end_pfn = 0;
- for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
+ for_each_mem_pfn_range_with_flags(i, nid, &this_start_pfn,
+ &this_end_pfn, NULL, flags) {
*start_pfn = min(*start_pfn, this_start_pfn);
*end_pfn = max(*end_pfn, this_end_pfn);
}
--
1.8.3.1
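The new helper is a min/max fold over the pfn ranges that survive the flags filter, starting from the sentinel pair (-1UL, 0) so a caller can tell when nothing matched. Here is a userspace model of that fold; the pfn_range struct, the flag constants, and the exact-match filter are simplified stand-ins for memblock regions and for the richer matching done by for_each_mem_pfn_range_with_flags().

#include <stddef.h>
#include <stdio.h>

#define FLAG_NONE	0x0UL
#define FLAG_NVDIMM	0x8UL	/* illustrative stand-in for MEMBLOCK_NVDIMM */

struct pfn_range {
	unsigned long start_pfn, end_pfn;
	unsigned long flags;
};

/*
 * Accumulate the lowest start and highest end among ranges whose flags match,
 * as get_pfn_range_for_nid_with_flags() does.  With no match the caller sees
 * the sentinel pair (-1UL, 0).
 */
static void pfn_range_for_flags(const struct pfn_range *ranges, size_t n,
				unsigned long flags,
				unsigned long *start_pfn, unsigned long *end_pfn)
{
	*start_pfn = -1UL;
	*end_pfn = 0;

	for (size_t i = 0; i < n; i++) {
		if (ranges[i].flags != flags)	/* simplified filter */
			continue;
		if (ranges[i].start_pfn < *start_pfn)
			*start_pfn = ranges[i].start_pfn;
		if (ranges[i].end_pfn > *end_pfn)
			*end_pfn = ranges[i].end_pfn;
	}
}

int main(void)
{
	const struct pfn_range ranges[] = {
		{ 0x00000, 0x80000, FLAG_NONE   },	/* DRAM   */
		{ 0x80000, 0xc0000, FLAG_NVDIMM },	/* NVDIMM */
	};
	unsigned long start, end;

	pfn_range_for_flags(ranges, 2, FLAG_NVDIMM, &start, &end);
	printf("NVDIMM pfns: [%#lx, %#lx)\n", start, end);
	return 0;
}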
RE: [External] [RFC PATCH v1 1/6] mm/memblock: Expand definition of flags to support NVDIMM
by Huaisheng HS1 Ye
This patch gives mm the capability to get special regions from
memblock. During boot, memblock marks NVDIMM regions with the flag
MEMBLOCK_NVDIMM, and the interfaces of the related functions and
macros are expanded to take flags.
Signed-off-by: Huaisheng Ye <yehs1(a)lenovo.com>
Signed-off-by: Ocean He <hehy1(a)lenovo.com>
---
include/linux/memblock.h | 19 +++++++++++++++++++
mm/memblock.c | 46 +++++++++++++++++++++++++++++++++++++++++-----
2 files changed, 60 insertions(+), 5 deletions(-)
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index f92ea77..cade5c8d 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -26,6 +26,8 @@ enum {
MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */
MEMBLOCK_MIRROR = 0x2, /* mirrored region */
MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */
+ MEMBLOCK_NVDIMM = 0x8, /* NVDIMM region */
+ MEMBLOCK_MAX_TYPE = 0x10 /* all regions */
};
struct memblock_region {
@@ -89,6 +91,8 @@ bool memblock_overlaps_region(struct memblock_type *type,
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
+int memblock_mark_nvdimm(phys_addr_t base, phys_addr_t size);
+int memblock_clear_nvdimm(phys_addr_t base, phys_addr_t size);
ulong choose_memblock_flags(void);
/* Low level functions */
@@ -167,6 +171,11 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
i != (u64)ULLONG_MAX; \
__next_reserved_mem_region(&i, p_start, p_end))
+static inline bool memblock_is_nvdimm(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_NVDIMM;
+}
+
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
return m->flags & MEMBLOCK_HOTPLUG;
@@ -187,6 +196,11 @@ int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
unsigned long *out_end_pfn, int *out_nid);
+void __next_mem_pfn_range_with_flags(int *idx, int nid,
+ unsigned long *out_start_pfn,
+ unsigned long *out_end_pfn,
+ int *out_nid,
+ unsigned long flags);
/**
* for_each_mem_pfn_range - early memory pfn range iterator
@@ -201,6 +215,11 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
+
+#define for_each_mem_pfn_range_with_flags(i, nid, p_start, p_end, p_nid, flags) \
+ for (i = -1, __next_mem_pfn_range_with_flags(&i, nid, p_start, p_end, p_nid, flags);\
+ i >= 0; __next_mem_pfn_range_with_flags(&i, nid, p_start, p_end, p_nid, flags))
+
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/**
diff --git a/mm/memblock.c b/mm/memblock.c
index 48376bd..7699637 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -771,6 +771,16 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}
+int __init_memblock memblock_mark_nvdimm(phys_addr_t base, phys_addr_t size)
+{
+ return memblock_setclr_flag(base, size, 1, MEMBLOCK_NVDIMM);
+}
+
+int __init_memblock memblock_clear_nvdimm(phys_addr_t base, phys_addr_t size)
+{
+ return memblock_setclr_flag(base, size, 0, MEMBLOCK_NVDIMM);
+}
+
/**
* memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
* @base: the base phys addr of the region
@@ -891,6 +901,10 @@ void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
if (nid != NUMA_NO_NODE && nid != m_nid)
continue;
+ /* skip nvdimm memory regions if needed */
+ if (!(flags & MEMBLOCK_NVDIMM) && memblock_is_nvdimm(m))
+ continue;
+
/* skip hotpluggable memory regions if needed */
if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
continue;
@@ -1007,6 +1021,10 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
if (nid != NUMA_NO_NODE && nid != m_nid)
continue;
+ /* skip nvdimm memory regions if needed */
+ if (!(flags & MEMBLOCK_NVDIMM) && memblock_is_nvdimm(m))
+ continue;
+
/* skip hotpluggable memory regions if needed */
if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
continue;
@@ -1070,12 +1088,9 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-/*
- * Common iterator interface used to define for_each_mem_range().
- */
-void __init_memblock __next_mem_pfn_range(int *idx, int nid,
+void __init_memblock __next_mem_pfn_range_with_flags(int *idx, int nid,
unsigned long *out_start_pfn,
- unsigned long *out_end_pfn, int *out_nid)
+ unsigned long *out_end_pfn, int *out_nid, unsigned long flags)
{
struct memblock_type *type = &memblock.memory;
struct memblock_region *r;
@@ -1085,6 +1100,16 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid,
if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
continue;
+
+ /*
+	 * Use "flags & r->flags" to match regions carrying any requested flag.
+	 * Use "flags == r->flags" so MEMBLOCK_NONE (zero) regions can be requested.
+	 * Set flags = MEMBLOCK_MAX_TYPE to skip the flags check entirely.
+ */
+
+ if ((flags != MEMBLOCK_MAX_TYPE) && (flags != r->flags) && !(flags & r->flags))
+ continue;
+
if (nid == MAX_NUMNODES || nid == r->nid)
break;
}
@@ -1101,6 +1126,17 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid,
*out_nid = r->nid;
}
+/*
+ * Common iterator interface used to define for_each_mem_range().
+ */
+void __init_memblock __next_mem_pfn_range(int *idx, int nid,
+ unsigned long *out_start_pfn,
+ unsigned long *out_end_pfn, int *out_nid)
+{
+ __next_mem_pfn_range_with_flags(idx, nid, out_start_pfn, out_end_pfn,
+ out_nid, MEMBLOCK_MAX_TYPE);
+}
+
/**
* memblock_set_node - set node ID on memblock regions
* @base: base of area to set node ID for
--
1.8.3.1
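The filter added to __next_mem_pfn_range_with_flags() accepts a region in three cases: the wildcard MEMBLOCK_MAX_TYPE was requested, the requested flags equal the region's flags exactly (which is what lets MEMBLOCK_NONE, the zero value, be requested at all), or the region carries any of the requested flag bits. A small standalone truth table of that predicate follows; the flag values are copied from the patch, and region_matches() is a made-up name for the inverse of the "continue" test.

#include <stdbool.h>
#include <stdio.h>

/* Flag values as defined by this patch in include/linux/memblock.h. */
#define MEMBLOCK_NONE		0x0UL
#define MEMBLOCK_HOTPLUG	0x1UL
#define MEMBLOCK_NVDIMM		0x8UL
#define MEMBLOCK_MAX_TYPE	0x10UL	/* wildcard: match every region */

/* Inverse of the "continue" test in __next_mem_pfn_range_with_flags(). */
static bool region_matches(unsigned long requested, unsigned long region_flags)
{
	return requested == MEMBLOCK_MAX_TYPE ||	/* caller asked for everything   */
	       requested == region_flags      ||	/* exact match, covers NONE == 0 */
	       (requested & region_flags);		/* any shared flag bit           */
}

int main(void)
{
	printf("NONE     vs NONE  : %d\n", region_matches(MEMBLOCK_NONE, MEMBLOCK_NONE));	/* 1 */
	printf("NONE     vs NVDIMM: %d\n", region_matches(MEMBLOCK_NONE, MEMBLOCK_NVDIMM));	/* 0 */
	printf("NVDIMM   vs NVDIMM: %d\n", region_matches(MEMBLOCK_NVDIMM, MEMBLOCK_NVDIMM));	/* 1 */
	printf("MAX_TYPE vs NVDIMM: %d\n", region_matches(MEMBLOCK_MAX_TYPE, MEMBLOCK_NVDIMM));	/* 1 */
	return 0;
}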