File kexec-tools-ppc64-parse-ibm-dynamic-memory.patch of Package kexec-tools
From: Hari Bathini <hbathini@linux.vnet.ibm.com>
Date: Tue, 20 Feb 2018 19:48:09 +0530
Subject: kexec/ppc64: add support to parse ibm,dynamic-memory-v2 property
References: bsc#1081789, LTC#164625
Upstream: merged
Git-commit: b10924a7da3ca48c04982cd23daf04882afb1a87
Add support to parse the new 'ibm,dynamic-memory-v2' property in the
'ibm,dynamic-reconfiguration-memory' node. This replaces the old
'ibm,dynamic-memory' property and is enabled in the kernel with a
patch series that starts with commit 0c38ed6f6f0b ("powerpc/pseries:
Enable support of ibm,dynamic-memory-v2"). In the newer version of the
property, adjacent LMBs that share the same flags are grouped together
into a single entry, making the representation of large memory
configurations more compact.
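
For reference, a rough sketch of the per-entry layouts assumed by the
parsing code (struct and field names are illustrative, modelled on the
kernel's drconf cell definitions; both properties begin with a 4-byte
big-endian entry count, and all fields are stored big-endian):

    /* ibm,dynamic-memory: one 24-byte (LMB_ENTRY_SIZE) entry per LMB */
    struct drconf_cell_v1 {
            uint64_t base_addr;     /* DRCONF_ADDR = 0  */
            uint32_t drc_index;
            uint32_t reserved;
            uint32_t aa_index;
            uint32_t flags;         /* DRCONF_FLAGS = 20 */
    } __attribute__((packed));

    /* ibm,dynamic-memory-v2: one 24-byte entry per set of sequential LMBs */
    struct drconf_cell_v2 {
            uint32_t seq_lmbs;      /* number of sequential LMBs in the set */
            uint64_t base_addr;     /* DRCONF_ADDR = 4  */
            uint32_t drc_index;
            uint32_t aa_index;
            uint32_t flags;         /* DRCONF_FLAGS = 20 */
    } __attribute__((packed));

A v2 set covers lmb_size * seq_lmbs bytes starting at base_addr, which is
how get_dyn_reconf_base_ranges() and the crash memory range code compute
the end of each range below.
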
Signed-off-by: Hari Bathini <hbathini@linux.vnet.ibm.com>
Signed-off-by: Mahesh Jagannath Salgaonkar <mahesh@linux.vnet.ibm.com>
Signed-off-by: Simon Horman <horms@verge.net.au>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
kexec/arch/ppc64/crashdump-ppc64.c | 23 +++++++--
kexec/arch/ppc64/crashdump-ppc64.h | 14 ++++-
kexec/arch/ppc64/kexec-ppc64.c | 35 ++++++++++----
kexec/fs2dt.c | 92 ++++++++++++++++++++++---------------
4 files changed, 111 insertions(+), 53 deletions(-)
--- a/kexec/arch/ppc64/crashdump-ppc64.c
+++ b/kexec/arch/ppc64/crashdump-ppc64.c
@@ -39,6 +39,10 @@
#define DEVTREE_CRASHKERNEL_BASE "/proc/device-tree/chosen/linux,crashkernel-base"
#define DEVTREE_CRASHKERNEL_SIZE "/proc/device-tree/chosen/linux,crashkernel-size"
+unsigned int num_of_lmb_sets;
+unsigned int is_dyn_mem_v2;
+uint64_t lmb_size;
+
static struct crash_elf_info elf_info64 =
{
class: ELFCLASS64,
@@ -127,6 +131,7 @@ static int get_dyn_reconf_crash_memory_r
{
uint64_t start, end;
uint64_t startrange, endrange;
+ uint64_t size;
char fname[128], buf[32];
FILE *file;
unsigned int i;
@@ -135,6 +140,8 @@ static int get_dyn_reconf_crash_memory_r
strcpy(fname, "/proc/device-tree/");
strcat(fname, "ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory");
+ if (is_dyn_mem_v2)
+ strcat(fname, "-v2");
if ((file = fopen(fname, "r")) == NULL) {
perror(fname);
return -1;
@@ -142,8 +149,9 @@ static int get_dyn_reconf_crash_memory_r
fseek(file, 4, SEEK_SET);
startrange = endrange = 0;
- for (i = 0; i < num_of_lmbs; i++) {
- if ((n = fread(buf, 1, 24, file)) < 0) {
+ size = lmb_size;
+ for (i = 0; i < num_of_lmb_sets; i++) {
+ if ((n = fread(buf, 1, LMB_ENTRY_SIZE, file)) < 0) {
perror(fname);
fclose(file);
return -1;
@@ -156,8 +164,15 @@ static int get_dyn_reconf_crash_memory_r
return -1;
}
- start = be64_to_cpu(((uint64_t *)buf)[DRCONF_ADDR]);
- end = start + lmb_size;
+ /*
+ * If the property is ibm,dynamic-memory-v2, the first 4 bytes
+ * tell the number of sequential LMBs in this entry.
+ */
+ if (is_dyn_mem_v2)
+ size = be32_to_cpu(((unsigned int *)buf)[0]) * lmb_size;
+
+ start = be64_to_cpu(*((uint64_t *)&buf[DRCONF_ADDR]));
+ end = start + size;
if (start == 0 && end >= (BACKUP_SRC_END + 1))
start = BACKUP_SRC_END + 1;
--- a/kexec/arch/ppc64/crashdump-ppc64.h
+++ b/kexec/arch/ppc64/crashdump-ppc64.h
@@ -34,10 +34,18 @@ extern unsigned int rtas_size;
extern uint64_t opal_base;
extern uint64_t opal_size;
-uint64_t lmb_size;
-unsigned int num_of_lmbs;
+/*
+ * In case of ibm,dynamic-memory-v2 property, this is the number of LMB
+ * sets where each set represents a group of sequential LMB entries. In
+ * case of ibm,dynamic-memory property, the number of LMB sets is nothing
+ * but the total number of LMB entries.
+ */
+extern unsigned int num_of_lmb_sets;
+extern unsigned int is_dyn_mem_v2;
+extern uint64_t lmb_size;
-#define DRCONF_ADDR 0
+#define LMB_ENTRY_SIZE 24
+#define DRCONF_ADDR (is_dyn_mem_v2 ? 4 : 0)
#define DRCONF_FLAGS 20
#endif /* CRASHDUMP_PPC64_H */
--- a/kexec/arch/ppc64/kexec-ppc64.c
+++ b/kexec/arch/ppc64/kexec-ppc64.c
@@ -149,6 +149,7 @@ static void add_base_memory_range(uint64
static int get_dyn_reconf_base_ranges(void)
{
uint64_t start, end;
+ uint64_t size;
char fname[128], buf[32];
FILE *file;
unsigned int i;
@@ -166,29 +167,35 @@ static int get_dyn_reconf_base_ranges(vo
return -1;
}
/*
- * lmb_size, num_of_lmbs(global variables) are
+ * lmb_size, num_of_lmb_sets(global variables) are
* initialized once here.
*/
- lmb_size = be64_to_cpu(((uint64_t *)buf)[0]);
+ size = lmb_size = be64_to_cpu(((uint64_t *)buf)[0]);
fclose(file);
strcpy(fname, "/proc/device-tree/");
strcat(fname,
"ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory");
if ((file = fopen(fname, "r")) == NULL) {
- perror(fname);
- return -1;
+ strcat(fname, "-v2");
+ if ((file = fopen(fname, "r")) == NULL) {
+ perror(fname);
+ return -1;
+ }
+
+ is_dyn_mem_v2 = 1;
}
- /* first 4 bytes tell the number of lmbs */
+
+ /* first 4 bytes tell the number of lmb set entries */
if (fread(buf, 1, 4, file) != 4) {
perror(fname);
fclose(file);
return -1;
}
- num_of_lmbs = be32_to_cpu(((unsigned int *)buf)[0]);
+ num_of_lmb_sets = be32_to_cpu(((unsigned int *)buf)[0]);
- for (i = 0; i < num_of_lmbs; i++) {
- if ((n = fread(buf, 1, 24, file)) < 0) {
+ for (i = 0; i < num_of_lmb_sets; i++) {
+ if ((n = fread(buf, 1, LMB_ENTRY_SIZE, file)) < 0) {
perror(fname);
fclose(file);
return -1;
@@ -196,13 +203,21 @@ static int get_dyn_reconf_base_ranges(vo
if (nr_memory_ranges >= max_memory_ranges)
return -1;
- start = be64_to_cpu(((uint64_t *)buf)[0]);
- end = start + lmb_size;
+ /*
+ * If the property is ibm,dynamic-memory-v2, the first 4 bytes
+ * tell the number of sequential LMBs in this entry.
+ */
+ if (is_dyn_mem_v2)
+ size = be32_to_cpu(((unsigned int *)buf)[0]) * lmb_size;
+
+ start = be64_to_cpu(*((uint64_t *)&buf[DRCONF_ADDR]));
+ end = start + size;
add_base_memory_range(start, end);
}
fclose(file);
return 0;
}
+
/* Sort the base ranges in memory - this is useful for ensuring that our
* ranges are in ascending order, even if device-tree read of memory nodes
* is done differently. Also, could be used for other range coalescing later
--- a/kexec/fs2dt.c
+++ b/kexec/fs2dt.c
@@ -217,11 +217,12 @@ static uint64_t add_ranges(uint64_t **ra
static void add_dyn_reconf_usable_mem_property__(int fd)
{
char fname[MAXPATH], *bname;
- uint64_t buf[32];
+ char buf[32];
+ uint32_t lmbs_in_set = 1;
uint64_t *ranges;
int ranges_size = MEM_RANGE_CHUNK_SZ;
uint64_t base, end, rngs_cnt;
- size_t i;
+ size_t i, j;
int rlen = 0;
int tmp_indx;
@@ -242,43 +243,61 @@ static void add_dyn_reconf_usable_mem_pr
ranges_size*8);
rlen = 0;
- for (i = 0; i < num_of_lmbs; i++) {
- if (read(fd, buf, 24) < 0)
+ for (i = 0; i < num_of_lmb_sets; i++) {
+ if (read(fd, buf, LMB_ENTRY_SIZE) < 0)
die("unrecoverable error: error reading \"%s\": %s\n",
pathname, strerror(errno));
- base = be64_to_cpu((uint64_t) buf[0]);
- end = base + lmb_size;
- if (~0ULL - base < end)
- die("unrecoverable error: mem property overflow\n");
-
- tmp_indx = rlen++;
-
- rngs_cnt = add_ranges(&ranges, &ranges_size, rlen,
- base, end);
- if (rngs_cnt == 0) {
- /* We still need to add a counter for every LMB because
- * the kernel parsing code is dumb. We just have
- * a zero in this case, with no following base/len.
- */
- ranges[tmp_indx] = 0;
- /* rlen is already just tmp_indx+1 as we didn't write
- * anything. Check array size here, as we'll probably
- * go on for a while writing zeros now.
- */
- if (rlen >= (ranges_size-1)) {
- ranges_size += MEM_RANGE_CHUNK_SZ;
- ranges = realloc(ranges, ranges_size*8);
- if (!ranges)
- die("unrecoverable error: can't"
- " realloc %d bytes for"
- " ranges.\n",
- ranges_size*8);
+ /*
+ * If the property is ibm,dynamic-memory-v2, the first 4 bytes
+ * tell the number of sequential LMBs in this entry. Else, if
+ * the property is ibm,dynamic-memory, each entry represents
+ * one LMB. Make sure to add an entry for each LMB as kernel
+ * looks for a counter for every LMB.
+ */
+ if (is_dyn_mem_v2)
+ lmbs_in_set = be32_to_cpu(((unsigned int *)buf)[0]);
+
+ base = be64_to_cpu(*((uint64_t *)&buf[DRCONF_ADDR]));
+ for (j = 0; j < lmbs_in_set; j++) {
+ end = base + lmb_size;
+ if (~0ULL - base < end) {
+ die("unrecoverable error: mem property"
+ " overflow\n");
}
- } else {
- /* Store the count of (base, size) duple */
- ranges[tmp_indx] = cpu_to_be64(rngs_cnt);
- rlen += rngs_cnt * 2;
+
+ tmp_indx = rlen++;
+
+ rngs_cnt = add_ranges(&ranges, &ranges_size, rlen,
+ base, end);
+ if (rngs_cnt == 0) {
+ /* We still need to add a counter for every LMB
+ * because the kernel parsing code is dumb. We
+ * just have a zero in this case, with no
+ * following base/len.
+ */
+ ranges[tmp_indx] = 0;
+
+ /* rlen is already just tmp_indx+1 as we didn't
+ * write anything. Check array size here, as we
+ * will probably go on writing zeros for a while
+ */
+ if (rlen >= (ranges_size-1)) {
+ ranges_size += MEM_RANGE_CHUNK_SZ;
+ ranges = realloc(ranges, ranges_size*8);
+ if (!ranges)
+ die("unrecoverable error: can't"
+ " realloc %d bytes for"
+ " ranges.\n",
+ ranges_size*8);
+ }
+ } else {
+ /* Store the count of (base, size) duple */
+ ranges[tmp_indx] = cpu_to_be64(rngs_cnt);
+ rlen += rngs_cnt * 2;
+ }
+
+ base = end;
}
}
@@ -298,7 +317,8 @@ static void add_dyn_reconf_usable_mem_pr
static void add_dyn_reconf_usable_mem_property(struct dirent *dp, int fd)
{
- if (!strcmp(dp->d_name, "ibm,dynamic-memory") && usablemem_rgns.size)
+ if ((!strcmp(dp->d_name, "ibm,dynamic-memory-v2") ||
+ !strcmp(dp->d_name, "ibm,dynamic-memory")) && usablemem_rgns.size)
add_dyn_reconf_usable_mem_property__(fd);
}
#else