File crash-update-recognition-of-x86_64-CPU_ENTRY_AREA.patch of Package crash.10062

From: Dave Anderson <anderson@redhat.com>
Date: Fri, 1 Jun 2018 10:58:00 -0400
Subject: Update the recognition of x86_64 CPU_ENTRY_AREA
References: bsc#1090127, bsc#1104743
Upstream: merged
Git-commit: da49e2010b3cb88b4755d69d38fe90af6ba218b2

Update for the recognition of the new x86_64 CPU_ENTRY_AREA virtual
address range introduced in Linux 4.15.  The memory range exists
above the vmemmap range and below the mapped kernel static text/data
region, and is where all of the x86_64 exception stacks have been moved.
Without the patch, reads from the new memory region fail because the
address range is not recognized as a legitimate virtual address.
Most notable is the failure of "bt" on tasks whose backtraces
originate from any of the exception stacks, which fail with the two
error messages "bt: seek error: kernel virtual address: <address>
type: stack contents" followed by "bt: read of stack at <address>
failed".
(anderson@redhat.com)

---
 defs.h   |    5 +++++
 x86_64.c |   47 +++++++++++++++++++++++++++++++++++------------
 2 files changed, 40 insertions(+), 12 deletions(-)

--- a/defs.h
+++ b/defs.h
@@ -3392,6 +3392,9 @@ struct arm64_stackframe {
 #define VSYSCALL_START             0xffffffffff600000
 #define VSYSCALL_END               0xffffffffff601000
 
+#define CPU_ENTRY_AREA_START       0xfffffe0000000000
+#define CPU_ENTRY_AREA_END         0xfffffe7fffffffff
+
 #define PTOV(X)               ((unsigned long)(X)+(machdep->kvbase))
 #define VTOP(X)               x86_64_VTOP((ulong)(X))
 #define IS_VMALLOC_ADDR(X)    x86_64_IS_VMALLOC_ADDR((ulong)(X))
@@ -5830,6 +5833,8 @@ struct machine_specific {
 	ulong kpti_entry_stack;
 	ulong kpti_entry_stack_size;
 	ulong ptrs_per_pgd;
+	ulong cpu_entry_area_start;
+	ulong cpu_entry_area_end;
 };
 
 #define KSYMS_START    (0x1)
--- a/x86_64.c
+++ b/x86_64.c
@@ -407,6 +407,11 @@ x86_64_init(int when)
 				machdep->machspec->modules_end = MODULES_END_2_6_31;
 			}
 		}
+		if (STRUCT_EXISTS("cpu_entry_area")) {
+			machdep->machspec->cpu_entry_area_start = CPU_ENTRY_AREA_START;	
+			machdep->machspec->cpu_entry_area_end = CPU_ENTRY_AREA_END;	
+		}
+
                 STRUCT_SIZE_INIT(cpuinfo_x86, "cpuinfo_x86");
 		/* 
 		 * Before 2.6.25 the structure was called gate_struct
@@ -865,20 +870,21 @@ x86_64_dump_machdep_table(ulong arg)
 
 	/* pml4 and upml is legacy for extension modules */
 	if (ms->pml4) {
-		fprintf(fp, "			  pml4: %lx\n", (ulong)ms->pml4);
-		fprintf(fp, "		last_pml4_read: %lx\n", (ulong)ms->last_pml4_read);
+		fprintf(fp, "                     pml4: %lx\n", (ulong)ms->pml4);
+		fprintf(fp, "           last_pml4_read: %lx\n", (ulong)ms->last_pml4_read);
 
 	} else {
-		fprintf(fp, "		      pml4: (unused)\n");
-		fprintf(fp, "	    last_pml4_read: (unused)\n");
+		fprintf(fp, "                     pml4: (unused)\n");
+		fprintf(fp, "           last_pml4_read: (unused)\n");
 	}
 
 	if (ms->upml) {
-		fprintf(fp, "		      upml: %lx\n", (ulong)ms->upml);
-		fprintf(fp, "	    last_upml_read: %lx\n", (ulong)ms->last_upml_read);
+		fprintf(fp, "                     upml: %lx\n", (ulong)ms->upml);
+		fprintf(fp, "           last_upml_read: %lx\n", (ulong)ms->last_upml_read);
 	} else {
-		fprintf(fp, "		      upml: (unused)\n");
-		fprintf(fp, "	    last_upml_read: (unused)\n");
+		fprintf(fp, "                 GART_end: %lx\n", ms->GART_end);
+		fprintf(fp, "                     upml: (unused)\n");
+		fprintf(fp, "           last_upml_read: (unused)\n");
 	}
 
 	if (ms->p4d) {
@@ -1002,10 +1008,14 @@ x86_64_dump_machdep_table(ulong arg)
 			fprintf(fp, "\n   ");
 		fprintf(fp, "%016lx ", ms->stkinfo.ibase[c]);
 	}
-	fprintf(fp, "\n                 kpti_entry_stack_size: %ld", ms->kpti_entry_stack_size);
-	fprintf(fp, "\n                      kpti_entry_stack: ");
+	fprintf(fp, "\n    kpti_entry_stack_size: ");
+	if (ms->kpti_entry_stack_size)
+		fprintf(fp, "%ld", ms->kpti_entry_stack_size);
+	else
+		fprintf(fp, "(unused)");
+	fprintf(fp, "\n         kpti_entry_stack: ");
 	if (machdep->flags & KPTI) {
-		fprintf(fp, "%lx\n   ", ms->kpti_entry_stack);
+		fprintf(fp, "(percpu: %lx):\n   ", ms->kpti_entry_stack);
 		for (c = 0; c < cpus; c++) {
 			if (c && !(c%4))
 				fprintf(fp, "\n   ");
@@ -1014,6 +1024,16 @@ x86_64_dump_machdep_table(ulong arg)
 		fprintf(fp, "\n");
 	} else
 		fprintf(fp, "(unused)\n");
+	fprintf(fp, "     cpu_entry_area_start: ");
+	if (ms->cpu_entry_area_start)
+		fprintf(fp, "%016lx\n", (ulong)ms->cpu_entry_area_start);
+	else
+		fprintf(fp, "(unused)\n");
+	fprintf(fp, "       cpu_entry_area_end: ");
+	if (ms->cpu_entry_area_end)
+		fprintf(fp, "%016lx\n", (ulong)ms->cpu_entry_area_end);
+	else
+		fprintf(fp, "(unused)\n");
 }
 
 /*
@@ -1572,7 +1592,10 @@ x86_64_IS_VMALLOC_ADDR(ulong vaddr)
                 ((machdep->flags & VMEMMAP) && 
 		 (vaddr >= VMEMMAP_VADDR && vaddr <= VMEMMAP_END)) ||
                 (vaddr >= MODULES_VADDR && vaddr <= MODULES_END) ||
-		(vaddr >= VSYSCALL_START && vaddr < VSYSCALL_END));
+		(vaddr >= VSYSCALL_START && vaddr < VSYSCALL_END) ||
+		(machdep->machspec->cpu_entry_area_start && 
+		 vaddr >= machdep->machspec->cpu_entry_area_start &&
+		 vaddr <= machdep->machspec->cpu_entry_area_end));
 }
 
 static int