6282875 large page kernel heap slows down mdb ::kgrep command
6281150 lpkmem slows down the checkpoint phase of CPR
6282867 DMMU primary context register can start with invalid page size codes during CPR resume
--- a/usr/src/cmd/mdb/common/modules/genunix/genunix.c Tue Jun 14 17:51:52 2005 -0700
+++ b/usr/src/cmd/mdb/common/modules/genunix/genunix.c Tue Jun 14 19:12:41 2005 -0700
@@ -1324,6 +1324,8 @@
uintptr_t kg_kvseg32;
uintptr_t kg_kvseg_core;
uintptr_t kg_segkpm;
+ uintptr_t kg_heap_lp_base;
+ uintptr_t kg_heap_lp_end;
} kgrep_walk_data_t;
static int
@@ -1345,19 +1347,39 @@
static int
kgrep_walk_vseg(uintptr_t addr, const vmem_seg_t *seg, kgrep_walk_data_t *kg)
{
+ /*
+ * skip large page heap address range - it is scanned by walking
+ * allocated vmem_segs in the heap_lp_arena
+ */
+ if (seg->vs_start == kg->kg_heap_lp_base &&
+ seg->vs_end == kg->kg_heap_lp_end)
+ return (WALK_NEXT);
+
+ return (kg->kg_cb(seg->vs_start, seg->vs_end, kg->kg_cbdata));
+}
+
+/*ARGSUSED*/
+static int
+kgrep_xwalk_vseg(uintptr_t addr, const vmem_seg_t *seg, kgrep_walk_data_t *kg)
+{
return (kg->kg_cb(seg->vs_start, seg->vs_end, kg->kg_cbdata));
}
static int
kgrep_walk_vmem(uintptr_t addr, const vmem_t *vmem, kgrep_walk_data_t *kg)
{
+ mdb_walk_cb_t walk_vseg = (mdb_walk_cb_t)kgrep_walk_vseg;
+
if (strcmp(vmem->vm_name, "heap") != 0 &&
strcmp(vmem->vm_name, "heap32") != 0 &&
- strcmp(vmem->vm_name, "heap_core") != 0)
+ strcmp(vmem->vm_name, "heap_core") != 0 &&
+ strcmp(vmem->vm_name, "heap_lp") != 0)
return (WALK_NEXT);
- if (mdb_pwalk("vmem_alloc",
- (mdb_walk_cb_t)kgrep_walk_vseg, kg, addr) == -1) {
+ if (strcmp(vmem->vm_name, "heap_lp") == 0)
+ walk_vseg = (mdb_walk_cb_t)kgrep_xwalk_vseg;
+
+ if (mdb_pwalk("vmem_alloc", walk_vseg, kg, addr) == -1) {
mdb_warn("couldn't walk vmem_alloc for vmem %p", addr);
return (WALK_ERR);
}
@@ -1402,6 +1424,16 @@
return (DCMD_ERR);
}
+ if (mdb_readvar(&kg.kg_heap_lp_base, "heap_lp_base") == -1) {
+ mdb_warn("failed to read 'heap_lp_base'\n");
+ return (DCMD_ERR);
+ }
+
+ if (mdb_readvar(&kg.kg_heap_lp_end, "heap_lp_end") == -1) {
+ mdb_warn("failed to read 'heap_lp_end'\n");
+ return (DCMD_ERR);
+ }
+
kg.kg_cb = cb;
kg.kg_cbdata = cbdata;
kg.kg_kvseg = (uintptr_t)kvseg.st_value;
--- a/usr/src/uts/common/cpr/cpr_dump.c Tue Jun 14 17:51:52 2005 -0700
+++ b/usr/src/uts/common/cpr/cpr_dump.c Tue Jun 14 19:12:41 2005 -0700
@@ -486,11 +486,11 @@
/*
- * cpr_walk() is called many 100x with a range within kvseg;
+ * cpr_xwalk() is called many 100x with a range within kvseg or kvseg_reloc;
* a page-count from each range is accumulated at arg->pages.
*/
static void
-cpr_walk(void *arg, void *base, size_t size)
+cpr_xwalk(void *arg, void *base, size_t size)
{
struct cpr_walkinfo *cwip = arg;
@@ -500,6 +500,30 @@
cwip->ranges++;
}
+/*
+ * cpr_walk() is called many 100x with a range within kvseg or kvseg_reloc;
+ * a page-count from each range is accumulated at arg->pages.
+ */
+static void
+cpr_walk(void *arg, void *base, size_t size)
+{
+ caddr_t addr = base;
+ caddr_t addr_end = addr + size;
+
+ /*
+	 * If we are about to start walking the range of addresses we
+	 * carved out of the kernel heap for the large page heap, walk
+	 * heap_lp_arena to find which segments are actually populated.
+ */
+ if (SEGKMEM_USE_LARGEPAGES &&
+ addr == heap_lp_base && addr_end == heap_lp_end &&
+ vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
+ vmem_walk(heap_lp_arena, VMEM_ALLOC, cpr_xwalk, arg);
+ } else {
+ cpr_xwalk(arg, base, size);
+ }
+}
+
/*
* faster scan of kvseg using vmem_walk() to visit
--- a/usr/src/uts/common/vm/seg_kmem.c Tue Jun 14 17:51:52 2005 -0700
+++ b/usr/src/uts/common/vm/seg_kmem.c Tue Jun 14 19:12:41 2005 -0700
@@ -140,7 +140,7 @@
size_t segkmem_kmemlp_quantum = 0x400000; /* 4MB */
size_t segkmem_heaplp_quantum;
-static vmem_t *heap_lp_arena;
+vmem_t *heap_lp_arena;
static vmem_t *kmem_lp_arena;
static vmem_t *segkmem_ppa_arena;
static segkmem_lpcb_t segkmem_lpcb;
--- a/usr/src/uts/common/vm/seg_kmem.h Tue Jun 14 17:51:52 2005 -0700
+++ b/usr/src/uts/common/vm/seg_kmem.h Tue Jun 14 19:12:41 2005 -0700
@@ -52,6 +52,7 @@
extern char *heap_lp_end; /* end of kernel large page heap arena */
extern struct seg kvseg; /* primary kernel heap segment */
extern struct seg kvseg_core; /* "core" kernel heap segment */
+extern vmem_t *heap_lp_arena; /* kernel large page heap arena */
extern vmem_t *heap_arena; /* primary kernel heap arena */
extern vmem_t *hat_memload_arena; /* HAT translation arena */
extern struct seg kvseg32; /* 32-bit kernel heap segment */
--- a/usr/src/uts/sun4u/os/cpr_impl.c Tue Jun 14 17:51:52 2005 -0700
+++ b/usr/src/uts/sun4u/os/cpr_impl.c Tue Jun 14 19:12:41 2005 -0700
@@ -63,6 +63,7 @@
#include <sys/memlist.h>
#include <sys/bootconf.h>
#include <sys/thread.h>
+#include <vm/vm_dep.h>
extern void cpr_clear_bitmaps(void);
extern void dtlb_wr_entry(uint_t, tte_t *, uint64_t *);
@@ -222,6 +223,15 @@
char *str;
cpu_t *cp;
+ uint64_t kctx = kcontextreg;
+
+ /*
+ * Do not allow setting page size codes in MMU primary context
+ * register while using cif wrapper. This is needed to work
+	 * around OBP's incorrect handling of this MMU register.
+ */
+ kcontextreg = 0;
+
/*
* reset cpu_ready_set so x_calls work properly
*/
@@ -286,6 +296,9 @@
i_cpr_cif_setup(CIF_UNLINK);
(void) i_cpr_prom_pages(CPR_PROM_RESTORE);
+
+ /* allow setting page size codes in MMU primary context register */
+ kcontextreg = kctx;
}
@@ -587,7 +600,7 @@
* TLB miss handling.
*/
m_info.mmu_ctx_sec = INVALID_CONTEXT;
- m_info.mmu_ctx_pri = sfmmu_getctx_pri();
+ m_info.mmu_ctx_pri = KCONTEXT;
tinfo = (uintptr_t)curthread;
m_info.thrp = (cpr_ptr)tinfo;