usr/src/uts/common/cpr/cpr_dump.c
changeset 5 3fb270f22f8d
parent 0 68f95e015346
child 931 92fdf4004904
--- a/usr/src/uts/common/cpr/cpr_dump.c	Tue Jun 14 17:51:52 2005 -0700
+++ b/usr/src/uts/common/cpr/cpr_dump.c	Tue Jun 14 19:12:41 2005 -0700
@@ -486,11 +486,11 @@
 
 
 /*
- * cpr_walk() is called many 100x with a range within kvseg;
+ * cpr_xwalk() is called hundreds of times with a range within kvseg or kvseg_reloc;
  * a page-count from each range is accumulated at arg->pages.
  */
 static void
-cpr_walk(void *arg, void *base, size_t size)
+cpr_xwalk(void *arg, void *base, size_t size)
 {
 	struct cpr_walkinfo *cwip = arg;
 
@@ -500,6 +500,30 @@
 	cwip->ranges++;
 }
 
+/*
+ * cpr_walk() is called hundreds of times with a range within kvseg or kvseg_reloc;
+ * a page-count from each range is accumulated at arg->pages.
+ */
+static void
+cpr_walk(void *arg, void *base, size_t size)
+{
+	caddr_t addr = base;
+	caddr_t addr_end = addr + size;
+
+	/*
+	 * If we are about to start walking the range of addresses we
+	 * carved out of the kernel heap for the large page heap, walk
+	 * heap_lp_arena to find what segments are actually populated.
+	 */
+	if (SEGKMEM_USE_LARGEPAGES &&
+	    addr == heap_lp_base && addr_end == heap_lp_end &&
+	    vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
+		vmem_walk(heap_lp_arena, VMEM_ALLOC, cpr_xwalk, arg);
+	} else {
+		cpr_xwalk(arg, base, size);
+	}
+}
+
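For context on the pattern above: vmem_walk() invokes the callback once per
span in the arena that matches the type mask, so walking heap_lp_arena with
VMEM_ALLOC visits only the populated large-page segments rather than the one
giant carved-out range, and the vmem_size() check confirms the arena is only
partially allocated before taking that path. Below is a minimal sketch of the
same accumulate-over-spans pattern, not part of the changeset; the demo_*
names and struct are hypothetical, while vmem_walk(), vmem_size(), VMEM_ALLOC,
btopr(), and heap_arena are the standard illumos kernel interfaces used above.

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vmem.h>
#include <vm/seg_kmem.h>	/* declares heap_arena */

/* accumulator handed to vmem_walk() through its arg pointer */
struct demo_walkinfo {
	pgcnt_t	pages;		/* pages seen across all spans */
	int	ranges;		/* allocated spans visited */
};

/* called once per allocated span; same shape as cpr_xwalk() above */
static void
demo_walk(void *arg, void *base, size_t size)
{
	struct demo_walkinfo *wip = arg;

	wip->pages += btopr(size);	/* bytes to pages, rounded up */
	wip->ranges++;
}

/* total the populated pages of the kernel heap arena */
static pgcnt_t
demo_count_heap(void)
{
	struct demo_walkinfo winfo;

	bzero(&winfo, sizeof (winfo));
	vmem_walk(heap_arena, VMEM_ALLOC, demo_walk, &winfo);
	return (winfo.pages);
}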
 
 /*
  * faster scan of kvseg using vmem_walk() to visit