|
1 # |
|
2 # This patch was included to address: |
|
3 # 16988337 Stolen time should be visible in output of top(1) |
|
4 # |
|
5 # Upstream BugDB: https://sourceforge.net/p/unixtop/bugs/ |
|
6 # Upstream BugURL: https://sourceforge.net/p/unixtop/bugs/55/ |
|
7 # |
|
8 --- a/machine/m_sunos5.c Fri May 24 15:39:52 2013 +0200 |
|
9 +++ b/machine/m_sunos5.c Wed Nov 05 13:48:21 2014 +0100 |
|
10 @@ -7,8 +7,7 @@ |
|
11 * This is the machine-dependent module for SunOS 5.x (Solaris 2). |
|
12 * There is some support for MP architectures. |
|
13 * This makes top work on all revisions of SunOS 5 from 5.0 |
|
14 - * through 5.9 (otherwise known as Solaris 9). It has not been |
|
15 - * tested on SunOS 5.10. |
|
16 + * through 5.11 (otherwise known as Solaris 11). |
|
17 * |
|
18 * AUTHORS: Torsten Kasch <[email protected]> |
|
19 * Robert Boucher <[email protected]> |
|
20 @@ -18,6 +17,7 @@ |
|
21 * Petri Kutvonen <[email protected]> |
|
22 * Casper Dik <[email protected]> |
|
23 * Tim Pugh <[email protected]> |
|
24 + * Stanislav Kozina <[email protected]> |
|
25 */ |
|
26 |
|
27 #define _KMEMUSER |
|
28 @@ -112,6 +112,11 @@ |
|
29 #define USE_KSTAT |
|
30 #endif |
|
31 #ifdef USE_KSTAT |
|
32 + |
|
33 +# if OSREV >= 510 |
|
34 +# define USE_NAMED_CPU_KSTAT |
|
35 +# endif |
|
36 + |
|
37 #include <kstat.h> |
|
38 /* |
|
39 * Some kstats are fixed at 32 bits, these will be specified as ui32; some |
|
40 @@ -235,18 +240,35 @@ |
|
41 */ |
|
42 struct cpustats |
|
43 { |
|
44 - unsigned int states[CPUSTATES]; |
|
45 - uint_t pswitch; |
|
46 - uint_t trap; |
|
47 - uint_t intr; |
|
48 - uint_t syscall; |
|
49 - uint_t sysfork; |
|
50 - uint_t sysvfork; |
|
51 - uint_t pfault; |
|
52 - uint_t pgin; |
|
53 - uint_t pgout; |
|
54 + uint64_t states[CPUSTATES]; |
|
55 + uint64_t pswitch; |
|
56 + uint64_t trap; |
|
57 + uint64_t intr; |
|
58 + uint64_t syscall; |
|
59 + uint64_t sysfork; |
|
60 + uint64_t sysvfork; |
|
61 + uint64_t pfault; |
|
62 + uint64_t pgin; |
|
63 + uint64_t pgout; |
|
64 }; |
|
65 |
|
66 +#ifdef USE_NAMED_CPU_KSTAT |
|
67 +static int cpu_ticks_idle_index = -1; |
|
68 +static int cpu_ticks_user_index = -1; |
|
69 +static int cpu_ticks_kernel_index = -1; |
|
70 +static int cpu_ticks_stolen_index = -1; |
|
71 +static int pswitch_index = -1; |
|
72 +static int trap_index = -1; |
|
73 +static int intr_index = -1; |
|
74 +static int syscall_index = -1; |
|
75 +static int sysfork_index = -1; |
|
76 +static int sysvfork_index = -1; |
|
77 +static int hat_fault_index = -1; |
|
78 +static int as_fault_index = -1; |
|
79 +static int pgin_index = -1; |
|
80 +static int pgout_index = -1; |
|
81 +#endif |
|
82 + |
|
83 /* |
|
84 * GCC assumes that all doubles are aligned. Unfortunately it |
|
85 * doesn't round up the structure size to be a multiple of 8. |
|
86 @@ -280,9 +302,12 @@ |
|
87 |
|
88 int cpu_states[CPUSTATES]; |
|
89 char *cpustatenames[] = |
|
90 -{"idle", "user", "kernel", "iowait", "swap", NULL}; |
|
91 -#define CPUSTATE_IOWAIT 3 |
|
92 -#define CPUSTATE_SWAP 4 |
|
93 +{"idle", "user", "kernel", "stolen", "swap", NULL}; |
|
94 +#define CPUSTATE_IDLE 0 |
|
95 +#define CPUSTATE_USER 1 |
|
96 +#define CPUSTATE_KERNEL 2 |
|
97 +#define CPUSTATE_STOLEN 3 |
|
98 +#define CPUSTATE_SWAP 4 |
|
99 |
|
100 |
|
101 /* these are for detailing the memory statistics */ |
|
102 @@ -773,36 +798,33 @@ |
|
103 } |
|
104 |
|
105 /* |
|
106 - * int kstat_safe_namematch(int num, kstat_t *ksp, char *name, void *buf) |
|
107 - * |
|
108 - * Safe scan of kstat chain for names starting with "name". Matches |
|
109 - * are copied in to "ksp", and kstat_read is called on each match using |
|
110 - * "buf" as a buffer of length "size". The actual number of records |
|
111 - * found is returned. Up to "num" kstats are copied in to "ksp", but |
|
112 - * no more. If any kstat_read indicates that the chain has changed, then |
|
113 - * the whole process is restarted. |
|
114 + * Safe scan of kstat chain for names starting with "module", "name" or |
|
115 + * "class". Every condition is ignored if set to empty string. |
|
116 + * kstat_read is called on each match. The actual number of records |
|
117 + * found is returned. If any kstat_read indicates that the chain has changed, |
|
118 + * then the whole process is restarted. |
|
119 */ |
|
120 |
|
121 int |
|
122 -kstat_safe_namematch(int num, kstat_t **ksparg, char *name, void *buf, int size) |
|
123 - |
|
124 +kstat_safe_lookup(int num, kstat_t **ksparg, char *class, char *module, |
|
125 + char *name) |
|
126 { |
|
127 kstat_t *ks; |
|
128 kstat_t **ksp; |
|
129 kid_t new_kcid; |
|
130 - int namelen; |
|
131 + int classlen, modlen, namelen; |
|
132 int count; |
|
133 int changed; |
|
134 - char *cbuf; |
|
135 |
|
136 - dprintf("kstat_safe_namematch(%d, %08x, %s, %08x, %d)\n", |
|
137 - num, ksparg, name, buf, size); |
|
138 + dprintf("kstat_safe_lookup(%d, %08x, %s, %s, %s)\n", |
|
139 + num, ksparg, class, module, name); |
|
140 |
|
141 + classlen = strlen(class); |
|
142 + modlen = strlen(module); |
|
143 namelen = strlen(name); |
|
144 |
|
145 do { |
|
146 /* initialize before the scan */ |
|
147 - cbuf = (char *)buf; |
|
148 ksp = ksparg; |
|
149 count = 0; |
|
150 changed = 0; |
|
151 @@ -810,18 +832,23 @@ |
|
152 /* scan the chain for matching kstats */ |
|
153 for (ks = kc->kc_chain; ks != NULL; ks = ks->ks_next) |
|
154 { |
|
155 - if (strncmp(ks->ks_name, name, namelen) == 0) |
|
156 + if ((strncmp(ks->ks_class, class, classlen) == 0) && |
|
157 + (strncmp(ks->ks_module, module, modlen) == 0) && |
|
158 + (strncmp(ks->ks_name, name, namelen) == 0)) |
|
159 { |
|
160 + dprintf("kstat_safe_lookup found instance %d (%p)\n", |
|
161 + ks->ks_instance, ks); |
|
162 + |
|
163 /* this kstat matches: save it if there is room */ |
|
164 if (count++ < num) |
|
165 { |
|
166 /* read the kstat */ |
|
167 - new_kcid = kstat_read(kc, ks, cbuf); |
|
168 + new_kcid = kstat_read(kc, ks, NULL); |
|
169 |
|
170 /* if the chain changed, update it */ |
|
171 if (new_kcid != kcid) |
|
172 { |
|
173 - dprintf("kstat_safe_namematch: chain changed to %d...updating\n", |
|
174 + dprintf("kstat_safe_lookup: chain changed to %d...updating\n", |
|
175 new_kcid); |
|
176 changed = 1; |
|
177 kcid = kstat_chain_update(kc); |
|
178 @@ -832,18 +859,64 @@ |
|
179 } |
|
180 |
|
181 /* move to the next buffers */ |
|
182 - cbuf += size; |
|
183 *ksp++ = ks; |
|
184 } |
|
185 } |
|
186 } |
|
187 } while(changed); |
|
188 |
|
189 - dprintf("kstat_safe_namematch returns %d\n", count); |
|
190 + dprintf("kstat_safe_lookup returns %d\n", count); |
|
191 |
|
192 return count; |
|
193 } |
|
194 |
|
195 +#ifdef USE_NAMED_CPU_KSTAT |
|
196 +/* |
|
197 + * If index_ptr integer value is > -1 then the index points to the |
|
198 + * string entry in the ks_data that we are interested in. Otherwise |
|
199 + * we will need to walk the array. |
|
200 + */ |
|
201 +static uint64_t |
|
202 +kstat_safe_datalookup(kstat_t *kstat, char *name, int *index_ptr) |
|
203 +{ |
|
204 + int i; |
|
205 + int size; |
|
206 + int index; |
|
207 + char *namep, *datap; |
|
208 + kstat_named_t *data; |
|
209 + |
|
210 + if (kstat->ks_type != KSTAT_TYPE_NAMED) |
|
211 + return (0); |
|
212 + |
|
213 + size = sizeof (kstat_named_t); |
|
214 + namep = KSTAT_NAMED_PTR(kstat)->name; |
|
215 + |
|
216 + index = *index_ptr; |
|
217 + if (index >= 0) { |
|
218 + /* Short cut to the information. */ |
|
219 + datap = kstat->ks_data; |
|
220 + data = (kstat_named_t *)&datap[size*index]; |
|
221 + if (data->data_type != KSTAT_DATA_UINT64) |
|
222 + return (0); |
|
223 + return (data->value.ui64); |
|
224 + } |
|
225 + |
|
226 + /* Need to go find the string. */ |
|
227 + data = kstat->ks_data; |
|
228 + for (i = 0; i < kstat->ks_ndata; i++) { |
|
229 + if (strcmp(name, namep) == 0) { |
|
230 + *index_ptr = i; |
|
231 + if (data->data_type != KSTAT_DATA_UINT64) |
|
232 + return (0); |
|
233 + return (data->value.ui64); |
|
234 + } |
|
235 + namep += size; |
|
236 + data++; |
|
237 + } |
|
238 + return (0); |
|
239 +} |
|
240 +#endif /* USE_NAMED_CPU_KSTAT */ |
|
241 + |
|
242 static kstat_t *ks_system_misc = NULL; |
|
243 |
|
244 #endif /* USE_KSTAT */ |
|
245 @@ -936,36 +1009,55 @@ |
|
246 |
|
247 { |
|
248 #ifdef USE_KSTAT |
|
249 +#ifdef USE_NAMED_CPU_KSTAT |
|
250 + static kstat_t **cpu_sys_ks = NULL, **cpu_vm_ks = NULL; |
|
251 + kstat_t *cpu_sys_stat_p, *cpu_vm_stat_p; |
|
252 +#else |
|
253 static kstat_t **cpu_ks = NULL; |
|
254 - static cpu_stat_t *cpu_stat = NULL; |
|
255 + cpu_stat_t cpu_stat; |
|
256 +#endif |
|
257 + static kid_t cpu_kcid = 0; |
|
258 static unsigned int nelems = 0; |
|
259 - cpu_stat_t *cpu_stat_p; |
|
260 - int i, cpu_num; |
|
261 + static int cpu_num = 0, cpu_vm_num = 0; |
|
262 + int i; |
|
263 struct cpustats *cpustats_p; |
|
264 |
|
265 dprintf("get_cpustats(%d -> %d, %08x)\n", cnt, *cnt, cpustats); |
|
266 |
|
267 - while (nelems > 0 ? |
|
268 - (cpu_num = kstat_safe_namematch(nelems, |
|
269 - cpu_ks, |
|
270 - "cpu_stat", |
|
271 - cpu_stat, |
|
272 - sizeof(cpu_stat_t))) > nelems : |
|
273 - (cpu_num = get_ncpus()) > 0) |
|
274 + /* Grab initial number of cpus */ |
|
275 + if (nelems == 0) { |
|
276 + cpu_vm_num = cpu_num = get_ncpus(); |
|
277 + if (cpu_num <= 0) |
|
278 + return (cpustats); |
|
279 + } |
|
280 + |
|
281 +reload: |
|
282 + /* Make sure we have enough space for all cpus */ |
|
283 + while (cpu_kcid != kcid || cpu_num != cpu_vm_num || cpu_num > nelems) |
|
284 { |
|
285 /* reallocate the arrays */ |
|
286 dprintf("realloc from %d to %d\n", nelems, cpu_num); |
|
287 nelems = cpu_num; |
|
288 - if (cpu_ks != NULL) |
|
289 - { |
|
290 - free(cpu_ks); |
|
291 +#ifdef USE_NAMED_CPU_KSTAT |
|
292 + free(cpu_sys_ks); |
|
293 + free(cpu_vm_ks); |
|
294 + cpu_sys_ks = (kstat_t **)calloc(nelems, sizeof(kstat_t *)); |
|
295 + cpu_vm_ks = (kstat_t **)calloc(nelems, sizeof(kstat_t *)); |
|
296 + |
|
297 + if (cpu_sys_ks == NULL || cpu_vm_ks == NULL) { |
|
298 + puts("Allocation failed"); |
|
299 + abort(); |
|
300 } |
|
301 + |
|
302 + cpu_num = kstat_safe_lookup(nelems, cpu_sys_ks, "misc", "cpu", "sys"); |
|
303 + cpu_vm_num = kstat_safe_lookup(nelems, cpu_vm_ks, "misc", "cpu", "vm"); |
|
304 +#else |
|
305 + free(cpu_ks); |
|
306 cpu_ks = (kstat_t **)calloc(nelems, sizeof(kstat_t *)); |
|
307 - if (cpu_stat != NULL) |
|
308 - { |
|
309 - free(cpu_stat); |
|
310 - } |
|
311 - cpu_stat = (cpu_stat_t *)malloc(nelems * sizeof(cpu_stat_t)); |
|
312 + cpu_num = cpu_vm_num = |
|
313 + kstat_safe_lookup(nelems, cpu_ks, "misc", "cpu_stat", ""); |
|
314 +#endif |
|
315 + cpu_kcid = kcid; |
|
316 } |
|
317 |
|
318 /* do we have more cpus than our caller? */ |
|
319 @@ -975,36 +1067,96 @@ |
|
320 dprintf("realloc array from %d to %d\n", *cnt, cpu_num); |
|
321 *cnt = cpu_num; |
|
322 cpustats = (struct cpustats *)realloc(cpustats, |
|
323 - cpu_num * sizeof(struct cpustats)); |
|
324 + cpu_num * sizeof(struct cpustats)); |
|
325 } |
|
326 |
|
327 - cpu_stat_p = cpu_stat; |
|
328 cpustats_p = cpustats; |
|
329 for (i = 0; i < cpu_num; i++) |
|
330 { |
|
331 - dprintf("cpu %d %08x: idle %u, user %u, syscall %u\n", i, cpu_stat_p, |
|
332 - cpu_stat_p->cpu_sysinfo.cpu[0], |
|
333 - cpu_stat_p->cpu_sysinfo.cpu[1], |
|
334 - cpu_stat_p->cpu_sysinfo.syscall); |
|
335 +#ifdef USE_NAMED_CPU_KSTAT |
|
336 + cpu_sys_stat_p = cpu_sys_ks[i]; |
|
337 + cpu_vm_stat_p = cpu_vm_ks[i]; |
|
338 |
|
339 - cpustats_p->states[CPU_IDLE] = cpu_stat_p->cpu_sysinfo.cpu[CPU_IDLE]; |
|
340 - cpustats_p->states[CPU_USER] = cpu_stat_p->cpu_sysinfo.cpu[CPU_USER]; |
|
341 - cpustats_p->states[CPU_KERNEL] = cpu_stat_p->cpu_sysinfo.cpu[CPU_KERNEL]; |
|
342 - cpustats_p->states[CPUSTATE_IOWAIT] = cpu_stat_p->cpu_sysinfo.wait[W_IO] + |
|
343 - cpu_stat_p->cpu_sysinfo.wait[W_PIO]; |
|
344 - cpustats_p->states[CPUSTATE_SWAP] = cpu_stat_p->cpu_sysinfo.wait[W_SWAP]; |
|
345 - cpustats_p->pswitch = cpu_stat_p->cpu_sysinfo.pswitch; |
|
346 - cpustats_p->trap = cpu_stat_p->cpu_sysinfo.trap; |
|
347 - cpustats_p->intr = cpu_stat_p->cpu_sysinfo.intr; |
|
348 - cpustats_p->syscall = cpu_stat_p->cpu_sysinfo.syscall; |
|
349 - cpustats_p->sysfork = cpu_stat_p->cpu_sysinfo.sysfork; |
|
350 - cpustats_p->sysvfork = cpu_stat_p->cpu_sysinfo.sysvfork; |
|
351 - cpustats_p->pfault = cpu_stat_p->cpu_vminfo.hat_fault + |
|
352 - cpu_stat_p->cpu_vminfo.as_fault; |
|
353 - cpustats_p->pgin = cpu_stat_p->cpu_vminfo.pgin; |
|
354 - cpustats_p->pgout = cpu_stat_p->cpu_vminfo.pgout; |
|
355 + /* |
|
356 + * If kstat id has changed, update the kstat headers and re-allocate |
|
357 + * the arrays accordingly. |
|
358 + */ |
|
359 + if (((cpu_kcid = kstat_read(kc, cpu_sys_stat_p, NULL)) != kcid) || |
|
360 + ((cpu_kcid = kstat_read(kc, cpu_vm_stat_p, NULL)) != kcid)) |
|
361 + goto reload; |
|
362 + |
|
363 + dprintf("cpu %d %08x: idle %llu, user %llu, syscall %llu\n", |
|
364 + i, cpu_sys_stat_p, |
|
365 + kstat_safe_datalookup(cpu_sys_stat_p, "cpu_ticks_idle", |
|
366 + &cpu_ticks_idle_index), |
|
367 + kstat_safe_datalookup(cpu_sys_stat_p, "cpu_ticks_user", |
|
368 + &cpu_ticks_user_index), |
|
369 + kstat_safe_datalookup(cpu_sys_stat_p, "syscall", |
|
370 + &syscall_index)); |
|
371 + |
|
372 + cpustats_p->states[CPUSTATE_IDLE] = |
|
373 + kstat_safe_datalookup(cpu_sys_stat_p, "cpu_ticks_idle", |
|
374 + &cpu_ticks_idle_index); |
|
375 + cpustats_p->states[CPUSTATE_USER] = |
|
376 + kstat_safe_datalookup(cpu_sys_stat_p, "cpu_ticks_user", |
|
377 + &cpu_ticks_user_index); |
|
378 + cpustats_p->states[CPUSTATE_KERNEL] = |
|
379 + kstat_safe_datalookup(cpu_sys_stat_p, "cpu_ticks_kernel", |
|
380 + &cpu_ticks_kernel_index); |
|
381 + cpustats_p->states[CPUSTATE_STOLEN] = |
|
382 + kstat_safe_datalookup(cpu_sys_stat_p, "cpu_ticks_stolen", |
|
383 + &cpu_ticks_stolen_index); |
|
384 + cpustats_p->states[CPUSTATE_SWAP] = 0; |
|
385 + cpustats_p->pswitch = kstat_safe_datalookup(cpu_sys_stat_p, "pswitch", |
|
386 + &pswitch_index); |
|
387 + cpustats_p->trap = kstat_safe_datalookup(cpu_sys_stat_p, "trap", |
|
388 + &trap_index); |
|
389 + cpustats_p->intr = kstat_safe_datalookup(cpu_sys_stat_p, "intr", |
|
390 + &intr_index); |
|
391 + cpustats_p->syscall = kstat_safe_datalookup(cpu_sys_stat_p, "syscall", |
|
392 + &syscall_index); |
|
393 + cpustats_p->sysfork = kstat_safe_datalookup(cpu_sys_stat_p, "sysfork", |
|
394 + &sysfork_index); |
|
395 + cpustats_p->sysvfork = kstat_safe_datalookup(cpu_sys_stat_p, "sysvfork", |
|
396 + &sysvfork_index); |
|
397 + cpustats_p->pfault = kstat_safe_datalookup(cpu_vm_stat_p, "hat_fault", |
|
398 + &hat_fault_index) + |
|
399 + kstat_safe_datalookup(cpu_vm_stat_p, "as_fault", |
|
400 + &as_fault_index); |
|
401 + cpustats_p->pgin = kstat_safe_datalookup(cpu_vm_stat_p, "pgin", |
|
402 + &pgin_index); |
|
403 + cpustats_p->pgout = kstat_safe_datalookup(cpu_vm_stat_p, "pgout", |
|
404 + &pgout_index); |
|
405 +#else |
|
406 + if ((cpu_kcid = kstat_read(kc, cpu_ks[i], &cpu_stat)) != kcid) |
|
407 + goto reload; |
|
408 + |
|
409 + dprintf("cpu %d %08x: idle %u, user %u, syscall %u\n", i, &cpu_stat, |
|
410 + cpu_stat.cpu_sysinfo.cpu[CPU_IDLE], |
|
411 + cpu_stat.cpu_sysinfo.cpu[CPU_USER], |
|
412 + cpu_stat.cpu_sysinfo.syscall); |
|
413 + |
|
414 + cpustats_p->states[CPUSTATE_IDLE] = |
|
415 + (uint64_t)cpu_stat.cpu_sysinfo.cpu[CPU_IDLE]; |
|
416 + cpustats_p->states[CPUSTATE_USER] = |
|
417 + (uint64_t)cpu_stat.cpu_sysinfo.cpu[CPU_USER]; |
|
418 + cpustats_p->states[CPUSTATE_KERNEL] = |
|
419 + (uint64_t)cpu_stat.cpu_sysinfo.cpu[CPU_KERNEL]; |
|
420 + cpustats_p->states[CPUSTATE_STOLEN] = 0; |
|
421 + cpustats_p->states[CPUSTATE_SWAP] = |
|
422 + (uint64_t)cpu_stat.cpu_sysinfo.wait[W_SWAP]; |
|
423 + cpustats_p->pswitch = (uint64_t)cpu_stat.cpu_sysinfo.pswitch; |
|
424 + cpustats_p->trap = (uint64_t)cpu_stat.cpu_sysinfo.trap; |
|
425 + cpustats_p->intr = (uint64_t)cpu_stat.cpu_sysinfo.intr; |
|
426 + cpustats_p->syscall = (uint64_t)cpu_stat.cpu_sysinfo.syscall; |
|
427 + cpustats_p->sysfork = (uint64_t)cpu_stat.cpu_sysinfo.sysfork; |
|
428 + cpustats_p->sysvfork = (uint64_t)cpu_stat.cpu_sysinfo.sysvfork; |
|
429 + cpustats_p->pfault = (uint64_t)cpu_stat.cpu_vminfo.hat_fault + |
|
430 + (uint64_t)cpu_stat.cpu_vminfo.as_fault; |
|
431 + cpustats_p->pgin = (uint64_t)cpu_stat.cpu_vminfo.pgin; |
|
432 + cpustats_p->pgout = (uint64_t)cpu_stat.cpu_vminfo.pgout; |
|
433 +#endif |
|
434 cpustats_p++; |
|
435 - cpu_stat_p++; |
|
436 } |
|
437 |
|
438 cpucount = cpu_num; |
|
439 @@ -1035,11 +1187,10 @@ |
|
440 /* get struct cpu for this processor */ |
|
441 (void) getkval (cpu_offset[i], (int *)(&cpu), sizeof (struct cpu), "cpu"); |
|
442 |
|
443 - (*cp_stats_p)[CPU_IDLE] = cpu.cpu_stat.cpu_sysinfo.cpu[CPU_IDLE]; |
|
444 - (*cp_stats_p)[CPU_USER] = cpu.cpu_stat.cpu_sysinfo.cpu[CPU_USER]; |
|
445 - (*cp_stats_p)[CPU_KERNEL] = cpu.cpu_stat.cpu_sysinfo.cpu[CPU_KERNEL]; |
|
446 - (*cp_stats_p)[CPUSTATE_IOWAIT] = cpu.cpu_stat.cpu_sysinfo.wait[W_IO] + |
|
447 - cpu.cpu_stat.cpu_sysinfo.wait[W_PIO]; |
|
448 + (*cp_stats_p)[CPUSTATE_IDLE] = cpu.cpu_stat.cpu_sysinfo.cpu[CPU_IDLE]; |
|
449 + (*cp_stats_p)[CPUSTATE_USER] = cpu.cpu_stat.cpu_sysinfo.cpu[CPU_USER]; |
|
450 + (*cp_stats_p)[CPUSTATE_KERNEL] = cpu.cpu_stat.cpu_sysinfo.cpu[CPU_KERNEL]; |
|
451 + (*cp_stats_p)[CPUSTATE_STOLEN] = 0; |
|
452 (*cp_stats_p)[CPUSTATE_SWAP] = cpu.cpu_stat.cpu_sysinfo.wait[W_SWAP]; |
|
453 cp_stats_p++; |
|
454 } |
|
455 @@ -1394,14 +1545,55 @@ |
|
456 return (0); |
|
457 } |
|
458 |
|
459 +static void |
|
460 +percentages64(int cnt, int *out, uint64_t *new, uint64_t *old, uint64_t *diffs) |
|
461 +{ |
|
462 + int i; |
|
463 + uint64_t change; |
|
464 + uint64_t total_change; |
|
465 + uint64_t *dp; |
|
466 + uint64_t half_total; |
|
467 + |
|
468 + /* initialization */ |
|
469 + total_change = 0; |
|
470 + dp = diffs; |
|
471 + |
|
472 + /* calculate changes for each state and the overall change */ |
|
473 + for (i = 0; i < cnt; i++) { |
|
474 + /* |
|
475 + * Don't worry about wrapping - even at hz=1GHz, a |
|
476 + * uint64_t will last at least 544 years. |
|
477 + */ |
|
478 + change = *new - *old; |
|
479 + total_change += (*dp++ = change); |
|
480 + *old++ = *new++; |
|
481 + } |
|
482 + |
|
483 + /* avoid divide by zero potential */ |
|
484 + if (total_change == 0) |
|
485 + total_change = 1; |
|
486 + |
|
487 + /* calculate percentages based on overall change, rounding up */ |
|
488 + half_total = total_change / 2; |
|
489 + for (i = 0; i < cnt; i++) |
|
490 + *out++ = (int)((*diffs++ * 1000 + half_total) / total_change); |
|
491 +} |
|
492 + |
|
493 +static uint_t |
|
494 +diff_per_second64(uint64_t x, uint64_t y) |
|
495 +{ |
|
496 + uint64_t diff = y > x ? UINT64_MAX - y + x + 1 : x - y; |
|
497 + return ((uint_t)(diff * 1000 / time_elapsed())); |
|
498 +} |
|
499 + |
|
500 void |
|
501 get_system_info (struct system_info *si) |
|
502 { |
|
503 int avenrun[3]; |
|
504 |
|
505 - static long cp_time[CPUSTATES]; |
|
506 - static long cp_old[CPUSTATES]; |
|
507 - static long cp_diff[CPUSTATES]; |
|
508 + static uint64_t cp_time[CPUSTATES]; |
|
509 + static uint64_t cp_old[CPUSTATES]; |
|
510 + static uint64_t cp_diff[CPUSTATES]; |
|
511 static struct cpustats *cpustats = NULL; |
|
512 static struct cpustats sum_current; |
|
513 static struct cpustats sum_old; |
|
514 @@ -1440,7 +1632,7 @@ |
|
515 } |
|
516 |
|
517 /* convert cp_time counts to percentages */ |
|
518 - (void) percentages (CPUSTATES, cpu_states, cp_time, cp_old, cp_diff); |
|
519 + (void) percentages64 (CPUSTATES, cpu_states, cp_time, cp_old, cp_diff); |
|
520 |
|
521 /* get mpid -- process id of last process */ |
|
522 if (kd) |
|
523 @@ -1467,15 +1659,15 @@ |
|
524 } |
|
525 |
|
526 /* get kernel data */ |
|
527 - kernel_stats[KERNEL_CSWITCH] = diff_per_second(sum_current.pswitch, sum_old.pswitch); |
|
528 - kernel_stats[KERNEL_TRAP] = diff_per_second(sum_current.trap, sum_old.trap); |
|
529 - kernel_stats[KERNEL_INTR] = diff_per_second(sum_current.intr, sum_old.intr); |
|
530 - kernel_stats[KERNEL_SYSCALL] = diff_per_second(sum_current.syscall, sum_old.syscall); |
|
531 - kernel_stats[KERNEL_FORK] = diff_per_second(sum_current.sysfork + sum_current.sysvfork, |
|
532 + kernel_stats[KERNEL_CSWITCH] = diff_per_second64(sum_current.pswitch, sum_old.pswitch); |
|
533 + kernel_stats[KERNEL_TRAP] = diff_per_second64(sum_current.trap, sum_old.trap); |
|
534 + kernel_stats[KERNEL_INTR] = diff_per_second64(sum_current.intr, sum_old.intr); |
|
535 + kernel_stats[KERNEL_SYSCALL] = diff_per_second64(sum_current.syscall, sum_old.syscall); |
|
536 + kernel_stats[KERNEL_FORK] = diff_per_second64(sum_current.sysfork + sum_current.sysvfork, |
|
537 sum_old.sysfork + sum_old.sysvfork); |
|
538 - kernel_stats[KERNEL_PFAULT] = diff_per_second(sum_current.pfault, sum_old.pfault); |
|
539 - kernel_stats[KERNEL_PGIN] = pagetok(diff_per_second(sum_current.pgin, sum_old.pgin)); |
|
540 - kernel_stats[KERNEL_PGOUT] = pagetok(diff_per_second(sum_current.pgout, sum_old.pgout)); |
|
541 + kernel_stats[KERNEL_PFAULT] = diff_per_second64(sum_current.pfault, sum_old.pfault); |
|
542 + kernel_stats[KERNEL_PGIN] = pagetok(diff_per_second64(sum_current.pgin, sum_old.pgin)); |
|
543 + kernel_stats[KERNEL_PGOUT] = pagetok(diff_per_second64(sum_current.pgout, sum_old.pgout)); |
|
544 |
|
545 |
|
546 /* set arrays and strings */ |