6287 } |
6263 } |
6288 |
6264 |
6289 return (MDI_SUCCESS); |
6265 return (MDI_SUCCESS); |
6290 } |
6266 } |
6291 |
6267 |
6292 /* |
6268 void * |
6293 * XXX This list should include all phci drivers needed during boot time |
6269 mdi_client_get_vhci_private(dev_info_t *dip) |
6294 * though it currently contains "fp" only. |
6270 { |
6295 * Hopefully, the mechanism provided here will be replaced with a better |
6271 ASSERT(mdi_component_is_client(dip, NULL) == MDI_SUCCESS); |
6296 * mechanism by vhci driven enumeration project. |
6272 if (mdi_component_is_client(dip, NULL) == MDI_SUCCESS) { |
6297 */ |
6273 mdi_client_t *ct; |
6298 static char *phci_driver_list[] = { "fp" }; |
6274 ct = i_devi_get_client(dip); |
6299 #define N_PHCI_DRIVERS (sizeof (phci_driver_list) / sizeof (char *)) |
6275 return (ct->ct_vprivate); |
6300 |
6276 } |
|
6277 return (NULL); |
|
6278 } |
|
6279 |
|
6280 void |
|
6281 mdi_client_set_vhci_private(dev_info_t *dip, void *data) |
|
6282 { |
|
6283 ASSERT(mdi_component_is_client(dip, NULL) == MDI_SUCCESS); |
|
6284 if (mdi_component_is_client(dip, NULL) == MDI_SUCCESS) { |
|
6285 mdi_client_t *ct; |
|
6286 ct = i_devi_get_client(dip); |
|
6287 ct->ct_vprivate = data; |
|
6288 } |
|
6289 } |
|
6290 /* |
|
6291 * mdi_pi_get_vhci_private(): |
|
6292 * Get the vhci private information associated with the |
|
6293 * mdi_pathinfo node |
|
6294 */ |
|
6295 void * |
|
6296 mdi_pi_get_vhci_private(mdi_pathinfo_t *pip) |
|
6297 { |
|
6298 caddr_t vprivate = NULL; |
|
6299 if (pip) { |
|
6300 vprivate = MDI_PI(pip)->pi_vprivate; |
|
6301 } |
|
6302 return (vprivate); |
|
6303 } |
|
6304 |
|
6305 /* |
|
6306 * mdi_pi_set_vhci_private(): |
|
6307 * Set the vhci private information in the mdi_pathinfo node |
|
6308 */ |
|
6309 void |
|
6310 mdi_pi_set_vhci_private(mdi_pathinfo_t *pip, void *priv) |
|
6311 { |
|
6312 if (pip) { |
|
6313 MDI_PI(pip)->pi_vprivate = priv; |
|
6314 } |
|
6315 } |
|
6316 |
|
6317 /* |
|
6318 * mdi_phci_get_vhci_private(): |
|
6319 * Get the vhci private information associated with the |
|
6320 * mdi_phci node |
|
6321 */ |
|
6322 void * |
|
6323 mdi_phci_get_vhci_private(dev_info_t *dip) |
|
6324 { |
|
6325 ASSERT(mdi_component_is_phci(dip, NULL) == MDI_SUCCESS); |
|
6326 if (mdi_component_is_phci(dip, NULL) == MDI_SUCCESS) { |
|
6327 mdi_phci_t *ph; |
|
6328 ph = i_devi_get_phci(dip); |
|
6329 return (ph->ph_vprivate); |
|
6330 } |
|
6331 return (NULL); |
|
6332 } |
|
6333 |
|
6334 /* |
|
6335 * mdi_phci_set_vhci_private(): |
|
6336 * Set the vhci private information in the mdi_phci node |
|
6337 */ |
|
6338 void |
|
6339 mdi_phci_set_vhci_private(dev_info_t *dip, void *priv) |
|
6340 { |
|
6341 ASSERT(mdi_component_is_phci(dip, NULL) == MDI_SUCCESS); |
|
6342 if (mdi_component_is_phci(dip, NULL) == MDI_SUCCESS) { |
|
6343 mdi_phci_t *ph; |
|
6344 ph = i_devi_get_phci(dip); |
|
6345 ph->ph_vprivate = priv; |
|
6346 } |
|
6347 } |
|
6348 |
|
6349 /* |
|
6350 * List of vhci class names: |
|
6351 * A vhci class name must be in this list only if the corresponding vhci |
|
6352 * driver intends to use the mdi provided bus config implementation |
|
6353 * (i.e., mdi_vhci_bus_config()). |
|
6354 */ |
|
6355 static char *vhci_class_list[] = { MDI_HCI_CLASS_SCSI, MDI_HCI_CLASS_IB }; |
|
6356 #define N_VHCI_CLASSES (sizeof (vhci_class_list) / sizeof (char *)) |
|
6357 |
|
6358 /* |
|
6359 * Built-in list of phci drivers for every vhci class. |
|
6360 * All phci drivers expect iscsi have root device support. |
|
6361 */ |
|
6362 static mdi_phci_driver_info_t scsi_phci_driver_list[] = { |
|
6363 { "fp", 1 }, |
|
6364 { "iscsi", 0 }, |
|
6365 { "ibsrp", 1 } |
|
6366 }; |
|
6367 |
|
6368 static mdi_phci_driver_info_t ib_phci_driver_list[] = { "tavor", 1 }; |
|
6369 |
|
6370 /* |
|
6371 * During boot time, the on-disk vhci cache for every vhci class is read |
|
6372 * in the form of an nvlist and stored here. |
|
6373 */ |
|
6374 static nvlist_t *vhcache_nvl[N_VHCI_CLASSES]; |
|
6375 |
|
6376 /* nvpair names in vhci cache nvlist */ |
|
6377 #define MDI_VHCI_CACHE_VERSION 1 |
|
6378 #define MDI_NVPNAME_VERSION "version" |
|
6379 #define MDI_NVPNAME_PHCIS "phcis" |
|
6380 #define MDI_NVPNAME_CTADDRMAP "clientaddrmap" |
|
6381 |
|
6382 typedef enum { |
|
6383 VHCACHE_NOT_REBUILT, |
|
6384 VHCACHE_PARTIALLY_BUILT, |
|
6385 VHCACHE_FULLY_BUILT |
|
6386 } vhcache_build_status_t; |
|
6387 |
|
6388 /* |
|
6389 * Given vhci class name, return its on-disk vhci cache filename. |
|
6390 * Memory for the returned filename which includes the full path is allocated |
|
6391 * by this function. |
|
6392 */ |
|
6393 static char * |
|
6394 vhclass2vhcache_filename(char *vhclass) |
|
6395 { |
|
6396 char *filename; |
|
6397 int len; |
|
6398 static char *fmt = "/etc/devices/mdi_%s_cache"; |
|
6399 |
|
6400 /* |
|
6401 * fmt contains the on-disk vhci cache file name format; |
|
6402 * for scsi_vhci the filename is "/etc/devices/mdi_scsi_vhci_cache". |
|
6403 */ |
|
6404 |
|
6405 /* the -1 below is to account for "%s" in the format string */ |
|
6406 len = strlen(fmt) + strlen(vhclass) - 1; |
|
6407 filename = kmem_alloc(len, KM_SLEEP); |
|
6408 (void) snprintf(filename, len, fmt, vhclass); |
|
6409 ASSERT(len == (strlen(filename) + 1)); |
|
6410 return (filename); |
|
6411 } |
|
6412 |
|
6413 /* |
|
6414 * initialize the vhci cache related data structures and read the on-disk |
|
6415 * vhci cached data into memory. |
|
6416 */ |
6301 static void |
6417 static void |
6302 i_mdi_attach_phci_drivers() |
6418 setup_vhci_cache(mdi_vhci_t *vh) |
|
6419 { |
|
6420 mdi_vhci_config_t *vhc; |
|
6421 mdi_vhci_cache_t *vhcache; |
|
6422 int i; |
|
6423 nvlist_t *nvl = NULL; |
|
6424 |
|
6425 vhc = kmem_zalloc(sizeof (mdi_vhci_config_t), KM_SLEEP); |
|
6426 vh->vh_config = vhc; |
|
6427 vhcache = &vhc->vhc_vhcache; |
|
6428 |
|
6429 vhc->vhc_vhcache_filename = vhclass2vhcache_filename(vh->vh_class); |
|
6430 |
|
6431 mutex_init(&vhc->vhc_lock, NULL, MUTEX_DEFAULT, NULL); |
|
6432 cv_init(&vhc->vhc_cv, NULL, CV_DRIVER, NULL); |
|
6433 |
|
6434 rw_init(&vhcache->vhcache_lock, NULL, RW_DRIVER, NULL); |
|
6435 |
|
6436 /* |
|
6437 * Create string hash; same as mod_hash_create_strhash() except that |
|
6438 * we use NULL key destructor. |
|
6439 */ |
|
6440 vhcache->vhcache_client_hash = mod_hash_create_extended(vh->vh_class, |
|
6441 mdi_bus_config_cache_hash_size, |
|
6442 mod_hash_null_keydtor, mod_hash_null_valdtor, |
|
6443 mod_hash_bystr, NULL, mod_hash_strkey_cmp, KM_SLEEP); |
|
6444 |
|
6445 setup_phci_driver_list(vh); |
|
6446 |
|
6447 /* |
|
6448 * The on-disk vhci cache is read during booting prior to the |
|
6449 * lights-out period by mdi_read_devices_files(). |
|
6450 */ |
|
6451 for (i = 0; i < N_VHCI_CLASSES; i++) { |
|
6452 if (strcmp(vhci_class_list[i], vh->vh_class) == 0) { |
|
6453 nvl = vhcache_nvl[i]; |
|
6454 vhcache_nvl[i] = NULL; |
|
6455 break; |
|
6456 } |
|
6457 } |
|
6458 |
|
6459 /* |
|
6460 * this is to cover the case of some one manually causing unloading |
|
6461 * (or detaching) and reloading (or attaching) of a vhci driver. |
|
6462 */ |
|
6463 if (nvl == NULL && modrootloaded) |
|
6464 nvl = read_on_disk_vhci_cache(vh->vh_class); |
|
6465 |
|
6466 if (nvl != NULL) { |
|
6467 rw_enter(&vhcache->vhcache_lock, RW_WRITER); |
|
6468 if (mainnvl_to_vhcache(vhcache, nvl) == MDI_SUCCESS) |
|
6469 vhcache->vhcache_flags |= MDI_VHCI_CACHE_SETUP_DONE; |
|
6470 else { |
|
6471 cmn_err(CE_WARN, |
|
6472 "%s: data file corrupted, will recreate\n", |
|
6473 vhc->vhc_vhcache_filename); |
|
6474 } |
|
6475 rw_exit(&vhcache->vhcache_lock); |
|
6476 nvlist_free(nvl); |
|
6477 } |
|
6478 |
|
6479 vhc->vhc_cbid = callb_add(stop_vhcache_flush_thread, vhc, |
|
6480 CB_CL_UADMIN_PRE_VFS, "mdi_vhcache_flush"); |
|
6481 } |
|
6482 |
|
6483 /* |
|
6484 * free all vhci cache related resources |
|
6485 */ |
|
6486 static int |
|
6487 destroy_vhci_cache(mdi_vhci_t *vh) |
|
6488 { |
|
6489 mdi_vhci_config_t *vhc = vh->vh_config; |
|
6490 mdi_vhci_cache_t *vhcache = &vhc->vhc_vhcache; |
|
6491 mdi_vhcache_phci_t *cphci, *cphci_next; |
|
6492 mdi_vhcache_client_t *cct, *cct_next; |
|
6493 mdi_vhcache_pathinfo_t *cpi, *cpi_next; |
|
6494 |
|
6495 if (stop_vhcache_async_threads(vhc) != MDI_SUCCESS) |
|
6496 return (MDI_FAILURE); |
|
6497 |
|
6498 kmem_free(vhc->vhc_vhcache_filename, |
|
6499 strlen(vhc->vhc_vhcache_filename) + 1); |
|
6500 |
|
6501 if (vhc->vhc_phci_driver_list) |
|
6502 free_phci_driver_list(vhc); |
|
6503 |
|
6504 mod_hash_destroy_strhash(vhcache->vhcache_client_hash); |
|
6505 |
|
6506 for (cphci = vhcache->vhcache_phci_head; cphci != NULL; |
|
6507 cphci = cphci_next) { |
|
6508 cphci_next = cphci->cphci_next; |
|
6509 free_vhcache_phci(cphci); |
|
6510 } |
|
6511 |
|
6512 for (cct = vhcache->vhcache_client_head; cct != NULL; cct = cct_next) { |
|
6513 cct_next = cct->cct_next; |
|
6514 for (cpi = cct->cct_cpi_head; cpi != NULL; cpi = cpi_next) { |
|
6515 cpi_next = cpi->cpi_next; |
|
6516 free_vhcache_pathinfo(cpi); |
|
6517 } |
|
6518 free_vhcache_client(cct); |
|
6519 } |
|
6520 |
|
6521 rw_destroy(&vhcache->vhcache_lock); |
|
6522 |
|
6523 mutex_destroy(&vhc->vhc_lock); |
|
6524 cv_destroy(&vhc->vhc_cv); |
|
6525 kmem_free(vhc, sizeof (mdi_vhci_config_t)); |
|
6526 return (MDI_SUCCESS); |
|
6527 } |
|
6528 |
|
6529 /* |
|
6530 * Setup the list of phci drivers associated with the specified vhci class. |
|
6531 * MDI uses this information to rebuild bus config cache if in case the |
|
6532 * cache is not available or corrupted. |
|
6533 */ |
|
6534 static void |
|
6535 setup_phci_driver_list(mdi_vhci_t *vh) |
|
6536 { |
|
6537 mdi_vhci_config_t *vhc = vh->vh_config; |
|
6538 mdi_phci_driver_info_t *driver_list; |
|
6539 char **driver_list1; |
|
6540 uint_t ndrivers, ndrivers1; |
|
6541 int i, j; |
|
6542 |
|
6543 if (strcmp(vh->vh_class, MDI_HCI_CLASS_SCSI) == 0) { |
|
6544 driver_list = scsi_phci_driver_list; |
|
6545 ndrivers = sizeof (scsi_phci_driver_list) / |
|
6546 sizeof (mdi_phci_driver_info_t); |
|
6547 } else if (strcmp(vh->vh_class, MDI_HCI_CLASS_IB) == 0) { |
|
6548 driver_list = ib_phci_driver_list; |
|
6549 ndrivers = sizeof (ib_phci_driver_list) / |
|
6550 sizeof (mdi_phci_driver_info_t); |
|
6551 } else { |
|
6552 driver_list = NULL; |
|
6553 ndrivers = 0; |
|
6554 } |
|
6555 |
|
6556 /* |
|
6557 * The driver.conf file of a vhci driver can specify additional |
|
6558 * phci drivers using a project private "phci-drivers" property. |
|
6559 */ |
|
6560 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, vh->vh_dip, |
|
6561 DDI_PROP_DONTPASS, "phci-drivers", &driver_list1, |
|
6562 &ndrivers1) != DDI_PROP_SUCCESS) |
|
6563 ndrivers1 = 0; |
|
6564 |
|
6565 vhc->vhc_nphci_drivers = ndrivers + ndrivers1; |
|
6566 if (vhc->vhc_nphci_drivers == 0) |
|
6567 return; |
|
6568 |
|
6569 vhc->vhc_phci_driver_list = kmem_alloc( |
|
6570 sizeof (mdi_phci_driver_info_t) * vhc->vhc_nphci_drivers, KM_SLEEP); |
|
6571 |
|
6572 for (i = 0; i < ndrivers; i++) { |
|
6573 vhc->vhc_phci_driver_list[i].phdriver_name = |
|
6574 i_ddi_strdup(driver_list[i].phdriver_name, KM_SLEEP); |
|
6575 vhc->vhc_phci_driver_list[i].phdriver_root_support = |
|
6576 driver_list[i].phdriver_root_support; |
|
6577 } |
|
6578 |
|
6579 for (j = 0; j < ndrivers1; j++, i++) { |
|
6580 vhc->vhc_phci_driver_list[i].phdriver_name = |
|
6581 i_ddi_strdup(driver_list1[j], KM_SLEEP); |
|
6582 vhc->vhc_phci_driver_list[i].phdriver_root_support = 1; |
|
6583 } |
|
6584 |
|
6585 if (ndrivers1) |
|
6586 ddi_prop_free(driver_list1); |
|
6587 } |
|
6588 |
|
6589 /* |
|
6590 * Free the memory allocated for the phci driver list |
|
6591 */ |
|
6592 static void |
|
6593 free_phci_driver_list(mdi_vhci_config_t *vhc) |
|
6594 { |
|
6595 int i; |
|
6596 |
|
6597 if (vhc->vhc_phci_driver_list == NULL) |
|
6598 return; |
|
6599 |
|
6600 for (i = 0; i < vhc->vhc_nphci_drivers; i++) { |
|
6601 kmem_free(vhc->vhc_phci_driver_list[i].phdriver_name, |
|
6602 strlen(vhc->vhc_phci_driver_list[i].phdriver_name) + 1); |
|
6603 } |
|
6604 |
|
6605 kmem_free(vhc->vhc_phci_driver_list, |
|
6606 sizeof (mdi_phci_driver_info_t) * vhc->vhc_nphci_drivers); |
|
6607 } |
|
6608 |
|
6609 /* |
|
6610 * Stop all vhci cache related async threads and free their resources. |
|
6611 */ |
|
6612 static int |
|
6613 stop_vhcache_async_threads(mdi_vhci_config_t *vhc) |
|
6614 { |
|
6615 mdi_async_client_config_t *acc, *acc_next; |
|
6616 |
|
6617 mutex_enter(&vhc->vhc_lock); |
|
6618 vhc->vhc_flags |= MDI_VHC_EXIT; |
|
6619 ASSERT(vhc->vhc_acc_thrcount >= 0); |
|
6620 cv_broadcast(&vhc->vhc_cv); |
|
6621 |
|
6622 while ((vhc->vhc_flags & MDI_VHC_VHCACHE_FLUSH_THREAD) || |
|
6623 (vhc->vhc_flags & MDI_VHC_BUILD_VHCI_CACHE_THREAD) || |
|
6624 vhc->vhc_acc_thrcount != 0) { |
|
6625 mutex_exit(&vhc->vhc_lock); |
|
6626 delay(1); |
|
6627 mutex_enter(&vhc->vhc_lock); |
|
6628 } |
|
6629 |
|
6630 vhc->vhc_flags &= ~MDI_VHC_EXIT; |
|
6631 |
|
6632 for (acc = vhc->vhc_acc_list_head; acc != NULL; acc = acc_next) { |
|
6633 acc_next = acc->acc_next; |
|
6634 free_async_client_config(acc); |
|
6635 } |
|
6636 vhc->vhc_acc_list_head = NULL; |
|
6637 vhc->vhc_acc_list_tail = NULL; |
|
6638 vhc->vhc_acc_count = 0; |
|
6639 |
|
6640 if (vhc->vhc_flags & MDI_VHC_VHCACHE_DIRTY) { |
|
6641 vhc->vhc_flags &= ~MDI_VHC_VHCACHE_DIRTY; |
|
6642 mutex_exit(&vhc->vhc_lock); |
|
6643 if (flush_vhcache(vhc, 0) != MDI_SUCCESS) { |
|
6644 vhcache_dirty(vhc); |
|
6645 return (MDI_FAILURE); |
|
6646 } |
|
6647 } else |
|
6648 mutex_exit(&vhc->vhc_lock); |
|
6649 |
|
6650 if (callb_delete(vhc->vhc_cbid) != 0) |
|
6651 return (MDI_FAILURE); |
|
6652 |
|
6653 return (MDI_SUCCESS); |
|
6654 } |
|
6655 |
|
6656 /* |
|
6657 * Stop vhci cache flush thread |
|
6658 */ |
|
6659 /* ARGSUSED */ |
|
6660 static boolean_t |
|
6661 stop_vhcache_flush_thread(void *arg, int code) |
|
6662 { |
|
6663 mdi_vhci_config_t *vhc = (mdi_vhci_config_t *)arg; |
|
6664 |
|
6665 mutex_enter(&vhc->vhc_lock); |
|
6666 vhc->vhc_flags |= MDI_VHC_EXIT; |
|
6667 cv_broadcast(&vhc->vhc_cv); |
|
6668 |
|
6669 while (vhc->vhc_flags & MDI_VHC_VHCACHE_FLUSH_THREAD) { |
|
6670 mutex_exit(&vhc->vhc_lock); |
|
6671 delay(1); |
|
6672 mutex_enter(&vhc->vhc_lock); |
|
6673 } |
|
6674 |
|
6675 if (vhc->vhc_flags & MDI_VHC_VHCACHE_DIRTY) { |
|
6676 vhc->vhc_flags &= ~MDI_VHC_VHCACHE_DIRTY; |
|
6677 mutex_exit(&vhc->vhc_lock); |
|
6678 (void) flush_vhcache(vhc, 1); |
|
6679 } else |
|
6680 mutex_exit(&vhc->vhc_lock); |
|
6681 |
|
6682 return (B_TRUE); |
|
6683 } |
|
6684 |
|
6685 /* |
|
6686 * Enqueue the vhcache phci (cphci) at the tail of the list |
|
6687 */ |
|
6688 static void |
|
6689 enqueue_vhcache_phci(mdi_vhci_cache_t *vhcache, mdi_vhcache_phci_t *cphci) |
|
6690 { |
|
6691 cphci->cphci_next = NULL; |
|
6692 if (vhcache->vhcache_phci_head == NULL) |
|
6693 vhcache->vhcache_phci_head = cphci; |
|
6694 else |
|
6695 vhcache->vhcache_phci_tail->cphci_next = cphci; |
|
6696 vhcache->vhcache_phci_tail = cphci; |
|
6697 } |
|
6698 |
|
6699 /* |
|
6700 * Enqueue the vhcache pathinfo (cpi) at the tail of the list |
|
6701 */ |
|
6702 static void |
|
6703 enqueue_tail_vhcache_pathinfo(mdi_vhcache_client_t *cct, |
|
6704 mdi_vhcache_pathinfo_t *cpi) |
|
6705 { |
|
6706 cpi->cpi_next = NULL; |
|
6707 if (cct->cct_cpi_head == NULL) |
|
6708 cct->cct_cpi_head = cpi; |
|
6709 else |
|
6710 cct->cct_cpi_tail->cpi_next = cpi; |
|
6711 cct->cct_cpi_tail = cpi; |
|
6712 } |
|
6713 |
|
6714 /* |
|
6715 * Enqueue the vhcache pathinfo (cpi) at the correct location in the |
|
6716 * ordered list. All cpis which do not have MDI_CPI_HINT_PATH_DOES_NOT_EXIST |
|
6717 * flag set come at the beginning of the list. All cpis which have this |
|
6718 * flag set come at the end of the list. |
|
6719 */ |
|
6720 static void |
|
6721 enqueue_vhcache_pathinfo(mdi_vhcache_client_t *cct, |
|
6722 mdi_vhcache_pathinfo_t *newcpi) |
|
6723 { |
|
6724 mdi_vhcache_pathinfo_t *cpi, *prev_cpi; |
|
6725 |
|
6726 if (cct->cct_cpi_head == NULL || |
|
6727 (newcpi->cpi_flags & MDI_CPI_HINT_PATH_DOES_NOT_EXIST)) |
|
6728 enqueue_tail_vhcache_pathinfo(cct, newcpi); |
|
6729 else { |
|
6730 for (cpi = cct->cct_cpi_head, prev_cpi = NULL; cpi != NULL && |
|
6731 !(cpi->cpi_flags & MDI_CPI_HINT_PATH_DOES_NOT_EXIST); |
|
6732 prev_cpi = cpi, cpi = cpi->cpi_next) |
|
6733 ; |
|
6734 |
|
6735 if (prev_cpi == NULL) |
|
6736 cct->cct_cpi_head = newcpi; |
|
6737 else |
|
6738 prev_cpi->cpi_next = newcpi; |
|
6739 |
|
6740 newcpi->cpi_next = cpi; |
|
6741 |
|
6742 if (cpi == NULL) |
|
6743 cct->cct_cpi_tail = newcpi; |
|
6744 } |
|
6745 } |
|
6746 |
|
6747 /* |
|
6748 * Enqueue the vhcache client (cct) at the tail of the list |
|
6749 */ |
|
6750 static void |
|
6751 enqueue_vhcache_client(mdi_vhci_cache_t *vhcache, |
|
6752 mdi_vhcache_client_t *cct) |
|
6753 { |
|
6754 cct->cct_next = NULL; |
|
6755 if (vhcache->vhcache_client_head == NULL) |
|
6756 vhcache->vhcache_client_head = cct; |
|
6757 else |
|
6758 vhcache->vhcache_client_tail->cct_next = cct; |
|
6759 vhcache->vhcache_client_tail = cct; |
|
6760 } |
|
6761 |
|
6762 static void |
|
6763 free_string_array(char **str, int nelem) |
|
6764 { |
|
6765 int i; |
|
6766 |
|
6767 if (str) { |
|
6768 for (i = 0; i < nelem; i++) { |
|
6769 if (str[i]) |
|
6770 kmem_free(str[i], strlen(str[i]) + 1); |
|
6771 } |
|
6772 kmem_free(str, sizeof (char *) * nelem); |
|
6773 } |
|
6774 } |
|
6775 |
|
6776 static void |
|
6777 free_vhcache_phci(mdi_vhcache_phci_t *cphci) |
|
6778 { |
|
6779 kmem_free(cphci->cphci_path, strlen(cphci->cphci_path) + 1); |
|
6780 kmem_free(cphci, sizeof (*cphci)); |
|
6781 } |
|
6782 |
|
6783 static void |
|
6784 free_vhcache_pathinfo(mdi_vhcache_pathinfo_t *cpi) |
|
6785 { |
|
6786 kmem_free(cpi->cpi_addr, strlen(cpi->cpi_addr) + 1); |
|
6787 kmem_free(cpi, sizeof (*cpi)); |
|
6788 } |
|
6789 |
|
6790 static void |
|
6791 free_vhcache_client(mdi_vhcache_client_t *cct) |
|
6792 { |
|
6793 kmem_free(cct->cct_name_addr, strlen(cct->cct_name_addr) + 1); |
|
6794 kmem_free(cct, sizeof (*cct)); |
|
6795 } |
|
6796 |
|
6797 static char * |
|
6798 vhcache_mknameaddr(char *ct_name, char *ct_addr, int *ret_len) |
|
6799 { |
|
6800 char *name_addr; |
|
6801 int len; |
|
6802 |
|
6803 len = strlen(ct_name) + strlen(ct_addr) + 2; |
|
6804 name_addr = kmem_alloc(len, KM_SLEEP); |
|
6805 (void) snprintf(name_addr, len, "%s@%s", ct_name, ct_addr); |
|
6806 |
|
6807 if (ret_len) |
|
6808 *ret_len = len; |
|
6809 return (name_addr); |
|
6810 } |
|
6811 |
|
6812 /* |
|
6813 * Copy the contents of paddrnvl to vhci cache. |
|
6814 * paddrnvl nvlist contains path information for a vhci client. |
|
6815 * See the comment in mainnvl_to_vhcache() for the format of this nvlist. |
|
6816 */ |
|
6817 static void |
|
6818 paddrnvl_to_vhcache(nvlist_t *nvl, mdi_vhcache_phci_t *cphci_list[], |
|
6819 mdi_vhcache_client_t *cct) |
|
6820 { |
|
6821 nvpair_t *nvp = NULL; |
|
6822 mdi_vhcache_pathinfo_t *cpi; |
|
6823 uint_t nelem; |
|
6824 uint32_t *val; |
|
6825 |
|
6826 while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) { |
|
6827 ASSERT(nvpair_type(nvp) == DATA_TYPE_UINT32_ARRAY); |
|
6828 cpi = kmem_zalloc(sizeof (*cpi), KM_SLEEP); |
|
6829 cpi->cpi_addr = i_ddi_strdup(nvpair_name(nvp), KM_SLEEP); |
|
6830 (void) nvpair_value_uint32_array(nvp, &val, &nelem); |
|
6831 ASSERT(nelem == 2); |
|
6832 cpi->cpi_cphci = cphci_list[val[0]]; |
|
6833 cpi->cpi_flags = val[1]; |
|
6834 enqueue_tail_vhcache_pathinfo(cct, cpi); |
|
6835 } |
|
6836 } |
|
6837 |
|
6838 /* |
|
6839 * Copy the contents of caddrmapnvl to vhci cache. |
|
6840 * caddrmapnvl nvlist contains vhci client address to phci client address |
|
6841 * mappings. See the comment in mainnvl_to_vhcache() for the format of |
|
6842 * this nvlist. |
|
6843 */ |
|
6844 static void |
|
6845 caddrmapnvl_to_vhcache(mdi_vhci_cache_t *vhcache, nvlist_t *nvl, |
|
6846 mdi_vhcache_phci_t *cphci_list[]) |
|
6847 { |
|
6848 nvpair_t *nvp = NULL; |
|
6849 nvlist_t *paddrnvl; |
|
6850 mdi_vhcache_client_t *cct; |
|
6851 |
|
6852 while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) { |
|
6853 ASSERT(nvpair_type(nvp) == DATA_TYPE_NVLIST); |
|
6854 cct = kmem_zalloc(sizeof (*cct), KM_SLEEP); |
|
6855 cct->cct_name_addr = i_ddi_strdup(nvpair_name(nvp), KM_SLEEP); |
|
6856 (void) nvpair_value_nvlist(nvp, &paddrnvl); |
|
6857 paddrnvl_to_vhcache(paddrnvl, cphci_list, cct); |
|
6858 /* the client must contain at least one path */ |
|
6859 ASSERT(cct->cct_cpi_head != NULL); |
|
6860 |
|
6861 enqueue_vhcache_client(vhcache, cct); |
|
6862 (void) mod_hash_insert(vhcache->vhcache_client_hash, |
|
6863 (mod_hash_key_t)cct->cct_name_addr, (mod_hash_val_t)cct); |
|
6864 } |
|
6865 } |
|
6866 |
|
6867 /* |
|
6868 * Copy the contents of the main nvlist to vhci cache. |
|
6869 * |
|
6870 * VHCI busconfig cached data is stored in the form of a nvlist on the disk. |
|
6871 * The nvlist contains the mappings between the vhci client addresses and |
|
6872 * their corresponding phci client addresses. |
|
6873 * |
|
6874 * The structure of the nvlist is as follows: |
|
6875 * |
|
6876 * Main nvlist: |
|
6877 * NAME TYPE DATA |
|
6878 * version int32 version number |
|
6879 * phcis string array array of phci paths |
|
6880 * clientaddrmap nvlist_t c2paddrs_nvl (see below) |
|
6881 * |
|
6882 * structure of c2paddrs_nvl: |
|
6883 * NAME TYPE DATA |
|
6884 * caddr1 nvlist_t paddrs_nvl1 |
|
6885 * caddr2 nvlist_t paddrs_nvl2 |
|
6886 * ... |
|
6887 * where caddr1, caddr2, ... are vhci client name and addresses in the |
|
6888 * form of "<clientname>@<clientaddress>". |
|
6889 * (for example: "ssd@2000002037cd9f72"); |
|
6890 * paddrs_nvl1, paddrs_nvl2, .. are nvlists that contain path information. |
|
6891 * |
|
6892 * structure of paddrs_nvl: |
|
6893 * NAME TYPE DATA |
|
6894 * pi_addr1 uint32_array (phci-id, cpi_flags) |
|
6895 * pi_addr2 uint32_array (phci-id, cpi_flags) |
|
6896 * ... |
|
6897 * where pi_addr1, pi_addr2, ... are bus specific addresses of pathinfo nodes |
|
6898 * (so called pi_addrs, for example: "w2100002037cd9f72,0"); |
|
6899 * phci-ids are integers that identify PHCIs to which the |
|
6900 * the bus specific address belongs to. These integers are used as an index |
|
6901 * into to the phcis string array in the main nvlist to get the PHCI path. |
|
6902 */ |
|
6903 static int |
|
6904 mainnvl_to_vhcache(mdi_vhci_cache_t *vhcache, nvlist_t *nvl) |
|
6905 { |
|
6906 char **phcis, **phci_namep; |
|
6907 uint_t nphcis; |
|
6908 mdi_vhcache_phci_t *cphci, **cphci_list; |
|
6909 nvlist_t *caddrmapnvl; |
|
6910 int32_t ver; |
|
6911 int i; |
|
6912 size_t cphci_list_size; |
|
6913 |
|
6914 ASSERT(RW_WRITE_HELD(&vhcache->vhcache_lock)); |
|
6915 |
|
6916 if (nvlist_lookup_int32(nvl, MDI_NVPNAME_VERSION, &ver) != 0 || |
|
6917 ver != MDI_VHCI_CACHE_VERSION) |
|
6918 return (MDI_FAILURE); |
|
6919 |
|
6920 if (nvlist_lookup_string_array(nvl, MDI_NVPNAME_PHCIS, &phcis, |
|
6921 &nphcis) != 0) |
|
6922 return (MDI_SUCCESS); |
|
6923 |
|
6924 ASSERT(nphcis > 0); |
|
6925 |
|
6926 cphci_list_size = sizeof (mdi_vhcache_phci_t *) * nphcis; |
|
6927 cphci_list = kmem_alloc(cphci_list_size, KM_SLEEP); |
|
6928 for (i = 0, phci_namep = phcis; i < nphcis; i++, phci_namep++) { |
|
6929 cphci = kmem_zalloc(sizeof (mdi_vhcache_phci_t), KM_SLEEP); |
|
6930 cphci->cphci_path = i_ddi_strdup(*phci_namep, KM_SLEEP); |
|
6931 enqueue_vhcache_phci(vhcache, cphci); |
|
6932 cphci_list[i] = cphci; |
|
6933 } |
|
6934 |
|
6935 ASSERT(vhcache->vhcache_phci_head != NULL); |
|
6936 |
|
6937 if (nvlist_lookup_nvlist(nvl, MDI_NVPNAME_CTADDRMAP, &caddrmapnvl) == 0) |
|
6938 caddrmapnvl_to_vhcache(vhcache, caddrmapnvl, cphci_list); |
|
6939 |
|
6940 kmem_free(cphci_list, cphci_list_size); |
|
6941 return (MDI_SUCCESS); |
|
6942 } |
|
6943 |
|
6944 /* |
|
6945 * Build paddrnvl for the specified client using the information in the |
|
6946 * vhci cache and add it to the caddrmapnnvl. |
|
6947 * Returns 0 on success, errno on failure. |
|
6948 */ |
|
6949 static int |
|
6950 vhcache_to_paddrnvl(mdi_vhci_cache_t *vhcache, mdi_vhcache_client_t *cct, |
|
6951 nvlist_t *caddrmapnvl) |
|
6952 { |
|
6953 mdi_vhcache_pathinfo_t *cpi; |
|
6954 nvlist_t *nvl; |
|
6955 int err; |
|
6956 uint32_t val[2]; |
|
6957 |
|
6958 ASSERT(RW_LOCK_HELD(&vhcache->vhcache_lock)); |
|
6959 |
|
6960 if ((err = nvlist_alloc(&nvl, 0, KM_SLEEP)) != 0) |
|
6961 return (err); |
|
6962 |
|
6963 for (cpi = cct->cct_cpi_head; cpi != NULL; cpi = cpi->cpi_next) { |
|
6964 val[0] = cpi->cpi_cphci->cphci_id; |
|
6965 val[1] = cpi->cpi_flags; |
|
6966 if ((err = nvlist_add_uint32_array(nvl, cpi->cpi_addr, val, 2)) |
|
6967 != 0) |
|
6968 goto out; |
|
6969 } |
|
6970 |
|
6971 err = nvlist_add_nvlist(caddrmapnvl, cct->cct_name_addr, nvl); |
|
6972 out: |
|
6973 nvlist_free(nvl); |
|
6974 return (err); |
|
6975 } |
|
6976 |
|
6977 /* |
|
6978 * Build caddrmapnvl using the information in the vhci cache |
|
6979 * and add it to the mainnvl. |
|
6980 * Returns 0 on success, errno on failure. |
|
6981 */ |
|
6982 static int |
|
6983 vhcache_to_caddrmapnvl(mdi_vhci_cache_t *vhcache, nvlist_t *mainnvl) |
|
6984 { |
|
6985 mdi_vhcache_client_t *cct; |
|
6986 nvlist_t *nvl; |
|
6987 int err; |
|
6988 |
|
6989 ASSERT(RW_LOCK_HELD(&vhcache->vhcache_lock)); |
|
6990 |
|
6991 if ((err = nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP)) != 0) |
|
6992 return (err); |
|
6993 |
|
6994 for (cct = vhcache->vhcache_client_head; cct != NULL; |
|
6995 cct = cct->cct_next) { |
|
6996 if ((err = vhcache_to_paddrnvl(vhcache, cct, nvl)) != 0) |
|
6997 goto out; |
|
6998 } |
|
6999 |
|
7000 err = nvlist_add_nvlist(mainnvl, MDI_NVPNAME_CTADDRMAP, nvl); |
|
7001 out: |
|
7002 nvlist_free(nvl); |
|
7003 return (err); |
|
7004 } |
|
7005 |
|
7006 /* |
|
7007 * Build nvlist using the information in the vhci cache. |
|
7008 * See the comment in mainnvl_to_vhcache() for the format of the nvlist. |
|
7009 * Returns nvl on success, NULL on failure. |
|
7010 */ |
|
7011 static nvlist_t * |
|
7012 vhcache_to_mainnvl(mdi_vhci_cache_t *vhcache) |
|
7013 { |
|
7014 mdi_vhcache_phci_t *cphci; |
|
7015 uint_t phci_count; |
|
7016 char **phcis; |
|
7017 nvlist_t *nvl; |
|
7018 int err, i; |
|
7019 |
|
7020 if ((err = nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP)) != 0) { |
|
7021 nvl = NULL; |
|
7022 goto out; |
|
7023 } |
|
7024 |
|
7025 if ((err = nvlist_add_int32(nvl, MDI_NVPNAME_VERSION, |
|
7026 MDI_VHCI_CACHE_VERSION)) != 0) |
|
7027 goto out; |
|
7028 |
|
7029 rw_enter(&vhcache->vhcache_lock, RW_READER); |
|
7030 if (vhcache->vhcache_phci_head == NULL) { |
|
7031 rw_exit(&vhcache->vhcache_lock); |
|
7032 return (nvl); |
|
7033 } |
|
7034 |
|
7035 phci_count = 0; |
|
7036 for (cphci = vhcache->vhcache_phci_head; cphci != NULL; |
|
7037 cphci = cphci->cphci_next) |
|
7038 cphci->cphci_id = phci_count++; |
|
7039 |
|
7040 /* build phci pathname list */ |
|
7041 phcis = kmem_alloc(sizeof (char *) * phci_count, KM_SLEEP); |
|
7042 for (cphci = vhcache->vhcache_phci_head, i = 0; cphci != NULL; |
|
7043 cphci = cphci->cphci_next, i++) |
|
7044 phcis[i] = i_ddi_strdup(cphci->cphci_path, KM_SLEEP); |
|
7045 |
|
7046 err = nvlist_add_string_array(nvl, MDI_NVPNAME_PHCIS, phcis, |
|
7047 phci_count); |
|
7048 free_string_array(phcis, phci_count); |
|
7049 |
|
7050 if (err == 0 && |
|
7051 (err = vhcache_to_caddrmapnvl(vhcache, nvl)) == 0) { |
|
7052 rw_exit(&vhcache->vhcache_lock); |
|
7053 return (nvl); |
|
7054 } |
|
7055 |
|
7056 rw_exit(&vhcache->vhcache_lock); |
|
7057 out: |
|
7058 if (nvl) |
|
7059 nvlist_free(nvl); |
|
7060 return (NULL); |
|
7061 } |
|
7062 |
|
7063 /* |
|
7064 * Lookup vhcache phci structure for the specified phci path. |
|
7065 */ |
|
7066 static mdi_vhcache_phci_t * |
|
7067 lookup_vhcache_phci_by_name(mdi_vhci_cache_t *vhcache, char *phci_path) |
|
7068 { |
|
7069 mdi_vhcache_phci_t *cphci; |
|
7070 |
|
7071 ASSERT(RW_LOCK_HELD(&vhcache->vhcache_lock)); |
|
7072 |
|
7073 for (cphci = vhcache->vhcache_phci_head; cphci != NULL; |
|
7074 cphci = cphci->cphci_next) { |
|
7075 if (strcmp(cphci->cphci_path, phci_path) == 0) |
|
7076 return (cphci); |
|
7077 } |
|
7078 |
|
7079 return (NULL); |
|
7080 } |
|
7081 |
|
7082 /* |
|
7083 * Lookup vhcache phci structure for the specified phci. |
|
7084 */ |
|
7085 static mdi_vhcache_phci_t * |
|
7086 lookup_vhcache_phci_by_addr(mdi_vhci_cache_t *vhcache, mdi_phci_t *ph) |
|
7087 { |
|
7088 mdi_vhcache_phci_t *cphci; |
|
7089 |
|
7090 ASSERT(RW_LOCK_HELD(&vhcache->vhcache_lock)); |
|
7091 |
|
7092 for (cphci = vhcache->vhcache_phci_head; cphci != NULL; |
|
7093 cphci = cphci->cphci_next) { |
|
7094 if (cphci->cphci_phci == ph) |
|
7095 return (cphci); |
|
7096 } |
|
7097 |
|
7098 return (NULL); |
|
7099 } |
|
7100 |
|
7101 /* |
|
7102 * Add the specified phci to the vhci cache if not already present. |
|
7103 */ |
|
7104 static void |
|
7105 vhcache_phci_add(mdi_vhci_config_t *vhc, mdi_phci_t *ph) |
|
7106 { |
|
7107 mdi_vhci_cache_t *vhcache = &vhc->vhc_vhcache; |
|
7108 mdi_vhcache_phci_t *cphci; |
|
7109 char *pathname; |
|
7110 int cache_updated; |
|
7111 |
|
7112 rw_enter(&vhcache->vhcache_lock, RW_WRITER); |
|
7113 |
|
7114 pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP); |
|
7115 (void) ddi_pathname(ph->ph_dip, pathname); |
|
7116 if ((cphci = lookup_vhcache_phci_by_name(vhcache, pathname)) |
|
7117 != NULL) { |
|
7118 cphci->cphci_phci = ph; |
|
7119 cache_updated = 0; |
|
7120 } else { |
|
7121 cphci = kmem_zalloc(sizeof (*cphci), KM_SLEEP); |
|
7122 cphci->cphci_path = i_ddi_strdup(pathname, KM_SLEEP); |
|
7123 cphci->cphci_phci = ph; |
|
7124 enqueue_vhcache_phci(vhcache, cphci); |
|
7125 cache_updated = 1; |
|
7126 } |
|
7127 rw_exit(&vhcache->vhcache_lock); |
|
7128 |
|
7129 kmem_free(pathname, MAXPATHLEN); |
|
7130 if (cache_updated) |
|
7131 vhcache_dirty(vhc); |
|
7132 } |
|
7133 |
|
7134 /* |
|
7135 * Remove the reference to the specified phci from the vhci cache. |
|
7136 */ |
|
7137 static void |
|
7138 vhcache_phci_remove(mdi_vhci_config_t *vhc, mdi_phci_t *ph) |
|
7139 { |
|
7140 mdi_vhci_cache_t *vhcache = &vhc->vhc_vhcache; |
|
7141 mdi_vhcache_phci_t *cphci; |
|
7142 |
|
7143 rw_enter(&vhcache->vhcache_lock, RW_WRITER); |
|
7144 if ((cphci = lookup_vhcache_phci_by_addr(vhcache, ph)) != NULL) { |
|
7145 /* do not remove the actual mdi_vhcache_phci structure */ |
|
7146 cphci->cphci_phci = NULL; |
|
7147 } |
|
7148 rw_exit(&vhcache->vhcache_lock); |
|
7149 } |
|
7150 |
|
7151 static void |
|
7152 init_vhcache_lookup_token(mdi_vhcache_lookup_token_t *dst, |
|
7153 mdi_vhcache_lookup_token_t *src) |
|
7154 { |
|
7155 if (src == NULL) { |
|
7156 dst->lt_cct = NULL; |
|
7157 dst->lt_cct_lookup_time = 0; |
|
7158 } else { |
|
7159 dst->lt_cct = src->lt_cct; |
|
7160 dst->lt_cct_lookup_time = src->lt_cct_lookup_time; |
|
7161 } |
|
7162 } |
|
7163 |
|
7164 /* |
|
7165 * Look up vhcache client for the specified client. |
|
7166 */ |
|
/*
 * Look up the vhcache client entry (cct) for the client identified by
 * ct_name/ct_addr.  Caller must hold vhcache_lock (reader or writer).
 *
 * 'token' is an optional lookup cache: on a hash hit it is refreshed with
 * the found cct and the current lbolt64 timestamp; on a miss it is cleared.
 * Returns the cct, or NULL if not found.
 */
static mdi_vhcache_client_t *
lookup_vhcache_client(mdi_vhci_cache_t *vhcache, char *ct_name, char *ct_addr,
    mdi_vhcache_lookup_token_t *token)
{
	mod_hash_val_t hv;
	char *name_addr;
	int len;

	ASSERT(RW_LOCK_HELD(&vhcache->vhcache_lock));

	/*
	 * If no vhcache clean occurred since the last lookup, we can
	 * simply return the cct from the last lookup operation.
	 * It works because ccts are never freed except during the vhcache
	 * cleanup operation.
	 */
	if (token != NULL &&
	    vhcache->vhcache_clean_time < token->lt_cct_lookup_time)
		return (token->lt_cct);

	/* build the "name@addr" hash key and probe the client hash */
	name_addr = vhcache_mknameaddr(ct_name, ct_addr, &len);
	if (mod_hash_find(vhcache->vhcache_client_hash,
	    (mod_hash_key_t)name_addr, &hv) == 0) {
		if (token) {
			token->lt_cct = (mdi_vhcache_client_t *)hv;
			token->lt_cct_lookup_time = lbolt64;
		}
	} else {
		if (token) {
			token->lt_cct = NULL;
			token->lt_cct_lookup_time = 0;
		}
		hv = NULL;
	}
	/* key was only needed for the probe; free it in every case */
	kmem_free(name_addr, len);
	return ((mdi_vhcache_client_t *)hv);
}
|
7204 |
|
7205 /* |
|
7206 * Add the specified path to the vhci cache if not already present. |
|
7207 * Also add the vhcache client for the client corresponding to this path |
|
7208 * if it doesn't already exist. |
|
7209 */ |
|
/*
 * Add the specified path (pip) to the vhci cache if not already present.
 * Also add the vhcache client entry for the client corresponding to this
 * path if it doesn't already exist.  Schedules an on-disk cache flush
 * (vhcache_dirty) only when something actually changed.
 */
static void
vhcache_pi_add(mdi_vhci_config_t *vhc, struct mdi_pathinfo *pip)
{
	mdi_vhci_cache_t *vhcache = &vhc->vhc_vhcache;
	mdi_vhcache_client_t *cct;
	mdi_vhcache_pathinfo_t *cpi;
	mdi_phci_t *ph = pip->pi_phci;
	mdi_client_t *ct = pip->pi_client;
	int cache_updated = 0;

	rw_enter(&vhcache->vhcache_lock, RW_WRITER);

	/* if vhcache client for this pip doesn't already exist, add it */
	if ((cct = lookup_vhcache_client(vhcache, ct->ct_drvname, ct->ct_guid,
	    NULL)) == NULL) {
		cct = kmem_zalloc(sizeof (*cct), KM_SLEEP);
		cct->cct_name_addr = vhcache_mknameaddr(ct->ct_drvname,
		    ct->ct_guid, NULL);
		enqueue_vhcache_client(vhcache, cct);
		(void) mod_hash_insert(vhcache->vhcache_client_hash,
		    (mod_hash_key_t)cct->cct_name_addr, (mod_hash_val_t)cct);
		cache_updated = 1;
	}

	/* look for an existing cached path matching this phci + unit addr */
	for (cpi = cct->cct_cpi_head; cpi != NULL; cpi = cpi->cpi_next) {
		if (cpi->cpi_cphci->cphci_phci == ph &&
		    strcmp(cpi->cpi_addr, pip->pi_addr) == 0) {
			cpi->cpi_pip = pip;
			if (cpi->cpi_flags & MDI_CPI_HINT_PATH_DOES_NOT_EXIST) {
				/*
				 * The path exists after all; clear the hint
				 * and re-sort so existing paths sort first.
				 * The hint is persisted on disk, hence the
				 * cache_updated marking.
				 */
				cpi->cpi_flags &=
				    ~MDI_CPI_HINT_PATH_DOES_NOT_EXIST;
				sort_vhcache_paths(cct);
				cache_updated = 1;
			}
			break;
		}
	}

	/* path not cached yet; create and enqueue a new cached path entry */
	if (cpi == NULL) {
		cpi = kmem_zalloc(sizeof (*cpi), KM_SLEEP);
		cpi->cpi_addr = i_ddi_strdup(pip->pi_addr, KM_SLEEP);
		cpi->cpi_cphci = lookup_vhcache_phci_by_addr(vhcache, ph);
		ASSERT(cpi->cpi_cphci != NULL);
		cpi->cpi_pip = pip;
		enqueue_vhcache_pathinfo(cct, cpi);
		cache_updated = 1;
	}

	rw_exit(&vhcache->vhcache_lock);

	if (cache_updated)
		vhcache_dirty(vhc);
}
|
7263 |
|
7264 /* |
|
7265 * Remove the reference to the specified path from the vhci cache. |
|
7266 */ |
|
7267 static void |
|
7268 vhcache_pi_remove(mdi_vhci_config_t *vhc, struct mdi_pathinfo *pip) |
|
7269 { |
|
7270 mdi_vhci_cache_t *vhcache = &vhc->vhc_vhcache; |
|
7271 mdi_client_t *ct = pip->pi_client; |
|
7272 mdi_vhcache_client_t *cct; |
|
7273 mdi_vhcache_pathinfo_t *cpi; |
|
7274 |
|
7275 rw_enter(&vhcache->vhcache_lock, RW_WRITER); |
|
7276 if ((cct = lookup_vhcache_client(vhcache, ct->ct_drvname, ct->ct_guid, |
|
7277 NULL)) != NULL) { |
|
7278 for (cpi = cct->cct_cpi_head; cpi != NULL; |
|
7279 cpi = cpi->cpi_next) { |
|
7280 if (cpi->cpi_pip == pip) { |
|
7281 cpi->cpi_pip = NULL; |
|
7282 break; |
|
7283 } |
|
7284 } |
|
7285 } |
|
7286 rw_exit(&vhcache->vhcache_lock); |
|
7287 } |
|
7288 |
|
7289 /* |
|
7290 * Flush the vhci cache to disk. |
|
7291 * Returns MDI_SUCCESS on success, MDI_FAILURE on failure. |
|
7292 */ |
|
/*
 * Flush the vhci cache to disk.
 * Returns MDI_SUCCESS on success, MDI_FAILURE on failure.
 *
 * Note: a read-only filesystem (EROFS) is treated as "success" — the
 * dirty and error flags are cleared and MDI_VHC_READONLY_FS is set so
 * no further flush attempts are made.
 */
static int
flush_vhcache(mdi_vhci_config_t *vhc, int force_flag)
{
	nvlist_t *nvl;
	int err;
	int rv;

	/*
	 * It is possible that the system may shutdown before
	 * i_ddi_io_initialized (during stmsboot for example). To allow for
	 * flushing the cache in this case do not check for
	 * i_ddi_io_initialized when force flag is set.
	 */
	if (force_flag == 0 && !i_ddi_io_initialized())
		return (MDI_FAILURE);

	/* snapshot the in-core cache into an nvlist and write it out */
	if ((nvl = vhcache_to_mainnvl(&vhc->vhc_vhcache)) != NULL) {
		err = fwrite_nvlist(vhc->vhc_vhcache_filename, nvl);
		nvlist_free(nvl);
	} else
		err = EFAULT;

	rv = MDI_SUCCESS;
	mutex_enter(&vhc->vhc_lock);
	if (err != 0) {
		if (err == EROFS) {
			vhc->vhc_flags |= MDI_VHC_READONLY_FS;
			vhc->vhc_flags &= ~(MDI_VHC_VHCACHE_FLUSH_ERROR |
			    MDI_VHC_VHCACHE_DIRTY);
		} else {
			/* warn only on the first failure to avoid log spam */
			if (!(vhc->vhc_flags & MDI_VHC_VHCACHE_FLUSH_ERROR)) {
				cmn_err(CE_CONT, "%s: update failed\n",
				    vhc->vhc_vhcache_filename);
				vhc->vhc_flags |= MDI_VHC_VHCACHE_FLUSH_ERROR;
			}
			rv = MDI_FAILURE;
		}
	} else if (vhc->vhc_flags & MDI_VHC_VHCACHE_FLUSH_ERROR) {
		/* a previously reported failure has recovered; say so once */
		cmn_err(CE_CONT,
		    "%s: update now ok\n", vhc->vhc_vhcache_filename);
		vhc->vhc_flags &= ~MDI_VHC_VHCACHE_FLUSH_ERROR;
	}
	mutex_exit(&vhc->vhc_lock);

	return (rv);
}
|
7339 |
|
7340 /* |
|
7341 * Call flush_vhcache() to flush the vhci cache at the scheduled time. |
|
7342 * Exits itself if left idle for the idle timeout period. |
|
7343 */ |
|
/*
 * Call flush_vhcache() to flush the vhci cache at the scheduled time.
 * Exits itself if left idle for the idle timeout period.
 *
 * Runs with vhc->vhc_lock held except around the actual flush; the lock
 * is registered with the CPR framework so the thread is suspend-safe.
 */
static void
vhcache_flush_thread(void *arg)
{
	mdi_vhci_config_t *vhc = (mdi_vhci_config_t *)arg;
	clock_t idle_time, quit_at_ticks;
	callb_cpr_t cprinfo;

	/* number of seconds to sleep idle before exiting */
	idle_time = mdi_vhcache_flush_daemon_idle_time * TICKS_PER_SECOND;

	CALLB_CPR_INIT(&cprinfo, &vhc->vhc_lock, callb_generic_cpr,
	    "mdi_vhcache_flush");
	mutex_enter(&vhc->vhc_lock);
	for (; ; ) {
		/*
		 * While the cache is dirty, wait until the scheduled flush
		 * time arrives, then flush.  A failed flush re-dirties the
		 * cache so it is retried on the next pass.
		 */
		while (!(vhc->vhc_flags & MDI_VHC_EXIT) &&
		    (vhc->vhc_flags & MDI_VHC_VHCACHE_DIRTY)) {
			if (ddi_get_lbolt() < vhc->vhc_flush_at_ticks) {
				CALLB_CPR_SAFE_BEGIN(&cprinfo);
				(void) cv_timedwait(&vhc->vhc_cv,
				    &vhc->vhc_lock, vhc->vhc_flush_at_ticks);
				CALLB_CPR_SAFE_END(&cprinfo, &vhc->vhc_lock);
			} else {
				vhc->vhc_flags &= ~MDI_VHC_VHCACHE_DIRTY;
				mutex_exit(&vhc->vhc_lock);

				if (flush_vhcache(vhc, 0) != MDI_SUCCESS)
					vhcache_dirty(vhc);

				mutex_enter(&vhc->vhc_lock);
			}
		}

		/* nothing to flush; linger for the idle period, then exit */
		quit_at_ticks = ddi_get_lbolt() + idle_time;

		while (!(vhc->vhc_flags & MDI_VHC_EXIT) &&
		    !(vhc->vhc_flags & MDI_VHC_VHCACHE_DIRTY) &&
		    ddi_get_lbolt() < quit_at_ticks) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			(void) cv_timedwait(&vhc->vhc_cv, &vhc->vhc_lock,
			    quit_at_ticks);
			CALLB_CPR_SAFE_END(&cprinfo, &vhc->vhc_lock);
		}

		if ((vhc->vhc_flags & MDI_VHC_EXIT) ||
		    !(vhc->vhc_flags & MDI_VHC_VHCACHE_DIRTY))
			goto out;
	}

out:
	vhc->vhc_flags &= ~MDI_VHC_VHCACHE_FLUSH_THREAD;
	/* CALLB_CPR_EXIT releases the vhc->vhc_lock */
	CALLB_CPR_EXIT(&cprinfo);
}
|
7397 |
|
7398 /* |
|
7399 * Make vhci cache dirty and schedule flushing by vhcache flush thread. |
|
7400 */ |
|
/*
 * Make vhci cache dirty and schedule flushing by vhcache flush thread.
 * Creates the flush thread on demand if one isn't already running.
 * Silently does nothing if the cache isn't fully built yet or if the
 * backing filesystem was found to be read-only.
 */
static void
vhcache_dirty(mdi_vhci_config_t *vhc)
{
	mdi_vhci_cache_t *vhcache = &vhc->vhc_vhcache;
	int create_thread;

	rw_enter(&vhcache->vhcache_lock, RW_READER);
	/* do not flush cache until the cache is fully built */
	if (!(vhcache->vhcache_flags & MDI_VHCI_CACHE_SETUP_DONE)) {
		rw_exit(&vhcache->vhcache_lock);
		return;
	}
	rw_exit(&vhcache->vhcache_lock);

	mutex_enter(&vhc->vhc_lock);
	if (vhc->vhc_flags & MDI_VHC_READONLY_FS) {
		mutex_exit(&vhc->vhc_lock);
		return;
	}

	/* (re)arm the delayed flush deadline */
	vhc->vhc_flags |= MDI_VHC_VHCACHE_DIRTY;
	vhc->vhc_flush_at_ticks = ddi_get_lbolt() +
	    mdi_vhcache_flush_delay * TICKS_PER_SECOND;
	if (vhc->vhc_flags & MDI_VHC_VHCACHE_FLUSH_THREAD) {
		/* flush thread already exists; just wake it up */
		cv_broadcast(&vhc->vhc_cv);
		create_thread = 0;
	} else {
		/* claim thread-creation responsibility under the lock */
		vhc->vhc_flags |= MDI_VHC_VHCACHE_FLUSH_THREAD;
		create_thread = 1;
	}
	mutex_exit(&vhc->vhc_lock);

	if (create_thread)
		(void) thread_create(NULL, 0, vhcache_flush_thread, vhc,
		    0, &p0, TS_RUN, minclsyspri);
}
|
7437 |
|
7438 /* |
|
7439 * phci bus config structure - one for for each phci bus config operation that |
|
7440 * we initiate on behalf of a vhci. |
|
7441 */ |
|
/*
 * phci bus config structure - one for each phci bus config operation that
 * we initiate on behalf of a vhci.
 */
typedef struct mdi_phci_bus_config_s {
	char *phbc_phci_path;				/* devfs path of phci */
	struct mdi_vhci_bus_config_s *phbc_vhbusconfig;	/* vhci bus config */
	struct mdi_phci_bus_config_s *phbc_next;	/* list linkage */
} mdi_phci_bus_config_t;
|
7447 |
|
7448 /* vhci bus config structure - one for each vhci bus config operation */ |
|
/*
 * vhci bus config structure - one for each vhci bus config operation.
 * Tracks the count of outstanding per-phci worker threads; the initiator
 * waits on vhbc_cv until vhbc_thr_count drains to zero.
 */
typedef struct mdi_vhci_bus_config_s {
	ddi_bus_config_op_t vhbc_op;	/* bus config op */
	major_t vhbc_op_major;		/* bus config op major */
	uint_t vhbc_op_flags;		/* bus config op flags */
	kmutex_t vhbc_lock;		/* protects vhbc_thr_count */
	kcondvar_t vhbc_cv;		/* signaled when last worker exits */
	int vhbc_thr_count;		/* outstanding worker threads */
} mdi_vhci_bus_config_t;
|
7457 |
|
7458 /* |
|
7459 * bus config the specified phci |
|
7460 */ |
|
/*
 * bus config the specified phci.
 *
 * Worker routine (run either inline or as its own thread, see
 * bus_config_all_phcis).  Owns and frees its mdi_phci_bus_config_t
 * argument; the last worker to finish broadcasts on vhbc_cv.
 */
static void
bus_config_phci(void *arg)
{
	mdi_phci_bus_config_t *phbc = (mdi_phci_bus_config_t *)arg;
	mdi_vhci_bus_config_t *vhbc = phbc->phbc_vhbusconfig;
	dev_info_t *ph_dip;

	/*
	 * first configure all path components upto phci and then configure
	 * the phci children.
	 */
	if ((ph_dip = e_ddi_hold_devi_by_path(phbc->phbc_phci_path, 0))
	    != NULL) {
		if (vhbc->vhbc_op == BUS_CONFIG_DRIVER ||
		    vhbc->vhbc_op == BUS_UNCONFIG_DRIVER) {
			(void) ndi_devi_config_driver(ph_dip,
			    vhbc->vhbc_op_flags,
			    vhbc->vhbc_op_major);
		} else
			(void) ndi_devi_config(ph_dip,
			    vhbc->vhbc_op_flags);

		/* release the hold that e_ddi_hold_devi_by_path() placed */
		ndi_rele_devi(ph_dip);
	}

	/* per-phci state is worker-owned; free it before signaling done */
	kmem_free(phbc->phbc_phci_path, strlen(phbc->phbc_phci_path) + 1);
	kmem_free(phbc, sizeof (*phbc));

	mutex_enter(&vhbc->vhbc_lock);
	vhbc->vhbc_thr_count--;
	if (vhbc->vhbc_thr_count == 0)
		cv_broadcast(&vhbc->vhbc_cv);
	mutex_exit(&vhbc->vhbc_lock);
}
|
7496 |
|
7497 /* |
|
7498 * Bus config all phcis associated with the vhci in parallel. |
|
7499 * op must be BUS_CONFIG_DRIVER or BUS_CONFIG_ALL. |
|
7500 */ |
|
/*
 * Bus config all phcis associated with the vhci in parallel.
 * op must be BUS_CONFIG_DRIVER or BUS_CONFIG_ALL.
 *
 * Builds one mdi_phci_bus_config_t per cached phci, spawns a worker
 * thread per entry (or runs them inline when mdi_mtc_off is set), and
 * blocks until every worker has finished.
 */
static void
bus_config_all_phcis(mdi_vhci_cache_t *vhcache, uint_t flags,
    ddi_bus_config_op_t op, major_t maj)
{
	mdi_phci_bus_config_t *phbc_head = NULL, *phbc, *phbc_next;
	mdi_vhci_bus_config_t *vhbc;
	mdi_vhcache_phci_t *cphci;

	rw_enter(&vhcache->vhcache_lock, RW_READER);
	if (vhcache->vhcache_phci_head == NULL) {
		/* no cached phcis; nothing to configure */
		rw_exit(&vhcache->vhcache_lock);
		return;
	}

	vhbc = kmem_zalloc(sizeof (*vhbc), KM_SLEEP);

	/*
	 * Snapshot the phci paths while holding the cache lock; the
	 * workers run without it.
	 */
	for (cphci = vhcache->vhcache_phci_head; cphci != NULL;
	    cphci = cphci->cphci_next) {
		phbc = kmem_zalloc(sizeof (*phbc), KM_SLEEP);
		phbc->phbc_phci_path = i_ddi_strdup(cphci->cphci_path,
		    KM_SLEEP);
		phbc->phbc_vhbusconfig = vhbc;
		phbc->phbc_next = phbc_head;
		phbc_head = phbc;
		vhbc->vhbc_thr_count++;
	}
	rw_exit(&vhcache->vhcache_lock);

	vhbc->vhbc_op = op;
	vhbc->vhbc_op_major = maj;
	vhbc->vhbc_op_flags = NDI_NO_EVENT |
	    (flags & (NDI_CONFIG_REPROBE | NDI_DRV_CONF_REPROBE));
	mutex_init(&vhbc->vhbc_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vhbc->vhbc_cv, NULL, CV_DRIVER, NULL);

	/* now create threads to initiate bus config on all phcis in parallel */
	for (phbc = phbc_head; phbc != NULL; phbc = phbc_next) {
		phbc_next = phbc->phbc_next;
		if (mdi_mtc_off)
			bus_config_phci((void *)phbc);
		else
			(void) thread_create(NULL, 0, bus_config_phci, phbc,
			    0, &p0, TS_RUN, minclsyspri);
	}

	mutex_enter(&vhbc->vhbc_lock);
	/* wait until all threads exit */
	while (vhbc->vhbc_thr_count > 0)
		cv_wait(&vhbc->vhbc_cv, &vhbc->vhbc_lock);
	mutex_exit(&vhbc->vhbc_lock);

	mutex_destroy(&vhbc->vhbc_lock);
	cv_destroy(&vhbc->vhbc_cv);
	kmem_free(vhbc, sizeof (*vhbc));
}
|
7556 |
|
7557 /* |
|
7558 * Perform BUS_CONFIG_ONE on the specified child of the phci. |
|
7559 * The path includes the child component in addition to the phci path. |
|
7560 */ |
|
7561 static int |
|
7562 bus_config_one_phci_child(char *path) |
|
7563 { |
|
7564 dev_info_t *ph_dip, *child; |
|
7565 char *devnm; |
|
7566 int rv = MDI_FAILURE; |
|
7567 |
|
7568 /* extract the child component of the phci */ |
|
7569 devnm = strrchr(path, '/'); |
|
7570 *devnm++ = '\0'; |
|
7571 |
|
7572 /* |
|
7573 * first configure all path components upto phci and then |
|
7574 * configure the phci child. |
|
7575 */ |
|
7576 if ((ph_dip = e_ddi_hold_devi_by_path(path, 0)) != NULL) { |
|
7577 if (ndi_devi_config_one(ph_dip, devnm, &child, NDI_NO_EVENT) == |
|
7578 NDI_SUCCESS) { |
|
7579 /* |
|
7580 * release the hold that ndi_devi_config_one() placed |
|
7581 */ |
|
7582 ndi_rele_devi(child); |
|
7583 rv = MDI_SUCCESS; |
|
7584 } |
|
7585 |
|
7586 /* release the hold that e_ddi_hold_devi_by_path() placed */ |
|
7587 ndi_rele_devi(ph_dip); |
|
7588 } |
|
7589 |
|
7590 devnm--; |
|
7591 *devnm = '/'; |
|
7592 return (rv); |
|
7593 } |
|
7594 |
|
7595 /* |
|
7596 * Build a list of phci client paths for the specified vhci client. |
|
7597 * The list includes only those phci client paths which aren't configured yet. |
|
7598 */ |
|
7599 static mdi_phys_path_t * |
|
7600 build_phclient_path_list(mdi_vhcache_client_t *cct, char *ct_name) |
|
7601 { |
|
7602 mdi_vhcache_pathinfo_t *cpi; |
|
7603 mdi_phys_path_t *pp_head = NULL, *pp_tail = NULL, *pp; |
|
7604 int config_path, len; |
|
7605 |
|
7606 for (cpi = cct->cct_cpi_head; cpi != NULL; cpi = cpi->cpi_next) { |
|
7607 /* |
|
7608 * include only those paths that aren't configured. |
|
7609 */ |
|
7610 config_path = 0; |
|
7611 if (cpi->cpi_pip == NULL) |
|
7612 config_path = 1; |
|
7613 else { |
|
7614 MDI_PI_LOCK(cpi->cpi_pip); |
|
7615 if (MDI_PI_IS_INIT(cpi->cpi_pip)) |
|
7616 config_path = 1; |
|
7617 MDI_PI_UNLOCK(cpi->cpi_pip); |
|
7618 } |
|
7619 |
|
7620 if (config_path) { |
|
7621 pp = kmem_alloc(sizeof (*pp), KM_SLEEP); |
|
7622 len = strlen(cpi->cpi_cphci->cphci_path) + |
|
7623 strlen(ct_name) + strlen(cpi->cpi_addr) + 3; |
|
7624 pp->phys_path = kmem_alloc(len, KM_SLEEP); |
|
7625 (void) snprintf(pp->phys_path, len, "%s/%s@%s", |
|
7626 cpi->cpi_cphci->cphci_path, ct_name, |
|
7627 cpi->cpi_addr); |
|
7628 pp->phys_path_next = NULL; |
|
7629 |
|
7630 if (pp_head == NULL) |
|
7631 pp_head = pp; |
|
7632 else |
|
7633 pp_tail->phys_path_next = pp; |
|
7634 pp_tail = pp; |
|
7635 } |
|
7636 } |
|
7637 |
|
7638 return (pp_head); |
|
7639 } |
|
7640 |
|
7641 /* |
|
7642 * Free the memory allocated for phci client path list. |
|
7643 */ |
|
7644 static void |
|
7645 free_phclient_path_list(mdi_phys_path_t *pp_head) |
|
7646 { |
|
7647 mdi_phys_path_t *pp, *pp_next; |
|
7648 |
|
7649 for (pp = pp_head; pp != NULL; pp = pp_next) { |
|
7650 pp_next = pp->phys_path_next; |
|
7651 kmem_free(pp->phys_path, strlen(pp->phys_path) + 1); |
|
7652 kmem_free(pp, sizeof (*pp)); |
|
7653 } |
|
7654 } |
|
7655 |
|
7656 /* |
|
7657 * Allocated async client structure and initialize with the specified values. |
|
7658 */ |
|
7659 static mdi_async_client_config_t * |
|
7660 alloc_async_client_config(char *ct_name, char *ct_addr, |
|
7661 mdi_phys_path_t *pp_head, mdi_vhcache_lookup_token_t *tok) |
|
7662 { |
|
7663 mdi_async_client_config_t *acc; |
|
7664 |
|
7665 acc = kmem_alloc(sizeof (*acc), KM_SLEEP); |
|
7666 acc->acc_ct_name = i_ddi_strdup(ct_name, KM_SLEEP); |
|
7667 acc->acc_ct_addr = i_ddi_strdup(ct_addr, KM_SLEEP); |
|
7668 acc->acc_phclient_path_list_head = pp_head; |
|
7669 init_vhcache_lookup_token(&acc->acc_token, tok); |
|
7670 acc->acc_next = NULL; |
|
7671 return (acc); |
|
7672 } |
|
7673 |
|
7674 /* |
|
7675 * Free the memory allocated for the async client structure and their members. |
|
7676 */ |
|
7677 static void |
|
7678 free_async_client_config(mdi_async_client_config_t *acc) |
|
7679 { |
|
7680 if (acc->acc_phclient_path_list_head) |
|
7681 free_phclient_path_list(acc->acc_phclient_path_list_head); |
|
7682 kmem_free(acc->acc_ct_name, strlen(acc->acc_ct_name) + 1); |
|
7683 kmem_free(acc->acc_ct_addr, strlen(acc->acc_ct_addr) + 1); |
|
7684 kmem_free(acc, sizeof (*acc)); |
|
7685 } |
|
7686 |
|
7687 /* |
|
7688 * Sort vhcache pathinfos (cpis) of the specified client. |
|
7689 * All cpis which do not have MDI_CPI_HINT_PATH_DOES_NOT_EXIST |
|
7690 * flag set come at the beginning of the list. All cpis which have this |
|
7691 * flag set come at the end of the list. |
|
7692 */ |
|
7693 static void |
|
7694 sort_vhcache_paths(mdi_vhcache_client_t *cct) |
|
7695 { |
|
7696 mdi_vhcache_pathinfo_t *cpi, *cpi_next, *cpi_head; |
|
7697 |
|
7698 cpi_head = cct->cct_cpi_head; |
|
7699 cct->cct_cpi_head = cct->cct_cpi_tail = NULL; |
|
7700 for (cpi = cpi_head; cpi != NULL; cpi = cpi_next) { |
|
7701 cpi_next = cpi->cpi_next; |
|
7702 enqueue_vhcache_pathinfo(cct, cpi); |
|
7703 } |
|
7704 } |
|
7705 |
|
7706 /* |
|
7707 * Verify whether MDI_CPI_HINT_PATH_DOES_NOT_EXIST flag setting is correct for |
|
7708 * every vhcache pathinfo of the specified client. If not adjust the flag |
|
7709 * setting appropriately. |
|
7710 * |
|
7711 * Note that MDI_CPI_HINT_PATH_DOES_NOT_EXIST flag is persisted in the |
|
7712 * on-disk vhci cache. So every time this flag is updated the cache must be |
|
7713 * flushed. |
|
7714 */ |
|
/*
 * Verify whether MDI_CPI_HINT_PATH_DOES_NOT_EXIST flag setting is correct for
 * every vhcache pathinfo of the specified client. If not adjust the flag
 * setting appropriately.
 *
 * Note that MDI_CPI_HINT_PATH_DOES_NOT_EXIST flag is persisted in the
 * on-disk vhci cache. So every time this flag is updated the cache must be
 * flushed.
 */
static void
adjust_sort_vhcache_paths(mdi_vhci_config_t *vhc, char *ct_name, char *ct_addr,
    mdi_vhcache_lookup_token_t *tok)
{
	mdi_vhci_cache_t *vhcache = &vhc->vhc_vhcache;
	mdi_vhcache_client_t *cct;
	mdi_vhcache_pathinfo_t *cpi;

	rw_enter(&vhcache->vhcache_lock, RW_READER);
	if ((cct = lookup_vhcache_client(vhcache, ct_name, ct_addr, tok))
	    == NULL) {
		rw_exit(&vhcache->vhcache_lock);
		return;
	}

	/*
	 * to avoid unnecessary on-disk cache updates, first check if an
	 * update is really needed. If no update is needed simply return.
	 */
	for (cpi = cct->cct_cpi_head; cpi != NULL; cpi = cpi->cpi_next) {
		/* mismatch: flag disagrees with whether a live pip exists */
		if ((cpi->cpi_pip != NULL &&
		    (cpi->cpi_flags & MDI_CPI_HINT_PATH_DOES_NOT_EXIST)) ||
		    (cpi->cpi_pip == NULL &&
		    !(cpi->cpi_flags & MDI_CPI_HINT_PATH_DOES_NOT_EXIST))) {
			break;
		}
	}
	if (cpi == NULL) {
		/* every flag already correct; nothing to update */
		rw_exit(&vhcache->vhcache_lock);
		return;
	}

	/*
	 * Need the writer lock to modify flags.  If the upgrade fails we
	 * must drop and re-enter, so the cct may have been freed by a
	 * cache cleanup in the window — look it up again.
	 */
	if (rw_tryupgrade(&vhcache->vhcache_lock) == 0) {
		rw_exit(&vhcache->vhcache_lock);
		rw_enter(&vhcache->vhcache_lock, RW_WRITER);
		if ((cct = lookup_vhcache_client(vhcache, ct_name, ct_addr,
		    tok)) == NULL) {
			rw_exit(&vhcache->vhcache_lock);
			return;
		}
	}

	/* set each flag to match pip existence, then restore sort order */
	for (cpi = cct->cct_cpi_head; cpi != NULL; cpi = cpi->cpi_next) {
		if (cpi->cpi_pip != NULL)
			cpi->cpi_flags &= ~MDI_CPI_HINT_PATH_DOES_NOT_EXIST;
		else
			cpi->cpi_flags |= MDI_CPI_HINT_PATH_DOES_NOT_EXIST;
	}
	sort_vhcache_paths(cct);

	rw_exit(&vhcache->vhcache_lock);
	/* the hint flags are persisted on disk; schedule a flush */
	vhcache_dirty(vhc);
}
|
7768 |
|
7769 /* |
|
7770 * Configure all specified paths of the client. |
|
7771 */ |
|
7772 static void |
|
7773 config_client_paths_sync(mdi_vhci_config_t *vhc, char *ct_name, char *ct_addr, |
|
7774 mdi_phys_path_t *pp_head, mdi_vhcache_lookup_token_t *tok) |
|
7775 { |
|
7776 mdi_phys_path_t *pp; |
|
7777 |
|
7778 for (pp = pp_head; pp != NULL; pp = pp->phys_path_next) |
|
7779 (void) bus_config_one_phci_child(pp->phys_path); |
|
7780 adjust_sort_vhcache_paths(vhc, ct_name, ct_addr, tok); |
|
7781 } |
|
7782 |
|
7783 /* |
|
7784 * Dequeue elements from vhci async client config list and bus configure |
|
7785 * their corresponding phci clients. |
|
7786 */ |
|
/*
 * Dequeue elements from vhci async client config list and bus configure
 * their corresponding phci clients.
 *
 * Worker for config_client_paths_async(); exits after sitting idle for
 * mdi_async_config_idle_time seconds or when MDI_VHC_EXIT is set.
 */
static void
config_client_paths_thread(void *arg)
{
	mdi_vhci_config_t *vhc = (mdi_vhci_config_t *)arg;
	mdi_async_client_config_t *acc;
	clock_t quit_at_ticks;
	clock_t idle_time = mdi_async_config_idle_time * TICKS_PER_SECOND;
	callb_cpr_t cprinfo;

	CALLB_CPR_INIT(&cprinfo, &vhc->vhc_lock, callb_generic_cpr,
	    "mdi_config_client_paths");

	for (; ; ) {
		quit_at_ticks = ddi_get_lbolt() + idle_time;

		/* wait for work, an exit request, or the idle deadline */
		mutex_enter(&vhc->vhc_lock);
		while (!(vhc->vhc_flags & MDI_VHC_EXIT) &&
		    vhc->vhc_acc_list_head == NULL &&
		    ddi_get_lbolt() < quit_at_ticks) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			(void) cv_timedwait(&vhc->vhc_cv, &vhc->vhc_lock,
			    quit_at_ticks);
			CALLB_CPR_SAFE_END(&cprinfo, &vhc->vhc_lock);
		}

		if ((vhc->vhc_flags & MDI_VHC_EXIT) ||
		    vhc->vhc_acc_list_head == NULL)
			goto out;

		/* dequeue the next async config request */
		acc = vhc->vhc_acc_list_head;
		vhc->vhc_acc_list_head = acc->acc_next;
		if (vhc->vhc_acc_list_head == NULL)
			vhc->vhc_acc_list_tail = NULL;
		vhc->vhc_acc_count--;
		mutex_exit(&vhc->vhc_lock);

		/* perform the actual bus configs without holding the lock */
		config_client_paths_sync(vhc, acc->acc_ct_name,
		    acc->acc_ct_addr, acc->acc_phclient_path_list_head,
		    &acc->acc_token);

		free_async_client_config(acc);
	}

out:
	/* vhc_lock is held here (goto taken with the lock held) */
	vhc->vhc_acc_thrcount--;
	/* CALLB_CPR_EXIT releases the vhc->vhc_lock */
	CALLB_CPR_EXIT(&cprinfo);
}
|
7835 |
|
7836 /* |
|
7837 * Arrange for all the phci client paths (pp_head) for the specified client |
|
7838 * to be bus configured asynchronously by a thread. |
|
7839 */ |
|
/*
 * Arrange for all the phci client paths (pp_head) for the specified client
 * to be bus configured asynchronously by a thread.
 *
 * Takes ownership of pp_head.  If a request for the same client is
 * already queued, the new request is dropped.  A new worker thread is
 * spawned only when the queue depth exceeds the number of workers.
 */
static void
config_client_paths_async(mdi_vhci_config_t *vhc, char *ct_name, char *ct_addr,
    mdi_phys_path_t *pp_head, mdi_vhcache_lookup_token_t *tok)
{
	mdi_async_client_config_t *acc, *newacc;
	int create_thread;

	if (pp_head == NULL)
		return;

	if (mdi_mtc_off) {
		/* multithreading disabled; do the work inline instead */
		config_client_paths_sync(vhc, ct_name, ct_addr, pp_head, tok);
		free_phclient_path_list(pp_head);
		return;
	}

	newacc = alloc_async_client_config(ct_name, ct_addr, pp_head, tok);
	ASSERT(newacc);

	mutex_enter(&vhc->vhc_lock);
	/* drop the request if this client is already queued */
	for (acc = vhc->vhc_acc_list_head; acc != NULL; acc = acc->acc_next) {
		if (strcmp(ct_name, acc->acc_ct_name) == 0 &&
		    strcmp(ct_addr, acc->acc_ct_addr) == 0) {
			free_async_client_config(newacc);
			mutex_exit(&vhc->vhc_lock);
			return;
		}
	}

	/* append to the tail of the async config queue */
	if (vhc->vhc_acc_list_head == NULL)
		vhc->vhc_acc_list_head = newacc;
	else
		vhc->vhc_acc_list_tail->acc_next = newacc;
	vhc->vhc_acc_list_tail = newacc;
	vhc->vhc_acc_count++;
	if (vhc->vhc_acc_count <= vhc->vhc_acc_thrcount) {
		/* enough workers already; wake one up */
		cv_broadcast(&vhc->vhc_cv);
		create_thread = 0;
	} else {
		/* claim a new worker slot while holding the lock */
		vhc->vhc_acc_thrcount++;
		create_thread = 1;
	}
	mutex_exit(&vhc->vhc_lock);

	if (create_thread)
		(void) thread_create(NULL, 0, config_client_paths_thread, vhc,
		    0, &p0, TS_RUN, minclsyspri);
}
|
7888 |
|
7889 /* |
|
7890 * Return number of online paths for the specified client. |
|
7891 */ |
|
7892 static int |
|
7893 nonline_paths(mdi_vhcache_client_t *cct) |
|
7894 { |
|
7895 mdi_vhcache_pathinfo_t *cpi; |
|
7896 int online_count = 0; |
|
7897 |
|
7898 for (cpi = cct->cct_cpi_head; cpi != NULL; cpi = cpi->cpi_next) { |
|
7899 if (cpi->cpi_pip != NULL) { |
|
7900 MDI_PI_LOCK(cpi->cpi_pip); |
|
7901 if (cpi->cpi_pip->pi_state == MDI_PATHINFO_STATE_ONLINE) |
|
7902 online_count++; |
|
7903 MDI_PI_UNLOCK(cpi->cpi_pip); |
|
7904 } |
|
7905 } |
|
7906 |
|
7907 return (online_count); |
|
7908 } |
|
7909 |
|
7910 /* |
|
7911 * Bus configure all paths for the specified vhci client. |
|
7912 * If at least one path for the client is already online, the remaining paths |
|
7913 * will be configured asynchronously. Otherwise, it synchronously configures |
|
7914 * the paths until at least one path is online and then rest of the paths |
|
7915 * will be configured asynchronously. |
|
7916 */ |
|
/*
 * Bus configure all paths for the specified vhci client.
 * If at least one path for the client is already online, the remaining paths
 * will be configured asynchronously. Otherwise, it synchronously configures
 * the paths until at least one path is online and then rest of the paths
 * will be configured asynchronously.
 *
 * Entered with vhcache_lock held; always exits with it released.
 */
static void
config_client_paths(mdi_vhci_config_t *vhc, char *ct_name, char *ct_addr)
{
	mdi_vhci_cache_t *vhcache = &vhc->vhc_vhcache;
	mdi_phys_path_t *pp_head, *pp;
	mdi_vhcache_client_t *cct;
	mdi_vhcache_lookup_token_t tok;

	ASSERT(RW_LOCK_HELD(&vhcache->vhcache_lock));

	init_vhcache_lookup_token(&tok, NULL);

	/* nothing to do without a cached client or unconfigured paths */
	if (ct_name == NULL || ct_addr == NULL ||
	    (cct = lookup_vhcache_client(vhcache, ct_name, ct_addr, &tok))
	    == NULL ||
	    (pp_head = build_phclient_path_list(cct, ct_name)) == NULL) {
		rw_exit(&vhcache->vhcache_lock);
		return;
	}

	/* if at least one path is online, configure the rest asynchronously */
	if (nonline_paths(cct) > 0) {
		rw_exit(&vhcache->vhcache_lock);
		config_client_paths_async(vhc, ct_name, ct_addr, pp_head, &tok);
		return;
	}

	rw_exit(&vhcache->vhcache_lock);

	/*
	 * Configure paths synchronously one at a time.  After each
	 * success, re-take the lock and re-look-up the cct (it may have
	 * been freed by a cache cleanup while the lock was dropped); once
	 * a path is online, hand the remainder of the list off to the
	 * async machinery.
	 */
	for (pp = pp_head; pp != NULL; pp = pp->phys_path_next) {
		if (bus_config_one_phci_child(pp->phys_path) == MDI_SUCCESS) {
			rw_enter(&vhcache->vhcache_lock, RW_READER);

			if ((cct = lookup_vhcache_client(vhcache, ct_name,
			    ct_addr, &tok)) == NULL) {
				rw_exit(&vhcache->vhcache_lock);
				goto out;
			}

			if (nonline_paths(cct) > 0 &&
			    pp->phys_path_next != NULL) {
				rw_exit(&vhcache->vhcache_lock);
				config_client_paths_async(vhc, ct_name, ct_addr,
				    pp->phys_path_next, &tok);
				/* rest of the list now owned by async side */
				pp->phys_path_next = NULL;
				goto out;
			}

			rw_exit(&vhcache->vhcache_lock);
		}
	}

	adjust_sort_vhcache_paths(vhc, ct_name, ct_addr, &tok);
out:
	free_phclient_path_list(pp_head);
}
|
7973 |
|
7974 static void |
|
7975 single_threaded_vhconfig_enter(mdi_vhci_config_t *vhc) |
|
7976 { |
|
7977 mutex_enter(&vhc->vhc_lock); |
|
7978 while (vhc->vhc_flags & MDI_VHC_SINGLE_THREADED) |
|
7979 cv_wait(&vhc->vhc_cv, &vhc->vhc_lock); |
|
7980 vhc->vhc_flags |= MDI_VHC_SINGLE_THREADED; |
|
7981 mutex_exit(&vhc->vhc_lock); |
|
7982 } |
|
7983 |
|
/*
 * Release single-threaded vhci configuration access acquired with
 * single_threaded_vhconfig_enter() and wake up any waiters.
 */
static void
single_threaded_vhconfig_exit(mdi_vhci_config_t *vhc)
{
	mutex_enter(&vhc->vhc_lock);
	vhc->vhc_flags &= ~MDI_VHC_SINGLE_THREADED;
	cv_broadcast(&vhc->vhc_cv);
	mutex_exit(&vhc->vhc_lock);
}
|
7992 |
|
7993 /* |
|
7994 * Attach the phci driver instances associated with the vhci: |
|
7995 * If root is mounted attach all phci driver instances. |
|
7996 * If root is not mounted, attach the instances of only those phci |
|
7997 * drivers that have the root support. |
|
7998 */ |
|
7999 static void |
|
8000 attach_phci_drivers(mdi_vhci_config_t *vhc, int root_mounted) |
6303 { |
8001 { |
6304 int i; |
8002 int i; |
6305 major_t m; |
8003 major_t m; |
6306 |
8004 |
6307 for (i = 0; i < N_PHCI_DRIVERS; i++) { |
8005 for (i = 0; i < vhc->vhc_nphci_drivers; i++) { |
6308 m = ddi_name_to_major(phci_driver_list[i]); |
8006 if (root_mounted == 0 && |
|
8007 vhc->vhc_phci_driver_list[i].phdriver_root_support == 0) |
|
8008 continue; |
|
8009 |
|
8010 m = ddi_name_to_major( |
|
8011 vhc->vhc_phci_driver_list[i].phdriver_name); |
6309 if (m != (major_t)-1) { |
8012 if (m != (major_t)-1) { |
6310 if (ddi_hold_installed_driver(m) != NULL) |
8013 if (ddi_hold_installed_driver(m) != NULL) |
6311 ddi_rele_driver(m); |
8014 ddi_rele_driver(m); |
6312 } |
8015 } |
6313 } |
8016 } |
6314 } |
8017 } |
6315 |
8018 |
6316 /* bus config the specified phci */ |
8019 /* |
|
8020 * Build vhci cache: |
|
8021 * |
|
8022 * Attach phci driver instances and then drive BUS_CONFIG_ALL on |
|
8023 * the phci driver instances. During this process the cache gets built. |
|
8024 * |
|
8025 * Cache is built fully if the root is mounted (i.e., root_mounted is nonzero). |
|
8026 * |
|
8027 * If the root is not mounted, phci drivers that do not have root support |
|
8028 * are not attached. As a result the cache is built partially. The entries |
|
8029 * in the cache reflect only those phci drivers that have root support. |
|
8030 */ |
|
/*
 * Build vhci cache:
 *
 * Attach phci driver instances and then drive BUS_CONFIG_ALL on
 * the phci driver instances. During this process the cache gets built.
 *
 * Cache is built fully if the root is mounted (i.e., root_mounted is nonzero).
 *
 * If the root is not mounted, phci drivers that do not have root support
 * are not attached. As a result the cache is built partially. The entries
 * in the cache reflect only those phci drivers that have root support.
 */
static vhcache_build_status_t
build_vhci_cache(mdi_vhci_config_t *vhc, int root_mounted)
{
	mdi_vhci_cache_t *vhcache = &vhc->vhc_vhcache;

	rw_enter(&vhcache->vhcache_lock, RW_READER);
	if (vhcache->vhcache_flags & MDI_VHCI_CACHE_SETUP_DONE) {
		/* cache already fully built; nothing to do */
		rw_exit(&vhcache->vhcache_lock);
		return (VHCACHE_NOT_REBUILT);
	}
	rw_exit(&vhcache->vhcache_lock);

	attach_phci_drivers(vhc, root_mounted);
	bus_config_all_phcis(vhcache, NDI_DRV_CONF_REPROBE | NDI_NO_EVENT,
	    BUS_CONFIG_ALL, (major_t)-1);

	if (root_mounted) {
		/* mark the cache complete and schedule an on-disk flush */
		rw_enter(&vhcache->vhcache_lock, RW_WRITER);
		vhcache->vhcache_flags |= MDI_VHCI_CACHE_SETUP_DONE;
		rw_exit(&vhcache->vhcache_lock);
		vhcache_dirty(vhc);
		return (VHCACHE_FULLY_BUILT);
	} else
		return (VHCACHE_PARTIALLY_BUILT);
}
|
8056 |
|
8057 /* |
|
8058 * Wait until the root is mounted and then build the vhci cache. |
|
8059 */ |
6317 static void |
8060 static void |
6318 i_mdi_phci_bus_config(void *arg) |
8061 build_vhci_cache_thread(void *arg) |
6319 { |
8062 { |
6320 mdi_phci_config_t *phc = (mdi_phci_config_t *)arg; |
8063 mdi_vhci_config_t *vhc = (mdi_vhci_config_t *)arg; |
6321 mdi_vhci_config_t *vhc; |
8064 |
6322 dev_info_t *ph_dip; |
8065 mutex_enter(&vhc->vhc_lock); |
6323 int rv; |
8066 while (!modrootloaded && !(vhc->vhc_flags & MDI_VHC_EXIT)) { |
6324 |
8067 (void) cv_timedwait(&vhc->vhc_cv, &vhc->vhc_lock, |
6325 ASSERT(phc); |
8068 ddi_get_lbolt() + 10 * TICKS_PER_SECOND); |
6326 vhc = phc->phc_vhc; |
8069 } |
6327 ASSERT(vhc->vhc_op == BUS_CONFIG_ALL || |
8070 if (vhc->vhc_flags & MDI_VHC_EXIT) |
6328 vhc->vhc_op == BUS_CONFIG_DRIVER); |
8071 goto out; |
6329 |
8072 |
6330 /* |
8073 mutex_exit(&vhc->vhc_lock); |
6331 * Must have already held the phci parent in |
8074 |
6332 * i_mdi_bus_config_all_phcis(). |
8075 /* |
6333 * First configure the phci itself. |
8076 * Now that the root is mounted. So build_vhci_cache() will build |
6334 */ |
8077 * the full cache. |
6335 rv = ndi_devi_config_one(phc->phc_parent_dip, phc->phc_devnm + 1, |
8078 */ |
6336 &ph_dip, vhc->vhc_flags); |
8079 (void) build_vhci_cache(vhc, 1); |
6337 |
8080 |
6338 /* release the hold that i_mdi_bus_config_all_phcis() placed */ |
8081 mutex_enter(&vhc->vhc_lock); |
6339 ndi_rele_devi(phc->phc_parent_dip); |
8082 out: |
6340 |
8083 vhc->vhc_flags &= ~MDI_VHC_BUILD_VHCI_CACHE_THREAD; |
6341 if (rv == NDI_SUCCESS) { |
8084 mutex_exit(&vhc->vhc_lock); |
6342 /* now bus config the phci */ |
8085 } |
6343 if (vhc->vhc_op == BUS_CONFIG_DRIVER) { |
8086 |
6344 (void) ndi_devi_config_driver(ph_dip, vhc->vhc_flags, |
8087 /* |
6345 vhc->vhc_major); |
8088 * Build vhci cache - a wrapper for build_vhci_cache(). |
6346 } else |
8089 * |
6347 (void) ndi_devi_config(ph_dip, vhc->vhc_flags); |
8090 * In a normal case on-disk vhci cache is read and setup during booting. |
6348 |
8091 * But if the on-disk vhci cache is not there or deleted or corrupted then |
6349 /* release the hold that ndi_devi_config_one() placed */ |
8092 * this function sets up the vhci cache. |
6350 ndi_rele_devi(ph_dip); |
8093 * |
6351 } |
8094 * The cache is built fully if the root is mounted. |
6352 } |
8095 * |
6353 |
8096 * If the root is not mounted, initially the cache is built reflecting only |
6354 /* |
8097 * those driver entries that have the root support. A separate thread is |
6355 * Bus config all registered phcis associated with the vhci in parallel. |
8098 * created to handle the creation of full cache. This thread will wait |
6356 * This process guarantees that the child nodes are enumerated under the vhci, |
8099 * until the root is mounted and then rebuilds the cache. |
6357 * but not necessarily attached. |
|
6358 * op must be BUS_CONFIG_DRIVER or BUS_CONFIG_ALL. |
|
6359 */ |
8100 */ |
6360 static int |
8101 static int |
6361 i_mdi_bus_config_all_phcis(dev_info_t *vdip, uint_t flags, |
8102 e_build_vhci_cache(mdi_vhci_config_t *vhc) |
6362 ddi_bus_config_op_t op, major_t maj, int optimize) |
8103 { |
6363 { |
8104 vhcache_build_status_t rv; |
6364 mdi_vhci_t *vh; |
8105 |
6365 mdi_phci_t *ph; |
8106 single_threaded_vhconfig_enter(vhc); |
6366 mdi_phci_config_t *phc; |
8107 |
6367 int64_t req_time; |
8108 mutex_enter(&vhc->vhc_lock); |
6368 int phci_count, rv; |
8109 if (vhc->vhc_flags & MDI_VHC_BUILD_VHCI_CACHE_THREAD) { |
6369 static int first_time = 1; |
8110 if (modrootloaded) { |
6370 |
8111 cv_broadcast(&vhc->vhc_cv); |
6371 ASSERT(op == BUS_CONFIG_ALL || op == BUS_CONFIG_DRIVER); |
8112 /* wait until build vhci cache thread exits */ |
6372 ASSERT(!DEVI_BUSY_OWNED(vdip)); |
8113 while (vhc->vhc_flags & MDI_VHC_BUILD_VHCI_CACHE_THREAD) |
6373 |
8114 cv_wait(&vhc->vhc_cv, &vhc->vhc_lock); |
6374 MDI_DEBUG(2, (CE_NOTE, vdip, |
8115 rv = VHCACHE_FULLY_BUILT; |
6375 "!MDI: %s on all phcis: major = %d, flags = 0x%x, optimize = %d\n", |
8116 } else { |
6376 (op == BUS_CONFIG_DRIVER) ? "BUS_CONFIG_DRIVER" : "BUS_CONFIG_ALL", |
8117 /* |
6377 (int)maj, flags, optimize)); |
8118 * The presense of MDI_VHC_BUILD_VHCI_CACHE_THREAD |
6378 |
8119 * flag indicates that the cache has already been |
6379 vh = i_devi_get_vhci(vdip); |
8120 * partially built. |
6380 ASSERT(vh); |
8121 */ |
6381 |
8122 rv = VHCACHE_PARTIALLY_BUILT; |
6382 mutex_enter(&mdi_mutex); |
8123 } |
6383 |
8124 |
6384 req_time = lbolt64; |
8125 mutex_exit(&vhc->vhc_lock); |
6385 |
8126 single_threaded_vhconfig_exit(vhc); |
6386 /* |
8127 return (rv); |
6387 * Reduce unnecessary BUS_CONFIG_ALLs when opening stale |
8128 } |
6388 * /dev/[r]dsk links. |
8129 mutex_exit(&vhc->vhc_lock); |
6389 */ |
8130 |
6390 if (optimize && (req_time < vh->vh_bus_config.vhc_cutoff_time)) { |
8131 rv = build_vhci_cache(vhc, modrootloaded); |
6391 mutex_exit(&mdi_mutex); |
8132 |
6392 return (MDI_SUCCESS); |
8133 if (rv == VHCACHE_PARTIALLY_BUILT) { |
6393 } |
|
6394 |
|
6395 /* |
|
6396 * To initiate bus configs on all phcis in parallel, create a taskq |
|
6397 * with multiple threads. Since creation of a taskq is a heavy weight |
|
6398 * operation, taskq is created once per vhci and destroyed only when |
|
6399 * vhci unregisters with mdi. |
|
6400 * |
|
6401 * If multiple bus config requests arrive at a time, bus configs on |
|
6402 * phcis are initiated on behalf of one of the requests. Other requests |
|
6403 * wait until the bus configs on phcis is done. |
|
6404 * |
|
6405 * When a BUS_CONFIG_ALL on phcis completes, the following is done |
|
6406 * to avoid more of unnecessary bus configs. |
|
6407 * |
|
6408 * o all BUS_CONFIG_ALL requests currently waiting with optimize |
|
6409 * flag set are returned, i.e., no new BUS_CONFIG_ALL is initiated |
|
6410 * on phcis on behalf of these requests. |
|
6411 * |
|
6412 * o all BUS_CONFIG_ALL or BUS_CONFIG_DRIVER requests currently |
|
6413 * waiting but have arrived prior to initiating BUS_CONFIG_ALL on |
|
6414 * phcis are also returned. |
|
6415 * |
|
6416 * In other cases a new BUS_CONFIG_ALL or BUS_CONFIG_DRIVER is |
|
6417 * initiated on phcis on behalf of a new request. |
|
6418 */ |
|
6419 |
|
6420 /* check if a bus config on phcis is in progress */ |
|
6421 while (vh->vh_bus_config.vhc_start_time != 0) { |
|
6422 ddi_bus_config_op_t current_op; |
|
6423 int64_t start_time; |
|
6424 |
|
6425 current_op = vh->vh_bus_config.vhc_op; |
|
6426 start_time = vh->vh_bus_config.vhc_start_time; |
|
6427 |
|
6428 /* wait until the current bus configs on phcis are done */ |
|
6429 while (vh->vh_bus_config.vhc_start_time == start_time) |
|
6430 cv_wait(&vh->vh_bus_config.vhc_cv, &mdi_mutex); |
|
6431 |
|
6432 if (current_op == BUS_CONFIG_ALL && |
|
6433 vh->vh_bus_config.vhc_cutoff_time > 0 && (optimize || |
|
6434 req_time < start_time)) { |
|
6435 mutex_exit(&mdi_mutex); |
|
6436 return (MDI_SUCCESS); |
|
6437 } |
|
6438 } |
|
6439 |
|
6440 /* |
|
6441 * At this point we are single threaded until vh_bus_config.start_time |
|
6442 * is reset to 0 at the end of this function. |
|
6443 */ |
|
6444 |
|
6445 vh->vh_bus_config.vhc_op = op; |
|
6446 vh->vh_bus_config.vhc_major = maj; |
|
6447 vh->vh_bus_config.vhc_flags = flags; |
|
6448 vh->vh_bus_config.vhc_start_time = lbolt64; |
|
6449 |
|
6450 if (first_time && strcmp(vh->vh_class, MDI_HCI_CLASS_SCSI) == 0) { |
|
6451 mutex_exit(&mdi_mutex); |
|
6452 i_mdi_attach_phci_drivers(); |
|
6453 mutex_enter(&mdi_mutex); |
|
6454 first_time = 0; |
|
6455 } |
|
6456 |
|
6457 ASSERT(vh->vh_phci_count >= 0); |
|
6458 if (vh->vh_phci_count == 0) { |
|
6459 rv = MDI_SUCCESS; |
|
6460 goto out1; |
|
6461 } |
|
6462 |
|
6463 /* |
|
6464 * Create a taskq to initiate bus configs in parallel on phcis. |
|
6465 * Taskq allocation can be done in mdi_vhci_register() routine |
|
6466 * instead of here. For most systems, doing it here on demand saves |
|
6467 * resources as this code path is never called most of the times. |
|
6468 */ |
|
6469 if (vh->vh_bus_config.vhc_taskq == NULL) { |
|
6470 /* |
8134 /* |
6471 * it is ok even if vh->vh_phci_count changes after we release |
8135 * create a thread; this thread will wait until the root is |
6472 * the mdi_mutex as phci_count is used just as an |
8136 * mounted and then fully rebuilds the cache. |
6473 * advisory number to taskq_create. |
|
6474 */ |
8137 */ |
6475 phci_count = vh->vh_phci_count; |
8138 mutex_enter(&vhc->vhc_lock); |
6476 mutex_exit(&mdi_mutex); |
8139 vhc->vhc_flags |= MDI_VHC_BUILD_VHCI_CACHE_THREAD; |
6477 |
8140 mutex_exit(&vhc->vhc_lock); |
6478 /* |
8141 (void) thread_create(NULL, 0, build_vhci_cache_thread, |
6479 * As we are single threaded, it is ok to access the |
8142 vhc, 0, &p0, TS_RUN, minclsyspri); |
6480 * vh_bus_config.taskq member of vh outside of mdi_mutex |
8143 } |
6481 */ |
8144 |
6482 if ((vh->vh_bus_config.vhc_taskq = taskq_create( |
8145 single_threaded_vhconfig_exit(vhc); |
6483 "mdi_bus_config_taskq", mdi_max_bus_config_threads, |
|
6484 MDI_TASKQ_PRI, phci_count, INT_MAX, |
|
6485 TASKQ_PREPOPULATE | TASKQ_DYNAMIC)) == NULL) { |
|
6486 rv = MDI_FAILURE; |
|
6487 goto out; |
|
6488 } |
|
6489 |
|
6490 mutex_enter(&mdi_mutex); |
|
6491 } |
|
6492 |
|
6493 /* allocate at least vh->vh_phci_count phci bus config structures */ |
|
6494 while (vh->vh_bus_config.vhc_phc_cnt < vh->vh_phci_count) { |
|
6495 int count; |
|
6496 |
|
6497 count = vh->vh_phci_count - vh->vh_bus_config.vhc_phc_cnt; |
|
6498 mutex_exit(&mdi_mutex); |
|
6499 while (count--) { |
|
6500 phc = kmem_alloc(sizeof (*phc), KM_SLEEP); |
|
6501 phc->phc_vhc = &vh->vh_bus_config; |
|
6502 /* |
|
6503 * there is no need to hold a lock here as we |
|
6504 * are single threaded and no one else manipulates |
|
6505 * the list while we are here. |
|
6506 */ |
|
6507 phc->phc_next = vh->vh_bus_config.vhc_phc; |
|
6508 vh->vh_bus_config.vhc_phc = phc; |
|
6509 vh->vh_bus_config.vhc_phc_cnt++; |
|
6510 } |
|
6511 mutex_enter(&mdi_mutex); |
|
6512 /* |
|
6513 * as new phcis could register with mdi after we dropped |
|
6514 * the mdi_mutex, we need to recheck the vh->vh_phci_count. |
|
6515 * Hence the while loop. |
|
6516 */ |
|
6517 } |
|
6518 |
|
6519 for (ph = vh->vh_phci_head, phc = vh->vh_bus_config.vhc_phc; |
|
6520 ph != NULL; ph = ph->ph_next, phc = phc->phc_next) { |
|
6521 |
|
6522 ASSERT(phc != NULL); |
|
6523 |
|
6524 /* build a phci config handle to be passed to a taskq thread */ |
|
6525 MDI_PHCI_LOCK(ph); |
|
6526 ASSERT(ph->ph_dip); |
|
6527 |
|
6528 /* |
|
6529 * We need to hold the phci dip before bus configuring the phci. |
|
6530 * But placing a hold on the phci dip is not safe here due to |
|
6531 * the race with phci detach. To get around this race, |
|
6532 * we place a hold on the phci dip's parent and note down |
|
6533 * the phci's name@addr. Later, in i_mdi_phci_bus_config(), |
|
6534 * we'll first configure the phci itself before bus |
|
6535 * configuring the phci. |
|
6536 */ |
|
6537 phc->phc_parent_dip = ddi_get_parent(ph->ph_dip); |
|
6538 ndi_hold_devi(phc->phc_parent_dip); |
|
6539 (void) ddi_deviname(ph->ph_dip, phc->phc_devnm); |
|
6540 MDI_PHCI_UNLOCK(ph); |
|
6541 } |
|
6542 |
|
6543 phci_count = vh->vh_phci_count; |
|
6544 if (vh->vh_bus_config.vhc_cutoff_time == -1) |
|
6545 vh->vh_bus_config.vhc_cutoff_time = 0; |
|
6546 mutex_exit(&mdi_mutex); |
|
6547 |
|
6548 MDI_DEBUG(2, (CE_NOTE, vdip, |
|
6549 "!MDI: initiating %s on all phcis, major = %d, flags = 0x%x\n", |
|
6550 (op == BUS_CONFIG_DRIVER) ? "BUS_CONFIG_DRIVER" : "BUS_CONFIG_ALL", |
|
6551 (int)maj, flags)); |
|
6552 |
|
6553 /* |
|
6554 * again, no need to hold a lock here as we are single threaded and |
|
6555 * no one else manipulates the list while we are here. |
|
6556 */ |
|
6557 for (phc = vh->vh_bus_config.vhc_phc; phci_count--; |
|
6558 phc = phc->phc_next) { |
|
6559 (void) taskq_dispatch(vh->vh_bus_config.vhc_taskq, |
|
6560 i_mdi_phci_bus_config, phc, TQ_SLEEP); |
|
6561 } |
|
6562 |
|
6563 /* wait until all phci bus configs are done */ |
|
6564 taskq_wait(vh->vh_bus_config.vhc_taskq); |
|
6565 rv = MDI_SUCCESS; |
|
6566 |
|
6567 out: |
|
6568 mutex_enter(&mdi_mutex); |
|
6569 out1: |
|
6570 vh->vh_bus_config.vhc_start_time = 0; |
|
6571 if (op == BUS_CONFIG_ALL && vh->vh_bus_config.vhc_cutoff_time != -1) { |
|
6572 vh->vh_bus_config.vhc_cutoff_time = lbolt64 + |
|
6573 (int64_t)drv_usectohz(mdi_bus_config_timeout * 1000000); |
|
6574 } |
|
6575 cv_broadcast(&vh->vh_bus_config.vhc_cv); |
|
6576 mutex_exit(&mdi_mutex); |
|
6577 |
|
6578 MDI_DEBUG(2, (CE_NOTE, vdip, "!MDI: %s on all phcis %s\n", |
|
6579 (op == BUS_CONFIG_DRIVER) ? "BUS_CONFIG_DRIVER" : "BUS_CONFIG_ALL", |
|
6580 (rv == MDI_SUCCESS) ? "successful" : "failed")); |
|
6581 |
|
6582 return (rv); |
8146 return (rv); |
6583 } |
8147 } |
6584 |
8148 |
6585 /* |
8149 /* |
6586 * A simple bus config implementation for vhcis with the assumption that all |
8150 * Generic vhci bus config implementation: |
6587 * phcis are always registered with MDI. |
|
6588 * |
8151 * |
6589 * BUS_CONFIG_ALL |
8152 * Parameters |
|
8153 * vdip vhci dip |
|
8154 * flags bus config flags |
|
8155 * op bus config operation |
|
8156 * The remaining parameters are bus config operation specific |
6590 * |
8157 * |
6591 * Do BUS_CONFIG_ALL on all phcis associated with the vhci. |
8158 * for BUS_CONFIG_ONE |
|
8159 * arg pointer to name@addr |
|
8160 * child upon successful return from this function, *child will be |
|
8161 * set to the configured and held devinfo child node of vdip. |
|
8162 * ct_addr pointer to client address (i.e. GUID) |
6592 * |
8163 * |
6593 * BUS_CONFIG_DRIVER |
8164 * for BUS_CONFIG_DRIVER |
|
8165 * arg major number of the driver |
|
8166 * child and ct_addr parameters are ignored |
6594 * |
8167 * |
6595 * Do BUS_CONFIG_DRIVER on all phcis associated with the vhci. |
8168 * for BUS_CONFIG_ALL |
|
8169 * arg, child, and ct_addr parameters are ignored |
6596 * |
8170 * |
6597 * BUS_CONFIG_ONE |
8171 * Note that for the rest of the bus config operations, this function simply |
6598 * |
8172 * calls the framework provided default bus config routine. |
6599 * If the requested child has already been enumerated under the vhci |
|
6600 * configure the child and return. Otherwise do BUS_CONFIG_ALL on all |
|
6601 * phcis associated with the vhci. |
|
6602 */ |
8173 */ |
6603 int |
8174 int |
6604 mdi_vhci_bus_config(dev_info_t *vdip, uint_t flags, ddi_bus_config_op_t op, |
8175 mdi_vhci_bus_config(dev_info_t *vdip, uint_t flags, ddi_bus_config_op_t op, |
6605 void *arg, dev_info_t **child) |
8176 void *arg, dev_info_t **child, char *ct_addr) |
6606 { |
8177 { |
6607 int rv = MDI_SUCCESS; |
8178 mdi_vhci_t *vh = i_devi_get_vhci(vdip); |
|
8179 mdi_vhci_config_t *vhc = vh->vh_config; |
|
8180 mdi_vhci_cache_t *vhcache = &vhc->vhc_vhcache; |
|
8181 vhcache_build_status_t rv = VHCACHE_NOT_REBUILT; |
|
8182 char *cp; |
6608 |
8183 |
6609 /* |
8184 /* |
6610 * While bus configuring phcis, the phci driver interactions with MDI |
8185 * While bus configuring phcis, the phci driver interactions with MDI |
6611 * cause child nodes to be enumerated under the vhci node for which |
8186 * cause child nodes to be enumerated under the vhci node for which |
6612 * they need to ndi_devi_enter the vhci node. |
8187 * they need to ndi_devi_enter the vhci node. |