181 * TQ_FRONT: Puts the new task at the front of the queue. Be careful. |
185 * TQ_FRONT: Puts the new task at the front of the queue. Be careful. |
182 * |
186 * |
183 * NOTE: Dynamic task queues are much more likely to fail in |
187 * NOTE: Dynamic task queues are much more likely to fail in |
184 * taskq_dispatch() (especially if TQ_NOQUEUE was specified), so it |
188 * taskq_dispatch() (especially if TQ_NOQUEUE was specified), so it |
185 * is important to have backup strategies handling such failures. |
189 * is important to have backup strategies handling such failures. |
|
190 * |
|
191 * void taskq_dispatch_ent(tq, func, arg, flags, tqent) |
|
192 * |
|
193 * This is a light-weight form of taskq_dispatch(), that uses a |
|
194 * preallocated taskq_ent_t structure for scheduling. As a |
|
195 * result, it does not perform allocations and cannot ever fail. |
|
196 * Note especially that it cannot be used with TASKQ_DYNAMIC |
|
197 * taskqs. The memory for the tqent must not be modified or used |
|
198 * until the function (func) is called. (However, func itself |
|
199 * may safely modify or free this memory, once it is called.) |
|
200 * Note that the taskq framework will NOT free this memory. |
186 * |
201 * |
187 * void taskq_wait(tq): |
202 * void taskq_wait(tq): |
188 * |
203 * |
189 * Waits for all previously scheduled tasks to complete. |
204 * Waits for all previously scheduled tasks to complete. |
190 * |
205 * |
1116 * Returns: NULL if dispatch failed. |
1131 * Returns: NULL if dispatch failed. |
1117 * non-NULL if task dispatched successfully. |
1132 * non-NULL if task dispatched successfully. |
1118 * Actual return value is the pointer to taskq entry that was used to |
1133 * Actual return value is the pointer to taskq entry that was used to |
1119 * dispatch a task. This is useful for debugging. |
1134 * dispatch a task. This is useful for debugging. |
1120 */ |
1135 */ |
1121 /* ARGSUSED */ |
|
1122 taskqid_t |
1136 taskqid_t |
1123 taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags) |
1137 taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags) |
1124 { |
1138 { |
1125 taskq_bucket_t *bucket = NULL; /* Which bucket needs extension */ |
1139 taskq_bucket_t *bucket = NULL; /* Which bucket needs extension */ |
1126 taskq_ent_t *tqe = NULL; |
1140 taskq_ent_t *tqe = NULL; |
1132 |
1146 |
1133 if (!(tq->tq_flags & TASKQ_DYNAMIC)) { |
1147 if (!(tq->tq_flags & TASKQ_DYNAMIC)) { |
1134 /* |
1148 /* |
1135 * TQ_NOQUEUE flag can't be used with non-dynamic task queues. |
1149 * TQ_NOQUEUE flag can't be used with non-dynamic task queues. |
1136 */ |
1150 */ |
1137 ASSERT(! (flags & TQ_NOQUEUE)); |
1151 ASSERT(!(flags & TQ_NOQUEUE)); |
1138 /* |
1152 /* |
1139 * Enqueue the task to the underlying queue. |
1153 * Enqueue the task to the underlying queue. |
1140 */ |
1154 */ |
1141 mutex_enter(&tq->tq_lock); |
1155 mutex_enter(&tq->tq_lock); |
1142 |
1156 |
1144 |
1158 |
1145 if ((tqe = taskq_ent_alloc(tq, flags)) == NULL) { |
1159 if ((tqe = taskq_ent_alloc(tq, flags)) == NULL) { |
1146 mutex_exit(&tq->tq_lock); |
1160 mutex_exit(&tq->tq_lock); |
1147 return (NULL); |
1161 return (NULL); |
1148 } |
1162 } |
|
1163 /* Make sure we start without any flags */ |
|
1164 tqe->tqent_un.tqent_flags = 0; |
|
1165 |
1149 if (flags & TQ_FRONT) { |
1166 if (flags & TQ_FRONT) { |
1150 TQ_ENQUEUE_FRONT(tq, tqe, func, arg); |
1167 TQ_ENQUEUE_FRONT(tq, tqe, func, arg); |
1151 } else { |
1168 } else { |
1152 TQ_ENQUEUE(tq, tqe, func, arg); |
1169 TQ_ENQUEUE(tq, tqe, func, arg); |
1153 } |
1170 } |
1271 mutex_exit(&tq->tq_lock); |
1288 mutex_exit(&tq->tq_lock); |
1272 |
1289 |
1273 return ((taskqid_t)tqe); |
1290 return ((taskqid_t)tqe); |
1274 } |
1291 } |
1275 |
1292 |
|
1293 void |
|
1294 taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags, |
|
1295 taskq_ent_t *tqe) |
|
1296 { |
|
1297 ASSERT(func != NULL); |
|
1298 ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC)); |
|
1299 |
|
1300 /* |
|
1301 * Mark it as a prealloc'd task. This is important |
|
1302 * to ensure that we don't free it later. |
|
1303 */ |
|
1304 tqe->tqent_un.tqent_flags |= TQENT_FLAG_PREALLOC; |
|
1305 /* |
|
1306 * Enqueue the task to the underlying queue. |
|
1307 */ |
|
1308 mutex_enter(&tq->tq_lock); |
|
1309 |
|
1310 if (flags & TQ_FRONT) { |
|
1311 TQ_ENQUEUE_FRONT(tq, tqe, func, arg); |
|
1312 } else { |
|
1313 TQ_ENQUEUE(tq, tqe, func, arg); |
|
1314 } |
|
1315 mutex_exit(&tq->tq_lock); |
|
1316 } |
|
1317 |
1276 /* |
1318 /* |
1277 * Wait for all pending tasks to complete. |
1319 * Wait for all pending tasks to complete. |
1278 * Calling taskq_wait from a task will cause deadlock. |
1320 * Calling taskq_wait from a task will cause deadlock. |
1279 */ |
1321 */ |
1280 void |
1322 void |
1458 |
1500 |
1459 taskq_t *tq = arg; |
1501 taskq_t *tq = arg; |
1460 taskq_ent_t *tqe; |
1502 taskq_ent_t *tqe; |
1461 callb_cpr_t cprinfo; |
1503 callb_cpr_t cprinfo; |
1462 hrtime_t start, end; |
1504 hrtime_t start, end; |
|
1505 boolean_t freeit; |
1463 |
1506 |
1464 curthread->t_taskq = tq; /* mark ourselves for taskq_member() */ |
1507 curthread->t_taskq = tq; /* mark ourselves for taskq_member() */ |
1465 |
1508 |
1466 if (curproc != &p0 && (tq->tq_flags & TASKQ_DUTY_CYCLE)) { |
1509 if (curproc != &p0 && (tq->tq_flags & TASKQ_DUTY_CYCLE)) { |
1467 sysdc_thread_enter(curthread, tq->tq_DC, |
1510 sysdc_thread_enter(curthread, tq->tq_DC, |
1544 |
1587 |
1545 tqe->tqent_prev->tqent_next = tqe->tqent_next; |
1588 tqe->tqent_prev->tqent_next = tqe->tqent_next; |
1546 tqe->tqent_next->tqent_prev = tqe->tqent_prev; |
1589 tqe->tqent_next->tqent_prev = tqe->tqent_prev; |
1547 mutex_exit(&tq->tq_lock); |
1590 mutex_exit(&tq->tq_lock); |
1548 |
1591 |
|
1592 /* |
|
1593 * For prealloc'd tasks, we don't free anything. We |
|
1594 * have to check this now, because once we call the |
|
1595 * function for a prealloc'd taskq, we can't touch the |
|
1596 * tqent any longer (calling the function returns the |
|
 1597	 * ownership of the tqent back to caller of               |
|
1598 * taskq_dispatch.) |
|
1599 */ |
|
1600 if ((!(tq->tq_flags & TASKQ_DYNAMIC)) && |
|
1601 (tqe->tqent_un.tqent_flags & TQENT_FLAG_PREALLOC)) { |
|
1602 /* clear pointers to assist assertion checks */ |
|
1603 tqe->tqent_next = tqe->tqent_prev = NULL; |
|
1604 freeit = B_FALSE; |
|
1605 } else { |
|
1606 freeit = B_TRUE; |
|
1607 } |
|
1608 |
1549 rw_enter(&tq->tq_threadlock, RW_READER); |
1609 rw_enter(&tq->tq_threadlock, RW_READER); |
1550 start = gethrtime(); |
1610 start = gethrtime(); |
1551 DTRACE_PROBE2(taskq__exec__start, taskq_t *, tq, |
1611 DTRACE_PROBE2(taskq__exec__start, taskq_t *, tq, |
1552 taskq_ent_t *, tqe); |
1612 taskq_ent_t *, tqe); |
1553 tqe->tqent_func(tqe->tqent_arg); |
1613 tqe->tqent_func(tqe->tqent_arg); |
1558 |
1618 |
1559 mutex_enter(&tq->tq_lock); |
1619 mutex_enter(&tq->tq_lock); |
1560 tq->tq_totaltime += end - start; |
1620 tq->tq_totaltime += end - start; |
1561 tq->tq_executed++; |
1621 tq->tq_executed++; |
1562 |
1622 |
1563 taskq_ent_free(tq, tqe); |
1623 if (freeit) |
|
1624 taskq_ent_free(tq, tqe); |
1564 } |
1625 } |
1565 |
1626 |
1566 if (tq->tq_nthreads_max == 1) |
1627 if (tq->tq_nthreads_max == 1) |
1567 tq->tq_thread = NULL; |
1628 tq->tq_thread = NULL; |
1568 else |
1629 else |
1598 * Worker per-entry thread for dynamic dispatches. |
1659 * Worker per-entry thread for dynamic dispatches. |
1599 */ |
1660 */ |
1600 static void |
1661 static void |
1601 taskq_d_thread(taskq_ent_t *tqe) |
1662 taskq_d_thread(taskq_ent_t *tqe) |
1602 { |
1663 { |
1603 taskq_bucket_t *bucket = tqe->tqent_bucket; |
1664 taskq_bucket_t *bucket = tqe->tqent_un.tqent_bucket; |
1604 taskq_t *tq = bucket->tqbucket_taskq; |
1665 taskq_t *tq = bucket->tqbucket_taskq; |
1605 kmutex_t *lock = &bucket->tqbucket_lock; |
1666 kmutex_t *lock = &bucket->tqbucket_lock; |
1606 kcondvar_t *cv = &tqe->tqent_cv; |
1667 kcondvar_t *cv = &tqe->tqent_cv; |
1607 callb_cpr_t cprinfo; |
1668 callb_cpr_t cprinfo; |
1608 clock_t w; |
1669 clock_t w; |