drivers/target/target_core_tmr.c

/*******************************************************************************
 * Filename: target_core_tmr.c
 *
 * This file contains SPC-3 task management infrastructure
 *
 * Copyright (c) 2009,2010 Rising Tide Systems
 * Copyright (c) 2009,2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"

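/*
 * Allocate a struct se_tmr_req for *se_cmd, flag the command as a TMR via
 * SCF_SCSI_TMR_CDB, and link the request to its se_cmd. Exported for fabric
 * modules. Returns 0 on success or -ENOMEM if the allocation fails.
 */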
int core_tmr_alloc_req(
    struct se_cmd *se_cmd,
    void *fabric_tmr_ptr,
    u8 function,
    gfp_t gfp_flags)
{
    struct se_tmr_req *tmr;

    tmr = kzalloc(sizeof(struct se_tmr_req), gfp_flags);
    if (!tmr) {
        pr_err("Unable to allocate struct se_tmr_req\n");
        return -ENOMEM;
    }

    se_cmd->se_cmd_flags |= SCF_SCSI_TMR_CDB;
    se_cmd->se_tmr_req = tmr;
    tmr->task_cmd = se_cmd;
    tmr->fabric_tmr_ptr = fabric_tmr_ptr;
    tmr->function = function;
    INIT_LIST_HEAD(&tmr->tmr_list);

    return 0;
}
EXPORT_SYMBOL(core_tmr_alloc_req);

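/*
 * Free a struct se_tmr_req. If the TMR has already been associated with a
 * struct se_device, remove it from that device's TMR list under se_tmr_lock
 * before freeing.
 */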
void core_tmr_release_req(
    struct se_tmr_req *tmr)
{
    struct se_device *dev = tmr->tmr_dev;
    unsigned long flags;

    if (!dev) {
        kfree(tmr);
        return;
    }

    spin_lock_irqsave(&dev->se_tmr_lock, flags);
    list_del(&tmr->tmr_list);
    spin_unlock_irqrestore(&dev->se_tmr_lock, flags);

    kfree(tmr);
}

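/*
 * Finish an aborted command. Commands without any frontend references are
 * completed immediately without a response; otherwise SAM TASK ABORTED
 * status may be sent back to the fabric, depending on the originating I_T
 * nexus and the TAS setting, before the command is finished.
 */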
static void core_tmr_handle_tas_abort(
    struct se_node_acl *tmr_nacl,
    struct se_cmd *cmd,
    int tas,
    int fe_count)
{
    if (!fe_count) {
        transport_cmd_finish_abort(cmd, 1);
        return;
    }
    /*
     * TASK ABORTED status (TAS) bit support: send TASK ABORTED status for
     * commands received on an I_T nexus other than the one that caused the
     * abort, or whenever TAS is enabled.
     */
    if ((tmr_nacl &&
         (tmr_nacl != cmd->se_sess->se_node_acl)) || tas)
        transport_send_task_abort(cmd);

    transport_cmd_finish_abort(cmd, 0);
}

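/*
 * Helper for PREEMPT_AND_ABORT: returns 0 when no abort list was supplied or
 * when the command's reservation key matches a registration on the list, and
 * 1 when the caller should skip the command because its key does not match.
 */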
static int target_check_cdb_and_preempt(struct list_head *list,
        struct se_cmd *cmd)
{
    struct t10_pr_registration *reg;

    if (!list)
        return 0;
    list_for_each_entry(reg, list, pr_reg_abort_list) {
        if (reg->pr_res_key == cmd->pr_res_key)
            return 0;
    }

    return 1;
}

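/*
 * Handle an ABORT_TASK TMR: walk the session command list for the command
 * on this device matching tmr->ref_task_tag. If found and not yet complete,
 * mark it CMD_T_ABORTED, wait for it to quiesce, send SAM_STAT_TASK_ABORTED,
 * and set tmr->response to TMR_FUNCTION_COMPLETE. Otherwise tmr->response is
 * set to TMR_TASK_DOES_NOT_EXIST.
 */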
void core_tmr_abort_task(
    struct se_device *dev,
    struct se_tmr_req *tmr,
    struct se_session *se_sess)
{
    struct se_cmd *se_cmd, *tmp_cmd;
    unsigned long flags;
    u32 ref_tag;

    spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
    list_for_each_entry_safe(se_cmd, tmp_cmd,
            &se_sess->sess_cmd_list, se_cmd_list) {

        if (dev != se_cmd->se_dev)
            continue;
        ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd);
        if (tmr->ref_task_tag != ref_tag)
            continue;

        pr_info("ABORT_TASK: Found referenced %s task_tag: %u\n",
            se_cmd->se_tfo->get_fabric_name(), ref_tag);

        spin_lock_irq(&se_cmd->t_state_lock);
        if (se_cmd->transport_state & CMD_T_COMPLETE) {
            pr_info("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag);
            spin_unlock_irq(&se_cmd->t_state_lock);
            spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
            goto out;
        }
        se_cmd->transport_state |= CMD_T_ABORTED;
        spin_unlock_irq(&se_cmd->t_state_lock);

        list_del_init(&se_cmd->se_cmd_list);
        kref_get(&se_cmd->cmd_kref);
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

        cancel_work_sync(&se_cmd->work);
        transport_wait_for_tasks(se_cmd);
        /*
         * Now send SAM_STAT_TASK_ABORTED status for the referenced
         * se_cmd descriptor.
         */
        transport_send_task_abort(se_cmd);
        /*
         * Also drop the extra acknowledgement reference, if one was taken.
         */
        if (se_cmd->se_cmd_flags & SCF_ACK_KREF)
            target_put_sess_cmd(se_sess, se_cmd);

        target_put_sess_cmd(se_sess, se_cmd);

        pr_info("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
                " ref_tag: %u\n", ref_tag);
        tmr->response = TMR_FUNCTION_COMPLETE;
        return;
    }
    spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

out:
    pr_info("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %u\n",
            tmr->ref_task_tag);
    tmr->response = TMR_TASK_DOES_NOT_EXIST;
}

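/*
 * For LUN_RESET: walk dev->dev_tmr_list and move every other TMR whose
 * backing command is still active and not in TRANSPORT_ISTATE_PROCESSING
 * (and, when a preempt_and_abort_list is given, only those with a matching
 * registration key) onto a local drain list, then complete each one as
 * aborted outside of se_tmr_lock.
 */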
static void core_tmr_drain_tmr_list(
    struct se_device *dev,
    struct se_tmr_req *tmr,
    struct list_head *preempt_and_abort_list)
{
    LIST_HEAD(drain_tmr_list);
    struct se_tmr_req *tmr_p, *tmr_pp;
    struct se_cmd *cmd;
    unsigned long flags;
    /*
     * Release all pending and outgoing TMRs aside from the received
     * LUN_RESET TMR.
     */
    spin_lock_irqsave(&dev->se_tmr_lock, flags);
    list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
        /*
         * Allow the received TMR to return with FUNCTION_COMPLETE.
         */
        if (tmr_p == tmr)
            continue;

        cmd = tmr_p->task_cmd;
        if (!cmd) {
            pr_err("Unable to locate struct se_cmd for TMR\n");
            continue;
        }
        /*
         * If this function was called with a valid pr_res_key
         * parameter (e.g., for the PROUT PREEMPT_AND_ABORT service
         * action), skip TMRs whose registration key does not match.
         */
        if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
            continue;

        spin_lock(&cmd->t_state_lock);
        if (!(cmd->transport_state & CMD_T_ACTIVE)) {
            spin_unlock(&cmd->t_state_lock);
            continue;
        }
        if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
            spin_unlock(&cmd->t_state_lock);
            continue;
        }
        spin_unlock(&cmd->t_state_lock);

        list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
    }
    spin_unlock_irqrestore(&dev->se_tmr_lock, flags);

    list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) {
        list_del_init(&tmr_p->tmr_list);
        cmd = tmr_p->task_cmd;

        pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
            " Response: 0x%02x, t_state: %d\n",
            (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
            tmr_p->function, tmr_p->response, cmd->t_state);

        transport_cmd_finish_abort(cmd, 1);
    }
}

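/*
 * For LUN_RESET: move the affected commands off dev->state_list under
 * execute_task_lock, then for each drained command cancel any queued
 * completion work, stop it via target_stop_cmd(), mark it CMD_T_ABORTED,
 * and let core_tmr_handle_tas_abort() decide whether TASK ABORTED status
 * is sent back to the fabric.
 */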
static void core_tmr_drain_state_list(
    struct se_device *dev,
    struct se_cmd *prout_cmd,
    struct se_node_acl *tmr_nacl,
    int tas,
    struct list_head *preempt_and_abort_list)
{
    LIST_HEAD(drain_task_list);
    struct se_cmd *cmd, *next;
    unsigned long flags;
    int fe_count;

    /*
     * Complete outstanding commands with TASK_ABORTED SAM status.
     *
     * This is following sam4r17, section 5.6 Aborting commands, Table 38
     * for TMR LUN_RESET:
     *
     * a) "Yes" indicates that each command that is aborted on an I_T nexus
     * other than the one that caused the SCSI device condition is
     * completed with TASK ABORTED status, if the TAS bit is set to one in
     * the Control mode page (see SPC-4). "No" indicates that no status is
     * returned for aborted commands.
     *
     * d) If the logical unit reset is caused by a particular I_T nexus
     * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
     * (TASK_ABORTED status) applies.
     *
     * Otherwise (e.g., if triggered by a hard reset), "no"
     * (no TASK_ABORTED SAM status) applies.
     *
     * Note that this seems to be independent of TAS (Task Aborted Status)
     * in the Control Mode Page.
     */
    spin_lock_irqsave(&dev->execute_task_lock, flags);
    list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) {
        /*
         * For PREEMPT_AND_ABORT usage, only process commands
         * with a matching reservation key.
         */
        if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
            continue;

        /*
         * Do not abort the PROUT PREEMPT_AND_ABORT CDB itself.
         */
        if (prout_cmd == cmd)
            continue;

        list_move_tail(&cmd->state_list, &drain_task_list);
        cmd->state_active = false;
    }
    spin_unlock_irqrestore(&dev->execute_task_lock, flags);

    while (!list_empty(&drain_task_list)) {
        cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
        list_del(&cmd->state_list);

        pr_debug("LUN_RESET: %s cmd: %p"
            " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d,"
            " cdb: 0x%02x\n",
            (preempt_and_abort_list) ? "Preempt" : "", cmd,
            cmd->se_tfo->get_task_tag(cmd), 0,
            cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
            cmd->t_task_cdb[0]);
        pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
            " -- CMD_T_ACTIVE: %d"
            " CMD_T_STOP: %d CMD_T_SENT: %d\n",
            cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
            (cmd->transport_state & CMD_T_ACTIVE) != 0,
            (cmd->transport_state & CMD_T_STOP) != 0,
            (cmd->transport_state & CMD_T_SENT) != 0);

        /*
         * If the command may be queued onto a workqueue, cancel it now.
         *
         * This is equivalent to removal from the execute queue in the
         * loop above, but we do it down here given that
         * cancel_work_sync may block.
         */
        if (cmd->t_state == TRANSPORT_COMPLETE)
            cancel_work_sync(&cmd->work);

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        target_stop_cmd(cmd, &flags);

        fe_count = atomic_read(&cmd->t_fe_count);

        if (!(cmd->transport_state & CMD_T_ACTIVE)) {
            pr_debug("LUN_RESET: got !CMD_T_ACTIVE for"
                " cdb: %p, t_fe_count: %d dev: %p\n", cmd,
                fe_count, dev);
            cmd->transport_state |= CMD_T_ABORTED;
            spin_unlock_irqrestore(&cmd->t_state_lock, flags);

            core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
            continue;
        }
        pr_debug("LUN_RESET: got CMD_T_ACTIVE for cdb: %p,"
            " t_fe_count: %d dev: %p\n", cmd, fe_count, dev);
        cmd->transport_state |= CMD_T_ABORTED;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
    }
}

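/*
 * Perform a LUN_RESET task management function: drain the device's TMR list
 * and state list (optionally restricted by a PREEMPT_AND_ABORT registration
 * list), release any legacy SPC-2 reservation held on the device, and bump
 * the reset counter in the device stats. Always returns 0.
 */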
int core_tmr_lun_reset(
        struct se_device *dev,
        struct se_tmr_req *tmr,
        struct list_head *preempt_and_abort_list,
        struct se_cmd *prout_cmd)
{
    struct se_node_acl *tmr_nacl = NULL;
    struct se_portal_group *tmr_tpg = NULL;
    int tas;
    /*
     * TASK_ABORTED status bit; this is configurable via ConfigFS
     * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
     *
     * A task aborted status (TAS) bit set to zero specifies that aborted
     * tasks shall be terminated by the device server without any response
     * to the application client. A TAS bit set to one specifies that tasks
     * aborted by the actions of an I_T nexus other than the I_T nexus on
     * which the command was received shall be completed with TASK ABORTED
     * status (see SAM-4).
     */
    tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
    /*
     * Determine if this se_tmr is coming from a $FABRIC_MOD
     * or struct se_device passthrough.
     */
    if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
        tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
        tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
        if (tmr_nacl && tmr_tpg) {
            pr_debug("LUN_RESET: TMR caller fabric: %s"
                " initiator port %s\n",
                tmr_tpg->se_tpg_tfo->get_fabric_name(),
                tmr_nacl->initiatorname);
        }
    }
    pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
        (preempt_and_abort_list) ? "Preempt" : "TMR",
        dev->transport->name, tas);

    core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
    core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
                preempt_and_abort_list);

    /*
     * Clear any legacy SPC-2 reservation when called during
     * LOGICAL UNIT RESET.
     */
    if (!preempt_and_abort_list &&
         (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
        spin_lock(&dev->dev_reservation_lock);
        dev->dev_reserved_node_acl = NULL;
        dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
        spin_unlock(&dev->dev_reservation_lock);
        pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
    }

    spin_lock_irq(&dev->stats_lock);
    dev->num_resets++;
    spin_unlock_irq(&dev->stats_lock);

    pr_debug("LUN_RESET: %s for [%s] Complete\n",
            (preempt_and_abort_list) ? "Preempt" : "TMR",
            dev->transport->name);
    return 0;
}