From: Vladislav Bolkhovitin
This patch contains file scst_targ.c.

Signed-off-by: Vladislav Bolkhovitin <vst(a)vlnb.net>
---
scst_targ.c | 5712 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 5712 insertions(+)

diff -uprN orig/linux-2.6.33/drivers/scst/scst_targ.c linux-2.6.33/drivers/scst/scst_targ.c
--- orig/linux-2.6.33/drivers/scst/scst_targ.c
+++ linux-2.6.33/drivers/scst/scst_targ.c
@@ -0,0 +1,5712 @@
+/*
+ * scst_targ.c
+ *
+ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst(a)vlnb.net>
+ * Copyright (C) 2004 - 2005 Leonid Stoljar
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2
+ * of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/smp_lock.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+
+#include "scst.h"
+#include "scst_priv.h"
+
+#if 0 /* Temporarily left for future performance investigations */
+/* When deleting it, don't forget to delete write_cmd_count */
+#define CONFIG_SCST_ORDERED_READS
+#endif
+
+#if 0 /* Let's disable it for now to see if users will complain about it */
+/* When deleting it, don't forget to delete write_cmd_count */
+#define CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
+#endif
+
+static void scst_cmd_set_sn(struct scst_cmd *cmd);
+static int __scst_init_cmd(struct scst_cmd *cmd);
+static void scst_finish_cmd_mgmt(struct scst_cmd *cmd);
+static struct scst_cmd *__scst_find_cmd_by_tag(struct scst_session *sess,
+ uint64_t tag, bool to_abort);
+static void scst_process_redirect_cmd(struct scst_cmd *cmd,
+ enum scst_exec_context context, int check_retries);
+
+static inline void scst_schedule_tasklet(struct scst_cmd *cmd)
+{
+ struct scst_tasklet *t = &scst_tasklets[smp_processor_id()];
+ unsigned long flags;
+
+ spin_lock_irqsave(&t->tasklet_lock, flags);
+ TRACE_DBG("Adding cmd %p to tasklet %d cmd list", cmd,
+ smp_processor_id());
+ list_add_tail(&cmd->cmd_list_entry, &t->tasklet_cmd_list);
+ spin_unlock_irqrestore(&t->tasklet_lock, flags);
+
+ tasklet_schedule(&t->tasklet);
+}
+
+/**
+ * scst_rx_cmd() - create new command
+ * @sess: SCST session
+ * @lun: LUN for the command
+ * @lun_len: length of the LUN in bytes
+ * @cdb: CDB of the command
+ * @cdb_len: length of the CDB in bytes
+ * @atomic: true, if current context is atomic
+ *
+ * Description:
+ *    Creates a new SCST command. Returns the new command on success or
+ *    NULL otherwise.
+ *
+ * Must not be called in parallel with scst_unregister_session() for the
+ * same session.
+ */
+struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
+ const uint8_t *lun, int lun_len,
+ const uint8_t *cdb, int cdb_len, int atomic)
+{
+ struct scst_cmd *cmd;
+
+#ifdef CONFIG_SCST_EXTRACHECKS
+ if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
+ PRINT_CRIT_ERROR("%s",
+ "New cmd while shutting down the session");
+ BUG();
+ }
+#endif
+
+ cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
+ if (cmd == NULL)
+ goto out;
+
+ cmd->sess = sess;
+ cmd->tgt = sess->tgt;
+ cmd->tgtt = sess->tgt->tgtt;
+
+ cmd->lun = scst_unpack_lun(lun, lun_len);
+ if (unlikely(cmd->lun == NO_SUCH_LUN)) {
+ PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_lun_not_supported));
+ }
+
+	/*
+	 * For cdb_len 0, the error reporting is deferred until
+	 * scst_cmd_init_done(); scst_set_cmd_error() supports nested calls.
+	 */
+ if (unlikely(cdb_len > SCST_MAX_CDB_SIZE)) {
+ PRINT_ERROR("Too big CDB len %d, finishing cmd", cdb_len);
+ cdb_len = SCST_MAX_CDB_SIZE;
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_invalid_message));
+ }
+
+ memcpy(cmd->cdb, cdb, cdb_len);
+ cmd->cdb_len = cdb_len;
+
+ TRACE_DBG("cmd %p, sess %p", cmd, sess);
+ scst_sess_get(sess);
+
+out:
+ return cmd;
+}
+EXPORT_SYMBOL(scst_rx_cmd);
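+
+/*
+ * A minimal usage sketch of how a target driver is expected to pair
+ * scst_rx_cmd() with scst_cmd_init_done(). The my_* names and the PDU
+ * layout are hypothetical; scst_cmd_set_tag() is assumed from scst.h.
+ */
+#if 0
+static void my_tgt_rx_cmd(struct scst_session *sess, struct my_pdu *pdu)
+{
+	struct scst_cmd *cmd;
+
+	/* Atomic, because this could be called from SIRQ context */
+	cmd = scst_rx_cmd(sess, pdu->lun, pdu->lun_len,
+			  pdu->cdb, pdu->cdb_len, 1);
+	if (cmd == NULL)
+		return;	/* Out of memory, drop or requeue the PDU */
+
+	scst_cmd_set_tag(cmd, pdu->tag);
+
+	/* Hand the command over to SCST for LUN translation and parsing */
+	scst_cmd_init_done(cmd, SCST_CONTEXT_THREAD);
+}
+#endif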
+
+/*
+ * No locks, but might be in IRQ. Returns 0 on success, < 0 if processing of
+ * this command should be stopped, > 0 if the command was failed and its
+ * failure should be reported through the regular processing path.
+ */
+static int scst_init_cmd(struct scst_cmd *cmd, enum scst_exec_context *context)
+{
+ int rc, res = 0;
+
+ /* See the comment in scst_do_job_init() */
+ if (unlikely(!list_empty(&scst_init_cmd_list))) {
+ TRACE_MGMT_DBG("%s", "init cmd list busy");
+ goto out_redirect;
+ }
+	/*
+	 * A memory barrier isn't necessary here, because the CPU appears to
+	 * be self-consistent and we don't care about the race described in
+	 * the comment in scst_do_job_init().
+	 */
+
+ rc = __scst_init_cmd(cmd);
+ if (unlikely(rc > 0))
+ goto out_redirect;
+ else if (unlikely(rc != 0)) {
+ res = 1;
+ goto out;
+ }
+
+ EXTRACHECKS_BUG_ON(*context == SCST_CONTEXT_SAME);
+
+ /* Small context optimization */
+ if (((*context == SCST_CONTEXT_TASKLET) ||
+ (*context == SCST_CONTEXT_DIRECT_ATOMIC)) &&
+ scst_cmd_is_expected_set(cmd)) {
+ if (cmd->expected_data_direction & SCST_DATA_WRITE) {
+ if (!test_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
+ &cmd->tgt_dev->tgt_dev_flags))
+ *context = SCST_CONTEXT_THREAD;
+ } else {
+ if (!test_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
+ &cmd->tgt_dev->tgt_dev_flags))
+ *context = SCST_CONTEXT_THREAD;
+ }
+ }
+
+out:
+ return res;
+
+out_redirect:
+ if (cmd->preprocessing_only) {
+		/*
+		 * Poor man's solution for single-threaded targets, where
+		 * blocking the receiver at least sometimes means blocking
+		 * everything. For instance, an iSCSI target won't be able to
+		 * receive Data-Out PDUs.
+		 */
+ BUG_ON(*context != SCST_CONTEXT_DIRECT);
+ scst_set_busy(cmd);
+ scst_set_cmd_abnormal_done_state(cmd);
+ res = 1;
+		/* Don't flood the initiator with too many BUSY statuses */
+ msleep(50);
+ } else {
+ unsigned long flags;
+ spin_lock_irqsave(&scst_init_lock, flags);
+ TRACE_MGMT_DBG("Adding cmd %p to init cmd list (scst_cmd_count "
+ "%d)", cmd, atomic_read(&scst_cmd_count));
+ list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
+ if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
+ scst_init_poll_cnt++;
+ spin_unlock_irqrestore(&scst_init_lock, flags);
+ wake_up(&scst_init_cmd_list_waitQ);
+ res = -1;
+ }
+ goto out;
+}
+
+/**
+ * scst_cmd_init_done() - the command's initialization done
+ * @cmd: SCST command
+ * @pref_context: preferred command execution context
+ *
+ * Description:
+ * Notifies SCST that the driver finished its part of the command
+ * initialization, and the command is ready for execution.
+ *    The second argument sets the preferred command execution context.
+ * See SCST_CONTEXT_* constants for details.
+ *
+ * !!IMPORTANT!!
+ *
+ * If cmd->set_sn_on_restart_cmd is not set, this function, as well as
+ * scst_cmd_init_stage1_done() and scst_restart_cmd(), must not be
+ * called simultaneously for the same session (more precisely,
+ * for the same session/LUN, i.e. tgt_dev), i.e. they must be
+ * somehow externally serialized. This is needed to have a lock-free fast
+ * path in scst_cmd_set_sn(). For the majority of targets those functions
+ * are naturally serialized by the single source of commands. Only iSCSI
+ * immediate commands with multiple connections per session seem to be an
+ * exception. For them, some mutex/lock should be used for the serialization.
+ */
+void scst_cmd_init_done(struct scst_cmd *cmd,
+ enum scst_exec_context pref_context)
+{
+ unsigned long flags;
+ struct scst_session *sess = cmd->sess;
+ int rc;
+
+ scst_set_start_time(cmd);
+
+ TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
+ TRACE(TRACE_SCSI, "tag=%llu, lun=%lld, CDB len=%d, queue_type=%x "
+ "(cmd %p)", (long long unsigned int)cmd->tag,
+ (long long unsigned int)cmd->lun, cmd->cdb_len,
+ cmd->queue_type, cmd);
+	PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_RCV_BOT, "Receiving CDB",
+ cmd->cdb, cmd->cdb_len);
+
+#ifdef CONFIG_SCST_EXTRACHECKS
+ if (unlikely((in_irq() || irqs_disabled())) &&
+ ((pref_context == SCST_CONTEXT_DIRECT) ||
+ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
+ PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
+ "SCST_CONTEXT_THREAD instead", pref_context,
+ cmd->tgtt->name);
+ pref_context = SCST_CONTEXT_THREAD;
+ }
+#endif
+
+ atomic_inc(&sess->sess_cmd_count);
+
+ spin_lock_irqsave(&sess->sess_list_lock, flags);
+
+ if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
+		/*
+		 * We must always keep commands in the sess list from the
+		 * very beginning, because otherwise they can be missed during
+		 * TM processing. This check is needed because there might be
+		 * both old (deferred) commands and new, just arriving, ones.
+		 */
+ if (cmd->sess_cmd_list_entry.next == NULL)
+ list_add_tail(&cmd->sess_cmd_list_entry,
+ &sess->sess_cmd_list);
+ switch (sess->init_phase) {
+ case SCST_SESS_IPH_SUCCESS:
+ break;
+ case SCST_SESS_IPH_INITING:
+ TRACE_DBG("Adding cmd %p to init deferred cmd list",
+ cmd);
+ list_add_tail(&cmd->cmd_list_entry,
+ &sess->init_deferred_cmd_list);
+ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
+ goto out;
+ case SCST_SESS_IPH_FAILED:
+ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
+ scst_set_busy(cmd);
+ scst_set_cmd_abnormal_done_state(cmd);
+ goto active;
+ default:
+ BUG();
+ }
+ } else
+ list_add_tail(&cmd->sess_cmd_list_entry,
+ &sess->sess_cmd_list);
+
+ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
+
+ if (unlikely(cmd->cdb_len == 0)) {
+ PRINT_ERROR("%s", "Wrong CDB len 0, finishing cmd");
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
+ scst_set_cmd_abnormal_done_state(cmd);
+ goto active;
+ }
+
+ if (unlikely(cmd->queue_type >= SCST_CMD_QUEUE_ACA)) {
+ PRINT_ERROR("Unsupported queue type %d", cmd->queue_type);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_invalid_message));
+ goto active;
+ }
+
+	/*
+	 * The cmd must be initialized here to preserve the order. Even if
+	 * the cmd was already preliminarily completed by the target driver,
+	 * we need to initialize it anyway to find out in which format we
+	 * should return the sense.
+	 */
+ cmd->state = SCST_CMD_STATE_INIT;
+ rc = scst_init_cmd(cmd, &pref_context);
+ if (unlikely(rc < 0))
+ goto out;
+
+active:
+ /* Here cmd must not be in any cmd list, no locks */
+ switch (pref_context) {
+ case SCST_CONTEXT_TASKLET:
+ scst_schedule_tasklet(cmd);
+ break;
+
+ case SCST_CONTEXT_DIRECT:
+ scst_process_active_cmd(cmd, false);
+ break;
+
+ case SCST_CONTEXT_DIRECT_ATOMIC:
+ scst_process_active_cmd(cmd, true);
+ break;
+
+ default:
+ PRINT_ERROR("Context %x is undefined, using the thread one",
+ pref_context);
+		/* fall through */
+ case SCST_CONTEXT_THREAD:
+ spin_lock_irqsave(&cmd->cmd_threads->cmd_list_lock, flags);
+ TRACE_DBG("Adding cmd %p to active cmd list", cmd);
+ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
+ list_add(&cmd->cmd_list_entry,
+ &cmd->cmd_threads->active_cmd_list);
+ else
+ list_add_tail(&cmd->cmd_list_entry,
+ &cmd->cmd_threads->active_cmd_list);
+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
+ spin_unlock_irqrestore(&cmd->cmd_threads->cmd_list_lock, flags);
+ break;
+ }
+
+out:
+ return;
+}
+EXPORT_SYMBOL(scst_cmd_init_done);
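+
+/*
+ * A minimal sketch of the serialization requirement above for a target with
+ * multiple connections per session (my_conn and its sess_mutex are
+ * hypothetical, not SCST API): calls into scst_cmd_init_done() are funneled
+ * through one per-session mutex so that scst_cmd_set_sn() keeps its
+ * lock-free fast path.
+ */
+#if 0
+static void my_tgt_submit_cmd(struct my_conn *conn, struct scst_cmd *cmd)
+{
+	mutex_lock(&conn->sess->sess_mutex);
+	scst_cmd_init_done(cmd, SCST_CONTEXT_THREAD);
+	mutex_unlock(&conn->sess->sess_mutex);
+}
+#endif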
+
+static int scst_pre_parse(struct scst_cmd *cmd)
+{
+ int res = SCST_CMD_STATE_RES_CONT_SAME;
+ struct scst_device *dev = cmd->dev;
+ int rc;
+
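+	/*
+	 * Decide when the expected SN may be advanced for this cmd: only on
+	 * its completion, if strict ordering must be preserved, or already
+	 * at exec time otherwise.
+	 */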
+#ifdef CONFIG_SCST_STRICT_SERIALIZING
+ cmd->inc_expected_sn_on_done = 1;
+#else
+ cmd->inc_expected_sn_on_done = dev->handler->exec_sync ||
+ (!dev->has_own_order_mgmt &&
+ (dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER ||
+ cmd->queue_type == SCST_CMD_QUEUE_ORDERED));
+#endif
+
+ /*
+ * Expected transfer data supplied by the SCSI transport via the
+	 * target driver are untrusted, so we prefer to fetch them from the
+	 * CDB. Additionally, not all transports support supplying the
+	 * expected transfer data.
+ */
+
+ rc = scst_get_cdb_info(cmd);
+ if (unlikely(rc != 0)) {
+ if (rc > 0) {
+ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
+ goto out_err;
+ }
+
+ EXTRACHECKS_BUG_ON(cmd->op_flags & SCST_INFO_VALID);
+
+ cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
+
+ TRACE(TRACE_MINOR, "Unknown opcode 0x%02x for %s. "
+ "Should you update scst_scsi_op_table?",
+ cmd->cdb[0], dev->handler->name);
+ PRINT_BUFF_FLAG(TRACE_MINOR, "Failed CDB", cmd->cdb,
+ cmd->cdb_len);
+ } else {
+ EXTRACHECKS_BUG_ON(!(cmd->op_flags & SCST_INFO_VALID));
+ }
+
+ cmd->state = SCST_CMD_STATE_DEV_PARSE;
+
+ TRACE_DBG("op_name <%s> (cmd %p), direction=%d "
+ "(expected %d, set %s), transfer_len=%d (expected "
+ "len %d), flags=%d", cmd->op_name, cmd,
+ cmd->data_direction, cmd->expected_data_direction,
+ scst_cmd_is_expected_set(cmd) ? "yes" : "no",
+ cmd->bufflen, cmd->expected_transfer_len,
+ cmd->op_flags);
+
+out:
+ return res;
+
+out_err:
+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
+ scst_set_cmd_abnormal_done_state(cmd);
+ res = SCST_CMD_STATE_RES_CONT_SAME;
+ goto out;
+}
+
+#ifndef CONFIG_SCST_USE_EXPECTED_VALUES
+static bool scst_is_allowed_to_mismatch_cmd(struct scst_cmd *cmd)
+{
+ bool res = false;
+
+ /* VERIFY commands with BYTCHK unset shouldn't fail here */
+ if ((cmd->op_flags & SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED) &&
+ (cmd->cdb[1] & BYTCHK) == 0) {
+ res = true;
+ goto out;
+ }
+
+ switch (cmd->cdb[0]) {
+ case TEST_UNIT_READY:
+ /* Crazy VMware people sometimes do TUR with READ direction */
+ res = true;
+ break;
+ }
+
+out:
+ return res;
+}
+#endif
+
+static int scst_parse_cmd(struct scst_cmd *cmd)
+{
+ int res = SCST_CMD_STATE_RES_CONT_SAME;
+ int state;
+ struct scst_device *dev = cmd->dev;
+ int orig_bufflen = cmd->bufflen;
+
+ if (likely(!scst_is_cmd_fully_local(cmd))) {
+ if (unlikely(!dev->handler->parse_atomic &&
+ scst_cmd_atomic(cmd))) {
+			/*
+			 * This shouldn't happen, because of the
+			 * SCST_TGT_DEV_AFTER_* optimization.
+			 */
+ TRACE_DBG("Dev handler %s parse() needs thread "
+ "context, rescheduling", dev->handler->name);
+ res = SCST_CMD_STATE_RES_NEED_THREAD;
+ goto out;
+ }
+
+ TRACE_DBG("Calling dev handler %s parse(%p)",
+ dev->handler->name, cmd);
+ TRACE_BUFF_FLAG(TRACE_SND_BOT, "Parsing: ",
+ cmd->cdb, cmd->cdb_len);
+ scst_set_cur_start(cmd);
+ state = dev->handler->parse(cmd);
+ /* Caution: cmd can be already dead here */
+ TRACE_DBG("Dev handler %s parse() returned %d",
+ dev->handler->name, state);
+
+ switch (state) {
+ case SCST_CMD_STATE_NEED_THREAD_CTX:
+ scst_set_parse_time(cmd);
+ TRACE_DBG("Dev handler %s parse() requested thread "
+ "context, rescheduling", dev->handler->name);
+ res = SCST_CMD_STATE_RES_NEED_THREAD;
+ goto out;
+
+ case SCST_CMD_STATE_STOP:
+ TRACE_DBG("Dev handler %s parse() requested stop "
+ "processing", dev->handler->name);
+ res = SCST_CMD_STATE_RES_CONT_NEXT;
+ goto out;
+ }
+
+ scst_set_parse_time(cmd);
+
+ if (state == SCST_CMD_STATE_DEFAULT)
+ state = SCST_CMD_STATE_PREPARE_SPACE;
+ } else
+ state = SCST_CMD_STATE_PREPARE_SPACE;
+
+ if (unlikely(state == SCST_CMD_STATE_PRE_XMIT_RESP))
+ goto set_res;
+
+ if (unlikely(!(cmd->op_flags & SCST_INFO_VALID))) {
+#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
+ if (scst_cmd_is_expected_set(cmd)) {
+ TRACE(TRACE_MINOR, "Using initiator supplied values: "
+ "direction %d, transfer_len %d",
+ cmd->expected_data_direction,
+ cmd->expected_transfer_len);
+ cmd->data_direction = cmd->expected_data_direction;
+ cmd->bufflen = cmd->expected_transfer_len;
+ } else {
+			PRINT_ERROR("Unknown opcode 0x%02x for %s, and "
+				"target %s did not supply expected values",
+ cmd->cdb[0], dev->handler->name, cmd->tgtt->name);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
+ goto out_done;
+ }
+#else
+ PRINT_ERROR("Unknown opcode %x", cmd->cdb[0]);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
+ goto out_done;
+#endif
+ }
+
+ if (unlikely(cmd->cdb_len == -1)) {
+ PRINT_ERROR("Unable to get CDB length for "
+ "opcode 0x%02x. Returning INVALID "
+ "OPCODE", cmd->cdb[0]);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
+ goto out_done;
+ }
+
+ EXTRACHECKS_BUG_ON(cmd->cdb_len == 0);
+
+ TRACE(TRACE_SCSI, "op_name <%s> (cmd %p), direction=%d "
+ "(expected %d, set %s), transfer_len=%d (expected "
+ "len %d), flags=%d", cmd->op_name, cmd,
+ cmd->data_direction, cmd->expected_data_direction,
+ scst_cmd_is_expected_set(cmd) ? "yes" : "no",
+ cmd->bufflen, cmd->expected_transfer_len,
+ cmd->op_flags);
+
+ if (unlikely((cmd->op_flags & SCST_UNKNOWN_LENGTH) != 0)) {
+ if (scst_cmd_is_expected_set(cmd)) {
+			/*
+			 * The command data length can't be easily
+			 * determined from the CDB. ToDo: processing of
+			 * all such commands should be fixed. Until
+			 * that's done, get the length from the supplied
+			 * expected value, but limit it to some
+			 * reasonable value (15MB).
+			 */
+ cmd->bufflen = min(cmd->expected_transfer_len,
+ 15*1024*1024);
+ cmd->op_flags &= ~SCST_UNKNOWN_LENGTH;
+ } else {
+ PRINT_ERROR("Unknown data transfer length for opcode "
+ "0x%x (handler %s, target %s)", cmd->cdb[0],
+ dev->handler->name, cmd->tgtt->name);
+ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_invalid_message));
+ goto out_done;
+ }
+ }
+
+ if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
+		PRINT_ERROR("NACA bit in the CDB control byte is not "
+			"supported (opcode 0x%02x)", cmd->cdb[0]);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
+ goto out_done;
+ }
+
+ if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
+ PRINT_ERROR("Linked commands are not supported "
+ "(opcode 0x%02x)", cmd->cdb[0]);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
+ goto out_done;
+ }
+
+ if (cmd->dh_data_buf_alloced &&
+ unlikely((orig_bufflen > cmd->bufflen))) {
+		PRINT_ERROR("Dev handler supplied a data buffer (size %d) "
+			"smaller than required (size %d)", cmd->bufflen,
+ orig_bufflen);
+ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
+ goto out_hw_error;
+ }
+
+#ifdef CONFIG_SCST_EXTRACHECKS
+ if ((cmd->bufflen != 0) &&
+ ((cmd->data_direction == SCST_DATA_NONE) ||
+ ((cmd->sg == NULL) && (state > SCST_CMD_STATE_PREPARE_SPACE)))) {
+ PRINT_ERROR("Dev handler %s parse() returned "
+ "invalid cmd data_direction %d, bufflen %d, state %d "
+ "or sg %p (opcode 0x%x)", dev->handler->name,
+ cmd->data_direction, cmd->bufflen, state, cmd->sg,
+ cmd->cdb[0]);
+ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
+ goto out_hw_error;
+ }
+#endif
+
+ if (scst_cmd_is_expected_set(cmd)) {
+#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
+# ifdef CONFIG_SCST_EXTRACHECKS
+ if (unlikely((cmd->data_direction != cmd->expected_data_direction) ||
+ (cmd->bufflen != cmd->expected_transfer_len))) {
+ TRACE(TRACE_MINOR, "Expected values don't match "
+ "decoded ones: data_direction %d, "
+ "expected_data_direction %d, "
+ "bufflen %d, expected_transfer_len %d",
+ cmd->data_direction,
+ cmd->expected_data_direction,
+ cmd->bufflen, cmd->expected_transfer_len);
+ PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB",
+ cmd->cdb, cmd->cdb_len);
+ }
+# endif
+ cmd->data_direction = cmd->expected_data_direction;
+ cmd->bufflen = cmd->expected_transfer_len;
+#else
+ if (unlikely(cmd->data_direction !=
+ cmd->expected_data_direction)) {
+ if (((cmd->expected_data_direction != SCST_DATA_NONE) ||
+ (cmd->bufflen != 0)) &&
+ !scst_is_allowed_to_mismatch_cmd(cmd)) {
+ PRINT_ERROR("Expected data direction %d for "
+ "opcode 0x%02x (handler %s, target %s) "
+ "doesn't match decoded value %d",
+ cmd->expected_data_direction,
+ cmd->cdb[0], dev->handler->name,
+ cmd->tgtt->name, cmd->data_direction);
+ PRINT_BUFFER("Failed CDB", cmd->cdb,
+ cmd->cdb_len);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_invalid_message));
+ goto out_done;
+ }
+ }
+ if (unlikely(cmd->bufflen != cmd->expected_transfer_len)) {
+ TRACE(TRACE_MINOR, "Warning: expected "
+ "transfer length %d for opcode 0x%02x "
+ "(handler %s, target %s) doesn't match "
+ "decoded value %d. Faulty initiator "
+				"(e.g. VMware is known to be one), or "
+				"should scst_scsi_op_table be updated?",
+ cmd->expected_transfer_len, cmd->cdb[0],
+ dev->handler->name, cmd->tgtt->name,
+ cmd->bufflen);
+ PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB",
+ cmd->cdb, cmd->cdb_len);
+ /* Needed, e.g., to get immediate iSCSI data */
+ cmd->bufflen = max(cmd->bufflen,
+ cmd->expected_transfer_len);
+ }
+#endif
+ }
+
+ if (unlikely(cmd->data_direction == SCST_DATA_UNKNOWN)) {
+ PRINT_ERROR("Unknown data direction. Opcode 0x%x, handler %s, "
+ "target %s", cmd->cdb[0], dev->handler->name,
+ cmd->tgtt->name);
+ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
+ goto out_hw_error;
+ }
+
+set_res:
+ if (cmd->data_len == -1)
+ cmd->data_len = cmd->bufflen;
+
+ if (cmd->bufflen == 0) {
+		/*
+		 * According to SPC, bufflen 0 for data transfer commands
+		 * isn't an error, so we need to fix the transfer direction.
+		 */
+ cmd->data_direction = SCST_DATA_NONE;
+ }
+
+#ifdef CONFIG_SCST_EXTRACHECKS
+ switch (state) {
+ case SCST_CMD_STATE_PREPARE_SPACE:
+ case SCST_CMD_STATE_PRE_PARSE:
+ case SCST_CMD_STATE_DEV_PARSE:
+ case SCST_CMD_STATE_RDY_TO_XFER:
+ case SCST_CMD_STATE_TGT_PRE_EXEC:
+ case SCST_CMD_STATE_SEND_FOR_EXEC:
+ case SCST_CMD_STATE_LOCAL_EXEC:
+ case SCST_CMD_STATE_REAL_EXEC:
+ case SCST_CMD_STATE_PRE_DEV_DONE:
+ case SCST_CMD_STATE_DEV_DONE:
+ case SCST_CMD_STATE_PRE_XMIT_RESP:
+ case SCST_CMD_STATE_XMIT_RESP:
+ case SCST_CMD_STATE_FINISHED:
+ case SCST_CMD_STATE_FINISHED_INTERNAL:
+#endif
+ cmd->state = state;
+ res = SCST_CMD_STATE_RES_CONT_SAME;
+#ifdef CONFIG_SCST_EXTRACHECKS
+ break;
+
+ default:
+ if (state >= 0) {
+ PRINT_ERROR("Dev handler %s parse() returned "
+ "invalid cmd state %d (opcode %d)",
+ dev->handler->name, state, cmd->cdb[0]);
+ } else {
+ PRINT_ERROR("Dev handler %s parse() returned "
+ "error %d (opcode %d)", dev->handler->name,
+ state, cmd->cdb[0]);
+ }
+ goto out_hw_error;
+ }
+#endif
+
+ if (cmd->resp_data_len == -1) {
+ if (cmd->data_direction & SCST_DATA_READ)
+ cmd->resp_data_len = cmd->bufflen;
+ else
+ cmd->resp_data_len = 0;
+ }
+
+ /* We already completed (with an error) */
+ if (unlikely(cmd->completed))
+ goto out_done;
+
+out:
+ return res;
+
+out_hw_error:
+ /* dev_done() will be called as part of the regular cmd's finish */
+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
+
+out_done:
+ scst_set_cmd_abnormal_done_state(cmd);
+ res = SCST_CMD_STATE_RES_CONT_SAME;
+ goto out;
+}
+
+static int scst_prepare_space(struct scst_cmd *cmd)
+{
+ int r = 0, res = SCST_CMD_STATE_RES_CONT_SAME;
+
+ if (cmd->data_direction == SCST_DATA_NONE)
+ goto done;
+
+ if (cmd->tgt_need_alloc_data_buf) {
+ int orig_bufflen = cmd->bufflen;
+
+ TRACE_MEM("Custom tgt data buf allocation requested (cmd %p)",
+ cmd);
+
+ scst_set_cur_start(cmd);
+ r = cmd->tgtt->alloc_data_buf(cmd);
+ scst_set_alloc_buf_time(cmd);
+
+ if (r > 0)
+ goto alloc;
+ else if (r == 0) {
+ if (unlikely(cmd->bufflen == 0)) {
+ /* See comment in scst_alloc_space() */
+ if (cmd->sg == NULL)
+ goto alloc;
+ }
+
+ cmd->tgt_data_buf_alloced = 1;
+
+ if (unlikely(orig_bufflen < cmd->bufflen)) {
+				PRINT_ERROR("Target driver allocated a data "
+					"buffer (size %d) smaller than "
+ "required (size %d)", orig_bufflen,
+ cmd->bufflen);
+ goto out_error;
+ }
+ TRACE_MEM("tgt_data_buf_alloced (cmd %p)", cmd);
+ } else
+ goto check;
+ }
+
+alloc:
+ if (!cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
+ r = scst_alloc_space(cmd);
+ } else if (cmd->dh_data_buf_alloced && !cmd->tgt_data_buf_alloced) {
+ TRACE_MEM("dh_data_buf_alloced set (cmd %p)", cmd);
+ r = 0;
+ } else if (cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
+ TRACE_MEM("tgt_data_buf_alloced set (cmd %p)", cmd);
+ cmd->sg = cmd->tgt_sg;
+ cmd->sg_cnt = cmd->tgt_sg_cnt;
+ cmd->in_sg = cmd->tgt_in_sg;
+ cmd->in_sg_cnt = cmd->tgt_in_sg_cnt;
+ r = 0;
+ } else {
+ TRACE_MEM("Both *_data_buf_alloced set (cmd %p, sg %p, "
+ "sg_cnt %d, tgt_sg %p, tgt_sg_cnt %d)", cmd, cmd->sg,
+ cmd->sg_cnt, cmd->tgt_sg, cmd->tgt_sg_cnt);
+ r = 0;
+ }
+
+check:
+ if (r != 0) {
+ if (scst_cmd_atomic(cmd)) {
+ TRACE_MEM("%s", "Atomic memory allocation failed, "
+ "rescheduling to the thread");
+ res = SCST_CMD_STATE_RES_NEED_THREAD;
+ goto out;
+ } else
+ goto out_no_space;
+ }
+
+done:
+ if (cmd->preprocessing_only)
+ cmd->state = SCST_CMD_STATE_PREPROCESSING_DONE;
+ else if (cmd->data_direction & SCST_DATA_WRITE)
+ cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
+ else
+ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
+
+out:
+ return res;
+
+out_no_space:
+ TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
+ "(size %d), sending BUSY or QUEUE FULL status", cmd->bufflen);
+ scst_set_busy(cmd);
+ scst_set_cmd_abnormal_done_state(cmd);
+ res = SCST_CMD_STATE_RES_CONT_SAME;
+ goto out;
+
+out_error:
+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
+ scst_set_cmd_abnormal_done_state(cmd);
+ res = SCST_CMD_STATE_RES_CONT_SAME;
+ goto out;
+}
+
+static int scst_preprocessing_done(struct scst_cmd *cmd)
+{
+ int res;
+
+ EXTRACHECKS_BUG_ON(!cmd->preprocessing_only);
+
+ cmd->preprocessing_only = 0;
+
+ res = SCST_CMD_STATE_RES_CONT_NEXT;
+ cmd->state = SCST_CMD_STATE_PREPROCESSING_DONE_CALLED;
+
+ TRACE_DBG("Calling preprocessing_done(cmd %p)", cmd);
+ scst_set_cur_start(cmd);
+ cmd->tgtt->preprocessing_done(cmd);
+ TRACE_DBG("%s", "preprocessing_done() returned");
+ return res;
+}
+
+/**
+ * scst_restart_cmd() - restart execution of the command
+ * @cmd:	SCST command
+ * @status:	completion status
+ * @pref_context: preferred command execution context
+ *
+ * Description:
+ * Notifies SCST that the driver finished its part of the command's
+ * preprocessing and it is ready for further processing.
+ *
+ * The second argument sets completion status
+ * (see SCST_PREPROCESS_STATUS_* constants for details)
+ *
+ * See also comment for scst_cmd_init_done() for the serialization
+ * requirements.
+ */
+void scst_restart_cmd(struct scst_cmd *cmd, int status,
+ enum scst_exec_context pref_context)
+{
+
+ scst_set_restart_waiting_time(cmd);
+
+ TRACE_DBG("Preferred context: %d", pref_context);
+ TRACE_DBG("tag=%llu, status=%#x",
+ (long long unsigned int)scst_cmd_get_tag(cmd),
+ status);
+
+#ifdef CONFIG_SCST_EXTRACHECKS
+ if ((in_irq() || irqs_disabled()) &&
+ ((pref_context == SCST_CONTEXT_DIRECT) ||
+ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
+ PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
+ "SCST_CONTEXT_THREAD instead", pref_context,
+ cmd->tgtt->name);
+ pref_context = SCST_CONTEXT_THREAD;
+ }
+#endif
+
+ switch (status) {
+ case SCST_PREPROCESS_STATUS_SUCCESS:
+ if (cmd->data_direction & SCST_DATA_WRITE)
+ cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
+ else
+ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
+ if (cmd->set_sn_on_restart_cmd)
+ scst_cmd_set_sn(cmd);
+ /* Small context optimization */
+ if ((pref_context == SCST_CONTEXT_TASKLET) ||
+ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
+ ((pref_context == SCST_CONTEXT_SAME) &&
+ scst_cmd_atomic(cmd))) {
+ if (cmd->data_direction & SCST_DATA_WRITE) {
+ if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
+ &cmd->tgt_dev->tgt_dev_flags))
+ pref_context = SCST_CONTEXT_THREAD;
+ } else {
+ if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
+ &cmd->tgt_dev->tgt_dev_flags))
+ pref_context = SCST_CONTEXT_THREAD;
+ }
+ }
+ break;
+
+ case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
+ scst_set_cmd_abnormal_done_state(cmd);
+ break;
+
+ case SCST_PREPROCESS_STATUS_ERROR_FATAL:
+ set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
+		/* fall through */
+ case SCST_PREPROCESS_STATUS_ERROR:
+ if (cmd->sense != NULL)
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_hardw_error));
+ scst_set_cmd_abnormal_done_state(cmd);
+ break;
+
+ default:
+ PRINT_ERROR("%s() received unknown status %x", __func__,
+ status);
+ scst_set_cmd_abnormal_done_state(cmd);
+ break;
+ }
+
+ scst_process_redirect_cmd(cmd, pref_context, 1);
+ return;
+}
+EXPORT_SYMBOL(scst_restart_cmd);
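+
+/*
+ * A minimal sketch of the preprocessing_done()/scst_restart_cmd() pairing
+ * (my_tgt_preprocessing_done() is a hypothetical callback implementation):
+ * SCST calls the target template's preprocessing_done(), the driver finishes
+ * its part, possibly asynchronously, and then resumes the command.
+ */
+#if 0
+static void my_tgt_preprocessing_done(struct scst_cmd *cmd)
+{
+	/* ... driver-private preprocessing, possibly asynchronous ... */
+
+	scst_restart_cmd(cmd, SCST_PREPROCESS_STATUS_SUCCESS,
+		SCST_CONTEXT_THREAD);
+}
+#endif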
+
+static int scst_rdy_to_xfer(struct scst_cmd *cmd)
+{
+ int res, rc;
+ struct scst_tgt_template *tgtt = cmd->tgtt;
+
+ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
+ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
+ goto out_dev_done;
+ }
+
+ if ((tgtt->rdy_to_xfer == NULL) || unlikely(cmd->internal)) {
+ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
+ res = SCST_CMD_STATE_RES_CONT_SAME;
+ goto out;
+ }
+
+ if (unlikely(!tgtt->rdy_to_xfer_atomic && scst_cmd_atomic(cmd))) {
+		/*
+		 * This shouldn't happen, because of the SCST_TGT_DEV_AFTER_*
+		 * optimization.
+		 */
+ TRACE_DBG("Target driver %s rdy_to_xfer() needs thread "
+ "context, rescheduling", tgtt->name);
+ res = SCST_CMD_STATE_RES_NEED_THREAD;
+ goto out;
+ }
+
+ while (1) {
+ int finished_cmds = atomic_read(&cmd->tgt->finished_cmds);
+
+ res = SCST_CMD_STATE_RES_CONT_NEXT;
+ cmd->state = SCST_CMD_STATE_DATA_WAIT;
+
+ if (tgtt->on_hw_pending_cmd_timeout != NULL) {
+ struct scst_session *sess = cmd->sess;
+ cmd->hw_pending_start = jiffies;
+ cmd->cmd_hw_pending = 1;
+ if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
+ TRACE_DBG("Sched HW pending work for sess %p "
+ "(max time %d)", sess,
+ tgtt->max_hw_pending_time);
+ set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
+ &sess->sess_aflags);
+ schedule_delayed_work(&sess->hw_pending_work,
+ tgtt->max_hw_pending_time * HZ);
+ }
+ }
+
+ scst_set_cur_start(cmd);
+
+ TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
+#ifdef CONFIG_SCST_DEBUG_RETRY
+ if (((scst_random() % 100) == 75))
+ rc = SCST_TGT_RES_QUEUE_FULL;
+ else
+#endif
+ rc = tgtt->rdy_to_xfer(cmd);
+ TRACE_DBG("rdy_to_xfer() returned %d", rc);
+
+ if (likely(rc == SCST_TGT_RES_SUCCESS))
+ goto out;
+
+ scst_set_rdy_to_xfer_time(cmd);
+
+ cmd->cmd_hw_pending = 0;
+
+ /* Restore the previous state */
+ cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
+
+ switch (rc) {
+ case SCST_TGT_RES_QUEUE_FULL:
+ if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
+ break;
+ else
+ continue;
+
+ case SCST_TGT_RES_NEED_THREAD_CTX:
+ TRACE_DBG("Target driver %s "
+ "rdy_to_xfer() requested thread "
+ "context, rescheduling", tgtt->name);
+ res = SCST_CMD_STATE_RES_NEED_THREAD;
+ break;
+
+ default:
+ goto out_error_rc;
+ }
+ break;
+ }
+
+out:
+ return res;
+
+out_error_rc:
+ if (rc == SCST_TGT_RES_FATAL_ERROR) {
+ PRINT_ERROR("Target driver %s rdy_to_xfer() returned "
+ "fatal error", tgtt->name);
+ } else {
+ PRINT_ERROR("Target driver %s rdy_to_xfer() returned invalid "
+ "value %d", tgtt->name, rc);
+ }
+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
+
+out_dev_done:
+ scst_set_cmd_abnormal_done_state(cmd);
+ res = SCST_CMD_STATE_RES_CONT_SAME;
+ goto out;
+}
+
+/* No locks, but might be in IRQ */
+static void scst_process_redirect_cmd(struct scst_cmd *cmd,
+ enum scst_exec_context context, int check_retries)
+{
+ struct scst_tgt *tgt = cmd->tgt;
+ unsigned long flags;
+
+ TRACE_DBG("Context: %x", context);
+
+ if (context == SCST_CONTEXT_SAME)
+ context = scst_cmd_atomic(cmd) ? SCST_CONTEXT_DIRECT_ATOMIC :
+ SCST_CONTEXT_DIRECT;
+
+ switch (context) {
+ case SCST_CONTEXT_DIRECT_ATOMIC:
+ scst_process_active_cmd(cmd, true);
+ break;
+
+ case SCST_CONTEXT_DIRECT:
+ if (check_retries)
+ scst_check_retries(tgt);
+ scst_process_active_cmd(cmd, false);
+ break;
+
+ default:
+ PRINT_ERROR("Context %x is unknown, using the thread one",
+ context);
+		/* fall through */
+ case SCST_CONTEXT_THREAD:
+ if (check_retries)
+ scst_check_retries(tgt);
+ spin_lock_irqsave(&cmd->cmd_threads->cmd_list_lock, flags);
+ TRACE_DBG("Adding cmd %p to active cmd list", cmd);
+ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
+ list_add(&cmd->cmd_list_entry,
+ &cmd->cmd_threads->active_cmd_list);
+ else
+ list_add_tail(&cmd->cmd_list_entry,
+ &cmd->cmd_threads->active_cmd_list);
+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
+ spin_unlock_irqrestore(&cmd->cmd_threads->cmd_list_lock, flags);
+ break;
+
+ case SCST_CONTEXT_TASKLET:
+ if (check_retries)
+ scst_check_retries(tgt);
+ scst_schedule_tasklet(cmd);
+ break;
+ }
+ return;
+}
+
+/**
+ * scst_rx_data() - the command's data received
+ * @cmd:	SCST command
+ * @status:	data receiving completion status
+ * @pref_context: preferred command execution context
+ *
+ * Description:
+ * Notifies SCST that the driver received all the necessary data
+ * and the command is ready for further processing.
+ *
+ * The second argument sets data receiving completion status
+ * (see SCST_RX_STATUS_* constants for details)
+ */
+void scst_rx_data(struct scst_cmd *cmd, int status,
+ enum scst_exec_context pref_context)
+{
+
+ scst_set_rdy_to_xfer_time(cmd);
+
+ TRACE_DBG("Preferred context: %d", pref_context);
+ TRACE(TRACE_SCSI, "cmd %p, status %#x", cmd, status);
+
+ cmd->cmd_hw_pending = 0;
+
+#ifdef CONFIG_SCST_EXTRACHECKS
+ if ((in_irq() || irqs_disabled()) &&
+ ((pref_context == SCST_CONTEXT_DIRECT) ||
+ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
+ PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
+ "SCST_CONTEXT_THREAD instead", pref_context,
+ cmd->tgtt->name);
+ pref_context = SCST_CONTEXT_THREAD;
+ }
+#endif
+
+ switch (status) {
+ case SCST_RX_STATUS_SUCCESS:
+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
+ if (trace_flag & TRACE_RCV_BOT) {
+ int i;
+ struct scatterlist *sg;
+ if (cmd->in_sg != NULL)
+ sg = cmd->in_sg;
+ else if (cmd->tgt_in_sg != NULL)
+ sg = cmd->tgt_in_sg;
+ else if (cmd->tgt_sg != NULL)
+ sg = cmd->tgt_sg;
+ else
+ sg = cmd->sg;
+ if (sg != NULL) {
+ TRACE_RECV_BOT("RX data for cmd %p "
+ "(sg_cnt %d, sg %p, sg[0].page %p)",
+ cmd, cmd->tgt_sg_cnt, sg,
+ (void *)sg_page(&sg[0]));
+ for (i = 0; i < cmd->tgt_sg_cnt; ++i) {
+ PRINT_BUFF_FLAG(TRACE_RCV_BOT, "RX sg",
+ sg_virt(&sg[i]), sg[i].length);
+ }
+ }
+ }
+#endif
+ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
+
+ /* Small context optimization */
+ if ((pref_context == SCST_CONTEXT_TASKLET) ||
+ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
+ ((pref_context == SCST_CONTEXT_SAME) &&
+ scst_cmd_atomic(cmd))) {
+ if (!test_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
+ &cmd->tgt_dev->tgt_dev_flags))
+ pref_context = SCST_CONTEXT_THREAD;
+ }
+ break;
+
+ case SCST_RX_STATUS_ERROR_SENSE_SET:
+ scst_set_cmd_abnormal_done_state(cmd);
+ break;
+
+ case SCST_RX_STATUS_ERROR_FATAL:
+ set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
+		/* fall through */
+ case SCST_RX_STATUS_ERROR:
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_hardw_error));
+ scst_set_cmd_abnormal_done_state(cmd);
+ break;
+
+ default:
+ PRINT_ERROR("scst_rx_data() received unknown status %x",
+ status);
+ scst_set_cmd_abnormal_done_state(cmd);
+ break;
+ }
+
+ scst_process_redirect_cmd(cmd, pref_context, 1);
+ return;
+}
+EXPORT_SYMBOL(scst_rx_data);
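+
+/*
+ * A minimal sketch of the rdy_to_xfer()/scst_rx_data() pairing (the my_tgt_*
+ * names are hypothetical): rdy_to_xfer() only starts the data-out transfer;
+ * when all data has arrived, the driver reports the outcome via
+ * scst_rx_data() to resume the command.
+ */
+#if 0
+static int my_tgt_rdy_to_xfer(struct scst_cmd *cmd)
+{
+	/* ... queue the data-out transfer on the transport ... */
+	return SCST_TGT_RES_SUCCESS;
+}
+
+static void my_tgt_data_received(struct scst_cmd *cmd, bool success)
+{
+	scst_rx_data(cmd, success ? SCST_RX_STATUS_SUCCESS :
+		SCST_RX_STATUS_ERROR, SCST_CONTEXT_THREAD);
+}
+#endif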
+
+static int scst_tgt_pre_exec(struct scst_cmd *cmd)
+{
+ int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
+
+ cmd->state = SCST_CMD_STATE_SEND_FOR_EXEC;
+
+ if ((cmd->tgtt->pre_exec == NULL) || unlikely(cmd->internal))
+ goto out;
+
+ TRACE_DBG("Calling pre_exec(%p)", cmd);
+ scst_set_cur_start(cmd);
+ rc = cmd->tgtt->pre_exec(cmd);
+ scst_set_pre_exec_time(cmd);
+ TRACE_DBG("pre_exec() returned %d", rc);
+
+ if (unlikely(rc != SCST_PREPROCESS_STATUS_SUCCESS)) {
+ switch (rc) {
+ case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
+ scst_set_cmd_abnormal_done_state(cmd);
+ break;
+ case SCST_PREPROCESS_STATUS_ERROR_FATAL:
+ set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
+			/* fall through */
+ case SCST_PREPROCESS_STATUS_ERROR:
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_hardw_error));
+ scst_set_cmd_abnormal_done_state(cmd);
+ break;
+ case SCST_PREPROCESS_STATUS_NEED_THREAD:
+ TRACE_DBG("Target driver's %s pre_exec() requested "
+ "thread context, rescheduling",
+ cmd->tgtt->name);
+ res = SCST_CMD_STATE_RES_NEED_THREAD;
+ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+
+out:
+ return res;
+}
+
+static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
+ const uint8_t *rq_sense, int rq_sense_len, int resid)
+{
+
+ scst_set_exec_time(cmd);
+
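+	/*
+	 * Decode the SCSI mid-level result word: bits 0-7 are the status
+	 * byte, 8-15 the message byte, 16-23 the host byte and 24-31 the
+	 * driver byte (the msg_byte()/host_byte()/driver_byte() helpers).
+	 */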
+ cmd->status = result & 0xff;
+ cmd->msg_status = msg_byte(result);
+ cmd->host_status = host_byte(result);
+ cmd->driver_status = driver_byte(result);
+ if (unlikely(resid != 0)) {
+#ifdef CONFIG_SCST_EXTRACHECKS
+ if ((resid < 0) || (resid > cmd->resp_data_len)) {
+ PRINT_ERROR("Wrong resid %d (cmd->resp_data_len=%d, "
+ "op %x)", resid, cmd->resp_data_len,
+ cmd->cdb[0]);
+ } else
+#endif
+ scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);
+ }
+
+ if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION)) {
+ /* We might have double reset UA here */
+ cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
+ cmd->dbl_ua_orig_data_direction = cmd->data_direction;
+
+ scst_alloc_set_sense(cmd, 1, rq_sense, rq_sense_len);
+ }
+
+ TRACE(TRACE_SCSI, "cmd %p, result=%x, cmd->status=%x, resid=%d, "
+ "cmd->msg_status=%x, cmd->host_status=%x, "
+ "cmd->driver_status=%x (cmd %p)", cmd, result, cmd->status, resid,
+ cmd->msg_status, cmd->host_status, cmd->driver_status, cmd);
+
+ cmd->completed = 1;
+ return;
+}
+
+/* For small context optimization */
+static inline enum scst_exec_context scst_optimize_post_exec_context(
+ struct scst_cmd *cmd, enum scst_exec_context context)
+{
+ if (((context == SCST_CONTEXT_SAME) && scst_cmd_atomic(cmd)) ||
+ (context == SCST_CONTEXT_TASKLET) ||
+ (context == SCST_CONTEXT_DIRECT_ATOMIC)) {
+ if (!test_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
+ &cmd->tgt_dev->tgt_dev_flags))
+ context = SCST_CONTEXT_THREAD;
+ }
+ return context;
+}
+
+static void scst_cmd_done(void *data, char *sense, int result, int resid)
+{
+ struct scst_cmd *cmd;
+
+ cmd = (struct scst_cmd *)data;
+ if (cmd == NULL)
+ goto out;
+
+ scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE, resid);
+
+ cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
+
+ scst_process_redirect_cmd(cmd,
+ scst_optimize_post_exec_context(cmd, scst_estimate_context()), 0);
+
+out:
+ return;
+}
+
+static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state,
+ enum scst_exec_context pref_context)
+{
+
+ scst_set_exec_time(cmd);
+
+ if (next_state == SCST_CMD_STATE_DEFAULT)
+ next_state = SCST_CMD_STATE_PRE_DEV_DONE;
+
+#if defined(CONFIG_SCST_DEBUG)
+ if (next_state == SCST_CMD_STATE_PRE_DEV_DONE) {
+ if ((trace_flag & TRACE_RCV_TOP) && (cmd->sg != NULL)) {
+ int i;
+ struct scatterlist *sg = cmd->sg;
+ TRACE_RECV_TOP("Exec'd %d S/G(s) at %p sg[0].page at "
+ "%p", cmd->sg_cnt, sg, (void *)sg_page(&sg[0]));
+ for (i = 0; i < cmd->sg_cnt; ++i) {
+ TRACE_BUFF_FLAG(TRACE_RCV_TOP,
+ "Exec'd sg", sg_virt(&sg[i]),
+ sg[i].length);
+ }
+ }
+ }
+#endif
+
+ cmd->state = next_state;
+
+#ifdef CONFIG_SCST_EXTRACHECKS
+ if ((next_state != SCST_CMD_STATE_PRE_DEV_DONE) &&
+ (next_state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
+ (next_state != SCST_CMD_STATE_FINISHED) &&
+ (next_state != SCST_CMD_STATE_FINISHED_INTERNAL)) {
+ PRINT_ERROR("%s() received invalid cmd state %d (opcode %d)",
+ __func__, next_state, cmd->cdb[0]);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_hardw_error));
+ scst_set_cmd_abnormal_done_state(cmd);
+ }
+#endif
+ pref_context = scst_optimize_post_exec_context(cmd, pref_context);
+ scst_process_redirect_cmd(cmd, pref_context, 0);
+ return;
+}
+
+static int scst_report_luns_local(struct scst_cmd *cmd)
+{
+ int res = SCST_EXEC_COMPLETED, rc;
+ int dev_cnt = 0;
+ int buffer_size;
+ int i;
+ struct scst_tgt_dev *tgt_dev = NULL;
+ uint8_t *buffer;
+ int offs, overflow = 0;
+
+ if (scst_cmd_atomic(cmd)) {
+ res = SCST_EXEC_NEED_THREAD;
+ goto out;
+ }
+
+ rc = scst_check_local_events(cmd);
+ if (unlikely(rc != 0))
+ goto out_done;
+
+ cmd->status = 0;
+ cmd->msg_status = 0;
+ cmd->host_status = DID_OK;
+ cmd->driver_status = 0;
+
+ if ((cmd->cdb[2] != 0) && (cmd->cdb[2] != 2)) {
+ PRINT_ERROR("Unsupported SELECT REPORT value %x in REPORT "
+ "LUNS command", cmd->cdb[2]);
+ goto out_err;
+ }
+
+ buffer_size = scst_get_buf_first(cmd, &buffer);
+ if (unlikely(buffer_size == 0))
+ goto out_compl;
+ else if (unlikely(buffer_size < 0))
+ goto out_hw_err;
+
+ if (buffer_size < 16)
+ goto out_put_err;
+
+ memset(buffer, 0, buffer_size);
+ offs = 8;
+
+	/*
+	 * The cmd doesn't allow suspending activities, so we can access
+	 * sess->sess_tgt_dev_list_hash without any additional protection.
+	 */
+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
+ struct list_head *sess_tgt_dev_list_head =
+ &cmd->sess->sess_tgt_dev_list_hash[i];
+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
+ sess_tgt_dev_list_entry) {
+ if (!overflow) {
+ if (offs >= buffer_size) {
+ scst_put_buf(cmd, buffer);
+ buffer_size = scst_get_buf_next(cmd,
+ &buffer);
+ if (buffer_size > 0) {
+ memset(buffer, 0, buffer_size);
+ offs = 0;
+ } else {
+ overflow = 1;
+ goto inc_dev_cnt;
+ }
+ }
+ if ((buffer_size - offs) < 8) {
+ PRINT_ERROR("Buffer allocated for "
+ "REPORT LUNS command doesn't "
+						"allow fitting an 8-byte entry "
+ "(buffer_size=%d)",
+ buffer_size);
+ goto out_put_hw_err;
+ }
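+				/*
+				 * Encode the LUN in the first two bytes of
+				 * the 8-byte entry: flat space addressing
+				 * (bits 7:6 of byte 0 = 01b) for non-zero
+				 * LUNs if the ACG asks for it, peripheral
+				 * device addressing otherwise (see SAM).
+				 */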
+ if ((cmd->sess->acg->addr_method == SCST_LUN_ADDR_METHOD_FLAT) &&
+ (tgt_dev->lun != 0)) {
+ buffer[offs] = (tgt_dev->lun >> 8) & 0x3f;
+ buffer[offs] = buffer[offs] | 0x40;
+ buffer[offs+1] = tgt_dev->lun & 0xff;
+ } else {
+ buffer[offs] = (tgt_dev->lun >> 8) & 0xff;
+ buffer[offs+1] = tgt_dev->lun & 0xff;
+ }
+ offs += 8;
+ }
+inc_dev_cnt:
+ dev_cnt++;
+ }
+ }
+ if (!overflow)
+ scst_put_buf(cmd, buffer);
+
+ /* Set the response header */
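+	/*
+	 * Per SPC, bytes 0-3 of the parameter data hold the LUN LIST LENGTH
+	 * (the number of LUN entries * 8, in bytes), bytes 4-7 are reserved
+	 * and the 8-byte LUN entries start at byte 8.
+	 */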
+ buffer_size = scst_get_buf_first(cmd, &buffer);
+ if (unlikely(buffer_size == 0))
+ goto out_compl;
+ else if (unlikely(buffer_size < 0))
+ goto out_hw_err;
+
+ dev_cnt *= 8;
+ buffer[0] = (dev_cnt >> 24) & 0xff;
+ buffer[1] = (dev_cnt >> 16) & 0xff;
+ buffer[2] = (dev_cnt >> 8) & 0xff;
+ buffer[3] = dev_cnt & 0xff;
+
+ scst_put_buf(cmd, buffer);
+
+ dev_cnt += 8;
+ if (dev_cnt < cmd->resp_data_len)
+ scst_set_resp_data_len(cmd, dev_cnt);
+
+out_compl:
+ cmd->completed = 1;
+
+	/* Clear a leftover sense_reported_luns_data_changed UA, if any. */
+
+	/*
+	 * The cmd doesn't allow suspending activities, so we can access
+	 * sess->sess_tgt_dev_list_hash without any additional protection.
+	 */
+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
+ struct list_head *sess_tgt_dev_list_head =
+ &cmd->sess->sess_tgt_dev_list_hash[i];
+
+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
+ sess_tgt_dev_list_entry) {
+ struct scst_tgt_dev_UA *ua;
+
+ spin_lock_bh(&tgt_dev->tgt_dev_lock);
+ list_for_each_entry(ua, &tgt_dev->UA_list,
+ UA_list_entry) {
+ if (scst_analyze_sense(ua->UA_sense_buffer,
+ ua->UA_valid_sense_len,
+ SCST_SENSE_ALL_VALID,
+ SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
+					TRACE_MGMT_DBG("Freeing unneeded "
+ "REPORTED LUNS DATA CHANGED UA "
+ "%p", ua);
+ list_del(&ua->UA_list_entry);
+ mempool_free(ua, scst_ua_mempool);
+ break;
+ }
+ }
+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
+ }
+ }
+
+out_done:
+ /* Report the result */
+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
+
+out:
+ return res;
+
+out_put_err:
+ scst_put_buf(cmd, buffer);
+
+out_err:
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
+ goto out_compl;
+
+out_put_hw_err:
+ scst_put_buf(cmd, buffer);
+
+out_hw_err:
+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
+ goto out_compl;
+}
+
+static int scst_request_sense_local(struct scst_cmd *cmd)
+{
+ int res = SCST_EXEC_COMPLETED, rc;
+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
+ uint8_t *buffer;
+ int buffer_size = 0, sl = 0;
+
+ rc = scst_check_local_events(cmd);
+ if (unlikely(rc != 0))
+ goto out_done;
+
+ cmd->status = 0;
+ cmd->msg_status = 0;
+ cmd->host_status = DID_OK;
+ cmd->driver_status = 0;
+
+ spin_lock_bh(&tgt_dev->tgt_dev_lock);
+
+ if (tgt_dev->tgt_dev_valid_sense_len == 0)
+ goto out_not_completed;
+
+ TRACE(TRACE_SCSI, "%s: Returning stored sense", cmd->op_name);
+
+ buffer_size = scst_get_buf_first(cmd, &buffer);
+ if (unlikely(buffer_size == 0))
+ goto out_compl;
+ else if (unlikely(buffer_size < 0))
+ goto out_hw_err;
+
+ memset(buffer, 0, buffer_size);
+
+ if (((tgt_dev->tgt_dev_sense[0] == 0x70) ||
+ (tgt_dev->tgt_dev_sense[0] == 0x71)) && (cmd->cdb[1] & 1)) {
+		PRINT_WARNING("%s: The saved sense is in fixed format, but "
+			"descriptor format was requested. The conversion "
+			"will truncate data", cmd->op_name);
+ PRINT_BUFFER("Original sense", tgt_dev->tgt_dev_sense,
+ tgt_dev->tgt_dev_valid_sense_len);
+
+ buffer_size = min(SCST_STANDARD_SENSE_LEN, buffer_size);
+ sl = scst_set_sense(buffer, buffer_size, true,
+ tgt_dev->tgt_dev_sense[2], tgt_dev->tgt_dev_sense[12],
+ tgt_dev->tgt_dev_sense[13]);
+ } else if (((tgt_dev->tgt_dev_sense[0] == 0x72) ||
+ (tgt_dev->tgt_dev_sense[0] == 0x73)) && !(cmd->cdb[1] & 1)) {
+		PRINT_WARNING("%s: The saved sense is in descriptor format, "
+			"but fixed format was requested. The conversion "
+			"will truncate data", cmd->op_name);
+ PRINT_BUFFER("Original sense", tgt_dev->tgt_dev_sense,
+ tgt_dev->tgt_dev_valid_sense_len);
+
+ buffer_size = min(SCST_STANDARD_SENSE_LEN, buffer_size);
+ sl = scst_set_sense(buffer, buffer_size, false,
+ tgt_dev->tgt_dev_sense[1], tgt_dev->tgt_dev_sense[2],
+ tgt_dev->tgt_dev_sense[3]);
+ } else {
+ if (buffer_size >= tgt_dev->tgt_dev_valid_sense_len)
+ sl = tgt_dev->tgt_dev_valid_sense_len;
+ else {
+ sl = buffer_size;
+			PRINT_WARNING("%s: Returned sense truncated to "
+ "size %d (needed %d)", cmd->op_name,
+ buffer_size, tgt_dev->tgt_dev_valid_sense_len);
+ }
+ memcpy(buffer, tgt_dev->tgt_dev_sense, sl);
+ }
+
+ scst_put_buf(cmd, buffer);
+
+ tgt_dev->tgt_dev_valid_sense_len = 0;
+
+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
+
+ scst_set_resp_data_len(cmd, sl);
+
+out_compl:
+ cmd->completed = 1;
+
+out_done:
+ /* Report the result */
+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
+
+out:
+ return res;
+
+out_hw_err:
+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
+ goto out_compl;
+
+out_not_completed:
+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
+ res = SCST_EXEC_NOT_COMPLETED;
+ goto out;
+}
+
+static int scst_pre_select(struct scst_cmd *cmd)
+{
+ int res = SCST_EXEC_NOT_COMPLETED;
+
+ if (scst_cmd_atomic(cmd)) {
+ res = SCST_EXEC_NEED_THREAD;
+ goto out;
+ }
+
+ scst_block_dev_cmd(cmd, 1);
+
+	/* The check for local events will be done when the cmd is executed */
+
+out:
+ return res;
+}
+
+static int scst_reserve_local(struct scst_cmd *cmd)
+{
+ int res = SCST_EXEC_NOT_COMPLETED, rc;
+ struct scst_device *dev;
+ struct scst_tgt_dev *tgt_dev_tmp;
+
+ if (scst_cmd_atomic(cmd)) {
+ res = SCST_EXEC_NEED_THREAD;
+ goto out;
+ }
+
+ if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
+ PRINT_ERROR("RESERVE_10: 3rdPty RESERVE not implemented "
+ "(lun=%lld)", (long long unsigned int)cmd->lun);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
+ goto out_done;
+ }
+
+ dev = cmd->dev;
+
+ if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
+ scst_block_dev_cmd(cmd, 1);
+
+ rc = scst_check_local_events(cmd);
+ if (unlikely(rc != 0))
+ goto out_done;
+
+ spin_lock_bh(&dev->dev_lock);
+
+ if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
+ spin_unlock_bh(&dev->dev_lock);
+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
+ goto out_done;
+ }
+
+ list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
+ dev_tgt_dev_list_entry) {
+ if (cmd->tgt_dev != tgt_dev_tmp)
+ set_bit(SCST_TGT_DEV_RESERVED,
+ &tgt_dev_tmp->tgt_dev_flags);
+ }
+ dev->dev_reserved = 1;
+
+ spin_unlock_bh(&dev->dev_lock);
+
+out:
+ return res;
+
+out_done:
+ /* Report the result */
+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
+ res = SCST_EXEC_COMPLETED;
+ goto out;
+}
+
+static int scst_release_local(struct scst_cmd *cmd)
+{
+ int res = SCST_EXEC_NOT_COMPLETED, rc;
+ struct scst_tgt_dev *tgt_dev_tmp;
+ struct scst_device *dev;
+
+ if (scst_cmd_atomic(cmd)) {
+ res = SCST_EXEC_NEED_THREAD;
+ goto out;
+ }
+
+ dev = cmd->dev;
+
+ if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
+ scst_block_dev_cmd(cmd, 1);
+
+ rc = scst_check_local_events(cmd);
+ if (unlikely(rc != 0))
+ goto out_done;
+
+ spin_lock_bh(&dev->dev_lock);
+
+	/*
+	 * The device could be RELEASED behind us if the RESERVING session
+	 * is closed (see scst_free_tgt_dev()), but this actually doesn't
+	 * matter, so take the lock and don't retest the DEV_RESERVED bits
+	 * again.
+	 */
+ if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
+ res = SCST_EXEC_COMPLETED;
+ cmd->status = 0;
+ cmd->msg_status = 0;
+ cmd->host_status = DID_OK;
+ cmd->driver_status = 0;
+ cmd->completed = 1;
+ } else {
+ list_for_each_entry(tgt_dev_tmp,
+ &dev->dev_tgt_dev_list,
+ dev_tgt_dev_list_entry) {
+ clear_bit(SCST_TGT_DEV_RESERVED,
+ &tgt_dev_tmp->tgt_dev_flags);
+ }
+ dev->dev_reserved = 0;
+ }
+
+ spin_unlock_bh(&dev->dev_lock);
+
+ if (res == SCST_EXEC_COMPLETED)
+ goto out_done;
+
+out:
+ return res;
+
+out_done:
+ res = SCST_EXEC_COMPLETED;
+ /* Report the result */
+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
+ goto out;
+}
+
+/**
+ * scst_check_local_events() - check if there are any local SCSI events
+ *
+ * Description:
+ *    Checks if the command can be executed or if there are local events,
+ *    like reservations, pending UAs, etc. Returns < 0 if the command must
+ *    be aborted, > 0 if there is an event and the command should be
+ *    immediately completed, or 0 otherwise.
+ *
+ * !! Dev handlers implementing the exec() callback must call this function
+ * !! just before the actual command execution!
+ *
+ * On call no locks, no IRQ or IRQ-disabled context allowed.
+ */
+int scst_check_local_events(struct scst_cmd *cmd)
+{
+ int res, rc;
+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
+ struct scst_device *dev = cmd->dev;
+
+ /*
+ * There's no race here, because we need to trace commands sent
+ * *after* dev_double_ua_possible flag was set.
+ */
+ if (unlikely(dev->dev_double_ua_possible))
+ cmd->double_ua_possible = 1;
+
+ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
+ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
+ goto out_uncomplete;
+ }
+
+ /* Reserve check before Unit Attention */
+ if (unlikely(test_bit(SCST_TGT_DEV_RESERVED,
+ &tgt_dev->tgt_dev_flags))) {
+ if ((cmd->op_flags & SCST_REG_RESERVE_ALLOWED) == 0) {
+ scst_set_cmd_error_status(cmd,
+ SAM_STAT_RESERVATION_CONFLICT);
+ goto out_complete;
+ }
+ }
+
+	/* If we had an internal bus reset, set a reset UA for the command */
+ if ((dev->scsi_dev != NULL) &&
+ unlikely(dev->scsi_dev->was_reset)) {
+ if (scst_is_ua_command(cmd)) {
+ int done = 0;
+			/*
+			 * Prevent more than one cmd from being triggered by
+			 * was_reset.
+			 */
+ spin_lock_bh(&dev->dev_lock);
+ if (dev->scsi_dev->was_reset) {
+ TRACE(TRACE_MGMT, "was_reset is %d", 1);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_reset_UA));
+ /*
+ * It looks like it is safe to clear was_reset
+ * here.
+ */
+ dev->scsi_dev->was_reset = 0;
+ done = 1;
+ }
+ spin_unlock_bh(&dev->dev_lock);
+
+ if (done)
+ goto out_complete;
+ }
+ }
+
+ if (unlikely(test_bit(SCST_TGT_DEV_UA_PENDING,
+ &cmd->tgt_dev->tgt_dev_flags))) {
+ if (scst_is_ua_command(cmd)) {
+ rc = scst_set_pending_UA(cmd);
+ if (rc == 0)
+ goto out_complete;
+ }
+ }
+
+ res = 0;
+
+out:
+ return res;
+
+out_complete:
+ res = 1;
+ BUG_ON(!cmd->completed);
+ goto out;
+
+out_uncomplete:
+ res = -1;
+ goto out;
+}
+EXPORT_SYMBOL_GPL(scst_check_local_events);
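+
+/*
+ * A minimal sketch of the rule above for a dev handler exec() callback
+ * (my_devtype_exec() is hypothetical): call scst_check_local_events() right
+ * before the actual execution and complete the command on a non-zero result,
+ * as the local exec functions in this file do.
+ */
+#if 0
+static int my_devtype_exec(struct scst_cmd *cmd)
+{
+	int rc = scst_check_local_events(cmd);
+	if (unlikely(rc != 0)) {
+		/* Aborted (< 0) or completed with an event (> 0) */
+		cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT,
+			SCST_CONTEXT_SAME);
+		return SCST_EXEC_COMPLETED;
+	}
+
+	/* ... the actual command execution ... */
+
+	return SCST_EXEC_COMPLETED;
+}
+#endif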
+
+/* No locks */
+void scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev, atomic_t *slot)
+{
+ if (slot == NULL)
+ goto inc;
+
+ /* Optimized for lockless fast path */
+
+ TRACE_SN("Slot %zd, *cur_sn_slot %d", slot - tgt_dev->sn_slots,
+ atomic_read(slot));
+
+ if (!atomic_dec_and_test(slot))
+ goto out;
+
+ TRACE_SN("Slot is 0 (num_free_sn_slots=%d)",
+ tgt_dev->num_free_sn_slots);
+ if (tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1) {
+ spin_lock_irq(&tgt_dev->sn_lock);
+ if (likely(tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1)) {
+ if (tgt_dev->num_free_sn_slots < 0)
+ tgt_dev->cur_sn_slot = slot;
+ /*
+ * To be in-sync with SIMPLE case in scst_cmd_set_sn()
+ */
+ smp_mb();
+ tgt_dev->num_free_sn_slots++;
+ TRACE_SN("Incremented num_free_sn_slots (%d)",
+ tgt_dev->num_free_sn_slots);
+
+ }
+ spin_unlock_irq(&tgt_dev->sn_lock);
+ }
+
+inc:
+	/*
+	 * No protection of expected_sn is needed, because only one thread
+	 * at a time can be here (serialized by the SN). Also it is assumed
+	 * that there can be no half-incremented halves.
+	 */
+ tgt_dev->expected_sn++;
+	/*
+	 * The write must be before the def_cmd_count read to be in sync
+	 * with scst_post_exec_sn(). See the comment in scst_send_for_exec().
+	 */
+ smp_mb();
+ TRACE_SN("Next expected_sn: %d", tgt_dev->expected_sn);
+
+out:
+ return;
+}
+
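+/*
+ * Commands with an assigned SN are executed in expected_sn order: once a
+ * command passes its exec phase, the expected SN is advanced via
+ * scst_inc_expected_sn() and commands deferred because their SN wasn't
+ * expected yet get re-activated.
+ */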
+/* No locks */
+static struct scst_cmd *scst_post_exec_sn(struct scst_cmd *cmd,
+ bool make_active)
+{
+ /* For HQ commands SN is not set */
+ bool inc_expected_sn = !cmd->inc_expected_sn_on_done &&
+ cmd->sn_set && !cmd->retry;
+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
+ struct scst_cmd *res;
+
+ if (inc_expected_sn)
+ scst_inc_expected_sn(tgt_dev, cmd->sn_slot);
+
+ if (make_active) {
+ scst_make_deferred_commands_active(tgt_dev);
+ res = NULL;
+ } else
+ res = scst_check_deferred_commands(tgt_dev);
+ return res;
+}
+
+/* cmd must be additionally referenced to not die inside */
+static int scst_do_real_exec(struct scst_cmd *cmd)
+{
+ int res = SCST_EXEC_NOT_COMPLETED;
+ int rc;
+ bool atomic = scst_cmd_atomic(cmd);
+ struct scst_device *dev = cmd->dev;
+ struct scst_dev_type *handler = dev->handler;
+ struct io_context *old_ctx = NULL;
+ bool ctx_changed = false;
+
+ if (!atomic)
+ ctx_changed = scst_set_io_context(cmd, &old_ctx);
+
+ cmd->state = SCST_CMD_STATE_REAL_EXECUTING;
+
+ if (handler->exec) {
+ if (unlikely(!dev->handler->exec_atomic && atomic)) {
+			/*
+			 * This shouldn't happen, because of the
+			 * SCST_TGT_DEV_AFTER_* optimization.
+			 */
+ TRACE_DBG("Dev handler %s exec() needs thread "
+ "context, rescheduling", dev->handler->name);
+ res = SCST_EXEC_NEED_THREAD;
+ goto out_restore;
+ }
+
+ TRACE_DBG("Calling dev handler %s exec(%p)",
+ handler->name, cmd);
+ TRACE_BUFF_FLAG(TRACE_SND_TOP, "Execing: ", cmd->cdb,
+ cmd->cdb_len);
+ scst_set_cur_start(cmd);
+ res = handler->exec(cmd);
+ TRACE_DBG("Dev handler %s exec() returned %d",
+ handler->name, res);
+
+ if (res == SCST_EXEC_COMPLETED)
+ goto out_complete;
+ else if (res == SCST_EXEC_NEED_THREAD)
+ goto out_restore;
+
+ scst_set_exec_time(cmd);
+
+ BUG_ON(res != SCST_EXEC_NOT_COMPLETED);
+ }
+
+ TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
+
+ if (unlikely(dev->scsi_dev == NULL)) {
+		PRINT_ERROR("Commands for virtual devices must be "
+			"processed by the device handler (LUN %lld)!",
+ (long long unsigned int)cmd->lun);
+ goto out_error;
+ }
+
+ res = scst_check_local_events(cmd);
+ if (unlikely(res != 0))
+ goto out_done;
+
+#ifndef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
+ if (unlikely(atomic)) {
+		TRACE_DBG("Pass-through exec() cannot be called in atomic "
+ "context, rescheduling to the thread (handler %s)",
+ handler->name);
+ res = SCST_EXEC_NEED_THREAD;
+ goto out_restore;
+ }
+#endif
+
+ scst_set_cur_start(cmd);
+
+ rc = scst_scsi_exec_async(cmd, scst_cmd_done);
+ if (unlikely(rc != 0)) {
+ if (atomic) {
+ res = SCST_EXEC_NEED_THREAD;
+ goto out_restore;
+ } else {
+ PRINT_ERROR("scst pass-through exec failed: %x", rc);
+ goto out_error;
+ }
+ }
+
+out_complete:
+ res = SCST_EXEC_COMPLETED;
+
+out_reset_ctx:
+ if (ctx_changed)
+ scst_reset_io_context(cmd->tgt_dev, old_ctx);
+ return res;
+
+out_restore:
+ scst_set_exec_time(cmd);
+ /* Restore the state */
+ cmd->state = SCST_CMD_STATE_REAL_EXEC;
+ goto out_reset_ctx;
+
+out_error:
+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
+ goto out_done;
+
+out_done:
+ res = SCST_EXEC_COMPLETED;
+ /* Report the result */
+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
+ goto out_complete;
+}
+
+static inline int scst_real_exec(struct scst_cmd *cmd)
+{
+ int res;
+
+ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
+ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
+ BUILD_BUG_ON(SCST_CMD_STATE_RES_NEED_THREAD != SCST_EXEC_NEED_THREAD);
+
+ __scst_cmd_get(cmd);
+
+ res = scst_do_real_exec(cmd);
+
+ if (likely(res == SCST_EXEC_COMPLETED)) {
+ scst_post_exec_sn(cmd, true);
+ if (cmd->dev->scsi_dev != NULL)
+ generic_unplug_device(
+ cmd->dev->scsi_dev->request_queue);
+ } else
+ BUG_ON(res != SCST_EXEC_NEED_THREAD);
+
+ __scst_cmd_put(cmd);
+
+ /* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
+ return res;
+}
+
+static int scst_do_local_exec(struct scst_cmd *cmd)
+{
+ int res;
+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
+
+ /* Check READ_ONLY device status */
+ if ((cmd->op_flags & SCST_WRITE_MEDIUM) &&
+ (tgt_dev->acg_dev->rd_only || cmd->dev->swp ||
+ cmd->dev->rd_only)) {
+		PRINT_WARNING("Attempt to write to a read-only device: "
+ "initiator %s, LUN %lld, op %x",
+ cmd->sess->initiator_name, cmd->lun, cmd->cdb[0]);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_data_protect));
+ goto out_done;
+ }
+
+ if (!scst_is_cmd_local(cmd)) {
+ res = SCST_EXEC_NOT_COMPLETED;
+ goto out;
+ }
+
+ switch (cmd->cdb[0]) {
+ case MODE_SELECT:
+ case MODE_SELECT_10:
+ case LOG_SELECT:
+ res = scst_pre_select(cmd);
+ break;
+ case RESERVE:
+ case RESERVE_10:
+ res = scst_reserve_local(cmd);
+ break;
+ case RELEASE:
+ case RELEASE_10:
+ res = scst_release_local(cmd);
+ break;
+ case REPORT_LUNS:
+ res = scst_report_luns_local(cmd);
+ break;
+ case REQUEST_SENSE:
+ res = scst_request_sense_local(cmd);
+ break;
+ default:
+ res = SCST_EXEC_NOT_COMPLETED;
+ break;
+ }
+
+out:
+ return res;
+
+out_done:
+ /* Report the result */
+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
+ res = SCST_EXEC_COMPLETED;
+ goto out;
+}
+
+static int scst_local_exec(struct scst_cmd *cmd)
+{
+ int res;
+
+ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
+ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
+ BUILD_BUG_ON(SCST_CMD_STATE_RES_NEED_THREAD != SCST_EXEC_NEED_THREAD);
+
+ __scst_cmd_get(cmd);
+
+ res = scst_do_local_exec(cmd);
+ if (likely(res == SCST_EXEC_NOT_COMPLETED))
+ cmd->state = SCST_CMD_STATE_REAL_EXEC;
+ else if (res == SCST_EXEC_COMPLETED)
+ scst_post_exec_sn(cmd, true);
+ else
+ BUG_ON(res != SCST_EXEC_NEED_THREAD);
+
+ __scst_cmd_put(cmd);
+
+ /* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
+ return res;
+}
+
+static int scst_exec(struct scst_cmd **active_cmd)
+{
+ struct scst_cmd *cmd = *active_cmd;
+ struct scst_cmd *ref_cmd;
+ struct scst_device *dev = cmd->dev;
+ int res = SCST_CMD_STATE_RES_CONT_NEXT, count;
+
+ if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
+ goto out;
+
+ /* To protect tgt_dev */
+ ref_cmd = cmd;
+ __scst_cmd_get(ref_cmd);
+
+ count = 0;
+ while (1) {
+ int rc;
+
+ cmd->sent_for_exec = 1;
+		/*
+		 * To sync with scst_abort_cmd(). The above assignment must
+		 * be before the SCST_CMD_ABORTED test, done later in
+		 * scst_check_local_events(). It's far from here, so the order
+		 * is virtually guaranteed, but let's have it just in case.
+		 */
+ smp_mb();
+
+ cmd->scst_cmd_done = scst_cmd_done_local;
+ cmd->state = SCST_CMD_STATE_LOCAL_EXEC;
+
+ rc = scst_do_local_exec(cmd);
+ if (likely(rc == SCST_EXEC_NOT_COMPLETED))
+ /* Nothing to do */;
+ else if (rc == SCST_EXEC_NEED_THREAD) {
+ TRACE_DBG("%s", "scst_do_local_exec() requested "
+ "thread context, rescheduling");
+ scst_dec_on_dev_cmd(cmd);
+ res = SCST_CMD_STATE_RES_NEED_THREAD;
+ break;
+ } else {
+ BUG_ON(rc != SCST_EXEC_COMPLETED);
+ goto done;
+ }
+
+ cmd->state = SCST_CMD_STATE_REAL_EXEC;
+
+ rc = scst_do_real_exec(cmd);
+ if (likely(rc == SCST_EXEC_COMPLETED))
+ /* Nothing to do */;
+ else if (rc == SCST_EXEC_NEED_THREAD) {
+ TRACE_DBG("scst_real_exec() requested thread "
+ "context, rescheduling (cmd %p)", cmd);
+ scst_dec_on_dev_cmd(cmd);
+ res = SCST_CMD_STATE_RES_NEED_THREAD;
+ break;
+ } else
+ BUG();
+
+done:
+ count++;
+
+ cmd = scst_post_exec_sn(cmd, false);
+ if (cmd == NULL)
+ break;
+
+ if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
+ break;
+
+ __scst_cmd_put(ref_cmd);
+ ref_cmd = cmd;
+ __scst_cmd_get(ref_cmd);
+ }
+
+ *active_cmd = cmd;
+
+ if (count == 0)
+ goto out_put;
+
+ if (dev->scsi_dev != NULL)
+ generic_unplug_device(dev->scsi