Commit c66094bf — authored by Hannes Reinecke, committed by Nicholas Bellinger
Browse files

target_core_alua: Referrals infrastructure



Add infrastructure for referrals.

v2 changes:

 - Fix unsigned long long division in core_alua_state_lba_dependent on
   32-bit  (Fengguang + Chen + Hannes)
 - Fix compile warning in core_alua_state_lba_dependent (nab)
 - Convert segment_* + sectors variables in core_alua_state_lba_dependent
   to u64 (Hannes)

Signed-off-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent fbfe858f
Loading
Loading
Loading
Loading
+154 −0
Original line number Diff line number Diff line
@@ -57,6 +57,75 @@ static LIST_HEAD(lu_gps_list);

struct t10_alua_lu_gp *default_lu_gp;

/*
 * REPORT REFERRALS
 *
 * See sbc3r35 section 5.23
 *
 * Emulates the REPORT REFERRALS service action: walks the device's LBA
 * map list and emits one user data segment referral descriptor per map
 * entry, each followed by one target port group descriptor per map
 * member.  Returns 0 on success (command completed with GOOD status),
 * or a sense_reason_t describing the failure.
 */
sense_reason_t
target_emulate_report_referrals(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_alua_lba_map *map;
	struct t10_alua_lba_map_member *map_mem;
	unsigned char *buf;
	u32 rd_len = 0, off;

	/* Need at least the 4-byte parameter data header */
	if (cmd->data_length < 4) {
		pr_warn("REPORT REFERRALS allocation length %u too"
			" small\n", cmd->data_length);
		return TCM_INVALID_CDB_FIELD;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	off = 4;
	spin_lock(&dev->t10_alua.lba_map_lock);
	if (list_empty(&dev->t10_alua.lba_map_list)) {
		/* No referrals configured -> command is not supported */
		spin_unlock(&dev->t10_alua.lba_map_lock);
		transport_kunmap_data_sg(cmd);

		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	list_for_each_entry(map, &dev->t10_alua.lba_map_list,
			    lba_map_list) {
		int desc_num = off + 3;
		int pg_num;

		off += 4;
		/*
		 * The initiator's ALLOCATION LENGTH may be smaller than
		 * the full referrals payload; bounds-check every store
		 * against cmd->data_length so we never write past the
		 * mapped buffer, while still accumulating rd_len so the
		 * RETURN DATA LENGTH reports the full required size.
		 */
		if (cmd->data_length > off)
			put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
		off += 8;
		if (cmd->data_length > off)
			put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
		off += 8;
		rd_len += 20;
		pg_num = 0;
		list_for_each_entry(map_mem, &map->lba_map_mem_list,
				    lba_map_mem_list) {
			int alua_state = map_mem->lba_map_mem_alua_state;
			int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;

			if (cmd->data_length > off)
				buf[off] = alua_state & 0x0f;
			off += 2;
			if (cmd->data_length > off)
				buf[off] = (alua_pg_id >> 8) & 0xff;
			off++;
			if (cmd->data_length > off)
				buf[off] = (alua_pg_id & 0xff);
			off++;
			rd_len += 4;
			pg_num++;
		}
		/* Backfill the per-descriptor target port group count */
		if (cmd->data_length > desc_num)
			buf[desc_num] = pg_num;
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);

	/*
	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
	 */
	put_unaligned_be16(rd_len, &buf[2]);

	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}

/*
 * REPORT_TARGET_PORT_GROUPS
 *
@@ -391,6 +460,81 @@ static inline int core_alua_state_nonoptimized(
	return 0;
}

/*
 * Check the device's LBA map for the range addressed by @cmd while the
 * target port group is in the LBA DEPENDENT primary access state
 * (referrals support).
 *
 * Returns 0 when the command may proceed on this port group, or 1 with
 * *alua_ascq set to the matching ALUA ASCQ when some covered segment is
 * in Standby or Unavailable state for @tg_pt_gp, or when an LBA is not
 * covered by any map entry at all.
 */
static inline int core_alua_state_lba_dependent(
	struct se_cmd *cmd,
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	u8 *alua_ascq)
{
	struct se_device *dev = cmd->se_dev;
	u64 segment_size, segment_mult, sectors, lba;

	/* Only need to check for cdb actually containing LBAs */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
		return 0;

	spin_lock(&dev->t10_alua.lba_map_lock);
	segment_size = dev->t10_alua.lba_map_segment_size;
	segment_mult = dev->t10_alua.lba_map_segment_multiplier;
	/* Number of logical blocks this command transfers */
	sectors = cmd->data_length / dev->dev_attrib.block_size;

	/* Walk the command's LBA range one matching map segment at a time */
	lba = cmd->t_task_lba;
	while (lba < cmd->t_task_lba + sectors) {
		struct t10_alua_lba_map *cur_map = NULL, *map;
		struct t10_alua_lba_map_member *map_mem;

		list_for_each_entry(map, &dev->t10_alua.lba_map_list,
				    lba_map_list) {
			u64 start_lba, last_lba;
			u64 first_lba = map->lba_map_first_lba;

			if (segment_mult) {
				/*
				 * Striped layout: fold the LBA into the
				 * repeating (segment_size * segment_mult)
				 * window.  sector_div() divides tmp in
				 * place and returns the remainder, which
				 * avoids a raw 64-bit division on 32-bit
				 * builds (see the v2 changelog).
				 */
				u64 tmp = lba;
				start_lba = sector_div(tmp, segment_size * segment_mult);

				last_lba = first_lba + segment_size - 1;
				if (start_lba >= first_lba &&
				    start_lba <= last_lba) {
					/* Skip ahead past this stripe */
					lba += segment_size;
					cur_map = map;
					break;
				}
			} else {
				/* Linear layout: simple range containment */
				last_lba = map->lba_map_last_lba;
				if (lba >= first_lba && lba <= last_lba) {
					lba = last_lba + 1;
					cur_map = map;
					break;
				}
			}
		}
		/* LBA not covered by any map entry -> treat as unavailable */
		if (!cur_map) {
			spin_unlock(&dev->t10_alua.lba_map_lock);
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
			return 1;
		}
		/* Inspect this segment's state for our target port group */
		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
				    lba_map_mem_list) {
			if (map_mem->lba_map_mem_alua_pg_id !=
			    tg_pt_gp->tg_pt_gp_id)
				continue;
			switch(map_mem->lba_map_mem_alua_state) {
			case ALUA_ACCESS_STATE_STANDBY:
				/* Lock is dropped on every early return */
				spin_unlock(&dev->t10_alua.lba_map_lock);
				*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
				return 1;
			case ALUA_ACCESS_STATE_UNAVAILABLE:
				spin_unlock(&dev->t10_alua.lba_map_lock);
				*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
				return 1;
			default:
				break;
			}
		}
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	return 0;
}

static inline int core_alua_state_standby(
	struct se_cmd *cmd,
	unsigned char *cdb,
@@ -588,6 +732,9 @@ target_alua_state_check(struct se_cmd *cmd)
	case ALUA_ACCESS_STATE_TRANSITION:
		ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
		break;
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
		break;
	/*
	 * OFFLINE is a secondary ALUA target port group access state, that is
	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
@@ -650,6 +797,11 @@ core_alua_check_transition(int state, int valid, int *primary)
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		if (!(valid & ALUA_LBD_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_OFFLINE:
		/*
		 * OFFLINE state is defined as a secondary target port
@@ -685,6 +837,8 @@ static char *core_alua_dump_state(int state)
		return "Active/Optimized";
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		return "Active/NonOptimized";
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		return "LBA Dependent";
	case ALUA_ACCESS_STATE_STANDBY:
		return "Standby";
	case ALUA_ACCESS_STATE_UNAVAILABLE:
+3 −1
Original line number Diff line number Diff line
@@ -13,12 +13,13 @@
/*
 * ASYMMETRIC ACCESS STATE field
 *
 * from spc4r17 section 6.27 Table 245
 * from spc4r36j section 6.37 Table 307
 */
#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED	0x0
#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED	0x1
#define ALUA_ACCESS_STATE_STANDBY		0x2
#define ALUA_ACCESS_STATE_UNAVAILABLE		0x3
#define ALUA_ACCESS_STATE_LBA_DEPENDENT		0x4
#define ALUA_ACCESS_STATE_OFFLINE		0xe
#define ALUA_ACCESS_STATE_TRANSITION		0xf

@@ -88,6 +89,7 @@ extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
extern sense_reason_t target_emulate_report_referrals(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
				struct se_device *, struct se_port *,
+8 −1
Original line number Diff line number Diff line
@@ -2054,6 +2054,13 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
			" transition while TPGS_IMPLICIT_ALUA is disabled\n");
		return -EINVAL;
	}
	if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
	    new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
		/* LBA DEPENDENT is only allowed with implicit ALUA */
		pr_err("Unable to process implicit configfs ALUA transition"
		       " while explicit ALUA management is enabled\n");
		return -EINVAL;
	}

	ret = core_alua_do_port_transition(tg_pt_gp, dev,
					NULL, NULL, new_state, 0);
@@ -2188,7 +2195,7 @@ SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent,
			       tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent,
				tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO | S_IWUSR);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO);

SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable,
			       tg_pt_gp_alua_supported_states, ALUA_U_SUP);
+2 −0
Original line number Diff line number Diff line
@@ -1439,6 +1439,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;
+4 −1
Original line number Diff line number Diff line
@@ -33,7 +33,7 @@

#include "target_core_internal.h"
#include "target_core_ua.h"

#include "target_core_alua.h"

static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
@@ -731,6 +731,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
Loading