| Message ID | 20200924062136.47019-1-joshi.k@samsung.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | null_blk: synchronization fix for zoned device |
On 2020/09/24 15:26, Kanchan Joshi wrote:
> Parallel write,read,zone-mgmt operations accessing/altering zone state
> and write-pointer may get into race. Avoid the situation by using a new
> spinlock for zoned device.
> Concurrent zone-appends (on a zone) returning same write-pointer issue
> is also avoided using this lock.
>
> Signed-off-by: Kanchan Joshi <joshi.k@samsung.com>
> ---
>  drivers/block/null_blk.h       |  1 +
>  drivers/block/null_blk_zoned.c | 84 +++++++++++++++++++++++-----------
>  2 files changed, 58 insertions(+), 27 deletions(-)
>
> diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
> index daed4a9c3436..b3f4d62e7c38 100644
> --- a/drivers/block/null_blk.h
> +++ b/drivers/block/null_blk.h
> @@ -44,6 +44,7 @@ struct nullb_device {
>          unsigned int nr_zones;
>          struct blk_zone *zones;
>          sector_t zone_size_sects;
> +        spinlock_t zlock;

Could you change that to "zone_lock" to be consistent with the other zone
related field names which all spell out "zone" ?

>
>          unsigned long size; /* device size in MB */
>          unsigned long completion_nsec; /* time in ns to complete a request */
> diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
> index 3d25c9ad2383..04fbf267703a 100644
> --- a/drivers/block/null_blk_zoned.c
> +++ b/drivers/block/null_blk_zoned.c
> @@ -45,6 +45,7 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
>          if (!dev->zones)
>                  return -ENOMEM;
>
> +        spin_lock_init(&dev->zlock);
>          if (dev->zone_nr_conv >= dev->nr_zones) {
>                  dev->zone_nr_conv = dev->nr_zones - 1;
>                  pr_info("changed the number of conventional zones to %u",
> @@ -124,6 +125,7 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
>          nr_zones = min(nr_zones, dev->nr_zones - first_zone);
>          trace_nullb_report_zones(nullb, nr_zones);
>
> +        spin_lock_irq(&dev->zlock);
>          for (i = 0; i < nr_zones; i++) {
>                  /*
>                   * Stacked DM target drivers will remap the zone information by
> @@ -134,10 +136,13 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
>                  memcpy(&zone, &dev->zones[first_zone + i],
>                         sizeof(struct blk_zone));
>                  error = cb(&zone, i, data);

The cb() here may sleep etc. So you cannot have the zone lock around it. Taking
the lock around the memcpy only is enough.

> -                if (error)
> +                if (error) {
> +                        spin_unlock_irq(&dev->zlock);
>                          return error;
> +                }
>          }
>
> +        spin_unlock_irq(&dev->zlock);
>          return nr_zones;
>  }
>
> @@ -147,16 +152,24 @@ size_t null_zone_valid_read_len(struct nullb *nullb,
>          struct nullb_device *dev = nullb->dev;
>          struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
>          unsigned int nr_sectors = len >> SECTOR_SHIFT;
> +        size_t ret = 0;

Please call that valid_len or something more representative of the value.

>
> +        spin_lock_irq(&dev->zlock);
>          /* Read must be below the write pointer position */
>          if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
> -            sector + nr_sectors <= zone->wp)
> -                return len;
> +            sector + nr_sectors <= zone->wp) {
> +                ret = len;
> +                goto out_unlock;
> +        }
>
>          if (sector > zone->wp)
> -                return 0;
> +                goto out_unlock;
> +
> +        ret = (zone->wp - sector) << SECTOR_SHIFT;
>
> -        return (zone->wp - sector) << SECTOR_SHIFT;
> +out_unlock:
> +        spin_unlock_irq(&dev->zlock);
> +        return ret;
>  }
>
>  static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
> @@ -165,17 +178,19 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
>          struct nullb_device *dev = cmd->nq->dev;
>          unsigned int zno = null_zone_no(dev, sector);
>          struct blk_zone *zone = &dev->zones[zno];
> -        blk_status_t ret;
> +        blk_status_t ret = BLK_STS_OK;
>
>          trace_nullb_zone_op(cmd, zno, zone->cond);
>
>          if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
>                  return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
>
> +        spin_lock_irq(&dev->zlock);
>          switch (zone->cond) {
>          case BLK_ZONE_COND_FULL:
>                  /* Cannot write to a full zone */
> -                return BLK_STS_IOERR;
> +                ret = BLK_STS_IOERR;
> +                break;
>          case BLK_ZONE_COND_EMPTY:
>          case BLK_ZONE_COND_IMP_OPEN:
>          case BLK_ZONE_COND_EXP_OPEN:
> @@ -193,27 +208,33 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
>                          else
>                                  cmd->rq->__sector = sector;
>                  } else if (sector != zone->wp) {
> -                        return BLK_STS_IOERR;
> +                        ret = BLK_STS_IOERR;
> +                        break;
>                  }
>
> -                if (zone->wp + nr_sectors > zone->start + zone->capacity)
> -                        return BLK_STS_IOERR;
> +                if (zone->wp + nr_sectors > zone->start + zone->capacity) {
> +                        ret = BLK_STS_IOERR;
> +                        break;
> +                }
>
>                  if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
>                          zone->cond = BLK_ZONE_COND_IMP_OPEN;
>
>                  ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
>                  if (ret != BLK_STS_OK)
> -                        return ret;
> +                        break;
>
>                  zone->wp += nr_sectors;
>                  if (zone->wp == zone->start + zone->capacity)
>                          zone->cond = BLK_ZONE_COND_FULL;
> -                return BLK_STS_OK;
> +                break;
>          default:
>                  /* Invalid zone condition */
> -                return BLK_STS_IOERR;
> +                ret = BLK_STS_IOERR;
>          }
> +
> +        spin_unlock_irq(&dev->zlock);
> +        return ret;
>  }
>
>  static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
> @@ -223,7 +244,9 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
>          unsigned int zone_no = null_zone_no(dev, sector);
>          struct blk_zone *zone = &dev->zones[zone_no];
>          size_t i;
> +        blk_status_t ret = BLK_STS_OK;
>
> +        spin_lock_irq(&dev->zlock);
>          switch (op) {
>          case REQ_OP_ZONE_RESET_ALL:
>                  for (i = 0; i < dev->nr_zones; i++) {
> @@ -234,25 +257,29 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
>                  }
>                  break;
>          case REQ_OP_ZONE_RESET:
> -                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
> -                        return BLK_STS_IOERR;
> +                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
> +                        ret = BLK_STS_IOERR;
> +                        break;
> +                }
>
>                  zone->cond = BLK_ZONE_COND_EMPTY;
>                  zone->wp = zone->start;
>                  break;
>          case REQ_OP_ZONE_OPEN:
> -                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
> -                        return BLK_STS_IOERR;
> -                if (zone->cond == BLK_ZONE_COND_FULL)
> -                        return BLK_STS_IOERR;
> +                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
> +                    zone->cond == BLK_ZONE_COND_FULL) {
> +                        ret = BLK_STS_IOERR;
> +                        break;
> +                }
>
>                  zone->cond = BLK_ZONE_COND_EXP_OPEN;
>                  break;
>          case REQ_OP_ZONE_CLOSE:
> -                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
> -                        return BLK_STS_IOERR;
> -                if (zone->cond == BLK_ZONE_COND_FULL)
> -                        return BLK_STS_IOERR;
> +                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
> +                    zone->cond == BLK_ZONE_COND_FULL) {
> +                        ret = BLK_STS_IOERR;
> +                        break;
> +                }
>
>                  if (zone->wp == zone->start)
>                          zone->cond = BLK_ZONE_COND_EMPTY;
> @@ -260,18 +287,21 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
>                          zone->cond = BLK_ZONE_COND_CLOSED;
>                  break;
>          case REQ_OP_ZONE_FINISH:
> -                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
> -                        return BLK_STS_IOERR;
> +                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
> +                        ret = BLK_STS_IOERR;
> +                        break;
> +                }
>
>                  zone->cond = BLK_ZONE_COND_FULL;
>                  zone->wp = zone->start + zone->len;
>                  break;
>          default:
> -                return BLK_STS_NOTSUPP;
> +                ret = BLK_STS_NOTSUPP;
>          }
>
> +        spin_unlock_irq(&dev->zlock);
>          trace_nullb_zone_op(cmd, zone_no, zone->cond);
> -        return BLK_STS_OK;
> +        return ret;
>  }

I think you can avoid all of these changes by taking the lock around the calls
to null_zone_mgmt() and null_zone_write() in null_process_zoned_cmd(). That will
make the patch a lot smaller and simplify maintenance. And even, I think that
taking the lock on entry to null_process_zoned_cmd() and unlocking on return
should even be simpler since that would cover reads too (valid read len). Only
report zones would need special treatment.

>
>  blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
>

I think we also need this to have a Cc: stable and a "Fixes" tag too.

--
Damien Le Moal
Western Digital Research
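To make the null_report_zones() comment above concrete: a minimal sketch of taking the lock only around the zone snapshot, so that cb() runs unlocked, might look like the loop below. This is a hypothetical rework of the loop in the patch, not code from the thread, and it uses the "zone_lock" name suggested in the review.

```c
        /*
         * Sketch only: hold the zone lock just long enough to take a
         * private snapshot of the zone, then invoke cb() on the copy
         * with the lock dropped, since cb() may sleep.
         */
        for (i = 0; i < nr_zones; i++) {
                spin_lock_irq(&dev->zone_lock);
                memcpy(&zone, &dev->zones[first_zone + i],
                       sizeof(struct blk_zone));
                spin_unlock_irq(&dev->zone_lock);

                error = cb(&zone, i, data);
                if (error)
                        return error;
        }

        return nr_zones;
```

Because cb() only ever sees the private copy, each reported zone stays self-consistent even if a concurrent write moves the write pointer between iterations.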
Thanks for the review. I'll apply this feedback in V2.

On Thu, Sep 24, 2020 at 2:13 PM Damien Le Moal <Damien.LeMoal@wdc.com> wrote:
> [full quote of the review above snipped]
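For reference, the simpler V2 shape suggested in the review (lock on entry to null_process_zoned_cmd(), unlock on return, dropping all the per-case unlock paths) might look roughly like the sketch below. It is a sketch against the v5.9-era dispatch code with the "zone_lock" rename applied; it also glosses over whether null_process_cmd() is safe to call under a spinlock, which a real V2 would need to confirm.

```c
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
                                    sector_t sector, sector_t nr_sectors)
{
        struct nullb_device *dev = cmd->nq->dev;
        blk_status_t sts;

        /*
         * Serialize all zone state access in one place so that
         * null_zone_write(), null_zone_mgmt() and
         * null_zone_valid_read_len() can keep their early returns.
         */
        spin_lock_irq(&dev->zone_lock);
        switch (op) {
        case REQ_OP_WRITE:
        case REQ_OP_ZONE_APPEND:
                sts = null_zone_write(cmd, sector, nr_sectors,
                                      op == REQ_OP_ZONE_APPEND);
                break;
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_RESET_ALL:
        case REQ_OP_ZONE_OPEN:
        case REQ_OP_ZONE_CLOSE:
        case REQ_OP_ZONE_FINISH:
                sts = null_zone_mgmt(cmd, op, sector);
                break;
        default:
                sts = null_process_cmd(cmd, op, sector, nr_sectors);
        }
        spin_unlock_irq(&dev->zone_lock);

        return sts;
}
```

Only null_report_zones() would still need its own treatment, since it is not dispatched through this path.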
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index daed4a9c3436..b3f4d62e7c38 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -44,6 +44,7 @@ struct nullb_device {
         unsigned int nr_zones;
         struct blk_zone *zones;
         sector_t zone_size_sects;
+        spinlock_t zlock;

         unsigned long size; /* device size in MB */
         unsigned long completion_nsec; /* time in ns to complete a request */
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 3d25c9ad2383..04fbf267703a 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -45,6 +45,7 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
         if (!dev->zones)
                 return -ENOMEM;

+        spin_lock_init(&dev->zlock);
         if (dev->zone_nr_conv >= dev->nr_zones) {
                 dev->zone_nr_conv = dev->nr_zones - 1;
                 pr_info("changed the number of conventional zones to %u",
@@ -124,6 +125,7 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
         nr_zones = min(nr_zones, dev->nr_zones - first_zone);
         trace_nullb_report_zones(nullb, nr_zones);

+        spin_lock_irq(&dev->zlock);
         for (i = 0; i < nr_zones; i++) {
                 /*
                  * Stacked DM target drivers will remap the zone information by
@@ -134,10 +136,13 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
                 memcpy(&zone, &dev->zones[first_zone + i],
                        sizeof(struct blk_zone));
                 error = cb(&zone, i, data);
-                if (error)
+                if (error) {
+                        spin_unlock_irq(&dev->zlock);
                         return error;
+                }
         }

+        spin_unlock_irq(&dev->zlock);
         return nr_zones;
 }

@@ -147,16 +152,24 @@ size_t null_zone_valid_read_len(struct nullb *nullb,
         struct nullb_device *dev = nullb->dev;
         struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
         unsigned int nr_sectors = len >> SECTOR_SHIFT;
+        size_t ret = 0;

+        spin_lock_irq(&dev->zlock);
         /* Read must be below the write pointer position */
         if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
-            sector + nr_sectors <= zone->wp)
-                return len;
+            sector + nr_sectors <= zone->wp) {
+                ret = len;
+                goto out_unlock;
+        }

         if (sector > zone->wp)
-                return 0;
+                goto out_unlock;
+
+        ret = (zone->wp - sector) << SECTOR_SHIFT;

-        return (zone->wp - sector) << SECTOR_SHIFT;
+out_unlock:
+        spin_unlock_irq(&dev->zlock);
+        return ret;
 }

 static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
@@ -165,17 +178,19 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
         struct nullb_device *dev = cmd->nq->dev;
         unsigned int zno = null_zone_no(dev, sector);
         struct blk_zone *zone = &dev->zones[zno];
-        blk_status_t ret;
+        blk_status_t ret = BLK_STS_OK;

         trace_nullb_zone_op(cmd, zno, zone->cond);

         if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                 return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);

+        spin_lock_irq(&dev->zlock);
         switch (zone->cond) {
         case BLK_ZONE_COND_FULL:
                 /* Cannot write to a full zone */
-                return BLK_STS_IOERR;
+                ret = BLK_STS_IOERR;
+                break;
         case BLK_ZONE_COND_EMPTY:
         case BLK_ZONE_COND_IMP_OPEN:
         case BLK_ZONE_COND_EXP_OPEN:
@@ -193,27 +208,33 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
                         else
                                 cmd->rq->__sector = sector;
                 } else if (sector != zone->wp) {
-                        return BLK_STS_IOERR;
+                        ret = BLK_STS_IOERR;
+                        break;
                 }

-                if (zone->wp + nr_sectors > zone->start + zone->capacity)
-                        return BLK_STS_IOERR;
+                if (zone->wp + nr_sectors > zone->start + zone->capacity) {
+                        ret = BLK_STS_IOERR;
+                        break;
+                }

                 if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
                         zone->cond = BLK_ZONE_COND_IMP_OPEN;

                 ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
                 if (ret != BLK_STS_OK)
-                        return ret;
+                        break;

                 zone->wp += nr_sectors;
                 if (zone->wp == zone->start + zone->capacity)
                         zone->cond = BLK_ZONE_COND_FULL;
-                return BLK_STS_OK;
+                break;
         default:
                 /* Invalid zone condition */
-                return BLK_STS_IOERR;
+                ret = BLK_STS_IOERR;
         }
+
+        spin_unlock_irq(&dev->zlock);
+        return ret;
 }

 static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
@@ -223,7 +244,9 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
         unsigned int zone_no = null_zone_no(dev, sector);
         struct blk_zone *zone = &dev->zones[zone_no];
         size_t i;
+        blk_status_t ret = BLK_STS_OK;

+        spin_lock_irq(&dev->zlock);
         switch (op) {
         case REQ_OP_ZONE_RESET_ALL:
                 for (i = 0; i < dev->nr_zones; i++) {
@@ -234,25 +257,29 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
                 }
                 break;
         case REQ_OP_ZONE_RESET:
-                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
-                        return BLK_STS_IOERR;
+                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
+                        ret = BLK_STS_IOERR;
+                        break;
+                }

                 zone->cond = BLK_ZONE_COND_EMPTY;
                 zone->wp = zone->start;
                 break;
         case REQ_OP_ZONE_OPEN:
-                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
-                        return BLK_STS_IOERR;
-                if (zone->cond == BLK_ZONE_COND_FULL)
-                        return BLK_STS_IOERR;
+                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
+                    zone->cond == BLK_ZONE_COND_FULL) {
+                        ret = BLK_STS_IOERR;
+                        break;
+                }

                 zone->cond = BLK_ZONE_COND_EXP_OPEN;
                 break;
         case REQ_OP_ZONE_CLOSE:
-                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
-                        return BLK_STS_IOERR;
-                if (zone->cond == BLK_ZONE_COND_FULL)
-                        return BLK_STS_IOERR;
+                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
+                    zone->cond == BLK_ZONE_COND_FULL) {
+                        ret = BLK_STS_IOERR;
+                        break;
+                }

                 if (zone->wp == zone->start)
                         zone->cond = BLK_ZONE_COND_EMPTY;
@@ -260,18 +287,21 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
                         zone->cond = BLK_ZONE_COND_CLOSED;
                 break;
         case REQ_OP_ZONE_FINISH:
-                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
-                        return BLK_STS_IOERR;
+                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
+                        ret = BLK_STS_IOERR;
+                        break;
+                }

                 zone->cond = BLK_ZONE_COND_FULL;
                 zone->wp = zone->start + zone->len;
                 break;
         default:
-                return BLK_STS_NOTSUPP;
+                ret = BLK_STS_NOTSUPP;
         }

+        spin_unlock_irq(&dev->zlock);
         trace_nullb_zone_op(cmd, zone_no, zone->cond);
-        return BLK_STS_OK;
+        return ret;
 }

 blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
Parallel write, read, and zone-mgmt operations accessing/altering zone state
and write-pointer may get into race. Avoid the situation by using a new
spinlock for zoned device.
Concurrent zone-appends (on a zone) returning same write-pointer issue
is also avoided using this lock.

Signed-off-by: Kanchan Joshi <joshi.k@samsung.com>
---
 drivers/block/null_blk.h       |  1 +
 drivers/block/null_blk_zoned.c | 84 +++++++++++++++++++++++-----------
 2 files changed, 58 insertions(+), 27 deletions(-)