@@ -1405,6 +1405,15 @@ struct lu_kmem_descr {
extern u32 lu_context_tags_default;
extern u32 lu_session_tags_default;
+/* bitflags used in rr / qos allocation */
+enum lq_flag {
+ LQ_DIRTY = 0, /* recalc qos data */
+ LQ_SAME_SPACE, /* the OSTs all have approx.
+ * the same space avail
+ */
+ LQ_RESET, /* zero current penalties */
+};
+
/* round-robin QoS data for LOD/LMV */
struct lu_qos_rr {
spinlock_t lqr_alloc; /* protect allocation index */
@@ -1412,7 +1421,7 @@ struct lu_qos_rr {
u32 lqr_offset_idx;/* aliasing for start_idx */
int lqr_start_count;/* reseed counter */
struct lu_tgt_pool lqr_pool; /* round-robin optimized list */
- unsigned long lqr_dirty:1; /* recalc round-robin list */
+ unsigned long lqr_flags;
};
/* QoS data per MDS/OSS */
@@ -1482,11 +1491,7 @@ struct lu_qos {
unsigned int lq_prio_free; /* priority for free space */
unsigned int lq_threshold_rr;/* priority for rr */
struct lu_qos_rr lq_rr; /* round robin qos data */
- unsigned long lq_dirty:1, /* recalc qos data */
- lq_same_space:1,/* the servers all have approx.
- * the same space avail
- */
- lq_reset:1; /* zero current penalties */
+ unsigned long lq_flags;
};
struct lu_tgt_descs {
@@ -1303,7 +1303,7 @@ static int lmv_statfs_update(void *cookie, int rc)
tgt->ltd_statfs = *osfs;
tgt->ltd_statfs_age = ktime_get_seconds();
spin_unlock(&lmv->lmv_lock);
- lmv->lmv_qos.lq_dirty = 1;
+ set_bit(LQ_DIRTY, &lmv->lmv_qos.lq_flags);
}
return rc;
@@ -133,8 +133,8 @@ static ssize_t qos_prio_free_store(struct kobject *kobj,
return -EINVAL;
lmv->lmv_qos.lq_prio_free = (val << 8) / 100;
- lmv->lmv_qos.lq_dirty = 1;
- lmv->lmv_qos.lq_reset = 1;
+ set_bit(LQ_DIRTY, &lmv->lmv_qos.lq_flags);
+ set_bit(LQ_RESET, &lmv->lmv_qos.lq_flags);
return count;
}
@@ -170,7 +170,7 @@ static ssize_t qos_threshold_rr_store(struct kobject *kobj,
return -EINVAL;
lmv->lmv_qos.lq_threshold_rr = (val << 8) / 100;
- lmv->lmv_qos.lq_dirty = 1;
+ set_bit(LQ_DIRTY, &lmv->lmv_qos.lq_flags);
return count;
}
@@ -80,7 +80,7 @@ u64 lu_prandom_u64_max(u64 ep_ro)
void lu_qos_rr_init(struct lu_qos_rr *lqr)
{
spin_lock_init(&lqr->lqr_alloc);
- lqr->lqr_dirty = 1;
+ set_bit(LQ_DIRTY, &lqr->lqr_flags);
}
EXPORT_SYMBOL(lu_qos_rr_init);
@@ -158,9 +158,8 @@ int lu_qos_add_tgt(struct lu_qos *qos, struct lu_tgt_desc *tgt)
*/
list_add_tail(&svr->lsq_svr_list, &tempsvr->lsq_svr_list);
- qos->lq_dirty = 1;
- qos->lq_rr.lqr_dirty = 1;
-
+ set_bit(LQ_DIRTY, &qos->lq_flags);
+ set_bit(LQ_DIRTY, &qos->lq_rr.lqr_flags);
out:
up_write(&qos->lq_rw_sem);
return rc;
@@ -200,8 +199,8 @@ static int lu_qos_del_tgt(struct lu_qos *qos, struct lu_tgt_desc *ltd)
kfree(svr);
}
- qos->lq_dirty = 1;
- qos->lq_rr.lqr_dirty = 1;
+ set_bit(LQ_DIRTY, &qos->lq_flags);
+ set_bit(LQ_DIRTY, &qos->lq_rr.lqr_flags);
out:
up_write(&qos->lq_rw_sem);
return rc;
@@ -273,8 +272,8 @@ int lu_tgt_descs_init(struct lu_tgt_descs *ltd, bool is_mdt)
/* Set up allocation policy (QoS and RR) */
INIT_LIST_HEAD(&ltd->ltd_qos.lq_svr_list);
init_rwsem(&ltd->ltd_qos.lq_rw_sem);
- ltd->ltd_qos.lq_dirty = 1;
- ltd->ltd_qos.lq_reset = 1;
+ set_bit(LQ_DIRTY, &ltd->ltd_qos.lq_flags);
+ set_bit(LQ_RESET, &ltd->ltd_qos.lq_flags);
/* Default priority is toward free space balance */
ltd->ltd_qos.lq_prio_free = 232;
/* Default threshold for rr (roughly 17%) */
@@ -416,7 +415,8 @@ void ltd_del_tgt(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt)
*/
bool ltd_qos_is_usable(struct lu_tgt_descs *ltd)
{
- if (!ltd->ltd_qos.lq_dirty && ltd->ltd_qos.lq_same_space)
+ if (!test_bit(LQ_DIRTY, &ltd->ltd_qos.lq_flags) &&
+ test_bit(LQ_SAME_SPACE, &ltd->ltd_qos.lq_flags))
return false;
if (ltd->ltd_lov_desc.ld_active_tgt_count < 2)
@@ -456,7 +456,7 @@ int ltd_qos_penalties_calc(struct lu_tgt_descs *ltd)
time64_t now, age;
int rc;
- if (!qos->lq_dirty) {
+ if (!test_bit(LQ_DIRTY, &qos->lq_flags)) {
rc = 0;
goto out;
}
@@ -531,7 +531,8 @@ int ltd_qos_penalties_calc(struct lu_tgt_descs *ltd)
tgt->ltd_qos.ltq_penalty_per_obj >>= 1;
age = (now - tgt->ltd_qos.ltq_used) >> 3;
- if (qos->lq_reset || age > 32 * desc->ld_qos_maxage)
+ if (test_bit(LQ_RESET, &qos->lq_flags) ||
+ age > 32 * desc->ld_qos_maxage)
tgt->ltd_qos.ltq_penalty = 0;
else if (age > desc->ld_qos_maxage)
/* Decay tgt penalty. */
@@ -566,31 +567,32 @@ int ltd_qos_penalties_calc(struct lu_tgt_descs *ltd)
svr->lsq_penalty_per_obj >>= 1;
age = (now - svr->lsq_used) >> 3;
- if (qos->lq_reset || age > 32 * desc->ld_qos_maxage)
+ if (test_bit(LQ_RESET, &qos->lq_flags) ||
+ age > 32 * desc->ld_qos_maxage)
svr->lsq_penalty = 0;
else if (age > desc->ld_qos_maxage)
/* Decay server penalty. */
svr->lsq_penalty >>= age / desc->ld_qos_maxage;
}
- qos->lq_dirty = 0;
- qos->lq_reset = 0;
+ clear_bit(LQ_DIRTY, &qos->lq_flags);
+ clear_bit(LQ_RESET, &qos->lq_flags);
/*
* If each tgt has almost same free space, do rr allocation for better
* creation performance
*/
- qos->lq_same_space = 0;
+ clear_bit(LQ_SAME_SPACE, &qos->lq_flags);
if ((ba_max * (256 - qos->lq_threshold_rr)) >> 8 < ba_min &&
(ia_max * (256 - qos->lq_threshold_rr)) >> 8 < ia_min) {
- qos->lq_same_space = 1;
+ set_bit(LQ_SAME_SPACE, &qos->lq_flags);
/* Reset weights for the next time we enter qos mode */
- qos->lq_reset = 1;
+ set_bit(LQ_RESET, &qos->lq_flags);
}
rc = 0;
out:
- if (!rc && qos->lq_same_space)
+ if (!rc && test_bit(LQ_SAME_SPACE, &qos->lq_flags))
return -EAGAIN;
return rc;