Message ID | 20181128031148.357-2-suy.fnst@cn.fujitsu.com (mailing list archive) |
---|---|
State | New, archived |
Series | btrfs: implementation of priority aware allocator |
On 28.11.18 г. 5:11 ч., Su Yue wrote:
> To implement priority aware allocator, this patch:
> Introduces struct btrfs_priority_tree which contains block groups
> in same level.
> Adds member priority to struct btrfs_block_group_cache and pointer
> points to the priority tree it's located.
>
> Adds member priority_trees to struct btrfs_space_info to represents
> priority trees in different raid types.
>
> Signed-off-by: Su Yue <suy.fnst@cn.fujitsu.com>
> ---
>  fs/btrfs/ctree.h | 24 ++++++++++++++++++++++++
>  1 file changed, 24 insertions(+)
>
> diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
> index e62824cae00a..5c4651d8a524 100644
> --- a/fs/btrfs/ctree.h
> +++ b/fs/btrfs/ctree.h
> @@ -437,6 +437,8 @@ struct btrfs_space_info {
>  	struct rw_semaphore groups_sem;
>  	/* for block groups in our same type */
>  	struct list_head block_groups[BTRFS_NR_RAID_TYPES];
> +	/* for priority trees in our same type */
> +	struct rb_root priority_trees[BTRFS_NR_RAID_TYPES];
>  	wait_queue_head_t wait;
>
>  	struct kobject kobj;
> @@ -558,6 +560,21 @@ struct btrfs_full_stripe_locks_tree {
>  	struct mutex lock;
>  };
>
> +/*
> + * Tree to record all block_groups in same priority level.
> + * Only used in priority aware allocator.
> + */
> +struct btrfs_priority_tree {
> +	/* protected by groups_sem */
> +	struct rb_root block_groups;
> +	struct rw_semaphore groups_sem;
> +
> +	/* for different level priority trees in same index*/
> +	struct rb_node node;
> +
> +	int level;

Do you ever expect the level to be a negative number? If not then use
u8/u32 depending on the range of levels you expect.

> +};
> +
>  struct btrfs_block_group_cache {
>  	struct btrfs_key key;
>  	struct btrfs_block_group_item item;
> @@ -571,6 +588,8 @@ struct btrfs_block_group_cache {
>  	u64 flags;
>  	u64 cache_generation;
>
> +	/* It's used only when priority aware allocator is enabled. */
> +	long priority;

What's the range of priorities you are expecting, wouldn't an u8 be
sufficient, that gives us 256 priorities?

>  	/*
>  	 * If the free space extent count exceeds this number, convert the block
>  	 * group to bitmaps.
> @@ -616,6 +635,9 @@ struct btrfs_block_group_cache {
>  	/* for block groups in the same raid type */
>  	struct list_head list;
>
> +	/* for block groups in the same priority level */
> +	struct rb_node node;
> +
>  	/* usage count */
>  	atomic_t count;
>
> @@ -670,6 +692,8 @@ struct btrfs_block_group_cache {
>
>  	/* Record locked full stripes for RAID5/6 block group */
>  	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
> +
> +	struct btrfs_priority_tree *priority_tree;
>  };
>
>  /* delayed seq elem */
>
On 11/28/18 4:24 PM, Nikolay Borisov wrote:
>
>
> On 28.11.18 г. 5:11 ч., Su Yue wrote:
>> To implement priority aware allocator, this patch:
>> Introduces struct btrfs_priority_tree which contains block groups
>> in same level.
>> Adds member priority to struct btrfs_block_group_cache and pointer
>> points to the priority tree it's located.
>>
>> Adds member priority_trees to struct btrfs_space_info to represents
>> priority trees in different raid types.
>>
>> Signed-off-by: Su Yue <suy.fnst@cn.fujitsu.com>
>> ---
>>  fs/btrfs/ctree.h | 24 ++++++++++++++++++++++++
>>  1 file changed, 24 insertions(+)
>>
>> diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
>> index e62824cae00a..5c4651d8a524 100644
>> --- a/fs/btrfs/ctree.h
>> +++ b/fs/btrfs/ctree.h
>> @@ -437,6 +437,8 @@ struct btrfs_space_info {
>>  	struct rw_semaphore groups_sem;
>>  	/* for block groups in our same type */
>>  	struct list_head block_groups[BTRFS_NR_RAID_TYPES];
>> +	/* for priority trees in our same type */
>> +	struct rb_root priority_trees[BTRFS_NR_RAID_TYPES];
>>  	wait_queue_head_t wait;
>>
>>  	struct kobject kobj;
>> @@ -558,6 +560,21 @@ struct btrfs_full_stripe_locks_tree {
>>  	struct mutex lock;
>>  };
>>
>> +/*
>> + * Tree to record all block_groups in same priority level.
>> + * Only used in priority aware allocator.
>> + */
>> +struct btrfs_priority_tree {
>> +	/* protected by groups_sem */
>> +	struct rb_root block_groups;
>> +	struct rw_semaphore groups_sem;
>> +
>> +	/* for different level priority trees in same index*/
>> +	struct rb_node node;
>> +
>> +	int level;
>
> Do you ever expect the level to be a negative number? If not then use
> u8/u32 depending on the range of levels you expect.
>

Indeed, level is not expected to be negative. u8 is more proper.

>> +};
>> +
>>  struct btrfs_block_group_cache {
>>  	struct btrfs_key key;
>>  	struct btrfs_block_group_item item;
>> @@ -571,6 +588,8 @@ struct btrfs_block_group_cache {
>>  	u64 flags;
>>  	u64 cache_generation;
>>
>> +	/* It's used only when priority aware allocator is enabled. */
>> +	long priority;
>
> What's the range of priorities you are expecting, wouldn't an u8 be
> sufficient, that gives us 256 priorities?
>

The 6th patch introduces three special priorities.
That's what I called dirty codes.

Thanks,
Su

>>  	/*
>>  	 * If the free space extent count exceeds this number, convert the block
>>  	 * group to bitmaps.
>> @@ -616,6 +635,9 @@ struct btrfs_block_group_cache {
>>  	/* for block groups in the same raid type */
>>  	struct list_head list;
>>
>> +	/* for block groups in the same priority level */
>> +	struct rb_node node;
>> +
>>  	/* usage count */
>>  	atomic_t count;
>>
>> @@ -670,6 +692,8 @@ struct btrfs_block_group_cache {
>>
>>  	/* Record locked full stripes for RAID5/6 block group */
>>  	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
>> +
>> +	struct btrfs_priority_tree *priority_tree;
>>  };
>>
>>  /* delayed seq elem */
>>
>
>
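The review above concerns the width of the level and priority fields; to make the nesting of the new structures concrete, below is a rough sketch of how a priority tree for a given level could be looked up in the per-raid-type rb_root that this patch adds to btrfs_space_info. The helper name find_priority_tree is hypothetical and the locking is an assumption (the struct comments only say the trees are protected by groups_sem); the real lookup code presumably lands in later patches of this series.

/*
 * Illustrative sketch only, not code from this series.  Walk the rb-tree
 * of priority trees for one raid index and return the tree holding block
 * groups of the given level, or NULL if no such tree exists.  Assumes the
 * caller already holds sinfo->groups_sem.
 */
static struct btrfs_priority_tree *
find_priority_tree(struct btrfs_space_info *sinfo, int index, int level)
{
	struct rb_node *n = sinfo->priority_trees[index].rb_node;

	while (n) {
		struct btrfs_priority_tree *pt;

		pt = rb_entry(n, struct btrfs_priority_tree, node);
		if (level < pt->level)
			n = n->rb_left;
		else if (level > pt->level)
			n = n->rb_right;
		else
			return pt;
	}
	return NULL;
}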
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index e62824cae00a..5c4651d8a524 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -437,6 +437,8 @@ struct btrfs_space_info {
 	struct rw_semaphore groups_sem;
 	/* for block groups in our same type */
 	struct list_head block_groups[BTRFS_NR_RAID_TYPES];
+	/* for priority trees in our same type */
+	struct rb_root priority_trees[BTRFS_NR_RAID_TYPES];
 	wait_queue_head_t wait;

 	struct kobject kobj;
@@ -558,6 +560,21 @@ struct btrfs_full_stripe_locks_tree {
 	struct mutex lock;
 };

+/*
+ * Tree to record all block_groups in same priority level.
+ * Only used in priority aware allocator.
+ */
+struct btrfs_priority_tree {
+	/* protected by groups_sem */
+	struct rb_root block_groups;
+	struct rw_semaphore groups_sem;
+
+	/* for different level priority trees in same index*/
+	struct rb_node node;
+
+	int level;
+};
+
 struct btrfs_block_group_cache {
 	struct btrfs_key key;
 	struct btrfs_block_group_item item;
@@ -571,6 +588,8 @@ struct btrfs_block_group_cache {
 	u64 flags;
 	u64 cache_generation;

+	/* It's used only when priority aware allocator is enabled. */
+	long priority;
 	/*
 	 * If the free space extent count exceeds this number, convert the block
 	 * group to bitmaps.
@@ -616,6 +635,9 @@ struct btrfs_block_group_cache {
 	/* for block groups in the same raid type */
 	struct list_head list;

+	/* for block groups in the same priority level */
+	struct rb_node node;
+
 	/* usage count */
 	atomic_t count;

@@ -670,6 +692,8 @@ struct btrfs_block_group_cache {

 	/* Record locked full stripes for RAID5/6 block group */
 	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
+
+	struct btrfs_priority_tree *priority_tree;
 };

 /* delayed seq elem */
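The hunks above only declare the new fields. As a companion illustration (again not code from this series: the helper name link_block_group_priority and the choice of key.objectid as the sort key are assumptions), a block group could be attached to its priority tree through the new node and priority_tree members roughly like this:

/*
 * Illustrative sketch only.  Insert a block group into the rb-tree of the
 * given priority tree, ordered by the block group's start offset
 * (key.objectid), and record the back-pointer introduced by this patch.
 */
static void link_block_group_priority(struct btrfs_priority_tree *pt,
				      struct btrfs_block_group_cache *cache)
{
	struct rb_node **p = &pt->block_groups.rb_node;
	struct rb_node *parent = NULL;

	down_write(&pt->groups_sem);
	while (*p) {
		struct btrfs_block_group_cache *entry;

		parent = *p;
		entry = rb_entry(parent, struct btrfs_block_group_cache, node);
		if (cache->key.objectid < entry->key.objectid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&cache->node, parent, p);
	rb_insert_color(&cache->node, &pt->block_groups);
	cache->priority_tree = pt;
	up_write(&pt->groups_sem);
}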
To implement the priority aware allocator, this patch:

Introduces struct btrfs_priority_tree, which contains the block groups
of the same priority level.

Adds the member priority to struct btrfs_block_group_cache, plus a
pointer to the priority tree the block group belongs to.

Adds the member priority_trees to struct btrfs_space_info to represent
the priority trees of the different raid types.

Signed-off-by: Su Yue <suy.fnst@cn.fujitsu.com>
---
 fs/btrfs/ctree.h | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)