[RFC,1/2] drm/xe: Move mmio read/write functions to xe_mmio.c

Message ID 20230614001315.1552497-2-radhakrishna.sripada@intel.com
State New, archived
Series Add mmio register rw tracing

Commit Message

Sripada, Radhakrishna June 14, 2023, 12:13 a.m. UTC
Move the register read/write APIs to xe_mmio.c to prepare for
adding tracing infrastructure around them. Adding the tracing support
directly in xe_mmio.h interferes with compilation of the display code.

Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
---
 drivers/gpu/drm/xe/xe_mmio.c | 113 ++++++++++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_mmio.h | 129 ++++-------------------------------
 2 files changed, 128 insertions(+), 114 deletions(-)
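
For context, the kind of tracing this move prepares for is a tracepoint
called from the helpers once they live in xe_mmio.c. A hypothetical
sketch follows (the event name, fields, and call site are invented for
illustration; patch 2/2 of the series is not shown here, and the usual
TRACE_SYSTEM/define_trace.h header boilerplate is elided):

	/* xe_mmio_trace.h (hypothetical) */
	#include <linux/tracepoint.h>

	TRACE_EVENT(xe_reg_rw,
		    TP_PROTO(bool write, u32 addr, u64 val, int len),
		    TP_ARGS(write, addr, val, len),

		    TP_STRUCT__entry(
			    __field(u64, val)
			    __field(u32, addr)
			    __field(int, len)
			    __field(bool, write)
		    ),

		    TP_fast_assign(
			    __entry->val = val;
			    __entry->addr = addr;
			    __entry->len = len;
			    __entry->write = write;
		    ),

		    TP_printk("%s reg=0x%x, len=%d, val=(0x%llx)",
			      __entry->write ? "write" : "read",
			      __entry->addr, __entry->len,
			      (unsigned long long)__entry->val)
	);

	/* Each helper in xe_mmio.c could then gain a one-line hook, e.g.: */
	u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
	{
		struct xe_tile *tile = gt_to_tile(gt);
		u32 val;

		if (reg.addr < gt->mmio.adj_limit)
			reg.addr += gt->mmio.adj_offset;

		val = readl(tile->mmio.regs + reg.addr);
		trace_xe_reg_rw(false, reg.addr, val, sizeof(val));

		return val;
	}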

Comments

Matt Roper June 14, 2023, 2:02 a.m. UTC | #1
On Tue, Jun 13, 2023 at 05:13:14PM -0700, Radhakrishna Sripada wrote:
> Move the register read/write APIs to xe_mmio.c to prepare for
> adding tracing infrastructure around them. Adding the tracing support
> directly in xe_mmio.h interferes with compilation of the display code.
> 
> Cc: Lucas De Marchi <lucas.demarchi@intel.com>
> Cc: Matt Roper <matthew.d.roper@intel.com>
> Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
> ---
>  drivers/gpu/drm/xe/xe_mmio.c | 113 ++++++++++++++++++++++++++++++
>  drivers/gpu/drm/xe/xe_mmio.h | 129 ++++-------------------------------
>  2 files changed, 128 insertions(+), 114 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
> index 475b14fe4356..70ad1b6a17a0 100644
> --- a/drivers/gpu/drm/xe/xe_mmio.c
> +++ b/drivers/gpu/drm/xe/xe_mmio.c
> @@ -435,6 +435,119 @@ static const struct xe_reg mmio_read_whitelist[] = {
>  	RING_TIMESTAMP(RENDER_RING_BASE),
>  };
>  
> +inline u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)

We shouldn't have 'inline' on non-static functions in a .c file (and
we generally don't want it on static functions either, since the
compiler usually does a better job of deciding whether or not inlining
would be beneficial).


Matt

> [snip: remainder of quoted patch]
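
For illustration, the change being requested amounts to dropping
'inline' on both sides of the move, i.e. roughly the following (a
minimal sketch using one of the helpers above; not the actual next
revision):

	/* xe_mmio.c: plain external definition; the compiler decides
	 * whether to inline at each call site. */
	u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
	{
		struct xe_tile *tile = gt_to_tile(gt);

		if (reg.addr < gt->mmio.adj_limit)
			reg.addr += gt->mmio.adj_offset;

		return readl(tile->mmio.regs + reg.addr);
	}

	/* xe_mmio.h: plain prototype, no 'inline' qualifier. */
	u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg);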
Sripada, Radhakrishna June 14, 2023, 9:18 p.m. UTC | #2
> -----Original Message-----
> From: Roper, Matthew D <matthew.d.roper@intel.com>
> Sent: Tuesday, June 13, 2023 7:03 PM
> To: Sripada, Radhakrishna <radhakrishna.sripada@intel.com>
> Cc: intel-xe@lists.freedesktop.org; intel-gfx@lists.freedesktop.org; De Marchi,
> Lucas <lucas.demarchi@intel.com>
> Subject: Re: [RFC 1/2] drm/xe: Move mmio read/write functions to xe_mmio.c
> 
> On Tue, Jun 13, 2023 at 05:13:14PM -0700, Radhakrishna Sripada wrote:
> > Move the register read/write APIs to xe_mmio.c to prepare for
> > adding tracing infrastructure around them. Adding the tracing support
> > directly in xe_mmio.h interferes with compilation of the display code.
> >
> > Cc: Lucas De Marchi <lucas.demarchi@intel.com>
> > Cc: Matt Roper <matthew.d.roper@intel.com>
> > Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
> > ---
> >  drivers/gpu/drm/xe/xe_mmio.c | 113 ++++++++++++++++++++++++++++++
> >  drivers/gpu/drm/xe/xe_mmio.h | 129 ++++-------------------------------
> >  2 files changed, 128 insertions(+), 114 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
> > index 475b14fe4356..70ad1b6a17a0 100644
> > --- a/drivers/gpu/drm/xe/xe_mmio.c
> > +++ b/drivers/gpu/drm/xe/xe_mmio.c
> > @@ -435,6 +435,119 @@ static const struct xe_reg mmio_read_whitelist[] = {
> >  	RING_TIMESTAMP(RENDER_RING_BASE),
> >  };
> >
> > +inline u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
> 
> We shouldn't have 'inline' on non-static functions in a .c file (and
> we generally don't want it on static functions either, since the
> compiler usually does a better job of deciding whether or not inlining
> would be beneficial).
> 
Sure, Matt. Will do that in the next rev.

-Radhakrishna(RK) Sripada
> 
> Matt
> [snip: remainder of quoted patch]

Patch

diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 475b14fe4356..70ad1b6a17a0 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -435,6 +435,119 @@ static const struct xe_reg mmio_read_whitelist[] = {
 	RING_TIMESTAMP(RENDER_RING_BASE),
 };
 
+inline u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
+{
+	struct xe_tile *tile = gt_to_tile(gt);
+
+	if (reg.addr < gt->mmio.adj_limit)
+		reg.addr += gt->mmio.adj_offset;
+
+	return readb(tile->mmio.regs + reg.addr);
+}
+
+inline void xe_mmio_write32(struct xe_gt *gt,
+			    struct xe_reg reg, u32 val)
+{
+	struct xe_tile *tile = gt_to_tile(gt);
+
+	if (reg.addr < gt->mmio.adj_limit)
+		reg.addr += gt->mmio.adj_offset;
+
+	writel(val, tile->mmio.regs + reg.addr);
+}
+
+inline u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
+{
+	struct xe_tile *tile = gt_to_tile(gt);
+
+	if (reg.addr < gt->mmio.adj_limit)
+		reg.addr += gt->mmio.adj_offset;
+
+	return readl(tile->mmio.regs + reg.addr);
+}
+
+inline u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr,
+			 u32 set)
+{
+	u32 old, reg_val;
+
+	old = xe_mmio_read32(gt, reg);
+	reg_val = (old & ~clr) | set;
+	xe_mmio_write32(gt, reg, reg_val);
+
+	return old;
+}
+
+inline void xe_mmio_write64(struct xe_gt *gt,
+			    struct xe_reg reg, u64 val)
+{
+	struct xe_tile *tile = gt_to_tile(gt);
+
+	if (reg.addr < gt->mmio.adj_limit)
+		reg.addr += gt->mmio.adj_offset;
+
+	writeq(val, tile->mmio.regs + reg.addr);
+}
+
+inline u64 xe_mmio_read64(struct xe_gt *gt, struct xe_reg reg)
+{
+	struct xe_tile *tile = gt_to_tile(gt);
+
+	if (reg.addr < gt->mmio.adj_limit)
+		reg.addr += gt->mmio.adj_offset;
+
+	return readq(tile->mmio.regs + reg.addr);
+}
+
+inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
+				      struct xe_reg reg, u32 val,
+				      u32 mask, u32 eval)
+{
+	u32 reg_val;
+
+	xe_mmio_write32(gt, reg, val);
+	reg_val = xe_mmio_read32(gt, reg);
+
+	return (reg_val & mask) != eval ? -EINVAL : 0;
+}
+
+inline int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 val,
+			  u32 mask, u32 timeout_us, u32 *out_val,
+			  bool atomic)
+{
+	ktime_t cur = ktime_get_raw();
+	const ktime_t end = ktime_add_us(cur, timeout_us);
+	int ret = -ETIMEDOUT;
+	s64 wait = 10;
+	u32 read;
+
+	for (;;) {
+		read = xe_mmio_read32(gt, reg);
+		if ((read & mask) == val) {
+			ret = 0;
+			break;
+		}
+
+		cur = ktime_get_raw();
+		if (!ktime_before(cur, end))
+			break;
+
+		if (ktime_after(ktime_add_us(cur, wait), end))
+			wait = ktime_us_delta(end, cur);
+
+		if (atomic)
+			udelay(wait);
+		else
+			usleep_range(wait, wait << 1);
+		wait <<= 1;
+	}
+
+	if (out_val)
+		*out_val = read;
+
+	return ret;
+}
+
 int xe_mmio_ioctl(struct drm_device *dev, void *data,
 		  struct drm_file *file)
 {
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index 3c547d78afba..2aa2c01e60dd 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -20,120 +20,21 @@ struct xe_device;
 #define GEN12_LMEM_BAR		2
 
 int xe_mmio_init(struct xe_device *xe);
-
-static inline u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
-{
-	struct xe_tile *tile = gt_to_tile(gt);
-
-	if (reg.addr < gt->mmio.adj_limit)
-		reg.addr += gt->mmio.adj_offset;
-
-	return readb(tile->mmio.regs + reg.addr);
-}
-
-static inline void xe_mmio_write32(struct xe_gt *gt,
-				   struct xe_reg reg, u32 val)
-{
-	struct xe_tile *tile = gt_to_tile(gt);
-
-	if (reg.addr < gt->mmio.adj_limit)
-		reg.addr += gt->mmio.adj_offset;
-
-	writel(val, tile->mmio.regs + reg.addr);
-}
-
-static inline u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
-{
-	struct xe_tile *tile = gt_to_tile(gt);
-
-	if (reg.addr < gt->mmio.adj_limit)
-		reg.addr += gt->mmio.adj_offset;
-
-	return readl(tile->mmio.regs + reg.addr);
-}
-
-static inline u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr,
-				u32 set)
-{
-	u32 old, reg_val;
-
-	old = xe_mmio_read32(gt, reg);
-	reg_val = (old & ~clr) | set;
-	xe_mmio_write32(gt, reg, reg_val);
-
-	return old;
-}
-
-static inline void xe_mmio_write64(struct xe_gt *gt,
-				   struct xe_reg reg, u64 val)
-{
-	struct xe_tile *tile = gt_to_tile(gt);
-
-	if (reg.addr < gt->mmio.adj_limit)
-		reg.addr += gt->mmio.adj_offset;
-
-	writeq(val, tile->mmio.regs + reg.addr);
-}
-
-static inline u64 xe_mmio_read64(struct xe_gt *gt, struct xe_reg reg)
-{
-	struct xe_tile *tile = gt_to_tile(gt);
-
-	if (reg.addr < gt->mmio.adj_limit)
-		reg.addr += gt->mmio.adj_offset;
-
-	return readq(tile->mmio.regs + reg.addr);
-}
-
-static inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
-					     struct xe_reg reg, u32 val,
-					     u32 mask, u32 eval)
-{
-	u32 reg_val;
-
-	xe_mmio_write32(gt, reg, val);
-	reg_val = xe_mmio_read32(gt, reg);
-
-	return (reg_val & mask) != eval ? -EINVAL : 0;
-}
-
-static inline int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 val,
-				 u32 mask, u32 timeout_us, u32 *out_val,
-				 bool atomic)
-{
-	ktime_t cur = ktime_get_raw();
-	const ktime_t end = ktime_add_us(cur, timeout_us);
-	int ret = -ETIMEDOUT;
-	s64 wait = 10;
-	u32 read;
-
-	for (;;) {
-		read = xe_mmio_read32(gt, reg);
-		if ((read & mask) == val) {
-			ret = 0;
-			break;
-		}
-
-		cur = ktime_get_raw();
-		if (!ktime_before(cur, end))
-			break;
-
-		if (ktime_after(ktime_add_us(cur, wait), end))
-			wait = ktime_us_delta(end, cur);
-
-		if (atomic)
-			udelay(wait);
-		else
-			usleep_range(wait, wait << 1);
-		wait <<= 1;
-	}
-
-	if (out_val)
-		*out_val = read;
-
-	return ret;
-}
-
+inline u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg);
+inline void xe_mmio_write32(struct xe_gt *gt,
+			    struct xe_reg reg, u32 val);
+inline u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg);
+inline u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr,
+			 u32 set);
+inline void xe_mmio_write64(struct xe_gt *gt,
+			    struct xe_reg reg, u64 val);
+inline u64 xe_mmio_read64(struct xe_gt *gt, struct xe_reg reg);
+inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
+				      struct xe_reg reg, u32 val,
+				      u32 mask, u32 eval);
+inline int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 val,
+			  u32 mask, u32 timeout_us, u32 *out_val,
+			  bool atomic);
 int xe_mmio_ioctl(struct drm_device *dev, void *data,
 		  struct drm_file *file);
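
As a usage note, the read-modify-write and wait helpers above compose
in the usual enable-then-poll pattern. A hypothetical caller follows
(the register and bit names are invented for illustration):

	/* Hypothetical caller: set an (invented) enable bit, then poll an
	 * (invented) status register for its ack bit. */
	static int example_enable_and_wait(struct xe_gt *gt)
	{
		u32 val;

		/* Clear nothing, set the enable bit; returns the old value. */
		xe_mmio_rmw32(gt, EXAMPLE_CTL, 0, EXAMPLE_CTL_ENABLE);

		/* Wait up to 100 us for (status & ACK) == ACK; the helper may
		 * sleep between reads since atomic == false. */
		return xe_mmio_wait32(gt, EXAMPLE_STATUS, EXAMPLE_ACK,
				      EXAMPLE_ACK, 100, &val, false);
	}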