From: Huang Rui <ray.huang-5C7GfCeVMHo@public.gmane.org>
To: "Christian König"
	<ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
Subject: Re: [PATCH 7/8] drm/amdgpu: activate paging queue on SDMA v4
Date: Tue, 9 Oct 2018 17:40:14 +0800
Message-ID: <20181009094013.GG8763@hr-amur2>
In-Reply-To: <20181008133521.3237-7-christian.koenig-5C7GfCeVMHo@public.gmane.org>

On Mon, Oct 08, 2018 at 03:35:20PM +0200, Christian König wrote:
> Implement all the necessary stuff to get those extra rings working.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Reviewed-by: Huang Rui <ray.huang@amd.com>

We have a four-queue architecture, but currently, even counting the page queue,
we only use two of them. Is there any use case where we would also need to
activate rlc0/rlc1?
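
For reference, here is a minimal sketch of the per-instance bring-up as I read
it from the sdma_v4_0_start() hunk in this patch, with the idle queues marked.
The sdma_v4_0_rlc_resume() name below is only a hypothetical placeholder for
where rlc0/rlc1 would be started, it does not exist in this series:

	for (i = 0; i < adev->sdma.num_instances; i++) {
		sdma_v4_0_gfx_resume(adev, i);	/* gfx queue, instance[i].ring */
		sdma_v4_0_page_resume(adev, i);	/* paging queue, instance[i].page */
		/* rlc0/rlc1: no resume path yet; a hypothetical
		 * sdma_v4_0_rlc_resume(adev, i) would go here, and
		 * sdma_v4_0_rlc_stop() is still an XXX todo stub. */
	}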

Thanks,
Ray

> ---
>  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 324 ++++++++++++++++++++++++++++-----
>  1 file changed, 274 insertions(+), 50 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> index 55384bad7a70..a362904d73f7 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> @@ -427,6 +427,57 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
>  	}
>  }
>  
> +/**
> + * sdma_v4_0_page_ring_get_wptr - get the current write pointer
> + *
> + * @ring: amdgpu ring pointer
> + *
> + * Get the current wptr from the hardware (VEGA10+).
> + */
> +static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
> +{
> +	struct amdgpu_device *adev = ring->adev;
> +	u64 wptr;
> +
> +	if (ring->use_doorbell) {
> +		/* XXX check if swapping is necessary on BE */
> +		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
> +	} else {
> +		wptr = RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI);
> +		wptr = wptr << 32;
> +		wptr |= RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR);
> +	}
> +
> +	return wptr >> 2;
> +}
> +
> +/**
> + * sdma_v4_0_page_ring_set_wptr - commit the write pointer
> + *
> + * @ring: amdgpu ring pointer
> + *
> + * Write the wptr back to the hardware (VEGA10+).
> + */
> +static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring)
> +{
> +	struct amdgpu_device *adev = ring->adev;
> +
> +	if (ring->use_doorbell) {
> +		u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
> +
> +		/* XXX check if swapping is necessary on BE */
> +		WRITE_ONCE(*wb, (ring->wptr << 2));
> +		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
> +	} else {
> +		uint64_t wptr = ring->wptr << 2;
> +
> +		WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR,
> +			    lower_32_bits(wptr));
> +		WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI,
> +			    upper_32_bits(wptr));
> +	}
> +}
> +
>  static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
>  {
>  	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
> @@ -597,6 +648,35 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
>  	/* XXX todo */
>  }
>  
> +/**
> + * sdma_v4_0_page_stop - stop the page async dma engines
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Stop the page async dma ring buffers (VEGA10).
> + */
> +static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
> +{
> +	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].page;
> +	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].page;
> +	u32 rb_cntl, ib_cntl;
> +	int i;
> +
> +	for (i = 0; i < adev->sdma.num_instances; i++) {
> +		rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
> +		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
> +					RB_ENABLE, 0);
> +		WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
> +		ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
> +		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
> +					IB_ENABLE, 0);
> +		WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
> +	}
> +
> +	sdma0->ready = false;
> +	sdma1->ready = false;
> +}
> +
>  /**
>   * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch
>   *
> @@ -664,6 +744,7 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
>  	if (enable == false) {
>  		sdma_v4_0_gfx_stop(adev);
>  		sdma_v4_0_rlc_stop(adev);
> +		sdma_v4_0_page_stop(adev);
>  	}
>  
>  	for (i = 0; i < adev->sdma.num_instances; i++) {
> @@ -673,6 +754,23 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
>  	}
>  }
>  
> +/**
> + * sdma_v4_0_rb_cntl - get parameters for rb_cntl
> + */
> +static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
> +{
> +	/* Set ring buffer size in dwords */
> +	uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
> +
> +	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
> +#ifdef __BIG_ENDIAN
> +	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
> +	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
> +				RPTR_WRITEBACK_SWAP_ENABLE, 1);
> +#endif
> +	return rb_cntl;
> +}
> +
>  /**
>   * sdma_v4_0_gfx_resume - setup and start the async dma engines
>   *
> @@ -686,7 +784,6 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
>  {
>  	struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
>  	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
> -	u32 rb_bufsz;
>  	u32 wb_offset;
>  	u32 doorbell;
>  	u32 doorbell_offset;
> @@ -694,15 +791,8 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
>  
>  	wb_offset = (ring->rptr_offs * 4);
>  
> -	/* Set ring buffer size in dwords */
> -	rb_bufsz = order_base_2(ring->ring_size / 4);
>  	rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
> -	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
> -#ifdef __BIG_ENDIAN
> -	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
> -	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
> -				RPTR_WRITEBACK_SWAP_ENABLE, 1);
> -#endif
> +	rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
>  	WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
>  
>  	/* Initialize the ring buffer's read and write pointers */
> @@ -717,7 +807,8 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
>  	WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO,
>  	       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
>  
> -	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
> +	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
> +				RPTR_WRITEBACK_ENABLE, 1);
>  
>  	WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE, ring->gpu_addr >> 8);
>  	WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
> @@ -730,13 +821,11 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
>  	doorbell = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL);
>  	doorbell_offset = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET);
>  
> -	if (ring->use_doorbell) {
> -		doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
> -		doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
> -				OFFSET, ring->doorbell_index);
> -	} else {
> -		doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
> -	}
> +	doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE,
> +				 ring->use_doorbell);
> +	doorbell_offset = REG_SET_FIELD(doorbell_offset,
> +					SDMA0_GFX_DOORBELL_OFFSET,
> +					OFFSET, ring->doorbell_index);
>  	WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL, doorbell);
>  	WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET, doorbell_offset);
>  	adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
> @@ -754,10 +843,9 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
>  	WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI,
>  		    upper_32_bits(wptr_gpu_addr));
>  	wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL);
> -	if (amdgpu_sriov_vf(adev))
> -		wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
> -	else
> -		wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
> +	wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
> +				       SDMA0_GFX_RB_WPTR_POLL_CNTL,
> +				       F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
>  	WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
>  
>  	/* enable DMA RB */
> @@ -775,6 +863,99 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
>  	ring->ready = true;
>  }
>  
> +/**
> + * sdma_v4_0_page_resume - setup and start the page async dma engines
> + *
> + * @adev: amdgpu_device pointer
> + * @i: instance to resume
> + *
> + * Set up the page DMA ring buffers and enable them (VEGA10).
> + * Returns 0 for success, error for failure.
> + */
> +static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
> +{
> +	struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
> +	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
> +	u32 wb_offset;
> +	u32 doorbell;
> +	u32 doorbell_offset;
> +	u64 wptr_gpu_addr;
> +
> +	wb_offset = (ring->rptr_offs * 4);
> +
> +	rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
> +	rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
> +	WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
> +
> +	/* Initialize the ring buffer's read and write pointers */
> +	WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR, 0);
> +	WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_HI, 0);
> +	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR, 0);
> +	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_HI, 0);
> +
> +	/* set the wb address whether it's enabled or not */
> +	WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_HI,
> +	       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
> +	WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_LO,
> +	       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
> +
> +	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
> +				RPTR_WRITEBACK_ENABLE, 1);
> +
> +	WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE, ring->gpu_addr >> 8);
> +	WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
> +
> +	ring->wptr = 0;
> +
> +	/* before programming wptr to a smaller value, need to set minor_ptr_update first */
> +	WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 1);
> +
> +	doorbell = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL);
> +	doorbell_offset = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET);
> +
> +	doorbell = REG_SET_FIELD(doorbell, SDMA0_PAGE_DOORBELL, ENABLE,
> +				 ring->use_doorbell);
> +	doorbell_offset = REG_SET_FIELD(doorbell_offset,
> +					SDMA0_PAGE_DOORBELL_OFFSET,
> +					OFFSET, ring->doorbell_index);
> +	WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell);
> +	WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset);
> +	/* TODO: enable doorbell support */
> +	/*adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
> +					      ring->doorbell_index);*/
> +
> +	sdma_v4_0_ring_set_wptr(ring);
> +
> +	/* set minor_ptr_update to 0 after wptr is programmed */
> +	WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0);
> +
> +	/* setup the wptr shadow polling */
> +	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
> +	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO,
> +		    lower_32_bits(wptr_gpu_addr));
> +	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI,
> +		    upper_32_bits(wptr_gpu_addr));
> +	wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL);
> +	wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
> +				       SDMA0_PAGE_RB_WPTR_POLL_CNTL,
> +				       F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
> +	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
> +
> +	/* enable DMA RB */
> +	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, RB_ENABLE, 1);
> +	WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
> +
> +	ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
> +	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_ENABLE, 1);
> +#ifdef __BIG_ENDIAN
> +	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
> +#endif
> +	/* enable DMA IBs */
> +	WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
> +
> +	ring->ready = true;
> +}
> +
>  static void
>  sdma_v4_1_update_power_gating(struct amdgpu_device *adev, bool enable)
>  {
> @@ -932,6 +1113,7 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
>  
>  		WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0);
>  		sdma_v4_0_gfx_resume(adev, i);
> +		sdma_v4_0_page_resume(adev, i);
>  
>  		/* set utc l1 enable flag always to 1 */
>  		temp = RREG32_SDMA(i, mmSDMA0_CNTL);
> @@ -1337,6 +1519,19 @@ static int sdma_v4_0_sw_init(void *handle)
>  				     AMDGPU_SDMA_IRQ_TRAP1);
>  		if (r)
>  			return r;
> +
> +		ring = &adev->sdma.instance[i].page;
> +		ring->ring_obj = NULL;
> +		ring->use_doorbell = false;
> +
> +		sprintf(ring->name, "page%d", i);
> +		r = amdgpu_ring_init(adev, ring, 1024,
> +				     &adev->sdma.trap_irq,
> +				     (i == 0) ?
> +				     AMDGPU_SDMA_IRQ_TRAP0 :
> +				     AMDGPU_SDMA_IRQ_TRAP1);
> +		if (r)
> +			return r;
>  	}
>  
>  	return r;
> @@ -1347,8 +1542,10 @@ static int sdma_v4_0_sw_fini(void *handle)
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>  	int i;
>  
> -	for (i = 0; i < adev->sdma.num_instances; i++)
> +	for (i = 0; i < adev->sdma.num_instances; i++) {
>  		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
> +		amdgpu_ring_fini(&adev->sdma.instance[i].page);
> +	}
>  
>  	for (i = 0; i < adev->sdma.num_instances; i++) {
>  		release_firmware(adev->sdma.instance[i].fw);
> @@ -1462,39 +1659,32 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
>  				      struct amdgpu_irq_src *source,
>  				      struct amdgpu_iv_entry *entry)
>  {
> +	uint32_t instance;
> +
>  	DRM_DEBUG("IH: SDMA trap\n");
>  	switch (entry->client_id) {
>  	case SOC15_IH_CLIENTID_SDMA0:
> -		switch (entry->ring_id) {
> -		case 0:
> -			amdgpu_fence_process(&adev->sdma.instance[0].ring);
> -			break;
> -		case 1:
> -			/* XXX compute */
> -			break;
> -		case 2:
> -			/* XXX compute */
> -			break;
> -		case 3:
> -			/* XXX page queue*/
> -			break;
> -		}
> +		instance = 0;
>  		break;
>  	case SOC15_IH_CLIENTID_SDMA1:
> -		switch (entry->ring_id) {
> -		case 0:
> -			amdgpu_fence_process(&adev->sdma.instance[1].ring);
> -			break;
> -		case 1:
> -			/* XXX compute */
> -			break;
> -		case 2:
> -			/* XXX compute */
> -			break;
> -		case 3:
> -			/* XXX page queue*/
> -			break;
> -		}
> +		instance = 1;
> +		break;
> +	default:
> +		return 0;
> +	}
> +
> +	switch (entry->ring_id) {
> +	case 0:
> +		amdgpu_fence_process(&adev->sdma.instance[instance].ring);
> +		break;
> +	case 1:
> +		/* XXX compute */
> +		break;
> +	case 2:
> +		/* XXX compute */
> +		break;
> +	case 3:
> +		amdgpu_fence_process(&adev->sdma.instance[instance].page);
>  		break;
>  	}
>  	return 0;
> @@ -1722,6 +1912,38 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
>  	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
>  };
>  
> +static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
> +	.type = AMDGPU_RING_TYPE_SDMA,
> +	.align_mask = 0xf,
> +	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
> +	.support_64bit_ptrs = true,
> +	.vmhub = AMDGPU_MMHUB,
> +	.get_rptr = sdma_v4_0_ring_get_rptr,
> +	.get_wptr = sdma_v4_0_page_ring_get_wptr,
> +	.set_wptr = sdma_v4_0_page_ring_set_wptr,
> +	.emit_frame_size =
> +		6 + /* sdma_v4_0_ring_emit_hdp_flush */
> +		3 + /* hdp invalidate */
> +		6 + /* sdma_v4_0_ring_emit_pipeline_sync */
> +		/* sdma_v4_0_ring_emit_vm_flush */
> +		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
> +		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
> +		10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
> +	.emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
> +	.emit_ib = sdma_v4_0_ring_emit_ib,
> +	.emit_fence = sdma_v4_0_ring_emit_fence,
> +	.emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
> +	.emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
> +	.emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
> +	.test_ring = sdma_v4_0_ring_test_ring,
> +	.test_ib = sdma_v4_0_ring_test_ib,
> +	.insert_nop = sdma_v4_0_ring_insert_nop,
> +	.pad_ib = sdma_v4_0_ring_pad_ib,
> +	.emit_wreg = sdma_v4_0_ring_emit_wreg,
> +	.emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
> +	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
> +};
> +
>  static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
>  {
>  	int i;
> @@ -1729,6 +1951,8 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
>  	for (i = 0; i < adev->sdma.num_instances; i++) {
>  		adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
>  		adev->sdma.instance[i].ring.me = i;
> +		adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs;
> +		adev->sdma.instance[i].page.me = i;
>  	}
>  }
>  
> -- 
> 2.14.1
> 