* [drm-msm:v5.11-for-mesa-ci 2/19] drivers/gpu/drm/msm/adreno/a6xx_gpu.c:847:4: error: implicit declaration of function 'writeq'; did you mean 'writeb'?
@ 2021-04-27 21:48 kernel test robot
0 siblings, 0 replies; only message in thread
From: kernel test robot @ 2021-04-27 21:48 UTC (permalink / raw)
To: kbuild-all
[-- Attachment #1: Type: text/plain, Size: 14198 bytes --]
tree: https://gitlab.freedesktop.org/drm/msm.git v5.11-for-mesa-ci
head: 40abe2528d164d0555ec5812bcd125fb398b23cd
commit: 4e413ff4c0c20b86e02666d86aa7e450fc752d45 [2/19] drm/msm/a6xx: make GPU's SMMU context bank available in its aperture.
config: arm-defconfig (attached as .config)
compiler: arm-linux-gnueabi-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
git remote add drm-msm https://gitlab.freedesktop.org/drm/msm.git
git fetch --no-tags drm-msm v5.11-for-mesa-ci
git checkout 4e413ff4c0c20b86e02666d86aa7e450fc752d45
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross W=1 ARCH=arm
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
drivers/gpu/drm/msm/adreno/a6xx_gpu.c: In function 'a6xx_hw_init':
>> drivers/gpu/drm/msm/adreno/a6xx_gpu.c:847:4: error: implicit declaration of function 'writeq'; did you mean 'writeb'? [-Werror=implicit-function-declaration]
847 | writeq(0x48000, reg); /* offset of cb0 from gpu's base */
| ^~~~~~
| writeb
cc1: some warnings being treated as errors
vim +847 drivers/gpu/drm/msm/adreno/a6xx_gpu.c
590
591 #define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
592 A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
593 A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
594 A6XX_RBBM_INT_0_MASK_CP_IB2 | \
595 A6XX_RBBM_INT_0_MASK_CP_IB1 | \
596 A6XX_RBBM_INT_0_MASK_CP_RB | \
597 A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
598 A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
599 A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
600 A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
601 A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
602
603 static int a6xx_hw_init(struct msm_gpu *gpu)
604 {
605 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
606 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
607 int ret;
608
609 /* Make sure the GMU keeps the GPU on while we set it up */
610 a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
611
612 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
613
614 /*
615 * Disable the trusted memory range - we don't actually support secure
616 * memory rendering at this point in time and we don't want to block off
617 * part of the virtual memory space.
618 */
619 gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
620 REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
621 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
622
623 /* Turn on 64 bit addressing for all blocks */
624 gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
625 gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
626 gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
627 gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
628 gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
629 gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
630 gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
631 gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
632 gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
633 gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
634 gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
635 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
636
637 /* enable hardware clockgating */
638 a6xx_set_hwcg(gpu, true);
639
640 /* VBIF/GBIF start*/
641 if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu)) {
642 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
643 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
644 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
645 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
646 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
647 gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
648 } else {
649 gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
650 }
651
652 if (adreno_is_a630(adreno_gpu))
653 gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
654
655 /* Make all blocks contribute to the GPU BUSY perf counter */
656 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
657
658 /* Disable L2 bypass in the UCHE */
659 gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
660 gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
661 gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
662 gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
663 gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
664 gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
665
666 if (!adreno_is_a650(adreno_gpu)) {
667 /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
668 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
669 REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
670
671 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
672 REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
673 0x00100000 + adreno_gpu->gmem - 1);
674 }
675
676 gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
677 gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
678
679 if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu))
680 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
681 else
682 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
683 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
684
685 /* Setting the mem pool size */
686 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
687
688 /* Setting the primFifo thresholds default values */
689 if (adreno_is_a650(adreno_gpu))
690 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300000);
691 else if (adreno_is_a640(adreno_gpu))
692 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200000);
693 else
694 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
695
696 /* Set the AHB default slave response to "ERROR" */
697 gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
698
699 /* Turn on performance counters */
700 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
701
702 /* Select CP0 to always count cycles */
703 gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
704
705 a6xx_set_ubwc_config(gpu);
706
707 /* Enable fault detection */
708 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
709 (1 << 30) | 0x1fffff);
710
711 gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
712
713 /* Set weights for bicubic filtering */
714 if (adreno_is_a650(adreno_gpu)) {
715 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
716 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
717 0x3fe05ff4);
718 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
719 0x3fa0ebee);
720 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
721 0x3f5193ed);
722 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
723 0x3f0243f0);
724 }
725
726 /* Protect registers from the CP */
727 gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
728
729 gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
730 A6XX_PROTECT_RDONLY(0x600, 0x51));
731 gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
732 gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
733 gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
734 gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
735 gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
736 gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
737 gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
738 A6XX_PROTECT_RDONLY(0xfc00, 0x3));
739 gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
740 gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
741 gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
742 gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
743 A6XX_PROTECT_RDONLY(0x0, 0x4f9));
744 gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
745 A6XX_PROTECT_RDONLY(0x501, 0xa));
746 gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
747 A6XX_PROTECT_RDONLY(0x511, 0x44));
748 gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
749 gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
750 gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
751 gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
752 gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
753 A6XX_PROTECT_RW(0xbe20, 0x11f3));
754 gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
755 gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
756 gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
757 gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
758 gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
759 gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
760 A6XX_PROTECT_RDONLY(0x980, 0x4));
761 gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
762
763 /* Enable expanded apriv for targets that support it */
764 if (gpu->hw_apriv) {
765 gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
766 (1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
767 }
768
769 /* Enable interrupts */
770 gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
771
772 ret = adreno_hw_init(gpu);
773 if (ret)
774 goto out;
775
776 ret = a6xx_ucode_init(gpu);
777 if (ret)
778 goto out;
779
780 /* Set the ringbuffer address */
781 gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
782 gpu->rb[0]->iova);
783
784 /* Targets that support extended APRIV can use the RPTR shadow from
785 * hardware but all the other ones need to disable the feature. Targets
786 * that support the WHERE_AM_I opcode can use that instead
787 */
788 if (adreno_gpu->base.hw_apriv)
789 gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT);
790 else
791 gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
792 MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
793
794 /*
795 * Expanded APRIV and targets that support WHERE_AM_I both need a
796 * privileged buffer to store the RPTR shadow
797 */
798
799 if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) {
800 if (!a6xx_gpu->shadow_bo) {
801 a6xx_gpu->shadow = msm_gem_kernel_new_locked(gpu->dev,
802 sizeof(u32) * gpu->nr_rings,
803 MSM_BO_UNCACHED | MSM_BO_MAP_PRIV,
804 gpu->aspace, &a6xx_gpu->shadow_bo,
805 &a6xx_gpu->shadow_iova);
806
807 if (IS_ERR(a6xx_gpu->shadow))
808 return PTR_ERR(a6xx_gpu->shadow);
809 }
810
811 gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
812 REG_A6XX_CP_RB_RPTR_ADDR_HI,
813 shadowptr(a6xx_gpu, gpu->rb[0]));
814 }
815
816 /* Always come up on rb 0 */
817 a6xx_gpu->cur_ring = gpu->rb[0];
818
819 a6xx_gpu->cur_ctx = NULL;
820
821 /* Enable the SQE to start the CP engine */
822 gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
823
824 ret = a6xx_cp_init(gpu);
825 if (ret)
826 goto out;
827
828 /*
829 * Try to load a zap shader into the secure world. If successful
830 * we can use the CP to switch out of secure mode. If not then we
831 * have no recourse but to try to switch ourselves out manually. If we
832 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
833 * be blocked and a permissions violation will soon follow.
834 */
835 ret = a6xx_zap_shader_init(gpu);
836 if (!ret) {
837 OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
838 OUT_RING(gpu->rb[0], 0x00000000);
839
840 a6xx_flush(gpu, gpu->rb[0]);
841 if (!a6xx_idle(gpu, gpu->rb[0]))
842 return -EINVAL;
843 } else if (ret == -ENODEV) {
844 static bool first = true;
845 if (first) {
846 void __iomem *reg = ioremap(0x05060000, 0x1000);
> 847 writeq(0x48000, reg); /* offset of cb0 from gpu's base */
848 iounmap(reg);
849 }
850 /*
851 * This device does not use zap shader (but print a warning
852 * just in case someone got their dt wrong.. hopefully they
853 * have a debug UART to realize the error of their ways...
854 * if you mess this up you are about to crash horribly)
855 */
856 dev_warn_once(gpu->dev->dev,
857 "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
858 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
859 ret = 0;
860 } else {
861 return ret;
862 }
863
864 out:
865 /*
866 * Tell the GMU that we are done touching the GPU and it can start power
867 * management
868 */
869 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
870
871 if (a6xx_gpu->gmu.legacy) {
872 /* Take the GMU out of its special boot mode */
873 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
874 }
875
876 return ret;
877 }
878
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all(a)lists.01.org
[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 54097 bytes --]
^ permalink raw reply [flat|nested] only message in thread
only message in thread, other threads:[~2021-04-27 21:48 UTC | newest]
Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-04-27 21:48 [drm-msm:v5.11-for-mesa-ci 2/19] drivers/gpu/drm/msm/adreno/a6xx_gpu.c:847:4: error: implicit declaration of function 'writeq'; did you mean 'writeb'? kernel test robot
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.