* [anholt:mesa-ci-5.12rc5 2/10] drivers/gpu/drm/msm/adreno/a6xx_gpu.c:899:4: error: implicit declaration of function 'writeq'; did you mean 'writeb'?
@ 2021-04-02 0:58 kernel test robot
0 siblings, 0 replies; only message in thread
From: kernel test robot @ 2021-04-02 0:58 UTC (permalink / raw)
To: kbuild-all
[-- Attachment #1: Type: text/plain, Size: 14257 bytes --]
tree: https://github.com/anholt/linux mesa-ci-5.12rc5
head: 0e046f9f9d98c9c73a74e66b5178780c007de395
commit: 4450b885f797433a9621927cd07c1439f1f4c55e [2/10] drm/msm/a6xx: make GPU's SMMU context bank available in its aperture.
config: arm-defconfig (attached as .config)
compiler: arm-linux-gnueabi-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/anholt/linux/commit/4450b885f797433a9621927cd07c1439f1f4c55e
git remote add anholt https://github.com/anholt/linux
git fetch --no-tags anholt mesa-ci-5.12rc5
git checkout 4450b885f797433a9621927cd07c1439f1f4c55e
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=arm
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
drivers/gpu/drm/msm/adreno/a6xx_gpu.c: In function 'a6xx_hw_init':
>> drivers/gpu/drm/msm/adreno/a6xx_gpu.c:899:4: error: implicit declaration of function 'writeq'; did you mean 'writeb'? [-Werror=implicit-function-declaration]
899 | writeq(0x48000, reg); /* offset of cb0 from gpu's base */
| ^~~~~~
| writeb
cc1: some warnings being treated as errors
vim +899 drivers/gpu/drm/msm/adreno/a6xx_gpu.c
642
643 #define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
644 A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
645 A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
646 A6XX_RBBM_INT_0_MASK_CP_IB2 | \
647 A6XX_RBBM_INT_0_MASK_CP_IB1 | \
648 A6XX_RBBM_INT_0_MASK_CP_RB | \
649 A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
650 A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
651 A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
652 A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
653 A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
654
655 static int a6xx_hw_init(struct msm_gpu *gpu)
656 {
657 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
658 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
659 int ret;
660
661 /* Make sure the GMU keeps the GPU on while we set it up */
662 a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
663
664 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
665
666 /*
667 * Disable the trusted memory range - we don't actually support secure
668 * memory rendering at this point in time and we don't want to block off
669 * part of the virtual memory space.
670 */
671 gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
672 REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
673 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
674
675 /* Turn on 64 bit addressing for all blocks */
676 gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
677 gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
678 gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
679 gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
680 gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
681 gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
682 gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
683 gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
684 gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
685 gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
686 gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
687 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
688
689 /* enable hardware clockgating */
690 a6xx_set_hwcg(gpu, true);
691
692 /* VBIF/GBIF start*/
693 if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu)) {
694 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
695 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
696 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
697 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
698 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
699 gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
700 } else {
701 gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
702 }
703
704 if (adreno_is_a630(adreno_gpu))
705 gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
706
707 /* Make all blocks contribute to the GPU BUSY perf counter */
708 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
709
710 /* Disable L2 bypass in the UCHE */
711 gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
712 gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
713 gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
714 gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
715 gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
716 gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
717
718 if (!adreno_is_a650(adreno_gpu)) {
719 /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
720 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
721 REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
722
723 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
724 REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
725 0x00100000 + adreno_gpu->gmem - 1);
726 }
727
728 gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
729 gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
730
731 if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu))
732 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
733 else
734 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
735 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
736
737 /* Setting the mem pool size */
738 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
739
740 /* Setting the primFifo thresholds default values */
741 if (adreno_is_a650(adreno_gpu))
742 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300000);
743 else if (adreno_is_a640(adreno_gpu))
744 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200000);
745 else
746 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
747
748 /* Set the AHB default slave response to "ERROR" */
749 gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
750
751 /* Turn on performance counters */
752 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
753
754 /* Select CP0 to always count cycles */
755 gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
756
757 a6xx_set_ubwc_config(gpu);
758
759 /* Enable fault detection */
760 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
761 (1 << 30) | 0x1fffff);
762
763 gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
764
765 /* Set weights for bicubic filtering */
766 if (adreno_is_a650(adreno_gpu)) {
767 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
768 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
769 0x3fe05ff4);
770 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
771 0x3fa0ebee);
772 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
773 0x3f5193ed);
774 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
775 0x3f0243f0);
776 }
777
778 /* Protect registers from the CP */
779 gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
780
781 gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
782 A6XX_PROTECT_RDONLY(0x600, 0x51));
783 gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
784 gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
785 gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
786 gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
787 gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
788 gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
789 gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
790 A6XX_PROTECT_RDONLY(0xfc00, 0x3));
791 gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
792 gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
793 gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
794 gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
795 A6XX_PROTECT_RDONLY(0x0, 0x4f9));
796 gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
797 A6XX_PROTECT_RDONLY(0x501, 0xa));
798 gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
799 A6XX_PROTECT_RDONLY(0x511, 0x44));
800 gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
801 gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
802 gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
803 gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
804 gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
805 A6XX_PROTECT_RW(0xbe20, 0x11f3));
806 gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
807 gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
808 gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
809 gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
810 gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
811 gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
812 A6XX_PROTECT_RDONLY(0x980, 0x4));
813 gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
814
815 /* Enable expanded apriv for targets that support it */
816 if (gpu->hw_apriv) {
817 gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
818 (1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
819 }
820
821 /* Enable interrupts */
822 gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
823
824 ret = adreno_hw_init(gpu);
825 if (ret)
826 goto out;
827
828 ret = a6xx_ucode_init(gpu);
829 if (ret)
830 goto out;
831
832 /* Set the ringbuffer address */
833 gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
834 gpu->rb[0]->iova);
835
836 /* Targets that support extended APRIV can use the RPTR shadow from
837 * hardware but all the other ones need to disable the feature. Targets
838 * that support the WHERE_AM_I opcode can use that instead
839 */
840 if (adreno_gpu->base.hw_apriv)
841 gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT);
842 else
843 gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
844 MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
845
846 /*
847 * Expanded APRIV and targets that support WHERE_AM_I both need a
848 * privileged buffer to store the RPTR shadow
849 */
850
851 if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) {
852 if (!a6xx_gpu->shadow_bo) {
853 a6xx_gpu->shadow = msm_gem_kernel_new_locked(gpu->dev,
854 sizeof(u32) * gpu->nr_rings,
855 MSM_BO_UNCACHED | MSM_BO_MAP_PRIV,
856 gpu->aspace, &a6xx_gpu->shadow_bo,
857 &a6xx_gpu->shadow_iova);
858
859 if (IS_ERR(a6xx_gpu->shadow))
860 return PTR_ERR(a6xx_gpu->shadow);
861 }
862
863 gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
864 REG_A6XX_CP_RB_RPTR_ADDR_HI,
865 shadowptr(a6xx_gpu, gpu->rb[0]));
866 }
867
868 /* Always come up on rb 0 */
869 a6xx_gpu->cur_ring = gpu->rb[0];
870
871 a6xx_gpu->cur_ctx = NULL;
872
873 /* Enable the SQE to start the CP engine */
874 gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
875
876 ret = a6xx_cp_init(gpu);
877 if (ret)
878 goto out;
879
880 /*
881 * Try to load a zap shader into the secure world. If successful
882 * we can use the CP to switch out of secure mode. If not then we
883 * have no recourse but to try to switch ourselves out manually. If we
884 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
885 * be blocked and a permissions violation will soon follow.
886 */
887 ret = a6xx_zap_shader_init(gpu);
888 if (!ret) {
889 OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
890 OUT_RING(gpu->rb[0], 0x00000000);
891
892 a6xx_flush(gpu, gpu->rb[0]);
893 if (!a6xx_idle(gpu, gpu->rb[0]))
894 return -EINVAL;
895 } else if (ret == -ENODEV) {
896 static bool first = true;
897 if (first) {
898 void __iomem *reg = ioremap(0x05060000, 0x1000);
> 899 writeq(0x48000, reg); /* offset of cb0 from gpu's base */
900 iounmap(reg);
901 }
902 /*
903 * This device does not use zap shader (but print a warning
904 * just in case someone got their dt wrong.. hopefully they
905 * have a debug UART to realize the error of their ways...
906 * if you mess this up you are about to crash horribly)
907 */
908 dev_warn_once(gpu->dev->dev,
909 "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
910 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
911 ret = 0;
912 } else {
913 return ret;
914 }
915
916 out:
917 /*
918 * Tell the GMU that we are done touching the GPU and it can start power
919 * management
920 */
921 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
922
923 if (a6xx_gpu->gmu.legacy) {
924 /* Take the GMU out of its special boot mode */
925 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
926 }
927
928 return ret;
929 }
930
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all(a)lists.01.org
[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 54352 bytes --]
^ permalink raw reply [flat|nested] only message in thread
only message in thread, other threads:[~2021-04-02 0:58 UTC | newest]
Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-04-02 0:58 [anholt:mesa-ci-5.12rc5 2/10] drivers/gpu/drm/msm/adreno/a6xx_gpu.c:899:4: error: implicit declaration of function 'writeq'; did you mean 'writeb'? kernel test robot
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.