* Microblaze MMU patches
@ 2009-04-27  8:31 monstr
  2009-04-27  8:31 ` [PATCH 01/30] microblaze_mmu_v1: Makefiles monstr
  0 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:31 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams

Hi All,

Here is the set of patches adding MMU support for MicroBlaze.

This is the first submission to LKML.

All changes are available in the 'next' branch at
git://git.monstr.eu/linux-2.6-microblaze.git.
gitweb - http://git.monstr.eu/git/gitweb.cgi?p=linux-2.6-microblaze.git;a=shortlog;h=refs/heads/next

I checked all patches with checkpatch.pl; the remaining warnings are
false positives.

I'll update mmu_defconfig later.

Thanks for your time and review,
Michal



^ permalink raw reply	[flat|nested] 81+ messages in thread

* [PATCH 01/30] microblaze_mmu_v1: Makefiles
  2009-04-27  8:31 Microblaze MMU patches monstr
@ 2009-04-27  8:31 ` monstr
  2009-04-27  8:31   ` [PATCH 02/30] microblaze_mmu_v1: Kconfig update monstr
  2009-04-27 10:50   ` [PATCH 01/30] microblaze_mmu_v1: Makefiles Arnd Bergmann
  0 siblings, 2 replies; 81+ messages in thread
From: monstr @ 2009-04-27  8:31 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/Makefile        |    4 ++++
 arch/microblaze/boot/Makefile   |    2 ++
 arch/microblaze/kernel/Makefile |    1 +
 arch/microblaze/lib/Makefile    |    3 ++-
 arch/microblaze/mm/Makefile     |    2 ++
 5 files changed, 11 insertions(+), 1 deletions(-)

diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index aaadfa7..7e8db41 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -1,4 +1,8 @@
+ifeq ($(CONFIG_MMU),y)
+UTS_SYSNAME = -DUTS_SYSNAME=\"Linux\"
+else
 UTS_SYSNAME = -DUTS_SYSNAME=\"uClinux\"
+endif
 
 # What CPU vesion are we building for, and crack it open
 # as major.minor.rev
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 844edf4..c2bb043 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -7,6 +7,8 @@ targets := linux.bin linux.bin.gz
 OBJCOPYFLAGS_linux.bin  := -O binary
 
 $(obj)/linux.bin: vmlinux FORCE
+	[ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
+	touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
 	$(call if_changed,objcopy)
 	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
 
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index da94bec..f4a5e19 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -15,5 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 obj-$(CONFIG_SELFMOD)		+= selfmod.o
 obj-$(CONFIG_HEART_BEAT)	+= heartbeat.o
 obj-$(CONFIG_MODULES)		+= microblaze_ksyms.o module.o
+obj-$(CONFIG_MMU)		+= misc.o
 
 obj-y	+= entry$(MMUEXT).o
diff --git a/arch/microblaze/lib/Makefile b/arch/microblaze/lib/Makefile
index d27126b..71c8cb6 100644
--- a/arch/microblaze/lib/Makefile
+++ b/arch/microblaze/lib/Makefile
@@ -10,4 +10,5 @@ else
 lib-y += memcpy.o memmove.o
 endif
 
-lib-y +=  uaccess.o
+lib-$(CONFIG_NO_MMU) += uaccess.o
+lib-$(CONFIG_MMU) += uaccess_old.o
diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile
index bf9e447..6c8a924 100644
--- a/arch/microblaze/mm/Makefile
+++ b/arch/microblaze/mm/Makefile
@@ -3,3 +3,5 @@
 #
 
 obj-y := init.o
+
+obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread
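
The lib/Makefile change above splits the user-access helpers into uaccess.o for
NO_MMU builds and uaccess_old.o for MMU builds. A minimal standalone C sketch of
why the two cases differ: without an MMU a user pointer is a physical address
that can be validated with a plain bounds check (compare ___range_ok() later in
this series), while with an MMU the copy itself must be allowed to fault and be
fixed up through the exception tables. The constants and function name below are
illustrative only, not the kernel's.

#include <stdio.h>

/* Illustrative bounds; a real system takes them from the LMB/device
 * tree, not from compile-time constants. */
#define MEMORY_START 0x90000000UL
#define MEMORY_END   0x94000000UL

/* no-MMU flavour: access_ok() can be a pure range check against
 * physical RAM before a direct copy. */
static int nommu_range_ok(unsigned long addr, unsigned long size)
{
	return addr >= MEMORY_START && addr + size <= MEMORY_END;
}

int main(void)
{
	/* With an MMU the same up-front check is impossible: the user
	 * address is virtual and may not be resident yet, so the copy
	 * relies on the fault/fixup path added later in this series. */
	printf("0x91000000/64 ok on no-MMU: %d\n",
	       nommu_range_ok(0x91000000UL, 64));
	return 0;
}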

* [PATCH 02/30] microblaze_mmu_v1: Kconfig update
  2009-04-27  8:31 ` [PATCH 01/30] microblaze_mmu_v1: Makefiles monstr
@ 2009-04-27  8:31   ` monstr
  2009-04-27  8:31     ` [PATCH 03/30] microblaze_mmu_v1: Add mmu_defconfig monstr
  2009-04-27 10:50   ` [PATCH 01/30] microblaze_mmu_v1: Makefiles Arnd Bergmann
  1 sibling, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:31 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/Kconfig |  110 ++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 109 insertions(+), 1 deletions(-)

diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 8cc312b..52fcad0 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -72,7 +72,8 @@ source "kernel/Kconfig.preempt"
 source "kernel/Kconfig.hz"
 
 config MMU
-	def_bool n
+	bool "MMU support"
+	default n
 
 config NO_MMU
 	bool
@@ -118,6 +119,113 @@ config PROC_DEVICETREE
 
 endmenu
 
+menu "Advanced setup"
+
+config ADVANCED_OPTIONS
+	bool "Prompt for advanced kernel configuration options"
+	depends on MMU
+	help
+	  This option will enable prompting for a variety of advanced kernel
+	  configuration options.  These options can cause the kernel to not
+	  work if they are set incorrectly, but can be used to optimize certain
+	  aspects of kernel memory management.
+
+	  Unless you know what you are doing, say N here.
+
+comment "Default settings for advanced configuration options are used"
+	depends on !ADVANCED_OPTIONS
+
+config HIGHMEM_START_BOOL
+	bool "Set high memory pool address"
+	depends on ADVANCED_OPTIONS && HIGHMEM
+	help
+	  This option allows you to set the base address of the kernel virtual
+	  area used to map high memory pages.  This can be useful in
+	  optimizing the layout of kernel virtual memory.
+
+	  Say N here unless you know what you are doing.
+
+config HIGHMEM_START
+	hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL
+	depends on MMU
+	default "0xfe000000"
+
+config LOWMEM_SIZE_BOOL
+	bool "Set maximum low memory"
+	depends on ADVANCED_OPTIONS
+	help
+	  This option allows you to set the maximum amount of memory which
+	  will be used as "low memory", that is, memory which the kernel can
+	  access directly, without having to set up a kernel virtual mapping.
+	  This can be useful in optimizing the layout of kernel virtual
+	  memory.
+
+	  Say N here unless you know what you are doing.
+
+config LOWMEM_SIZE
+	hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
+	depends on MMU
+	default "0x30000000"
+
+config KERNEL_START_BOOL
+	bool "Set custom kernel base address"
+	depends on ADVANCED_OPTIONS
+	help
+	  This option allows you to set the kernel virtual address at which
+	  the kernel will map low memory (the kernel image will be linked at
+	  this address).  This can be useful in optimizing the virtual memory
+	  layout of the system.
+
+	  Say N here unless you know what you are doing.
+
+config KERNEL_START
+	hex "Virtual address of kernel base" if KERNEL_START_BOOL
+	depends on MMU
+	default "0xc0000000"
+
+config TASK_SIZE_BOOL
+	bool "Set custom user task size"
+	depends on ADVANCED_OPTIONS
+	help
+	  This option allows you to set the amount of virtual address space
+	  allocated to user tasks.  This can be useful in optimizing the
+	  virtual memory layout of the system.
+
+	  Say N here unless you know what you are doing.
+
+config TASK_SIZE
+	hex "Size of user task space" if TASK_SIZE_BOOL
+	depends on MMU
+	default "0x80000000"
+
+config CONSISTENT_START_BOOL
+	bool "Set custom consistent memory pool address"
+	depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
+	help
+	  This option allows you to set the base virtual address
+	  of the consistent memory pool.  This pool of virtual
+	  memory is used to make consistent memory allocations.
+
+config CONSISTENT_START
+	hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
+	depends on MMU
+	default "0xff100000" if NOT_COHERENT_CACHE
+
+config CONSISTENT_SIZE_BOOL
+	bool "Set custom consistent memory pool size"
+	depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
+	help
+	  This option allows you to set the size of the
+	  consistent memory pool.  This pool of virtual memory
+	  is used to make consistent memory allocations.
+
+config CONSISTENT_SIZE
+	hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
+	depends on MMU
+	default "0x00200000" if NOT_COHERENT_CACHE
+
+endmenu
+
 source "mm/Kconfig"
 
 menu "Exectuable file formats"
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread
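
The help texts above describe a fixed split of the 32-bit address space: user
space below TASK_SIZE, the kernel's direct ("low memory") mapping starting at
KERNEL_START and covering at most LOWMEM_SIZE, and the consistent-DMA and
highmem pools near the top. A small standalone sketch, using only the default
values from this hunk, that prints the resulting layout (an illustration of the
constants, not kernel code):

#include <stdio.h>

/* Defaults from the Kconfig options added above. */
#define TASK_SIZE        0x80000000UL
#define KERNEL_START     0xc0000000UL
#define LOWMEM_SIZE      0x30000000UL
#define HIGHMEM_START    0xfe000000UL
#define CONSISTENT_START 0xff100000UL
#define CONSISTENT_SIZE  0x00200000UL

int main(void)
{
	printf("user space: 0x00000000 - 0x%08lx\n", TASK_SIZE - 1);
	printf("lowmem map: 0x%08lx - 0x%08lx (at most LOWMEM_SIZE)\n",
	       KERNEL_START, KERNEL_START + LOWMEM_SIZE - 1);
	printf("highmem pool base: 0x%08lx\n", HIGHMEM_START);
	printf("consistent pool: 0x%08lx - 0x%08lx\n",
	       CONSISTENT_START, CONSISTENT_START + CONSISTENT_SIZE - 1);
	return 0;
}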

* [PATCH 03/30] microblaze_mmu_v1: Add mmu_defconfig
  2009-04-27  8:31   ` [PATCH 02/30] microblaze_mmu_v1: Kconfig update monstr
@ 2009-04-27  8:31     ` monstr
  2009-04-27  8:31       ` [PATCH 04/30] microblaze_mmu_v1: MMU update for startup code monstr
  0 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:31 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/configs/mmu_defconfig |  795 +++++++++++++++++++++++++++++++++
 1 files changed, 795 insertions(+), 0 deletions(-)
 create mode 100644 arch/microblaze/configs/mmu_defconfig

diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
new file mode 100644
index 0000000..7351fa4
--- /dev/null
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -0,0 +1,795 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.30-rc2
+# Thu Apr 23 11:17:38 2009
+#
+CONFIG_MICROBLAZE=y
+# CONFIG_SWAP is not set
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_TIME=y
+# CONFIG_GENERIC_TIME_VSYSCALL is not set
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="rootfs.cpio"
+CONFIG_INITRAMFS_ROOT_UID=0
+CONFIG_INITRAMFS_ROOT_GID=0
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+CONFIG_INITRAMFS_COMPRESSION_NONE=y
+# CONFIG_INITRAMFS_COMPRESSION_GZIP is not set
+# CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set
+# CONFIG_INITRAMFS_COMPRESSION_LZMA is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KALLSYMS_EXTRA_PASS=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_HOTPLUG is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+# CONFIG_BASE_FULL is not set
+# CONFIG_FUTEX is not set
+# CONFIG_EPOLL is not set
+# CONFIG_SIGNALFD is not set
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+# CONFIG_SHMEM is not set
+CONFIG_AIO=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+# CONFIG_SLOW_WORK is not set
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_BASE_SMALL=1
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_FREEZER is not set
+
+#
+# Platform options
+#
+CONFIG_PLATFORM_GENERIC=y
+CONFIG_OPT_LIB_FUNCTION=y
+CONFIG_OPT_LIB_ASM=y
+CONFIG_ALLOW_EDIT_AUTO=y
+
+#
+# Automatic platform settings from Kconfig.auto
+#
+
+#
+# Definitions for MICROBLAZE0
+#
+CONFIG_KERNEL_BASE_ADDR=0x90000000
+CONFIG_XILINX_MICROBLAZE0_FAMILY="virtex5"
+CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
+CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
+CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
+CONFIG_XILINX_MICROBLAZE0_USE_DIV=1
+CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL=2
+CONFIG_XILINX_MICROBLAZE0_USE_FPU=2
+CONFIG_XILINX_MICROBLAZE0_HW_VER="7.10.d"
+
+#
+# Processor type and features
+#
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_MMU=y
+
+#
+# Boot options
+#
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyUL0,115200"
+CONFIG_CMDLINE_FORCE=y
+CONFIG_OF=y
+CONFIG_OF_DEVICE=y
+CONFIG_PROC_DEVICETREE=y
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+
+#
+# Exectuable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=8192
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+CONFIG_COMPAT_NET_DEV_OPS=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+CONFIG_NETDEV_1000=y
+CONFIG_NETDEV_10000=y
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_UARTLITE=y
+CONFIG_SERIAL_UARTLITE_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_RTC is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_SOUND is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+CONFIG_CIFS=y
+CONFIG_CIFS_STATS=y
+CONFIG_CIFS_STATS2=y
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+CONFIG_DEBUG_SLAB=y
+# CONFIG_DEBUG_SLAB_LEAK is not set
+CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+# CONFIG_SAMPLES is not set
+CONFIG_EARLY_PRINTK=y
+CONFIG_HEART_BEAT=y
+CONFIG_DEBUG_BOOTMEM=y
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_MANAGER2 is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
+CONFIG_NLATTR=y
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread
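
This defconfig pairs CONFIG_KERNEL_BASE_ADDR=0x90000000 (physical RAM base)
with CONFIG_KERNEL_START=0xc0000000 (kernel virtual base), so the
tophys()/tovirt() conversions used by head.S in the next patch amount to a
fixed linear offset between the two bases. A hedged sketch of that arithmetic
(the real tophys()/tovirt() are assembler macros; the example address is
made up):

#include <stdio.h>

#define KERNEL_BASE_ADDR 0x90000000UL	/* CONFIG_KERNEL_BASE_ADDR */
#define KERNEL_START     0xc0000000UL	/* CONFIG_KERNEL_START */

/* Same idea as tophys()/tovirt() in head.S: add or subtract the
 * constant distance between the virtual and physical kernel bases. */
#define tophys(va) ((va) - KERNEL_START + KERNEL_BASE_ADDR)
#define tovirt(pa) ((pa) - KERNEL_BASE_ADDR + KERNEL_START)

int main(void)
{
	unsigned long va = 0xc0100000UL;	/* some kernel virtual address */

	printf("virt 0x%08lx -> phys 0x%08lx\n", va, tophys(va));
	printf("phys 0x%08lx -> virt 0x%08lx\n",
	       tophys(va), tovirt(tophys(va)));
	return 0;
}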

* [PATCH 04/30] microblaze_mmu_v1: MMU update for startup code
  2009-04-27  8:31     ` [PATCH 03/30] microblaze_mmu_v1: Add mmu_defconfig monstr
@ 2009-04-27  8:31       ` monstr
  2009-04-27  8:31         ` [PATCH 05/30] microblaze_mmu_v1: Alocate TLB for early console monstr
  0 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:31 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/kernel/head.S |  201 +++++++++++++++++++++++++++++++++++++++++
 1 files changed, 201 insertions(+), 0 deletions(-)

diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 319dc35..172359f 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -3,6 +3,26 @@
  * Copyright (C) 2007-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
+ * MMU code derived from arch/ppc/kernel/head_4xx.S:
+ *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
+ *      Initial PowerPC version.
+ *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *      Rewritten for PReP
+ *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
+ *      Low-level exception handlers, MMU support, and rewrite.
+ *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
+ *      PowerPC 8xx modifications.
+ *    Copyright (c) 1998-1999 TiVo, Inc.
+ *      PowerPC 403GCX modifications.
+ *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ *      PowerPC 403GCX/405GP modifications.
+ *    Copyright 2000 MontaVista Software Inc.
+ *	PPC405 modifications
+ *      PowerPC 403GCX/405GP modifications.
+ * 	Author: MontaVista Software, Inc.
+ *         	frank_rowand@mvista.com or source@mvista.com
+ * 	   	debbie_chu@mvista.com
+ *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -12,6 +32,22 @@
 #include <asm/thread_info.h>
 #include <asm/page.h>
 
+#ifdef CONFIG_MMU
+#include <asm/setup.h> /* COMMAND_LINE_SIZE */
+#include <asm/mmu.h>
+#include <asm/processor.h>
+
+.data
+.global empty_zero_page
+.align 12
+empty_zero_page:
+	.space	4096
+.global swapper_pg_dir
+swapper_pg_dir:
+	.space	4096
+
+#endif /* CONFIG_MMU */
+
 	.text
 ENTRY(_start)
 	mfs	r1, rmsr
@@ -32,6 +68,124 @@ _copy_fdt:
 	addik	r3, r3, -4 /* descrement loop */
 no_fdt_arg:
 
+#ifdef CONFIG_MMU
+
+#ifndef CONFIG_CMDLINE_BOOL
+/*
+ * handling command line
+ * copy command line to __init_end. There is space for storing command line.
+ */
+	or	r6, r0, r0		/* increment */
+	ori	r4, r0, __init_end	/* load address of command line */
+	tophys(r4,r4)			/* convert to phys address */
+	ori	r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
+_copy_command_line:
+	lbu	r7, r5, r6 /* r7=r5+r6 - r5 contains pointer to command line */
+	sb	r7, r4, r6		/* addr[r4+r6]= r7*/
+	addik	r6, r6, 1		/* increment counter */
+	bgtid	r3, _copy_command_line	/* loop for all entries       */
+	addik	r3, r3, -1		/* decrement loop counter */
+	addik	r5, r4, 0		/* add new space for command line */
+	tovirt(r5,r5)
+#endif /* CONFIG_CMDLINE_BOOL */
+
+#ifdef NOT_COMPILE
+/* save bram context */
+	or	r6, r0, r0				/* increment */
+	ori	r4, r0, TOPHYS(_bram_load_start)	/* save bram context */
+	ori	r3, r0, (LMB_SIZE - 4)
+_copy_bram:
+	lw	r7, r0, r6		/* r7 = r0 + r6 */
+	sw	r7, r4, r6		/* addr[r4 + r6] = r7*/
+	addik	r6, r6, 4		/* increment counter */
+	bgtid	r3, _copy_bram		/* loop for all entries */
+	addik	r3, r3, -4		/* decrement loop counter */
+#endif
+	/* We have to turn on the MMU right away. */
+
+	/*
+	 * Set up the initial MMU state so we can do the first level of
+	 * kernel initialization.  This maps the first 16 MBytes of memory 1:1
+	 * virtual to physical.
+	 */
+	nop
+	addik	r3, r0, 63		/* Invalidate all TLB entries */
+_invalidate:
+	mts	rtlbx, r3
+	mts	rtlbhi, r0			/* flush: ensure V is clear   */
+	bgtid	r3, _invalidate		/* loop for all entries       */
+	addik	r3, r3, -1
+	/* sync */
+
+	/*
+	 * We should still be executing code at physical address area
+	 * RAM_BASEADDR at this point. However, kernel code is at
+	 * a virtual address. So, set up a TLB mapping to cover this once
+	 * translation is enabled.
+	 */
+
+	addik	r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
+	tophys(r4,r3)			/* Load the kernel physical address */
+
+	mts	rpid,r0			/* Load the kernel PID */
+	nop
+	bri	4
+
+	/*
+	 * Configure and load two entries into TLB slots 62 and 63.
+	 * In case we are pinning TLBs, these are reserved in by the
+	 * other TLB functions.  If not reserving, then it doesn't
+	 * matter where they are loaded.
+	 */
+	andi	r4,r4,0xfffffc00	/* Mask off the real page number */
+	ori	r4,r4,(TLB_WR | TLB_EX)	/* Set the write and execute bits */
+
+	andi	r3,r3,0xfffffc00	/* Mask off the effective page number */
+	ori	r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))
+
+	ori	r6,r0,62		/* TLB slot 62 */
+	mts     rtlbx,r6
+
+	mts	rtlblo,r4		/* Load the data portion of the entry */
+	mts	rtlbhi,r3		/* Load the tag portion of the entry */
+
+	addik	r4, r4, 0x01000000	/* Map next 16 M entries */
+	addik	r3, r3, 0x01000000
+
+        addik	r6,r6,1			/* TLB slot 63 */
+	mts     rtlbx,r6
+
+	mts	rtlblo,r4		/* Load the data portion of the entry */
+	mts	rtlbhi,r3		/* Load the tag portion of the entry */
+
+	/*
+	 * Load a TLB entry for LMB, since we need access to
+	 * the exception vectors, using a 4k real==virtual mapping.
+	 */
+	ori	r6,r0,1			/* TLB slot 1 */
+	mts     rtlbx,r6
+
+	ori	r4,r0,(TLB_WR | TLB_EX)
+	ori	r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
+
+	mts	rtlblo,r4		/* Load the data portion of the entry */
+	mts	rtlbhi,r3		/* Load the tag portion of the entry */
+
+	/*
+	 * We now have the lower 16 Meg of RAM mapped into TLB entries, and the
+	 * caches ready to work.
+	 */
+turn_on_mmu:
+	ori	r15,r0,start_here
+	ori	r4,r0,MSR_KERNEL_VMS
+	mts	rmsr,r4
+	nop
+	rted	r15,0			/* enables MMU */
+	nop
+
+start_here:
+#endif /* CONFIG_MMU */
+
 	/* Initialize small data anchors */
 	la	r13, r0, _KERNEL_SDA_BASE_
 	la	r2, r0, _KERNEL_SDA2_BASE_
@@ -51,6 +205,53 @@ no_fdt_arg:
 	brald	r15, r8
 	nop
 
+#ifndef CONFIG_MMU
 	la	r15, r0, machine_halt
 	braid	start_kernel
 	nop
+#else
+	/*
+	 * Initialize the MMU.
+	 */
+	bralid	r15,MMU_init
+	nop
+
+	/* Go back to running unmapped so we can load up new values
+	 * and change to using our exception vectors.
+	 * On the MicroBlaze, all we have to do is invalidate the used TLB
+	 * entries to clear the old 16M byte TLB mappings.
+	 */
+	ori	r15,r0,TOPHYS(kernel_load_context)
+	ori	r4,r0,MSR_KERNEL
+	mts	rmsr,r4
+	nop
+	bri	4
+	rted	r15,0
+	nop
+
+	/* Load up the kernel context */
+kernel_load_context:
+	/* clearing kernel context */
+	ori	r5,r0,1		/* TLB slot 1 */
+	mts     rtlbx,r5
+	nop
+	mts	rtlbhi,r0
+	nop
+	ori	r5,r0,62	/* TLB slot 62 */
+	mts     rtlbx,r5
+	nop
+	mts	rtlbhi,r0
+	nop
+	ori	r5,r0,63	/* TLB slot 63 */
+	mts     rtlbx,r5
+	nop
+	mts	rtlbhi,r0
+	nop
+	addi	r15, r0, machine_halt
+	ori	r17, r0, start_kernel
+	ori	r4, r0, MSR_KERNEL_VMS
+	mts	rmsr, r4
+	nop
+	rted	r17, 0		/* enable MMU and jump to start_kernel */
+	nop
+#endif /* CONFIG_MMU */
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread
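
Before turning the MMU on, head.S above loads TLB slots 62 and 63 with two
16 MB entries that map the first 32 MB of RAM at CONFIG_KERNEL_START. A
standalone sketch that recomputes the tag/data words those mts rtlbhi /
mts rtlblo instructions load; the TLB_* values are copied from the asm/mmu.h
hunk in patch 08 so the sketch compiles on its own (illustrative only):

#include <stdio.h>

/* Mirrors the definitions added to asm/mmu.h in patch 08. */
#define TLB_VALID	0x00000040
#define TLB_PAGESZ(x)	(((x) & 0x7) << 7)
#define PAGESZ_16M	7
#define TLB_WR		0x00000100
#define TLB_EX		0x00000200

int main(void)
{
	unsigned long virt = 0xc0000000UL;	/* CONFIG_KERNEL_START */
	unsigned long phys = 0x90000000UL;	/* tophys() of the above (mmu_defconfig) */

	/* Tag ("hi") word: effective page number, valid bit, page size. */
	unsigned long tag  = (virt & 0xfffffc00) |
			     TLB_VALID | TLB_PAGESZ(PAGESZ_16M);
	/* Data ("lo") word: real page number plus write/execute rights. */
	unsigned long data = (phys & 0xfffffc00) | TLB_WR | TLB_EX;

	printf("slot 62: rtlbhi=0x%08lx rtlblo=0x%08lx\n", tag, data);
	/* Slot 63 maps the next 16 MB: both words move up by 0x01000000. */
	printf("slot 63: rtlbhi=0x%08lx rtlblo=0x%08lx\n",
	       tag + 0x01000000, data + 0x01000000);
	return 0;
}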

* [PATCH 05/30] microblaze_mmu_v1: Alocate TLB for early console
  2009-04-27  8:31       ` [PATCH 04/30] microblaze_mmu_v1: MMU update for startup code monstr
@ 2009-04-27  8:31         ` monstr
  2009-04-27  8:31           ` [PATCH 06/30] microblaze_mmu_v1: TLB low level code monstr
  0 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:31 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/include/asm/setup.h   |   10 ++++++++--
 arch/microblaze/kernel/early_printk.c |    3 +++
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index 9b98e8e..27f8daf 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -1,5 +1,6 @@
 /*
- * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -18,7 +19,6 @@
 extern unsigned int boot_cpuid; /* move to smp.h */
 
 extern char cmd_line[COMMAND_LINE_SIZE];
-#  endif/* __KERNEL__ */
 
 void early_printk(const char *fmt, ...);
 
@@ -30,6 +30,11 @@ void setup_heartbeat(void);
 
 unsigned long long sched_clock(void);
 
+#   ifdef CONFIG_MMU
+extern void mmu_reset(void);
+extern void early_console_reg_tlb_alloc(unsigned int addr);
+#   endif /* CONFIG_MMU */
+
 void time_init(void);
 void init_IRQ(void);
 void machine_early_init(const char *cmdline, unsigned int ram,
@@ -40,5 +45,6 @@ void machine_shutdown(void);
 void machine_halt(void);
 void machine_power_off(void);
 
+#  endif/* __KERNEL__ */
 # endif /* __ASSEMBLY__ */
 #endif /* _ASM_MICROBLAZE_SETUP_H */
diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c
index 4b0f0fd..7de8492 100644
--- a/arch/microblaze/kernel/early_printk.c
+++ b/arch/microblaze/kernel/early_printk.c
@@ -87,6 +87,9 @@ int __init setup_early_printk(char *opt)
 	base_addr = early_uartlite_console();
 	if (base_addr) {
 		early_console_initialized = 1;
+#ifdef CONFIG_MMU
+		early_console_reg_tlb_alloc(base_addr);
+#endif
 		early_printk("early_printk_console is enabled at 0x%08x\n",
 							base_addr);
 
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread
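
early_console_reg_tlb_alloc() (added in the next patch) loads TLB slot 0 with
a 4 kB real==virtual mapping of the UART so early_printk() works before paging
is fully set up; since this is device memory, the entry is marked
cache-inhibited and guarded instead of writable+executable RAM. A sketch of
the words it would program for a hypothetical UARTLITE at 0x84000000 (the
address is made up; the TLB_* values mirror asm/mmu.h from patch 08):

#include <stdio.h>

/* Mirrors asm/mmu.h (patch 08). */
#define TLB_VALID	0x00000040
#define TLB_PAGESZ(x)	(((x) & 0x7) << 7)
#define PAGESZ_4K	1
#define TLB_WR		0x00000100
#define TLB_I		0x00000004	/* caching inhibited */
#define TLB_M		0x00000002	/* memory coherent */
#define TLB_G		0x00000001	/* guarded from prefetch */

int main(void)
{
	unsigned long uart = 0x84000000UL;	/* hypothetical UARTLITE base */

	/* 4 kB identity mapping: tag and data carry the same page number. */
	unsigned long data = (uart & 0xfffff000) |
			     (TLB_WR | TLB_I | TLB_M | TLB_G);
	unsigned long tag  = (uart & 0xfffff000) |
			     (TLB_VALID | TLB_PAGESZ(PAGESZ_4K));

	printf("slot 0: rtlbhi=0x%08lx rtlblo=0x%08lx\n", tag, data);
	return 0;
}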

* [PATCH 06/30] microblaze_mmu_v1: TLB low level code
  2009-04-27  8:31         ` [PATCH 05/30] microblaze_mmu_v1: Alocate TLB for early console monstr
@ 2009-04-27  8:31           ` monstr
  2009-04-27  8:31             ` [PATCH 07/30] microblaze_mmu_v1: MMU initialization monstr
  0 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:31 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/kernel/misc.S |  116 +++++++++++++++++++++++++++++++++++++++++
 1 files changed, 116 insertions(+), 0 deletions(-)
 create mode 100644 arch/microblaze/kernel/misc.S

diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S
new file mode 100644
index 0000000..9f1d94e
--- /dev/null
+++ b/arch/microblaze/kernel/misc.S
@@ -0,0 +1,116 @@
+/*
+ * Miscellaneous low-level MMU functions.
+ *
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
+ * Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
+ *
+ * Derived from arch/ppc/kernel/misc.S
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/linkage.h>
+#include <linux/sys.h>
+#include <asm/unistd.h>
+#include <linux/errno.h>
+#include <asm/mmu.h>
+#include <asm/page.h>
+
+	.text
+/*
+ * Flush MMU TLB
+ */
+.globl _tlbia;
+.align 4;
+_tlbia:
+	addik	r12, r0, 63 /* flush all entries (63 - 0) */
+	/* isync */
+_tlbia_1:
+	mts	rtlbx, r12
+	nop
+	mts	rtlbhi, r0 /* flush: ensure V is clear */
+	nop
+	bgtid	r12, _tlbia_1 /* loop for all entries */
+	addik	r12, r12, -1
+	/* sync */
+	rtsd	r15, 8
+	nop
+
+/*
+ * Flush MMU TLB for a particular address (in r5)
+ */
+.globl _tlbie;
+.align 4;
+_tlbie:
+	mts	rtlbsx, r5 /* look up the address in TLB */
+	nop
+	mfs	r12, rtlbx /* Retrieve index */
+	nop
+	blti	r12, _tlbie_1 /* Check if found */
+	mts	rtlbhi, r0 /* flush: ensure V is clear */
+	nop
+_tlbie_1:
+	rtsd	r15, 8
+	nop
+
+/*
+ * Allocate TLB entry for early console
+ */
+.globl early_console_reg_tlb_alloc;
+.align 4;
+early_console_reg_tlb_alloc:
+	/*
+	 * Load a TLB entry for the UART, so that microblaze_progress() can use
+	 * the UARTs nice and early.  We use a 4k real==virtual mapping.
+	 */
+	or	r4,r5,r0
+	andi	r4,r4,0xfffff000
+	ori	r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)
+
+	andi	r5,r5,0xfffff000
+	ori	r5,r5,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
+
+	mts	rtlbx,r0 /* TLB slot 0 */
+	nop
+	mts	rtlblo,r4 /* Load the data portion of the entry */
+	nop
+	mts	rtlbhi,r5 /* Load the tag portion of the entry */
+	nop
+	rtsd	r15, 8
+	nop
+
+/*
+ * Copy a whole page (4096 bytes).
+ */
+#define COPY_16_BYTES		\
+	lwi	r7, r6, 0;	\
+	lwi	r8, r6, 4;	\
+	lwi	r9, r6, 8;	\
+	lwi	r10, r6, 12;	\
+	swi	r7, r5, 0;	\
+	swi	r8, r5, 4;	\
+	swi	r9, r5, 8;	\
+	swi	r10, r5, 12
+
+
+/* FIXME DCACHE_LINE_BYTES (CONFIG_XILINX_MICROBLAZE0_DCACHE_LINE_LEN * 4)*/
+#define DCACHE_LINE_BYTES (4 * 4)
+
+.globl copy_page;
+.align 4;
+copy_page:
+	ori	r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
+_copy_page_loop:
+	COPY_16_BYTES
+#if DCACHE_LINE_BYTES >= 32
+	COPY_16_BYTES
+#endif
+	addik	r6, r6, DCACHE_LINE_BYTES
+	addik	r5, r5, DCACHE_LINE_BYTES
+	bneid	r11, _copy_page_loop
+	addik	r11, r11, -1
+	rtsd	r15, 8
+	nop
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread
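
copy_page above moves one data-cache line per iteration (assumed to be
16 bytes, per the FIXME), PAGE_SIZE/DCACHE_LINE_BYTES iterations in total.
A plain-C rendering of the same loop, only to make the assembly's structure
explicit (the kernel keeps the assembly version; this sketch is illustrative):

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE		4096
#define DCACHE_LINE_BYTES	(4 * 4)	/* same assumption as misc.S */

/* C equivalent of copy_page: one COPY_16_BYTES per loop iteration. */
static void copy_page_c(void *to, const void *from)
{
	uint32_t *d = to;
	const uint32_t *s = from;
	unsigned int i;

	for (i = 0; i < PAGE_SIZE / DCACHE_LINE_BYTES; i++) {
		d[0] = s[0];
		d[1] = s[1];
		d[2] = s[2];
		d[3] = s[3];
		d += 4;
		s += 4;
	}
}

int main(void)
{
	static uint32_t src[PAGE_SIZE / 4], dst[PAGE_SIZE / 4];

	src[0] = 0xdeadbeef;
	src[PAGE_SIZE / 4 - 1] = 0xcafef00d;
	copy_page_c(dst, src);
	return memcmp(dst, src, PAGE_SIZE) != 0;
}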

* [PATCH 07/30] microblaze_mmu_v1: MMU initialization
  2009-04-27  8:31           ` [PATCH 06/30] microblaze_mmu_v1: TLB low level code monstr
@ 2009-04-27  8:31             ` monstr
  2009-04-27  8:31               ` [PATCH 08/30] microblaze_mmu_v1: mmu.h update monstr
  2009-04-29  5:42               ` [PATCH 07/30] microblaze_mmu_v1: MMU initialization Andrew Morton
  0 siblings, 2 replies; 81+ messages in thread
From: monstr @ 2009-04-27  8:31 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/mm/init.c |  168 ++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 167 insertions(+), 1 deletions(-)

diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index b0c8213..32a54cb 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -23,9 +23,17 @@
 #include <asm/sections.h>
 #include <asm/tlb.h>
 
+#ifndef CONFIG_MMU
 unsigned int __page_offset;
 /* EXPORT_SYMBOL(__page_offset); */
 
+#else
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+int mem_init_done;
+static int init_bootmem_done;
+#endif /* CONFIG_MMU */
+
 char *klimit = _end;
 
 /*
@@ -61,6 +69,7 @@ void __init setup_memory(void)
 {
 	int i;
 	unsigned long map_size;
+#ifndef CONFIG_MMU
 	u32 kernel_align_start, kernel_align_size;
 
 	/* Find main memory where is the kernel */
@@ -93,6 +102,7 @@ void __init setup_memory(void)
 		__func__, kernel_align_start, kernel_align_start
 			+ kernel_align_size, kernel_align_size);
 
+#endif
 	/*
 	 * Kernel:
 	 * start: base phys address of kernel - page align
@@ -121,9 +131,13 @@ void __init setup_memory(void)
 	 * for 4GB of memory, using 4kB pages), plus 1 page
 	 * (in case the address isn't page-aligned).
 	 */
+#ifndef CONFIG_MMU
 	map_size = init_bootmem_node(NODE_DATA(0), PFN_UP(TOPHYS((u32)_end)),
 					min_low_pfn, max_low_pfn);
-
+#else
+	map_size = init_bootmem_node(&contig_page_data,
+		PFN_UP(TOPHYS((u32)_end)), min_low_pfn, max_low_pfn);
+#endif
 	lmb_reserve(PFN_UP(TOPHYS((u32)_end)) << PAGE_SHIFT, map_size);
 
 	/* free bootmem is whole main memory */
@@ -137,6 +151,9 @@ void __init setup_memory(void)
 		reserve_bootmem(lmb.reserved.region[i].base,
 			lmb_size_bytes(&lmb.reserved, i) - 1, BOOTMEM_DEFAULT);
 	}
+#ifdef CONFIG_MMU
+	init_bootmem_done = 1;
+#endif
 	paging_init();
 }
 
@@ -191,11 +208,160 @@ void __init mem_init(void)
 	printk(KERN_INFO "Memory: %luk/%luk available\n",
 	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
 	       num_physpages << (PAGE_SHIFT-10));
+#ifdef CONFIG_MMU
+	mem_init_done = 1;
+#endif
 }
 
+#ifndef CONFIG_MMU
 /* Check against bounds of physical memory */
 int ___range_ok(unsigned long addr, unsigned long size)
 {
 	return ((addr < memory_start) ||
 		((addr + size) > memory_end));
 }
+
+#else
+int page_is_ram(unsigned long pfn)
+{
+	return pfn < max_low_pfn;
+}
+
+/*
+ * Check for command-line options that affect what MMU_init will do.
+ */
+static void MMU_setup(void)
+{
+#if 0
+	/* Check for nobats option (used in mapin_ram). */
+	if (strstr(cmd_line, "nobats"))
+		__map_without_bats = 1;
+
+	/* Look for mem= option on command line */
+	if (strstr(cmd_line, "mem=")) {
+		char *p, *q;
+		unsigned long maxmem = 0;
+
+		for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
+			q = p + 4;
+			if (p > cmd_line && p[-1] != ' ')
+				continue;
+			maxmem = strict_strtoul(q, &q, NULL);
+			if (*q == 'k' || *q == 'K') {
+				maxmem <<= 10;
+				++q;
+			} else if (*q == 'm' || *q == 'M') {
+				maxmem <<= 20;
+				++q;
+			}
+		}
+		__max_memory = maxmem;
+	}
+
+/*	if (__max_memory && memory_size > __max_memory)
+		memory_size = __max_memory; */
+
+#endif
+}
+
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+static void __init MMU_init_hw(void)
+{
+	/*
+	 * The Zone Protection Register (ZPR) defines how protection will
+	 * be applied to every page which is a member of a given zone. At
+	 * present, we utilize only two of the zones.
+	 * The zone index bits (of ZSEL) in the PTE are used for software
+	 * indicators, except the LSB.  For user access, zone 1 is used,
+	 * for kernel access, zone 0 is used.  We set all but zone 1
+	 * to zero, allowing only kernel access as indicated in the PTE.
+	 * For zone 1, we set a 01 binary (a value of 10 will not work)
+	 * to allow user access as indicated in the PTE.  This also allows
+	 * kernel access as indicated in the PTE.
+	 */
+	__asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
+			"mts rzpr, r11;"
+			: : : "r11");
+}
+
+/*
+ * MMU_init sets up the basic memory mappings for the kernel,
+ * including both RAM and possibly some I/O regions,
+ * and sets up the page tables and the MMU hardware ready to go.
+ */
+
+/* called from head.S */
+asmlinkage void __init MMU_init(void)
+{
+	unsigned int kstart, ksize;
+
+	if (!lmb.reserved.cnt) {
+		printk(KERN_EMERG "Error memory count\n");
+		machine_restart(NULL);
+	}
+
+	if ((u32) lmb.memory.region[0].size < 0x1000000) {
+		printk(KERN_EMERG "Memory must be greater than 16MB\n");
+		machine_restart(NULL);
+	}
+	/* Find main memory where the kernel is */
+	memory_start = (u32) lmb.memory.region[0].base;
+	memory_end = (u32) lmb.memory.region[0].base +
+				(u32) lmb.memory.region[0].size;
+	memory_size = memory_end - memory_start;
+
+	MMU_setup(); /* FIXME parse args from command line - not used */
+
+	/*
+	 * Map out the kernel text/data/bss from the available physical
+	 * memory.
+	 */
+	kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
+	/* kernel size */
+	ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
+	lmb_reserve(kstart, ksize);
+
+#if defined(CONFIG_BLK_DEV_INITRD)
+	/* Remove the init RAM disk from the available memory. */
+/*	if (initrd_start) {
+		mem_pieces_remove(&phys_avail, __pa(initrd_start),
+				  initrd_end - initrd_start, 1);
+	}*/
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+	/* Initialize the MMU hardware */
+	MMU_init_hw();
+
+	/* Map in all of RAM starting at CONFIG_KERNEL_START */
+	mapin_ram();
+
+#ifdef HIGHMEM_START_BOOL
+	ioremap_base = HIGHMEM_START;
+#else
+	ioremap_base = 0xfe000000UL;	/* for now, could be 0xfffff000 */
+#endif /* CONFIG_HIGHMEM */
+	ioremap_bot = ioremap_base;
+
+	/* Initialize the context management stuff */
+	mmu_context_init();
+}
+
+/* This is only called until mem_init is done. */
+void __init *early_get_page(void)
+{
+	void *p;
+	if (init_bootmem_done) {
+		p = alloc_bootmem_pages(PAGE_SIZE);
+	} else {
+		/*
+		 * Mem start + 32MB -> here is limit
+		 * because of mem mapping from head.S
+		 */
+		p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
+					memory_start + 0x2000000));
+	}
+	return p;
+}
+#endif /* CONFIG_MMU */
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread
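
MMU_init_hw() above loads 0x10000000 into the zone protection register: the
comment explains that zone 0 (kernel) stays at 00, so access rights come purely
from the PTE, while zone 1 (user) is set to 01. A tiny sketch of how that
constant decomposes into sixteen 2-bit zone fields, assuming the PPC-40x
MSB-first field layout that the MicroBlaze MMU follows (the helper itself is
illustrative):

#include <assert.h>
#include <stdio.h>

/* ZPR holds sixteen 2-bit zone fields, zone 0 in the two most
 * significant bits (PPC-40x style layout, assumed here). */
static unsigned long zpr_set(unsigned long zpr, int zone, unsigned int val)
{
	int shift = 30 - 2 * zone;

	zpr &= ~(3UL << shift);
	return zpr | ((unsigned long)(val & 3) << shift);
}

int main(void)
{
	unsigned long zpr = 0;

	zpr = zpr_set(zpr, 0, 0x0);	/* kernel zone: rights from the PTE */
	zpr = zpr_set(zpr, 1, 0x1);	/* user zone: the "01 binary" value */

	assert(zpr == 0x10000000UL);	/* the value MMU_init_hw() writes */
	printf("rzpr = 0x%08lx\n", zpr);
	return 0;
}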

* [PATCH 08/30] microblaze_mmu_v1: mmu.h update
  2009-04-27  8:31             ` [PATCH 07/30] microblaze_mmu_v1: MMU initialization monstr
@ 2009-04-27  8:31               ` monstr
  2009-04-27  8:31                 ` [PATCH 09/30] microblaze_mmu_v1: Page fault handling high level - fault.c monstr
  2009-04-29  5:44                 ` [PATCH 08/30] microblaze_mmu_v1: mmu.h update Andrew Morton
  2009-04-29  5:42               ` [PATCH 07/30] microblaze_mmu_v1: MMU initialization Andrew Morton
  1 sibling, 2 replies; 81+ messages in thread
From: monstr @ 2009-04-27  8:31 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/include/asm/mmu.h |  107 ++++++++++++++++++++++++++++++++++++-
 1 files changed, 105 insertions(+), 2 deletions(-)

diff --git a/arch/microblaze/include/asm/mmu.h b/arch/microblaze/include/asm/mmu.h
index 0e0431d..0ee2feb 100644
--- a/arch/microblaze/include/asm/mmu.h
+++ b/arch/microblaze/include/asm/mmu.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -9,11 +11,112 @@
 #ifndef _ASM_MICROBLAZE_MMU_H
 #define _ASM_MICROBLAZE_MMU_H
 
-#ifndef __ASSEMBLY__
+# ifndef CONFIG_MMU
+#  ifndef __ASSEMBLY__
 typedef struct {
 	struct vm_list_struct	*vmlist;
 	unsigned long		end_brk;
 } mm_context_t;
-#endif /* __ASSEMBLY__ */
+#  endif /* __ASSEMBLY__ */
+# else /* CONFIG_MMU */
+#  ifdef __KERNEL__
+#   ifndef __ASSEMBLY__
 
+/* Default "unsigned long" context */
+typedef unsigned long mm_context_t;
+
+/* Hardware Page Table Entry */
+typedef struct _PTE {
+	unsigned long    v:1;	/* Entry is valid */
+	unsigned long vsid:24;	/* Virtual segment identifier */
+	unsigned long    h:1;	/* Hash algorithm indicator */
+	unsigned long  api:6;	/* Abbreviated page index */
+	unsigned long  rpn:20;	/* Real (physical) page number */
+	unsigned long     :3;	/* Unused */
+	unsigned long    r:1;	/* Referenced */
+	unsigned long    c:1;	/* Changed */
+	unsigned long    w:1;	/* Write-thru cache mode */
+	unsigned long    i:1;	/* Cache inhibited */
+	unsigned long    m:1;	/* Memory coherence */
+	unsigned long    g:1;	/* Guarded */
+	unsigned long     :1;	/* Unused */
+	unsigned long   pp:2;	/* Page protection */
+} PTE;
+
+extern PTE *Hash, *Hash_end;
+extern unsigned long Hash_size, Hash_mask;
+
+/* Values for PP (assumes Ks=0, Kp=1) */
+#  define PP_RWXX	0 /* Supervisor read/write, User none */
+#  define PP_RWRX	1 /* Supervisor read/write, User read */
+#  define PP_RWRW	2 /* Supervisor read/write, User read/write */
+#  define PP_RXRX	3 /* Supervisor read,       User read */
+
+/* Segment Register */
+typedef struct _SEGREG {
+	unsigned long    t:1;	/* Normal or I/O  type */
+	unsigned long   ks:1;	/* Supervisor 'key' (normally 0) */
+	unsigned long   kp:1;	/* User 'key' (normally 1) */
+	unsigned long    n:1;	/* No-execute */
+	unsigned long     :4;	/* Unused */
+	unsigned long vsid:24;	/* Virtual Segment Identifier */
+} SEGREG;
+
+extern void _tlbie(unsigned long va);	/* invalidate a TLB entry */
+extern void _tlbia(void);		/* invalidate all TLB entries */
+#   endif /* __ASSEMBLY__ */
+
+/*
+ * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
+ * instruction and data sides share a unified, 64-entry, semi-associative
+ * TLB which is maintained totally under software control. In addition, the
+ * instruction side has a hardware-managed, 2,4, or 8-entry, fully-associative
+ * TLB which serves as a first level to the shared TLB. These two TLBs are
+ * known as the UTLB and ITLB, respectively.
+ */
+
+#  define MICROBLAZE_TLB_SIZE 64
+
+/*
+ * TLB entries are defined by a "high" tag portion and a "low" data
+ * portion. The data portion is 32-bits.
+ *
+ * TLB entries are managed entirely under software control by reading,
+ * writing, and searching using the MTS and MFS instructions.
+ */
+
+#  define TLB_LO		1
+#  define TLB_HI		0
+#  define TLB_DATA		TLB_LO
+#  define TLB_TAG		TLB_HI
+
+/* Tag portion */
+#  define TLB_EPN_MASK		0xFFFFFC00 /* Effective Page Number */
+#  define TLB_PAGESZ_MASK	0x00000380
+#  define TLB_PAGESZ(x)		(((x) & 0x7) << 7)
+#  define PAGESZ_1K		0
+#  define PAGESZ_4K		1
+#  define PAGESZ_16K		2
+#  define PAGESZ_64K		3
+#  define PAGESZ_256K		4
+#  define PAGESZ_1M		5
+#  define PAGESZ_4M		6
+#  define PAGESZ_16M		7
+#  define TLB_VALID		0x00000040 /* Entry is valid */
+
+/* Data portion */
+#  define TLB_RPN_MASK		0xFFFFFC00 /* Real Page Number */
+#  define TLB_PERM_MASK		0x00000300
+#  define TLB_EX		0x00000200 /* Instruction execution allowed */
+#  define TLB_WR		0x00000100 /* Writes permitted */
+#  define TLB_ZSEL_MASK		0x000000F0
+#  define TLB_ZSEL(x)		(((x) & 0xF) << 4)
+#  define TLB_ATTR_MASK		0x0000000F
+#  define TLB_W			0x00000008 /* Caching is write-through */
+#  define TLB_I			0x00000004 /* Caching is inhibited */
+#  define TLB_M			0x00000002 /* Memory is coherent */
+#  define TLB_G			0x00000001 /* Memory is guarded from prefetch */
+
+#  endif /* __KERNEL__ */
+# endif /* CONFIG_MMU */
 #endif /* _ASM_MICROBLAZE_MMU_H */
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread
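
The TLB_* masks above make it easy to decode an entry by hand. A small sketch
that takes the tag/data pair head.S loads into slot 62 (0xc00003c0/0x90000300
for the default 0xc0000000 -> 0x90000000 mapping) and prints the fields back
out; only the bit positions come from this header, the decode helper itself is
illustrative:

#include <stdio.h>

/* Bit positions from the asm/mmu.h hunk above. */
#define TLB_EPN_MASK	0xFFFFFC00
#define TLB_PAGESZ_MASK	0x00000380
#define TLB_VALID	0x00000040
#define TLB_RPN_MASK	0xFFFFFC00
#define TLB_EX		0x00000200
#define TLB_WR		0x00000100

int main(void)
{
	unsigned long tag  = 0xc00003c0UL;	/* slot 62 tag word */
	unsigned long data = 0x90000300UL;	/* slot 62 data word */

	/* PAGESZ_1K..PAGESZ_16M encode a size of 1 kB << (2 * index). */
	unsigned int idx = (tag & TLB_PAGESZ_MASK) >> 7;
	unsigned long size = 1024UL << (2 * idx);

	printf("EPN 0x%08lx -> RPN 0x%08lx, %lu MB, %svalid, EX=%d WR=%d\n",
	       tag & TLB_EPN_MASK, data & TLB_RPN_MASK, size >> 20,
	       (tag & TLB_VALID) ? "" : "in", !!(data & TLB_EX),
	       !!(data & TLB_WR));
	return 0;
}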

* [PATCH 09/30] microblaze_mmu_v1: Page fault handling high level - fault.c
  2009-04-27  8:31               ` [PATCH 08/30] microblaze_mmu_v1: mmu.h update monstr
@ 2009-04-27  8:31                 ` monstr
  2009-04-27  8:31                   ` [PATCH 10/30] microblaze_mmu_v1: Context handling - mmu_context.c/h monstr
  2009-04-29  5:46                   ` [PATCH 09/30] microblaze_mmu_v1: Page fault handling high level - fault.c Andrew Morton
  2009-04-29  5:44                 ` [PATCH 08/30] microblaze_mmu_v1: mmu.h update Andrew Morton
  1 sibling, 2 replies; 81+ messages in thread
From: monstr @ 2009-04-27  8:31 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/mm/fault.c |  304 ++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 304 insertions(+), 0 deletions(-)
 create mode 100644 arch/microblaze/mm/fault.c

diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
new file mode 100644
index 0000000..5826ef0
--- /dev/null
+++ b/arch/microblaze/mm/fault.c
@@ -0,0 +1,304 @@
+/*
+ *  arch/microblaze/mm/fault.c
+ *
+ *    Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
+ *
+ *  Derived from "arch/ppc/mm/fault.c"
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Derived from "arch/i386/mm/fault.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  Modified by Cort Dougan and Paul Mackerras.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+#include <asm/system.h>
+#include <linux/uaccess.h>
+#include <asm/exceptions.h>
+
+#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
+int debugger_kernel_faults = 1;
+#endif
+
+static unsigned long pte_misses;	/* updated by do_page_fault() */
+static unsigned long pte_errors;	/* updated by do_page_fault() */
+
+/*
+ * Check whether the instruction at regs->pc is a store using
+ * an update addressing form which will update r1.
+ */
+static int store_updates_sp(struct pt_regs *regs)
+{
+	unsigned int inst;
+
+	if (get_user(inst, (unsigned int *)regs->pc))
+		return 0;
+	/* check for 1 in the rD field */
+	if (((inst >> 21) & 0x1f) != 1)
+		return 0;
+	/* check for store opcodes */
+	if ((inst & 0xd0000000) == 0xd0000000)
+		return 1;
+	return 0;
+}
+
+
+/*
+ * bad_page_fault is called when we have a bad access from the kernel.
+ * It is called from do_page_fault below and from some of the procedures
+ * in traps.c.
+ */
+static void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
+{
+	const struct exception_table_entry *fixup;
+/* MS: no context */
+	/* Are we prepared to handle this fault?  */
+	fixup = search_exception_tables(regs->pc);
+	if (fixup) {
+		regs->pc = fixup->fixup;
+		return;
+	}
+
+	/* kernel has accessed a bad area */
+#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
+	if (debugger_kernel_faults)
+		debugger(regs);
+#endif
+	die("kernel access of bad area", regs, sig);
+}
+
+/*
+ * The error_code parameter is ESR for a data fault,
+ * 0 for an instruction fault.
+ */
+void do_page_fault(struct pt_regs *regs, unsigned long address,
+		   unsigned long error_code)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	siginfo_t info;
+	int code = SEGV_MAPERR;
+	int is_write = error_code & ESR_S;
+	int fault;
+
+	regs->ear = address;
+	regs->esr = error_code;
+
+	/* On a kernel SLB miss we can only check for a valid exception entry */
+	if (kernel_mode(regs) && (address >= TASK_SIZE)) {
+		printk(KERN_WARNING "kernel task_size exceed");
+		_exception(SIGSEGV, regs, code, address);
+	}
+
+	/* for instr TLB miss and instr storage exception ESR_S is undefined */
+	if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
+		is_write = 0;
+
+#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
+	if (debugger_fault_handler && regs->trap == 0x300) {
+		debugger_fault_handler(regs);
+		return;
+	}
+#endif /* CONFIG_XMON || CONFIG_KGDB */
+
+	if (in_atomic() || mm == NULL) {
+		/* FIXME */
+		if (kernel_mode(regs)) {
+			printk(KERN_EMERG
+				"Page fault in kernel mode - Oooou!!! pid %d\n",
+				current->pid);
+			_exception(SIGSEGV, regs, code, address);
+			return;
+		}
+		/* in_atomic() in user mode is really bad,
+		   as is current->mm == NULL. */
+		printk(KERN_EMERG "Page fault in user mode with "
+		       "in_atomic(), mm = %p\n", mm);
+		printk(KERN_EMERG "r15 = %lx  MSR = %lx\n",
+		       regs->r15, regs->msr);
+		die("Weird page fault", regs, SIGSEGV);
+	}
+
+	/* When running in the kernel we expect faults to occur only to
+	 * addresses in user space.  All other faults represent errors in the
+	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
+	 * erroneous fault occurring in a code path which already holds mmap_sem
+	 * we will deadlock attempting to validate the fault against the
+	 * address space.  Luckily the kernel only validly references user
+	 * space from well defined areas of code, which are listed in the
+	 * exceptions table.
+	 *
+	 * As the vast majority of faults will be valid we will only perform
+	 * the source reference check when there is a possibility of a deadlock.
+	 * Attempt to lock the address space, if we cannot we then validate the
+	 * source.  If this is invalid we can skip the address space check,
+	 * thus avoiding the deadlock.
+	 */
+	if (!down_read_trylock(&mm->mmap_sem)) {
+		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
+			goto bad_area_nosemaphore;
+
+		down_read(&mm->mmap_sem);
+	}
+
+	vma = find_vma(mm, address);
+	if (!vma)
+		goto bad_area;
+
+	if (vma->vm_start <= address)
+		goto good_area;
+
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+
+	if (!is_write)
+		goto bad_area;
+
+	/*
+	 * N.B. The ABI allows programs to access up to
+	 * a few hundred bytes below the stack pointer (TBD).
+	 * The kernel signal delivery code writes up to about 1.5kB
+	 * below the stack pointer (r1) before decrementing it.
+	 * The exec code can write slightly over 640kB to the stack
+	 * before setting the user r1.  Thus we allow the stack to
+	 * expand to 1MB without further checks.
+	 */
+	if (address + 0x100000 < vma->vm_end) {
+
+		/* get user regs even if this fault is in kernel mode */
+		struct pt_regs *uregs = current->thread.regs;
+		if (uregs == NULL)
+			goto bad_area;
+
+		/*
+		 * A user-mode access to an address a long way below
+		 * the stack pointer is only valid if the instruction
+		 * is one which would update the stack pointer to the
+		 * address accessed if the instruction completed,
+		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
+		 * (or the byte, halfword, float or double forms).
+		 *
+		 * If we don't check this then any write to the area
+		 * between the last mapped region and the stack will
+		 * expand the stack rather than segfaulting.
+		 */
+		if (address + 2048 < uregs->r1
+			&& (kernel_mode(regs) || !store_updates_sp(regs)))
+				goto bad_area;
+	}
+	if (expand_stack(vma, address))
+		goto bad_area;
+
+good_area:
+	code = SEGV_ACCERR;
+
+	/* a write */
+	if (is_write) {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+	/* a read */
+	} else {
+		/* protection fault */
+		if (error_code & 0x08000000)
+			goto bad_area;
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			goto bad_area;
+	}
+
+	/*
+	 * If for any reason at all we couldn't handle the fault,
+	 * make sure we exit gracefully rather than endlessly redo
+	 * the fault.
+	 */
+survive:
+	fault = handle_mm_fault(mm, vma, address, is_write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
+	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
+	up_read(&mm->mmap_sem);
+	/*
+	 * keep track of tlb+htab misses that are good addrs but
+	 * just need pte's created via handle_mm_fault()
+	 * -- Cort
+	 */
+	pte_misses++;
+	return;
+
+bad_area:
+	up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+	pte_errors++;
+
+	/* User mode accesses cause a SIGSEGV */
+	if (user_mode(regs)) {
+		_exception(SIGSEGV, regs, code, address);
+/*		info.si_signo = SIGSEGV;
+		info.si_errno = 0;
+		info.si_code = code;
+		info.si_addr = (void *) address;
+		force_sig_info(SIGSEGV, &info, current);*/
+		return;
+	}
+
+	bad_page_fault(regs, address, SIGSEGV);
+	return;
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+	if (current->pid == 1) {
+		yield();
+		down_read(&mm->mmap_sem);
+		goto survive;
+	}
+	up_read(&mm->mmap_sem);
+	printk(KERN_WARNING "VM: killing process %s\n", current->comm);
+	if (user_mode(regs))
+		do_exit(SIGKILL);
+	bad_page_fault(regs, address, SIGKILL);
+	return;
+
+do_sigbus:
+	up_read(&mm->mmap_sem);
+	if (user_mode(regs)) {
+		info.si_signo = SIGBUS;
+		info.si_errno = 0;
+		info.si_code = BUS_ADRERR;
+		info.si_addr = (void __user *)address;
+		force_sig_info(SIGBUS, &info, current);
+		return;
+	}
+	bad_page_fault(regs, address, SIGBUS);
+}
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread
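
To make the stack-growth policy in do_page_fault() above easier to follow,
here is a simplified, userspace-style restatement of just that decision
(illustrative only: it ignores the VM_GROWSDOWN/is_write pre-checks and the
kernel-mode case, and none of these names are kernel API).

/*
 * addr    - faulting address
 * vm_end  - end of the stack vma the address falls below
 * r1      - user stack pointer at the time of the fault
 * is_push - nonzero if the faulting instruction is a store that updates
 *           r1, i.e. what store_updates_sp() detects
 */
static int may_grow_stack(unsigned long addr, unsigned long vm_end,
			  unsigned long r1, int is_push)
{
	if (addr + 0x100000 < vm_end) {		/* more than 1MB below vm_end */
		if (addr + 2048 < r1 && !is_push)
			return 0;		/* stray write, do not expand */
	}
	return 1;				/* otherwise try expand_stack() */
}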

* [PATCH 10/30] microblaze_mmu_v1: Context handling - mmu_context.c/h
  2009-04-27  8:31                 ` [PATCH 09/30] microblaze_mmu_v1: Page fault handling high level - fault.c monstr
@ 2009-04-27  8:31                   ` monstr
  2009-04-27  8:32                     ` [PATCH 11/30] microblaze_mmu_v1: Page table - ioremap - pgtable.c/h, section update monstr
                                       ` (2 more replies)
  2009-04-29  5:46                   ` [PATCH 09/30] microblaze_mmu_v1: Page fault handling high level - fault.c Andrew Morton
  1 sibling, 3 replies; 81+ messages in thread
From: monstr @ 2009-04-27  8:31 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/include/asm/mmu_context.h |  134 +++++++++++++++++++++++++++++
 arch/microblaze/mm/mmu_context.c          |   70 +++++++++++++++
 2 files changed, 204 insertions(+), 0 deletions(-)
 create mode 100644 arch/microblaze/mm/mmu_context.c

diff --git a/arch/microblaze/include/asm/mmu_context.h b/arch/microblaze/include/asm/mmu_context.h
index 150ca01..2f49649 100644
--- a/arch/microblaze/include/asm/mmu_context.h
+++ b/arch/microblaze/include/asm/mmu_context.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -9,6 +11,7 @@
 #ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
 #define _ASM_MICROBLAZE_MMU_CONTEXT_H
 
+# ifndef CONFIG_MMU
 # define init_new_context(tsk, mm)		({ 0; })
 
 # define enter_lazy_tlb(mm, tsk)		do {} while (0)
@@ -18,4 +21,135 @@
 # define switch_mm(prev, next, tsk)		do {} while (0)
 # define activate_mm(prev, next)		do {} while (0)
 
+# else /* CONFIG_MMU */
+
+# include <asm/atomic.h>
+# include <asm/bitops.h>
+# include <asm/mmu.h>
+# include <asm-generic/mm_hooks.h>
+
+#  ifdef __KERNEL__
+/*
+ * This function defines the mapping from contexts to VSIDs (virtual
+ * segment IDs).  We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table.  Note, if this
+ * function is changed then arch/ppc/mm/hashtable.S will have to be
+ * changed to correspond.
+ */
+#  define CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
+				 & 0xffffff)
+
+/*
+   MicroBlaze has 256 contexts, so we can just rotate through these
+   as a way of "switching" contexts.  If the TID of the TLB is zero,
+   the PID/TID comparison is disabled, so we can use a TID of zero
+   to represent all kernel pages as shared among all contexts.
+ */
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+#  define NO_CONTEXT	256
+#  define LAST_CONTEXT	255
+#  define FIRST_CONTEXT	1
+
+/*
+ * Set the current MMU context.
+ * This is done by loading up the segment registers for the user part of the
+ * address space.
+ *
+ * Since the PGD is immediately available, it is much faster to simply
+ * pass this along as a second parameter, which is required for 8xx and
+ * can be used for debugging on all processors (if you happen to have
+ * an Abatron).
+ */
+extern void set_context(mm_context_t context, pgd_t *pgd);
+
+/*
+ * Bitmap of contexts in use.
+ * The size of this bitmap is LAST_CONTEXT + 1 bits.
+ */
+extern unsigned long context_map[];
+
+/*
+ * This caches the next context number that we expect to be free.
+ * Its use is an optimization only, we can't rely on this context
+ * number to be free, but it usually will be.
+ */
+extern mm_context_t next_mmu_context;
+
+/*
+ * Since we don't have sufficient contexts to give one to every task
+ * that could be in the system, we need to be able to steal contexts.
+ * These variables support that.
+ */
+extern atomic_t nr_free_contexts;
+extern struct mm_struct *context_mm[LAST_CONTEXT+1];
+extern void steal_context(void);
+
+/*
+ * Get a new mmu context for the address space described by `mm'.
+ */
+static inline void get_mmu_context(struct mm_struct *mm)
+{
+	mm_context_t ctx;
+
+	if (mm->context != NO_CONTEXT)
+		return;
+	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
+		steal_context();
+	ctx = next_mmu_context;
+	while (test_and_set_bit(ctx, context_map)) {
+		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
+		if (ctx > LAST_CONTEXT)
+			ctx = 0;
+	}
+	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
+	mm->context = ctx;
+	context_mm[ctx] = mm;
+}
+
+/*
+ * Set up the context for a new address space.
+ */
+#  define init_new_context(tsk, mm)	(((mm)->context = NO_CONTEXT), 0)
+
+/*
+ * We're finished using the context for an address space.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+	if (mm->context != NO_CONTEXT) {
+		clear_bit(mm->context, context_map);
+		mm->context = NO_CONTEXT;
+		atomic_inc(&nr_free_contexts);
+	}
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	tsk->thread.pgdir = next->pgd;
+	get_mmu_context(next);
+	set_context(next->context, next->pgd);
+}
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+static inline void activate_mm(struct mm_struct *active_mm,
+			struct mm_struct *mm)
+{
+	current->thread.pgdir = mm->pgd;
+	get_mmu_context(mm);
+	set_context(mm->context, mm->pgd);
+}
+
+extern void mmu_context_init(void);
+
+#  endif /* __KERNEL__ */
+# endif /* CONFIG_MMU */
 #endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
diff --git a/arch/microblaze/mm/mmu_context.c b/arch/microblaze/mm/mmu_context.c
new file mode 100644
index 0000000..26ff82f
--- /dev/null
+++ b/arch/microblaze/mm/mmu_context.c
@@ -0,0 +1,70 @@
+/*
+ * This file contains the routines for handling the MMU.
+ *
+ *    Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
+ *
+ *  Derived from arch/ppc/mm/4xx_mmu.c:
+ *  -- paulus
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+
+mm_context_t next_mmu_context;
+unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+atomic_t nr_free_contexts;
+struct mm_struct *context_mm[LAST_CONTEXT+1];
+
+/*
+ * Initialize the context management stuff.
+ */
+void __init mmu_context_init(void)
+{
+	/*
+	 * The use of context zero is reserved for the kernel.
+	 * This code assumes FIRST_CONTEXT < 32.
+	 */
+	context_map[0] = (1 << FIRST_CONTEXT) - 1;
+	next_mmu_context = FIRST_CONTEXT;
+	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
+}
+
+/*
+ * Steal a context from a task that has one at the moment.
+ *
+ * This isn't an LRU system, it just frees up each context in
+ * turn (sort-of pseudo-random replacement :).  This would be the
+ * place to implement an LRU scheme if anyone were motivated to do it.
+ */
+void steal_context(void)
+{
+	struct mm_struct *mm;
+
+	/* free up context `next_mmu_context' */
+	/* if we shouldn't free context 0, don't... */
+	if (next_mmu_context < FIRST_CONTEXT)
+		next_mmu_context = FIRST_CONTEXT;
+	mm = context_mm[next_mmu_context];
+	flush_tlb_mm(mm);
+	destroy_context(mm);
+}
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread
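
For anyone unfamiliar with the 4xx-style context rotation, the fragment
below is a small userspace model of what get_mmu_context() and
steal_context() above do with the 255 usable contexts (illustrative only:
plain arrays stand in for the kernel's bitmap helpers, there is no locking,
and the flush of the victim mm is reduced to a comment).

#define LAST_CTX	255
#define FIRST_CTX	1

static unsigned char ctx_in_use[LAST_CTX + 1] = { 1 };	/* [0] is the kernel's */
static unsigned int next_ctx = FIRST_CTX;
static int free_ctx = LAST_CTX - FIRST_CTX + 1;

static unsigned int alloc_context(void)
{
	unsigned int ctx;

	if (free_ctx <= 0) {			/* models steal_context() */
		if (next_ctx < FIRST_CTX)
			next_ctx = FIRST_CTX;
		ctx_in_use[next_ctx] = 0;	/* victim mm would get its TLB flushed */
		free_ctx++;
	}
	free_ctx--;

	ctx = next_ctx;
	while (ctx_in_use[ctx])			/* find_next_zero_bit() stand-in */
		if (++ctx > LAST_CTX)
			ctx = FIRST_CTX;
	ctx_in_use[ctx] = 1;
	next_ctx = (ctx + 1) & LAST_CTX;	/* may wrap to 0; fixed up above */
	return ctx;
}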

* [PATCH 11/30] microblaze_mmu_v1: Page table - ioremap - pgtable.c/h, section update
  2009-04-27  8:31                   ` [PATCH 10/30] microblaze_mmu_v1: Context handling - mmu_context.c/h monstr
@ 2009-04-27  8:32                     ` monstr
  2009-04-27  8:32                       ` [PATCH 12/30] microblaze_mmu_v1: io.h MMU update monstr
  2009-04-27 10:55                     ` [PATCH 10/30] microblaze_mmu_v1: Context handling - mmu_context.c/h Arnd Bergmann
  2009-04-27 11:18                     ` Geert Uytterhoeven
  2 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/include/asm/pgtable.h  |  534 ++++++++++++++++++++++++++++++++
 arch/microblaze/include/asm/sections.h |    3 +
 arch/microblaze/mm/pgtable.c           |  286 +++++++++++++++++
 3 files changed, 823 insertions(+), 0 deletions(-)
 create mode 100644 arch/microblaze/mm/pgtable.c

diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 4df31e4..d1e434d 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -14,6 +16,8 @@
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
 		remap_pfn_range(vma, vaddr, pfn, size, prot)
 
+#ifndef CONFIG_MMU
+
 #define pgd_present(pgd)	(1) /* pages are always present on non MMU */
 #define pgd_none(pgd)		(0)
 #define pgd_bad(pgd)		(0)
@@ -45,6 +49,536 @@ static inline int pte_file(pte_t pte) { return 0; }
 
 #define arch_enter_lazy_cpu_mode()	do {} while (0)
 
+#else /* CONFIG_MMU */
+
+#include <asm-generic/4level-fixup.h>
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <linux/threads.h>
+#include <asm/processor.h>		/* For TASK_SIZE */
+#include <asm/mmu.h>
+#include <asm/page.h>
+
+#define FIRST_USER_ADDRESS	0
+
+extern unsigned long va_to_phys(unsigned long address);
+extern pte_t *va_to_pte(unsigned long address);
+extern unsigned long ioremap_bot, ioremap_base;
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+
+static inline int pte_special(pte_t pte)	{ return 0; }
+
+static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
+
+/* Start and end of the vmalloc area. */
+extern unsigned int memory_size;
+#define VMALLOC_START	(CONFIG_KERNEL_START + memory_size)
+#define VMALLOC_END	ioremap_bot
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
+ * table containing PTEs, together with a set of 16 segment registers, to
+ * define the virtual to physical address mapping.
+ *
+ * We use the hash table as an extended TLB, i.e. a cache of currently
+ * active mappings.  We maintain a two-level page table tree, much
+ * like that used by the i386, for the sake of the Linux memory
+ * management code.  Low-level assembler code in hashtable.S
+ * (procedure hash_page) is responsible for extracting ptes from the
+ * tree and putting them into the hash table when necessary, and
+ * updating the accessed and modified bits in the page table tree.
+ */
+
+/*
+ * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
+ * instruction and data sides share a unified, 64-entry, semi-associative
+ * TLB which is maintained totally under software control. In addition, the
+ * instruction side has a hardware-managed, 2-, 4-, or 8-entry, fully-associative
+ * TLB which serves as a first level to the shared TLB. These two TLBs are
+ * known as the UTLB and ITLB, respectively (see "mmu.h" for definitions).
+ */
+
+/*
+ * The normal case is that PTEs are 32-bits and we have a 1-page
+ * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
+ *
+ */
+
+/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
+#define PMD_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
+#define PMD_SIZE	(1UL << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a top-level page table entry can map */
+#define PGDIR_SHIFT	PMD_SHIFT
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+
+/*
+ * entries per page directory level: our page-table tree is two-level, so
+ * we don't really have any PMD directory.
+ */
+#define PTRS_PER_PTE	(1 << PTE_SHIFT)
+#define PTRS_PER_PMD	1
+#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
+
+#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
+#define FIRST_USER_PGD_NR	0
+
+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
+
+#define pte_ERROR(e) \
+	printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
+		__FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+	printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
+		__FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
+		__FILE__, __LINE__, pgd_val(e))
+
+/*
+ * Bits in a linux-style PTE.  These match the bits in the
+ * (hardware-defined) PTE as closely as possible.
+ */
+
+/* There are several potential gotchas here.  The hardware TLBLO
+ * field looks like this:
+ *
+ * 0  1  2  3  4  ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ * RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G
+ *
+ * Where possible we make the Linux PTE bits match up with this
+ *
+ * - bits 20 and 21 must be cleared, because we use 4k pages (4xx can
+ * support down to 1k pages), this is done in the TLBMiss exception
+ * handler.
+ * - We use only zones 0 (for kernel pages) and 1 (for user pages)
+ * of the 16 available.  Bits 24-26 of the TLB are cleared in the TLB
+ * miss handler.  Bit 27 is PAGE_USER, thus selecting the correct
+ * zone.
+ * - PRESENT *must* be in the bottom two bits because swap cache
+ * entries use the top 30 bits.  Because 4xx doesn't support SMP
+ * anyway, M is irrelevant so we borrow it for PAGE_PRESENT.  Bit 30
+ * is cleared in the TLB miss handler before the TLB entry is loaded.
+ * - All other bits of the PTE are loaded into TLBLO without
+ * modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
+ * software PTE bits.  We actually use bits 21, 24, 25, and
+ * 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
+ * PRESENT.
+ */
+
+/* Definitions for MicroBlaze. */
+#define	_PAGE_GUARDED	0x001	/* G: page is guarded from prefetch */
+#define _PAGE_PRESENT	0x002	/* software: PTE contains a translation */
+#define	_PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
+#define	_PAGE_WRITETHRU	0x008	/* W: caching is write-through */
+#define	_PAGE_USER	0x010	/* matches one of the zone permission bits */
+#define	_PAGE_RW	0x040	/* software: Writes permitted */
+#define	_PAGE_DIRTY	0x080	/* software: dirty page */
+#define _PAGE_HWWRITE	0x100	/* hardware: Dirty & RW, set in exception */
+#define _PAGE_HWEXEC	0x200	/* hardware: EX permission */
+#define _PAGE_ACCESSED	0x400	/* software: R: page referenced */
+#define _PMD_PRESENT	PAGE_MASK
+
+/*
+ * Some bits are unused...
+ */
+#ifndef _PAGE_HASHPTE
+#define _PAGE_HASHPTE	0
+#endif
+#ifndef _PTE_NONE_MASK
+#define _PTE_NONE_MASK	0
+#endif
+#ifndef _PAGE_SHARED
+#define _PAGE_SHARED	0
+#endif
+#ifndef _PAGE_HWWRITE
+#define _PAGE_HWWRITE	0
+#endif
+#ifndef _PAGE_HWEXEC
+#define _PAGE_HWEXEC	0
+#endif
+#ifndef _PAGE_EXEC
+#define _PAGE_EXEC	0
+#endif
+
+#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+/*
+ * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
+ * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
+ * to have it in the Linux PTE, and in fact the bit could be reused for
+ * another purpose.  -- paulus.
+ */
+#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
+
+#define _PAGE_KERNEL \
+	(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)
+
+#define _PAGE_IO	(_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
+
+#define PAGE_NONE	__pgprot(_PAGE_BASE)
+#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X \
+		__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_SHARED)
+#define PAGE_KERNEL_CI	__pgprot(_PAGE_IO)
+
+/*
+ * We consider execute permission the same as read.
+ * Also, write permissions imply read permissions.
+ */
+#define __P000	PAGE_NONE
+#define __P001	PAGE_READONLY_X
+#define __P010	PAGE_COPY
+#define __P011	PAGE_COPY_X
+#define __P100	PAGE_READONLY
+#define __P101	PAGE_READONLY_X
+#define __P110	PAGE_COPY
+#define __P111	PAGE_COPY_X
+
+#define __S000	PAGE_NONE
+#define __S001	PAGE_READONLY_X
+#define __S010	PAGE_SHARED
+#define __S011	PAGE_SHARED_X
+#define __S100	PAGE_READONLY
+#define __S101	PAGE_READONLY_X
+#define __S110	PAGE_SHARED
+#define __S111	PAGE_SHARED_X
+
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[1024];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#endif /* __ASSEMBLY__ */
+
+#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
+#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
+#define pte_clear(mm, addr, ptep) \
+	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)
+
+#define pmd_none(pmd)		(!pmd_val(pmd))
+#define	pmd_bad(pmd)		((pmd_val(pmd) & _PMD_PRESENT) == 0)
+#define	pmd_present(pmd)	((pmd_val(pmd) & _PMD_PRESENT) != 0)
+#define	pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)
+
+#define pte_page(x)		(mem_map + (unsigned long) \
+				((pte_val(x) - memory_start) >> PAGE_SHIFT))
+#define PFN_SHIFT_OFFSET	(PAGE_SHIFT)
+
+#define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)
+
+#define pfn_pte(pfn, prot) \
+	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
+
+#ifndef __ASSEMBLY__
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd)		{ return 0; }
+static inline int pgd_bad(pgd_t pgd)		{ return 0; }
+static inline int pgd_present(pgd_t pgd)	{ return 1; }
+#define pgd_clear(xp)				do { } while (0)
+#define pgd_page(pgd) \
+	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER; }
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
+static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+/* FIXME */
+static inline int pte_file(pte_t pte)		{ return 0; }
+
+static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
+static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }
+
+static inline pte_t pte_rdprotect(pte_t pte) \
+		{ pte_val(pte) &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte) \
+	{ pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
+static inline pte_t pte_exprotect(pte_t pte) \
+	{ pte_val(pte) &= ~_PAGE_EXEC; return pte; }
+static inline pte_t pte_mkclean(pte_t pte) \
+	{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
+static inline pte_t pte_mkold(pte_t pte) \
+	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+
+static inline pte_t pte_mkread(pte_t pte) \
+	{ pte_val(pte) |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkexec(pte_t pte) \
+	{ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) \
+	{ pte_val(pte) |= _PAGE_RW; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) \
+	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) \
+	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
+{
+	pte_t pte;
+	pte_val(pte) = physpage | pgprot_val(pgprot);
+	return pte;
+}
+
+#define mk_pte(page, pgprot) \
+({									   \
+	pte_t pte;							   \
+	pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) |  \
+			pgprot_val(pgprot);				   \
+	pte;								   \
+})
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+	return pte;
+}
+
+/*
+ * Atomic PTE updates.
+ *
+ * pte_update clears and sets bits atomically, and returns
+ * the old pte value.
+ * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
+ * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
+ */
+static inline unsigned long pte_update(pte_t *p, unsigned long clr,
+				unsigned long set)
+{
+	unsigned long old, tmp, msr;
+
+	__asm__ __volatile__("\
+	msrclr	%2, 0x2\n\
+	nop\n\
+	lw	%0, %4, r0\n\
+	andn	%1, %0, %5\n\
+	or	%1, %1, %6\n\
+	sw	%1, %4, r0\n\
+	mts     rmsr, %2\n\
+	nop"
+	: "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
+	: "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p)
+	: "cc");
+
+	return old;
+}
+
+/*
+ * set_pte stores a linux PTE into the linux page table.
+ */
+static inline void set_pte(struct mm_struct *mm, unsigned long addr,
+		pte_t *ptep, pte_t pte)
+{
+	*ptep = pte;
+}
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+		pte_t *ptep, pte_t pte)
+{
+	*ptep = pte;
+}
+
+static inline int ptep_test_and_clear_young(struct mm_struct *mm,
+		unsigned long addr, pte_t *ptep)
+{
+	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
+}
+
+static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
+		unsigned long addr, pte_t *ptep)
+{
+	return (pte_update(ptep, \
+		(_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
+}
+
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+		unsigned long addr, pte_t *ptep)
+{
+	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
+}
+
+/*static inline void ptep_set_wrprotect(struct mm_struct *mm,
+		unsigned long addr, pte_t *ptep)
+{
+	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
+}*/
+
+static inline void ptep_mkdirty(struct mm_struct *mm,
+		unsigned long addr, pte_t *ptep)
+{
+	pte_update(ptep, 0, _PAGE_DIRTY);
+}
+
+/*#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)*/
+
+/* Convert pmd entry to page */
+/* our pmd entry is an effective address of pte table*/
+/* returns effective address of the pmd entry*/
+#define pmd_page_kernel(pmd)	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
+
+/* returns struct *page of the pmd entry*/
+#define pmd_page(pmd)	(pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
+#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
+
+/* Find an entry in the second-level page table.. */
+static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
+{
+	return (pmd_t *) dir;
+}
+
+/* Find an entry in the third-level page table.. */
+#define pte_index(address)		\
+	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, addr)	\
+	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
+#define pte_offset_map(dir, addr)		\
+	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
+#define pte_offset_map_nested(dir, addr)	\
+	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
+
+#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
+#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
+
+/* Encode and decode a nonlinear file mapping entry */
+#define PTE_FILE_MAX_BITS	29
+#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
+#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) })
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/*
+ * When flushing the tlb entry for a page, we also need to flush the hash
+ * table entry.  flush_hash_page is assembler (for speed) in hashtable.S.
+ */
+extern int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep);
+
+/* Add an HPTE to the hash table */
+extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep);
+
+/*
+ * Encode and decode a swap entry.
+ * Note that the bits we use in a PTE for representing a swap entry
+ * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
+ * (if used).  -- paulus
+ */
+#define __swp_type(entry)		((entry).val & 0x3f)
+#define __swp_offset(entry)	((entry).val >> 6)
+#define __swp_entry(type, offset) \
+		((swp_entry_t) { (type) | ((offset) << 6) })
+#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> 2 })
+#define __swp_entry_to_pte(x)	((pte_t) { (x).val << 2 })
+
+
+/* CONFIG_APUS */
+/* For virtual address to physical address conversion */
+extern void cache_clear(__u32 addr, int length);
+extern void cache_push(__u32 addr, int length);
+extern int mm_end_of_chunk(unsigned long addr, int len);
+extern unsigned long iopa(unsigned long addr);
+/* extern unsigned long mm_ptov(unsigned long addr) \
+	__attribute__ ((const)); TBD */
+
+/* Values for nocacheflag and cmode */
+/* These are not used by the APUS kernel_map, but prevent
+ * compilation errors.
+ */
+#define	IOMAP_FULL_CACHING	0
+#define	IOMAP_NOCACHE_SER	1
+#define	IOMAP_NOCACHE_NONSER	2
+#define	IOMAP_NO_COPYBACK	3
+
+/*
+ * Map some physical address range into the kernel address space.
+ */
+extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
+				int nocacheflag, unsigned long *memavailp);
+
+/*
+ * Set cache mode of (kernel space) address range.
+ */
+extern void kernel_set_cachemode(unsigned long address, unsigned long size,
+				unsigned int cmode);
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+#define kern_addr_valid(addr)	(1)
+
+#define io_remap_page_range remap_page_range
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()	do { } while (0)
+
+void do_page_fault(struct pt_regs *regs, unsigned long address,
+		   unsigned long error_code);
+
+void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
+			     unsigned int size, int flags);
+
+void __init adjust_total_lowmem(void);
+void mapin_ram(void);
+int map_page(unsigned long va, phys_addr_t pa, int flags);
+
+extern int mem_init_done;
+extern unsigned long ioremap_base;
+extern unsigned long ioremap_bot;
+
+asmlinkage void __init MMU_init(void);
+
+void __init *early_get_page(void);
+
+void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
+void consistent_free(void *vaddr);
+void consistent_sync(void *vaddr, size_t size, int direction);
+void consistent_sync_page(struct page *page, unsigned long offset,
+	size_t size, int direction);
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif /* CONFIG_MMU */
+
 #ifndef __ASSEMBLY__
 #include <asm-generic/pgtable.h>
 
diff --git a/arch/microblaze/include/asm/sections.h b/arch/microblaze/include/asm/sections.h
index 8434a43..4487e15 100644
--- a/arch/microblaze/include/asm/sections.h
+++ b/arch/microblaze/include/asm/sections.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -14,6 +16,7 @@
 # ifndef __ASSEMBLY__
 extern char _ssbss[], _esbss[];
 extern unsigned long __ivt_start[], __ivt_end[];
+extern char _etext[], _stext[];
 
 #  ifdef CONFIG_MTD_UCLINUX
 extern char *_ebss;
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
new file mode 100644
index 0000000..46c4ca5
--- /dev/null
+++ b/arch/microblaze/mm/pgtable.c
@@ -0,0 +1,286 @@
+/*
+ *  This file contains the routines setting up the linux page tables.
+ *
+ * Copyright (C) 2008 Michal Simek
+ * Copyright (C) 2008 PetaLogix
+ *
+ *    Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
+ *
+ *  Derived from arch/ppc/mm/pgtable.c:
+ *    -- paulus
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This file is subject to the terms and conditions of the GNU General
+ *  Public License.  See the file COPYING in the main directory of this
+ *  archive for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <linux/io.h>
+#include <asm/mmu.h>
+#include <asm/sections.h>
+
+#define flush_HPTE(X, va, pg)	_tlbie(va)
+
+unsigned long ioremap_base;
+unsigned long ioremap_bot;
+
+/* The maximum lowmem defaults to 768MB, but this can be configured to
+ * another value.
+ */
+#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE
+
+#ifndef CONFIG_SMP
+struct pgtable_cache_struct quicklists;
+#endif
+
+static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
+		unsigned long flags)
+{
+	unsigned long v, i;
+	phys_addr_t p;
+	int err;
+
+	/*
+	 * Choose an address to map it to.
+	 * Once the vmalloc system is running, we use it.
+	 * Before then, we use space going down from ioremap_base
+	 * (ioremap_bot records where we're up to).
+	 */
+	p = addr & PAGE_MASK;
+	size = PAGE_ALIGN(addr + size) - p;
+
+	/*
+	 * Don't allow anybody to remap normal RAM that we're using.
+	 * mem_init() sets high_memory so only do the check after that.
+	 *
+	 * However, allow remap of rootfs: TBD
+	 */
+	if (mem_init_done &&
+		p >= memory_start && p < virt_to_phys(high_memory) &&
+		!(p >= virt_to_phys((unsigned long)&__bss_stop) &&
+		p < virt_to_phys((unsigned long)__bss_stop))) {
+		printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT
+			" is RAM lr %p\n", (unsigned long)p,
+			__builtin_return_address(0));
+		return NULL;
+	}
+
+	if (size == 0)
+		return NULL;
+
+	/*
+	 * Is it already mapped? If the whole area is mapped then we're
+	 * done, otherwise remap it since we want to keep the virt addrs for
+	 * each request contiguous.
+	 *
+	 * We make the assumption here that if the bottom and top
+	 * of the range we want are mapped then it's mapped to the
+	 * same virt address (and this is contiguous).
+	 *  -- Cort
+	 */
+
+	if (mem_init_done) {
+		struct vm_struct *area;
+		area = get_vm_area(size, VM_IOREMAP);
+		if (area == NULL)
+			return NULL;
+		v = VMALLOC_VMADDR(area->addr);
+	} else {
+		v = (ioremap_bot -= size);
+	}
+
+	if ((flags & _PAGE_PRESENT) == 0)
+		flags |= _PAGE_KERNEL;
+	if (flags & _PAGE_NO_CACHE)
+		flags |= _PAGE_GUARDED;
+
+	err = 0;
+	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
+		err = map_page(v + i, p + i, flags);
+	if (err) {
+		if (mem_init_done)
+			vfree((void *)v);
+		return NULL;
+	}
+
+	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
+}
+
+void __iomem *ioremap(phys_addr_t addr, unsigned long size)
+{
+	return __ioremap(addr, size, _PAGE_NO_CACHE);
+}
+EXPORT_SYMBOL(ioremap);
+
+void iounmap(void *addr)
+{
+	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
+		vfree((void *) (PAGE_MASK & (unsigned long) addr));
+}
+EXPORT_SYMBOL(iounmap);
+
+
+int map_page(unsigned long va, phys_addr_t pa, int flags)
+{
+	pmd_t *pd;
+	pte_t *pg;
+	int err = -ENOMEM;
+	/* spin_lock(&init_mm.page_table_lock); */
+	/* Use upper 10 bits of VA to index the first level map */
+	pd = pmd_offset(pgd_offset_k(va), va);
+	/* Use middle 10 bits of VA to index the second-level map */
+	pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
+	/* pg = pte_alloc_kernel(&init_mm, pd, va); */
+
+	if (pg != NULL) {
+		err = 0;
+		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
+				__pgprot(flags)));
+		if (mem_init_done)
+			flush_HPTE(0, va, pmd_val(*pd));
+			/* flush_HPTE(0, va, pg); */
+
+	}
+	/* spin_unlock(&init_mm.page_table_lock); */
+	return err;
+}
+
+void __init adjust_total_lowmem(void)
+{
+/* TBD */
+#if 0
+	unsigned long max_low_mem = MAX_LOW_MEM;
+
+	if (total_lowmem > max_low_mem) {
+		total_lowmem = max_low_mem;
+#ifndef CONFIG_HIGHMEM
+		printk(KERN_INFO "Warning, memory limited to %ld Mb, use "
+				"CONFIG_HIGHMEM to reach %ld Mb\n",
+				max_low_mem >> 20, total_memory >> 20);
+		total_memory = total_lowmem;
+#endif /* CONFIG_HIGHMEM */
+	}
+#endif
+}
+
+static void show_tmem(unsigned long tmem)
+{
+	volatile unsigned long a = 0;
+	a = a + tmem;
+}
+
+/*
+ * Map in all of physical memory starting at CONFIG_KERNEL_START.
+ */
+void __init mapin_ram(void)
+{
+	unsigned long v, p, s, f;
+
+	v = CONFIG_KERNEL_START;
+	p = memory_start;
+	show_tmem(memory_size);
+	for (s = 0; s < memory_size; s += PAGE_SIZE) {
+		f = _PAGE_PRESENT | _PAGE_ACCESSED |
+				_PAGE_SHARED | _PAGE_HWEXEC;
+		if ((char *) v < _stext || (char *) v >= _etext)
+			f |= _PAGE_WRENABLE;
+		else
+			/* On the MicroBlaze, no user access
+			   forces R/W kernel access */
+			f |= _PAGE_USER;
+		map_page(v, p, f);
+		v += PAGE_SIZE;
+		p += PAGE_SIZE;
+	}
+}
+
+/* is x a power of 2? */
+#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))
+
+/*
+ * Set up a mapping for a block of I/O.
+ * virt, phys, size must all be page-aligned.
+ * This should only be called before ioremap is called.
+ */
+void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
+			     unsigned int size, int flags)
+{
+	int i;
+
+	if (virt > CONFIG_KERNEL_START && virt < ioremap_bot)
+		ioremap_bot = ioremap_base = virt;
+
+	/* Put it in the page tables. */
+	for (i = 0; i < size; i += PAGE_SIZE)
+		map_page(virt + i, phys + i, flags);
+}
+
+/* Scan the real Linux page tables and return a PTE pointer for
+ * a virtual address in a context.
+ * Returns true (1) if PTE was found, zero otherwise.  The pointer to
+ * the PTE pointer is unmodified if PTE is not found.
+ */
+static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+{
+	pgd_t	*pgd;
+	pmd_t	*pmd;
+	pte_t	*pte;
+	int     retval = 0;
+
+	pgd = pgd_offset(mm, addr & PAGE_MASK);
+	if (pgd) {
+		pmd = pmd_offset(pgd, addr & PAGE_MASK);
+		if (pmd_present(*pmd)) {
+			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
+			if (pte) {
+				retval = 1;
+				*ptep = pte;
+			}
+		}
+	}
+	return retval;
+}
+
+/* Find physical address for this virtual address.  Normally used by
+ * I/O functions, but anyone can call it.
+ */
+unsigned long iopa(unsigned long addr)
+{
+	unsigned long pa;
+
+	pte_t *pte;
+	struct mm_struct *mm;
+
+	/* Allow mapping of user addresses (within the thread)
+	 * for DMA if necessary.
+	 */
+	if (addr < TASK_SIZE)
+		mm = current->mm;
+	else
+		mm = &init_mm;
+
+	pa = 0;
+	if (get_pteptr(mm, addr, &pte))
+		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
+
+	return pa;
+}
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread
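
A standalone worked example (not part of the patch) of the swap-entry
encoding pgtable.h defines above: the type sits in bits 0-5 of the swap
entry, the offset above it, and the pte form is the same value shifted left
by two so the _PAGE_PRESENT bit (0x002) can never appear in a swap pte.

#include <assert.h>

int main(void)
{
	unsigned long type = 3, offset = 0x1234;

	unsigned long val    = type | (offset << 6);	/* __swp_entry()        */
	unsigned long pteval = val << 2;		/* __swp_entry_to_pte() */

	assert((val & 0x3f) == type);			/* __swp_type()         */
	assert((val >> 6) == offset);			/* __swp_offset()       */
	assert((pteval & 0x3) == 0);			/* PRESENT/GUARDED clear */
	assert((pteval >> 2) == val);			/* __pte_to_swp_entry() */
	return 0;
}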

* [PATCH 12/30] microblaze_mmu_v1: io.h MMU update
  2009-04-27  8:32                     ` [PATCH 11/30] microblaze_mmu_v1: Page table - ioremap - pgtable.c/h, section update monstr
@ 2009-04-27  8:32                       ` monstr
  2009-04-27  8:32                         ` [PATCH 13/30] microblaze_mmu_v1: pgalloc.h and page.h monstr
  0 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/include/asm/io.h |   31 +++++++++++++++++++++++++++++++
 1 files changed, 31 insertions(+), 0 deletions(-)

diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index 8b5853e..5c17342 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -12,6 +14,9 @@
 #include <asm/byteorder.h>
 #include <asm/page.h>
 #include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/mm.h>          /* Get struct page {...} */
+
 
 #define IO_SPACE_LIMIT (0xFFFFFFFF)
 
@@ -112,6 +117,30 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
 #define memcpy_fromio(a, b, c)	memcpy((a), (void *)(b), (c))
 #define memcpy_toio(a, b, c)	memcpy((void *)(a), (b), (c))
 
+#ifdef CONFIG_MMU
+
+#define mm_ptov(addr)		((void *)__phys_to_virt(addr))
+#define mm_vtop(addr)		((unsigned long)__virt_to_phys(addr))
+#define phys_to_virt(addr)	((void *)__phys_to_virt(addr))
+#define virt_to_phys(addr)	((unsigned long)__virt_to_phys(addr))
+#define virt_to_bus(addr)	((unsigned long)__virt_to_phys(addr))
+
+#define __page_address(page) \
+		(PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
+#define page_to_phys(page)	virt_to_phys((void *)__page_address(page))
+#define page_to_bus(page)	(page_to_phys(page))
+#define bus_to_virt(addr)	(phys_to_virt(addr))
+
+extern void iounmap(void *addr);
+/*extern void *__ioremap(phys_addr_t address, unsigned long size,
+		unsigned long flags);*/
+extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
+#define ioremap_writethrough(addr, size) ioremap((addr), (size))
+#define ioremap_nocache(addr, size)      ioremap((addr), (size))
+#define ioremap_fullcache(addr, size)    ioremap((addr), (size))
+
+#else /* CONFIG_MMU */
+
 /**
  *	virt_to_phys - map virtual addresses to physical
  *	@address: address to remap
@@ -160,6 +189,8 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
 #define iounmap(addr)		((void)0)
 #define ioremap_nocache(physaddr, size)	ioremap(physaddr, size)
 
+#endif /* CONFIG_MMU */
+
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread
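
As a usage sketch of the MMU ioremap()/iounmap() declarations added above
(not taken from the series), a trivial driver might map and poke a
peripheral as follows; the physical base address and register offset are
invented example values.

#include <linux/module.h>
#include <linux/io.h>

#define EXAMPLE_PHYS_BASE	0x84000000UL	/* made-up peripheral address */
#define EXAMPLE_REG_CTRL	0x04		/* made-up register offset    */

static void __iomem *example_regs;

static int __init example_init(void)
{
	example_regs = ioremap(EXAMPLE_PHYS_BASE, PAGE_SIZE);
	if (!example_regs)
		return -ENOMEM;
	writel(0x1, example_regs + EXAMPLE_REG_CTRL);
	return 0;
}

static void __exit example_exit(void)
{
	iounmap(example_regs);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");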

* [PATCH 13/30] microblaze_mmu_v1: pgalloc.h and page.h
  2009-04-27  8:32                       ` [PATCH 12/30] microblaze_mmu_v1: io.h MMU update monstr
@ 2009-04-27  8:32                         ` monstr
  2009-04-27  8:32                           ` [PATCH 14/30] microblaze_mmu_v1: Update process creation for MMU monstr
  0 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/include/asm/page.h    |  169 ++++++++++++++++++++++--------
 arch/microblaze/include/asm/pgalloc.h |  191 +++++++++++++++++++++++++++++++++
 2 files changed, 317 insertions(+), 43 deletions(-)

diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 7238dcf..c48a360 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -1,6 +1,8 @@
 /*
- * Copyright (C) 2008 Michal Simek
- * Copyright (C) 2008 PetaLogix
+ * VM ops
+ *
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  * Changes for MMU support:
  *    Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
@@ -16,13 +18,17 @@
 #include <linux/pfn.h>
 #include <asm/setup.h>
 
+#ifdef __KERNEL__
+
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT	(12)
-#define PAGE_SIZE	(1UL << PAGE_SHIFT)
+# ifndef __ASSEMBLY__
+# define PAGE_SIZE	(1UL << PAGE_SHIFT)
+# else
+# define PAGE_SIZE	(1 << PAGE_SHIFT)
+# endif
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 
-#ifdef __KERNEL__
-
 #ifndef __ASSEMBLY__
 
 #define PAGE_UP(addr)	(((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
@@ -35,6 +41,7 @@
 /* align addr on a size boundary - adjust address up if needed */
 #define _ALIGN(addr, size)	_ALIGN_UP(addr, size)
 
+#ifndef CONFIG_MMU
 /*
  * PAGE_OFFSET -- the first address of the first page of memory. When not
  * using MMU this corresponds to the first free page in physical memory (aligned
@@ -43,15 +50,44 @@
 extern unsigned int __page_offset;
 #define PAGE_OFFSET __page_offset
 
-#define copy_page(to, from)			memcpy((to), (from), PAGE_SIZE)
-#define get_user_page(vaddr)			__get_free_page(GFP_KERNEL)
-#define free_user_page(page, addr)		free_page(addr)
+#else /* CONFIG_MMU */
+
+/*
+ * PAGE_OFFSET -- the first address of the first page of memory. With MMU
+ * it is set to the kernel start address (aligned on a page boundary).
+ *
+ * CONFIG_KERNEL_START is defined in arch/microblaze/config.in and used
+ * in arch/microblaze/Makefile.
+ */
+#define PAGE_OFFSET	CONFIG_KERNEL_START
+
+/*
+ * MAP_NR -- given an address, calculate the index of the page struct which
+ * points to the address's page.
+ */
+#define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
 
-#define clear_page(pgaddr)			memset((pgaddr), 0, PAGE_SIZE)
+/*
+ * The basic type of a PTE - 32 bit physical addressing.
+ */
+typedef unsigned long pte_basic_t;
+#define PTE_SHIFT	(PAGE_SHIFT - 2)	/* 1024 ptes per page */
+#define PTE_FMT		"%.8lx"
 
+#endif /* CONFIG_MMU */
 
-#define clear_user_page(pgaddr, vaddr, page)	memset((pgaddr), 0, PAGE_SIZE)
-#define copy_user_page(vto, vfrom, vaddr, topg) \
+#  ifndef CONFIG_MMU
+#  define copy_page(to, from)			memcpy((to), (from), PAGE_SIZE)
+#  define get_user_page(vaddr)			__get_free_page(GFP_KERNEL)
+#  define free_user_page(page, addr)		free_page(addr)
+#  else /* CONFIG_MMU */
+extern void copy_page(void *to, void *from);
+#  endif /* CONFIG_MMU */
+
+# define clear_page(pgaddr)			memset((pgaddr), 0, PAGE_SIZE)
+
+# define clear_user_page(pgaddr, vaddr, page)	memset((pgaddr), 0, PAGE_SIZE)
+# define copy_user_page(vto, vfrom, vaddr, topg) \
 			memcpy((vto), (vfrom), PAGE_SIZE)
 
 /*
@@ -60,21 +96,32 @@ extern unsigned int __page_offset;
 typedef struct page *pgtable_t;
 typedef struct { unsigned long	pte; }		pte_t;
 typedef struct { unsigned long	pgprot; }	pgprot_t;
+/* FIXME this can depend on linux kernel version */
+#   ifdef CONFIG_MMU
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+#   else /* CONFIG_MMU */
 typedef struct { unsigned long	ste[64]; }	pmd_t;
 typedef struct { pmd_t		pue[1]; }	pud_t;
 typedef struct { pud_t		pge[1]; }	pgd_t;
+#   endif /* CONFIG_MMU */
 
+# define pte_val(x)	((x).pte)
+# define pgprot_val(x)	((x).pgprot)
 
-#define pte_val(x)	((x).pte)
-#define pgprot_val(x)	((x).pgprot)
-#define pmd_val(x)	((x).ste[0])
-#define pud_val(x)	((x).pue[0])
-#define pgd_val(x)	((x).pge[0])
+#   ifdef CONFIG_MMU
+#   define pmd_val(x)      ((x).pmd)
+#   define pgd_val(x)      ((x).pgd)
+#   else  /* CONFIG_MMU */
+#   define pmd_val(x)	((x).ste[0])
+#   define pud_val(x)	((x).pue[0])
+#   define pgd_val(x)	((x).pge[0])
+#   endif  /* CONFIG_MMU */
 
-#define __pte(x)	((pte_t) { (x) })
-#define __pmd(x)	((pmd_t) { (x) })
-#define __pgd(x)	((pgd_t) { (x) })
-#define __pgprot(x)	((pgprot_t) { (x) })
+# define __pte(x)	((pte_t) { (x) })
+# define __pmd(x)	((pmd_t) { (x) })
+# define __pgd(x)	((pgd_t) { (x) })
+# define __pgprot(x)	((pgprot_t) { (x) })
 
 /**
  * Conversions for virtual address, physical address, pfn, and struct
@@ -94,44 +141,80 @@ extern unsigned long max_low_pfn;
 extern unsigned long min_low_pfn;
 extern unsigned long max_pfn;
 
-#define __pa(vaddr)		((unsigned long) (vaddr))
-#define __va(paddr)		((void *) (paddr))
+extern unsigned int memory_start;
+extern unsigned int memory_end;
+extern unsigned int memory_size;
 
-#define phys_to_pfn(phys)	(PFN_DOWN(phys))
-#define pfn_to_phys(pfn)	(PFN_PHYS(pfn))
+extern int page_is_ram(unsigned long pfn);
 
-#define virt_to_pfn(vaddr)	(phys_to_pfn((__pa(vaddr))))
-#define pfn_to_virt(pfn)	__va(pfn_to_phys((pfn)))
+# define phys_to_pfn(phys)	(PFN_DOWN(phys))
+# define pfn_to_phys(pfn)	(PFN_PHYS(pfn))
 
-#define virt_to_page(vaddr)	(pfn_to_page(virt_to_pfn(vaddr)))
-#define page_to_virt(page)	(pfn_to_virt(page_to_pfn(page)))
+# define virt_to_pfn(vaddr)	(phys_to_pfn((__pa(vaddr))))
+# define pfn_to_virt(pfn)	__va(pfn_to_phys((pfn)))
 
-#define page_to_phys(page)	(pfn_to_phys(page_to_pfn(page)))
-#define page_to_bus(page)	(page_to_phys(page))
-#define phys_to_page(paddr)	(pfn_to_page(phys_to_pfn(paddr)))
+#  ifdef CONFIG_MMU
+#  define virt_to_page(kaddr) 	(mem_map +  MAP_NR(kaddr))
+#  else /* CONFIG_MMU */
+#  define virt_to_page(vaddr)	(pfn_to_page(virt_to_pfn(vaddr)))
+#  define page_to_virt(page)	(pfn_to_virt(page_to_pfn(page)))
+#  define page_to_phys(page)	(pfn_to_phys(page_to_pfn(page)))
+#  define page_to_bus(page)	(page_to_phys(page))
+#  define phys_to_page(paddr)	(pfn_to_page(phys_to_pfn(paddr)))
+#  endif /* CONFIG_MMU */
 
-extern unsigned int memory_start;
-extern unsigned int memory_end;
-extern unsigned int memory_size;
+#  ifndef CONFIG_MMU
+#  define pfn_valid(pfn)	((pfn) >= min_low_pfn && (pfn) <= max_mapnr)
+#  define ARCH_PFN_OFFSET	(PAGE_OFFSET >> PAGE_SHIFT)
+#  else /* CONFIG_MMU */
+#  define ARCH_PFN_OFFSET	(memory_start >> PAGE_SHIFT)
+#  define pfn_valid(pfn)	((pfn) < (max_mapnr + ARCH_PFN_OFFSET))
+#  define VALID_PAGE(page) 	((page - mem_map) < max_mapnr)
+#  endif /* CONFIG_MMU */
 
-#define pfn_valid(pfn)		((pfn) >= min_low_pfn && (pfn) < max_mapnr)
+# endif /* __ASSEMBLY__ */
 
-#define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
+#define	virt_addr_valid(vaddr)	(pfn_valid(virt_to_pfn(vaddr)))
 
-#else
-#define tophys(rd, rs)	(addik rd, rs, 0)
-#define tovirt(rd, rs)	(addik rd, rs, 0)
-#endif /* __ASSEMBLY__ */
 
-#define virt_addr_valid(vaddr)	(pfn_valid(virt_to_pfn(vaddr)))
+#  ifndef CONFIG_MMU
+#  define __pa(vaddr)	((unsigned long) (vaddr))
+#  define __va(paddr)	((void *) (paddr))
+#  else /* CONFIG_MMU */
+#  define __pa(x)	__virt_to_phys((unsigned long)(x))
+#  define __va(x)	((void *)__phys_to_virt((unsigned long)(x)))
+#  endif /* CONFIG_MMU */
+
 
-/* Convert between virtual and physical address for MMU.  */
-/* Handle MicroBlaze processor with virtual memory.  */
+/* Convert between virtual and physical address for MMU. */
+/* Handle MicroBlaze processor with virtual memory. */
+#ifndef CONFIG_MMU
 #define __virt_to_phys(addr)	addr
 #define __phys_to_virt(addr)	addr
+#define tophys(rd, rs)	addik rd, rs, 0
+#define tovirt(rd, rs)	addik rd, rs, 0
+#else
+#define __virt_to_phys(addr) \
+	((addr) + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START)
+#define __phys_to_virt(addr) \
+	((addr) + CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR)
+#define tophys(rd, rs) \
+	addik rd, rs, (CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START)
+#define tovirt(rd, rs) \
+	addik rd, rs, (CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR)
+#endif /* CONFIG_MMU */
 
 #define TOPHYS(addr)  __virt_to_phys(addr)
 
+#ifdef CONFIG_MMU
+#ifdef CONFIG_CONTIGUOUS_PAGE_ALLOC
+#define WANT_PAGE_VIRTUAL 1 /* page alloc 2 relies on this */
+#endif
+
+#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
+				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#endif /* CONFIG_MMU */
+
 #endif /* __KERNEL__ */
 
 #include <asm-generic/memory_model.h>
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 2a4b354..59a757e 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -9,6 +11,195 @@
 #ifndef _ASM_MICROBLAZE_PGALLOC_H
 #define _ASM_MICROBLAZE_PGALLOC_H
 
+#ifdef CONFIG_MMU
+
+#include <linux/kernel.h>	/* For min/max macros */
+#include <linux/highmem.h>
+#include <asm/setup.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+
+#define PGDIR_ORDER	0
+
+/*
+ * This is handled very differently on MicroBlaze since our page tables
+ * are all 0's and I want to be able to use these zero'd pages elsewhere
+ * as well - it gives us quite a speedup.
+ * -- Cort
+ */
+extern struct pgtable_cache_struct {
+	unsigned long *pgd_cache;
+	unsigned long *pte_cache;
+	unsigned long pgtable_cache_sz;
+} quicklists;
+
+#define pgd_quicklist		(quicklists.pgd_cache)
+#define pmd_quicklist		((unsigned long *)0)
+#define pte_quicklist		(quicklists.pte_cache)
+#define pgtable_cache_size	(quicklists.pgtable_cache_sz)
+
+extern unsigned long *zero_cache; /* head linked list of pre-zero'd pages */
+extern atomic_t zero_sz; /* # currently pre-zero'd pages */
+extern atomic_t zeropage_hits; /* # zero'd page requests we've satisfied */
+extern atomic_t zeropage_calls; /* # zero'd page requests that have been made */
+extern atomic_t zerototal; /* # pages zero'd over time */
+
+#define zero_quicklist		(zero_cache)
+#define zero_cache_sz	 	(zero_sz)
+#define zero_cache_calls	(zeropage_calls)
+#define zero_cache_hits		(zeropage_hits)
+#define zero_cache_total	(zerototal)
+
+/*
+ * return a pre-zero'd page from the list,
+ * return NULL if none available -- Cort
+ */
+extern unsigned long get_zero_page_fast(void);
+
+extern void __bad_pte(pmd_t *pmd);
+
+extern inline pgd_t *get_pgd_slow(void)
+{
+	pgd_t *ret;
+
+	ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER);
+	if (ret != NULL)
+		clear_page(ret);
+	return ret;
+}
+
+extern inline pgd_t *get_pgd_fast(void)
+{
+	unsigned long *ret;
+
+	ret = pgd_quicklist;
+	if (ret != NULL) {
+		pgd_quicklist = (unsigned long *)(*ret);
+		ret[0] = 0;
+		pgtable_cache_size--;
+	} else
+		ret = (unsigned long *)get_pgd_slow();
+	return (pgd_t *)ret;
+}
+
+extern inline void free_pgd_fast(pgd_t *pgd)
+{
+	*(unsigned long **)pgd = pgd_quicklist;
+	pgd_quicklist = (unsigned long *) pgd;
+	pgtable_cache_size++;
+}
+
+extern inline void free_pgd_slow(pgd_t *pgd)
+{
+	free_page((unsigned long)pgd);
+}
+
+#define pgd_free(mm, pgd)        free_pgd_fast(pgd)
+#define pgd_alloc(mm)		get_pgd_fast()
+
+#define pmd_pgtable(pmd)	pmd_page(pmd)
+
+/*
+ * We don't have any real pmd's, and this code never triggers because
+ * the pgd will always be present..
+ */
+#define pmd_alloc_one_fast(mm, address)	({ BUG(); ((pmd_t *)1); })
+#define pmd_alloc_one(mm, address)	({ BUG(); ((pmd_t *)2); })
+/* FIXME duplicate definition - see below */
+#define pmd_free(mm, x)			do { } while (0)
+#define pgd_populate(mm, pmd, pte)	BUG()
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+		unsigned long address)
+{
+	pte_t *pte;
+	extern int mem_init_done;
+	extern void *early_get_page(void);
+	if (mem_init_done) {
+		pte = (pte_t *)__get_free_page(GFP_KERNEL |
+					__GFP_REPEAT | __GFP_ZERO);
+	} else {
+		pte = (pte_t *)early_get_page();
+		if (pte)
+			clear_page(pte);
+	}
+	return pte;
+}
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+		unsigned long address)
+{
+	struct page *ptepage;
+
+#ifdef CONFIG_HIGHPTE
+	int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+#else
+	int flags = GFP_KERNEL | __GFP_REPEAT;
+#endif
+
+	ptepage = alloc_pages(flags, 0);
+	if (ptepage)
+		clear_highpage(ptepage);
+	return ptepage;
+}
+
+static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
+		unsigned long address)
+{
+	unsigned long *ret;
+
+	ret = pte_quicklist;
+	if (ret != NULL) {
+		pte_quicklist = (unsigned long *)(*ret);
+		ret[0] = 0;
+		pgtable_cache_size--;
+	}
+	return (pte_t *)ret;
+}
+
+extern inline void pte_free_fast(pte_t *pte)
+{
+	*(unsigned long **)pte = pte_quicklist;
+	pte_quicklist = (unsigned long *) pte;
+	pgtable_cache_size++;
+}
+
+extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+extern inline void pte_free_slow(struct page *ptepage)
+{
+	__free_page(ptepage);
+}
+
+extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
+{
+	__free_page(ptepage);
+}
+
+#define __pte_free_tlb(tlb, pte)	pte_free((tlb)->mm, (pte))
+
+#define pmd_populate(mm, pmd, pte)	(pmd_val(*(pmd)) = page_address(pte))
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+		(pmd_val(*(pmd)) = (unsigned long) (pte))
+
+/*
+ * We don't have any real pmd's, and this code never triggers because
+ * the pgd will always be present..
+ */
+#define pmd_alloc_one(mm, address)	({ BUG(); ((pmd_t *)2); })
+/*#define pmd_free(mm, x)			do { } while (0)*/
+#define __pmd_free_tlb(tlb, x)		do { } while (0)
+#define pgd_populate(mm, pmd, pte)	BUG()
+
+extern int do_check_pgt_cache(int, int);
+
+#endif /* CONFIG_MMU */
+
 #define check_pgt_cache()	do {} while (0)
 
 #endif /* _ASM_MICROBLAZE_PGALLOC_H */
-- 
1.5.5.1
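
A note on the quicklist scheme in pgalloc.h above, in case the macros are
hard to follow: free_pgd_fast() threads each freed page-directory page into
a singly linked list by storing the next pointer in the page's first word,
and get_pgd_fast() pops from that list before falling back to the page
allocator. The user-space sketch below models the same idea in plain C;
malloc() stands in for __get_free_pages() and the names are illustrative
only, so treat it as a model rather than the kernel code itself.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PGD_SIZE 4096			/* stand-in for one page */

static unsigned long *pgd_quicklist;	/* head of the free list */
static unsigned long pgtable_cache_size;

static unsigned long *get_pgd_fast(void)
{
	unsigned long *ret = pgd_quicklist;

	if (ret) {
		/* first word of a cached page links to the next free page */
		pgd_quicklist = (unsigned long *)*ret;
		ret[0] = 0;		/* scrub the link again */
		pgtable_cache_size--;
	} else {
		ret = malloc(PGD_SIZE);	/* "slow" path */
		if (ret)
			memset(ret, 0, PGD_SIZE);
	}
	return ret;
}

static void free_pgd_fast(unsigned long *pgd)
{
	*(unsigned long **)pgd = pgd_quicklist;	/* link page into the list */
	pgd_quicklist = pgd;
	pgtable_cache_size++;
}

int main(void)
{
	unsigned long *a = get_pgd_fast();	/* slow path: malloc */
	free_pgd_fast(a);			/* page is now cached */
	unsigned long *b = get_pgd_fast();	/* fast path: b == a */
	printf("reused: %d\n", a == b);
	free(b);
	return 0;
}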


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 14/30] microblaze_mmu_v1: Update process creation for MMU
  2009-04-27  8:32                         ` [PATCH 13/30] microblaze_mmu_v1: pgalloc.h and page.h monstr
@ 2009-04-27  8:32                           ` monstr
  2009-04-27  8:32                             ` [PATCH 15/30] microblaze_mmu_v1: Update tlb.h and tlbflush.h monstr
  0 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/include/asm/processor.h |   97 +++++++++++++++++++++++++++++-
 arch/microblaze/include/asm/registers.h |   21 ++++++-
 arch/microblaze/include/asm/segment.h   |   10 +++-
 arch/microblaze/kernel/process.c        |   58 ++++++++++++++++++
 4 files changed, 178 insertions(+), 8 deletions(-)

diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 9329029..f206fc1 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -1,6 +1,6 @@
 /*
- * Copyright (C) 2008 Michal Simek
- * Copyright (C) 2008 PetaLogix
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -26,14 +26,15 @@ extern const struct seq_operations cpuinfo_op;
 # define cpu_sleep()		do {} while (0)
 # define prepare_to_copy(tsk)	do {} while (0)
 
-# endif /* __ASSEMBLY__ */
-
 #define task_pt_regs(tsk) \
 		(((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
 
 /* Do necessary setup to start up a newly executed thread. */
 void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp);
 
+# endif /* __ASSEMBLY__ */
+
+# ifndef CONFIG_MMU
 /*
  * User space process size: memory size
  *
@@ -85,4 +86,92 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 # define KSTK_EIP(tsk)	(0)
 # define KSTK_ESP(tsk)	(0)
 
+# else /* CONFIG_MMU */
+
+/*
+ * This is used to define STACK_TOP, and with MMU it must be below
+ * kernel base to select the correct PGD when handling MMU exceptions.
+ */
+# define TASK_SIZE	(CONFIG_KERNEL_START)
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+# define TASK_UNMAPPED_BASE	(TASK_SIZE / 8 * 3)
+
+# define THREAD_KSP	0
+
+#  ifndef __ASSEMBLY__
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#  define current_text_addr()	({ __label__ _l; _l: &&_l; })
+
+/* If you change this, you must change the associated assembly-language
+ * constants defined below, THREAD_*.
+ */
+struct thread_struct {
+	/* kernel stack pointer (must be first field in structure) */
+	unsigned long	ksp;
+	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
+	mm_segment_t	fs;		/* for get_fs() validation */
+	void		*pgdir;		/* root of page-table tree */
+	struct pt_regs	*regs;		/* Pointer to saved register state */
+};
+
+#  define INIT_THREAD { \
+	.ksp   = sizeof init_stack + (unsigned long)init_stack, \
+	.fs    = KERNEL_DS, \
+	.pgdir = swapper_pg_dir, \
+}
+
+/* Do necessary setup to start up a newly executed thread.  */
+void start_thread(struct pt_regs *regs,
+		unsigned long pc, unsigned long usp);
+
+/* Free all resources held by a thread. */
+extern inline void release_thread(struct task_struct *dead_task)
+{
+}
+
+extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+
+/* Free current thread data structures etc.  */
+static inline void exit_thread(void)
+{
+}
+
+/* Return saved (kernel) PC of a blocked thread.  */
+#  define thread_saved_pc(tsk)	\
+	((tsk)->thread.regs ? (tsk)->thread.regs->r15 : 0)
+
+unsigned long get_wchan(struct task_struct *p);
+
+/* The size allocated for kernel stacks. This _must_ be a power of two! */
+# define KERNEL_STACK_SIZE	0x2000
+
+/* Return some info about the user process TASK.  */
+#  define task_tos(task)	((unsigned long)(task) + KERNEL_STACK_SIZE)
+#  define task_regs(task) ((struct pt_regs *)task_tos(task) - 1)
+
+#  define task_pt_regs_plus_args(tsk) \
+	(((void *)task_pt_regs(tsk)) - STATE_SAVE_ARG_SPACE)
+
+#  define task_sp(task)	(task_regs(task)->r1)
+#  define task_pc(task)	(task_regs(task)->pc)
+/* Grotty old names for some.  */
+#  define KSTK_EIP(task)	(task_pc(task))
+#  define KSTK_ESP(task)	(task_sp(task))
+
+/* FIXME */
+#  define deactivate_mm(tsk, mm)	do { } while (0)
+
+#  define STACK_TOP	TASK_SIZE
+#  define STACK_TOP_MAX	STACK_TOP
+
+#  endif /* __ASSEMBLY__ */
+# endif /* CONFIG_MMU */
 #endif /* _ASM_MICROBLAZE_PROCESSOR_H */
diff --git a/arch/microblaze/include/asm/registers.h b/arch/microblaze/include/asm/registers.h
index 834142d..68c3afb 100644
--- a/arch/microblaze/include/asm/registers.h
+++ b/arch/microblaze/include/asm/registers.h
@@ -1,6 +1,6 @@
 /*
- * Copyright (C) 2008 Michal Simek
- * Copyright (C) 2008 PetaLogix
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -30,4 +30,21 @@
 #define FSR_UF		(1<<1) /* Underflow */
 #define FSR_DO		(1<<0) /* Denormalized operand error */
 
+# ifdef CONFIG_MMU
+/* Machine State Register (MSR) Fields */
+# define MSR_UM		(1<<11) /* User Mode */
+# define MSR_UMS	(1<<12) /* User Mode Save */
+# define MSR_VM		(1<<13) /* Virtual Mode */
+# define MSR_VMS	(1<<14) /* Virtual Mode Save */
+
+# define MSR_KERNEL	(MSR_EE | MSR_VM)
+/* # define MSR_USER	(MSR_KERNEL | MSR_UM | MSR_IE) */
+# define MSR_KERNEL_VMS	(MSR_EE | MSR_VMS)
+/* # define MSR_USER_VMS	(MSR_KERNEL_VMS | MSR_UMS | MSR_IE) */
+
+/* Exception State Register (ESR) Fields */
+# define	  ESR_DIZ	(1<<11) /* Zone Protection */
+# define	  ESR_S		(1<<10) /* Store instruction */
+
+# endif /* CONFIG_MMU */
 #endif /* _ASM_MICROBLAZE_REGISTERS_H */
diff --git a/arch/microblaze/include/asm/segment.h b/arch/microblaze/include/asm/segment.h
index 7f5dcc5..18d1c4b 100644
--- a/arch/microblaze/include/asm/segment.h
+++ b/arch/microblaze/include/asm/segment.h
@@ -1,6 +1,6 @@
 /*
- * Copyright (C) 2008 Michal Simek
- * Copyright (C) 2008 PetaLogix
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -29,8 +29,14 @@ typedef struct {
  *
  * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
  */
+
+#  ifndef CONFIG_MMU
 #  define KERNEL_DS	((mm_segment_t){0})
 #  define USER_DS	KERNEL_DS
+#  else
+#  define KERNEL_DS	((mm_segment_t){0xFFFFF000})
+#  define USER_DS	((mm_segment_t){0xC0000000})
+#  endif
 
 # define get_ds()	(KERNEL_DS)
 # define get_fs()	(current_thread_info()->addr_limit)
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 07d4fa3..f289ff3 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -126,9 +126,54 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	else
 		childregs->r1 = ((unsigned long) ti) + THREAD_SIZE;
 
+#ifndef CONFIG_MMU
 	memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
 	ti->cpu_context.r1 = (unsigned long)childregs;
 	ti->cpu_context.msr = (unsigned long)childregs->msr;
+#else
+
+	/* if creating a kernel thread then update the current reg (we don't
+	 * want to use the parent's value when restoring by POP_STATE) */
+	if (kernel_mode(regs))
+		/* save new current on stack to use POP_STATE */
+		childregs->CURRENT_TASK = (unsigned long)p;
+	/* if returning to user then use the parent's value of this register */
+
+	/* if we're creating a new kernel thread then just zero all
+	 * the registers. That's OK for a brand new thread. */
+	/* Please note that some of them will be restored in POP_STATE */
+	if (kernel_mode(regs))
+		memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
+	/* if this thread is created for fork/vfork/clone, then we want to
+	 * restore all the parent's context */
+	/* in addition to the registers which will be restored by POP_STATE */
+	else {
+		ti->cpu_context = *(struct cpu_context *)regs;
+		childregs->msr |= MSR_UMS;
+	}
+
+	/* FIXME STATE_SAVE_PT_OFFSET; */
+	ti->cpu_context.r1  = (unsigned long)childregs - STATE_SAVE_ARG_SPACE;
+	/* Note that childregs is a copy of the parent's regs, which were
+	 * saved immediately after entering kernel state and before enabling
+	 * VM. This MSR will be restored in switch_to and in RETURN(), and
+	 * we want the right machine state there: specifically, this state
+	 * must have interrupts disabled before and enabled after performing
+	 * rtbd.
+	 * Compose the right MSR for RETURN(). It will also work for
+	 * switch_to, except for VM and UMS.
+	 * Don't touch UMS, CARRY and the cache bits;
+	 * right now MSR is a copy of the parent's one. */
+	childregs->msr |= MSR_BIP;
+	childregs->msr &= ~MSR_EIP;
+	childregs->msr |= MSR_IE;
+	childregs->msr &= ~MSR_VM;
+	childregs->msr |= MSR_VMS;
+	childregs->msr |= MSR_EE; /* exceptions will be enabled*/
+
+	ti->cpu_context.msr = (childregs->msr|MSR_VM);
+	ti->cpu_context.msr &= ~MSR_UMS; /* switch_to to kernel mode */
+#endif
 	ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;
 
 	if (clone_flags & CLONE_SETTLS)
@@ -137,6 +182,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	return 0;
 }
 
+#ifndef CONFIG_MMU
 /*
  * Return saved PC of a blocked thread.
  */
@@ -151,6 +197,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 	else
 		return ctx->r14;
 }
+#endif
 
 static void kernel_thread_helper(int (*fn)(void *), void *arg)
 {
@@ -188,3 +235,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
 	regs->r1 = usp;
 	regs->pt_mode = 0;
 }
+
+#ifdef CONFIG_MMU
+#include <linux/elfcore.h>
+/*
+ * Dump the floating-point register state for an ELF core dump
+ */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
+{
+	return 0; /* MicroBlaze has no separate FPU registers */
+}
+#endif /* CONFIG_MMU */
-- 
1.5.5.1
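
For readers trying to follow the MSR gymnastics in copy_thread() above: the
child's saved MSR is just the parent's copy with a handful of bits flipped so
that the return path (rtbd) and switch_to() each see the machine state they
expect. The sketch below redoes that bit arithmetic in plain C. The bit
positions are illustrative placeholders (only UM/UMS/VM/VMS appear in the
registers.h hunk of this series; the other bits are defined elsewhere), so
read it as a model of the flow, not the authoritative register layout.

#include <stdio.h>

/* Illustrative bit positions only; see asm/registers.h for the real ones. */
#define MSR_IE	(1 << 1)	/* interrupt enable */
#define MSR_BIP	(1 << 3)	/* break in progress */
#define MSR_EE	(1 << 8)	/* exception enable */
#define MSR_EIP	(1 << 9)	/* exception in progress */
#define MSR_UMS	(1 << 12)	/* user mode save */
#define MSR_VM	(1 << 13)	/* virtual mode */
#define MSR_VMS	(1 << 14)	/* virtual mode save */

/* MSR the child returns through rtbd with (what goes in childregs->msr) */
static unsigned long child_return_msr(unsigned long parent_msr)
{
	unsigned long msr = parent_msr;

	msr |= MSR_BIP;		/* interrupts stay off until rtbd completes */
	msr &= ~MSR_EIP;
	msr |= MSR_IE;		/* ...then interrupts come back on */
	msr &= ~MSR_VM;		/* not in virtual mode during the return... */
	msr |= MSR_VMS;		/* ...but rtbd switches it back on */
	msr |= MSR_EE;		/* exceptions enabled */
	return msr;
}

/* MSR stashed in cpu_context for switch_to(): kernel, virtual mode */
static unsigned long child_switch_msr(unsigned long return_msr)
{
	return (return_msr | MSR_VM) & ~MSR_UMS;
}

int main(void)
{
	unsigned long r = child_return_msr(MSR_VM | MSR_IE | MSR_EE);

	printf("return msr 0x%lx, switch_to msr 0x%lx\n",
	       r, child_switch_msr(r));
	return 0;
}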


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 15/30] microblaze_mmu_v1: Update tlb.h and tlbflush.h
  2009-04-27  8:32                           ` [PATCH 14/30] microblaze_mmu_v1: Update process creation for MMU monstr
@ 2009-04-27  8:32                             ` monstr
  2009-04-27  8:32                               ` [PATCH 16/30] microblaze_mmu_v1: MMU asm offset update monstr
  0 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/include/asm/tlb.h      |    8 +++++
 arch/microblaze/include/asm/tlbflush.h |   48 ++++++++++++++++++++++++++++++++
 2 files changed, 56 insertions(+), 0 deletions(-)

diff --git a/arch/microblaze/include/asm/tlb.h b/arch/microblaze/include/asm/tlb.h
index d1dfe37..c472d28 100644
--- a/arch/microblaze/include/asm/tlb.h
+++ b/arch/microblaze/include/asm/tlb.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -13,4 +15,10 @@
 
 #include <asm-generic/tlb.h>
 
+#ifdef CONFIG_MMU
+#define tlb_start_vma(tlb, vma)		do { } while (0)
+#define tlb_end_vma(tlb, vma)		do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
+#endif
+
 #endif /* _ASM_MICROBLAZE_TLB_H */
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index d7fe762..eb31a0e 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -9,6 +11,50 @@
 #ifndef _ASM_MICROBLAZE_TLBFLUSH_H
 #define _ASM_MICROBLAZE_TLBFLUSH_H
 
+#ifdef CONFIG_MMU
+
+#include <linux/sched.h>
+#include <linux/threads.h>
+#include <asm/processor.h>	/* For TASK_SIZE */
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+
+extern void _tlbie(unsigned long address);
+extern void _tlbia(void);
+
+#define __tlbia()	_tlbia()
+
+static inline void local_flush_tlb_all(void)
+	{ __tlbia(); }
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+	{ __tlbia(); }
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+				unsigned long vmaddr)
+	{ _tlbie(vmaddr); }
+static inline void local_flush_tlb_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end)
+	{ __tlbia(); }
+
+#define flush_tlb_kernel_range(start, end)	do { } while (0)
+
+#define update_mmu_cache(vma, addr, pte)	do { } while (0)
+
+#define flush_tlb_all local_flush_tlb_all
+#define flush_tlb_mm local_flush_tlb_mm
+#define flush_tlb_page local_flush_tlb_page
+#define flush_tlb_range local_flush_tlb_range
+
+/*
+ * This is called in munmap when we have freed up some page-table
+ * pages.  We don't need to do anything here, there's nothing special
+ * about our page-table pages.  -- paulus
+ */
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+	unsigned long start, unsigned long end) { }
+
+#else /* CONFIG_MMU */
+
 #define flush_tlb()				BUG()
 #define flush_tlb_all()				BUG()
 #define flush_tlb_mm(mm)			BUG()
@@ -17,4 +63,6 @@
 #define flush_tlb_pgtables(mm, start, end)	BUG()
 #define flush_tlb_kernel_range(start, end)	BUG()
 
+#endif /* CONFIG_MMU */
+
 #endif /* _ASM_MICROBLAZE_TLBFLUSH_H */
-- 
1.5.5.1
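
One thing worth noting about the hunk above: with the MMU enabled, every
flush primitive except the single-page local_flush_tlb_page() falls back to
a full _tlbia(), which is the simplest correct choice for a small
software-loaded TLB. A possible refinement, not part of this patch, is to
invalidate entry by entry when the range is small. The sketch below models
that trade-off in user-space C with stubbed primitives; the cut-off value is
a made-up illustration.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define FLUSH_MAX	8	/* hypothetical cut-off, not from the patch */

/* Stubs standing in for the real MicroBlaze TLB primitives. */
static void _tlbie(unsigned long addr) { printf("tlbie  %#lx\n", addr); }
static void _tlbia(void)               { printf("tlbia (full flush)\n"); }

/*
 * flush_tlb_range() in the patch maps straight onto _tlbia(); flushing
 * page by page only pays off when the range covers few TLB entries.
 */
static void flush_range(unsigned long start, unsigned long end)
{
	unsigned long pages;

	start &= PAGE_MASK;
	pages = (end - start + PAGE_SIZE - 1) / PAGE_SIZE;
	if (pages > FLUSH_MAX) {
		_tlbia();
		return;
	}
	for (; start < end; start += PAGE_SIZE)
		_tlbie(start);
}

int main(void)
{
	flush_range(0x10000400, 0x10002000);	/* few pages: per-entry */
	flush_range(0x10000000, 0x10080000);	/* big range: full flush */
	return 0;
}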


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 16/30] microblaze_mmu_v1: MMU asm offset update
  2009-04-27  8:32                             ` [PATCH 15/30] microblaze_mmu_v1: Update tlb.h and tlbflush.h monstr
@ 2009-04-27  8:32                               ` monstr
  2009-04-27  8:32                                 ` [PATCH 17/30] microblaze_mmu_v1: Add CURRENT_TASK for entry.S monstr
  0 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/kernel/asm-offsets.c |   16 ++++++++++++++++
 1 files changed, 16 insertions(+), 0 deletions(-)

diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index aabd9e9..362de6c 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
  * Copyright (C) 2007-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
@@ -68,6 +69,21 @@ int main(int argc, char *argv[])
 
 	/* struct task_struct */
 	DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack));
+#ifdef CONFIG_MMU
+	DEFINE(TASK_STATE, offsetof(struct task_struct, state));
+	DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+	DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+	DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
+	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+	DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+	DEFINE(TASK_PID, offsetof(struct task_struct, pid));
+	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
+	BLANK();
+
+	DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
+	BLANK();
+#endif
 
 	/* struct thread_info */
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
-- 
1.5.5.1
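
In case the asm-offsets machinery is unfamiliar: the DEFINE() entries added
above exist only so that assembly code such as entry.S can refer to C struct
members by name (TS_THREAD_INFO, THREAD_KSP, PGDIR, ...) instead of
hard-coded byte offsets, with kbuild turning each entry into a constant at
build time. The stand-alone sketch below shows the underlying offsetof()
idea with an invented struct and a plain printf() in place of the kernel's
kbuild plumbing, so the struct layout and names are illustrative only.

#include <stdio.h>
#include <stddef.h>

/* Invented stand-ins for task_struct/thread_struct, for illustration. */
struct thread {
	unsigned long ksp;
	void *pgdir;
};

struct task {
	long state;
	void *stack;
	struct thread thread;
};

/* The kernel emits these via kbuild; printf is enough to show the idea. */
#define DEFINE(sym, val) printf("#define %-16s %zu\n", #sym, (size_t)(val))

int main(void)
{
	DEFINE(TS_THREAD_INFO, offsetof(struct task, stack));
	DEFINE(TASK_THREAD,    offsetof(struct task, thread));
	DEFINE(THREAD_KSP,     offsetof(struct thread, ksp));
	DEFINE(PGDIR,          offsetof(struct thread, pgdir));
	return 0;
}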


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 17/30] microblaze_mmu_v1: Add CURRENT_TASK for entry.S
  2009-04-27  8:32                               ` [PATCH 16/30] microblaze_mmu_v1: MMU asm offset update monstr
@ 2009-04-27  8:32                                 ` monstr
  2009-04-27  8:32                                   ` [PATCH 18/30] microblaze_mmu_v1: entry.S, entry.h monstr
  0 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/include/asm/current.h |    8 ++++++++
 1 files changed, 8 insertions(+), 0 deletions(-)

diff --git a/arch/microblaze/include/asm/current.h b/arch/microblaze/include/asm/current.h
index 8375ea9..29303ed 100644
--- a/arch/microblaze/include/asm/current.h
+++ b/arch/microblaze/include/asm/current.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -9,6 +11,12 @@
 #ifndef _ASM_MICROBLAZE_CURRENT_H
 #define _ASM_MICROBLAZE_CURRENT_H
 
+/*
+ * Register used to hold the current task pointer while in the kernel.
+ * Any `call clobbered' register without a special meaning should be OK,
+ * but check arch/microblaze/kernel/entry.S to be sure.
+ */
+#define CURRENT_TASK	r31
 # ifndef __ASSEMBLY__
 /*
  * Dedicate r31 to keeping the current task pointer
-- 
1.5.5.1
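
The reason a bare register name is enough here is that current.h (in the
part of the file not shown in this hunk) binds a global register variable to
r31, so C code and entry.S agree on where the current task pointer lives.
The sketch below illustrates that GCC idiom; x86-64's callee-saved r15
stands in for MicroBlaze's r31 so it builds on an ordinary PC (compile with
gcc, ideally adding -ffixed-r15), and the struct is a made-up stand-in for
task_struct.

#include <stdio.h>

struct task { int pid; };

/*
 * Dedicate one callee-saved register to the current task pointer.
 * MicroBlaze uses r31; r15 is only an x86-64 stand-in for this sketch.
 */
register struct task *current_task asm("r15");

static struct task init_task = { .pid = 1 };

int main(void)
{
	/* what CURRENT_SAVE and _switch_to keep up to date in the kernel */
	current_task = &init_task;
	printf("current pid = %d\n", current_task->pid);
	return 0;
}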


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 18/30] microblaze_mmu_v1: entry.S, entry.h
  2009-04-27  8:32                                 ` [PATCH 17/30] microblaze_mmu_v1: Add CURRENT_TASK for entry.S monstr
@ 2009-04-27  8:32                                   ` monstr
  2009-04-27  8:32                                     ` [PATCH 19/30] microblaze_mmu_v1: Update exception handling - MMU exception monstr
  0 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/include/asm/entry.h |   37 ++-
 arch/microblaze/kernel/entry.S      | 1116 +++++++++++++++++++++++++++++++++++
 2 files changed, 1151 insertions(+), 2 deletions(-)
 create mode 100644 arch/microblaze/kernel/entry.S

diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h
index e4c3aef..61abbd2 100644
--- a/arch/microblaze/include/asm/entry.h
+++ b/arch/microblaze/include/asm/entry.h
@@ -1,8 +1,8 @@
 /*
  * Definitions used by low-level trap handlers
  *
- * Copyright (C) 2008 Michal Simek
- * Copyright (C) 2007 - 2008 PetaLogix
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
  * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
  *
  * This file is subject to the terms and conditions of the GNU General
@@ -31,7 +31,40 @@ DECLARE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */
 DECLARE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */
 # endif /* __ASSEMBLY__ */
 
+#ifndef CONFIG_MMU
+
 /* noMMU hasn't any space for args */
 # define STATE_SAVE_ARG_SPACE	(0)
 
+#else /* CONFIG_MMU */
+
+/* If true, system calls save and restore all registers (except result
+ * registers, of course).  If false, then `call clobbered' registers
+ * will not be preserved, on the theory that system calls are basically
+ * function calls anyway, and the caller should be able to deal with it.
+ * This is a security risk, of course, as `internal' values may leak out
+ * after a system call, but that certainly doesn't matter very much for
+ * a processor with no MMU protection!  For a protected-mode kernel, it
+ * would be faster to just zero those registers before returning.
+ *
+ * I cannot rely on the glibc implementation. If you turn it off, make
+ * sure that r11/r12 are saved in user-space. --KAA
+ *
+ * These are special variables used by the kernel trap/interrupt code
+ * to save registers in, at a time when there are no spare registers we
+ * can use to do so, and we can't depend on the value of the stack
+ * pointer.  This means that they must be within a signed 16-bit
+ * displacement of 0x00000000.
+ */
+
+/* A `state save frame' is a struct pt_regs preceded by some extra space
+ * suitable for a function call stack frame. */
+
+/* Amount of room on the stack reserved for arguments and to satisfy the
+ * C calling conventions, in addition to the space used by the struct
+ * pt_regs that actually holds saved values. */
+#define STATE_SAVE_ARG_SPACE	(6*4) /* Up to six arguments */
+
+#endif /* CONFIG_MMU */
+
 #endif /* _ASM_MICROBLAZE_ENTRY_H */
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
new file mode 100644
index 0000000..91a0e7b
--- /dev/null
+++ b/arch/microblaze/kernel/entry.S
@@ -0,0 +1,1116 @@
+/*
+ * Low-level system-call handling, trap handlers and context-switching
+ *
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
+ * Copyright (C) 2003		John Williams <jwilliams@itee.uq.edu.au>
+ * Copyright (C) 2001,2002	NEC Corporation
+ * Copyright (C) 2001,2002	Miles Bader <miles@gnu.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ * Written by Miles Bader <miles@gnu.org>
+ * Heavily modified by John Williams for Microblaze
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+
+#include <asm/entry.h>
+#include <asm/current.h>
+#include <asm/processor.h>
+#include <asm/exceptions.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+#include <asm/page.h>
+#include <asm/unistd.h>
+
+#include <linux/errno.h>
+#include <asm/signal.h>
+
+/* The size of a state save frame. */
+#define STATE_SAVE_SIZE		(PT_SIZE + STATE_SAVE_ARG_SPACE)
+
+/* The offset of the struct pt_regs in a `state save frame' on the stack. */
+#define PTO	STATE_SAVE_ARG_SPACE /* 24 the space for args */
+
+#define C_ENTRY(name)	.globl name; .align 4; name
+
+/*
+ * Various ways of setting and clearing BIP in flags reg.
+ * This is mucky, but necessary when using a MicroBlaze version that
+ * allows MSR ops to write to BIP
+ */
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+	.macro	clear_bip
+	msrclr	r11, MSR_BIP
+	nop
+	.endm
+
+	.macro	set_bip
+	msrset	r11, MSR_BIP
+	nop
+	.endm
+
+	.macro	clear_eip
+	msrclr	r11, MSR_EIP
+	nop
+	.endm
+
+	.macro	set_ee
+	msrset	r11, MSR_EE
+	nop
+	.endm
+
+	.macro	disable_irq
+	msrclr	r11, MSR_IE
+	nop
+	.endm
+
+	.macro	enable_irq
+	msrset	r11, MSR_IE
+	nop
+	.endm
+
+	.macro	set_ums
+	msrset	r11, MSR_UMS
+	nop
+	msrclr	r11, MSR_VMS
+	nop
+	.endm
+
+	.macro	set_vms
+	msrclr	r11, MSR_UMS
+	nop
+	msrset	r11, MSR_VMS
+	nop
+	.endm
+
+	.macro	clear_vms_ums
+	msrclr	r11, MSR_VMS
+	nop
+	msrclr	r11, MSR_UMS
+	nop
+	.endm
+#else
+	.macro	clear_bip
+	mfs	r11, rmsr
+	nop
+	andi	r11, r11, ~MSR_BIP
+	mts	rmsr, r11
+	nop
+	.endm
+
+	.macro	set_bip
+	mfs	r11, rmsr
+	nop
+	ori	r11, r11, MSR_BIP
+	mts	rmsr, r11
+	nop
+	.endm
+
+	.macro	clear_eip
+	mfs	r11, rmsr
+	nop
+	andi	r11, r11, ~MSR_EIP
+	mts	rmsr, r11
+	nop
+	.endm
+
+	.macro	set_ee
+	mfs	r11, rmsr
+	nop
+	ori	r11, r11, MSR_EE
+	mts	rmsr, r11
+	nop
+	.endm
+
+	.macro	disable_irq
+	mfs	r11, rmsr
+	nop
+	andi	r11, r11, ~MSR_IE
+	mts	rmsr, r11
+	nop
+	.endm
+
+	.macro	enable_irq
+	mfs	r11, rmsr
+	nop
+	ori	r11, r11, MSR_IE
+	mts	rmsr, r11
+	nop
+	.endm
+
+	.macro set_ums
+	mfs	r11, rmsr
+	nop
+	ori	r11, r11, MSR_VMS
+	andni	r11, r11, MSR_UMS
+	mts	rmsr, r11
+	nop
+	.endm
+
+	.macro	set_vms
+	mfs	r11, rmsr
+	nop
+	ori	r11, r11, MSR_VMS
+	andni	r11, r11, MSR_UMS
+	mts	rmsr, r11
+	nop
+	.endm
+
+	.macro	clear_vms_ums
+	mfs	r11, rmsr
+	nop
+	andni	r11, r11, (MSR_VMS|MSR_UMS)
+	mts	rmsr,r11
+	nop
+	.endm
+#endif
+
+/* Define how to call high-level functions. With MMU, virtual mode must be
+ * enabled when calling the high-level function. Clobbers R11.
+ * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
+ */
+
+/* turn on virtual protected mode save */
+#define VM_ON		\
+	set_ums;		\
+	rted	r0, 2f;	\
+2: nop;
+
+/* turn off virtual protected mode save and user mode save*/
+#define VM_OFF			\
+	clear_vms_ums;			\
+	rted	r0, TOPHYS(1f);	\
+1: nop;
+
+#define SAVE_REGS \
+	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
+	swi	r5, r1, PTO+PT_R5;					\
+	swi	r6, r1, PTO+PT_R6;					\
+	swi	r7, r1, PTO+PT_R7;					\
+	swi	r8, r1, PTO+PT_R8;					\
+	swi	r9, r1, PTO+PT_R9;					\
+	swi	r10, r1, PTO+PT_R10;					\
+	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
+	swi	r12, r1, PTO+PT_R12;					\
+	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
+	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
+	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
+	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
+	swi	r19, r1, PTO+PT_R19;					\
+	swi	r20, r1, PTO+PT_R20;					\
+	swi	r21, r1, PTO+PT_R21;					\
+	swi	r22, r1, PTO+PT_R22;					\
+	swi	r23, r1, PTO+PT_R23;					\
+	swi	r24, r1, PTO+PT_R24;					\
+	swi	r25, r1, PTO+PT_R25;					\
+	swi	r26, r1, PTO+PT_R26;					\
+	swi	r27, r1, PTO+PT_R27;					\
+	swi	r28, r1, PTO+PT_R28;					\
+	swi	r29, r1, PTO+PT_R29;					\
+	swi	r30, r1, PTO+PT_R30;					\
+	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
+	mfs	r11, rmsr;		/* save MSR */			\
+	nop;								\
+	swi	r11, r1, PTO+PT_MSR;
+
+#define RESTORE_REGS \
+	lwi	r11, r1, PTO+PT_MSR;					\
+	mts	rmsr , r11;						\
+	nop;								\
+	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
+	lwi	r5, r1, PTO+PT_R5;					\
+	lwi	r6, r1, PTO+PT_R6;					\
+	lwi	r7, r1, PTO+PT_R7;					\
+	lwi	r8, r1, PTO+PT_R8;					\
+	lwi	r9, r1, PTO+PT_R9;					\
+	lwi	r10, r1, PTO+PT_R10;					\
+	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
+	lwi	r12, r1, PTO+PT_R12;					\
+	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
+	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
+	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
+	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
+	lwi	r19, r1, PTO+PT_R19;					\
+	lwi	r20, r1, PTO+PT_R20;					\
+	lwi	r21, r1, PTO+PT_R21;					\
+	lwi	r22, r1, PTO+PT_R22;					\
+	lwi	r23, r1, PTO+PT_R23;					\
+	lwi	r24, r1, PTO+PT_R24;					\
+	lwi	r25, r1, PTO+PT_R25;					\
+	lwi	r26, r1, PTO+PT_R26;					\
+	lwi	r27, r1, PTO+PT_R27;					\
+	lwi	r28, r1, PTO+PT_R28;					\
+	lwi	r29, r1, PTO+PT_R29;					\
+	lwi	r30, r1, PTO+PT_R30;					\
+	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */
+
+.text
+
+/*
+ * User trap.
+ *
+ * System calls are handled here.
+ *
+ * Syscall protocol:
+ * Syscall number in r12, args in r5-r10
+ * Return value in r3
+ *
+ * Trap entered via brki instruction, so BIP bit is set, and interrupts
+ * are masked. This is nice, means we don't have to CLI before state save
+ */
+C_ENTRY(_user_exception):
+	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+	addi	r14, r14, 4	/* return address is 4 byte after call */
+	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* Save r11 */
+
+	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
+	beqi	r11, 1f;		/* Jump ahead if coming from user */
+/* Kernel-mode state save. */
+	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
+	tophys(r1,r11);
+	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
+	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
+
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
+	SAVE_REGS
+
+	addi	r11, r0, 1; 		/* Was in kernel-mode. */
+	swi	r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
+	brid	2f;
+	nop;				/* Fill delay slot */
+
+/* User-mode state save.  */
+1:
+	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* restore r11 */
+	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+	tophys(r1,r1);
+	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
+/* calculate kernel stack pointer from task struct 8k */
+	addik	r1, r1, THREAD_SIZE;
+	tophys(r1,r1);
+
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
+	SAVE_REGS
+
+	swi	r0, r1, PTO+PT_MODE;			/* Was in user-mode. */
+	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+	swi	r11, r1, PTO+PT_R1;		/* Store user SP.  */
+	addi	r11, r0, 1;
+	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
+2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));	/* get saved current */
+	/* Save away the syscall number.  */
+	swi	r12, r1, PTO+PT_R0;
+	tovirt(r1,r1)
+
+	la	r15, r0, ret_from_trap-8
+/* where the trap should return need -8 to adjust for rtsd r15, 8*/
+/* Jump to the appropriate function for the system call number in r12
+ * (r12 is not preserved), or return an error if r12 is not valid. The LP
+ * register should point to the location where
+ * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */
+	/* See if the system call number is valid.  */
+	addi	r11, r12, -__NR_syscalls;
+	bgei	r11,1f;
+	/* Figure out which function to use for this system call.  */
+	/* Note Microblaze barrel shift is optional, so don't rely on it */
+	add	r12, r12, r12;			/* convert num -> ptr */
+	add	r12, r12, r12;
+
+	/* Trace syscall counts and store them in r0_ram */
+	lwi	r3, r12, 0x400 + TOPHYS(r0_ram)
+	addi	r3, r3, 1
+	swi	r3, r12, 0x400 + TOPHYS(r0_ram)
+
+	lwi	r12, r12, TOPHYS(sys_call_table); /* Function ptr */
+	/* Make the system call to the address in r12 */
+	set_vms;
+	rtid	r12, 0;
+	nop;
+	/* The syscall number is invalid, return an error.  */
+1:	VM_ON;	/* RETURN() expects virtual mode*/
+	addi	r3, r0, -ENOSYS;
+	rtsd	r15,8;		/* looks like a normal subroutine return */
+	or 	r0, r0, r0
+
+
+/* Entry point used to return from a syscall/trap.  */
+/* We re-enable BIP bit before state restore */
+C_ENTRY(ret_from_trap):
+	set_bip;			/*  Ints masked for state restore*/
+	lwi	r11, r1, PTO+PT_MODE;
+/* See if returning to kernel mode, if so, skip resched &c.  */
+	bnei	r11, 2f;
+
+	/* We're returning to user mode, so check for various conditions that
+	 * trigger rescheduling. */
+	/* Get current task ptr into r11 */
+	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
+	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
+	andi	r11, r11, _TIF_NEED_RESCHED;
+	beqi	r11, 5f;
+
+	swi	r3, r1, PTO + PT_R3; /* store syscall result */
+	swi	r4, r1, PTO + PT_R4;
+	bralid	r15, schedule;	/* Call scheduler */
+	nop;				/* delay slot */
+	lwi	r3, r1, PTO + PT_R3; /* restore syscall result */
+	lwi	r4, r1, PTO + PT_R4;
+
+	/* Maybe handle a signal */
+5:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
+	andi	r11, r11, _TIF_SIGPENDING;
+	beqi	r11, 1f;		/* Signals to handle, handle them */
+
+	swi	r3, r1, PTO + PT_R3; /* store syscall result */
+	swi	r4, r1, PTO + PT_R4;
+	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
+	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
+	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
+	bralid	r15, do_signal;	/* Handle any signals */
+	nop;
+	lwi	r3, r1, PTO + PT_R3; /* restore syscall result */
+	lwi	r4, r1, PTO + PT_R4;
+
+/* Finally, return to user state.  */
+1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
+	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
+	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
+	VM_OFF;
+	tophys(r1,r1);
+	RESTORE_REGS;
+	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
+	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
+	bri	6f;
+
+/* Return to kernel state.  */
+2:	VM_OFF;
+	tophys(r1,r1);
+	RESTORE_REGS;
+	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
+	tovirt(r1,r1);
+6:
+TRAP_return:		/* Make global symbol for debugging */
+	rtbd	r14, 0;	/* Instructions to return from an IRQ */
+	nop;
+
+
+/* These syscalls need access to the struct pt_regs on the stack, so we
+   implement them in assembly (they're basically all wrappers anyway).  */
+
+C_ENTRY(sys_fork_wrapper):
+	addi	r5, r0, SIGCHLD			/* Arg 0: flags */
+	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
+	la	r7, r1, PTO			/* Arg 2: parent context */
+	add	r8, r0, r0			/* Arg 3: (unused) */
+	add	r9, r0, r0;			/* Arg 4: (unused) */
+	add	r10, r0, r0;			/* Arg 5: (unused) */
+	brid	do_fork		/* Do real work (tail-call) */
+	nop;
+
+/* This is the initial entry point for a new child thread, with an appropriate
+   stack in place that makes it look like the child is in the middle of a
+   syscall.  This function is actually `returned to' from switch_thread
+   (copy_thread makes ret_from_fork the return address in each new thread's
+   saved context).  */
+C_ENTRY(ret_from_fork):
+	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
+	add	r3, r5, r0;	/* switch_thread returns the prev task */
+				/* ( in the delay slot ) */
+	add	r3, r0, r0;	/* Child's fork call should return 0. */
+	brid	ret_from_trap;	/* Do normal trap return */
+	nop;
+
+C_ENTRY(sys_vfork_wrapper):
+	la	r5, r1, PTO
+	brid	sys_vfork	/* Do real work (tail-call) */
+	nop
+
+C_ENTRY(sys_clone_wrapper):
+	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
+	lwi	r6, r1, PTO+PT_R1;	/* If so, use parent's stack ptr */
+1:	la	r7, r1, PTO;			/* Arg 2: parent context */
+	add	r8, r0, r0;			/* Arg 3: (unused) */
+	add	r9, r0, r0;			/* Arg 4: (unused) */
+	add	r10, r0, r0;			/* Arg 5: (unused) */
+	brid	do_fork		/* Do real work (tail-call) */
+	nop;
+
+C_ENTRY(sys_execve_wrapper):
+	la	r8, r1, PTO;		/* add user context as 4th arg */
+	brid	sys_execve;	/* Do real work (tail-call).*/
+	nop;
+
+C_ENTRY(sys_sigsuspend_wrapper):
+	swi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+	swi	r4, r1, PTO+PT_R4;
+	la	r6, r1, PTO;		/* add user context as 2nd arg */
+	bralid	r15, sys_sigsuspend; /* Do real work.*/
+	nop;
+	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+	lwi	r4, r1, PTO+PT_R4;
+	bri ret_from_trap /* fall through will not work here due to align */
+	nop;
+
+C_ENTRY(sys_rt_sigsuspend_wrapper):
+	swi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+	swi	r4, r1, PTO+PT_R4;
+	la	r7, r1, PTO;		/* add user context as 3rd arg */
+	brlid	r15, sys_rt_sigsuspend;	/* Do real work.*/
+	nop;
+	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+	lwi	r4, r1, PTO+PT_R4;
+	bri ret_from_trap /* fall through will not work here due to align */
+	nop;
+
+
+C_ENTRY(sys_sigreturn_wrapper):
+	swi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+	swi	r4, r1, PTO+PT_R4;
+	la	r5, r1, PTO;		/* add user context as 1st arg */
+	brlid	r15, sys_sigreturn;	/* Do real work.*/
+	nop;
+	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+	lwi	r4, r1, PTO+PT_R4;
+	bri ret_from_trap /* fall through will not work here due to align */
+	nop;
+
+C_ENTRY(sys_rt_sigreturn_wrapper):
+	swi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+	swi	r4, r1, PTO+PT_R4;
+	la	r5, r1, PTO;		/* add user context as 1st arg */
+	brlid	r15, sys_rt_sigreturn	/* Do real work */
+	nop;
+	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+	lwi	r4, r1, PTO+PT_R4;
+	bri ret_from_trap /* fall through will not work here due to align */
+	nop;
+
+/*
+ * HW EXCEPTION routine start
+ */
+
+#define SAVE_STATE	\
+	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */	\
+	set_bip;	/*equalize initial state for all possible entries*/\
+	clear_eip;							\
+	enable_irq;							\
+	set_ee;								\
+	/* See if already in kernel mode.*/				\
+	lwi	r11, r0, TOPHYS(PER_CPU(KM));				\
+	beqi	r11, 1f;		/* Jump ahead if coming from user */\
+	/* Kernel-mode state save.  */					\
+	/* Reload kernel stack-ptr. */					\
+	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
+	tophys(r1,r11);							\
+	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */	\
+	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
+	/* store return registers separately because			\
+	 * this macro is used for other exceptions */			\
+	swi	r3, r1, PTO + PT_R3;					\
+	swi	r4, r1, PTO + PT_R4;					\
+	SAVE_REGS							\
+	/* PC, before IRQ/trap - this is one instruction above */	\
+	swi	r17, r1, PTO+PT_PC;					\
+									\
+	addi	r11, r0, 1; 		/* Was in kernel-mode.  */	\
+	swi	r11, r1, PTO+PT_MODE; 	 				\
+	brid	2f;							\
+	nop;				/* Fill delay slot */		\
+1:	/* User-mode state save.  */					\
+	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
+	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
+	tophys(r1,r1);							\
+	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
+	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */\
+	tophys(r1,r1);							\
+									\
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
+	/* store return registers separately because this macro	\
+	 * is used for other exceptions */				\
+	swi	r3, r1, PTO + PT_R3; 					\
+	swi	r4, r1, PTO + PT_R4;					\
+	SAVE_REGS							\
+	/* PC, before IRQ/trap - this is one instruction above FIXME*/	\
+	swi	r17, r1, PTO+PT_PC;					\
+									\
+	swi	r0, r1, PTO+PT_MODE; /* Was in user-mode.  */		\
+	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
+	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */		\
+	addi	r11, r0, 1;						\
+	swi	r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
+2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
+	/* Save away the syscall number.  */				\
+	swi	r0, r1, PTO+PT_R0;					\
+	tovirt(r1,r1)
+
+C_ENTRY(full_exception_trap):
+	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+	/* adjust exception address for privileged instruction
+	 * for finding where is it */
+	addik	r17, r17, -4
+	SAVE_STATE /* Save registers */
+	/* FIXME this can be stored directly in the PT_ESR reg.
+	 * I tested it but there is a fault */
+	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
+	la	r15, r0, ret_from_exc - 8
+	la	r5, r1, PTO		 /* parameter struct pt_regs * regs */
+	mfs	r6, resr
+	nop
+	mfs	r7, rfsr;		/* save FSR */
+	nop
+	la	r12, r0, full_exception
+	set_vms;
+	rtbd	r12, 0;
+	nop;
+
+/*
+ * Unaligned data trap.
+ *
+ * An unaligned access on the last word of a 4k page is handled here.
+ *
+ * Trap entered via exception, so EE bit is set, and interrupts
+ * are masked.  This is nice, means we don't have to CLI before state save
+ *
+ * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
+ */
+C_ENTRY(unaligned_data_trap):
+	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+	SAVE_STATE		/* Save registers.*/
+	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
+	la	r15, r0, ret_from_exc-8
+	mfs	r3, resr		/* ESR */
+	nop
+	mfs	r4, rear		/* EAR */
+	nop
+	la	r7, r1, PTO		/* parameter struct pt_regs * regs */
+	la	r12, r0, _unaligned_data_exception
+	set_vms;
+	rtbd	r12, 0;	/* interrupts enabled */
+	nop;
+
+/*
+ * Page fault traps.
+ *
+ * If the real exception handler (from hw_exception_handler.S) didn't find
+ * the mapping for the process, then we're thrown here to handle such a situation.
+ *
+ * Trap entered via exceptions, so EE bit is set, and interrupts
+ * are masked.  This is nice, means we don't have to CLI before state save
+ *
+ * Build a standard exception frame for TLB Access errors.  All TLB exceptions
+ * will bail out to this point if they can't resolve the lightweight TLB fault.
+ *
+ * The C function called is in "arch/microblaze/mm/fault.c", declared as:
+ * void do_page_fault(struct pt_regs *regs,
+ *				unsigned long address,
+ *				unsigned long error_code)
+ */
+/* data and instruction trap - which one it is gets resolved in fault.c */
+C_ENTRY(page_fault_data_trap):
+	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+	SAVE_STATE		/* Save registers.*/
+	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
+	la	r15, r0, ret_from_exc-8
+	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
+	mfs	r6, rear		/* parameter unsigned long address */
+	nop
+	mfs	r7, resr		/* parameter unsigned long error_code */
+	nop
+	la	r12, r0, do_page_fault
+	set_vms;
+	rtbd	r12, 0;	/* interrupts enabled */
+	nop;
+
+C_ENTRY(page_fault_instr_trap):
+	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+	SAVE_STATE		/* Save registers.*/
+	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
+	la	r15, r0, ret_from_exc-8
+	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
+	mfs	r6, rear		/* parameter unsigned long address */
+	nop
+	ori	r7, r0, 0		/* parameter unsigned long error_code */
+	la	r12, r0, do_page_fault
+	set_vms;
+	rtbd	r12, 0;	/* interrupts enabled */
+	nop;
+
+/* Entry point used to return from an exception.  */
+C_ENTRY(ret_from_exc):
+	set_bip;			/*  Ints masked for state restore*/
+	lwi	r11, r1, PTO+PT_MODE;
+	bnei	r11, 2f;		/* See if returning to kernel mode, */
+					/* ... if so, skip resched &c.  */
+
+	/* We're returning to user mode, so check for various conditions that
+	   trigger rescheduling. */
+	/* Get current task ptr into r11 */
+	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
+	andi	r11, r11, _TIF_NEED_RESCHED;
+	beqi	r11, 5f;
+
+/* Call the scheduler before returning from a syscall/trap. */
+	bralid	r15, schedule;	/* Call scheduler */
+	nop;				/* delay slot */
+
+	/* Maybe handle a signal */
+5:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
+	andi	r11, r11, _TIF_SIGPENDING;
+	beqi	r11, 1f;		/* Signals to handle, handle them */
+
+	/*
+	 * Handle a signal return; Pending signals should be in r18.
+	 *
+	 * Not all registers are saved by the normal trap/interrupt entry
+	 * points (for instance, call-saved registers (because the normal
+	 * C-compiler calling sequence in the kernel makes sure they're
+	 * preserved), and call-clobbered registers in the case of
+	 * traps), but signal handlers may want to examine or change the
+	 * complete register state.  Here we save anything not saved by
+	 * the normal entry sequence, so that it may be safely restored
+	 * (in a possibly modified form) after do_signal returns.
+	 * store return registers separately because this macro is used
+	 * for other exceptions */
+	swi	r3, r1, PTO + PT_R3;
+	swi	r4, r1, PTO + PT_R4;
+	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
+	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
+	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
+	bralid	r15, do_signal;	/* Handle any signals */
+	nop;
+	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+	lwi	r4, r1, PTO+PT_R4;
+
+/* Finally, return to user state.  */
+1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
+	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
+	VM_OFF;
+	tophys(r1,r1);
+
+	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+	lwi	r4, r1, PTO+PT_R4;
+	RESTORE_REGS;
+	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
+
+	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
+	bri	6f;
+/* Return to kernel state.  */
+2:	VM_OFF;
+	tophys(r1,r1);
+	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+	lwi	r4, r1, PTO+PT_R4;
+	RESTORE_REGS;
+	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
+
+	tovirt(r1,r1);
+6:
+EXC_return:		/* Make global symbol for debugging */
+	rtbd	r14, 0;	/* Instructions to return from an IRQ */
+	nop;
+
+/*
+ * HW EXCEPTION routine end
+ */
+
+/*
+ * Hardware maskable interrupts.
+ *
+ * The stack-pointer (r1) should have already been saved to the memory
+ * location PER_CPU(ENTRY_SP).
+ */
+C_ENTRY(_interrupt):
+/* MS: we are in physical address */
+/* Save registers, switch to proper stack, convert SP to virtual.*/
+	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
+	swi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
+	/* MS: See if already in kernel mode. */
+	lwi	r11, r0, TOPHYS(PER_CPU(KM));
+	beqi	r11, 1f; /* MS: Jump ahead if coming from user */
+
+/* Kernel-mode state save. */
+	or	r11, r1, r0
+	tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
+/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
+	swi	r11, r1, (PT_R1 - PT_SIZE);
+/* MS: restore r11 because of saving in SAVE_REGS */
+	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
+	/* save registers */
+/* MS: Make room on the stack -> activation record */
+	addik	r1, r1, -STATE_SAVE_SIZE;
+/* MS: store return registers separately because
+ * this macro is used for other exceptions */
+	swi	r3, r1, PTO + PT_R3;
+	swi	r4, r1, PTO + PT_R4;
+	SAVE_REGS
+	/* MS: store mode */
+	addi	r11, r0, 1; /* MS: Was in kernel-mode. */
+	swi	r11, r1, PTO + PT_MODE; /* MS: and save it */
+	brid	2f;
+	nop; /* MS: Fill delay slot */
+
+1:
+/* User-mode state save. */
+/* MS: restore r11 -> FIXME move before SAVE_REGS */
+	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
+ /* MS: get the saved current */
+	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+	tophys(r1,r1);
+	lwi	r1, r1, TS_THREAD_INFO;
+	addik	r1, r1, THREAD_SIZE;
+	tophys(r1,r1);
+	/* save registers */
+	addik	r1, r1, -STATE_SAVE_SIZE;
+	swi	r3, r1, PTO+PT_R3;
+	swi	r4, r1, PTO+PT_R4;
+	SAVE_REGS
+	/* calculate mode */
+	swi	r0, r1, PTO + PT_MODE;
+	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+	swi	r11, r1, PTO+PT_R1;
+	/* setup kernel mode to KM */
+	addi	r11, r0, 1;
+	swi	r11, r0, TOPHYS(PER_CPU(KM));
+
+2:
+	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+	swi	r0, r1, PTO + PT_R0;
+	tovirt(r1,r1)
+	la	r5, r1, PTO;
+	set_vms;
+	la	r11, r0, do_IRQ;
+	la	r15, r0, irq_call;
+irq_call:rtbd	r11, 0;
+	nop;
+
+/* MS: we are in virtual mode */
+ret_from_irq:
+	lwi	r11, r1, PTO + PT_MODE;
+	bnei	r11, 2f;
+
+	add	r11, r0, CURRENT_TASK;
+	lwi	r11, r11, TS_THREAD_INFO;
+	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
+	andi	r11, r11, _TIF_NEED_RESCHED;
+	beqi	r11, 5f
+	bralid	r15, schedule;
+	nop; /* delay slot */
+
+    /* Maybe handle a signal */
+5:	add	r11, r0, CURRENT_TASK;
+	lwi	r11, r11, TS_THREAD_INFO; /* MS: get thread info */
+	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
+	andi	r11, r11, _TIF_SIGPENDING;
+	beqid	r11, no_intr_resched
+/* Handle a signal return; Pending signals should be in r18. */
+	addi	r7, r0, 0; /* Arg 3: int in_syscall */
+	la	r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
+	bralid	r15, do_signal;	/* Handle any signals */
+	add	r6, r0, r0; /* Arg 2: sigset_t *oldset */
+
+/* Finally, return to user state. */
+no_intr_resched:
+    /* Disable interrupts, we are now committed to the state restore */
+	disable_irq
+	swi	r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
+	add	r11, r0, CURRENT_TASK;
+	swi	r11, r0, PER_CPU(CURRENT_SAVE);
+	VM_OFF;
+	tophys(r1,r1);
+	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
+	lwi	r4, r1, PTO + PT_R4;
+	RESTORE_REGS
+	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
+	lwi	r1, r1, PT_R1 - PT_SIZE;
+	bri	6f;
+/* MS: Return to kernel state. */
+2:	VM_OFF /* MS: turn off MMU */
+	tophys(r1,r1)
+	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
+	lwi	r4, r1, PTO + PT_R4;
+	RESTORE_REGS
+	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
+	tovirt(r1,r1);
+6:
+IRQ_return: /* MS: Make global symbol for debugging */
+	rtid	r14, 0
+	nop
+
+/*
+ * `Debug' trap
+ *  We enter dbtrap in "BIP" (breakpoint) mode.
+ *  So we exit the breakpoint mode with an 'rtbd' and proceed with the
+ *  original dbtrap.
+ *  however, wait to save state first
+ */
+C_ENTRY(_debug_exception):
+	/* BIP bit is set on entry, no interrupts can occur */
+	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
+
+	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
+	set_bip;	/*equalize initial state for all possible entries*/
+	clear_eip;
+	enable_irq;
+	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
+	beqi	r11, 1f;		/* Jump ahead if coming from user */
+	/* Kernel-mode state save.  */
+	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
+	tophys(r1,r11);
+	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
+	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
+
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
+	swi	r3, r1, PTO + PT_R3;
+	swi	r4, r1, PTO + PT_R4;
+	SAVE_REGS;
+
+	addi	r11, r0, 1; 		/* Was in kernel-mode.  */
+	swi	r11, r1, PTO + PT_MODE;
+	brid	2f;
+	nop;				/* Fill delay slot */
+1:      /* User-mode state save.  */
+	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
+	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+	tophys(r1,r1);
+	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
+	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
+	tophys(r1,r1);
+
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
+	swi	r3, r1, PTO + PT_R3;
+	swi	r4, r1, PTO + PT_R4;
+	SAVE_REGS;
+
+	swi	r0, r1, PTO+PT_MODE; /* Was in user-mode.  */
+	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */
+	addi	r11, r0, 1;
+	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
+2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+	/* Save away the syscall number.  */
+	swi	r0, r1, PTO+PT_R0;
+	tovirt(r1,r1)
+
+	addi	r5, r0, SIGTRAP		     /* send the trap signal */
+	add	r6, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+	addk	r7, r0, r0		     /* 3rd param zero */
+
+	set_vms;
+	la	r11, r0, send_sig;
+	la	r15, r0, dbtrap_call;
+dbtrap_call:	rtbd	r11, 0;
+	nop;
+
+	set_bip;			/*  Ints masked for state restore*/
+	lwi	r11, r1, PTO+PT_MODE;
+	bnei	r11, 2f;
+
+	/* Get current task ptr into r11 */
+	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
+	andi	r11, r11, _TIF_NEED_RESCHED;
+	beqi	r11, 5f;
+
+/* Call the scheduler before returning from a syscall/trap. */
+
+	bralid	r15, schedule;	/* Call scheduler */
+	nop;				/* delay slot */
+	/* XXX Is PT_DTRACE handling needed here? */
+	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here.  */
+
+	/* Maybe handle a signal */
+5:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
+	andi	r11, r11, _TIF_SIGPENDING;
+	beqi	r11, 1f;		/* Signals to handle, handle them */
+
+/* Handle a signal return; Pending signals should be in r18.  */
+	/* Not all registers are saved by the normal trap/interrupt entry
+	   points (for instance, call-saved registers (because the normal
+	   C-compiler calling sequence in the kernel makes sure they're
+	   preserved), and call-clobbered registers in the case of
+	   traps), but signal handlers may want to examine or change the
+	   complete register state.  Here we save anything not saved by
+	   the normal entry sequence, so that it may be safely restored
+	   (in a possibly modified form) after do_signal returns.  */
+
+	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
+	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
+	addi  r7, r0, 0;	/* Arg 3: int in_syscall */
+	bralid	r15, do_signal;	/* Handle any signals */
+	nop;
+
+
+/* Finally, return to user state.  */
+1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
+	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
+	VM_OFF;
+	tophys(r1,r1);
+
+	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+	lwi	r4, r1, PTO+PT_R4;
+	RESTORE_REGS
+	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
+
+
+	lwi	r1, r1, PT_R1 - PT_SIZE;
+					/* Restore user stack pointer. */
+	bri	6f;
+
+/* Return to kernel state.  */
+2:	VM_OFF;
+	tophys(r1,r1);
+	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
+	lwi	r4, r1, PTO+PT_R4;
+	RESTORE_REGS
+	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
+
+	tovirt(r1,r1);
+6:
+DBTRAP_return:		/* Make global symbol for debugging */
+	rtbd	r14, 0;	/* Instructions to return from a debug trap */
+	nop;
+
+
+
+ENTRY(_switch_to)
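+	/* r5 - thread_info of the previous task, r6 - thread_info of the
+	 * next task; the previous current task (r31) is returned in r3 */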
+	/* prepare return value */
+	addk	r3, r0, r31
+
+	/* save registers in cpu_context */
+	/* use r11 and r12, volatile registers, as temp registers */
+	/* get the start of cpu_context for the previous process */
+	addik	r11, r5, TI_CPU_CONTEXT
+	swi	r1, r11, CC_R1
+	swi	r2, r11, CC_R2
+	/* skip volatile registers.
+	 * they are saved on the stack when we jump to _switch_to() */
+	/* dedicated registers */
+	swi	r13, r11, CC_R13
+	swi	r14, r11, CC_R14
+	swi	r15, r11, CC_R15
+	swi	r16, r11, CC_R16
+	swi	r17, r11, CC_R17
+	swi	r18, r11, CC_R18
+	/* save non-volatile registers */
+	swi	r19, r11, CC_R19
+	swi	r20, r11, CC_R20
+	swi	r21, r11, CC_R21
+	swi	r22, r11, CC_R22
+	swi	r23, r11, CC_R23
+	swi	r24, r11, CC_R24
+	swi	r25, r11, CC_R25
+	swi	r26, r11, CC_R26
+	swi	r27, r11, CC_R27
+	swi	r28, r11, CC_R28
+	swi	r29, r11, CC_R29
+	swi	r30, r11, CC_R30
+	/* special purpose registers */
+	mfs	r12, rmsr
+	nop
+	swi	r12, r11, CC_MSR
+	mfs	r12, rear
+	nop
+	swi	r12, r11, CC_EAR
+	mfs	r12, resr
+	nop
+	swi	r12, r11, CC_ESR
+	mfs	r12, rfsr
+	nop
+	swi	r12, r11, CC_FSR
+
+	/* update r31, the current */
+	lwi	r31, r6, TI_TASK	/* get pointer to the task which will run next */
+	/* store it to current_save too */
+	swi	r31, r0, PER_CPU(CURRENT_SAVE)
+
+	/* get new process' cpu context and restore */
+	/* get the start of the next task's cpu context */
+	addik	r11, r6, TI_CPU_CONTEXT
+
+	/* non-volatile registers */
+	lwi	r30, r11, CC_R30
+	lwi	r29, r11, CC_R29
+	lwi	r28, r11, CC_R28
+	lwi	r27, r11, CC_R27
+	lwi	r26, r11, CC_R26
+	lwi	r25, r11, CC_R25
+	lwi	r24, r11, CC_R24
+	lwi	r23, r11, CC_R23
+	lwi	r22, r11, CC_R22
+	lwi	r21, r11, CC_R21
+	lwi	r20, r11, CC_R20
+	lwi	r19, r11, CC_R19
+	/* dedicated registers */
+	lwi	r18, r11, CC_R18
+	lwi	r17, r11, CC_R17
+	lwi	r16, r11, CC_R16
+	lwi	r15, r11, CC_R15
+	lwi	r14, r11, CC_R14
+	lwi	r13, r11, CC_R13
+	/* skip volatile registers */
+	lwi	r2, r11, CC_R2
+	lwi	r1, r11, CC_R1
+
+	/* special purpose registers */
+	lwi	r12, r11, CC_FSR
+	mts	rfsr, r12
+	nop
+	lwi	r12, r11, CC_MSR
+	mts	rmsr, r12
+	nop
+
+	rtsd	r15, 8
+	nop
+
+ENTRY(_reset)
+	brai	0x70; /* Jump back to FS-boot */
+
+ENTRY(_break)
+	mfs	r5, rmsr
+	nop
+	swi	r5, r0, 0x250 + TOPHYS(r0_ram)
+	mfs	r5, resr
+	nop
+	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
+	bri	0
+
+	/* These are compiled and loaded into high memory, then
+	 * copied into place in mach_early_setup */
+	.section	.init.ivt, "ax"
+	.org	0x0
+	/* this is very important - here is the reset vector */
+	/* in the current MMU branch it does not matter what is here - it is
+	 * used from the bootloader side - but this is correct for FS-BOOT */
+	brai	0x70
+	nop
+	brai	TOPHYS(_user_exception); /* syscall handler */
+	brai	TOPHYS(_interrupt);	/* Interrupt handler */
+	brai	TOPHYS(_break);		/* nmi trap handler */
+	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */
+
+	.org	0x60
+	brai	TOPHYS(_debug_exception);	/* debug trap handler*/
+
+.section .rodata,"a"
+#include "syscall_table.S"
+
+syscall_table_size=(.-sys_call_table)
+
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 19/30] microblaze_mmu_v1: Update exception handling - MMU exception
  2009-04-27  8:32                                   ` [PATCH 18/30] microblaze_mmu_v1: entry.S, entry.h monstr
@ 2009-04-27  8:32                                     ` monstr
  2009-04-27  8:32                                       ` [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update monstr
  0 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/kernel/hw_exception_handler.S |  749 ++++++++++++++++++++++++-
 1 files changed, 742 insertions(+), 7 deletions(-)

diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index cf9486d..617047f 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -53,6 +53,12 @@
  *   - Illegal instruction opcode
  *   - Divide-by-zero
  *
+ *   - Privileged instruction exception (MMU)
+ *   - Data storage exception (MMU)
+ *   - Instruction storage exception (MMU)
+ *   - Data TLB miss exception (MMU)
+ *   - Instruction TLB miss exception (MMU)
+ *
  * Note we disable interrupts during exception handling, otherwise we will
  * possibly get multiple re-entrancy if interrupt handlers themselves cause
  * exceptions. JW
@@ -71,9 +77,24 @@
 #include <asm/asm-offsets.h>
 
 /* Helpful Macros */
+#ifndef CONFIG_MMU
 #define EX_HANDLER_STACK_SIZ	(4*19)
+#endif
 #define NUM_TO_REG(num)		r ## num
 
+#ifdef CONFIG_MMU
+/* FIXME you can't change the first load of the MSR because there is
+ * a hardcoded jump (bri 4) */
+	#define RESTORE_STATE			\
+		lwi	r3, r1, PT_R3;		\
+		lwi	r4, r1, PT_R4;		\
+		lwi	r5, r1, PT_R5;		\
+		lwi	r6, r1, PT_R6;		\
+		lwi	r11, r1, PT_R11;	\
+		lwi	r31, r1, PT_R31;	\
+		lwi	r1, r0, TOPHYS(r0_ram + 0);
+#endif /* CONFIG_MMU */
+
 #define LWREG_NOP			\
 	bri	ex_handler_unhandled;	\
 	nop;
@@ -106,6 +127,54 @@
 	or	r3, r0, NUM_TO_REG (regnum);		\
 	bri	ex_sw_tail;
 
+#ifdef CONFIG_MMU
+	#define R3_TO_LWREG_VM_V(regnum)		\
+		brid	ex_lw_end_vm;			\
+		swi	r3, r7, 4 * regnum;
+
+	#define R3_TO_LWREG_VM(regnum)			\
+		brid	ex_lw_end_vm;			\
+		or	NUM_TO_REG (regnum), r0, r3;
+
+	#define SWREG_TO_R3_VM_V(regnum)		\
+		brid	ex_sw_tail_vm;			\
+		lwi	r3, r7, 4 * regnum;
+
+	#define SWREG_TO_R3_VM(regnum)			\
+		brid	ex_sw_tail_vm;			\
+		or	r3, r0, NUM_TO_REG (regnum);
+
+	/* Shift right instruction depending on available configuration */
+	#if CONFIG_XILINX_MICROBLAZE0_USE_BARREL > 0
+	#define BSRLI(rD, rA, imm)	\
+		bsrli rD, rA, imm
+	#elif CONFIG_XILINX_MICROBLAZE0_USE_DIV > 0
+	#define BSRLI(rD, rA, imm)	\
+		ori rD, r0, (1 << imm);	\
+		idivu rD, rD, rA
+	#else
+	#define BSRLI(rD, rA, imm) BSRLI ## imm (rD, rA)
+	/* Only the used shift constants defined here - add more if needed */
+	#define BSRLI2(rD, rA)				\
+		srl rD, rA;		/* >> 1 */	\
+		srl rD, rD;		/* >> 2 */
+	#define BSRLI10(rD, rA)				\
+		srl rD, rA;		/* >> 1 */	\
+		srl rD, rD;		/* >> 2 */	\
+		srl rD, rD;		/* >> 3 */	\
+		srl rD, rD;		/* >> 4 */	\
+		srl rD, rD;		/* >> 5 */	\
+		srl rD, rD;		/* >> 6 */	\
+		srl rD, rD;		/* >> 7 */	\
+		srl rD, rD;		/* >> 8 */	\
+		srl rD, rD;		/* >> 9 */	\
+		srl rD, rD		/* >> 10 */
+	#define BSRLI20(rD, rA)		\
+		BSRLI10(rD, rA);	\
+		BSRLI10(rD, rD)
+	#endif
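+	/* In all three variants BSRLI(rD, rA, imm) yields rD = rA >> imm
+	 * (logical): one bsrli with a barrel shifter, rA / (1 << imm) with
+	 * the divider, or 'imm' single-bit srl steps otherwise */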
+#endif /* CONFIG_MMU */
+
 .extern other_exception_handler /* Defined in exception.c */
 
 /*
@@ -163,34 +232,119 @@
 
 /* wrappers to restore state before coming to entry.S */
 
+#ifdef CONFIG_MMU
+.section .rodata
+.align 4
+_MB_HW_ExceptionVectorTable:
+/*  0 - Undefined */
+	.long	TOPHYS(ex_handler_unhandled)
+/*  1 - Unaligned data access exception */
+	.long	TOPHYS(handle_unaligned_ex)
+/*  2 - Illegal op-code exception */
+	.long	TOPHYS(full_exception_trapw)
+/*  3 - Instruction bus error exception */
+	.long	TOPHYS(full_exception_trapw)
+/*  4 - Data bus error exception */
+	.long	TOPHYS(full_exception_trapw)
+/*  5 - Divide by zero exception */
+	.long	TOPHYS(full_exception_trapw)
+/*  6 - Floating point unit exception */
+	.long	TOPHYS(full_exception_trapw)
+/*  7 - Privileged instruction exception */
+	.long	TOPHYS(full_exception_trapw)
+/*  8 - 15 - Undefined */
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+/* 16 - Data storage exception */
+	.long	TOPHYS(handle_data_storage_exception)
+/* 17 - Instruction storage exception */
+	.long	TOPHYS(handle_instruction_storage_exception)
+/* 18 - Data TLB miss exception */
+	.long	TOPHYS(handle_data_tlb_miss_exception)
+/* 19 - Instruction TLB miss exception */
+	.long	TOPHYS(handle_instruction_tlb_miss_exception)
+/* 20 - 31 - Undefined */
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+	.long	TOPHYS(ex_handler_unhandled)
+#endif
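+/* _MB_HW_ExceptionVectorTable is indexed by ESR[EXC] * 4; the handler
+ * below computes that offset and branches through the physical entry */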
+
 .global _hw_exception_handler
 .section .text
 .align 4
 .ent _hw_exception_handler
 _hw_exception_handler:
+#ifndef CONFIG_MMU
 	addik	r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */
+#else
+	swi	r1, r0, TOPHYS(r0_ram + 0); /* GET_SP */
+	/* Save data to kernel memory. This is a problem
+	 * when we come from user space */
+	ori	r1, r0, TOPHYS(r0_ram + 28);
+#endif
 	swi	r3, r1, PT_R3
 	swi	r4, r1, PT_R4
 	swi	r5, r1, PT_R5
 	swi	r6, r1, PT_R6
 
-	mfs	r5, rmsr;
-	nop
-	swi	r5, r1, 0;
-	mfs	r4, rbtr	/* Save BTR before jumping to handler */
-	nop
+#ifdef CONFIG_MMU
+	swi	r11, r1, PT_R11
+	swi	r31, r1, PT_R31
+	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)) /* get saved current */
+#endif
+
 	mfs	r3, resr
 	nop
+	mfs	r4, rear;
+	nop
 
+#ifndef CONFIG_MMU
 	andi	r5, r3, 0x1000;		/* Check ESR[DS] */
 	beqi	r5, not_in_delay_slot;	/* Branch if ESR[DS] not set */
 	mfs	r17, rbtr;	/* ESR[DS] set - return address in BTR */
 	nop
 not_in_delay_slot:
 	swi	r17, r1, PT_R17
+#endif
 
 	andi	r5, r3, 0x1F;		/* Extract ESR[EXC] */
 
+#ifdef CONFIG_MMU
+	/* Calculate exception vector offset = r5 << 2 */
+	addk	r6, r5, r5; /* << 1 */
+	addk	r6, r6, r6; /* << 2 */
+
+/* count which exceptions happen */
+	lwi	r5, r0, 0x200 + TOPHYS(r0_ram)
+	addi	r5, r5, 1
+	swi	r5, r0, 0x200 + TOPHYS(r0_ram)
+	lwi	r5, r6, 0x200 + TOPHYS(r0_ram)
+	addi	r5, r5, 1
+	swi	r5, r6, 0x200 + TOPHYS(r0_ram)
+/* end */
+	/* Load the HW Exception vector */
+	lwi	r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
+	bra	r6
+
+full_exception_trapw:
+	RESTORE_STATE
+	bri	full_exception_trap
+#else
 	/* Exceptions enabled here. This will allow nested exceptions */
 	mfs	r6, rmsr;
 	nop
@@ -254,6 +408,7 @@ handle_other_ex: /* Handle Other exceptions here */
 	lwi	r18, r1, PT_R18
 
 	bri	ex_handler_done; /* Complete exception handling */
+#endif
 
 /* 0x01 - Unaligned data access exception
  * This occurs when a word access is not aligned on a word boundary,
@@ -265,11 +420,28 @@ handle_other_ex: /* Handle Other exceptions here */
 handle_unaligned_ex:
 	/* Working registers already saved: R3, R4, R5, R6
 	 *  R3 = ESR
-	 *  R4 = BTR
+	 *  R4 = EAR
 	 */
-	mfs	r4, rear;
+#ifdef CONFIG_MMU
+	andi	r6, r3, 0x1000			/* Check ESR[DS] */
+	beqi	r6, _no_delayslot		/* Branch if ESR[DS] not set */
+	mfs	r17, rbtr;	/* ESR[DS] set - return address in BTR */
 	nop
+_no_delayslot:
+#endif
+
+#ifdef CONFIG_MMU
+	/* Check if unaligned address is last on a 4k page */
+		andi	r5, r4, 0xffc
+		xori	r5, r5, 0xffc
+		bnei	r5, _unaligned_ex2
+	_unaligned_ex1:
+		RESTORE_STATE;
+/* Another page must be accessed or physical address not in page table */
+		bri	unaligned_data_trap
 
+	_unaligned_ex2:
+#endif
 	andi	r6, r3, 0x3E0; /* Mask and extract the register operand */
 	srl	r6, r6; /* r6 >> 5 */
 	srl	r6, r6;
@@ -278,6 +450,45 @@ handle_unaligned_ex:
 	srl	r6, r6;
 	/* Store the register operand in a temporary location */
 	sbi	r6, r0, TOPHYS(ex_reg_op);
+#ifdef CONFIG_MMU
+	/* Get physical address */
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	ori	r5, r0, CONFIG_KERNEL_START
+	cmpu	r5, r4, r5
+	bgti	r5, _unaligned_ex3
+	ori	r5, r0, swapper_pg_dir
+	bri	_unaligned_ex4
+
+	/* Get the PGD for the current thread. */
+_unaligned_ex3: /* user thread */
+	addi	r5, CURRENT_TASK, TOPHYS(0); /* get current task address */
+	lwi	r5, r5, TASK_THREAD + PGDIR
+_unaligned_ex4:
+	tophys(r5,r5)
+	BSRLI(r6,r4,20)			/* Create L1 (pgdir/pmd) address */
+	andi	r6, r6, 0xffc
+/* Assume pgdir aligned on 4K boundary, no need for "andi r5,r5,0xfffff003" */
+	or	r5, r5, r6
+	lwi	r6, r5, 0		/* Get L1 entry */
+	andi	r5, r6, 0xfffff000	/* Extract L2 (pte) base address. */
+	beqi	r5, _unaligned_ex1	/* Bail if no table */
+
+	tophys(r5,r5)
+	BSRLI(r6,r4,10)			/* Compute PTE address */
+	andi	r6, r6, 0xffc
+	andi	r5, r5, 0xfffff003
+	or	r5, r5, r6
+	lwi	r5, r5, 0		/* Get Linux PTE */
+
+	andi	r6, r5, _PAGE_PRESENT
+	beqi	r6, _unaligned_ex1	/* Bail if no page */
+
+	andi	r5, r5, 0xfffff000	/* Extract RPN */
+	andi	r4, r4, 0x00000fff	/* Extract offset */
+	or	r4, r4, r5		/* Create physical address */
+#endif /* CONFIG_MMU */
 
 	andi	r6, r3, 0x400; /* Extract ESR[S] */
 	bnei	r6, ex_sw;
@@ -355,6 +566,7 @@ ex_shw:
 ex_sw_end: /* Exception handling of store word, ends. */
 
 ex_handler_done:
+#ifndef CONFIG_MMU
 	lwi	r5, r1, 0 /* RMSR */
 	mts	rmsr, r5
 	nop
@@ -366,13 +578,458 @@ ex_handler_done:
 
 	rted	r17, 0
 	addik	r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */
+#else
+	RESTORE_STATE;
+	rted	r17, 0
+	nop
+#endif
+
+#ifdef CONFIG_MMU
+	/* Exception vector entry code. This code runs with address translation
+	 * turned off (i.e. using physical addresses). */
+
+	/* Exception vectors. */
+
+	/* 0x10 - Data Storage Exception
+	 * This happens for just a few reasons. U0 set (but we don't do that),
+	 * or zone protection fault (user violation, write to protected page).
+	 * If this is just an update of modified status, we do that quickly
+	 * and exit. Otherwise, we call heavyweight functions to do the work.
+	 */
+	handle_data_storage_exception:
+		/* Working registers already saved: R3, R4, R5, R6
+		 * R3 = ESR
+		 */
+		mfs	r11, rpid
+		nop
+		bri	4
+		mfs	r3, rear		/* Get faulting address */
+		nop
+		/* If we are faulting a kernel address, we have to use the
+		 * kernel page tables.
+		 */
+		ori	r4, r0, CONFIG_KERNEL_START
+		cmpu	r4, r3, r4
+		bgti	r4, ex3
+		/* First, check if it was a zone fault (which means a user
+		 * tried to access a kernel or read-protected page - always
+		 * a SEGV). All other faults here must be stores, so no
+		 * need to check ESR_S as well. */
+		mfs	r4, resr
+		nop
+		andi	r4, r4, 0x800		/* ESR_Z - zone protection */
+		bnei	r4, ex2
+
+		ori	r4, r0, swapper_pg_dir
+		mts	rpid, r0		/* TLB will have 0 TID */
+		nop
+		bri	ex4
+
+		/* Get the PGD for the current thread. */
+	ex3:
+		/* First, check if it was a zone fault (which means a user
+		 * tried to access a kernel or read-protected page - always
+		 * a SEGV). All other faults here must be stores, so no
+		 * need to check ESR_S as well. */
+		mfs	r4, resr
+		nop
+		andi	r4, r4, 0x800		/* ESR_Z */
+		bnei	r4, ex2
+		/* get current task address */
+		addi	r4, CURRENT_TASK, TOPHYS(0);
+		lwi	r4, r4, TASK_THREAD+PGDIR
+	ex4:
+		tophys(r4,r4)
+		BSRLI(r5,r3,20)		/* Create L1 (pgdir/pmd) address */
+		andi	r5, r5, 0xffc
+/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
+		or	r4, r4, r5
+		lwi	r4, r4, 0		/* Get L1 entry */
+		andi	r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
+		beqi	r5, ex2			/* Bail if no table */
+
+		tophys(r5,r5)
+		BSRLI(r6,r3,10)			/* Compute PTE address */
+		andi	r6, r6, 0xffc
+		andi	r5, r5, 0xfffff003
+		or	r5, r5, r6
+		lwi	r4, r5, 0		/* Get Linux PTE */
+
+		andi	r6, r4, _PAGE_RW	/* Is it writeable? */
+		beqi	r6, ex2			/* Bail if not */
+
+		/* Update 'changed' */
+		ori	r4, r4, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
+		swi	r4, r5, 0		/* Update Linux page table */
+
+		/* Most of the Linux PTE is ready to load into the TLB LO.
+		 * We set ZSEL, where only the LS-bit determines user access.
+		 * We set execute, because we don't have the granularity to
+		 * properly set this at the page level (Linux problem).
+		 * If shared is set, we cause a zero PID->TID load.
+		 * Many of these bits are software only. Bits we don't set
+		 * here we (properly should) assume have the appropriate value.
+		 */
+		andni	r4, r4, 0x0ce2		/* Make sure 20, 21 are zero */
+		ori	r4, r4, _PAGE_HWEXEC	/* make it executable */
+
+		/* find the TLB index that caused the fault. It has to be here*/
+		mts	rtlbsx, r3
+		nop
+		mfs	r5, rtlbx		/* DEBUG: TBD */
+		nop
+		mts	rtlblo, r4		/* Load TLB LO */
+		nop
+						/* Will sync shadow TLBs */
+
+		/* Done...restore registers and get out of here. */
+		mts	rpid, r11
+		nop
+		bri 4
+
+		RESTORE_STATE;
+		rted	r17, 0
+		nop
+	ex2:
+		/* The bailout. Restore registers to pre-exception conditions
+		 * and call the heavyweights to help us out. */
+		mts	rpid, r11
+		nop
+		bri 4
+		RESTORE_STATE;
+		bri	page_fault_data_trap
+
+
+	/* 0x11 - Instruction Storage Exception
+	 * This is caused by a fetch from non-execute or guarded pages. */
+	handle_instruction_storage_exception:
+		/* Working registers already saved: R3, R4, R5, R6
+		 * R3 = ESR
+		 */
+
+		mfs	r3, rear		/* Get faulting address */
+		nop
+		RESTORE_STATE;
+		bri	page_fault_instr_trap
+
+	/* 0x12 - Data TLB Miss Exception
+	 * As the name implies, translation is not in the MMU, so search the
+	 * page tables and fix it. The only purpose of this function is to
+	 * load TLB entries from the page table if they exist.
+	 */
+	handle_data_tlb_miss_exception:
+		/* Working registers already saved: R3, R4, R5, R6
+		 * R3 = ESR
+		 */
+		mfs	r11, rpid
+		nop
+		bri	4
+		mfs	r3, rear		/* Get faulting address */
+		nop
+
+		/* If we are faulting a kernel address, we have to use the
+		 * kernel page tables. */
+		ori	r4, r0, CONFIG_KERNEL_START
+		cmpu	r4, r3, r4
+		bgti	r4, ex5
+		ori	r4, r0, swapper_pg_dir
+		mts	rpid, r0		/* TLB will have 0 TID */
+		nop
+		bri	ex6
 
+		/* Get the PGD for the current thread. */
+	ex5:
+		/* get current task address */
+		addi	r4, CURRENT_TASK, TOPHYS(0);
+		lwi	r4, r4, TASK_THREAD+PGDIR
+	ex6:
+		tophys(r4,r4)
+		BSRLI(r5,r3,20)		/* Create L1 (pgdir/pmd) address */
+		andi	r5, r5, 0xffc
+/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
+		or	r4, r4, r5
+		lwi	r4, r4, 0		/* Get L1 entry */
+		andi	r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
+		beqi	r5, ex7			/* Bail if no table */
+
+		tophys(r5,r5)
+		BSRLI(r6,r3,10)			/* Compute PTE address */
+		andi	r6, r6, 0xffc
+		andi	r5, r5, 0xfffff003
+		or	r5, r5, r6
+		lwi	r4, r5, 0		/* Get Linux PTE */
+
+		andi	r6, r4, _PAGE_PRESENT
+		beqi	r6, ex7
+
+		ori	r4, r4, _PAGE_ACCESSED
+		swi	r4, r5, 0
+
+		/* Most of the Linux PTE is ready to load into the TLB LO.
+		 * We set ZSEL, where only the LS-bit determines user access.
+		 * We set execute, because we don't have the granularity to
+		 * properly set this at the page level (Linux problem).
+		 * If shared is set, we cause a zero PID->TID load.
+		 * Many of these bits are software only. Bits we don't set
+		 * here we (properly should) assume have the appropriate value.
+		 */
+		andni	r4, r4, 0x0ce2		/* Make sure 20, 21 are zero */
+
+		bri	finish_tlb_load
+	ex7:
+		/* The bailout. Restore registers to pre-exception conditions
+		 * and call the heavyweights to help us out.
+		 */
+		mts	rpid, r11
+		nop
+		bri	4
+		RESTORE_STATE;
+		bri	page_fault_data_trap
+
+	/* 0x13 - Instruction TLB Miss Exception
+	 * Nearly the same as above, except we get our information from
+	 * different registers and bailout to a different point.
+	 */
+	handle_instruction_tlb_miss_exception:
+		/* Working registers already saved: R3, R4, R5, R6
+		 *  R3 = ESR
+		 */
+		mfs	r11, rpid
+		nop
+		bri	4
+		mfs	r3, rear		/* Get faulting address */
+		nop
+
+		/* If we are faulting a kernel address, we have to use the
+		 * kernel page tables.
+		 */
+		ori	r4, r0, CONFIG_KERNEL_START
+		cmpu	r4, r3, r4
+		bgti	r4, ex8
+		ori	r4, r0, swapper_pg_dir
+		mts	rpid, r0		/* TLB will have 0 TID */
+		nop
+		bri	ex9
+
+		/* Get the PGD for the current thread. */
+	ex8:
+		/* get current task address */
+		addi	r4, CURRENT_TASK, TOPHYS(0);
+		lwi	r4, r4, TASK_THREAD+PGDIR
+	ex9:
+		tophys(r4,r4)
+		BSRLI(r5,r3,20)		/* Create L1 (pgdir/pmd) address */
+		andi	r5, r5, 0xffc
+/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
+		or	r4, r4, r5
+		lwi	r4, r4, 0		/* Get L1 entry */
+		andi	r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
+		beqi	r5, ex10		/* Bail if no table */
+
+		tophys(r5,r5)
+		BSRLI(r6,r3,10)			/* Compute PTE address */
+		andi	r6, r6, 0xffc
+		andi	r5, r5, 0xfffff003
+		or	r5, r5, r6
+		lwi	r4, r5, 0		/* Get Linux PTE */
+
+		andi	r6, r4, _PAGE_PRESENT
+		beqi	r6, ex7
+
+		ori	r4, r4, _PAGE_ACCESSED
+		swi	r4, r5, 0
+
+		/* Most of the Linux PTE is ready to load into the TLB LO.
+		 * We set ZSEL, where only the LS-bit determines user access.
+		 * We set execute, because we don't have the granularity to
+		 * properly set this at the page level (Linux problem).
+		 * If shared is set, we cause a zero PID->TID load.
+		 * Many of these bits are software only. Bits we don't set
+		 * here we (properly should) assume have the appropriate value.
+		 */
+		andni	r4, r4, 0x0ce2		/* Make sure 20, 21 are zero */
+
+		bri	finish_tlb_load
+	ex10:
+		/* The bailout. Restore registers to pre-exception conditions
+		 * and call the heavyweights to help us out.
+		 */
+		mts	rpid, r11
+		nop
+		bri 4
+		RESTORE_STATE;
+		bri	page_fault_instr_trap
+
+/* Both the instruction and data TLB miss get to this point to load the TLB.
+ *	r3 - EA of fault
+ *	r4 - TLB LO (info from Linux PTE)
+ *	r5, r6 - available to use
+ *	PID - loaded with proper value when we get here
+ *	Upon exit, we reload everything and RFI.
+ * A common place to load the TLB.
+ */
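+/* TLB entries are replaced round-robin via tlb_index below; with
+ * CONFIG_EARLY_PRINTK and no 8250 console, entry 0 (the uartlite mapping)
+ * is skipped and never replaced */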
+	tlb_index:
+		.long	0 /* MS: storing last used tlb index */
+	finish_tlb_load:
+		/* MS: load the last used TLB index. */
+		lwi	r5, r0, TOPHYS(tlb_index)
+		addik	r5, r5, 1 /* MS: inc tlb_index -> use next one */
+
+/* MS: FIXME this is a potential fault, because this is a mask, not a count */
+		andi	r5, r5, (MICROBLAZE_TLB_SIZE-1)
+	#if !defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_EARLY_PRINTK)
+/* MS: test if the TLB location is 0 - TLB 0 stores the uartlite for early_printk.
+ * If it is 0, only set up the next TLB index; index zero is never replaced.
+ */
+			bnei	r5, sem
+			ori	r5, r0, 1
+	sem:
+	#endif
+		/* MS: save back current TLB index */
+		swi	r5, r0, TOPHYS(tlb_index)
+
+		ori	r4, r4, _PAGE_HWEXEC	/* make it executable */
+		mts	rtlbx, r5		/* MS: save current TLB */
+		nop
+		mts	rtlblo,	r4		/* MS: save to TLB LO */
+		nop
+
+		/* Create EPN. This is the faulting address plus a static
+		 * set of bits. These are size, valid, E, U0, and ensure
+		 * bits 20 and 21 are zero.
+		 */
+		andi	r3, r3, 0xfffff000
+		ori	r3, r3, 0x0c0
+		mts	rtlbhi,	r3		/* Load TLB HI */
+		nop
+
+		/* Done...restore registers and get out of here. */
+	ex12:
+		mts	rpid, r11
+		nop
+		bri 4
+		RESTORE_STATE;
+		rted	r17, 0
+		nop
+
+	/* extern void giveup_fpu(struct task_struct *prev)
+	 *
+	 * The MicroBlaze processor may have an FPU, so this should not just
+	 * return: TBD.
+	 */
+	.globl giveup_fpu;
+	.align 4;
+	giveup_fpu:
+		bralid	r15,0			/* TBD */
+		nop
+
+	/* At present, this routine just hangs. - extern void abort(void) */
+	.globl abort;
+	.align 4;
+	abort:
+		br	r0
+
+	.globl set_context;
+	.align 4;
+	set_context:
+		mts	rpid, r5	/* Shadow TLBs are automatically */
+		nop
+		bri	4		/* flushed by changing PID */
+		rtsd	r15,8
+		nop
+
+#endif
 .end _hw_exception_handler
 
+#ifdef CONFIG_MMU
+/* Unaligned data access exception handler (MMU): used when the access falls in the last word of a 4k page or the address is not in the page table.
+ * When this is called, we are in virtual mode with exceptions enabled
+ * and registers 1-13,15,17,18 saved.
+ *
+ * R3 = ESR
+ * R4 = EAR
+ * R7 = pointer to saved registers (struct pt_regs *regs)
+ *
+ * This handler performs the access and returns via ret_from_exc.
+ */
+.global _unaligned_data_exception
+.ent _unaligned_data_exception
+_unaligned_data_exception:
+	andi	r8, r3, 0x3E0;	/* Mask and extract the register operand */
+	BSRLI(r8,r8,2);		/* r8 >> 2 = register operand * 8 */
+	andi	r6, r3, 0x400;	/* Extract ESR[S] */
+	bneid	r6, ex_sw_vm;
+	andi	r6, r3, 0x800;	/* Extract ESR[W] - delay slot */
+ex_lw_vm:
+	beqid	r6, ex_lhw_vm;
+	lbui	r5, r4, 0;	/* Exception address in r4 - delay slot */
+/* Load a word, byte-by-byte from destination address and save it in tmp space*/
+	la	r6, r0, ex_tmp_data_loc_0;
+	sbi	r5, r6, 0;
+	lbui	r5, r4, 1;
+	sbi	r5, r6, 1;
+	lbui	r5, r4, 2;
+	sbi	r5, r6, 2;
+	lbui	r5, r4, 3;
+	sbi	r5, r6, 3;
+	brid	ex_lw_tail_vm;
+/* Get the destination register value into r3 - delay slot */
+	lwi	r3, r6, 0;
+ex_lhw_vm:
+	/* Load a half-word, byte-by-byte from destination address and
+	 * save it in tmp space */
+	la	r6, r0, ex_tmp_data_loc_0;
+	sbi	r5, r6, 0;
+	lbui	r5, r4, 1;
+	sbi	r5, r6, 1;
+	lhui	r3, r6, 0;	/* Get the destination register value into r3 */
+ex_lw_tail_vm:
+	/* Form load_word jump table offset (lw_table_vm + (8 * regnum)) */
+	addik	r5, r8, lw_table_vm;
+	bra	r5;
+ex_lw_end_vm:			/* Exception handling of load word, ends */
+	brai	ret_from_exc;
+ex_sw_vm:
+/* Form store_word jump table offset (sw_table_vm + (8 * regnum)) */
+	addik	r5, r8, sw_table_vm;
+	bra	r5;
+ex_sw_tail_vm:
+	la	r5, r0, ex_tmp_data_loc_0;
+	beqid	r6, ex_shw_vm;
+	swi	r3, r5, 0;	/* Get the word - delay slot */
+	/* Store the word, byte-by-byte into destination address */
+	lbui	r3, r5, 0;
+	sbi	r3, r4, 0;
+	lbui	r3, r5, 1;
+	sbi	r3, r4, 1;
+	lbui	r3, r5, 2;
+	sbi	r3, r4, 2;
+	lbui	r3, r5, 3;
+	brid	ret_from_exc;
+	sbi	r3, r4, 3;	/* Delay slot */
+ex_shw_vm:
+	/* Store the lower half-word, byte-by-byte into destination address */
+	lbui	r3, r5, 2;
+	sbi	r3, r4, 0;
+	lbui	r3, r5, 3;
+	brid	ret_from_exc;
+	sbi	r3, r4, 1;	/* Delay slot */
+ex_sw_end_vm:			/* Exception handling of store word, ends. */
+.end _unaligned_data_exception
+#endif /* CONFIG_MMU */
+
 ex_handler_unhandled:
 /* FIXME add handle function for unhandled exception - dump register */
 	bri 0
 
+/*
+ * hw_exception_handler Jump Table
+ * - Contains code snippets for each register that caused the unaligned access exception
+ * - Hence the exception handler is NOT self-modifying
+ * - Separate table for load exceptions and store exceptions.
+ * - Each table is of size: (8 * 32) = 256 bytes
+ */
+
 .section .text
 .align 4
 lw_table:
@@ -407,7 +1064,11 @@ lw_r27:		R3_TO_LWREG	(27);
 lw_r28:		R3_TO_LWREG	(28);
 lw_r29:		R3_TO_LWREG	(29);
 lw_r30:		R3_TO_LWREG	(30);
+#ifdef CONFIG_MMU
+lw_r31: 	R3_TO_LWREG_V	(31);
+#else
 lw_r31:		R3_TO_LWREG	(31);
+#endif
 
 sw_table:
 sw_r0:		SWREG_TO_R3	(0);
@@ -441,7 +1102,81 @@ sw_r27:		SWREG_TO_R3	(27);
 sw_r28:		SWREG_TO_R3	(28);
 sw_r29:		SWREG_TO_R3	(29);
 sw_r30:		SWREG_TO_R3	(30);
+#ifdef CONFIG_MMU
+sw_r31:		SWREG_TO_R3_V	(31);
+#else
 sw_r31:		SWREG_TO_R3	(31);
+#endif
+
+#ifdef CONFIG_MMU
+lw_table_vm:
+lw_r0_vm:	R3_TO_LWREG_VM		(0);
+lw_r1_vm:	R3_TO_LWREG_VM_V	(1);
+lw_r2_vm:	R3_TO_LWREG_VM_V	(2);
+lw_r3_vm:	R3_TO_LWREG_VM_V	(3);
+lw_r4_vm:	R3_TO_LWREG_VM_V	(4);
+lw_r5_vm:	R3_TO_LWREG_VM_V	(5);
+lw_r6_vm:	R3_TO_LWREG_VM_V	(6);
+lw_r7_vm:	R3_TO_LWREG_VM_V	(7);
+lw_r8_vm:	R3_TO_LWREG_VM_V	(8);
+lw_r9_vm:	R3_TO_LWREG_VM_V	(9);
+lw_r10_vm:	R3_TO_LWREG_VM_V	(10);
+lw_r11_vm:	R3_TO_LWREG_VM_V	(11);
+lw_r12_vm:	R3_TO_LWREG_VM_V	(12);
+lw_r13_vm:	R3_TO_LWREG_VM_V	(13);
+lw_r14_vm:	R3_TO_LWREG_VM		(14);
+lw_r15_vm:	R3_TO_LWREG_VM_V	(15);
+lw_r16_vm:	R3_TO_LWREG_VM		(16);
+lw_r17_vm:	R3_TO_LWREG_VM_V	(17);
+lw_r18_vm:	R3_TO_LWREG_VM_V	(18);
+lw_r19_vm:	R3_TO_LWREG_VM		(19);
+lw_r20_vm:	R3_TO_LWREG_VM		(20);
+lw_r21_vm:	R3_TO_LWREG_VM		(21);
+lw_r22_vm:	R3_TO_LWREG_VM		(22);
+lw_r23_vm:	R3_TO_LWREG_VM		(23);
+lw_r24_vm:	R3_TO_LWREG_VM		(24);
+lw_r25_vm:	R3_TO_LWREG_VM		(25);
+lw_r26_vm:	R3_TO_LWREG_VM		(26);
+lw_r27_vm:	R3_TO_LWREG_VM		(27);
+lw_r28_vm:	R3_TO_LWREG_VM		(28);
+lw_r29_vm:	R3_TO_LWREG_VM		(29);
+lw_r30_vm:	R3_TO_LWREG_VM		(30);
+lw_r31_vm:	R3_TO_LWREG_VM_V	(31);
+
+sw_table_vm:
+sw_r0_vm:	SWREG_TO_R3_VM		(0);
+sw_r1_vm:	SWREG_TO_R3_VM_V	(1);
+sw_r2_vm:	SWREG_TO_R3_VM_V	(2);
+sw_r3_vm:	SWREG_TO_R3_VM_V	(3);
+sw_r4_vm:	SWREG_TO_R3_VM_V	(4);
+sw_r5_vm:	SWREG_TO_R3_VM_V	(5);
+sw_r6_vm:	SWREG_TO_R3_VM_V	(6);
+sw_r7_vm:	SWREG_TO_R3_VM_V	(7);
+sw_r8_vm:	SWREG_TO_R3_VM_V	(8);
+sw_r9_vm:	SWREG_TO_R3_VM_V	(9);
+sw_r10_vm:	SWREG_TO_R3_VM_V	(10);
+sw_r11_vm:	SWREG_TO_R3_VM_V	(11);
+sw_r12_vm:	SWREG_TO_R3_VM_V	(12);
+sw_r13_vm:	SWREG_TO_R3_VM_V	(13);
+sw_r14_vm:	SWREG_TO_R3_VM		(14);
+sw_r15_vm:	SWREG_TO_R3_VM_V	(15);
+sw_r16_vm:	SWREG_TO_R3_VM		(16);
+sw_r17_vm:	SWREG_TO_R3_VM_V	(17);
+sw_r18_vm:	SWREG_TO_R3_VM_V	(18);
+sw_r19_vm:	SWREG_TO_R3_VM		(19);
+sw_r20_vm:	SWREG_TO_R3_VM		(20);
+sw_r21_vm:	SWREG_TO_R3_VM		(21);
+sw_r22_vm:	SWREG_TO_R3_VM		(22);
+sw_r23_vm:	SWREG_TO_R3_VM		(23);
+sw_r24_vm:	SWREG_TO_R3_VM		(24);
+sw_r25_vm:	SWREG_TO_R3_VM		(25);
+sw_r26_vm:	SWREG_TO_R3_VM		(26);
+sw_r27_vm:	SWREG_TO_R3_VM		(27);
+sw_r28_vm:	SWREG_TO_R3_VM		(28);
+sw_r29_vm:	SWREG_TO_R3_VM		(29);
+sw_r30_vm:	SWREG_TO_R3_VM		(30);
+sw_r31_vm:	SWREG_TO_R3_VM_V	(31);
+#endif /* CONFIG_MMU */
 
 /* Temporary data structures used in the handler */
 .section .data
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update
  2009-04-27  8:32                                     ` [PATCH 19/30] microblaze_mmu_v1: Update exception handling - MMU exception monstr
@ 2009-04-27  8:32                                       ` monstr
  2009-04-27  8:32                                         ` [PATCH 21/30] microblaze_mmu_v1: Add MMU related exceptions handling monstr
                                                           ` (2 more replies)
  0 siblings, 3 replies; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/include/asm/uaccess.h |  298 +++++++++++++++++++++++++++------
 arch/microblaze/lib/uaccess_old.S     |  135 +++++++++++++++
 2 files changed, 381 insertions(+), 52 deletions(-)
 create mode 100644 arch/microblaze/lib/uaccess_old.S

diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 5a3ffc3..78e8246 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -26,6 +28,10 @@
 #define VERIFY_READ	0
 #define VERIFY_WRITE	1
 
+#define __clear_user(addr, n)	(memset((void *)(addr), 0, (n)), 0)
+
+#ifndef CONFIG_MMU
+
 extern int ___range_ok(unsigned long addr, unsigned long size);
 
 #define __range_ok(addr, size) \
@@ -39,63 +45,64 @@ extern inline int bad_user_access_length(void)
 	return 0;
 }
 /* FIXME this is a function for optimization -> memcpy */
-#define __get_user(var, ptr)					\
-	({							\
-		int __gu_err = 0;				\
-		switch (sizeof(*(ptr))) {			\
-		case 1:						\
-		case 2:						\
-		case 4:						\
-			(var) = *(ptr);				\
-			break;					\
-		case 8:						\
-			memcpy((void *) &(var), (ptr), 8);	\
-			break;					\
-		default:					\
-			(var) = 0;				\
-			__gu_err = __get_user_bad();		\
-			break;					\
-		}						\
-		__gu_err;					\
-	})
+#define __get_user(var, ptr)				\
+({							\
+	int __gu_err = 0;				\
+	switch (sizeof(*(ptr))) {			\
+	case 1:						\
+	case 2:						\
+	case 4:						\
+		(var) = *(ptr);				\
+		break;					\
+	case 8:						\
+		memcpy((void *) &(var), (ptr), 8);	\
+		break;					\
+	default:					\
+		(var) = 0;				\
+		__gu_err = __get_user_bad();		\
+		break;					\
+	}						\
+	__gu_err;					\
+})
 
 #define __get_user_bad()	(bad_user_access_length(), (-EFAULT))
 
+/* FIXME __pu_val is not defined there */
 #define __put_user(var, ptr)					\
-	({							\
-		int __pu_err = 0;				\
-		switch (sizeof(*(ptr))) {			\
-		case 1:						\
-		case 2:						\
-		case 4:						\
-			*(ptr) = (var);				\
-			break;					\
-		case 8: {					\
-			typeof(*(ptr)) __pu_val = var;		\
-			memcpy(ptr, &__pu_val, sizeof(__pu_val));\
-			}					\
-			break;					\
-		default:					\
-			__pu_err = __put_user_bad();		\
-			break;					\
-		}							\
-		__pu_err;						\
-	})
+({								\
+	int __pu_err = 0;					\
+	switch (sizeof(*(ptr))) {				\
+	case 1:							\
+	case 2:							\
+	case 4:							\
+		*(ptr) = (var);					\
+		break;						\
+	case 8: {						\
+		typeof(*(ptr)) __pu_val = (var);		\
+		memcpy(ptr, &__pu_val, sizeof(__pu_val));	\
+		}						\
+		break;						\
+	default:						\
+		__pu_err = __put_user_bad();			\
+		break;						\
+	}							\
+	__pu_err;						\
+})
 
 #define __put_user_bad()	(bad_user_access_length(), (-EFAULT))
 
-#define put_user(x, ptr)	__put_user(x, ptr)
-#define get_user(x, ptr)	__get_user(x, ptr)
-
-#define copy_to_user(to, from, n)		(memcpy(to, from, n), 0)
-#define copy_from_user(to, from, n)		(memcpy(to, from, n), 0)
+#define put_user(x, ptr)	__put_user((x), (ptr))
+#define get_user(x, ptr)	__get_user((x), (ptr))
 
-#define __copy_to_user(to, from, n)		(copy_to_user(to, from, n))
-#define __copy_from_user(to, from, n)		(copy_from_user(to, from, n))
-#define __copy_to_user_inatomic(to, from, n)	(__copy_to_user(to, from, n))
-#define __copy_from_user_inatomic(to, from, n)	(__copy_from_user(to, from, n))
+#define copy_to_user(to, from, n)	(memcpy((to), (from), (n)), 0)
+#define copy_from_user(to, from, n)	(memcpy((to), (from), (n)), 0)
 
-#define __clear_user(addr, n)	(memset((void *)addr, 0, n), 0)
+#define __copy_to_user(to, from, n)	(copy_to_user((to), (from), (n)))
+#define __copy_from_user(to, from, n)	(copy_from_user((to), (from), (n)))
+#define __copy_to_user_inatomic(to, from, n) \
+			(__copy_to_user((to), (from), (n)))
+#define __copy_from_user_inatomic(to, from, n) \
+			(__copy_from_user((to), (from), (n)))
 
 static inline unsigned long clear_user(void *addr, unsigned long size)
 {
@@ -104,13 +111,200 @@ static inline unsigned long clear_user(void *addr, unsigned long size)
 	return size;
 }
 
-/* Returns 0 if exception not found and fixup otherwise. */
+/* Returns 0 if exception not found and fixup otherwise.  */
 extern unsigned long search_exception_table(unsigned long);
 
+extern long strncpy_from_user(char *dst, const char *src, long count);
+extern long strnlen_user(const char *src, long count);
+
+#else /* CONFIG_MMU */
+
+/*
+ * Address is valid if:
+ *  - "addr", "addr + size" and "size" are all below the limit
+ */
+#define access_ok(type, addr, size) \
+	(get_fs().seg > (((unsigned long)(addr)) | \
+		(size) | ((unsigned long)(addr) + (size))))
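+/* e.g. access_ok(VERIFY_READ, p, 16) holds only when p, 16 and p + 16 all
+ * lie below get_fs().seg, so an overflowing addr + size cannot pass */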
+
+/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
+ type?"WRITE":"READ",addr,size,get_fs().seg)) */
+
+/*
+ * All the __XXX versions of the macros/functions below do not perform
+ * access checking. It is assumed that the necessary checks have
+ * already been performed before the function (macro) is called.
+ */
+
+#define get_user(x, ptr)						\
+({									\
+	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr)))			\
+		? __get_user((x), (ptr)) : -EFAULT;			\
+})
+
+#define put_user(x, ptr)						\
+({									\
+	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr)))			\
+		? __put_user((x), (ptr)) : -EFAULT;			\
+})
+
+#define __get_user(x, ptr)						\
+({									\
+	unsigned long __gu_val;						\
+	/*unsigned long __gu_ptr = (unsigned long)(ptr);*/		\
+	long __gu_err;							\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		__get_user_asm("lbu", (ptr), __gu_val, __gu_err);	\
+		break;							\
+	case 2:								\
+		__get_user_asm("lhu", (ptr), __gu_val, __gu_err);	\
+		break;							\
+	case 4:								\
+		__get_user_asm("lw", (ptr), __gu_val, __gu_err);	\
+		break;							\
+	default:							\
+		__gu_val = 0; __gu_err = -EINVAL;			\
+	}								\
+	x = (__typeof__(*(ptr))) __gu_val;				\
+	__gu_err;							\
+})
+
+#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)		\
+({									\
+	__asm__ __volatile__ (						\
+			"1:"	insn	" %1, %2, r0;			\
+				addk	%0, r0, r0;			\
+			2:						\
+			.section .fixup,\"ax\";				\
+			3:	brid	2b;				\
+				addik	%0, r0, %3;			\
+			.previous;					\
+			.section __ex_table,\"a\";			\
+			.word	1b,3b;					\
+			.previous;"					\
+		: "=r"(__gu_err), "=r"(__gu_val)			\
+		: "r"(__gu_ptr), "i"(-EFAULT)				\
+	);								\
+})
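+/* The __ex_table entry maps a fault on the load at label 1 to the fixup
+ * at label 3, which returns -EFAULT in the error operand */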
+
+#define __put_user(x, ptr)						\
+({									\
+	__typeof__(*(ptr)) __gu_val = x;				\
+	long __gu_err = 0;						\
+	switch (sizeof(__gu_val)) {					\
+	case 1:								\
+		__put_user_asm("sb", (ptr), __gu_val, __gu_err);	\
+		break;							\
+	case 2: 							\
+		__put_user_asm("sh", (ptr), __gu_val, __gu_err);	\
+		break;							\
+	case 4:								\
+		__put_user_asm("sw", (ptr), __gu_val, __gu_err);	\
+		break;							\
+	case 8:								\
+		__put_user_asm_8((ptr), __gu_val, __gu_err);		\
+		break;							\
+	default:							\
+		__gu_err = -EINVAL;					\
+	}								\
+	__gu_err;							\
+})
+
+#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err)	\
+({							\
+__asm__ __volatile__ ("	lwi	%0, %1, 0;		\
+		1:	swi	%0, %2, 0;		\
+			lwi	%0, %1, 4;		\
+		2:	swi	%0, %2, 4;		\
+			addk	%0,r0,r0;		\
+		3:					\
+		.section .fixup,\"ax\";			\
+		4:	brid	3b;			\
+			addik	%0, r0, %3;		\
+		.previous;				\
+		.section __ex_table,\"a\";		\
+		.word	1b,4b,2b,4b;			\
+		.previous;"				\
+	: "=&r"(__gu_err)				\
+	: "r"(&__gu_val),				\
+	"r"(__gu_ptr), "i"(-EFAULT)			\
+	);						\
+})
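+/* The 8-byte variant performs two word stores; both (labels 1 and 2)
+ * share the fixup at label 4, which returns -EFAULT */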
+
+#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
+({								\
+	__asm__ __volatile__ (					\
+			"1:"	insn	" %1, %2, r0;		\
+				addk	%0, r0, r0;		\
+			2:					\
+			.section .fixup,\"ax\";			\
+			3:	brid	2b;			\
+				addik	%0, r0, %3;		\
+			.previous;				\
+			.section __ex_table,\"a\";		\
+			.word	1b,3b;				\
+			.previous;"				\
+		: "=r"(__gu_err)				\
+		: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
+	);							\
+})
+
+/*
+ * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
+ */
+static inline int clear_user(char *to, int size)
+{
+	if (size && access_ok(VERIFY_WRITE, to, size)) {
+		__asm__ __volatile__ ("				\
+				1:				\
+					sb	r0, %2, r0;	\
+					addik	%0, %0, -1;	\
+					bneid	%0, 1b;		\
+					addik	%2, %2, 1;	\
+				2:				\
+				.section __ex_table,\"a\";	\
+				.word	1b,2b;			\
+				.section .text;"		\
+			: "=r"(size)				\
+			: "0"(size), "r"(to)
+		);
+	}
+	return size;
+}
+
+extern unsigned long __copy_tofrom_user(void __user *to,
+		const void __user *from, unsigned long size);
+
+#define copy_to_user(to, from, n)					\
+	(access_ok(VERIFY_WRITE, (to), (n)) ?				\
+		__copy_tofrom_user((void __user *)(to),			\
+			(__force const void __user *)(from), (n))	\
+		: -EFAULT)
+
+#define __copy_to_user(to, from, n)	copy_to_user((to), (from), (n))
+#define __copy_to_user_inatomic(to, from, n)	copy_to_user((to), (from), (n))
+
+#define copy_from_user(to, from, n)					\
+	(access_ok(VERIFY_READ, (from), (n)) ?				\
+		__copy_tofrom_user((__force void __user *)(to),		\
+			(void __user *)(from), (n))			\
+		: -EFAULT)
+
+#define __copy_from_user(to, from, n)	copy_from_user((to), (from), (n))
+#define __copy_from_user_inatomic(to, from, n) \
+		copy_from_user((to), (from), (n))
+
+extern int __strncpy_user(char *to, const char __user *from, int len);
+extern int __strnlen_user(const char __user *sstr, int len);
+
+#define strncpy_from_user(to, from, len)	\
+		(access_ok(VERIFY_READ, from, 1) ?	\
+			__strncpy_user(to, from, len) : -EFAULT)
+#define strnlen_user(str, len)	\
+		(access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
 
-extern long strncpy_from_user(char *dst, const char __user *src, long count);
-extern long strnlen_user(const char __user *src, long count);
-extern long __strncpy_from_user(char *dst, const char __user *src, long count);
+#endif /* CONFIG_MMU */
 
 /*
  * The exception table consists of pairs of addresses: the first is the
diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S
new file mode 100644
index 0000000..88b1fa7
--- /dev/null
+++ b/arch/microblaze/lib/uaccess_old.S
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 PetaLogix
+ * Copyright (C) 2007 LynuxWorks, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/linkage.h>
+
+/*
+ * int __strncpy_user(char *to, char *from, int len);
+ *
+ * Returns:
+ *  -EFAULT  for an exception
+ *  len      if we hit the buffer limit
+ *  bytes copied
+ */
+
+	.text
+.globl __strncpy_user;
+.align 4;
+__strncpy_user:
+
+	/*
+	 * r5 - to
+	 * r6 - from
+	 * r7 - len
+	 * r3 - temp count
+	 * r4 - temp val
+	 */
+	addik	r3,r7,0		/* temp_count = len */
+	beqi	r3,3f
+1:
+	lbu	r4,r6,r0
+	sb	r4,r5,r0
+
+	addik	r3,r3,-1
+	beqi	r3,2f		/* break on len */
+
+	addik	r5,r5,1
+	bneid	r4,1b
+	addik	r6,r6,1		/* delay slot */
+	addik	r3,r3,1		/* undo "temp_count--" */
+2:
+	rsubk	r3,r3,r7	/* temp_count = len - temp_count */
+3:
+	rtsd	r15,8
+	nop
+
+
+	.section	.fixup, "ax"
+	.align	2
+4:
+	brid	3b
+	addik	r3,r0, -EFAULT
+
+	.section	__ex_table, "a"
+	.word	1b,4b
+
+/*
+ * int __strnlen_user(char __user *str, int maxlen);
+ *
+ * Returns:
+ *  0 on error
+ *  maxlen + 1  if no NUL byte found within maxlen bytes
+ *  size of the string (including NUL byte)
+ */
+
+	.text
+.globl __strnlen_user;
+.align 4;
+__strnlen_user:
+	addik	r3,r6,0
+	beqi	r3,3f
+1:
+	lbu	r4,r5,r0
+	beqid	r4,2f		/* break on NUL */
+	addik	r3,r3,-1	/* delay slot */
+
+	bneid	r3,1b
+	addik	r5,r5,1		/* delay slot */
+
+	addik	r3,r3,-1	/* for break on len */
+2:
+	rsubk	r3,r3,r6
+3:
+	rtsd	r15,8
+	nop
+
+
+	.section	.fixup,"ax"
+4:
+	brid	3b
+	addk	r3,r0,r0
+
+	.section	__ex_table,"a"
+	.word	1b,4b
+
+/*
+ * int __copy_tofrom_user(char *to, char *from, int len)
+ * Return:
+ *   0 on success
+ *   number of not copied bytes on error
+ */
+	.text
+.globl __copy_tofrom_user;
+.align 4;
+__copy_tofrom_user:
+	/*
+	 * r5 - to
+	 * r6 - from
+	 * r7, r3 - count
+	 * r4 - tempval
+	 */
+	addik	r3,r7,0
+	beqi	r3,3f
+1:
+	lbu	r4,r6,r0
+	addik	r6,r6,1
+2:
+	sb	r4,r5,r0
+	addik	r3,r3,-1
+	bneid	r3,1b
+	addik	r5,r5,1		/* delay slot */
+3:
+	rtsd	r15,8
+	nop
+
+
+	.section	__ex_table,"a"
+	.word	1b,3b,2b,3b
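
A usage sketch, for illustration only (not part of the patch): the names
demo_args and demo_ioctl below are hypothetical, but the calling pattern is
the standard one these MMU helpers are meant to support - a non-zero return
from copy_from_user()/copy_to_user() is treated as a fault.

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	struct demo_args {
		int in;
		int out;
	};

	/* Copy a small struct in from user space, update it, copy it back. */
	static long demo_ioctl(struct demo_args __user *uptr)
	{
		struct demo_args args;

		if (copy_from_user(&args, uptr, sizeof(args)))
			return -EFAULT;	/* user buffer faulted or failed access_ok() */

		args.out = args.in + 1;

		if (copy_to_user(uptr, &args, sizeof(args)))
			return -EFAULT;

		return 0;
	}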
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 21/30] microblaze_mmu_v1: Add MMU related exceptions handling
  2009-04-27  8:32                                       ` [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update monstr
@ 2009-04-27  8:32                                         ` monstr
  2009-04-27  8:32                                           ` [PATCH 22/30] microblaze_mmu_v1: Update linker script for MMU monstr
  2009-04-29  5:54                                           ` [PATCH 21/30] microblaze_mmu_v1: Add MMU related exceptions handling Andrew Morton
  2009-04-29  5:53                                         ` [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update Andrew Morton
  2009-04-29 11:26                                         ` Arnd Bergmann
  2 siblings, 2 replies; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/kernel/exceptions.c |   43 +++++++++++++++++++++++++++++-----
 1 files changed, 36 insertions(+), 7 deletions(-)

diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
index 4a8a406..38be9fa 100644
--- a/arch/microblaze/kernel/exceptions.c
+++ b/arch/microblaze/kernel/exceptions.c
@@ -21,9 +21,9 @@
 
 #include <asm/exceptions.h>
 #include <asm/entry.h>		/* For KM CPU var */
-#include <asm/uaccess.h>
-#include <asm/errno.h>
-#include <asm/ptrace.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
 #include <asm/current.h>
 
 #define MICROBLAZE_ILL_OPCODE_EXCEPTION	0x02
@@ -66,6 +66,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
 asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
 							int fsr, int addr)
 {
+#ifdef CONFIG_MMU
+	int code;
+	addr = regs->pc;
+#endif
+
 #if 0
 	printk(KERN_WARNING "Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n",
 			type, user_mode(regs) ? "user" : "kernel", fsr,
@@ -74,7 +79,13 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
 
 	switch (type & 0x1F) {
 	case MICROBLAZE_ILL_OPCODE_EXCEPTION:
-		_exception(SIGILL, regs, ILL_ILLOPC, addr);
+		if (user_mode(regs)) {
+			printk(KERN_WARNING "Illegal opcode exception in user mode.\n");
+			_exception(SIGILL, regs, ILL_ILLOPC, addr);
+			return;
+		}
+		printk(KERN_WARNING "Illegal opcode exception in kernel mode.\n");
+		die("opcode exception", regs, SIGBUS);
 		break;
 	case MICROBLAZE_IBUS_EXCEPTION:
 		if (user_mode(regs)) {
@@ -95,11 +106,16 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
 		die("bus exception", regs, SIGBUS);
 		break;
 	case MICROBLAZE_DIV_ZERO_EXCEPTION:
-		printk(KERN_WARNING "Divide by zero exception\n");
-		_exception(SIGILL, regs, ILL_ILLOPC, addr);
+		if (user_mode(regs)) {
+			printk(KERN_WARNING "Divide by zero exception in user mode\n");
+			_exception(SIGILL, regs, ILL_ILLOPC, addr);
+			return;
+		}
+		printk(KERN_WARNING "Divide by zero exception in kernel mode.\n");
+		die("Divide by zero exception", regs, SIGBUS);
 		break;
-
 	case MICROBLAZE_FPU_EXCEPTION:
+		printk(KERN_WARNING "FPU exception\n");
 		/* IEEE FP exception */
 		/* I removed fsr variable and use code var for storing fsr */
 		if (fsr & FSR_IO)
@@ -115,7 +131,20 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
 		_exception(SIGFPE, regs, fsr, addr);
 		break;
 
+#ifdef CONFIG_MMU
+	case MICROBLAZE_PRIVILEG_EXCEPTION:
+		printk(KERN_WARNING "Privileged exception\n");
+		/* "brk r0,r0" - used as debug breakpoint */
+		if (get_user(code, (unsigned long *)regs->pc) == 0
+			&& code == 0x980c0000) {
+			_exception(SIGTRAP, regs, TRAP_BRKPT, addr);
+		} else {
+			_exception(SIGILL, regs, ILL_PRVOPC, addr);
+		}
+		break;
+#endif
 	default:
+	/* FIXME what to do on an unexpected exception */
 		printk(KERN_WARNING "Unexpected exception %02x "
 			"PC=%08x in %s mode\n", type, (unsigned int) addr,
 			kernel_mode(regs) ? "kernel" : "user");
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 22/30] microblaze_mmu_v1: Update linker script for MMU
  2009-04-27  8:32                                         ` [PATCH 21/30] microblaze_mmu_v1: Add MMU related exceptions handling monstr
@ 2009-04-27  8:32                                           ` monstr
  2009-04-27  8:32                                             ` [PATCH 23/30] microblaze_mmu_v1: Enable fork syscall for MMU and add fork as vfork for noMMU monstr
  2009-04-29 11:27                                             ` [PATCH 22/30] microblaze_mmu_v1: Update linker script for MMU Arnd Bergmann
  2009-04-29  5:54                                           ` [PATCH 21/30] microblaze_mmu_v1: Add MMU related exceptions handling Andrew Morton
  1 sibling, 2 replies; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/kernel/vmlinux.lds.S |    5 ++++-
 1 files changed, 4 insertions(+), 1 deletions(-)

diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 840385e..50b4bcf 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -17,8 +17,11 @@ ENTRY(_start)
 jiffies = jiffies_64 + 4;
 
 SECTIONS {
+#ifdef CONFIG_MMU
+	. = CONFIG_KERNEL_START;
+#else
 	. = CONFIG_KERNEL_BASE_ADDR;
-
+#endif
 	.text : {
 		_text = . ;
 		_stext = . ;
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 23/30] microblaze_mmu_v1: Enable fork syscall for MMU and add fork as vfork for noMMU
  2009-04-27  8:32                                           ` [PATCH 22/30] microblaze_mmu_v1: Update linker script for MMU monstr
@ 2009-04-27  8:32                                             ` monstr
  2009-04-27  8:32                                               ` [PATCH 24/30] microblaze_mmu_v1: Traps MMU update monstr
  2009-04-27 11:43                                               ` [PATCH 23/30] microblaze_mmu_v1: Enable fork syscall for MMU and add fork as vfork for noMMU John Williams
  2009-04-29 11:27                                             ` [PATCH 22/30] microblaze_mmu_v1: Update linker script for MMU Arnd Bergmann
  1 sibling, 2 replies; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/kernel/entry-nommu.S   |    3 ++-
 arch/microblaze/kernel/syscall_table.S |    2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index f24b126..89f978a 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -10,7 +10,7 @@
 
 #include <linux/linkage.h>
 #include <asm/thread_info.h>
-#include <asm/errno.h>
+#include <linux/errno.h>
 #include <asm/entry.h>
 #include <asm/asm-offsets.h>
 #include <asm/registers.h>
@@ -551,6 +551,7 @@ no_work_pending:
 	rtid	r14, 0
 	nop
 
+sys_fork_wrapper:
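+/* noMMU cannot implement a real fork, so sys_fork shares the vfork wrapper */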
 sys_vfork_wrapper:
 	brid	sys_vfork
 	addk	r5, r1, r0
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 3bb42ec..0b1bcaf 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -2,7 +2,7 @@ ENTRY(sys_call_table)
 	.long sys_restart_syscall	/* 0 - old "setup()" system call,
 					 * used for restarting */
 	.long sys_exit
-	.long sys_ni_syscall		/* was fork */
+	.long sys_fork_wrapper
 	.long sys_read
 	.long sys_write
 	.long sys_open			/* 5 */
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 24/30] microblaze_mmu_v1: Traps MMU update
  2009-04-27  8:32                                             ` [PATCH 23/30] microblaze_mmu_v1: Enable fork syscall for MMU and add fork as vfork for noMMU monstr
@ 2009-04-27  8:32                                               ` monstr
  2009-04-27  8:32                                                 ` [PATCH 25/30] microblaze_mmu_v1: Update signal returning address monstr
  2009-04-27 11:43                                               ` [PATCH 23/30] microblaze_mmu_v1: Enable fork syscall for MMU and add fork as vfork for noMMU John Williams
  1 sibling, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/include/asm/exceptions.h |   10 ++++++-
 arch/microblaze/kernel/traps.c           |   36 ++++++++++++++++++++++++++++++
 2 files changed, 44 insertions(+), 2 deletions(-)

diff --git a/arch/microblaze/include/asm/exceptions.h b/arch/microblaze/include/asm/exceptions.h
index 24ca540..b55b132 100644
--- a/arch/microblaze/include/asm/exceptions.h
+++ b/arch/microblaze/include/asm/exceptions.h
@@ -1,8 +1,8 @@
 /*
  * Preliminary support for HW exception handing for Microblaze
  *
- * Copyright (C) 2008 Michal Simek
- * Copyright (C) 2008 PetaLogix
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
  *
  * This file is subject to the terms and conditions of the GNU General
@@ -64,6 +64,12 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
 void die(const char *str, struct pt_regs *fp, long err);
 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr);
 
+#ifdef CONFIG_MMU
+void __bug(const char *file, int line, void *data);
+int bad_trap(int trap_num, struct pt_regs *regs);
+int debug_trap(struct pt_regs *regs);
+#endif /* CONFIG_MMU */
+
 #if defined(CONFIG_XMON)
 extern void xmon(struct pt_regs *regs);
 extern int xmon_bpt(struct pt_regs *regs);
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index 293ef48..ffe48ad 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -22,6 +22,7 @@ void trap_init(void)
 	__enable_hw_exceptions();
 }
 
+#ifndef CONFIG_MMU
 void __bad_xchg(volatile void *ptr, int size)
 {
 	printk(KERN_INFO "xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
@@ -29,6 +30,7 @@ void __bad_xchg(volatile void *ptr, int size)
 	BUG();
 }
 EXPORT_SYMBOL(__bad_xchg);
+#endif
 
 static int kstack_depth_to_print = 24;
 
@@ -105,3 +107,37 @@ void dump_stack(void)
 	show_stack(NULL, NULL);
 }
 EXPORT_SYMBOL(dump_stack);
+
+#ifdef CONFIG_MMU
+void __bug(const char *file, int line, void *data)
+{
+	if (data)
+		printk(KERN_CRIT "kernel BUG at %s:%d (data = %p)!\n",
+			file, line, data);
+	else
+		printk(KERN_CRIT "kernel BUG at %s:%d!\n", file, line);
+
+	machine_halt();
+}
+
+int bad_trap(int trap_num, struct pt_regs *regs)
+{
+	printk(KERN_CRIT
+		"unimplemented trap %d called at 0x%08lx, pid %d!\n",
+		trap_num, regs->pc, current->pid);
+	return -ENOSYS;
+}
+
+int debug_trap(struct pt_regs *regs)
+{
+	int i;
+	printk(KERN_CRIT "debug trap\n");
+	for (i = 0; i < 32; i++) {
+		/* printk("r%i:%08X\t",i,regs->gpr[i]); */
+		if ((i % 4) == 3)
+			printk(KERN_CRIT "\n");
+	}
+	printk(KERN_CRIT "pc:%08lX\tmsr:%08lX\n", regs->pc, regs->msr);
+	return -ENOSYS;
+}
+#endif
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 25/30] microblaze_mmu_v1: Update signal returning address
  2009-04-27  8:32                                               ` [PATCH 24/30] microblaze_mmu_v1: Traps MMU update monstr
@ 2009-04-27  8:32                                                 ` monstr
  2009-04-27  8:32                                                   ` [PATCH 26/30] microblaze_mmu_v1: Update cacheflush.h monstr
  0 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/kernel/signal.c |    8 ++++++++
 1 files changed, 8 insertions(+), 0 deletions(-)

diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 40d3693..c93a3d6 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -463,7 +463,15 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
 	case -ERESTARTNOINTR:
 do_restart:
 		/* offset of 4 bytes to re-execute trap (brki) instruction */
+#ifndef CONFIG_MMU
 		regs->pc -= 4;
+#else
+		/* offset of 8 bytes required: 4 for the rtbd
+		 * offset, plus 4 for the size of the
+		 * "brki r14,8" instruction. */
+		regs->pc -= 8;
+#endif
 		break;
 	}
 }
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 26/30] microblaze_mmu_v1: Update cacheflush.h
  2009-04-27  8:32                                                 ` [PATCH 25/30] microblaze_mmu_v1: Update signal returning address monstr
@ 2009-04-27  8:32                                                   ` monstr
  2009-04-27  8:32                                                     ` [PATCH 27/30] microblaze_mmu_v1: Update dma.h for MMU monstr
  0 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/include/asm/cacheflush.h |   20 +++++++++++++++++++-
 1 files changed, 19 insertions(+), 1 deletions(-)

diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h
index 3300b78..f989d6a 100644
--- a/arch/microblaze/include/asm/cacheflush.h
+++ b/arch/microblaze/include/asm/cacheflush.h
@@ -1,5 +1,6 @@
 /*
- * Copyright (C) 2007 PetaLogix
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
  * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
  * based on v850 version which was
  * Copyright (C) 2001,02,03 NEC Electronics Corporation
@@ -43,6 +44,23 @@
 #define flush_icache_range(start, len)	__invalidate_icache_range(start, len)
 #define flush_icache_page(vma, pg)		do { } while (0)
 
+#ifndef CONFIG_MMU
+# define flush_icache_user_range(start, len)	do { } while (0)
+#else
+# define flush_icache_user_range(vma, pg, adr, len) __invalidate_icache_all()
+
+# define flush_page_to_ram(page)		do { } while (0)
+
+# define flush_icache()			__invalidate_icache_all()
+# define flush_cache_sigtramp(vaddr) \
+			__invalidate_icache_range(vaddr, vaddr + 8)
+
+# define flush_dcache_mmap_lock(mapping)	do { } while (0)
+# define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+
+# define flush_cache_dup_mm(mm)			do { } while (0)
+#endif
+
 #define flush_cache_vmap(start, end)		do { } while (0)
 #define flush_cache_vunmap(start, end)		do { } while (0)
 
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 27/30] microblaze_mmu_v1: Update dma.h for MMU
  2009-04-27  8:32                                                   ` [PATCH 26/30] microblaze_mmu_v1: Update cacheflush.h monstr
@ 2009-04-27  8:32                                                     ` monstr
  2009-04-27  8:32                                                       ` [PATCH 28/30] microblaze_mmu_v1: Elf update monstr
  0 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/include/asm/dma.h |    5 +++++
 1 files changed, 5 insertions(+), 0 deletions(-)

diff --git a/arch/microblaze/include/asm/dma.h b/arch/microblaze/include/asm/dma.h
index 0967fa0..08c073b 100644
--- a/arch/microblaze/include/asm/dma.h
+++ b/arch/microblaze/include/asm/dma.h
@@ -9,8 +9,13 @@
 #ifndef _ASM_MICROBLAZE_DMA_H
 #define _ASM_MICROBLAZE_DMA_H
 
+#ifndef CONFIG_MMU
 /* we don't have dma address limit. define it as zero to be
  * unlimited. */
 #define MAX_DMA_ADDRESS		(0)
+#else
+/* Virtual address corresponding to last available physical memory address.  */
+#define MAX_DMA_ADDRESS (CONFIG_KERNEL_START + memory_size - 1)
+#endif
 
 #endif /* _ASM_MICROBLAZE_DMA_H */
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 28/30] microblaze_mmu_v1: Elf update
  2009-04-27  8:32                                                     ` [PATCH 27/30] microblaze_mmu_v1: Update dma.h for MMU monstr
@ 2009-04-27  8:32                                                       ` monstr
  2009-04-27  8:32                                                         ` [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update monstr
  0 siblings, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/include/asm/elf.h |   94 +++++++++++++++++++++++++++++++++++++
 1 files changed, 94 insertions(+), 0 deletions(-)

diff --git a/arch/microblaze/include/asm/elf.h b/arch/microblaze/include/asm/elf.h
index 81337f2..896b563 100644
--- a/arch/microblaze/include/asm/elf.h
+++ b/arch/microblaze/include/asm/elf.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -27,4 +29,96 @@
  */
 #define ELF_CLASS	ELFCLASS32
 
+#ifndef __uClinux__
+
+/*
+ * ELF register definitions..
+ */
+
+#include <asm/ptrace.h>
+#include <asm/user.h>
+#include <asm/byteorder.h>
+
+#ifndef ELF_GREG_T
+#define ELF_GREG_T
+typedef unsigned long elf_greg_t;
+#endif
+
+#ifndef ELF_NGREG
+#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
+#endif
+
+#ifndef ELF_GREGSET_T
+#define ELF_GREGSET_T
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+#endif
+
+#ifndef ELF_FPREGSET_T
+#define ELF_FPREGSET_T
+
+/* TBD */
+#define ELF_NFPREG	33	/* includes fsr */
+typedef unsigned long elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+/* typedef struct user_fpu_struct elf_fpregset_t; */
+#endif
+
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader.  We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+
+#define ELF_ET_DYN_BASE         (0x08000000)
+
+#ifdef __LITTLE_ENDIAN__
+#define ELF_DATA	ELFDATA2LSB
+#else
+#define ELF_DATA	ELFDATA2MSB
+#endif
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE	4096
+
+
+#define ELF_CORE_COPY_REGS(_dest, _regs)			\
+	memcpy((char *) &_dest, (char *) _regs,		\
+	sizeof(struct pt_regs));
+
+/* This yields a mask that user programs can use to figure out what
+ * instruction set this CPU supports.  This could be done in user space,
+ * but it's not easy, and we've already done it here.
+ */
+#define ELF_HWCAP	(0)
+
+/* This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization.  This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+
+ * For the moment, we have only optimizations for the Intel generations,
+ * but that could change...
+ */
+#define ELF_PLATFORM  (NULL)
+
+/* Added _f parameter. Is this definition correct: TBD */
+#define ELF_PLAT_INIT(_r, _f)				\
+do {							\
+	_r->r1 =  _r->r1 =  _r->r2 =  _r->r3 =		\
+	_r->r4 =  _r->r5 =  _r->r6 =  _r->r7 =		\
+	_r->r8 =  _r->r9 =  _r->r10 = _r->r11 =		\
+	_r->r12 = _r->r13 = _r->r14 = _r->r15 =		\
+	_r->r16 = _r->r17 = _r->r18 = _r->r19 =		\
+	_r->r20 = _r->r21 = _r->r22 = _r->r23 =		\
+	_r->r24 = _r->r25 = _r->r26 = _r->r27 =		\
+	_r->r28 = _r->r29 = _r->r30 = _r->r31 =		\
+	0;						\
+} while (0)
+
+#ifdef __KERNEL__
+#define SET_PERSONALITY(ex) set_personality(PER_LINUX_32BIT)
+#endif
+
+#endif /* __uClinux__ */
+
 #endif /* _ASM_MICROBLAZE_ELF_H */
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27  8:32                                                       ` [PATCH 28/30] microblaze_mmu_v1: Elf update monstr
@ 2009-04-27  8:32                                                         ` monstr
  2009-04-27  8:32                                                           ` [PATCH 30/30] microblaze_mmu_v1: fcntl.h " monstr
  2009-04-27  9:58                                                           ` [PATCH 29/30] microblaze_mmu_v1: stat.h " Christoph Hellwig
  0 siblings, 2 replies; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/include/asm/stat.h |   26 +++++++++++++++++++++++++-
 1 files changed, 25 insertions(+), 1 deletions(-)

diff --git a/arch/microblaze/include/asm/stat.h b/arch/microblaze/include/asm/stat.h
index 5f18b8a..5a33b17 100644
--- a/arch/microblaze/include/asm/stat.h
+++ b/arch/microblaze/include/asm/stat.h
@@ -36,7 +36,7 @@ struct stat {
 	unsigned long	__unused4;
 	unsigned long	__unused5;
 };
-
+# ifdef __uClinux__
 struct stat64 {
 	unsigned long long	st_dev;
 	unsigned long	__unused1;
@@ -69,5 +69,29 @@ struct stat64 {
 
 	unsigned long	__unused8;
 };
+# else /* __uClinux__ */
+/* FIXME */
+struct stat64 {
+	unsigned long long	st_dev; /* Device.*/
+	unsigned long long	st_ino; /* File serial number.*/
+	unsigned int		st_mode; /* File mode.*/
+	unsigned int		st_nlink; /* Link count. */
+	unsigned int		st_uid; /* User ID of the file's owner. */
+	unsigned int		st_gid; /* Group ID of the file's group. */
+	unsigned long long	st_rdev; /* Device number, if device. */
+	unsigned short		__pad2;
+	long long		st_size; /* Size of file, in bytes. */
+	int			st_blksize; /* Optimal block size for I/O. */
+	long long		st_blocks; /* No. 512-byte blocks allocated */
+	int			st_atime; /* Time of last access.  */
+	unsigned int		st_atime_nsec;
+	int			st_mtime; /* Time of last modification. */
+	unsigned int		st_mtime_nsec;
+	int			st_ctime; /* Time of last status change. */
+	unsigned int		st_ctime_nsec;
+	unsigned int		__unused4;
+	unsigned int		__unused5;
+};
+# endif /* __uClinux__ */
 
 #endif /* _ASM_MICROBLAZE_STAT_H */
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* [PATCH 30/30] microblaze_mmu_v1: fcntl.h MMU update
  2009-04-27  8:32                                                         ` [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update monstr
@ 2009-04-27  8:32                                                           ` monstr
  2009-04-27  9:59                                                             ` Christoph Hellwig
  2009-04-27  9:58                                                           ` [PATCH 29/30] microblaze_mmu_v1: stat.h " Christoph Hellwig
  1 sibling, 1 reply; 81+ messages in thread
From: monstr @ 2009-04-27  8:32 UTC (permalink / raw)
  To: linux-kernel; +Cc: john.williams, Michal Simek

From: Michal Simek <monstr@monstr.eu>

Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/include/asm/fcntl.h |   14 ++++++++++++++
 1 files changed, 14 insertions(+), 0 deletions(-)

diff --git a/arch/microblaze/include/asm/fcntl.h b/arch/microblaze/include/asm/fcntl.h
index 46ab12d..b4d27f6 100644
--- a/arch/microblaze/include/asm/fcntl.h
+++ b/arch/microblaze/include/asm/fcntl.h
@@ -1 +1,15 @@
+#ifndef _ASM_MICROBLAZE_FCNTL_H
+#define _ASM_MICROBLAZE_FCNTL_H
+
+#ifndef __uClinux__
+
+#define O_DIRECTORY	040000 /* must be a directory */
+#define O_NOFOLLOW	0100000 /* don't follow links */
+#define O_LARGEFILE	0200000
+#define O_DIRECT	0400000 /* direct disk access hint */
+
+#endif /* __uClinux__ */
+
 #include <asm-generic/fcntl.h>
+
+#endif /* _ASM_MICROBLAZE_FCNTL_H */
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27  8:32                                                         ` [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update monstr
  2009-04-27  8:32                                                           ` [PATCH 30/30] microblaze_mmu_v1: fcntl.h " monstr
@ 2009-04-27  9:58                                                           ` Christoph Hellwig
  2009-04-27 10:35                                                             ` Michal Simek
  1 sibling, 1 reply; 81+ messages in thread
From: Christoph Hellwig @ 2009-04-27  9:58 UTC (permalink / raw)
  To: monstr; +Cc: linux-kernel, john.williams

On Mon, Apr 27, 2009 at 10:32:18AM +0200, monstr@monstr.eu wrote:
> +# ifdef __uClinux__
>  struct stat64 {
>  	unsigned long long	st_dev;
>  	unsigned long	__unused1;
> @@ -69,5 +69,29 @@ struct stat64 {
>  
>  	unsigned long	__unused8;
>  };
> +# else /* __uClinux__ */
> +/* FIXME */
> +struct stat64 {
> +	unsigned long long	st_dev; /* Device.*/
> +	unsigned long long	st_ino; /* File serial number.*/
> +	unsigned int		st_mode; /* File mode.*/
> +	unsigned int		st_nlink; /* Link count. */
> +	unsigned int		st_uid; /* User ID of the file's owner. */
> +	unsigned int		st_gid; /* Group ID of the file's group. */
> +	unsigned long long	st_rdev; /* Device number, if device. */
> +	unsigned short		__pad2;
> +	long long		st_size; /* Size of file, in bytes. */
> +	int			st_blksize; /* Optimal block size for I/O. */
> +	long long		st_blocks; /* No. 512-byte blocks allocated */
> +	int			st_atime; /* Time of last access.  */
> +	unsigned int		st_atime_nsec;
> +	int			st_mtime; /* Time of last modification. */
> +	unsigned int		st_mtime_nsec;
> +	int			st_ctime; /* Time of last status change. */
> +	unsigned int		st_ctime_nsec;
> +	unsigned int		__unused4;
> +	unsigned int		__unused5;
> +};
> +# endif /* __uClinux__ */

Userspace ABIs must not change because of MMU vs not.


^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 30/30] microblaze_mmu_v1: fcntl.h MMU update
  2009-04-27  8:32                                                           ` [PATCH 30/30] microblaze_mmu_v1: fcntl.h " monstr
@ 2009-04-27  9:59                                                             ` Christoph Hellwig
  2009-04-27 10:05                                                               ` John Williams
  0 siblings, 1 reply; 81+ messages in thread
From: Christoph Hellwig @ 2009-04-27  9:59 UTC (permalink / raw)
  To: monstr; +Cc: linux-kernel, john.williams

On Mon, Apr 27, 2009 at 10:32:19AM +0200, monstr@monstr.eu wrote:
> From: Michal Simek <monstr@monstr.eu>
> 
> Signed-off-by: Michal Simek <monstr@monstr.eu>
> ---
>  arch/microblaze/include/asm/fcntl.h |   14 ++++++++++++++
>  1 files changed, 14 insertions(+), 0 deletions(-)
> 
> diff --git a/arch/microblaze/include/asm/fcntl.h b/arch/microblaze/include/asm/fcntl.h
> index 46ab12d..b4d27f6 100644
> --- a/arch/microblaze/include/asm/fcntl.h
> +++ b/arch/microblaze/include/asm/fcntl.h
> @@ -1 +1,15 @@
> +#ifndef _ASM_MICROBLAZE_FCNTL_H
> +#define _ASM_MICROBLAZE_FCNTL_H
> +
> +#ifndef __uClinux__
> +
> +#define O_DIRECTORY	040000 /* must be a directory */
> +#define O_NOFOLLOW	0100000 /* don't follow links */
> +#define O_LARGEFILE	0200000
> +#define O_DIRECT	0400000 /* direct disk access hint */
> +
> +#endif /* __uClinux__ */
> +
>  #include <asm-generic/fcntl.h>

Again, ABIs should always be the same for MMU vs noMMU.

Also no new port shall override the asm-generic/fcntl.h constants.
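
As a rough sketch, assuming no MicroBlaze-specific flag values are needed at
all, the whole header could stay a thin wrapper around the generic one:

/* arch/microblaze/include/asm/fcntl.h - minimal sketch, no overrides */
#ifndef _ASM_MICROBLAZE_FCNTL_H
#define _ASM_MICROBLAZE_FCNTL_H

/* take every O_* constant from the generic header */
#include <asm-generic/fcntl.h>

#endif /* _ASM_MICROBLAZE_FCNTL_H */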


^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 30/30] microblaze_mmu_v1: fcntl.h MMU update
  2009-04-27  9:59                                                             ` Christoph Hellwig
@ 2009-04-27 10:05                                                               ` John Williams
  2009-04-27 10:56                                                                 ` Michal Simek
  0 siblings, 1 reply; 81+ messages in thread
From: John Williams @ 2009-04-27 10:05 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: monstr, linux-kernel

Michal,

On Mon, Apr 27, 2009 at 7:59 PM, Christoph Hellwig <hch@infradead.org> wrote:
> On Mon, Apr 27, 2009 at 10:32:19AM +0200, monstr@monstr.eu wrote:
>> From: Michal Simek <monstr@monstr.eu>
>>
>> Signed-off-by: Michal Simek <monstr@monstr.eu>
>> ---
>>  arch/microblaze/include/asm/fcntl.h |   14 ++++++++++++++
>>  1 files changed, 14 insertions(+), 0 deletions(-)
>>
>> diff --git a/arch/microblaze/include/asm/fcntl.h b/arch/microblaze/include/asm/fcntl.h
>> index 46ab12d..b4d27f6 100644
>> --- a/arch/microblaze/include/asm/fcntl.h
>> +++ b/arch/microblaze/include/asm/fcntl.h
>> @@ -1 +1,15 @@
>> +#ifndef _ASM_MICROBLAZE_FCNTL_H
>> +#define _ASM_MICROBLAZE_FCNTL_H
>> +
>> +#ifndef __uClinux__
>> +
>> +#define O_DIRECTORY  040000 /* must be a directory */
>> +#define O_NOFOLLOW   0100000 /* don't follow links */
>> +#define O_LARGEFILE  0200000
>> +#define O_DIRECT     0400000 /* direct disk access hint */
>> +
>> +#endif /* __uClinux__ */
>> +
>>  #include <asm-generic/fcntl.h>
>
> Again, ABIs should always be the same for MMU vs noMMU.

Is there a reason you need these custom fcntls?  What if you rebuild
glibc against the newer kernel headers (without __uClinux__)?

John

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27  9:58                                                           ` [PATCH 29/30] microblaze_mmu_v1: stat.h " Christoph Hellwig
@ 2009-04-27 10:35                                                             ` Michal Simek
  2009-04-27 11:30                                                               ` Christoph Hellwig
  2009-04-27 12:53                                                               ` Arnd Bergmann
  0 siblings, 2 replies; 81+ messages in thread
From: Michal Simek @ 2009-04-27 10:35 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-kernel, john.williams, Arnd Bergmann

Christoph Hellwig wrote:
> On Mon, Apr 27, 2009 at 10:32:18AM +0200, monstr@monstr.eu wrote:
>> +# ifdef __uClinux__
>>  struct stat64 {
>>  	unsigned long long	st_dev;
>>  	unsigned long	__unused1;
>> @@ -69,5 +69,29 @@ struct stat64 {
>>  
>>  	unsigned long	__unused8;
>>  };
>> +# else /* __uClinux__ */
>> +/* FIXME */
>> +struct stat64 {
>> +	unsigned long long	st_dev; /* Device.*/
>> +	unsigned long long	st_ino; /* File serial number.*/
>> +	unsigned int		st_mode; /* File mode.*/
>> +	unsigned int		st_nlink; /* Link count. */
>> +	unsigned int		st_uid; /* User ID of the file's owner. */
>> +	unsigned int		st_gid; /* Group ID of the file's group. */
>> +	unsigned long long	st_rdev; /* Device number, if device. */
>> +	unsigned short		__pad2;
>> +	long long		st_size; /* Size of file, in bytes. */
>> +	int			st_blksize; /* Optimal block size for I/O. */
>> +	long long		st_blocks; /* No. 512-byte blocks allocated */
>> +	int			st_atime; /* Time of last access.  */
>> +	unsigned int		st_atime_nsec;
>> +	int			st_mtime; /* Time of last modification. */
>> +	unsigned int		st_mtime_nsec;
>> +	int			st_ctime; /* Time of last status change. */
>> +	unsigned int		st_ctime_nsec;
>> +	unsigned int		__unused4;
>> +	unsigned int		__unused5;
>> +};
>> +# endif /* __uClinux__ */
> 
> Userspace ABIs must not change because of MMU vs not.
> 

:-)

I expect that someone will beat me up over the last two patches.

Short answer: you are right, I know about it.

Long answer: with the stat64 structure I wanted to find out more information
about its fields. Some of the fields have different
types and I would like to know more about them.
For example,
does a long long type make sense for st_blocks? IMHO unsigned would be better.
And I would like to create a new stat64 structure that is not broken for both the noMMU and MMU versions.
In the noMMU implementation st_blocks is unsigned long. Is that OK, or is unsigned long long better?

Thanks,
Michal

-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 01/30] microblaze_mmu_v1: Makefiles
  2009-04-27  8:31 ` [PATCH 01/30] microblaze_mmu_v1: Makefiles monstr
  2009-04-27  8:31   ` [PATCH 02/30] microblaze_mmu_v1: Kconfig update monstr
@ 2009-04-27 10:50   ` Arnd Bergmann
  2009-04-27 10:54     ` Michal Simek
  1 sibling, 1 reply; 81+ messages in thread
From: Arnd Bergmann @ 2009-04-27 10:50 UTC (permalink / raw)
  To: monstr; +Cc: linux-kernel, john.williams

On Monday 27 April 2009, monstr@monstr.eu wrote:
> From: Michal Simek <monstr@monstr.eu>
> 
> Signed-off-by: Michal Simek <monstr@monstr.eu>
> ---
>  arch/microblaze/Makefile        |    4 ++++
>  arch/microblaze/boot/Makefile   |    2 ++
>  arch/microblaze/kernel/Makefile |    1 +
>  arch/microblaze/lib/Makefile    |    3 ++-
>  arch/microblaze/mm/Makefile     |    2 ++
>  5 files changed, 11 insertions(+), 1 deletions(-)

It would be nice to keep the Makefile and Kconfig changes together with
the files they are referring to, otherwise you risk getting an
unbuildable source tree in the middle of a git-bisect.

	Arnd <><

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 01/30] microblaze_mmu_v1: Makefiles
  2009-04-27 10:50   ` [PATCH 01/30] microblaze_mmu_v1: Makefiles Arnd Bergmann
@ 2009-04-27 10:54     ` Michal Simek
  0 siblings, 0 replies; 81+ messages in thread
From: Michal Simek @ 2009-04-27 10:54 UTC (permalink / raw)
  To: Arnd Bergmann; +Cc: linux-kernel, john.williams

Arnd Bergmann wrote:
> On Monday 27 April 2009, monstr@monstr.eu wrote:
>> From: Michal Simek <monstr@monstr.eu>
>>
>> Signed-off-by: Michal Simek <monstr@monstr.eu>
>> ---
>>  arch/microblaze/Makefile        |    4 ++++
>>  arch/microblaze/boot/Makefile   |    2 ++
>>  arch/microblaze/kernel/Makefile |    1 +
>>  arch/microblaze/lib/Makefile    |    3 ++-
>>  arch/microblaze/mm/Makefile     |    2 ++
>>  5 files changed, 11 insertions(+), 1 deletions(-)
> 
> It would be nice to keep the Makefile and Kconfig changes together with
> the files they are referring to, otherwise you risk getting an
> unbuildable source tree in the middle of a git-bisect.

Makes sense to me too -> I'll move the Makefile changes to the end of the set (in the next version).

Thanks,
Michal
> 
> 	Arnd <><


-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 10/30] microblaze_mmu_v1: Context handling - mmu_context.c/h
  2009-04-27  8:31                   ` [PATCH 10/30] microblaze_mmu_v1: Context handling - mmu_context.c/h monstr
  2009-04-27  8:32                     ` [PATCH 11/30] microblaze_mmu_v1: Page table - ioremap - pgtable.c/h, section update monstr
@ 2009-04-27 10:55                     ` Arnd Bergmann
  2009-04-27 11:18                     ` Geert Uytterhoeven
  2 siblings, 0 replies; 81+ messages in thread
From: Arnd Bergmann @ 2009-04-27 10:55 UTC (permalink / raw)
  To: monstr; +Cc: linux-kernel, john.williams

On Monday 27 April 2009, monstr@monstr.eu wrote:
>  arch/microblaze/include/asm/mmu_context.h |  134 +++++++++++++++++++++++++++++
>  arch/microblaze/mm/mmu_context.c          |   70 +++++++++++++++
>  2 files changed, 204 insertions(+), 0 deletions(-)
>  create mode 100644 arch/microblaze/mm/mmu_context.c
> 

Your mmu_context.h is basically

#ifdef CONFIG_MMU
 /* MMU code */
#else
 /* NOMMU code */
#endif

but doesn't have any shared code at all. In cases like this, I'd
recommend having three separate files as m68k does: mmu_context_no.h
mmu_context_mm.h and mmu_context.h that has a simple switch between
them.
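
A minimal sketch of the dispatching header, assuming the two variants are
split out into mmu_context_mm.h and mmu_context_no.h as on m68k:

/* arch/microblaze/include/asm/mmu_context.h - sketch only */
#ifdef CONFIG_MMU
# include <asm/mmu_context_mm.h>
#else
# include <asm/mmu_context_no.h>
#endif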

	Arnd <><

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 30/30] microblaze_mmu_v1: fcntl.h MMU update
  2009-04-27 10:05                                                               ` John Williams
@ 2009-04-27 10:56                                                                 ` Michal Simek
  0 siblings, 0 replies; 81+ messages in thread
From: Michal Simek @ 2009-04-27 10:56 UTC (permalink / raw)
  To: John Williams; +Cc: Christoph Hellwig, linux-kernel

John Williams wrote:
> Michal,
> 
> On Mon, Apr 27, 2009 at 7:59 PM, Christoph Hellwig <hch@infradead.org> wrote:
>> On Mon, Apr 27, 2009 at 10:32:19AM +0200, monstr@monstr.eu wrote:
>>> From: Michal Simek <monstr@monstr.eu>
>>>
>>> Signed-off-by: Michal Simek <monstr@monstr.eu>
>>> ---
>>>  arch/microblaze/include/asm/fcntl.h |   14 ++++++++++++++
>>>  1 files changed, 14 insertions(+), 0 deletions(-)
>>>
>>> diff --git a/arch/microblaze/include/asm/fcntl.h b/arch/microblaze/include/asm/fcntl.h
>>> index 46ab12d..b4d27f6 100644
>>> --- a/arch/microblaze/include/asm/fcntl.h
>>> +++ b/arch/microblaze/include/asm/fcntl.h
>>> @@ -1 +1,15 @@
>>> +#ifndef _ASM_MICROBLAZE_FCNTL_H
>>> +#define _ASM_MICROBLAZE_FCNTL_H
>>> +
>>> +#ifndef __uClinux__
>>> +
>>> +#define O_DIRECTORY  040000 /* must be a directory */
>>> +#define O_NOFOLLOW   0100000 /* don't follow links */
>>> +#define O_LARGEFILE  0200000
>>> +#define O_DIRECT     0400000 /* direct disk access hint */
>>> +
>>> +#endif /* __uClinux__ */
>>> +
>>>  #include <asm-generic/fcntl.h>
>> Again, ABIs should always be the same for MMU vs noMMU.
> 
> Is there a reason you need these custom fcntls?  What if you rebuild
> glibc against the newer kernel headers (without __uClinux__)?

The situation here is a little bit cleaner than the stat64 issue. This should definitely be
removed before merging. I need more time to test.

Thanks,
Michal

> 
> John


-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 10/30] microblaze_mmu_v1: Context handling -  mmu_context.c/h
  2009-04-27  8:31                   ` [PATCH 10/30] microblaze_mmu_v1: Context handling - mmu_context.c/h monstr
  2009-04-27  8:32                     ` [PATCH 11/30] microblaze_mmu_v1: Page table - ioremap - pgtable.c/h, section update monstr
  2009-04-27 10:55                     ` [PATCH 10/30] microblaze_mmu_v1: Context handling - mmu_context.c/h Arnd Bergmann
@ 2009-04-27 11:18                     ` Geert Uytterhoeven
  2 siblings, 0 replies; 81+ messages in thread
From: Geert Uytterhoeven @ 2009-04-27 11:18 UTC (permalink / raw)
  To: monstr; +Cc: linux-kernel, john.williams

On Mon, Apr 27, 2009 at 10:31,  <monstr@monstr.eu> wrote:
> --- a/arch/microblaze/include/asm/mmu_context.h
> +++ b/arch/microblaze/include/asm/mmu_context.h

> +/*
> + * This function defines the mapping from contexts to VSIDs (virtual
> + * segment IDs).  We use a skew on both the context and the high 4 bits
> + * of the 32-bit virtual address (the "effective segment ID") in order
> + * to spread out the entries in the MMU hash table.  Note, if this
> + * function is changed then arch/ppc/mm/hashtable.S will have to be
                                    ^^^

> + * changed to correspond.
> + */

Gr{oetje,eeting}s,

						Geert

--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- geert@linux-m68k.org

In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
							    -- Linus Torvalds

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27 10:35                                                             ` Michal Simek
@ 2009-04-27 11:30                                                               ` Christoph Hellwig
  2009-04-27 11:43                                                                 ` Michal Simek
  2009-04-27 12:53                                                               ` Arnd Bergmann
  1 sibling, 1 reply; 81+ messages in thread
From: Christoph Hellwig @ 2009-04-27 11:30 UTC (permalink / raw)
  To: Michal Simek
  Cc: Christoph Hellwig, linux-kernel, john.williams, Arnd Bergmann

On Mon, Apr 27, 2009 at 12:35:51PM +0200, Michal Simek wrote:
> types and I would like to know more about them.
> For example,
> does a long long type make sense for st_blocks? IMHO unsigned would be better.
> And I would like to create a new stat64 structure that is not broken for both the noMMU and MMU versions.
> In the noMMU implementation st_blocks is unsigned long. Is that OK, or is unsigned long long better?

You want unsigned long long for it.  Take a look at e.g. the xtensa
stat.h and just reuse that one.  We should probably create an
asm-generic/stat.h that new architectures can simply use.

Given that microblaze only got merged in the 2.6.30 window I would
suggest fixing up the nommu variant.
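
Once such a generic header exists, the per-architecture file could presumably
shrink to a stub along these lines (sketch only):

/* arch/microblaze/include/asm/stat.h - hypothetical, once asm-generic/stat.h is added */
#ifndef _ASM_MICROBLAZE_STAT_H
#define _ASM_MICROBLAZE_STAT_H

#include <asm-generic/stat.h>

#endif /* _ASM_MICROBLAZE_STAT_H */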

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 23/30] microblaze_mmu_v1: Enable fork syscall for MMU and  add fork as vfork for noMMU
  2009-04-27  8:32                                             ` [PATCH 23/30] microblaze_mmu_v1: Enable fork syscall for MMU and add fork as vfork for noMMU monstr
  2009-04-27  8:32                                               ` [PATCH 24/30] microblaze_mmu_v1: Traps MMU update monstr
@ 2009-04-27 11:43                                               ` John Williams
  2009-04-29 10:16                                                 ` Michal Simek
  1 sibling, 1 reply; 81+ messages in thread
From: John Williams @ 2009-04-27 11:43 UTC (permalink / raw)
  To: monstr; +Cc: linux-kernel

On Mon, Apr 27, 2009 at 6:32 PM,  <monstr@monstr.eu> wrote:
> From: Michal Simek <monstr@monstr.eu>
>
> Signed-off-by: Michal Simek <monstr@monstr.eu>
> ---
>  arch/microblaze/kernel/entry-nommu.S   |    3 ++-
>  arch/microblaze/kernel/syscall_table.S |    2 +-
>  2 files changed, 3 insertions(+), 2 deletions(-)
>
> diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
> index f24b126..89f978a 100644
> --- a/arch/microblaze/kernel/entry-nommu.S
> +++ b/arch/microblaze/kernel/entry-nommu.S
> @@ -10,7 +10,7 @@
>
>  #include <linux/linkage.h>
>  #include <asm/thread_info.h>
> -#include <asm/errno.h>
> +#include <linux/errno.h>
>  #include <asm/entry.h>
>  #include <asm/asm-offsets.h>
>  #include <asm/registers.h>
> @@ -551,6 +551,7 @@ no_work_pending:
>        rtid    r14, 0
>        nop
>
> +sys_fork_wrapper:
>  sys_vfork_wrapper:
>        brid    sys_vfork
>        addk    r5, r1, r0

NACK!  If only it was that easy :)

On noMMU we cannot just silently call vfork() if the application tries
to fork() - you have to return -ENOSYS (or is there a better one?).
This will just require a small sys_fork_wrapper in entry-nommu.S.
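
In C the intended behaviour is just this (sketch only - the signature is
illustrative, and the real wrapper would be a few instructions in
entry-nommu.S):

#include <linux/errno.h>
#include <linux/linkage.h>

/* sketch: reject fork() outright on noMMU instead of silently aliasing it to vfork() */
asmlinkage long sys_fork(void)
{
	return -ENOSYS;	/* fork() needs an MMU for copy-on-write */
}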

John
-- 
John Williams, PhD, B.Eng, B.IT
PetaLogix - Linux Solutions for a Reconfigurable World
w: www.petalogix.com  p: +61-7-30090663  f: +61-7-30090663

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27 11:30                                                               ` Christoph Hellwig
@ 2009-04-27 11:43                                                                 ` Michal Simek
  2009-04-27 11:57                                                                   ` Arnd Bergmann
  0 siblings, 1 reply; 81+ messages in thread
From: Michal Simek @ 2009-04-27 11:43 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-kernel, john.williams, Arnd Bergmann

Christoph Hellwig wrote:
> On Mon, Apr 27, 2009 at 12:35:51PM +0200, Michal Simek wrote:
>> types and I would like to know more about them.
>> For example,
>> does a long long type make sense for st_blocks? IMHO unsigned would be better.
>> And I would like to create a new stat64 structure that is not broken for both the noMMU and MMU versions.
>> In the noMMU implementation st_blocks is unsigned long. Is that OK, or is unsigned long long better?
> 
> You want unsigned long long for it.  Take a look at e.g. the xtensa
> stat.h and just reuse that one.  We should probably create an
> asm-generic/stat.h that new architectures can simply use.
> 
> Given that microblaze only got merged in the 2.6.30 window I would
> suggest fixing up the nommu variant.

ok. Let's do it.
Here are stats structures from xtensa.

Arnd: Is it ok for asm-generic?

Thanks,
Michal

#ifndef _XTENSA_STAT_H
#define _XTENSA_STAT_H

#define STAT_HAVE_NSEC 1

struct stat {
	unsigned long	st_dev;
	unsigned long	st_ino;
	unsigned int	st_mode;
	unsigned int	st_nlink;
	unsigned int	st_uid;
	unsigned int	st_gid;
	unsigned long	st_rdev;
	long		st_size;
	unsigned long	st_blksize;
	unsigned long	st_blocks;
	unsigned long	st_atime;
	unsigned long	st_atime_nsec;
	unsigned long	st_mtime;
	unsigned long	st_mtime_nsec;
	unsigned long	st_ctime;
	unsigned long	st_ctime_nsec;
	unsigned long	__unused4;
	unsigned long	__unused5;
};

struct stat64  {
	unsigned long long st_dev;	/* Device */
	unsigned long long st_ino;	/* File serial number */
	unsigned int  st_mode;		/* File mode. */
	unsigned int  st_nlink;		/* Link count. */
	unsigned int  st_uid;		/* User ID of the file's owner. */
	unsigned int  st_gid;		/* Group ID of the file's group. */
	unsigned long long st_rdev;	/* Device number, if device. */
	long long st_size;		/* Size of file, in bytes. */
	unsigned long st_blksize;	/* Optimal block size for I/O. */
	unsigned long __unused2;
	unsigned long long st_blocks;	/* Number 512-byte blocks allocated. */
	unsigned long st_atime;		/* Time of last access. */
	unsigned long st_atime_nsec;
	unsigned long st_mtime;		/* Time of last modification. */
	unsigned long st_mtime_nsec;
	unsigned long st_ctime;		/* Time of last status change. */
	unsigned long st_ctime_nsec;
	unsigned long __unused4;
	unsigned long __unused5;
};



-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27 11:43                                                                 ` Michal Simek
@ 2009-04-27 11:57                                                                   ` Arnd Bergmann
  2009-04-27 12:15                                                                     ` Sam Ravnborg
  2009-04-27 12:31                                                                     ` Michal Simek
  0 siblings, 2 replies; 81+ messages in thread
From: Arnd Bergmann @ 2009-04-27 11:57 UTC (permalink / raw)
  To: monstr; +Cc: Christoph Hellwig, linux-kernel, john.williams

On Monday 27 April 2009, Michal Simek wrote:
> Christoph Hellwig wrote:

> > Given that microblaze only got merged in the 2.6.30 window I would
> > suggest fixing up the nommu variant.
> 
> ok. Let's do it.
> Here are stats structures from xtensa.
> 
> Arnd: Is it ok for asm-generic?

I carry a (not cleaned up) tree with changes in my playground
(git.kernel.org/pub/scm/linux/kernel/git/arnd/playground.git)
as branch generic-microblaze, where I try to use generic
headers for everything. See below for my version of stat.h.
In particular, it uses 64 bit st_size and st_blocks, but 32 bit
time fields. It also defines struct stat to be identical to
struct stat64 on 64 bit systems. 32 bit systems should only
provide the stat64 syscalls, so the shorter types in struct
stat do not hurt.

	Arnd <><

---
#ifndef __ASM_GENERIC_STAT_H
#define __ASM_GENERIC_STAT_H

/*
 * Everybody gets this wrong and has to stick with it for all
 * eternity. Hopefully, this version gets used by new architectures
 * so they don't fall into the same traps.
 *
 * stat64 is copied from powerpc64, with explicit padding added.
 * stat is the same structure layout on 64-bit, without the 'long long'
 * types.
 *
 * By convention, 64 bit architectures use the stat interface, while
 * 32 bit architectures use the stat64 interface. Note that we don't
 * provide an __old_kernel_stat here, which new architecture should
 * not have to start with.
 */

#include <asm/bitsperlong.h>

#define STAT_HAVE_NSEC 1

struct stat {
	unsigned long	st_dev;		/* Device.  */
	unsigned long	st_ino;		/* File serial number.  */
	unsigned int	st_mode;	/* File mode.  */
	unsigned int	st_nlink;	/* Link count.  */
	unsigned int	st_uid;		/* User ID of the file's owner.  */
	unsigned int	st_gid;		/* Group ID of the file's group. */
	unsigned long	st_rdev;	/* Device number, if device.  */
	unsigned long	__pad1;
	long		st_size;	/* Size of file, in bytes.  */
	int		st_blksize;	/* Optimal block size for I/O.  */
	int		__pad2;
	long		st_blocks;	/* Number 512-byte blocks allocated. */
	int		st_atime;	/* Time of last access.  */
	unsigned int	st_atime_nsec;
	int		st_mtime;	/* Time of last modification.  */
	unsigned int	st_mtime_nsec;
	int		st_ctime;	/* Time of last status change.  */
	unsigned int	st_ctime_nsec;
	unsigned int	__unused4;
	unsigned int	__unused5;
};

#if __BITS_PER_LONG != 64
/* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
struct stat64 {
	unsigned long long st_dev;	/* Device.  */
	unsigned long long st_ino;	/* File serial number.  */
	unsigned int	st_mode;	/* File mode.  */
	unsigned int	st_nlink;	/* Link count.  */
	unsigned int	st_uid;		/* User ID of the file's owner.  */
	unsigned int	st_gid;		/* Group ID of the file's group. */
	unsigned long long st_rdev;	/* Device number, if device.  */
	unsigned long long __pad1;
	long long	st_size;	/* Size of file, in bytes.  */
	int		st_blksize;	/* Optimal block size for I/O.  */
	int		__pad2;
	long long	st_blocks;	/* Number 512-byte blocks allocated. */
	int		st_atime;	/* Time of last access.  */
	unsigned int	st_atime_nsec;
	int		st_mtime;	/* Time of last modification.  */
	unsigned int	st_mtime_nsec;
	int		st_ctime;	/* Time of last status change.  */
	unsigned int	st_ctime_nsec;
	unsigned int	__unused4;
	unsigned int	__unused5;
};
#endif

#endif /* __ASM_GENERIC_STAT_H */

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27 11:57                                                                   ` Arnd Bergmann
@ 2009-04-27 12:15                                                                     ` Sam Ravnborg
  2009-04-27 12:37                                                                       ` Arnd Bergmann
  2009-04-27 12:31                                                                     ` Michal Simek
  1 sibling, 1 reply; 81+ messages in thread
From: Sam Ravnborg @ 2009-04-27 12:15 UTC (permalink / raw)
  To: Arnd Bergmann; +Cc: monstr, Christoph Hellwig, linux-kernel, john.williams

On Mon, Apr 27, 2009 at 01:57:00PM +0200, Arnd Bergmann wrote:
> On Monday 27 April 2009, Michal Simek wrote:
> > Christoph Hellwig wrote:
> 
> > > Given that microblaze only got merged in the 2.6.30 window I would
> > > suggest fixing up the nommu variant.
> > 
> > ok. Let's do it.
> > Here are stats structures from xtensa.
> > 
> > Arnd: Is it ok for asm-generic?
> 
> I carry a (not cleaned up) tree with changes in my playground
> (git.kernel.org/pub/scm/linux/kernel/git/arnd/playground.git)
> as branch generic-microblaze, where I try to use generic
> headers for everything. See below for my version of stat.h.
> In particular, it uses 64 bit st_size and st_blocks, but 32 bit
> time fields. It also defines struct stat to be identical to
> struct stat64 on 64 bit systems. 32 bit systems should only
> provide the stat64 syscalls, so the shorter types in struct
> stat do not hurt.

Why not use __u64/__u32 (and s32/s64 where appropriate)?
Historical baggage or a technical reason?

	Sam

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27 11:57                                                                   ` Arnd Bergmann
  2009-04-27 12:15                                                                     ` Sam Ravnborg
@ 2009-04-27 12:31                                                                     ` Michal Simek
  2009-04-27 12:42                                                                       ` Arnd Bergmann
  1 sibling, 1 reply; 81+ messages in thread
From: Michal Simek @ 2009-04-27 12:31 UTC (permalink / raw)
  To: Arnd Bergmann; +Cc: Christoph Hellwig, linux-kernel, john.williams

Arnd Bergmann wrote:
> On Monday 27 April 2009, Michal Simek wrote:
>> Christoph Hellwig wrote:
> 
>>> Given that microblaze only got merged in the 2.6.30 window I would
>>> suggest fixing up the nommu variant.
>> ok. Let's do it.
>> Here are stats structures from xtensa.
>>
>> Arnd: Is it ok for asm-generic?
> 
> I carry a (not cleaned up) tree with changes in my playground
> (git.kernel.org/pub/scm/linux/kernel/git/arnd/playground.git)
> as branch generic-microblaze, where I try to use generic
> headers for everything. See below for my version of stat.h.
> In particular, it uses 64 bit st_size and st_blocks, but 32 bit
> time fields. It also defines struct stat to be identical to
> struct stat64 on 64 bit systems. 32 bit systems should only
> provide the stat64 syscalls, so the shorter types in struct
> stat do not hurt.
> 
> 	Arnd <><
> 
> ---
> #ifndef __ASM_GENERIC_STAT_H
> #define __ASM_GENERIC_STAT_H
> 
> /*
>  * Everybody gets this wrong and has to stick with it for all
>  * eternity. Hopefully, this version gets used by new architectures
>  * so they don't fall into the same traps.
>  *
>  * stat64 is copied from powerpc64, with explicit padding added.
>  * stat is the same structure layout on 64-bit, without the 'long long'
>  * types.
>  *
>  * By convention, 64 bit architectures use the stat interface, while
>  * 32 bit architectures use the stat64 interface. Note that we don't
>  * provide an __old_kernel_stat here, which new architecture should
>  * not have to start with.
>  */
> 
> #include <asm/bitsperlong.h>
> 
> #define STAT_HAVE_NSEC 1
> 
> struct stat {
> 	unsigned long	st_dev;		/* Device.  */
> 	unsigned long	st_ino;		/* File serial number.  */
> 	unsigned int	st_mode;	/* File mode.  */
> 	unsigned int	st_nlink;	/* Link count.  */
> 	unsigned int	st_uid;		/* User ID of the file's owner.  */
> 	unsigned int	st_gid;		/* Group ID of the file's group. */
> 	unsigned long	st_rdev;	/* Device number, if device.  */
> 	unsigned long	__pad1;
> 	long		st_size;	/* Size of file, in bytes.  */

Maybe unsigned? It makes more sense to me that a file size cannot be negative.
And the same for some types below.

Michal


> 	int		st_blksize;	/* Optimal block size for I/O.  */


> 	int		__pad2;
> 	long		st_blocks;	/* Number 512-byte blocks allocated. */

> 	int		st_atime;	/* Time of last access.  */
> 	unsigned int	st_atime_nsec;
> 	int		st_mtime;	/* Time of last modification.  */
> 	unsigned int	st_mtime_nsec;
> 	int		st_ctime;	/* Time of last status change.  */
> 	unsigned int	st_ctime_nsec;
> 	unsigned int	__unused4;
> 	unsigned int	__unused5;
> };
> 
> #if __BITS_PER_LONG != 64
> /* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
> struct stat64 {
> 	unsigned long long st_dev;	/* Device.  */
> 	unsigned long long st_ino;	/* File serial number.  */
> 	unsigned int	st_mode;	/* File mode.  */
> 	unsigned int	st_nlink;	/* Link count.  */
> 	unsigned int	st_uid;		/* User ID of the file's owner.  */
> 	unsigned int	st_gid;		/* Group ID of the file's group. */
> 	unsigned long long st_rdev;	/* Device number, if device.  */
> 	unsigned long long __pad1;
> 	long long	st_size;	/* Size of file, in bytes.  */
> 	int		st_blksize;	/* Optimal block size for I/O.  */
> 	int		__pad2;
> 	long long	st_blocks;	/* Number 512-byte blocks allocated. */
> 	int		st_atime;	/* Time of last access.  */
> 	unsigned int	st_atime_nsec;
> 	int		st_mtime;	/* Time of last modification.  */
> 	unsigned int	st_mtime_nsec;
> 	int		st_ctime;	/* Time of last status change.  */
> 	unsigned int	st_ctime_nsec;
> 	unsigned int	__unused4;
> 	unsigned int	__unused5;
> };
> #endif
> 
> #endif /* __ASM_GENERIC_STAT_H */


-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27 12:15                                                                     ` Sam Ravnborg
@ 2009-04-27 12:37                                                                       ` Arnd Bergmann
  2009-04-27 12:48                                                                         ` Sam Ravnborg
  0 siblings, 1 reply; 81+ messages in thread
From: Arnd Bergmann @ 2009-04-27 12:37 UTC (permalink / raw)
  To: Sam Ravnborg; +Cc: monstr, Christoph Hellwig, linux-kernel, john.williams

On Monday 27 April 2009, Sam Ravnborg wrote:
> Why not use __u64/__u32 (and s32/s64 where appropriate)?
> Historical baggage or a technical reason?

Yes, purely historical reasons: all architectures currently
use the simple C types rather than __u32 in stat.h.

I don't think it makes a difference either way, so I'd
leave it like this unless you find a good reason for
changing.

	Arnd <><

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27 12:31                                                                     ` Michal Simek
@ 2009-04-27 12:42                                                                       ` Arnd Bergmann
  2009-04-27 12:44                                                                         ` Michal Simek
  0 siblings, 1 reply; 81+ messages in thread
From: Arnd Bergmann @ 2009-04-27 12:42 UTC (permalink / raw)
  To: monstr; +Cc: Christoph Hellwig, linux-kernel, john.williams

On Monday 27 April 2009, Michal Simek wrote:
> >       long            st_size;        /* Size of file, in bytes.  */
> 
> Maybe unsigned? It makes more sense to me that a file size cannot be negative.
> And the same for some types below.

st_size represents an off_t in user space, which is signed.
Just over half the existing architectures use signed types
for st_size, but for this (like Sam's suggestion for __u64),
I don't think it matters either way.

	Arnd <><

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27 12:42                                                                       ` Arnd Bergmann
@ 2009-04-27 12:44                                                                         ` Michal Simek
  2009-04-27 12:47                                                                           ` Arnd Bergmann
  0 siblings, 1 reply; 81+ messages in thread
From: Michal Simek @ 2009-04-27 12:44 UTC (permalink / raw)
  To: Arnd Bergmann; +Cc: Christoph Hellwig, linux-kernel, john.williams

Arnd Bergmann wrote:
> On Monday 27 April 2009, Michal Simek wrote:
>>>       long            st_size;        /* Size of file, in bytes.  */
>> Maybe unsigned? It makes more sense to me that a file size cannot be negative.
>> And the same for some types below.
> 
> st_size represents an off_t in user space, which is signed.
> Just over half the existing architectures use signed types
> for st_size, but for this (like Sam's suggestion for __u64),
> I don't think it matters either way.

OK. Does that mean your structure is perfect? If yes, I'll recompile the toolchain and do some more tests.

Michal

> 
> 	Arnd <><


-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27 12:44                                                                         ` Michal Simek
@ 2009-04-27 12:47                                                                           ` Arnd Bergmann
  2009-04-27 12:48                                                                             ` Michal Simek
  0 siblings, 1 reply; 81+ messages in thread
From: Arnd Bergmann @ 2009-04-27 12:47 UTC (permalink / raw)
  To: monstr; +Cc: Christoph Hellwig, linux-kernel, john.williams

On Monday 27 April 2009, Michal Simek wrote:
> > st_size represents an off_t in user space, which is signed.
> > Just over half the existing architectures use signed types
> > for st_size, but for this (like Sam's suggestion for __u64),
> > I don't think it matters either way.
> 
> OK. Does that mean your structure is perfect? If yes,
> I'll recompile the toolchain and do some more tests.

Please wait for my full patches, which I am about to
post. I hope to get to the stage where literally none
of the ABI is defined in your own header files but
it all gets into the generic parts. I hope to get this
out within a few hours.

	Arnd <><

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27 12:37                                                                       ` Arnd Bergmann
@ 2009-04-27 12:48                                                                         ` Sam Ravnborg
  2009-04-27 12:55                                                                           ` Arnd Bergmann
  0 siblings, 1 reply; 81+ messages in thread
From: Sam Ravnborg @ 2009-04-27 12:48 UTC (permalink / raw)
  To: Arnd Bergmann; +Cc: monstr, Christoph Hellwig, linux-kernel, john.williams

On Mon, Apr 27, 2009 at 02:37:08PM +0200, Arnd Bergmann wrote:
> On Monday 27 April 2009, Sam Ravnborg wrote:
> > Why not use __u64/__u32 (and s32/s64 where appropriate)?
> > Historical baggage or a technical reason?
> 
> Yes, purely historical reasons: all architectures currently
> use the simple C types rather than __u32 in stat.h.
> 
> I don't think it makes a difference either way, so I'd
> leave it like this unless you find a good reason for
> changing.

The reasons I had were:
1) consistency. We say that we should use the width-specific types in our interfaces.
2) readability. We expect to see the kernel types used - so we know them and do not
                start to wonder why we did not use them here.
3) documentation. The __{u,s}{32,64} types document the size better than "unsigned int" / "long long" etc.

But no strong technical arguments.
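
For illustration only (field subset and struct name are just an example), the
same layout spelled with the width-specific types would read:

#include <linux/types.h>

/* illustrative sketch, not the actual microblaze definition */
struct stat64_example {
	__u64	st_dev;		/* Device.  */
	__u64	st_ino;		/* File serial number.  */
	__u32	st_mode;	/* File mode.  */
	__u32	st_nlink;	/* Link count.  */
	__s64	st_size;	/* Size of file, in bytes.  */
	/* remaining fields follow the same pattern */
};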

	Sam

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27 12:47                                                                           ` Arnd Bergmann
@ 2009-04-27 12:48                                                                             ` Michal Simek
  2009-04-30 13:53                                                                               ` Arnd Bergmann
  0 siblings, 1 reply; 81+ messages in thread
From: Michal Simek @ 2009-04-27 12:48 UTC (permalink / raw)
  To: Arnd Bergmann; +Cc: Christoph Hellwig, linux-kernel, john.williams

Arnd Bergmann wrote:
> On Monday 27 April 2009, Michal Simek wrote:
>>> st_size represents an off_t in user space, which is signed.
>>> Just over half the existing architectures use signed types
>>> for st_size, but for this (like Sam's suggestion for __u64),
>>> I don't think it matters either way.
>> OK. Does that mean your structure is perfect? If yes,
>> I'll recompile the toolchain and do some more tests.
> 
> Please wait for my full patches, which I am about to
> post. I hope to get to the stage where literally none
> of the ABI is defined in your own header files but
> it all gets into the generic parts. I hope to get this
> out within a few hours.

OK.

Michal
> 
> 	Arnd <><


-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27 10:35                                                             ` Michal Simek
  2009-04-27 11:30                                                               ` Christoph Hellwig
@ 2009-04-27 12:53                                                               ` Arnd Bergmann
  2009-04-27 13:03                                                                 ` Michal Simek
  1 sibling, 1 reply; 81+ messages in thread
From: Arnd Bergmann @ 2009-04-27 12:53 UTC (permalink / raw)
  To: monstr; +Cc: Christoph Hellwig, linux-kernel, john.williams

On Monday 27 April 2009, Michal Simek wrote:
> Long answer: with the stat64 structure I wanted to find out more information
> about its fields. Some of the fields have different
> types and I would like to know more about them.
> For example,
> does a long long type make sense for st_blocks? IMHO unsigned would be better.
> And I would like to create a new stat64 structure that is not broken for both the noMMU and MMU versions.
> In the noMMU implementation st_blocks is unsigned long. Is that OK, or is unsigned long long better?
> 

I still think that there are major changes that should be done to the
Microblaze ABI. I actually did most of the work for this, but then you
beat me by getting it upstream first ;-)

I suppose it's much too late for 2.6.30 now, but maybe we can do one major
ABI change to microblaze by making the ABI totally generic in the next
merge window. I'll follow up with an RFC for this.

	Arnd <><

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27 12:48                                                                         ` Sam Ravnborg
@ 2009-04-27 12:55                                                                           ` Arnd Bergmann
  0 siblings, 0 replies; 81+ messages in thread
From: Arnd Bergmann @ 2009-04-27 12:55 UTC (permalink / raw)
  To: Sam Ravnborg; +Cc: monstr, Christoph Hellwig, linux-kernel, john.williams

On Monday 27 April 2009, Sam Ravnborg wrote:
> The reasons I had were:
> 1) consistency. We say that we should use the width-specific types in our interfaces.
> 2) readability. We expect to see the kernel types used - so we know them and do not
>                 start to wonder why we did not use them here.
> 3) documentation. The __{u,s}{32,64} types document the size better than "unsigned int" / "long long" etc.
> 
> But no strong technical arguments.

Well, these are good points, I guess. I'm about to post a series
that adds generic headers for all the ABI. I'm not adapting them
all for this, but it might be a good idea, considering that the
asm-generic headers have a documentation character as well, meaning
that they should serve as an example for how to write new headers.

	Arnd <><

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27 12:53                                                               ` Arnd Bergmann
@ 2009-04-27 13:03                                                                 ` Michal Simek
  2009-04-27 13:13                                                                   ` Arnd Bergmann
  0 siblings, 1 reply; 81+ messages in thread
From: Michal Simek @ 2009-04-27 13:03 UTC (permalink / raw)
  To: Arnd Bergmann; +Cc: Christoph Hellwig, linux-kernel, john.williams

Arnd Bergmann wrote:
> On Monday 27 April 2009, Michal Simek wrote:
>> Long answer: in stat64 structure I wanted to find out more information
>> about stat64 structure. I mean there are some variables and they have different
>> types and I would like to know about more info.
>> For example
>> If make sense long long type for st_blocks. IMHO unsigned will be better.
>> And I would like to create new stat64 structure where is not a fault for both noMMU/MMU version.
>> In noMMU implementation is st_blocks unsigned long. Is it OK? or unsigned long long is better?
>>
> 
> I still think that there are major changes that should be done to the
> Microblaze ABI. I actually did most of the work for this, but then you
> beat me by getting it upstream first ;-)

For me it is/was good that I could focus on the MMU part and not spend all my time moving the noMMU
patches around and waiting for future changes. The community is sending patches for Microblaze, and for me it is
easier to manage small changes than to drag a bunch of 50 patches along to the next version.

> 
> I suppose it's much too late for 2.6.30 now, but maybe we can do one major
> ABI change to microblaze by making the ABI totally generic in the next
> merge window. I'll follow up with an RFC for this.

If you propose changes to me, I'll try to test them. If the results are OK, I'll add them in the next open
merge window for Microblaze. As I wrote before, I am open to your vision, and with microblaze in
mainline I can test things more easily than before.

Thanks,
Michal



> 
> 	Arnd <><


-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27 13:03                                                                 ` Michal Simek
@ 2009-04-27 13:13                                                                   ` Arnd Bergmann
  0 siblings, 0 replies; 81+ messages in thread
From: Arnd Bergmann @ 2009-04-27 13:13 UTC (permalink / raw)
  To: monstr; +Cc: Christoph Hellwig, linux-kernel, john.williams

On Monday 27 April 2009, Michal Simek wrote:
> > I still think that there are major changes that should be done to the
> > Microblaze ABI. I actually did most of the work for this, but then you
> > beat me by getting it upstream first ;-)
> 
> For me it is/was good that I could focus on the MMU part and not spend all my time moving the noMMU
> patches around and waiting for future changes. The community is sending patches for Microblaze, and for me it is
> easier to manage small changes than to drag a bunch of 50 patches along to the next version.

Oh, absolutely, and I think it's great that you managed to get it all in, as
it also makes my job easier. It just means that any ABI change now has to
be thought through much more.

> > I suppose it's much too late for 2.6.30 now, but maybe we can do one major
> > ABI change to microblaze by making the ABI totally generic in the next
> > merge window. I'll follow up with an RFC for this.
> 
> If you propose changes to me, I'll try to test them. If the results are OK, I'll add them in the next open
> merge window for Microblaze. As I wrote before, I am open to your vision, and with microblaze in
> mainline I can test things more easily than before.

ok.

	Arnd <><

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 07/30] microblaze_mmu_v1: MMU initialization
  2009-04-27  8:31             ` [PATCH 07/30] microblaze_mmu_v1: MMU initialization monstr
  2009-04-27  8:31               ` [PATCH 08/30] microblaze_mmu_v1: mmu.h update monstr
@ 2009-04-29  5:42               ` Andrew Morton
  2009-04-29  7:02                 ` Michal Simek
  2009-04-29  8:42                 ` Michal Simek
  1 sibling, 2 replies; 81+ messages in thread
From: Andrew Morton @ 2009-04-29  5:42 UTC (permalink / raw)
  To: monstr; +Cc: linux-kernel, john.williams

On Mon, 27 Apr 2009 10:31:56 +0200 monstr@monstr.eu wrote:

> +	/* Check for nobats option (used in mapin_ram). */
> +	if (strstr(cmd_line, "nobats"))
> +		__map_without_bats = 1;

Sometime you could go through the architecture and document all of its
kernel command-line parameters in Documentation/kernel-parameters.txt.

Or maybe document them in a separate microblaze documentation file,
which gives you a bit more freedom to expound upon what they do.
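
For example, a kernel-parameters.txt entry for the `nobats' option quoted above
could look something like this (the description is just my guess at what it does):

	nobats		[MICROBLAZE]
			Do not use block address translation when mapping
			kernel lowmem (see mapin_ram()).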


^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 08/30] microblaze_mmu_v1: mmu.h update
  2009-04-27  8:31               ` [PATCH 08/30] microblaze_mmu_v1: mmu.h update monstr
  2009-04-27  8:31                 ` [PATCH 09/30] microblaze_mmu_v1: Page fault handling high level - fault.c monstr
@ 2009-04-29  5:44                 ` Andrew Morton
  2009-04-29  9:28                   ` Michal Simek
  1 sibling, 1 reply; 81+ messages in thread
From: Andrew Morton @ 2009-04-29  5:44 UTC (permalink / raw)
  To: monstr; +Cc: linux-kernel, john.williams

On Mon, 27 Apr 2009 10:31:57 +0200 monstr@monstr.eu wrote:

> +extern PTE *Hash, *Hash_end;
> +extern unsigned long Hash_size, Hash_mask;

These are global symbols?

There's some namespace collision risk here. `microblaze_mmu_hash' or
whatever would be a safer change.
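
For example (names purely illustrative):

extern PTE *microblaze_mmu_hash, *microblaze_mmu_hash_end;
extern unsigned long microblaze_mmu_hash_size, microblaze_mmu_hash_mask;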

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 09/30] microblaze_mmu_v1: Page fault handling high level - fault.c
  2009-04-27  8:31                 ` [PATCH 09/30] microblaze_mmu_v1: Page fault handling high level - fault.c monstr
  2009-04-27  8:31                   ` [PATCH 10/30] microblaze_mmu_v1: Context handling - mmu_context.c/h monstr
@ 2009-04-29  5:46                   ` Andrew Morton
  2009-04-29  9:30                     ` Michal Simek
  1 sibling, 1 reply; 81+ messages in thread
From: Andrew Morton @ 2009-04-29  5:46 UTC (permalink / raw)
  To: monstr; +Cc: linux-kernel, john.williams

On Mon, 27 Apr 2009 10:31:58 +0200 monstr@monstr.eu wrote:

> +#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)

Only powerpc can define CONFIG_XMON.  Confused.

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update
  2009-04-27  8:32                                       ` [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update monstr
  2009-04-27  8:32                                         ` [PATCH 21/30] microblaze_mmu_v1: Add MMU related exceptions handling monstr
@ 2009-04-29  5:53                                         ` Andrew Morton
  2009-04-29 10:12                                           ` Michal Simek
  2009-04-29 11:26                                         ` Arnd Bergmann
  2 siblings, 1 reply; 81+ messages in thread
From: Andrew Morton @ 2009-04-29  5:53 UTC (permalink / raw)
  To: monstr; +Cc: linux-kernel, john.williams

On Mon, 27 Apr 2009 10:32:09 +0200 monstr@monstr.eu wrote:

> +#define __clear_user(addr, n)	(memset((void *)(addr), 0, (n)), 0)

__clear_user() already takes a void* argument, so the cast is hopefully
unneeded and undesirable.  The same applies to many other
architectures.  If someone is passing a non-pointer type to
__clear_user() then that's potentially a bad thing which we want to
know about.

IOW, someone screwed up and everyone copied it :(


^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 21/30] microblaze_mmu_v1: Add MMU related exceptions handling
  2009-04-27  8:32                                         ` [PATCH 21/30] microblaze_mmu_v1: Add MMU related exceptions handling monstr
  2009-04-27  8:32                                           ` [PATCH 22/30] microblaze_mmu_v1: Update linker script for MMU monstr
@ 2009-04-29  5:54                                           ` Andrew Morton
  2009-04-29  9:36                                             ` Michal Simek
  1 sibling, 1 reply; 81+ messages in thread
From: Andrew Morton @ 2009-04-29  5:54 UTC (permalink / raw)
  To: monstr; +Cc: linux-kernel, john.williams

On Mon, 27 Apr 2009 10:32:10 +0200 monstr@monstr.eu wrote:

> MICROBLAZE_PRIVILEG_EXCEPTION

Someon can' spel.

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 07/30] microblaze_mmu_v1: MMU initialization
  2009-04-29  5:42               ` [PATCH 07/30] microblaze_mmu_v1: MMU initialization Andrew Morton
@ 2009-04-29  7:02                 ` Michal Simek
  2009-04-29  8:42                 ` Michal Simek
  1 sibling, 0 replies; 81+ messages in thread
From: Michal Simek @ 2009-04-29  7:02 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel, john.williams

Andrew Morton wrote:
> On Mon, 27 Apr 2009 10:31:56 +0200 monstr@monstr.eu wrote:
> 
>> +	/* Check for nobats option (used in mapin_ram). */
>> +	if (strstr(cmd_line, "nobats"))
>> +		__map_without_bats = 1;
> 
> Sometime you could go through the architecture and document all of its
> kernel command-line parameters in Documentation/kernel-parameters.txt.
> 
> Or maybe document them in a separate microblaze documentation file,
> which gives you a bit more freedom to expound upon what they do.
> 

This will go away - I forgot to remove it. I have to check that whole function.

Thanks,
Michal

-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 07/30] microblaze_mmu_v1: MMU initialization
  2009-04-29  5:42               ` [PATCH 07/30] microblaze_mmu_v1: MMU initialization Andrew Morton
  2009-04-29  7:02                 ` Michal Simek
@ 2009-04-29  8:42                 ` Michal Simek
  1 sibling, 0 replies; 81+ messages in thread
From: Michal Simek @ 2009-04-29  8:42 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel, john.williams

Andrew Morton wrote:
> On Mon, 27 Apr 2009 10:31:56 +0200 monstr@monstr.eu wrote:
> 
>> +	/* Check for nobats option (used in mapin_ram). */
>> +	if (strstr(cmd_line, "nobats"))
>> +		__map_without_bats = 1;
> 
> Sometime you could go through the architecture and document all of its
> kernel command-line parameters in Documentation/kernel-parameters.txt.
> 
> Or maybe document them in a separate microblaze documentation file,
> which gives you a bit more freedom to expound upon what they do.
> 

I fixed it -> it will be in the next version.

Thanks,
Michal

-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 08/30] microblaze_mmu_v1: mmu.h update
  2009-04-29  5:44                 ` [PATCH 08/30] microblaze_mmu_v1: mmu.h update Andrew Morton
@ 2009-04-29  9:28                   ` Michal Simek
  0 siblings, 0 replies; 81+ messages in thread
From: Michal Simek @ 2009-04-29  9:28 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel, john.williams

Andrew Morton wrote:
> On Mon, 27 Apr 2009 10:31:57 +0200 monstr@monstr.eu wrote:
> 
>> +extern PTE *Hash, *Hash_end;
>> +extern unsigned long Hash_size, Hash_mask;
> 
> These are global symbols?

NO. I removed them.

Thanks,
Michal
> 
> There's some namespace collision risk here. `microblaze_mmu_hash' or
> whatever would be a safer change.


-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 09/30] microblaze_mmu_v1: Page fault handling high level - fault.c
  2009-04-29  5:46                   ` [PATCH 09/30] microblaze_mmu_v1: Page fault handling high level - fault.c Andrew Morton
@ 2009-04-29  9:30                     ` Michal Simek
  0 siblings, 0 replies; 81+ messages in thread
From: Michal Simek @ 2009-04-29  9:30 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel, john.williams

Andrew Morton wrote:
> On Mon, 27 Apr 2009 10:31:58 +0200 monstr@monstr.eu wrote:
> 
>> +#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
> 
> Only powerpc can define CONFIG_XMON.  Confused.

This was there because someone in the past just copied the MMU functions from PPC
and the XMON reference came along with them. Microblaze uses a similar MMU unit to
the Xilinx ppc.

I removed all the references.

Thanks,
Michal

-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 21/30] microblaze_mmu_v1: Add MMU related exceptions handling
  2009-04-29  5:54                                           ` [PATCH 21/30] microblaze_mmu_v1: Add MMU related exceptions handling Andrew Morton
@ 2009-04-29  9:36                                             ` Michal Simek
  0 siblings, 0 replies; 81+ messages in thread
From: Michal Simek @ 2009-04-29  9:36 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel, john.williams

Andrew Morton wrote:
> On Mon, 27 Apr 2009 10:32:10 +0200 monstr@monstr.eu wrote:
> 
>> MICROBLAZE_PRIVILEG_EXCEPTION
> 
> Someon can' spel.

Fixed,
Thanks,
Michal

-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update
  2009-04-29  5:53                                         ` [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update Andrew Morton
@ 2009-04-29 10:12                                           ` Michal Simek
  2009-04-29 10:58                                             ` Arnd Bergmann
  0 siblings, 1 reply; 81+ messages in thread
From: Michal Simek @ 2009-04-29 10:12 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel, john.williams

Andrew Morton wrote:
> On Mon, 27 Apr 2009 10:32:09 +0200 monstr@monstr.eu wrote:
> 
>> +#define __clear_user(addr, n)	(memset((void *)(addr), 0, (n)), 0)
> 
> __clear_user() already takes a void* argument, so the cast is hopefully
> unneeded and undesirable.  The same applies to many other
> architectures.  If someone is passing a non-pointer type to
> __clear_user() then that's potentially a bad thing which we want to
> know about.
> 

Please look at fs/signalfd.c:72; that's the reason why I need the __clear_user macro -
its first parameter is a pointer to struct signalfd_siginfo __user.

The memset function has the following prototype:
void *memset(void *v_src, int c, __kernel_size_t n)

That's why the cast is there. I think I have to add a __force annotation,
because without it there is an address space mismatch.

Without cast
CHECK   fs/signalfd.c
fs/signalfd.c:72:8: warning: incorrect type in argument 1 (different address spaces)
fs/signalfd.c:72:8:    expected void *<noident>
fs/signalfd.c:72:8:    got struct signalfd_siginfo [noderef] <asn:1>*uinfo

With current cast (void *)
CHECK   fs/signalfd.c
fs/signalfd.c:72:8: warning: cast removes address space of expression

Here is the change which removes the address space problem.

#define __clear_user(addr, n)	(memset((__force void *)(addr), 0, (n)), 0)

Am I right?

The same mismatch exists for memset_fromio/memset_toio and maybe some others, which I want to fix too.

Thanks,
Michal


> IOW, someone screwed up and everyone copied it :(
> 






-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 23/30] microblaze_mmu_v1: Enable fork syscall for MMU and add fork as vfork for noMMU
  2009-04-27 11:43                                               ` [PATCH 23/30] microblaze_mmu_v1: Enable fork syscall for MMU and add fork as vfork for noMMU John Williams
@ 2009-04-29 10:16                                                 ` Michal Simek
  2009-04-29 11:31                                                   ` Arnd Bergmann
  0 siblings, 1 reply; 81+ messages in thread
From: Michal Simek @ 2009-04-29 10:16 UTC (permalink / raw)
  To: John Williams; +Cc: linux-kernel, Arnd Bergmann

John Williams wrote:
> On Mon, Apr 27, 2009 at 6:32 PM,  <monstr@monstr.eu> wrote:
>> From: Michal Simek <monstr@monstr.eu>
>>
>> Signed-off-by: Michal Simek <monstr@monstr.eu>
>> ---
>>  arch/microblaze/kernel/entry-nommu.S   |    3 ++-
>>  arch/microblaze/kernel/syscall_table.S |    2 +-
>>  2 files changed, 3 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
>> index f24b126..89f978a 100644
>> --- a/arch/microblaze/kernel/entry-nommu.S
>> +++ b/arch/microblaze/kernel/entry-nommu.S
>> @@ -10,7 +10,7 @@
>>
>>  #include <linux/linkage.h>
>>  #include <asm/thread_info.h>
>> -#include <asm/errno.h>
>> +#include <linux/errno.h>
>>  #include <asm/entry.h>
>>  #include <asm/asm-offsets.h>
>>  #include <asm/registers.h>
>> @@ -551,6 +551,7 @@ no_work_pending:
>>        rtid    r14, 0
>>        nop
>>
>> +sys_fork_wrapper:
>>  sys_vfork_wrapper:
>>        brid    sys_vfork
>>        addk    r5, r1, r0
> 
> NACK!  If only it was that easy :)
> 
> On noMMU we cannot just silently call vfork() if the application tries
> to fork() - you have to return -ENOSYS (or is there a better one?).
> This will just require a small sys_fork_wrapper in entry-nommu.S.

For the noMMU kernel I will call ni_syscall, which solves that problem.
(Arnd does it in his patches too.)

Thanks,
Michal

> 
> John


-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update
  2009-04-29 10:12                                           ` Michal Simek
@ 2009-04-29 10:58                                             ` Arnd Bergmann
  2009-04-29 15:19                                               ` Michal Simek
  0 siblings, 1 reply; 81+ messages in thread
From: Arnd Bergmann @ 2009-04-29 10:58 UTC (permalink / raw)
  To: monstr; +Cc: Andrew Morton, linux-kernel, john.williams

On Wednesday 29 April 2009, Michal Simek wrote:
> Here is the change which removes the address space problem.
> 
> #define __clear_user(addr, n)   (memset((__force void *)(addr), 0, (n)), 0)
> 
> Am I right?
> 
> The same mismatch exists for memset_fromio/memset_toio and maybe some others, which I want to fix too.

This will work, but a better fix would be to define an inline
function that explicitly takes a __user pointer. This would give
you warnings when code accidentally calls __clear_user on a
kernel pointer (this also adds the might_sleep()):

static inline unsigned long __must_check 
__clear_user(void __user *to, unsigned long n)
{
	memset((__force void *)to, 0, n);
	return 0;
}

static inline unsigned long __must_check 
clear_user(void __user *to, unsigned long n)
{
	might_sleep();
	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
		return n;

	return __clear_user(to, n);
}

The above is just the nommu variant. For mmu, you need to
have exception handling in __clear_user to take care of the
case where the address is part of the user mapping (access_ok)
but not currently mapped.

	Arnd <><

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update
  2009-04-27  8:32                                       ` [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update monstr
  2009-04-27  8:32                                         ` [PATCH 21/30] microblaze_mmu_v1: Add MMU related exceptions handling monstr
  2009-04-29  5:53                                         ` [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update Andrew Morton
@ 2009-04-29 11:26                                         ` Arnd Bergmann
  2 siblings, 0 replies; 81+ messages in thread
From: Arnd Bergmann @ 2009-04-29 11:26 UTC (permalink / raw)
  To: monstr; +Cc: linux-kernel, john.williams

Following up on my earlier comment:

On Monday 27 April 2009, monstr@monstr.eu wrote:

> +#define __get_user(var, ptr)				\
> +({							\
> +	int __gu_err = 0;				\
> +	switch (sizeof(*(ptr))) {			\
> +	case 1:						\
> +	case 2:						\
> +	case 4:						\
> +		(var) = *(ptr);				\
> +		break;					\
> +	case 8:						\
> +		memcpy((void *) &(var), (ptr), 8);	\
> +		break;					\
> +	default:					\
> +		(var) = 0;				\
> +		__gu_err = __get_user_bad();		\
> +		break;					\
> +	}						\
> +	__gu_err;					\
> +})

This needs more __force to dereference the user pointers.
  
>  #define __get_user_bad()	(bad_user_access_length(), (-EFAULT))

You have actually defined bad_user_access_length(), which is not
how __get_user_bad() was meant to be used. The idea is to declare
a function that is never defined, which results in a link error in case
of funny length inputs to __get_user - a cheap way to do
BUILD_BUG_ON() in a switch() statement.
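
To make that concrete, a minimal sketch of the intended pattern (the actual
access code is elided; only the default: case matters here):

extern int __get_user_bad(void);	/* never defined: calling it breaks the link */

#define __get_user(var, ptr)				\
({							\
	int __gu_err = 0;				\
	switch (sizeof(*(ptr))) {			\
	case 1: case 2: case 4: case 8:			\
		/* real user access goes here */	\
		break;					\
	default:					\
		/* bogus size: the call survives	\
		 * and the final link fails		\
		 */					\
		__gu_err = __get_user_bad();		\
		break;					\
	}						\
	__gu_err;					\
})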

> +/* FIXME is not there defined __pu_val */
> +({								\
> +	int __pu_err = 0;					\
> +	switch (sizeof(*(ptr))) {				\
> +	case 1:							\
> +	case 2:							\
> +	case 4:							\
> +		*(ptr) = (var);					\
> +		break;						\
> +	case 8: {						\
> +		typeof(*(ptr)) __pu_val = (var);		\
> +		memcpy(ptr, &__pu_val, sizeof(__pu_val));	\
> +		}						\
> +		break;						\
> +	default:						\
> +		__pu_err = __put_user_bad();			\
> +		break;						\
> +	}							\
> +	__pu_err;						\
> +})
>  
>  #define __put_user_bad()	(bad_user_access_length(), (-EFAULT))

Same comments apply here.
  
> -#define put_user(x, ptr)	__put_user(x, ptr)
> -#define get_user(x, ptr)	__get_user(x, ptr)
> -
> -#define copy_to_user(to, from, n)		(memcpy(to, from, n), 0)
> -#define copy_from_user(to, from, n)		(memcpy(to, from, n), 0)
> +#define put_user(x, ptr)	__put_user((x), (ptr))
> +#define get_user(x, ptr)	__get_user((x), (ptr))

put_user and get_user should call access_ok() for the !MMU version I guess,
if you can easily detect kernel pointers based on the address.
It's also a good idea to add might_sleep() in there if access_ok()
doesn't have it already.
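
Just to illustrate, the !MMU wrappers could then look roughly like this
(assuming access_ok() on !MMU can check the address against the RAM limits):

#define get_user(x, ptr)					\
({								\
	might_sleep();						\
	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr)))		\
		? __get_user((x), (ptr)) : -EFAULT;		\
})

#define put_user(x, ptr)					\
({								\
	might_sleep();						\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr)))		\
		? __put_user((x), (ptr)) : -EFAULT;		\
})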

> -#define __copy_to_user(to, from, n)		(copy_to_user(to, from, n))
> -#define __copy_from_user(to, from, n)		(copy_from_user(to, from, n))
> -#define __copy_to_user_inatomic(to, from, n)	(__copy_to_user(to, from, n))
> -#define __copy_from_user_inatomic(to, from, n)	(__copy_from_user(to, from, n))
> +#define copy_to_user(to, from, n)	(memcpy((to), (from), (n)), 0)
> +#define copy_from_user(to, from, n)	(memcpy((to), (from), (n)), 0)

Same here, plus they can be shared between MMU and NOMMU.

> +#else /* CONFIG_MMU */
> +
> +/*
> + * Address is valid if:
> + *  - "addr", "addr + size" and "size" are all below the limit
> + */
> +#define access_ok(type, addr, size) \
> +	(get_fs().seg > (((unsigned long)(addr)) | \
> +		(size) | ((unsigned long)(addr) + (size))))
> +
> +/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
> + type?"WRITE":"READ",addr,size,get_fs().seg)) */
> +
> +/*
> + * All the __XXX versions macros/functions below do not perform
> + * access checking. It is assumed that the necessary checks have been
> + * already performed before the finction (macro) is called.
> + */
> +
> +#define get_user(x, ptr)						\
> +({									\
> +	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr)))			\
> +		? __get_user((x), (ptr)) : -EFAULT;			\
> +})
> +
> +#define put_user(x, ptr)						\
> +({									\
> +	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr)))			\
> +		? __put_user((x), (ptr)) : -EFAULT;			\
> +})

Please add might_sleep() either here or in access_ok().

> +#define __get_user(x, ptr)						\
> +({									\
> +	unsigned long __gu_val;						\
> +	/*unsigned long __gu_ptr = (unsigned long)(ptr);*/		\
> +	long __gu_err;							\
> +	switch (sizeof(*(ptr))) {					\
> +	case 1:								\
> +		__get_user_asm("lbu", (ptr), __gu_val, __gu_err);	\
> +		break;							\
> +	case 2:								\
> +		__get_user_asm("lhu", (ptr), __gu_val, __gu_err);	\
> +		break;							\
> +	case 4:								\
> +		__get_user_asm("lw", (ptr), __gu_val, __gu_err);	\
> +		break;							\
> +	default:							\
> +		__gu_val = 0; __gu_err = -EINVAL;			\
> +	}								\
> +	x = (__typeof__(*(ptr))) __gu_val;				\
> +	__gu_err;							\
> +})

Again, the 'default:' case should give a link error, not a runtime
error. 

It seems inconsistent to have a 'case 8:' in !MMU as well as both
__put_user variants but not in the MMU __get_user.

> +#define __put_user(x, ptr)						\
> +({									\
> +	__typeof__(*(ptr)) __gu_val = x;				\
> +	long __gu_err = 0;						\
> +	switch (sizeof(__gu_val)) {					\
> +	case 1:								\
> +		__put_user_asm("sb", (ptr), __gu_val, __gu_err);	\
> +		break;							\
> +	case 2: 							\
> +		__put_user_asm("sh", (ptr), __gu_val, __gu_err);	\
> +		break;							\
> +	case 4:								\
> +		__put_user_asm("sw", (ptr), __gu_val, __gu_err);	\
> +		break;							\
> +	case 8:								\
> +		__put_user_asm_8((ptr), __gu_val, __gu_err);		\
> +		break;							\
> +	default:							\
> +		__gu_err = -EINVAL;					\
> +	}								\
> +	__gu_err;							\
> +})

> +/*
> + * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
> + */
> +static inline int clear_user(char *to, int size)
> +{
> +	if (size && access_ok(VERIFY_WRITE, to, size)) {
> +		__asm__ __volatile__ ("				\
> +				1:				\
> +					sb	r0, %2, r0;	\
> +					addik	%0, %0, -1;	\
> +					bneid	%0, 1b;		\
> +					addik	%2, %2, 1;	\
> +				2:				\
> +				.section __ex_table,\"a\";	\
> +				.word	1b,2b;			\
> +				.section .text;"		\
> +			: "=r"(size)				\
> +			: "0"(size), "r"(to)
> +		);
> +	}
> +	return size;
> +}

Please use the prototype I mentioned in the other mail.

While I don't remember the exact story, I think the preferred
way to write multi-line inline assembly is

	asm volatile("# clear_user		\n"
		"1:				\n"
		"	sb r0, %2, r0		\n"
		"	addik	%0, %0, -1	\n"
		"	bneid	%0, 1b		\n"
		"2:				\n"
		".section __ex_table,\"a\"	\n"
		".word	1b,2b;			\n"
		".section .text;		\n");

rather than a single multi-line string.

> +extern unsigned long __copy_tofrom_user(void __user *to,
> +		const void __user *from, unsigned long size);
> +
> +#define copy_to_user(to, from, n)					\
> +	(access_ok(VERIFY_WRITE, (to), (n)) ?				\
> +		__copy_tofrom_user((void __user *)(to),			\
> +			(__force const void __user *)(from), (n))	\
> +		: -EFAULT)

No, copy_to_user does *not* return an errno, but instead the number
of bytes not copied. Assuming your __copy_tofrom_user is correct,
this would be

static inline __must_check unsigned long
copy_to_user(void __user *to, void *from, size_t n)
{
	might_sleep();

	if (!access_ok(VERIFY_WRITE, to, n))
		return n;

	return __copy_tofrom_user(to, (const void __force __user *)from, n);
}

> +#define __copy_to_user(to, from, n)	copy_to_user((to), (from), (n))
> +#define __copy_to_user_inatomic(to, from, n)	copy_to_user((to), (from), (n))
> +
> +#define copy_from_user(to, from, n)					\
> +	(access_ok(VERIFY_READ, (from), (n)) ?				\
> +		__copy_tofrom_user((__force void __user *)(to),		\
> +			(void __user *)(from), (n))			\
> +		: -EFAULT)

same here

> +#define __copy_from_user(to, from, n)	copy_from_user((to), (from), (n))
> +#define __copy_from_user_inatomic(to, from, n) \
> +		copy_from_user((to), (from), (n))
> +
> +extern int __strncpy_user(char *to, const char __user *from, int len);
> +extern int __strnlen_user(const char __user *sstr, int len);
> +
> +#define strncpy_from_user(to, from, len)	\
> +		(access_ok(VERIFY_READ, from, 1) ?	\
> +			__strncpy_user(to, from, len) : -EFAULT)

and here.

	Arnd <><

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 22/30] microblaze_mmu_v1: Update linker script for MMU
  2009-04-27  8:32                                           ` [PATCH 22/30] microblaze_mmu_v1: Update linker script for MMU monstr
  2009-04-27  8:32                                             ` [PATCH 23/30] microblaze_mmu_v1: Enable fork syscall for MMU and add fork as vfork for noMMU monstr
@ 2009-04-29 11:27                                             ` Arnd Bergmann
  2009-04-29 11:39                                               ` Michal Simek
  1 sibling, 1 reply; 81+ messages in thread
From: Arnd Bergmann @ 2009-04-29 11:27 UTC (permalink / raw)
  To: monstr; +Cc: linux-kernel, john.williams

On Monday 27 April 2009, monstr@monstr.eu wrote:
> --- a/arch/microblaze/kernel/vmlinux.lds.S
> +++ b/arch/microblaze/kernel/vmlinux.lds.S
> @@ -17,8 +17,11 @@ ENTRY(_start)
>  jiffies = jiffies_64 + 4;
>  
>  SECTIONS {
> +#ifdef CONFIG_MMU
> +       . = CONFIG_KERNEL_START;
> +#else
>         . = CONFIG_KERNEL_BASE_ADDR;
> -
> +#endif
>         .text : {
>                 _text = . ;
>                 _stext = . ;

This change looks a bit bogus, can't you use the same symbol for
both? I guess either one would be fine, but I don't see a reason
for them to be different.

	Arnd <><

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 23/30] microblaze_mmu_v1: Enable fork syscall for MMU and add fork as vfork for noMMU
  2009-04-29 10:16                                                 ` Michal Simek
@ 2009-04-29 11:31                                                   ` Arnd Bergmann
  0 siblings, 0 replies; 81+ messages in thread
From: Arnd Bergmann @ 2009-04-29 11:31 UTC (permalink / raw)
  To: monstr; +Cc: John Williams, linux-kernel

On Wednesday 29 April 2009, Michal Simek wrote:
> >> +sys_fork_wrapper:
> >>  sys_vfork_wrapper:
> >>        brid    sys_vfork
> >>        addk    r5, r1, r0
> > 
> > NACK!  If only it was that easy :)
> > 
> > On noMMU we cannot just silently call vfork() if the application tries
> > to fork() - you have to return -ENOSYS (or is there a better one?).
> > This will just require a small sys_fork_wrapper in entry-nommu.S.
> 
> For the noMMU kernel I will call ni_syscall, which solves that problem.
> (Arnd does it in his patches too.)
> 

Note that my patches did not call the wrappers correctly, this
has to be fixed to make them work first.

For the fork call on !CONFIG_MMU, we should probably just add
a cond_syscall() in kernel/sys_ni.c near sys_msync() and
the other MMU specific calls.
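
Something like this, as a sketch (exact placement in sys_ni.c is just a suggestion):

/* kernel/sys_ni.c, near the other CONFIG_MMU-only entries */
cond_syscall(sys_fork);

cond_syscall() provides a weak sys_ni_syscall alias, so a !CONFIG_MMU kernel
that does not define sys_fork gets the -ENOSYS stub for free.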

	Arnd <><

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 22/30] microblaze_mmu_v1: Update linker script for MMU
  2009-04-29 11:27                                             ` [PATCH 22/30] microblaze_mmu_v1: Update linker script for MMU Arnd Bergmann
@ 2009-04-29 11:39                                               ` Michal Simek
  0 siblings, 0 replies; 81+ messages in thread
From: Michal Simek @ 2009-04-29 11:39 UTC (permalink / raw)
  To: Arnd Bergmann; +Cc: linux-kernel, john.williams

Arnd Bergmann wrote:
> On Monday 27 April 2009, monstr@monstr.eu wrote:
>> --- a/arch/microblaze/kernel/vmlinux.lds.S
>> +++ b/arch/microblaze/kernel/vmlinux.lds.S
>> @@ -17,8 +17,11 @@ ENTRY(_start)
>>  jiffies = jiffies_64 + 4;
>>  
>>  SECTIONS {
>> +#ifdef CONFIG_MMU
>> +       . = CONFIG_KERNEL_START;
>> +#else
>>         . = CONFIG_KERNEL_BASE_ADDR;
>> -
>> +#endif
>>         .text : {
>>                 _text = . ;
>>                 _stext = . ;
> 
> This change looks a bit bogus, can't you use the same symbol for
> both? I guess either one would be fine, but I don't see a reason
> for them to be different.

It will be possible, for the noMMU kernel, to make CONFIG_KERNEL_BASE_ADDR equal to
CONFIG_KERNEL_START, which would let us remove this #ifdef.

For noMMU, CONFIG_KERNEL_START would be the physical base memory address, while for MMU
KERNEL_START is 0xC0000000, but of course it could be changed in menuconfig.

I'll do some tests.
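
A rough Kconfig sketch of how the two could be folded into one symbol (the
defaults below are assumptions which I have not tested):

config KERNEL_START
	hex "Kernel start address (virtual with MMU, physical without)"
	default "0xc0000000" if MMU
	default KERNEL_BASE_ADDR if !MMU

vmlinux.lds.S could then use CONFIG_KERNEL_START unconditionally.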

Thanks,
Michal



> 
> 	Arnd <><


-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update
  2009-04-29 10:58                                             ` Arnd Bergmann
@ 2009-04-29 15:19                                               ` Michal Simek
  2009-04-29 15:35                                                 ` Arnd Bergmann
  0 siblings, 1 reply; 81+ messages in thread
From: Michal Simek @ 2009-04-29 15:19 UTC (permalink / raw)
  To: Arnd Bergmann; +Cc: Andrew Morton, linux-kernel, john.williams

Arnd Bergmann wrote:
> On Wednesday 29 April 2009, Michal Simek wrote:
>> Here is the change which removes the address space problem.
>>
>> #define __clear_user(addr, n)   (memset((__force void *)(addr), 0, (n)), 0)
>>
>> Am I right?
>>
>> The same mismatch exists for memset_fromio/memset_toio and maybe some others, which I want to fix too.
> 
> This will work, but a better fix would be to define an inline
> function that explicitly takes a __user pointer. This would give
> you warnings when code accidentally calls __clear_user on a
> kernel pointer (this also adds the might_sleep()):
> 
> static inline unsigned long __must_check 
> __clear_user(void __user *to, unsigned long n)
> {
> 	memset((__force void *)to, 0, n);
> 	return 0;
> }
> 
> static inline unsigned long __must_check 
> clear_user(void __user *to, unsigned long n)
> {
> 	might_sleep();
> 	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
> 		return n;
> 
> 	return __clear_user(to, n);
> }
> 
> The above is just the nommu variant. For mmu, you need to
> have exception handling in __clear_user to take care of the
> case where the address is part of the user mapping (access_ok)
> but not currently mapped.

Is it possible to do it for the noMMU kernel too? I mean the current MMU implementation of
__clear_user in asm is faster than calling memset for noMMU, so I think I can use the MMU
implementation for noMMU too. Adding two words to __ex_table just extends the size of one
section, but not by much, and it won't be used for noMMU.

Michal


> 
> 	Arnd <><


-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update
  2009-04-29 15:19                                               ` Michal Simek
@ 2009-04-29 15:35                                                 ` Arnd Bergmann
  2009-04-29 15:43                                                   ` Michal Simek
  0 siblings, 1 reply; 81+ messages in thread
From: Arnd Bergmann @ 2009-04-29 15:35 UTC (permalink / raw)
  To: monstr; +Cc: Andrew Morton, linux-kernel, john.williams

On Wednesday 29 April 2009, Michal Simek wrote:
> > The above is just the nommu variant. For mmu, you need to
> > have exception handling in __clear_user to take care of the
> > case where the address is part of the user mapping (access_ok)
> > but not currently mapped.
> 
> Is it possible to do it for the noMMU kernel too? I mean the current MMU
> implementation of __clear_user in asm is faster than calling memset for noMMU,
> so I think I can use the MMU implementation for noMMU too.
> Adding two words to __ex_table just extends the size of one section,
> but not by much, and it won't be used for noMMU.

Well, you can probably do something like this:

#ifdef CONFIG_MMU
#define __FIXUP_SECTION		".section .fixup,\"ax\"\n"
#define __EX_TABLE_SECTION	".section __ex_table,\"a\"\n"
#else
#define __FIXUP_SECTION		".section .discard,\"ax\"\n"
#define __EX_TABLE_SECTION	".section .discard,\"a\"\n"
#endif

+#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)             \
+({                                                                     \
+       __asm__ __volatile__ (                                          \
+                       "1:"    insn    " %1, %2, r0; \n"               \
+                       "       addk    %0, r0, r0;   \n"               \
+                       "2:                           \n"               \
+                        __FIXUP_SECTION				\
+                       "3:      brid    2b	      \n"               \
+                       "        addik   %0, r0, %3   \n"               \
+                       ".previous		      \n"               \
+                        __EX_TABLE_SECTION	                        \
+                       ".word   1b,3b		      \n"               \
+                       ".previous		      \n"               \
+               : "=r"(__gu_err), "=r"(__gu_val)                        \
+               : "r"(__gu_ptr), "i"(-EFAULT)                           \
+       );                                                              \
+})

This should simply throw away all the fixups if you list the .discard
section in vmlinux.lds.S under '/DISCARD/:', so you have no
overhead at all.
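
i.e. something like this in vmlinux.lds.S (matching the section name used in
the macros above):

	/DISCARD/ : {
		*(.discard)
	}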

	Arnd <><

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update
  2009-04-29 15:35                                                 ` Arnd Bergmann
@ 2009-04-29 15:43                                                   ` Michal Simek
  2009-04-29 15:55                                                     ` Arnd Bergmann
  0 siblings, 1 reply; 81+ messages in thread
From: Michal Simek @ 2009-04-29 15:43 UTC (permalink / raw)
  To: Arnd Bergmann; +Cc: Andrew Morton, linux-kernel, john.williams

Arnd Bergmann wrote:
> On Wednesday 29 April 2009, Michal Simek wrote:
>>> The above is just the nommu variant. For mmu, you need to
>>> have exception handling in __clear_user to take care of the
>>> case where the address is part of the user mapping (access_ok)
>>> but not currently mapped.
>> Is it possible to do it for the noMMU kernel too? I mean the current MMU
>> implementation of __clear_user in asm is faster than calling memset for noMMU,
>> so I think I can use the MMU implementation for noMMU too.
>> Adding two words to __ex_table just extends the size of one section,
>> but not by much, and it won't be used for noMMU.
> 
> Well, you can probably do something like this:
> 
> #ifdef CONFIG_MMU
> #define __FIXUP_SECTION		".section .fixup,\"ax\"\n"
> #define __EX_TABLE_SECTION	".section __ex_table,\"a\"\n"
> #else
> #define __FIXUP_SECTION		".section .discard,\"ax\"\n"
> #define __EX_TABLE_SECTION	".section .discard,\"a\"\n"
> #endif
> 
> +#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)             \
> +({                                                                     \
> +       __asm__ __volatile__ (                                          \
> +                       "1:"    insn    " %1, %2, r0; \n"               \
> +                       "       addk    %0, r0, r0;   \n"               \
> +                       "2:                           \n"               \
> +                        __FIXUP_SECTION				\
> +                       "3:      brid    2b	      \n"               \
> +                       "        addik   %0, r0, %3   \n"               \
> +                       ".previous		      \n"               \
> +                        __EX_TABLE_SECTION	                        \
> +                       ".word   1b,3b		      \n"               \
> +                       ".previous		      \n"               \
> +               : "=r"(__gu_err), "=r"(__gu_val)                        \
> +               : "r"(__gu_ptr), "i"(-EFAULT)                           \
> +       );                                                              \
> +})
> 
> This should simply throw away all the fixups if you list the .discard
> section in vmlinux.lds.S under '/DISCARD/:', so you have no
> overhead at all.

ok. I'll try it.

I looked at the user_bad macros.

#define __get_user_bad()	(bad_user_access_length(), (-EFAULT))

For powerpc I found these three references. Where is the __get_user_bad definition?
arch/powerpc/include/asm/uaccess.h:216:extern long __get_user_bad(void);
arch/powerpc/include/asm/uaccess.h:263:         (x) = __get_user_bad();                         \
arch/powerpc/include/asm/uaccess.h:269: default: (x) = __get_user_bad();                        \

Michal


> 
> 	Arnd <><


-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update
  2009-04-29 15:43                                                   ` Michal Simek
@ 2009-04-29 15:55                                                     ` Arnd Bergmann
  0 siblings, 0 replies; 81+ messages in thread
From: Arnd Bergmann @ 2009-04-29 15:55 UTC (permalink / raw)
  To: monstr; +Cc: Andrew Morton, linux-kernel, john.williams

On Wednesday 29 April 2009, Michal Simek wrote:
> For powerpc I found these three references. Where is the __get_user_bad definition?

> arch/powerpc/include/asm/uaccess.h:216:extern long __get_user_bad(void);
> arch/powerpc/include/asm/uaccess.h:263:         (x) = __get_user_bad();                         \
> arch/powerpc/include/asm/uaccess.h:269: default: (x) = __get_user_bad();                        \
> 

As I tried to explain, the whole point is that there is no definition
at all, just a declaration.
Normally, the compiler will discard the 'default:' case of get_user,
so the function is not called. However, if you pass a variable of
a length other than 1, 2, or 4, the compiler will generate a function
call to __get_user_bad in the object file. When you try to link
the kernel afterwards, you will get an error message like

Unknown symbol "__get_user_bad" for drivers/foo/bar.o

which tells you that some function in drivers/foo/bar.c calls
get_user with an invalid argument.

	Arnd <><

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-27 12:48                                                                             ` Michal Simek
@ 2009-04-30 13:53                                                                               ` Arnd Bergmann
  2009-04-30 14:10                                                                                 ` Michal Simek
  0 siblings, 1 reply; 81+ messages in thread
From: Arnd Bergmann @ 2009-04-30 13:53 UTC (permalink / raw)
  To: monstr; +Cc: Christoph Hellwig, linux-kernel, john.williams

On Monday 27 April 2009, Michal Simek wrote:
> Arnd Bergmann wrote:
> > On Monday 27 April 2009, Michal Simek wrote:
> >>> st_size represents an off_t in user space, which is signed.
> >>> Just over half the existing architectures use signed types
> >>> for st_size, but for this (like Sam's suggestion for __u64),
> >>> I don't think it matters either way.
> >> OK. Does that mean your structure is perfect? If yes,
> >> I'll recompile the toolchain and do some more tests.
> > 
> > Please wait for my full patches, which I am about to
> > post. I hope to get to the stage where literally none
> > of the ABI is defined in your own header files but
> > it all gets into the generic parts. I hope to get this
> > out within a few hours.
> 
> OK.

Any success so far with the patches I posted? I guess you
need to apply just the first of the three microblaze patches
in the series first to see if that works, before changing
the signal handling and syscall table as well.

	Arnd <><

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-30 13:53                                                                               ` Arnd Bergmann
@ 2009-04-30 14:10                                                                                 ` Michal Simek
  2009-04-30 14:40                                                                                   ` Arnd Bergmann
  0 siblings, 1 reply; 81+ messages in thread
From: Michal Simek @ 2009-04-30 14:10 UTC (permalink / raw)
  To: Arnd Bergmann; +Cc: Christoph Hellwig, linux-kernel, john.williams

Arnd Bergmann wrote:
> On Monday 27 April 2009, Michal Simek wrote:
>> Arnd Bergmann wrote:
>>> On Monday 27 April 2009, Michal Simek wrote:
>>>>> st_size represents an off_t in user space, which is signed.
>>>>> Just over half the existing architectures use signed types
>>>>> for st_size, but for this (like Sam's suggestion for __u64),
>>>>> I don't think it matters either way.
>>>> OK. Does that mean your structure is perfect? If yes,
>>>> I'll recompile the toolchain and do some more tests.
>>> Please wait for my full patches, which I am about to
>>> post. I hope to get to the stage where literally none
>>> of the ABI is defined in your own header files but
>>> it all gets into the generic parts. I hope to get this
>>> out within a few hours.
>> OK.
> 
> Any success so far with the patches I posted? I guess you
> need to apply just the first of the three microblaze patches
> in the series first to see if that works, before changing
> the signal handling and syscall table as well.

I looked at it; there are two things together: header cleanup and syscall cleanup.
I have problems with both.
1. header cleanup -> some of it is only moving headers to a generic location -> that's no problem
2. header cleanup -> some of it is painful because I have to recompile the toolchain;
that shouldn't be a big deal, but after recompiling the kernel and user apps I am getting some problems.
3. syscall cleanup -> we still have an old libc and I can't simply remove ancient syscalls

I would like to test all of it, but I can't do it too fast.
I am trying to clean up the uaccess.h troubles and I have some changes to the context switch code,
but this works only on MMU and I haven't had time to fix it on noMMU.
I am fighting with old code that I don't have any information about.

I have your changes in my queue and they inspired me to do things better and I hope that
I will be able to use all of them.

Thanks,
Michal


> 	Arnd <><


-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-30 14:10                                                                                 ` Michal Simek
@ 2009-04-30 14:40                                                                                   ` Arnd Bergmann
  2009-04-30 14:51                                                                                     ` Michal Simek
  0 siblings, 1 reply; 81+ messages in thread
From: Arnd Bergmann @ 2009-04-30 14:40 UTC (permalink / raw)
  To: monstr; +Cc: Christoph Hellwig, linux-kernel, john.williams

On Thursday 30 April 2009, Michal Simek wrote:

> 2. header cleanup -> some of it is painful because I have to recompile the toolchain;
> that shouldn't be a big deal, but after recompiling the kernel and user apps I am getting some problems.

Yes, I understand that you have a circular dependency there, but fixing
the ABI can only become harder with time, as more people start using
the current kernel/toolchain combination.

> 3. syscall cleanup -> we still have an old libc and I can't simply remove ancient syscalls

I had hoped that the way I set __ARCH_WANT_SYSCALL_* in the microblaze
unistd.h would provide an easy way to migrate, because this still defines
almost all traditional system calls (I haven't double-checked if some more
are currently used on microblaze, but they would be easy to add).
This means that as a first step, rebuilding the tool chain (as above) should
be sufficient and not require source-level changes to libc. From that point,
you can remove the remaining __ARCH_WANT_SYSCALL_* definitions one by one,
and change the libc accordingly.
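
As a sketch of that first step (the macro names are from my asm-generic
unistd.h proposal; which of them microblaze really needs is an assumption):

/* arch/microblaze/include/asm/unistd.h */
#define __ARCH_WANT_SYSCALL_NO_AT	/* legacy open(), mknod(), stat(), ... */
#define __ARCH_WANT_SYSCALL_NO_FLAGS	/* legacy pipe(), dup2(), ... */
#define __ARCH_WANT_SYSCALL_OFF_T	/* legacy 32-bit off_t calls */
#define __ARCH_WANT_SYSCALL_DEPRECATED	/* select(), utime() and friends */

#include <asm-generic/unistd.h>

and then the defines can be dropped one by one as the libc catches up.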

I could also try building uclibc with the generic headers if I find the
time. Which version do you use, just upstream or do you have a private
libc tree with more changes?

	Arnd <><

^ permalink raw reply	[flat|nested] 81+ messages in thread

* Re: [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update
  2009-04-30 14:40                                                                                   ` Arnd Bergmann
@ 2009-04-30 14:51                                                                                     ` Michal Simek
  0 siblings, 0 replies; 81+ messages in thread
From: Michal Simek @ 2009-04-30 14:51 UTC (permalink / raw)
  To: Arnd Bergmann; +Cc: Christoph Hellwig, linux-kernel, john.williams

Arnd Bergmann wrote:
> On Thursday 30 April 2009, Michal Simek wrote:
> 
>> 2. header cleanup -> some of it is painful because I have to recompile the toolchain;
>> that shouldn't be a big deal, but after recompiling the kernel and user apps I am getting some problems.
> 
> Yes, I understand that you have a circular dependency there, but fixing
> the ABI can only become harder with time, as more people start using
> the current kernel/toolchain combination.

Here there is one advantage: in mainline there are only some drivers for Microblaze,
and the rest is in petalinux, so people can use the whole distribution.

> 
>> 3. syscall cleanup -> we still have an old libc and I can't simply remove ancient syscalls
> 
> I had hoped that the way I set __ARCH_WANT_SYSCALL_* in the microblaze
> unistd.h would provide an easy way to migrate, because this still defines
> almost all traditional system calls (I haven't double-checked if some more
> are currently used on microblaze, but they would be easy to add).
> This means that as a first step, rebuilding the tool chain (as above) should
> be sufficient and not require source-level changes to libc. From that point,
> you can remove the remaining __ARCH_WANT_SYSCALL_* definitions one by one,
> and change the libc accordingly.
> 
> I could also try building uclibc with the generic headers if I find the
> time. Which version do you use, just upstream or do you have a private
> libc tree with more changes?

That's a good question, but the answer is harder.
Glibc is an ancient, 3-4 year old non-mainline version. There are some backports too.
I have some comments about it, but it is better not to write them down.

The uclibc version is a little bit better. I had a functional port of a half-year-old mainline
version plus some private fixes. Currently John W is working on it to get a working version with gcc 4.1.2.
He has some troubles with it, but I hope he solves them soon.

We have troubles because nobody has taken care of this. I hope that we are able to update
all these things and repair everything that is wrong.

Michal

> 
> 	Arnd <><


-- 
Michal Simek, Ing. (M.Eng)
w: www.monstr.eu p: +42-0-721842854

^ permalink raw reply	[flat|nested] 81+ messages in thread

end of thread, other threads:[~2009-04-30 14:51 UTC | newest]

Thread overview: 81+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-04-27  8:31 Microblaze MMU patches monstr
2009-04-27  8:31 ` [PATCH 01/30] microblaze_mmu_v1: Makefiles monstr
2009-04-27  8:31   ` [PATCH 02/30] microblaze_mmu_v1: Kconfig update monstr
2009-04-27  8:31     ` [PATCH 03/30] microblaze_mmu_v1: Add mmu_defconfig monstr
2009-04-27  8:31       ` [PATCH 04/30] microblaze_mmu_v1: MMU update for startup code monstr
2009-04-27  8:31         ` [PATCH 05/30] microblaze_mmu_v1: Alocate TLB for early console monstr
2009-04-27  8:31           ` [PATCH 06/30] microblaze_mmu_v1: TLB low level code monstr
2009-04-27  8:31             ` [PATCH 07/30] microblaze_mmu_v1: MMU initialization monstr
2009-04-27  8:31               ` [PATCH 08/30] microblaze_mmu_v1: mmu.h update monstr
2009-04-27  8:31                 ` [PATCH 09/30] microblaze_mmu_v1: Page fault handling high level - fault.c monstr
2009-04-27  8:31                   ` [PATCH 10/30] microblaze_mmu_v1: Context handling - mmu_context.c/h monstr
2009-04-27  8:32                     ` [PATCH 11/30] microblaze_mmu_v1: Page table - ioremap - pgtable.c/h, section update monstr
2009-04-27  8:32                       ` [PATCH 12/30] microblaze_mmu_v1: io.h MMU update monstr
2009-04-27  8:32                         ` [PATCH 13/30] microblaze_mmu_v1: pgalloc.h and page.h monstr
2009-04-27  8:32                           ` [PATCH 14/30] microblaze_mmu_v1: Update process creation for MMU monstr
2009-04-27  8:32                             ` [PATCH 15/30] microblaze_mmu_v1: Update tlb.h and tlbflush.h monstr
2009-04-27  8:32                               ` [PATCH 16/30] microblaze_mmu_v1: MMU asm offset update monstr
2009-04-27  8:32                                 ` [PATCH 17/30] microblaze_mmu_v1: Add CURRENT_TASK for entry.S monstr
2009-04-27  8:32                                   ` [PATCH 18/30] microblaze_mmu_v1: entry.S, entry.h monstr
2009-04-27  8:32                                     ` [PATCH 19/30] microblaze_mmu_v1: Update exception handling - MMU exception monstr
2009-04-27  8:32                                       ` [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update monstr
2009-04-27  8:32                                         ` [PATCH 21/30] microblaze_mmu_v1: Add MMU related exceptions handling monstr
2009-04-27  8:32                                           ` [PATCH 22/30] microblaze_mmu_v1: Update linker script for MMU monstr
2009-04-27  8:32                                             ` [PATCH 23/30] microblaze_mmu_v1: Enable fork syscall for MMU and add fork as vfork for noMMU monstr
2009-04-27  8:32                                               ` [PATCH 24/30] microblaze_mmu_v1: Traps MMU update monstr
2009-04-27  8:32                                                 ` [PATCH 25/30] microblaze_mmu_v1: Update signal returning address monstr
2009-04-27  8:32                                                   ` [PATCH 26/30] microblaze_mmu_v1: Update cacheflush.h monstr
2009-04-27  8:32                                                     ` [PATCH 27/30] microblaze_mmu_v1: Update dma.h for MMU monstr
2009-04-27  8:32                                                       ` [PATCH 28/30] microblaze_mmu_v1: Elf update monstr
2009-04-27  8:32                                                         ` [PATCH 29/30] microblaze_mmu_v1: stat.h MMU update monstr
2009-04-27  8:32                                                           ` [PATCH 30/30] microblaze_mmu_v1: fcntl.h " monstr
2009-04-27  9:59                                                             ` Christoph Hellwig
2009-04-27 10:05                                                               ` John Williams
2009-04-27 10:56                                                                 ` Michal Simek
2009-04-27  9:58                                                           ` [PATCH 29/30] microblaze_mmu_v1: stat.h " Christoph Hellwig
2009-04-27 10:35                                                             ` Michal Simek
2009-04-27 11:30                                                               ` Christoph Hellwig
2009-04-27 11:43                                                                 ` Michal Simek
2009-04-27 11:57                                                                   ` Arnd Bergmann
2009-04-27 12:15                                                                     ` Sam Ravnborg
2009-04-27 12:37                                                                       ` Arnd Bergmann
2009-04-27 12:48                                                                         ` Sam Ravnborg
2009-04-27 12:55                                                                           ` Arnd Bergmann
2009-04-27 12:31                                                                     ` Michal Simek
2009-04-27 12:42                                                                       ` Arnd Bergmann
2009-04-27 12:44                                                                         ` Michal Simek
2009-04-27 12:47                                                                           ` Arnd Bergmann
2009-04-27 12:48                                                                             ` Michal Simek
2009-04-30 13:53                                                                               ` Arnd Bergmann
2009-04-30 14:10                                                                                 ` Michal Simek
2009-04-30 14:40                                                                                   ` Arnd Bergmann
2009-04-30 14:51                                                                                     ` Michal Simek
2009-04-27 12:53                                                               ` Arnd Bergmann
2009-04-27 13:03                                                                 ` Michal Simek
2009-04-27 13:13                                                                   ` Arnd Bergmann
2009-04-27 11:43                                               ` [PATCH 23/30] microblaze_mmu_v1: Enable fork syscall for MMU and add fork as vfork for noMMU John Williams
2009-04-29 10:16                                                 ` Michal Simek
2009-04-29 11:31                                                   ` Arnd Bergmann
2009-04-29 11:27                                             ` [PATCH 22/30] microblaze_mmu_v1: Update linker script for MMU Arnd Bergmann
2009-04-29 11:39                                               ` Michal Simek
2009-04-29  5:54                                           ` [PATCH 21/30] microblaze_mmu_v1: Add MMU related exceptions handling Andrew Morton
2009-04-29  9:36                                             ` Michal Simek
2009-04-29  5:53                                         ` [PATCH 20/30] microblaze_mmu_v1: uaccess MMU update Andrew Morton
2009-04-29 10:12                                           ` Michal Simek
2009-04-29 10:58                                             ` Arnd Bergmann
2009-04-29 15:19                                               ` Michal Simek
2009-04-29 15:35                                                 ` Arnd Bergmann
2009-04-29 15:43                                                   ` Michal Simek
2009-04-29 15:55                                                     ` Arnd Bergmann
2009-04-29 11:26                                         ` Arnd Bergmann
2009-04-27 10:55                     ` [PATCH 10/30] microblaze_mmu_v1: Context handling - mmu_context.c/h Arnd Bergmann
2009-04-27 11:18                     ` Geert Uytterhoeven
2009-04-29  5:46                   ` [PATCH 09/30] microblaze_mmu_v1: Page fault handling high level - fault.c Andrew Morton
2009-04-29  9:30                     ` Michal Simek
2009-04-29  5:44                 ` [PATCH 08/30] microblaze_mmu_v1: mmu.h update Andrew Morton
2009-04-29  9:28                   ` Michal Simek
2009-04-29  5:42               ` [PATCH 07/30] microblaze_mmu_v1: MMU initialization Andrew Morton
2009-04-29  7:02                 ` Michal Simek
2009-04-29  8:42                 ` Michal Simek
2009-04-27 10:50   ` [PATCH 01/30] microblaze_mmu_v1: Makefiles Arnd Bergmann
2009-04-27 10:54     ` Michal Simek
