unified VC4 and ARM runtime code (including C++ runtime), updated some minor bits
authorchristinaa <kristinaa@tuta.io>
Fri, 6 Jan 2017 13:46:15 +0000 (13:46 +0000)
committerchristinaa <kristinaa@tuta.io>
Fri, 6 Jan 2017 13:46:15 +0000 (13:46 +0000)
73 files changed:
AUTHORS.md
Makefile
arm_chainloader/Makefile
arm_chainloader/build/arm_chainloader.bin [new file with mode: 0644]
arm_chainloader/build/arm_chainloader.bin.elf [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/drivers/cprman.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/drivers/fatfs/ff.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt_ro.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt_rw.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt_strerror.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt_wip.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/drivers/mailbox.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/drivers/mbr_disk.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/drivers/sdhost_impl.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/drivers/uart.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/lib_armv6/arm_bcopy.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/lib_armv6/arm_bzero.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/lib_armv6/arm_locore.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/lib_armv6/arm_memcmp.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/lib_armv6/arm_strlen.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/lib_armv6/string_misc.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/loader.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/main.o [new file with mode: 0644]
arm_chainloader/build/armv6zk-objects/start.o [new file with mode: 0644]
arm_chainloader/build/lib/cxx_runtime.o [new file with mode: 0644]
arm_chainloader/build/lib/panic.o [new file with mode: 0644]
arm_chainloader/build/lib/tlsf/tlsf.o [new file with mode: 0644]
arm_chainloader/build/lib/udelay.o [new file with mode: 0644]
arm_chainloader/build/lib/xprintf.o [new file with mode: 0644]
arm_chainloader/chainloader.h
arm_chainloader/lib/arm_bcopy.s [deleted file]
arm_chainloader/lib/arm_bzero.s [deleted file]
arm_chainloader/lib/arm_locore.s [deleted file]
arm_chainloader/lib/arm_memcmp.s [deleted file]
arm_chainloader/lib/arm_strlen.s [deleted file]
arm_chainloader/lib/string_misc.c [deleted file]
arm_chainloader/lib_armv6/arm_bcopy.s [new file with mode: 0644]
arm_chainloader/lib_armv6/arm_bzero.s [new file with mode: 0644]
arm_chainloader/lib_armv6/arm_locore.s [new file with mode: 0644]
arm_chainloader/lib_armv6/arm_memcmp.s [new file with mode: 0644]
arm_chainloader/lib_armv6/arm_strlen.s [new file with mode: 0644]
arm_chainloader/lib_armv6/string_misc.c [new file with mode: 0644]
arm_chainloader/main.c
arm_loader.cc
arm_monitor.c
build/bootcode.bin [new file with mode: 0644]
build/bootcode.bin.elf [new file with mode: 0644]
build/vc4-objects/arm_loader.o [new file with mode: 0644]
build/vc4-objects/arm_monitor.o [new file with mode: 0644]
build/vc4-objects/chainloader_inc.o [new file with mode: 0644]
build/vc4-objects/lib/cxx_runtime.o [new file with mode: 0644]
build/vc4-objects/lib/memcpy.o [new file with mode: 0644]
build/vc4-objects/lib/panic.o [new file with mode: 0644]
build/vc4-objects/lib/udelay.o [new file with mode: 0644]
build/vc4-objects/lib/xprintf.o [new file with mode: 0644]
build/vc4-objects/romstage.o [new file with mode: 0644]
build/vc4-objects/sdram.o [new file with mode: 0644]
build/vc4-objects/start.o [new file with mode: 0644]
build/vc4-objects/trap.o [new file with mode: 0644]
buildall.sh [changed mode: 0755->0644]
lib/cxx_runtime.cc [new file with mode: 0644]
lib/panic.c
lib/runtime.h [new file with mode: 0644]
lib/udelay.c
linker.lds [new file with mode: 0644]
romstage.c
sdram.c
tools/wslstage.py [new file with mode: 0644]
trap.c
vc4_include/common.h [deleted file]
vc4_include/pcb.h
vc4_include/vc4_types.h [new file with mode: 0644]

index 872bb7c..9a17fc4 100644 (file)
@@ -2,6 +2,6 @@ Authors of the `rpi-open-firmware` project are listed below, for purposes of det
 
 ---
 
-Alex Badea <vamposdecampos@gmail.com>
-Alyssa Rosenzweig <alyssa@rosenzweig.io>
-Kristina Brooks <kristinaa@tuta.io>
+ * Alex Badea <vamposdecampos@gmail.com>
+ * Alyssa Rosenzweig <alyssa@rosenzweig.io>
+ * Kristina Brooks <tinab@hush.ai>
index d7123a3..cc6e4ef 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -10,12 +10,13 @@ SRCS = \
        romstage.c \
        sdram.c \
        arm_loader.c \
-       arm_monitor.c \
+       arm_monitor.cc \
        trap.c \
        lib/xprintf.c \
        lib/panic.c \
        lib/udelay.c \
        lib/memcpy.c \
+       lib/cxx_runtime.c \
        chainloader_inc.s
 
 ARCH = vc4
@@ -38,11 +39,14 @@ OBJ := $(addprefix $(TARGET_BUILD_DIR)/, $(addsuffix .o, $(basename $(SRCS))))
 # the cross compiler should already be in your path
 CROSS_COMPILE = vc4-elf-
 CC = $(CROSS_COMPILE)gcc
+CXX = $(CROSS_COMPILE)g++
 AS = $(CC)
 OBJCOPY = $(CROSS_COMPILE)objcopy
-LINKFLAGS = -nostdlib -nostartfiles
-CFLAGS = -c -nostdlib -std=c11 -fsingle-precision-constant -Wdouble-promotion -D__VIDEOCORE4__ -I./vc4_include/
-ASFLAGS = -c -nostdlib -x assembler-with-cpp -D__VIDEOCORE4__ -I./vc4_include/
+LINKFLAGS = -nostdlib -nostartfiles -Wl,--build-id=none -T linker.lds
+
+CFLAGS = -c -nostdlib -std=c11 -fsingle-precision-constant -Wdouble-promotion -D__VIDEOCORE4__ -I./vc4_include/ -I./
+ASFLAGS = -c -nostdlib -x assembler-with-cpp -D__VIDEOCORE4__ -I./vc4_include/ -I./
+CXXFLAGS = -c -nostdlib -std=c++11 -fno-exceptions -fno-rtti -D__VIDEOCORE4__ -I./vc4_include/ -I./
 
 HEADERS := \
        $(shell find . -type f -name '*.h') \
@@ -65,6 +69,11 @@ $(TARGET_BUILD_DIR)/%.o: %.c $(HEADERS)
        @echo $(WARN_COLOR)CC  $(NO_COLOR) $@
        @$(CC) $(CFLAGS) $< -o $@
 
+$(TARGET_BUILD_DIR)/%.o: %.cc $(HEADERS)
+       $(CREATE_SUBDIR)
+       @echo $(WARN_COLOR)CXX $(NO_COLOR) $@
+       @$(CXX) $(CXXFLAGS) $< -o $@
+
 $(TARGET_BUILD_DIR)/%.o: %.s $(HEADERS)
        $(CREATE_SUBDIR)
        @echo $(WARN_COLOR)AS  $(NO_COLOR) $@
index 6ae426c..594403c 100644 (file)
@@ -3,12 +3,12 @@ TARGET_ARM_CHAINLOADER = arm_chainloader.bin
 
 SRCS = \
        start.s \
-       lib/arm_bcopy.s \
-       lib/arm_bzero.s \
-       lib/arm_locore.s \
-       lib/arm_strlen.s \
-       lib/arm_memcmp.s \
-       lib/string_misc.c \
+       lib_armv6/arm_bcopy.s \
+       lib_armv6/arm_bzero.s \
+       lib_armv6/arm_locore.s \
+       lib_armv6/arm_strlen.s \
+       lib_armv6/arm_memcmp.s \
+       lib_armv6/string_misc.c \
        drivers/uart.c \
        drivers/cprman.cc \
        drivers/libfdt/fdt.c \
@@ -23,8 +23,8 @@ SRCS = \
        ../lib/xprintf.c \
        ../lib/panic.c \
        ../lib/udelay.c \
+       ../lib/cxx_runtime.c \
        ../lib/tlsf/tlsf.c \
-       minicxx.cc \
        loader.cc \
        main.c
 
diff --git a/arm_chainloader/build/arm_chainloader.bin b/arm_chainloader/build/arm_chainloader.bin
new file mode 100644 (file)
index 0000000..9792a66
Binary files /dev/null and b/arm_chainloader/build/arm_chainloader.bin differ
diff --git a/arm_chainloader/build/arm_chainloader.bin.elf b/arm_chainloader/build/arm_chainloader.bin.elf
new file mode 100644 (file)
index 0000000..150bd48
Binary files /dev/null and b/arm_chainloader/build/arm_chainloader.bin.elf differ
diff --git a/arm_chainloader/build/armv6zk-objects/drivers/cprman.o b/arm_chainloader/build/armv6zk-objects/drivers/cprman.o
new file mode 100644 (file)
index 0000000..5275dbe
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/drivers/cprman.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/drivers/fatfs/ff.o b/arm_chainloader/build/armv6zk-objects/drivers/fatfs/ff.o
new file mode 100644 (file)
index 0000000..fc81079
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/drivers/fatfs/ff.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt.o b/arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt.o
new file mode 100644 (file)
index 0000000..dc5727e
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt_ro.o b/arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt_ro.o
new file mode 100644 (file)
index 0000000..cf7fff1
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt_ro.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt_rw.o b/arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt_rw.o
new file mode 100644 (file)
index 0000000..62cf71b
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt_rw.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt_strerror.o b/arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt_strerror.o
new file mode 100644 (file)
index 0000000..97a05a6
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt_strerror.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt_wip.o b/arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt_wip.o
new file mode 100644 (file)
index 0000000..cd26767
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/drivers/libfdt/fdt_wip.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/drivers/mailbox.o b/arm_chainloader/build/armv6zk-objects/drivers/mailbox.o
new file mode 100644 (file)
index 0000000..8d9353b
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/drivers/mailbox.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/drivers/mbr_disk.o b/arm_chainloader/build/armv6zk-objects/drivers/mbr_disk.o
new file mode 100644 (file)
index 0000000..6d0a8e9
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/drivers/mbr_disk.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/drivers/sdhost_impl.o b/arm_chainloader/build/armv6zk-objects/drivers/sdhost_impl.o
new file mode 100644 (file)
index 0000000..0ea6f6f
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/drivers/sdhost_impl.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/drivers/uart.o b/arm_chainloader/build/armv6zk-objects/drivers/uart.o
new file mode 100644 (file)
index 0000000..b627929
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/drivers/uart.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/lib_armv6/arm_bcopy.o b/arm_chainloader/build/armv6zk-objects/lib_armv6/arm_bcopy.o
new file mode 100644 (file)
index 0000000..394950f
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/lib_armv6/arm_bcopy.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/lib_armv6/arm_bzero.o b/arm_chainloader/build/armv6zk-objects/lib_armv6/arm_bzero.o
new file mode 100644 (file)
index 0000000..680a969
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/lib_armv6/arm_bzero.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/lib_armv6/arm_locore.o b/arm_chainloader/build/armv6zk-objects/lib_armv6/arm_locore.o
new file mode 100644 (file)
index 0000000..011742a
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/lib_armv6/arm_locore.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/lib_armv6/arm_memcmp.o b/arm_chainloader/build/armv6zk-objects/lib_armv6/arm_memcmp.o
new file mode 100644 (file)
index 0000000..ac8bd5c
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/lib_armv6/arm_memcmp.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/lib_armv6/arm_strlen.o b/arm_chainloader/build/armv6zk-objects/lib_armv6/arm_strlen.o
new file mode 100644 (file)
index 0000000..76fad90
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/lib_armv6/arm_strlen.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/lib_armv6/string_misc.o b/arm_chainloader/build/armv6zk-objects/lib_armv6/string_misc.o
new file mode 100644 (file)
index 0000000..602f635
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/lib_armv6/string_misc.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/loader.o b/arm_chainloader/build/armv6zk-objects/loader.o
new file mode 100644 (file)
index 0000000..c69e75f
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/loader.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/main.o b/arm_chainloader/build/armv6zk-objects/main.o
new file mode 100644 (file)
index 0000000..008feb0
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/main.o differ
diff --git a/arm_chainloader/build/armv6zk-objects/start.o b/arm_chainloader/build/armv6zk-objects/start.o
new file mode 100644 (file)
index 0000000..e938060
Binary files /dev/null and b/arm_chainloader/build/armv6zk-objects/start.o differ
diff --git a/arm_chainloader/build/lib/cxx_runtime.o b/arm_chainloader/build/lib/cxx_runtime.o
new file mode 100644 (file)
index 0000000..7ec03c0
Binary files /dev/null and b/arm_chainloader/build/lib/cxx_runtime.o differ
diff --git a/arm_chainloader/build/lib/panic.o b/arm_chainloader/build/lib/panic.o
new file mode 100644 (file)
index 0000000..41db218
Binary files /dev/null and b/arm_chainloader/build/lib/panic.o differ
diff --git a/arm_chainloader/build/lib/tlsf/tlsf.o b/arm_chainloader/build/lib/tlsf/tlsf.o
new file mode 100644 (file)
index 0000000..0c8af97
Binary files /dev/null and b/arm_chainloader/build/lib/tlsf/tlsf.o differ
diff --git a/arm_chainloader/build/lib/udelay.o b/arm_chainloader/build/lib/udelay.o
new file mode 100644 (file)
index 0000000..a5e784a
Binary files /dev/null and b/arm_chainloader/build/lib/udelay.o differ
diff --git a/arm_chainloader/build/lib/xprintf.o b/arm_chainloader/build/lib/xprintf.o
new file mode 100644 (file)
index 0000000..fc96753
Binary files /dev/null and b/arm_chainloader/build/lib/xprintf.o differ
index 05ada30..d222593 100644 (file)
@@ -1,18 +1,14 @@
 #pragma once\r
 \r
+#include <lib/runtime.h>\r
 #include <stdint.h>\r
 #include <stddef.h>\r
-#include <lib/panic.h>\r
-#include <lib/xprintf.h>\r
-#include <lib/tlsf/tlsf.h>\r
 #include <memory_map.h>\r
 \r
 #ifdef __cplusplus\r
 extern "C" {\r
 #endif\r
 \r
-extern void udelay(uint32_t time);\r
-\r
 static inline void __attribute__((noreturn)) hang_cpu() {\r
        __asm__ __volatile__ (\r
                "wfi\n"\r
diff --git a/arm_chainloader/lib/arm_bcopy.s b/arm_chainloader/lib/arm_bcopy.s
deleted file mode 100644 (file)
index 2155429..0000000
+++ /dev/null
@@ -1,397 +0,0 @@
-/*\r
- * Copyright (c) 2006, 2009 Apple Inc. All rights reserved.\r
- *\r
- * @APPLE_LICENSE_HEADER_START@\r
- * \r
- * This file contains Original Code and/or Modifications of Original Code\r
- * as defined in and that are subject to the Apple Public Source License\r
- * Version 2.0 (the 'License'). You may not use this file except in\r
- * compliance with the License. Please obtain a copy of the License at\r
- * http://www.opensource.apple.com/apsl/ and read it before using this\r
- * file.\r
- * \r
- * The Original Code and all software distributed under the License are\r
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER\r
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,\r
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,\r
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.\r
- * Please see the License for the specific language governing rights and\r
- * limitations under the License.\r
- * \r
- * @APPLE_LICENSE_HEADER_END@\r
- */\r
-\r
-.text\r
-.align 2\r
-       \r
-       .globl memcpy\r
-       .globl bcopy\r
-       .globl memmove\r
-\r
-bcopy:         /* void bcopy(const void *src, void *dest, size_t len); */\r
-       mov             r3, r0\r
-       mov             r0, r1\r
-       mov             r1, r3\r
-\r
-memcpy:                /* void *memcpy(void *dest, const void *src, size_t len); */\r
-memmove:       /* void *memmove(void *dest, const void *src, size_t len); */\r
-       /* check for zero len or if the pointers are the same */\r
-       cmp             r2, #0\r
-       cmpne   r0, r1\r
-       bxeq    lr\r
-\r
-       /* save r0 (return value), r4 (scratch), and r5 (scratch) */\r
-       stmfd   sp!, { r0, r4, r5, r7, lr }\r
-       add     r7, sp, #12\r
-       \r
-       /* check for overlap. r3 <- distance between src & dest */\r
-       subhs   r3, r0, r1\r
-       sublo   r3, r1, r0\r
-       cmp             r3, r2                  /* if distance(src, dest) < len, we have overlap */\r
-       blo             Loverlap\r
-\r
-Lnormalforwardcopy:\r
-       /* are src and dest dissimilarly word aligned? */\r
-       mov             r12, r0, lsl #30\r
-       cmp             r12, r1, lsl #30\r
-       bne             Lnonwordaligned_forward\r
-\r
-       /* if len < 64, do a quick forward copy */\r
-       cmp             r2, #64\r
-       blt             Lsmallforwardcopy\r
-\r
-       /* check for 16 byte src/dest unalignment */\r
-       tst             r0, #0xf\r
-       bne             Lsimilarlyunaligned\r
-\r
-       /* check for 32 byte dest unalignment */\r
-       tst             r0, #(1<<4)\r
-       bne             Lunaligned_32\r
-\r
-Lmorethan64_aligned:\r
-       /* save some more registers to use in the copy */\r
-       stmfd   sp!, { r6, r8, r10, r11 }\r
-\r
-       /* pre-subtract 64 from the len counter to avoid an extra compare in the loop */\r
-       sub             r2, r2, #64\r
-\r
-L64loop:\r
-       /* copy 64 bytes at a time */\r
-       ldmia   r1!, { r3, r4, r5, r6, r8, r10, r11, r12 }\r
-#ifdef _ARM_ARCH_6\r
-       pld             [r1, #32]\r
-#endif\r
-       stmia   r0!, { r3, r4, r5, r6, r8, r10, r11, r12 }\r
-       ldmia   r1!, { r3, r4, r5, r6, r8, r10, r11, r12 }\r
-       subs    r2, r2, #64\r
-#ifdef _ARM_ARCH_6\r
-       pld             [r1, #32]\r
-#endif\r
-       stmia   r0!, { r3, r4, r5, r6, r8, r10, r11, r12 }\r
-       bge             L64loop\r
-\r
-       /* restore the scratch registers we just saved */\r
-       ldmfd   sp!, { r6, r8, r10, r11 }\r
-\r
-       /* fix up the len counter (previously subtracted an extra 64 from it) and test for completion */\r
-       adds    r2, r2, #64\r
-       beq             Lexit\r
-\r
-Llessthan64_aligned:\r
-       /* copy 16 bytes at a time until we have < 16 bytes */\r
-       cmp             r2, #16\r
-       ldmgeia r1!, { r3, r4, r5, r12 }\r
-       stmgeia r0!, { r3, r4, r5, r12 }\r
-       subges  r2, r2, #16\r
-       bgt             Llessthan64_aligned\r
-       beq             Lexit\r
-       \r
-Llessthan16_aligned:\r
-       mov             r2, r2, lsl #28\r
-       msr             cpsr_f, r2\r
-\r
-       ldmmiia r1!, { r2, r3 }\r
-       ldreq   r4, [r1], #4\r
-       ldrcsh  r5, [r1], #2\r
-       ldrvsb  r12, [r1], #1\r
-\r
-       stmmiia r0!, { r2, r3 }\r
-       streq   r4, [r0], #4\r
-       strcsh  r5, [r0], #2\r
-       strvsb  r12, [r0], #1\r
-       b               Lexit\r
-\r
-Lsimilarlyunaligned:\r
-       /* both src and dest are unaligned in similar ways, align to dest on 32 byte boundary */\r
-       mov             r12, r0, lsl #28\r
-       rsb             r12, r12, #0\r
-       msr             cpsr_f, r12\r
-\r
-       ldrvsb  r3, [r1], #1\r
-       ldrcsh  r4, [r1], #2\r
-       ldreq   r5, [r1], #4\r
-\r
-       strvsb  r3, [r0], #1\r
-       strcsh  r4, [r0], #2\r
-       streq   r5, [r0], #4\r
-\r
-       ldmmiia r1!, { r3, r4 }\r
-       stmmiia r0!, { r3, r4 }\r
-\r
-       subs    r2, r2, r12, lsr #28\r
-       beq             Lexit\r
-\r
-Lunaligned_32:\r
-       /* bring up to dest 32 byte alignment */\r
-       tst             r0, #(1 << 4)\r
-       ldmneia r1!, { r3, r4, r5, r12 }\r
-       stmneia r0!, { r3, r4, r5, r12 }\r
-       subne   r2, r2, #16\r
-\r
-       /* we should now be aligned, see what copy method we should use */\r
-       cmp             r2, #64\r
-       bge             Lmorethan64_aligned\r
-       b               Llessthan64_aligned\r
-       \r
-Lbytewise2:\r
-       /* copy 2 bytes at a time */\r
-       subs    r2, r2, #2\r
-\r
-       ldrb    r3, [r1], #1\r
-       ldrplb  r4, [r1], #1\r
-\r
-       strb    r3, [r0], #1\r
-       strplb  r4, [r0], #1\r
-\r
-       bhi             Lbytewise2\r
-       b               Lexit\r
-\r
-Lbytewise:\r
-       /* simple bytewise forward copy */\r
-       ldrb    r3, [r1], #1\r
-       subs    r2, r2, #1\r
-       strb    r3, [r0], #1\r
-       bne             Lbytewise\r
-       b               Lexit\r
-\r
-Lsmallforwardcopy:\r
-       /* src and dest are word aligned similarly, less than 64 bytes to copy */\r
-       cmp             r2, #4\r
-       blt             Lbytewise2\r
-\r
-       /* bytewise copy until word aligned */\r
-       tst             r1, #3\r
-Lwordalignloop:\r
-       ldrneb  r3, [r1], #1\r
-       strneb  r3, [r0], #1\r
-       subne   r2, r2, #1\r
-       tstne   r1, #3\r
-       bne             Lwordalignloop\r
-\r
-       cmp             r2, #16\r
-       bge             Llessthan64_aligned\r
-       blt             Llessthan16_aligned\r
-\r
-Loverlap:\r
-       /* src and dest overlap in some way, len > 0 */\r
-       cmp             r0, r1                          /* if dest > src */\r
-       bhi             Loverlap_srclower\r
-\r
-Loverlap_destlower:\r
-       /* dest < src, see if we can still do a fast forward copy or fallback to slow forward copy */\r
-       cmp             r3, #64\r
-       bge             Lnormalforwardcopy      /* overlap is greater than one stride of the copy, use normal copy */\r
-\r
-       cmp             r3, #2\r
-       bge             Lbytewise2\r
-       b               Lbytewise\r
-\r
-       /* the following routines deal with having to copy in the reverse direction */\r
-Loverlap_srclower:\r
-       /* src < dest, with overlap */\r
-\r
-       /* src += len; dest += len; */\r
-       add             r0, r0, r2\r
-       add             r1, r1, r2\r
-\r
-       /* we have to copy in reverse no matter what, test if we can we use a large block reverse copy */\r
-       cmp             r2, #64                         /* less than 64 bytes to copy? */\r
-       cmpgt   r3, #64                         /* less than 64 bytes of nonoverlap? */\r
-       blt             Lbytewise_reverse\r
-\r
-       /* test of src and dest are nonword aligned differently */\r
-       mov             r3, r0, lsl #30\r
-       cmp             r3, r1, lsl #30\r
-       bne             Lbytewise_reverse\r
-\r
-       /* test if src and dest are non word aligned or dest is non 16 byte aligned */\r
-       tst             r0, #0xf\r
-       bne             Lunaligned_reverse_similarly\r
-\r
-       /* test for dest 32 byte alignment */\r
-       tst             r0, #(1<<4)\r
-       bne             Lunaligned_32_reverse_similarly\r
-\r
-       /* 64 byte reverse block copy, src and dest aligned */\r
-Lmorethan64_aligned_reverse:\r
-       /* save some more registers to use in the copy */\r
-       stmfd   sp!, { r6, r8, r10, r11 }\r
-\r
-       /* pre-subtract 64 from the len counter to avoid an extra compare in the loop */\r
-       sub             r2, r2, #64\r
-\r
-L64loop_reverse:\r
-       /* copy 64 bytes at a time */\r
-       ldmdb   r1!, { r3, r4, r5, r6, r8, r10, r11, r12 }\r
-#ifdef _ARM_ARCH_6\r
-       pld             [r1, #-32]\r
-#endif\r
-       stmdb   r0!, { r3, r4, r5, r6, r8, r10, r11, r12 }      \r
-       ldmdb   r1!, { r3, r4, r5, r6, r8, r10, r11, r12 }      \r
-       subs    r2, r2, #64\r
-#ifdef _ARM_ARCH_6\r
-       pld             [r1, #-32]\r
-#endif\r
-       stmdb   r0!, { r3, r4, r5, r6, r8, r10, r11, r12 }      \r
-       bge             L64loop_reverse\r
-\r
-       /* restore the scratch registers we just saved */\r
-       ldmfd   sp!, { r6, r8, r10, r11 }\r
-\r
-       /* fix up the len counter (previously subtracted an extra 64 from it) and test for completion */\r
-       adds    r2, r2, #64\r
-       beq             Lexit\r
-\r
-Lbytewise_reverse:\r
-       ldrb    r3, [r1, #-1]!\r
-       strb    r3, [r0, #-1]!\r
-       subs    r2, r2, #1\r
-       bne             Lbytewise_reverse\r
-       b               Lexit\r
-\r
-Lunaligned_reverse_similarly:\r
-       /* both src and dest are unaligned in similar ways, align to dest on 32 byte boundary */\r
-       mov             r12, r0, lsl #28\r
-       msr             cpsr_f, r12\r
-\r
-       ldrvsb  r3, [r1, #-1]!\r
-       ldrcsh  r4, [r1, #-2]!\r
-       ldreq   r5, [r1, #-4]!\r
-\r
-       strvsb  r3, [r0, #-1]!\r
-       strcsh  r4, [r0, #-2]!\r
-       streq   r5, [r0, #-4]!\r
-\r
-       ldmmidb r1!, { r3, r4 }\r
-       stmmidb r0!, { r3, r4 }\r
-\r
-       subs    r2, r2, r12, lsr #28\r
-       beq             Lexit\r
-\r
-Lunaligned_32_reverse_similarly:\r
-       /* bring up to dest 32 byte alignment */\r
-       tst             r0, #(1 << 4)\r
-       ldmnedb r1!, { r3, r4, r5, r12 }\r
-       stmnedb r0!, { r3, r4, r5, r12 }\r
-       subne   r2, r2, #16\r
-\r
-       /* we should now be aligned, see what copy method we should use */\r
-       cmp             r2, #64\r
-       bge             Lmorethan64_aligned_reverse\r
-       b               Lbytewise_reverse\r
-\r
-       /* the following routines deal with non word aligned copies */\r
-Lnonwordaligned_forward:\r
-       cmp             r2, #8\r
-       blt             Lbytewise2                      /* not worth the effort with less than 24 bytes total */\r
-\r
-       /* bytewise copy until src word aligned */\r
-       tst             r1, #3\r
-Lwordalignloop2:\r
-       ldrneb  r3, [r1], #1\r
-       strneb  r3, [r0], #1\r
-       subne   r2, r2, #1\r
-       tstne   r1, #3\r
-       bne             Lwordalignloop2\r
-\r
-       /* figure out how the src and dest are unaligned */\r
-       and             r3, r0, #3\r
-       cmp             r3, #2\r
-       blt             Lalign1_forward\r
-       beq             Lalign2_forward\r
-       bgt             Lalign3_forward\r
-\r
-Lalign1_forward:\r
-       /* the dest pointer is 1 byte off from src */\r
-       mov             r12, r2, lsr #2         /* number of words we should copy */\r
-       sub             r0, r0, #1\r
-\r
-       /* prime the copy */\r
-       ldrb    r4, [r0]                        /* load D[7:0] */\r
-\r
-Lalign1_forward_loop:\r
-       ldr             r3, [r1], #4            /* load S */\r
-       orr             r4, r4, r3, lsl #8      /* D[31:8] = S[24:0] */\r
-       str             r4, [r0], #4            /* save D */\r
-       mov             r4, r3, lsr #24         /* D[7:0] = S[31:25] */\r
-       subs    r12, r12, #1\r
-       bne             Lalign1_forward_loop\r
-\r
-       /* finish the copy off */\r
-       strb    r4, [r0], #1            /* save D[7:0] */\r
-\r
-       ands    r2, r2, #3\r
-       beq             Lexit\r
-       b               Lbytewise2\r
-\r
-Lalign2_forward:\r
-       /* the dest pointer is 2 bytes off from src */\r
-       mov             r12, r2, lsr #2         /* number of words we should copy */\r
-       sub             r0, r0, #2\r
-\r
-       /* prime the copy */\r
-       ldrh    r4, [r0]                        /* load D[15:0] */\r
-\r
-Lalign2_forward_loop:\r
-       ldr             r3, [r1], #4            /* load S */\r
-       orr             r4, r4, r3, lsl #16     /* D[31:16] = S[15:0] */\r
-       str             r4, [r0], #4            /* save D */\r
-       mov             r4, r3, lsr #16         /* D[15:0] = S[31:15] */\r
-       subs    r12, r12, #1\r
-       bne             Lalign2_forward_loop\r
-\r
-       /* finish the copy off */\r
-       strh    r4, [r0], #2            /* save D[15:0] */\r
-\r
-       ands    r2, r2, #3\r
-       beq             Lexit\r
-       b               Lbytewise2\r
-\r
-Lalign3_forward:\r
-       /* the dest pointer is 3 bytes off from src */\r
-       mov             r12, r2, lsr #2         /* number of words we should copy */\r
-       sub             r0, r0, #3\r
-\r
-       /* prime the copy */\r
-       ldr             r4, [r0]\r
-       and             r4, r4, #0x00ffffff     /* load D[24:0] */\r
-\r
-Lalign3_forward_loop:\r
-       ldr             r3, [r1], #4            /* load S */\r
-       orr             r4, r4, r3, lsl #24     /* D[31:25] = S[7:0] */\r
-       str             r4, [r0], #4            /* save D */\r
-       mov             r4, r3, lsr #8          /* D[24:0] = S[31:8] */\r
-       subs    r12, r12, #1\r
-       bne             Lalign3_forward_loop\r
-\r
-       /* finish the copy off */\r
-       strh    r4, [r0], #2            /* save D[15:0] */\r
-       mov             r4, r4, lsr #16\r
-       strb    r4, [r0], #1            /* save D[23:16] */\r
-\r
-       ands    r2, r2, #3\r
-       beq             Lexit\r
-       b               Lbytewise2\r
-\r
-Lexit:\r
-       ldmfd   sp!, {r0, r4, r5, r7, pc}\r
diff --git a/arm_chainloader/lib/arm_bzero.s b/arm_chainloader/lib/arm_bzero.s
deleted file mode 100644 (file)
index c3123a9..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-/*\r
- * Copyright (c) 2006, 2009 Apple Inc. All rights reserved.\r
- *\r
- * @APPLE_LICENSE_HEADER_START@\r
- * \r
- * This file contains Original Code and/or Modifications of Original Code\r
- * as defined in and that are subject to the Apple Public Source License\r
- * Version 2.0 (the 'License'). You may not use this file except in\r
- * compliance with the License. Please obtain a copy of the License at\r
- * http://www.opensource.apple.com/apsl/ and read it before using this\r
- * file.\r
- * \r
- * The Original Code and all software distributed under the License are\r
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER\r
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,\r
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,\r
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.\r
- * Please see the License for the specific language governing rights and\r
- * limitations under the License.\r
- * \r
- * @APPLE_LICENSE_HEADER_END@\r
- */\r
-       \r
-/* \r
- * A reasonably well-optimized bzero/memset. Should work equally well on arm11 and arm9 based\r
- * cores. \r
- *\r
- * The algorithm is to align the destination pointer on a 32 byte boundary and then\r
- * blast data 64 bytes at a time, in two stores of 32 bytes per loop.\r
- */\r
-       .text\r
-       .align 2\r
-\r
-       .globl memset\r
-/* void *memset(void *ptr, int c, size_t len); */\r
-memset:\r
-       /* move len into r1, unpack c into r2 */\r
-       mov             r3, r2\r
-       and             r1, r1, #0xff\r
-       orr             r1, r1, r1, lsl #8\r
-       orr             r2, r1, r1, lsl #16\r
-       mov             r1, r3\r
-       b               Lbzeroengine\r
-\r
-       .globl bzero\r
-/* void bzero(void *ptr, size_t len); */\r
-bzero:\r
-       /* zero out r2 so we can be just like memset(0) */\r
-       mov             r2, #0\r
-\r
-Lbzeroengine:\r
-       /* move the base pointer into r12 and leave r0 alone so that we return the original pointer */\r
-       mov             r12, r0\r
-\r
-       /* copy r2 into r3 for 64-bit stores */\r
-       mov             r3, r2\r
-\r
-       /* check for zero len */\r
-       cmp             r1, #0\r
-       bxeq    lr\r
-\r
-       /* fall back to a bytewise store for less than 32 bytes */\r
-       cmp             r1, #32\r
-       blt             L_bytewise\r
-\r
-       /* check for 32 byte unaligned ptr */\r
-       tst             r12, #0x1f\r
-       bne             L_unaligned\r
-\r
-       /* make sure we have more than 64 bytes to zero */\r
-       cmp             r1, #64\r
-       blt             L_lessthan64aligned\r
-\r
-       /* >= 64 bytes of len, 32 byte aligned */\r
-L_64ormorealigned:\r
-\r
-       /* we need some registers, avoid r7 (frame pointer) and r9 (thread register) */\r
-       stmfd   sp!, { r4-r6, r8, r10-r11 }\r
-       mov             r4, r2\r
-       mov             r5, r2\r
-       mov             r6, r2\r
-       mov             r8, r2\r
-       mov             r10, r2\r
-       mov             r11, r2\r
-\r
-       /* pre-subtract 64 from the len to avoid an extra compare in the loop */\r
-       sub             r1, r1, #64\r
-\r
-L_64loop:\r
-       stmia   r12!, { r2-r6, r8, r10-r11 }\r
-       subs    r1, r1, #64\r
-       stmia   r12!, { r2-r6, r8, r10-r11 }\r
-       bge             L_64loop\r
-\r
-       /* restore the saved regs */\r
-       ldmfd   sp!, { r4-r6, r8, r10-r11 }\r
-\r
-       /* check for completion (had previously subtracted an extra 64 from len) */\r
-       adds    r1, r1, #64\r
-       bxeq    lr\r
-\r
-L_lessthan64aligned:\r
-       /* do we have 16 or more bytes left */\r
-       cmp             r1, #16\r
-       stmgeia r12!, { r2-r3 }\r
-       stmgeia r12!, { r2-r3 }\r
-       subges  r1, r1, #16\r
-       bgt             L_lessthan64aligned\r
-       bxeq    lr\r
-\r
-L_lessthan16aligned:\r
-       /* store 0 to 15 bytes */\r
-       mov             r1, r1, lsl #28         /* move the remaining len bits [3:0] to the flags area of cpsr */\r
-       msr             cpsr_f, r1\r
-\r
-       stmmiia r12!, { r2-r3 }         /* n is set, store 8 bytes */\r
-       streq   r2, [r12], #4           /* z is set, store 4 bytes */\r
-       strcsh  r2, [r12], #2           /* c is set, store 2 bytes */\r
-       strvsb  r2, [r12], #1           /* v is set, store 1 byte */\r
-       bx              lr\r
-\r
-L_bytewise:\r
-       /* bytewise copy, 2 bytes at a time, alignment not guaranteed */        \r
-       subs    r1, r1, #2\r
-       strb    r2, [r12], #1\r
-       strplb  r2, [r12], #1\r
-       bhi             L_bytewise\r
-       bx              lr\r
-\r
-L_unaligned:\r
-       /* unaligned on 32 byte boundary, store 1-15 bytes until we're 16 byte aligned */\r
-       mov             r3, r12, lsl #28\r
-       rsb     r3, r3, #0x00000000\r
-       msr             cpsr_f, r3\r
-\r
-       strvsb  r2, [r12], #1           /* v is set, unaligned in the 1s column */\r
-       strcsh  r2, [r12], #2           /* c is set, unaligned in the 2s column */\r
-       streq   r2, [r12], #4           /* z is set, unaligned in the 4s column */\r
-       strmi   r2, [r12], #4           /* n is set, unaligned in the 8s column */\r
-       strmi   r2, [r12], #4\r
-\r
-       subs    r1, r1, r3, lsr #28\r
-       bxeq    lr\r
-\r
-       /* we had previously trashed r3, restore it */\r
-       mov             r3, r2\r
-\r
-       /* now make sure we're 32 byte aligned */\r
-       tst             r12, #(1 << 4)\r
-       stmneia r12!, { r2-r3 }\r
-       stmneia r12!, { r2-r3 }\r
-       subnes  r1, r1, #16\r
-\r
-       /* we're now aligned, check for >= 64 bytes left */\r
-       cmp             r1, #64\r
-       bge             L_64ormorealigned\r
-       b               L_lessthan64aligned
\ No newline at end of file
diff --git a/arm_chainloader/lib/arm_locore.s b/arm_chainloader/lib/arm_locore.s
deleted file mode 100644 (file)
index b9659b6..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-.text\r
-.align 2\r
diff --git a/arm_chainloader/lib/arm_memcmp.s b/arm_chainloader/lib/arm_memcmp.s
deleted file mode 100644 (file)
index 2427b54..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2009 Apple Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- * 
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- * 
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- * 
- * @APPLE_LICENSE_HEADER_END@
- */
-
-// ARM Assembly implementation of memcmp( ) from <string.h>
-// Uses Thumb2 if it is available, otherwise generates ARM code.
-//
-// -- Stephen Canon, August 2009
-//
-// The basic idea is to use word compares instead of byte compares as long as
-// at least four bytes remain to be compared.  However, because memcmp( )
-// compares the buffers as though they were big-endian unsigned integers, we
-// need to byte-reverse each word before comparing them.
-//
-// If the buffers are not word aligned, or they are shorter than four bytes,
-// we just use a simple byte comparison loop instead.
-//
-// int   bcmp(void *src1, void *src2, size_t length);
-// int memcmp(void *src1, void *src2, size_t length);
-
-    .text
-    .syntax unified
-    .code 32
-    .globl bcmp
-    .globl memcmp
-    .align 3
-bcmp:
-memcmp:
-
-#define _ARM_ARCH_6
-
-#ifdef _ARM_ARCH_6
-    subs    ip,     r2,  #4     // if length < 4
-    bmi     L_useByteCompares   // jump to the byte comparison loop
-    
-    orr     r3,     r0,  r1     // if the buffers are
-    tst     r3,          #3     // not word aligned
-    bne     L_useByteCompares   // jump to the byte comparison loop
-
-.align 3
-L_wordCompare:                  // Here we know that both buffers are word
-    ldr     r2,    [r0], #4     // aligned, and (length - 4) > 0, so at least
-    ldr     r3,    [r1], #4     // four bytes remain to be compared.  We load
-    subs    ip,          #4     // a word from each buffer, and byte reverse
-    bmi     L_lastWord          // the loaded words.  We also decrement the
-    rev     r2,     r2          // length by four and jump out of this loop if
-    rev     r3,     r3          // the result is negative.  Then we compare the
-    cmp     r2,     r3          // reversed words, and continue the loop only
-    beq     L_wordCompare       // if they are equal.
-L_wordsUnequal:
-    ite     hi                  // If the words compared unequal, return +/- 1
-    movhi   r0,     #1          // according to the result of the comparison.
-    movls   r0,     #-1         //
-    bx      lr                  //
-L_lastWord:
-    rev     r2,     r2          // If we just loaded the last complete words
-    rev     r3,     r3          // from the buffers, byte-reverse them and
-    cmp     r2,     r3          // compare.  If they are unequal, jump to the
-    bne     L_wordsUnequal      // return path.
-    add     r2,     ip,  #4     // Otherwise, fall into the cleanup code.
-#endif // _ARM_ARCH_6
-
-L_useByteCompares:
-    tst     r2,     r2          // If the length is exactly zero
-    beq     L_returnZero        // avoid doing any loads and return zero.
-    mov     r3,     r0
-.align 3
-L_byteCompareLoop:
-    ldrb    r0,    [r3], #1     // Load a byte from each buffer, and decrement
-    ldrb    ip,    [r1], #1     // the length by one.  If the decremented
-    subs    r2,     #1          // length is zero, exit the loop.  Otherwise
-    beq     L_lastByte          // subtract the loaded bytes; if their
-    subs    r0,     ip          // difference is zero, continue the comparison
-    beq     L_byteCompareLoop   // loop.  Otherwise, return their difference.
-    bx      lr
-L_returnZero:
-    mov     r0,     ip
-L_lastByte:
-    sub     r0,     ip          // Return the difference of the final bytes
-    bx      lr
diff --git a/arm_chainloader/lib/arm_strlen.s b/arm_chainloader/lib/arm_strlen.s
deleted file mode 100644 (file)
index 9dbcb24..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- * 
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- * 
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- * 
- * @APPLE_LICENSE_HEADER_END@
- */
-
-.text
-       .align 2
-
-       .globl strlen
-/* size_t strlen(const char *s); */
-strlen:
-       /* save the original pointer */
-       mov             r12, r0
-
-       /* see if the string is aligned */
-       ands    r3, r0, #3
-
-       /* load the first word, address rounded down */
-       bic             r0, r0, #3
-       ldr             r2, [r0], #4
-
-       /* skip the next part if the string is already aligned */
-       beq             Laligned
-
-Lunaligned:
-       /* unaligned pointer, mask out the bytes that we've read that we should be ignoring */
-       cmp             r3, #2
-       orr             r2, r2, #0x000000ff
-       orrge   r2, r2, #0x0000ff00
-       orrgt   r2, r2, #0x00ff0000
-
-Laligned:
-       /* load 0x01010101 into r1 */
-       mov             r1, #0x01
-       orr             r1, r1, r1, lsl #8
-       orr             r1, r1, r1, lsl #16
-
-Laligned_loop:
-       /* ((x - 0x01010101) & ~x & 0x80808080) == hasnull(word) */
-       sub             r3, r2, r1              /* x - 0x01010101 */
-       bic             r3, r3, r2              /* above & ~x */
-       tst             r3, r1, lsl #7  /* above & 0x80808080 */
-       ldreq   r2, [r0], #4    /* load next word */
-       beq             Laligned_loop
-
-       /* we found a nullbyte */
-       /* r0 (ptr) has overshot by up to 4 bytes, so subtract off until we find a nullbyte */
-       sub             r0, r0, #1
-       tst             r2, #0x000000ff
-       subeq   r0, r0, #1
-       tstne   r2, #0x0000ff00
-       subeq   r0, r0, #1
-       tstne   r2, #0x00ff0000
-       subeq   r0, r0, #1
-
-Lexit:
-       /* len = ptr - original pointer */
-       sub             r0, r0, r12
-       bx              lr
diff --git a/arm_chainloader/lib/string_misc.c b/arm_chainloader/lib/string_misc.c
deleted file mode 100644 (file)
index ca9454f..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
- * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- * 
- * @APPLE_LICENSE_HEADER_END@
- */
-/*
- * Copyright (c) 1990, 1993
- *     The Regents of the University of California.  All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * Chris Torek.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *     This product includes software developed by the University of
- *     California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <string.h>
-
-void *
-memchr(s, c, n)
-       const void *s;
-       register unsigned char c;
-       register size_t n;
-{
-       if (n != 0) {
-               register const unsigned char *p = s;
-
-               do {
-                       if (*p++ == c)
-                               return ((void *)(p - 1));
-               } while (--n != 0);
-       }
-       return (NULL);
-}
-
-
-size_t
-strnlen(const char *s, size_t maxlen)
-{
-       size_t len;
-
-       for (len = 0; len < maxlen; len++, s++) {
-               if (!*s)
-                       break;
-       }
-       return (len);
-}
\ No newline at end of file
diff --git a/arm_chainloader/lib_armv6/arm_bcopy.s b/arm_chainloader/lib_armv6/arm_bcopy.s
new file mode 100644 (file)
index 0000000..2155429
--- /dev/null
@@ -0,0 +1,397 @@
+/*\r
+ * Copyright (c) 2006, 2009 Apple Inc. All rights reserved.\r
+ *\r
+ * @APPLE_LICENSE_HEADER_START@\r
+ * \r
+ * This file contains Original Code and/or Modifications of Original Code\r
+ * as defined in and that are subject to the Apple Public Source License\r
+ * Version 2.0 (the 'License'). You may not use this file except in\r
+ * compliance with the License. Please obtain a copy of the License at\r
+ * http://www.opensource.apple.com/apsl/ and read it before using this\r
+ * file.\r
+ * \r
+ * The Original Code and all software distributed under the License are\r
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER\r
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,\r
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,\r
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.\r
+ * Please see the License for the specific language governing rights and\r
+ * limitations under the License.\r
+ * \r
+ * @APPLE_LICENSE_HEADER_END@\r
+ */\r
+\r
+.text\r
+.align 2\r
+       \r
+       .globl memcpy\r
+       .globl bcopy\r
+       .globl memmove\r
+\r
+bcopy:         /* void bcopy(const void *src, void *dest, size_t len); */\r
+       mov             r3, r0\r
+       mov             r0, r1\r
+       mov             r1, r3\r
+\r
+memcpy:                /* void *memcpy(void *dest, const void *src, size_t len); */\r
+memmove:       /* void *memmove(void *dest, const void *src, size_t len); */\r
+       /* check for zero len or if the pointers are the same */\r
+       cmp             r2, #0\r
+       cmpne   r0, r1\r
+       bxeq    lr\r
+\r
+       /* save r0 (return value), r4 (scratch), and r5 (scratch) */\r
+       stmfd   sp!, { r0, r4, r5, r7, lr }\r
+       add     r7, sp, #12\r
+       \r
+       /* check for overlap. r3 <- distance between src & dest */\r
+       subhs   r3, r0, r1\r
+       sublo   r3, r1, r0\r
+       cmp             r3, r2                  /* if distance(src, dest) < len, we have overlap */\r
+       blo             Loverlap\r
+\r
+Lnormalforwardcopy:\r
+       /* are src and dest dissimilarly word aligned? */\r
+       mov             r12, r0, lsl #30\r
+       cmp             r12, r1, lsl #30\r
+       bne             Lnonwordaligned_forward\r
+\r
+       /* if len < 64, do a quick forward copy */\r
+       cmp             r2, #64\r
+       blt             Lsmallforwardcopy\r
+\r
+       /* check for 16 byte src/dest unalignment */\r
+       tst             r0, #0xf\r
+       bne             Lsimilarlyunaligned\r
+\r
+       /* check for 32 byte dest unalignment */\r
+       tst             r0, #(1<<4)\r
+       bne             Lunaligned_32\r
+\r
+Lmorethan64_aligned:\r
+       /* save some more registers to use in the copy */\r
+       stmfd   sp!, { r6, r8, r10, r11 }\r
+\r
+       /* pre-subtract 64 from the len counter to avoid an extra compare in the loop */\r
+       sub             r2, r2, #64\r
+\r
+L64loop:\r
+       /* copy 64 bytes at a time */\r
+       ldmia   r1!, { r3, r4, r5, r6, r8, r10, r11, r12 }\r
+#ifdef _ARM_ARCH_6\r
+       pld             [r1, #32]\r
+#endif\r
+       stmia   r0!, { r3, r4, r5, r6, r8, r10, r11, r12 }\r
+       ldmia   r1!, { r3, r4, r5, r6, r8, r10, r11, r12 }\r
+       subs    r2, r2, #64\r
+#ifdef _ARM_ARCH_6\r
+       pld             [r1, #32]\r
+#endif\r
+       stmia   r0!, { r3, r4, r5, r6, r8, r10, r11, r12 }\r
+       bge             L64loop\r
+\r
+       /* restore the scratch registers we just saved */\r
+       ldmfd   sp!, { r6, r8, r10, r11 }\r
+\r
+       /* fix up the len counter (previously subtracted an extra 64 from it) and test for completion */\r
+       adds    r2, r2, #64\r
+       beq             Lexit\r
+\r
+Llessthan64_aligned:\r
+       /* copy 16 bytes at a time until we have < 16 bytes */\r
+       cmp             r2, #16\r
+       ldmgeia r1!, { r3, r4, r5, r12 }\r
+       stmgeia r0!, { r3, r4, r5, r12 }\r
+       subges  r2, r2, #16\r
+       bgt             Llessthan64_aligned\r
+       beq             Lexit\r
+       \r
+Llessthan16_aligned:\r
+       mov             r2, r2, lsl #28\r
+       msr             cpsr_f, r2\r
+\r
+       ldmmiia r1!, { r2, r3 }\r
+       ldreq   r4, [r1], #4\r
+       ldrcsh  r5, [r1], #2\r
+       ldrvsb  r12, [r1], #1\r
+\r
+       stmmiia r0!, { r2, r3 }\r
+       streq   r4, [r0], #4\r
+       strcsh  r5, [r0], #2\r
+       strvsb  r12, [r0], #1\r
+       b               Lexit\r
+\r
+Lsimilarlyunaligned:\r
+       /* both src and dest are unaligned in similar ways, align to dest on 32 byte boundary */\r
+       mov             r12, r0, lsl #28\r
+       rsb             r12, r12, #0\r
+       msr             cpsr_f, r12\r
+\r
+       ldrvsb  r3, [r1], #1\r
+       ldrcsh  r4, [r1], #2\r
+       ldreq   r5, [r1], #4\r
+\r
+       strvsb  r3, [r0], #1\r
+       strcsh  r4, [r0], #2\r
+       streq   r5, [r0], #4\r
+\r
+       ldmmiia r1!, { r3, r4 }\r
+       stmmiia r0!, { r3, r4 }\r
+\r
+       subs    r2, r2, r12, lsr #28\r
+       beq             Lexit\r
+\r
+Lunaligned_32:\r
+       /* bring up to dest 32 byte alignment */\r
+       tst             r0, #(1 << 4)\r
+       ldmneia r1!, { r3, r4, r5, r12 }\r
+       stmneia r0!, { r3, r4, r5, r12 }\r
+       subne   r2, r2, #16\r
+\r
+       /* we should now be aligned, see what copy method we should use */\r
+       cmp             r2, #64\r
+       bge             Lmorethan64_aligned\r
+       b               Llessthan64_aligned\r
+       \r
+Lbytewise2:\r
+       /* copy 2 bytes at a time */\r
+       subs    r2, r2, #2\r
+\r
+       ldrb    r3, [r1], #1\r
+       ldrplb  r4, [r1], #1\r
+\r
+       strb    r3, [r0], #1\r
+       strplb  r4, [r0], #1\r
+\r
+       bhi             Lbytewise2\r
+       b               Lexit\r
+\r
+Lbytewise:\r
+       /* simple bytewise forward copy */\r
+       ldrb    r3, [r1], #1\r
+       subs    r2, r2, #1\r
+       strb    r3, [r0], #1\r
+       bne             Lbytewise\r
+       b               Lexit\r
+\r
+Lsmallforwardcopy:\r
+       /* src and dest are word aligned similarly, less than 64 bytes to copy */\r
+       cmp             r2, #4\r
+       blt             Lbytewise2\r
+\r
+       /* bytewise copy until word aligned */\r
+       tst             r1, #3\r
+Lwordalignloop:\r
+       ldrneb  r3, [r1], #1\r
+       strneb  r3, [r0], #1\r
+       subne   r2, r2, #1\r
+       tstne   r1, #3\r
+       bne             Lwordalignloop\r
+\r
+       cmp             r2, #16\r
+       bge             Llessthan64_aligned\r
+       blt             Llessthan16_aligned\r
+\r
+Loverlap:\r
+       /* src and dest overlap in some way, len > 0 */\r
+       cmp             r0, r1                          /* if dest > src */\r
+       bhi             Loverlap_srclower\r
+\r
+Loverlap_destlower:\r
+       /* dest < src, see if we can still do a fast forward copy or fallback to slow forward copy */\r
+       cmp             r3, #64\r
+       bge             Lnormalforwardcopy      /* overlap is greater than one stride of the copy, use normal copy */\r
+\r
+       cmp             r3, #2\r
+       bge             Lbytewise2\r
+       b               Lbytewise\r
+\r
+       /* the following routines deal with having to copy in the reverse direction */\r
+Loverlap_srclower:\r
+       /* src < dest, with overlap */\r
+\r
+       /* src += len; dest += len; */\r
+       add             r0, r0, r2\r
+       add             r1, r1, r2\r
+\r
+       /* we have to copy in reverse no matter what, test if we can we use a large block reverse copy */\r
+       cmp             r2, #64                         /* less than 64 bytes to copy? */\r
+       cmpgt   r3, #64                         /* less than 64 bytes of nonoverlap? */\r
+       blt             Lbytewise_reverse\r
+\r
+       /* test of src and dest are nonword aligned differently */\r
+       mov             r3, r0, lsl #30\r
+       cmp             r3, r1, lsl #30\r
+       bne             Lbytewise_reverse\r
+\r
+       /* test if src and dest are non word aligned or dest is non 16 byte aligned */\r
+       tst             r0, #0xf\r
+       bne             Lunaligned_reverse_similarly\r
+\r
+       /* test for dest 32 byte alignment */\r
+       tst             r0, #(1<<4)\r
+       bne             Lunaligned_32_reverse_similarly\r
+\r
+       /* 64 byte reverse block copy, src and dest aligned */\r
+Lmorethan64_aligned_reverse:\r
+       /* save some more registers to use in the copy */\r
+       stmfd   sp!, { r6, r8, r10, r11 }\r
+\r
+       /* pre-subtract 64 from the len counter to avoid an extra compare in the loop */\r
+       sub             r2, r2, #64\r
+\r
+L64loop_reverse:\r
+       /* copy 64 bytes at a time */\r
+       ldmdb   r1!, { r3, r4, r5, r6, r8, r10, r11, r12 }\r
+#ifdef _ARM_ARCH_6\r
+       pld             [r1, #-32]\r
+#endif\r
+       stmdb   r0!, { r3, r4, r5, r6, r8, r10, r11, r12 }      \r
+       ldmdb   r1!, { r3, r4, r5, r6, r8, r10, r11, r12 }      \r
+       subs    r2, r2, #64\r
+#ifdef _ARM_ARCH_6\r
+       pld             [r1, #-32]\r
+#endif\r
+       stmdb   r0!, { r3, r4, r5, r6, r8, r10, r11, r12 }      \r
+       bge             L64loop_reverse\r
+\r
+       /* restore the scratch registers we just saved */\r
+       ldmfd   sp!, { r6, r8, r10, r11 }\r
+\r
+       /* fix up the len counter (previously subtracted an extra 64 from it) and test for completion */\r
+       adds    r2, r2, #64\r
+       beq             Lexit\r
+\r
+Lbytewise_reverse:\r
+       ldrb    r3, [r1, #-1]!\r
+       strb    r3, [r0, #-1]!\r
+       subs    r2, r2, #1\r
+       bne             Lbytewise_reverse\r
+       b               Lexit\r
+\r
+Lunaligned_reverse_similarly:\r
+       /* both src and dest are unaligned in similar ways, align to dest on 32 byte boundary */\r
+       mov             r12, r0, lsl #28\r
+       msr             cpsr_f, r12\r
+\r
+       ldrvsb  r3, [r1, #-1]!\r
+       ldrcsh  r4, [r1, #-2]!\r
+       ldreq   r5, [r1, #-4]!\r
+\r
+       strvsb  r3, [r0, #-1]!\r
+       strcsh  r4, [r0, #-2]!\r
+       streq   r5, [r0, #-4]!\r
+\r
+       ldmmidb r1!, { r3, r4 }\r
+       stmmidb r0!, { r3, r4 }\r
+\r
+       subs    r2, r2, r12, lsr #28\r
+       beq             Lexit\r
+\r
+Lunaligned_32_reverse_similarly:\r
+       /* bring up to dest 32 byte alignment */\r
+       tst             r0, #(1 << 4)\r
+       ldmnedb r1!, { r3, r4, r5, r12 }\r
+       stmnedb r0!, { r3, r4, r5, r12 }\r
+       subne   r2, r2, #16\r
+\r
+       /* we should now be aligned, see what copy method we should use */\r
+       cmp             r2, #64\r
+       bge             Lmorethan64_aligned_reverse\r
+       b               Lbytewise_reverse\r
+\r
+       /* the following routines deal with non word aligned copies */\r
+Lnonwordaligned_forward:\r
+       cmp             r2, #8\r
+       blt             Lbytewise2                      /* not worth the effort with less than 24 bytes total */\r
+\r
+       /* bytewise copy until src word aligned */\r
+       tst             r1, #3\r
+Lwordalignloop2:\r
+       ldrneb  r3, [r1], #1\r
+       strneb  r3, [r0], #1\r
+       subne   r2, r2, #1\r
+       tstne   r1, #3\r
+       bne             Lwordalignloop2\r
+\r
+       /* figure out how the src and dest are unaligned */\r
+       and             r3, r0, #3\r
+       cmp             r3, #2\r
+       blt             Lalign1_forward\r
+       beq             Lalign2_forward\r
+       bgt             Lalign3_forward\r
+\r
+Lalign1_forward:\r
+       /* the dest pointer is 1 byte off from src */\r
+       mov             r12, r2, lsr #2         /* number of words we should copy */\r
+       sub             r0, r0, #1\r
+\r
+       /* prime the copy */\r
+       ldrb    r4, [r0]                        /* load D[7:0] */\r
+\r
+Lalign1_forward_loop:\r
+       ldr             r3, [r1], #4            /* load S */\r
+       orr             r4, r4, r3, lsl #8      /* D[31:8] = S[24:0] */\r
+       str             r4, [r0], #4            /* save D */\r
+       mov             r4, r3, lsr #24         /* D[7:0] = S[31:25] */\r
+       subs    r12, r12, #1\r
+       bne             Lalign1_forward_loop\r
+\r
+       /* finish the copy off */\r
+       strb    r4, [r0], #1            /* save D[7:0] */\r
+\r
+       ands    r2, r2, #3\r
+       beq             Lexit\r
+       b               Lbytewise2\r
+\r
+Lalign2_forward:\r
+       /* the dest pointer is 2 bytes off from src */\r
+       mov             r12, r2, lsr #2         /* number of words we should copy */\r
+       sub             r0, r0, #2\r
+\r
+       /* prime the copy */\r
+       ldrh    r4, [r0]                        /* load D[15:0] */\r
+\r
+Lalign2_forward_loop:\r
+       ldr             r3, [r1], #4            /* load S */\r
+       orr             r4, r4, r3, lsl #16     /* D[31:16] = S[15:0] */\r
+       str             r4, [r0], #4            /* save D */\r
+       mov             r4, r3, lsr #16         /* D[15:0] = S[31:15] */\r
+       subs    r12, r12, #1\r
+       bne             Lalign2_forward_loop\r
+\r
+       /* finish the copy off */\r
+       strh    r4, [r0], #2            /* save D[15:0] */\r
+\r
+       ands    r2, r2, #3\r
+       beq             Lexit\r
+       b               Lbytewise2\r
+\r
+Lalign3_forward:\r
+       /* the dest pointer is 3 bytes off from src */\r
+       mov             r12, r2, lsr #2         /* number of words we should copy */\r
+       sub             r0, r0, #3\r
+\r
+       /* prime the copy */\r
+       ldr             r4, [r0]\r
+       and             r4, r4, #0x00ffffff     /* load D[24:0] */\r
+\r
+Lalign3_forward_loop:\r
+       ldr             r3, [r1], #4            /* load S */\r
+       orr             r4, r4, r3, lsl #24     /* D[31:25] = S[7:0] */\r
+       str             r4, [r0], #4            /* save D */\r
+       mov             r4, r3, lsr #8          /* D[24:0] = S[31:8] */\r
+       subs    r12, r12, #1\r
+       bne             Lalign3_forward_loop\r
+\r
+       /* finish the copy off */\r
+       strh    r4, [r0], #2            /* save D[15:0] */\r
+       mov             r4, r4, lsr #16\r
+       strb    r4, [r0], #1            /* save D[23:16] */\r
+\r
+       ands    r2, r2, #3\r
+       beq             Lexit\r
+       b               Lbytewise2\r
+\r
+Lexit:\r
+       ldmfd   sp!, {r0, r4, r5, r7, pc}\r
diff --git a/arm_chainloader/lib_armv6/arm_bzero.s b/arm_chainloader/lib_armv6/arm_bzero.s
new file mode 100644 (file)
index 0000000..c3123a9
--- /dev/null
@@ -0,0 +1,157 @@
+/*\r
+ * Copyright (c) 2006, 2009 Apple Inc. All rights reserved.\r
+ *\r
+ * @APPLE_LICENSE_HEADER_START@\r
+ * \r
+ * This file contains Original Code and/or Modifications of Original Code\r
+ * as defined in and that are subject to the Apple Public Source License\r
+ * Version 2.0 (the 'License'). You may not use this file except in\r
+ * compliance with the License. Please obtain a copy of the License at\r
+ * http://www.opensource.apple.com/apsl/ and read it before using this\r
+ * file.\r
+ * \r
+ * The Original Code and all software distributed under the License are\r
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER\r
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,\r
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,\r
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.\r
+ * Please see the License for the specific language governing rights and\r
+ * limitations under the License.\r
+ * \r
+ * @APPLE_LICENSE_HEADER_END@\r
+ */\r
+       \r
+/* \r
+ * A reasonably well-optimized bzero/memset. Should work equally well on arm11 and arm9 based\r
+ * cores. \r
+ *\r
+ * The algorithm is to align the destination pointer on a 32 byte boundary and then\r
+ * blast data 64 bytes at a time, in two stores of 32 bytes per loop.\r
+ */\r
+       .text\r
+       .align 2\r
+\r
+       .globl memset\r
+/* void *memset(void *ptr, int c, size_t len); */\r
+memset:\r
+       /* move len into r1, unpack c into r2 */\r
+       mov             r3, r2\r
+       and             r1, r1, #0xff\r
+       orr             r1, r1, r1, lsl #8\r
+       orr             r2, r1, r1, lsl #16\r
+       mov             r1, r3\r
+       b               Lbzeroengine\r
+\r
+       .globl bzero\r
+/* void bzero(void *ptr, size_t len); */\r
+bzero:\r
+       /* zero out r2 so we can be just like memset(0) */\r
+       mov             r2, #0\r
+\r
+Lbzeroengine:\r
+       /* move the base pointer into r12 and leave r0 alone so that we return the original pointer */\r
+       mov             r12, r0\r
+\r
+       /* copy r2 into r3 for 64-bit stores */\r
+       mov             r3, r2\r
+\r
+       /* check for zero len */\r
+       cmp             r1, #0\r
+       bxeq    lr\r
+\r
+       /* fall back to a bytewise store for less than 32 bytes */\r
+       cmp             r1, #32\r
+       blt             L_bytewise\r
+\r
+       /* check for 32 byte unaligned ptr */\r
+       tst             r12, #0x1f\r
+       bne             L_unaligned\r
+\r
+       /* make sure we have more than 64 bytes to zero */\r
+       cmp             r1, #64\r
+       blt             L_lessthan64aligned\r
+\r
+       /* >= 64 bytes of len, 32 byte aligned */\r
+L_64ormorealigned:\r
+\r
+       /* we need some registers, avoid r7 (frame pointer) and r9 (thread register) */\r
+       stmfd   sp!, { r4-r6, r8, r10-r11 }\r
+       mov             r4, r2\r
+       mov             r5, r2\r
+       mov             r6, r2\r
+       mov             r8, r2\r
+       mov             r10, r2\r
+       mov             r11, r2\r
+\r
+       /* pre-subtract 64 from the len to avoid an extra compare in the loop */\r
+       sub             r1, r1, #64\r
+\r
+L_64loop:\r
+       stmia   r12!, { r2-r6, r8, r10-r11 }\r
+       subs    r1, r1, #64\r
+       stmia   r12!, { r2-r6, r8, r10-r11 }\r
+       bge             L_64loop\r
+\r
+       /* restore the saved regs */\r
+       ldmfd   sp!, { r4-r6, r8, r10-r11 }\r
+\r
+       /* check for completion (had previously subtracted an extra 64 from len) */\r
+       adds    r1, r1, #64\r
+       bxeq    lr\r
+\r
+L_lessthan64aligned:\r
+       /* do we have 16 or more bytes left */\r
+       cmp             r1, #16\r
+       stmgeia r12!, { r2-r3 }\r
+       stmgeia r12!, { r2-r3 }\r
+       subges  r1, r1, #16\r
+       bgt             L_lessthan64aligned\r
+       bxeq    lr\r
+\r
+L_lessthan16aligned:\r
+       /* store 0 to 15 bytes */\r
+       mov             r1, r1, lsl #28         /* move the remaining len bits [3:0] to the flags area of cpsr */\r
+       msr             cpsr_f, r1\r
+\r
+       stmmiia r12!, { r2-r3 }         /* n is set, store 8 bytes */\r
+       streq   r2, [r12], #4           /* z is set, store 4 bytes */\r
+       strcsh  r2, [r12], #2           /* c is set, store 2 bytes */\r
+       strvsb  r2, [r12], #1           /* v is set, store 1 byte */\r
+       bx              lr\r
+\r
+L_bytewise:\r
+       /* bytewise store, 2 bytes at a time, alignment not guaranteed */\r
+       subs    r1, r1, #2\r
+       strb    r2, [r12], #1\r
+       strplb  r2, [r12], #1\r
+       bhi             L_bytewise\r
+       bx              lr\r
+\r
+L_unaligned:\r
+       /* unaligned on 32 byte boundary, store 1-15 bytes until we're 16 byte aligned */\r
+       mov             r3, r12, lsl #28\r
+       rsb     r3, r3, #0x00000000\r
+       msr             cpsr_f, r3\r
+\r
+       strvsb  r2, [r12], #1           /* v is set, unaligned in the 1s column */\r
+       strcsh  r2, [r12], #2           /* c is set, unaligned in the 2s column */\r
+       streq   r2, [r12], #4           /* z is set, unaligned in the 4s column */\r
+       strmi   r2, [r12], #4           /* n is set, unaligned in the 8s column */\r
+       strmi   r2, [r12], #4\r
+\r
+       subs    r1, r1, r3, lsr #28\r
+       bxeq    lr\r
+\r
+       /* we had previously trashed r3, restore it */\r
+       mov             r3, r2\r
+\r
+       /* now make sure we're 32 byte aligned */\r
+       tst             r12, #(1 << 4)\r
+       stmneia r12!, { r2-r3 }\r
+       stmneia r12!, { r2-r3 }\r
+       subnes  r1, r1, #16\r
+\r
+       /* we're now aligned, check for >= 64 bytes left */\r
+       cmp             r1, #64\r
+       bge             L_64ormorealigned\r
+       b               L_lessthan64aligned
\ No newline at end of file
diff --git a/arm_chainloader/lib_armv6/arm_locore.s b/arm_chainloader/lib_armv6/arm_locore.s
new file mode 100644 (file)
index 0000000..b9659b6
--- /dev/null
@@ -0,0 +1,2 @@
+.text\r
+.align 2\r
diff --git a/arm_chainloader/lib_armv6/arm_memcmp.s b/arm_chainloader/lib_armv6/arm_memcmp.s
new file mode 100644 (file)
index 0000000..2427b54
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+// ARM Assembly implementation of memcmp( ) from <string.h>
+// Uses Thumb2 if it is available, otherwise generates ARM code.
+//
+// -- Stephen Canon, August 2009
+//
+// The basic idea is to use word compares instead of byte compares as long as
+// at least four bytes remain to be compared.  However, because memcmp( )
+// compares the buffers as though they were big-endian unsigned integers, we
+// need to byte-reverse each word before comparing them.
+//
+// If the buffers are not word aligned, or they are shorter than four bytes,
+// we just use a simple byte comparison loop instead.
+//
+// int   bcmp(const void *src1, const void *src2, size_t length);
+// int memcmp(const void *src1, const void *src2, size_t length);
+
+    .text
+    .syntax unified
+    .code 32
+    .globl bcmp
+    .globl memcmp
+    .align 3
+bcmp:
+memcmp:
+
+#define _ARM_ARCH_6
+
+#ifdef _ARM_ARCH_6
+    subs    ip,     r2,  #4     // if length < 4
+    bmi     L_useByteCompares   // jump to the byte comparison loop
+    
+    orr     r3,     r0,  r1     // if the buffers are
+    tst     r3,          #3     // not word aligned
+    bne     L_useByteCompares   // jump to the byte comparison loop
+
+.align 3
+L_wordCompare:                  // Here we know that both buffers are word
+    ldr     r2,    [r0], #4     // aligned, and (length - 4) > 0, so at least
+    ldr     r3,    [r1], #4     // four bytes remain to be compared.  We load
+    subs    ip,          #4     // a word from each buffer, and byte reverse
+    bmi     L_lastWord          // the loaded words.  We also decrement the
+    rev     r2,     r2          // length by four and jump out of this loop if
+    rev     r3,     r3          // the result is negative.  Then we compare the
+    cmp     r2,     r3          // reversed words, and continue the loop only
+    beq     L_wordCompare       // if they are equal.
+L_wordsUnequal:
+    ite     hi                  // If the words compared unequal, return +/- 1
+    movhi   r0,     #1          // according to the result of the comparison.
+    movls   r0,     #-1         //
+    bx      lr                  //
+L_lastWord:
+    rev     r2,     r2          // If we just loaded the last complete words
+    rev     r3,     r3          // from the buffers, byte-reverse them and
+    cmp     r2,     r3          // compare.  If they are unequal, jump to the
+    bne     L_wordsUnequal      // return path.
+    add     r2,     ip,  #4     // Otherwise, fall into the cleanup code.
+#endif // _ARM_ARCH_6
+
+L_useByteCompares:
+    tst     r2,     r2          // If the length is exactly zero
+    beq     L_returnZero        // avoid doing any loads and return zero.
+    mov     r3,     r0
+.align 3
+L_byteCompareLoop:
+    ldrb    r0,    [r3], #1     // Load a byte from each buffer, and decrement
+    ldrb    ip,    [r1], #1     // the length by one.  If the decremented
+    subs    r2,     #1          // length is zero, exit the loop.  Otherwise
+    beq     L_lastByte          // subtract the loaded bytes; if their
+    subs    r0,     ip          // difference is zero, continue the comparison
+    beq     L_byteCompareLoop   // loop.  Otherwise, return their difference.
+    bx      lr
+L_returnZero:
+    mov     r0,     ip
+L_lastByte:
+    sub     r0,     ip          // Return the difference of the final bytes
+    bx      lr
diff --git a/arm_chainloader/lib_armv6/arm_strlen.s b/arm_chainloader/lib_armv6/arm_strlen.s
new file mode 100644 (file)
index 0000000..9dbcb24
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+.text
+       .align 2
+
+       .globl strlen
+/* size_t strlen(const char *s); */
+strlen:
+       /* save the original pointer */
+       mov             r12, r0
+
+       /* see if the string is aligned */
+       ands    r3, r0, #3
+
+       /* load the first word, address rounded down */
+       bic             r0, r0, #3
+       ldr             r2, [r0], #4
+
+       /* skip the next part if the string is already aligned */
+       beq             Laligned
+
+Lunaligned:
+       /* unaligned pointer, mask out the bytes that we've read that we should be ignoring */
+       cmp             r3, #2
+       orr             r2, r2, #0x000000ff
+       orrge   r2, r2, #0x0000ff00
+       orrgt   r2, r2, #0x00ff0000
+
+Laligned:
+       /* load 0x01010101 into r1 */
+       mov             r1, #0x01
+       orr             r1, r1, r1, lsl #8
+       orr             r1, r1, r1, lsl #16
+
+Laligned_loop:
+       /* ((x - 0x01010101) & ~x & 0x80808080) == hasnull(word) */
+       sub             r3, r2, r1              /* x - 0x01010101 */
+       bic             r3, r3, r2              /* above & ~x */
+       tst             r3, r1, lsl #7  /* above & 0x80808080 */
+       ldreq   r2, [r0], #4    /* load next word */
+       beq             Laligned_loop
+
+       /* we found a nullbyte */
+       /* r0 (ptr) has overshot by up to 4 bytes, so subtract off until we find a nullbyte */
+       sub             r0, r0, #1
+       tst             r2, #0x000000ff
+       subeq   r0, r0, #1
+       tstne   r2, #0x0000ff00
+       subeq   r0, r0, #1
+       tstne   r2, #0x00ff0000
+       subeq   r0, r0, #1
+
+Lexit:
+       /* len = ptr - original pointer */
+       sub             r0, r0, r12
+       bx              lr
diff --git a/arm_chainloader/lib_armv6/string_misc.c b/arm_chainloader/lib_armv6/string_misc.c
new file mode 100644 (file)
index 0000000..ca9454f
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License").  You may not use this file except in compliance with the
+ * License.  Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ * 
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * Copyright (c) 1990, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *     This product includes software developed by the University of
+ *     California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+void *
+memchr(s, c, n)
+       const void *s;
+       register unsigned char c;
+       register size_t n;
+{
+       if (n != 0) {
+               register const unsigned char *p = s;
+
+               do {
+                       if (*p++ == c)
+                               return ((void *)(p - 1));
+               } while (--n != 0);
+       }
+       return (NULL);
+}
+
+
+size_t
+strnlen(const char *s, size_t maxlen)
+{
+       size_t len;
+
+       for (len = 0; len < maxlen; len++, s++) {
+               if (!*s)
+                       break;
+       }
+       return (len);
+}
\ No newline at end of file
index d83eefc..1b7ad8b 100644 (file)
@@ -2,25 +2,10 @@
 #include <chainloader.h>
 #include <hardware.h>
 
-extern uintptr_t* __init_array_start;
-extern uintptr_t* __init_array_end;
 extern uintptr_t* _end;
 
 #define logf(fmt, ...) printf("[BRINGUP:%s]: " fmt, __FUNCTION__, ##__VA_ARGS__);
 
-static void cxx_init() {
-       unsigned ctor_count = (unsigned)(&__init_array_end - &__init_array_start);
-       void (*static_ctor)();
-
-       logf("calling %d static constructors (0x%X - 0x%X) ...\n", ctor_count, &__init_array_start, &__init_array_end);
-
-       for (unsigned i = 0; i < ctor_count; i++) {
-               uintptr_t* ptr = (((uintptr_t*)&__init_array_start) + i);
-               static_ctor = (void*)*ptr;
-               static_ctor();
-       }
-}
-
 static void heap_init() {
        void* start_of_heap = (void*)MEM_HEAP_START;
        size_t hs = MEM_HEAP_SIZE;
@@ -39,15 +24,7 @@ void main() {
        heap_init();
 
        /* c++ runtime */
-       cxx_init();
+       __cxx_init();
 
        panic("Nothing else to do!");
-
-#if 0
-       printf("Done ");
-       for(;;) {
-               printf(".");
-               udelay(1000000);
-       }
-#endif
 }
index 613df9c..9561e2e 100644 (file)
@@ -17,7 +17,7 @@ ARM initialization stuff.
 \r
 =============================================================================*/\r
 \r
-#include <common.h>\r
+#include <lib/runtime.h>\r
 #include "hardware.h"\r
 \r
 \r
@@ -215,7 +215,8 @@ static void arm_load_code() {
 }\r
 \r
 static void arm_pmap_enter(uint32_t bus_address, uint32_t arm_address) {\r
-       volatile uint32_t* tte = &ARM_TRANSLATE;\r
+       volatile uint32_t* tte = reinterpret_cast<volatile uint32_t*>(&ARM_TRANSLATE);\r
+\r
        uint32_t index = arm_address >> 24;\r
        uint32_t pte = bus_address >> 21;\r
 \r
index 0b4435f..122b6d2 100644 (file)
@@ -17,7 +17,7 @@ First stage monitor.
 \r
 =============================================================================*/\r
 \r
-#include <common.h>\r
+#include <lib/runtime.h>\r
 #include "hardware.h"\r
 \r
 /*\r
diff --git a/build/bootcode.bin b/build/bootcode.bin
new file mode 100644 (file)
index 0000000..3518fc6
Binary files /dev/null and b/build/bootcode.bin differ
diff --git a/build/bootcode.bin.elf b/build/bootcode.bin.elf
new file mode 100644 (file)
index 0000000..68f8501
Binary files /dev/null and b/build/bootcode.bin.elf differ
diff --git a/build/vc4-objects/arm_loader.o b/build/vc4-objects/arm_loader.o
new file mode 100644 (file)
index 0000000..01371cf
Binary files /dev/null and b/build/vc4-objects/arm_loader.o differ
diff --git a/build/vc4-objects/arm_monitor.o b/build/vc4-objects/arm_monitor.o
new file mode 100644 (file)
index 0000000..bc086df
Binary files /dev/null and b/build/vc4-objects/arm_monitor.o differ
diff --git a/build/vc4-objects/chainloader_inc.o b/build/vc4-objects/chainloader_inc.o
new file mode 100644 (file)
index 0000000..82087ce
Binary files /dev/null and b/build/vc4-objects/chainloader_inc.o differ
diff --git a/build/vc4-objects/lib/cxx_runtime.o b/build/vc4-objects/lib/cxx_runtime.o
new file mode 100644 (file)
index 0000000..b3f4305
Binary files /dev/null and b/build/vc4-objects/lib/cxx_runtime.o differ
diff --git a/build/vc4-objects/lib/memcpy.o b/build/vc4-objects/lib/memcpy.o
new file mode 100644 (file)
index 0000000..1fa5049
Binary files /dev/null and b/build/vc4-objects/lib/memcpy.o differ
diff --git a/build/vc4-objects/lib/panic.o b/build/vc4-objects/lib/panic.o
new file mode 100644 (file)
index 0000000..e7c2502
Binary files /dev/null and b/build/vc4-objects/lib/panic.o differ
diff --git a/build/vc4-objects/lib/udelay.o b/build/vc4-objects/lib/udelay.o
new file mode 100644 (file)
index 0000000..332124a
Binary files /dev/null and b/build/vc4-objects/lib/udelay.o differ
diff --git a/build/vc4-objects/lib/xprintf.o b/build/vc4-objects/lib/xprintf.o
new file mode 100644 (file)
index 0000000..7f7c120
Binary files /dev/null and b/build/vc4-objects/lib/xprintf.o differ
diff --git a/build/vc4-objects/romstage.o b/build/vc4-objects/romstage.o
new file mode 100644 (file)
index 0000000..23a7ca8
Binary files /dev/null and b/build/vc4-objects/romstage.o differ
diff --git a/build/vc4-objects/sdram.o b/build/vc4-objects/sdram.o
new file mode 100644 (file)
index 0000000..2c1d489
Binary files /dev/null and b/build/vc4-objects/sdram.o differ
diff --git a/build/vc4-objects/start.o b/build/vc4-objects/start.o
new file mode 100644 (file)
index 0000000..17897a7
Binary files /dev/null and b/build/vc4-objects/start.o differ
diff --git a/build/vc4-objects/trap.o b/build/vc4-objects/trap.o
new file mode 100644 (file)
index 0000000..3fdcd0a
Binary files /dev/null and b/build/vc4-objects/trap.o differ
old mode 100755 (executable)
new mode 100644 (file)
index 67648d6..ea275d2
@@ -16,4 +16,9 @@ echo "-----------------------------------------"
 echo "Building firmware ..."
 echo "-----------------------------------------"
 cd ..
-make
\ No newline at end of file
+make
+
+# stage through WSL
+if [ "$1" = "sw" ]; then
+       tools/wslstage.py
+fi
\ No newline at end of file
diff --git a/lib/cxx_runtime.cc b/lib/cxx_runtime.cc
new file mode 100644 (file)
index 0000000..7c2364d
--- /dev/null
@@ -0,0 +1,58 @@
+/*=============================================================================
+Copyright (C) 2016-2017 Authors of rpi-open-firmware
+All rights reserved.
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation; either version 2
+of the License, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+FILE DESCRIPTION
+Crappy C++ runtime.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include <hardware.h>
+#include <lib/runtime.h>
+
+extern uintptr_t* __init_array_start;
+extern uintptr_t* __init_array_end;
+
+void* operator new[] (size_t sz) {
+       return malloc(sz);
+}
+
+void* operator new (size_t sz) {
+       return malloc(sz);
+}
+
+void operator delete (void* ptr) {
+       free(ptr);
+}
+
+void operator delete[] (void* ptr) {
+       free(ptr);
+}
+
+extern "C" void __cxa_pure_virtual() {
+       panic("__cxa_pure_virtual!");
+}
+
+void __cxx_init() {
+       unsigned ctor_count = (unsigned)(&__init_array_end - &__init_array_start);
+       void (*static_ctor)();
+
+       printf("__cxx_init: calling %d static constructors (0x%X - 0x%X) ...\n", ctor_count, &__init_array_start, &__init_array_end);
+
+       for (unsigned i = 0; i < ctor_count; i++) {
+               uintptr_t* ptr = (((uintptr_t*)&__init_array_start) + i);
+               static_ctor = reinterpret_cast<void (*)()>(*ptr);
+               static_ctor();
+       }
+}
\ No newline at end of file
index a72d96e..8b73aa7 100644 (file)
@@ -17,8 +17,9 @@ Panic routine.
 \r
 =============================================================================*/\r
 \r
+#include <lib/runtime.h>\r
+\r
 #if defined(__VIDEOCORE4__)\r
-       #include <common.h>\r
        #include <cpu.h>\r
 #else\r
        #include <chainloader.h>\r
diff --git a/lib/runtime.h b/lib/runtime.h
new file mode 100644 (file)
index 0000000..e34aea5
--- /dev/null
@@ -0,0 +1,43 @@
+#pragma once
+
+#include <stdint.h>
+#include <stdarg.h>
+#include <stddef.h>
+
+#include <lib/panic.h>
+#include <lib/xprintf.h>
+
+#ifdef __arm__
+#define HAS_DYNAMIC_ALLOCATIONS
+#endif
+
+#ifdef HAS_DYNAMIC_ALLOCATIONS
+#include <lib/tlsf/tlsf.h>
+#endif
+
+#ifdef __VIDEOCORE4__
+#include <vc4_types.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern void udelay(uint32_t time);
+extern void __cxx_init();
+
+#ifdef __VIDEOCORE4__
+       extern void *__memcpy(void *_dst, const void *_src, unsigned len);
+       #define memcpy(d,s,l) __memcpy(d,s,l)
+#endif
+
+#define bcopy(s,d,l) memcpy(d,s,l)
+
+#ifndef HAS_DYNAMIC_ALLOCATIONS
+static inline void* malloc(size_t size) { panic("malloc: dynamic allocations not supported on this configuration"); }
+static inline void free(void* obj)      { panic("free: dynamic allocations not supported on this configuration"); }
+#endif
+
+#ifdef __cplusplus
+}
+#endif
index 1d7451c..f6a5d43 100644 (file)
@@ -17,14 +17,8 @@ Simple timer based delay routine.
 \r
 =============================================================================*/\r
 \r
-#ifdef __arm__\r
-       #include <stdint.h>\r
-#else\r
-       #include <common.h>\r
-#endif\r
-\r
-#include "../hardware.h"\r
-\r
+#include <lib/runtime.h>\r
+#include <hardware.h>\r
 \r
 void udelay(uint32_t t) {\r
        uint32_t tv = ST_CLO;\r
diff --git a/linker.lds b/linker.lds
new file mode 100644 (file)
index 0000000..6cebac7
--- /dev/null
@@ -0,0 +1,40 @@
+SECTIONS
+{
+       . = 0x0;
+
+       _text = .;
+
+       .text : {
+               *(.text)
+               *(.text.*)
+               *(.gnu.warning)
+       }
+
+       _etext = .;
+
+       .rodata : { *(.rodata) *(.rodata.*) }
+       . = ALIGN(4096);
+       _erodata = .;
+
+       . = ALIGN(32 / 8);
+
+       PROVIDE (__init_array_start = .);
+       .init_array : {
+               *(.init_array)
+               *(.init_array.*)
+       }
+       PROVIDE (__init_array_end = .);
+
+       .data : {                       /* Data */
+               *(.data)
+       }
+
+       .bss : {
+               _edata = .;
+               __bss_start = .;
+               *(.bss)
+       }
+
+       . = ALIGN(32 / 8);
+       _end = . ;
+}
\ No newline at end of file
index bb362b4..aa7879b 100644 (file)
@@ -17,7 +17,7 @@ VideoCoreIV first stage bootloader.
 \r
 =============================================================================*/\r
 \r
-#include <common.h>\r
+#include <lib/runtime.h>\r
 #include <hardware.h>\r
 \r
 uint32_t g_CPUID;\r
@@ -51,26 +51,26 @@ void uart_init(void) {
        unsigned int ra = GP_FSEL1;\r
        ra &= ~(7 << 12);\r
        ra |= 4 << 12;\r
-        ra &= ~(7 << 15);\r
-        ra |= 4 << 15;\r
+       ra &= ~(7 << 15);\r
+       ra |= 4 << 15;\r
        GP_FSEL1 = ra;\r
 \r
-        mmio_write32(UART_CR, 0);\r
+       mmio_write32(UART_CR, 0);\r
 \r
-        GP_PUD = 0;\r
+       GP_PUD = 0;\r
        udelay(150);\r
        GP_PUDCLK0 = (1 << 14) | (1 << 15);\r
        udelay(150);\r
        GP_PUDCLK0 = 0;\r
 \r
-        CM_UARTDIV = CM_PASSWORD | 0x6666;\r
-        CM_UARTCTL = CM_PASSWORD | CM_SRC_OSC | CM_UARTCTL_FRAC_SET | CM_UARTCTL_ENAB_SET;\r
+       CM_UARTDIV = CM_PASSWORD | 0x6666;\r
+       CM_UARTCTL = CM_PASSWORD | CM_SRC_OSC | CM_UARTCTL_FRAC_SET | CM_UARTCTL_ENAB_SET;\r
 \r
-        mmio_write32(UART_ICR, 0x7FF);\r
-        mmio_write32(UART_IBRD, 1);\r
-        mmio_write32(UART_FBRD, 40);\r
-        mmio_write32(UART_LCRH, 0x70);\r
-        mmio_write32(UART_CR, 0x301);\r
+       mmio_write32(UART_ICR, 0x7FF);\r
+       mmio_write32(UART_IBRD, 1);\r
+       mmio_write32(UART_FBRD, 40);\r
+       mmio_write32(UART_LCRH, 0x70);\r
+       mmio_write32(UART_CR, 0x301);\r
 }\r
 \r
 void led_init(void) {\r
@@ -193,6 +193,8 @@ int _main(unsigned int cpuid, unsigned int load_address) {
 \r
        g_CPUID = cpuid;\r
 \r
+       __cxx_init();\r
+\r
        /* bring up SDRAM */\r
        sdram_init();\r
        printf("SDRAM initialization completed successfully!\n");\r
diff --git a/sdram.c b/sdram.c
index 16a9e79..1f7df89 100644 (file)
--- a/sdram.c
+++ b/sdram.c
@@ -18,10 +18,9 @@ VideoCoreIV SDRAM initialization code.
 \r
 =============================================================================*/\r
 \r
-#include <common.h>\r
+#include <lib/runtime.h>\r
 #include <hardware.h>\r
 \r
-\r
 /*\r
  Registers\r
  =========\r
diff --git a/tools/wslstage.py b/tools/wslstage.py
new file mode 100644 (file)
index 0000000..cf6c79b
--- /dev/null
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+# Copyright (c) 2017 Kristina Brooks
+
+# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# ! DO NOT CHANGE OR REMOVE THIS FILE      !
+# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+import subprocess
+import os
+from StringIO import StringIO
+
+disk_name = 'E'
+disk_ready = False
+wmic_output = os.popen('wcmd wmic logicaldisk get caption, filesystem')
+
+for line in wmic_output:
+       if '%s:' % disk_name in line and 'FAT' in line:
+               disk_ready = True
+
+if disk_ready:
+       print('Windows disk %s:\\ is ready!' % disk_name)
+       print('Copying bootcode.bin to %s:\\ ...\n' % disk_name)
+
+       os.system('''wcmd xcopy /s /y "C:\\Users\\k\\VC4_Firmware\\rpi-open-firmware\\build\\bootcode.bin" "''' + disk_name + ''':\\bootcode.bin"''')
+else:
+       print('Windows disk %s:\\ is not ready!' % disk_name)
+
diff --git a/trap.c b/trap.c
index cdac7a4..dbaaa1a 100644 (file)
--- a/trap.c
+++ b/trap.c
@@ -17,7 +17,7 @@ VideoCoreIV second level exception handlers.
 \r
 =============================================================================*/\r
 \r
-#include <common.h>\r
+#include <lib/runtime.h>\r
 #include <pcb.h>\r
 #include <exception.h>\r
 #include <hardware.h>\r
diff --git a/vc4_include/common.h b/vc4_include/common.h
deleted file mode 100644 (file)
index 18a0714..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-#pragma once\r
-\r
-#include "../lib/stdarg.h"\r
-\r
-typedef unsigned long long u64;\r
-typedef unsigned long long uint64_t;\r
-typedef long long int64_t;\r
-\r
-typedef unsigned int u32;\r
-typedef unsigned int uint32_t;\r
-typedef int int32_t;\r
-\r
-typedef unsigned short u16;\r
-typedef unsigned short uint16_t;\r
-typedef short int16_t;\r
-\r
-typedef unsigned char u8;\r
-typedef unsigned char uint8_t;\r
-\r
-typedef int bool;\r
-\r
-#define true 1\r
-#define false 0\r
-\r
-#define NULL ((void*)0)\r
-\r
-typedef uint32_t size_t;\r
-\r
-# define ALWAYS_INLINE __attribute__((always_inline)) inline\r
-\r
-#define _OPEN_SOURCE\r
-\r
-extern void udelay(uint32_t time);\r
-extern uint32_t __div64_32(uint64_t *n, uint32_t base);\r
-\r
-#include "../lib/panic.h"\r
-\r
-#define do_div __div64_32\r
-\r
-/*\r
- * this is done like that because clang likes using __builtin_memcpy\r
- * which makes LLC choke in a fabulous way.\r
- */\r
-extern void *__memcpy(void *_dst, const void *_src, unsigned len);\r
-#define bcopy(s,d,l) __memcpy(d,s,l)\r
-#define memcpy(d,s,l) __memcpy(d,s,l)\r
-\r
-extern int putchar(int c);\r
-extern int vprintf(const char* fmt, va_list arp);\r
-extern int printf(const char *fmt, ...);\r
-extern int puts(const char* str);
\ No newline at end of file
index a6c13bb..5458df5 100644 (file)
@@ -19,7 +19,7 @@ Process control block.
 \r
 #pragma once\r
 \r
-#include <common.h>\r
+#include <lib/runtime.h>\r
 \r
 typedef struct {\r
        uint32_t r23;\r
diff --git a/vc4_include/vc4_types.h b/vc4_include/vc4_types.h
new file mode 100644 (file)
index 0000000..fb43f36
--- /dev/null
@@ -0,0 +1,19 @@
+#pragma once\r
+\r
+#include <lib/stdarg.h>\r
+\r
+typedef unsigned long long u64;\r
+typedef unsigned int u32;\r
+typedef unsigned short u16;\r
+typedef unsigned char u8;\r
+\r
+#ifndef __cplusplus\r
+       typedef int bool;\r
+       #define true 1\r
+       #define false 0\r
+#endif\r
+\r
+#define ALWAYS_INLINE __attribute__((always_inline)) inline\r
+\r
+#define _OPEN_SOURCE\r
+\r
This page took 0.168269 seconds and 4 git commands to generate.