author     Andy Ross <andrew.j.ross@intel.com>     2016-09-22 13:25:02 -0700
committer  Anas Nashif <nashif@linux.intel.com>    2016-10-01 01:41:51 +0000
commit     0f227848b90a71de0a407d39b3b00606af612e66
tree       0d8255ca9771a6e6fe3692baf1feb6e7d6692712
parent     6b3c5e8bb2cee171a233638a1b33e0c92121c50c
link: Add section size validity checker
There are demons hiding in binutils with regard to the size of the
data section that needs to be copied in XIP platforms. It's easy to
mess up and get a mismatch between the linker data output and the
runtime addresses. Add a cheap check to discover this early when it
occurs.

Jira: ZEP-955

Change-Id: If1c61fe8712221c6450b5b89f5f8af006b41b3fe
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
-rw-r--r--   Makefile                   |  1
-rwxr-xr-x   scripts/check_link_map.pl  | 63
2 files changed, 64 insertions, 0 deletions
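The failure mode being guarded against is an XIP image whose initialized-data
sections take up a different amount of space in the ROM load image (their LMAs)
than in RAM (their VMAs), typically because GNU ld padded one side without
padding the other; the boot-time copy from ROM to RAM then lands data at the
wrong runtime addresses. As a rough, hypothetical GNU ld sketch of the kind of
layout involved (memory regions, addresses, and section names are illustrative,
not taken from the Zephyr linker scripts):

    MEMORY
    {
        ROM (rx)  : ORIGIN = 0x00010000, LENGTH = 256K   /* XIP flash */
        RAM (rwx) : ORIGIN = 0x00400000, LENGTH = 64K    /* runtime data */
    }

    SECTIONS
    {
        /* Code executes in place from ROM: VMA and LMA coincide. */
        .text : { *(.text*) } > ROM

        /* Initialized data: VMA in RAM, load image (LMA) in ROM.
         * ALIGN_WITH_INPUT asks ld to pack the ROM copy exactly like
         * the RAM layout, so the VMA and LMA offsets between
         * consecutive sections stay equal. */
        .data : ALIGN_WITH_INPUT { *(.data*) } > RAM AT> ROM

        /* Zero-filled data needs no load image at all. */
        .bss (NOLOAD) : { *(.bss*) } > RAM
    }

When ALIGN_WITH_INPUT is missing or misused, ld may insert alignment padding on
the VMA side that is not mirrored in the load image, and the check added here
catches that skew in the map file.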
diff --git a/Makefile b/Makefile
index c63e1e458..1d81a1bb4 100644
--- a/Makefile
+++ b/Makefile
@@ -867,6 +867,7 @@ WARN_ABOUT_ASSERT := $(if $(CONFIG_ASSERT),echo -e -n $(ASSERT_WARNING_STR),true
ifeq ($(ARCH),x86)
$(KERNEL_ELF_NAME): staticIdt.o final-linker.cmd
$(call cmd,lnk_elf)
+ @$(srctree)/scripts/check_link_map.pl $(KERNEL_NAME).map
@$(WARN_ABOUT_ASSERT)
else
$(KERNEL_ELF_NAME): $(TMP_ELF)
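The hook above is added only to the x86 ELF link rule, but the checker itself
just reads a GNU ld map file, so it can also be run by hand against a finished
build. Assuming the map file is named zephyr.map (it is really
$(KERNEL_NAME).map, so the exact name depends on the build configuration):

    ./scripts/check_link_map.pl zephyr.map

A consistent map produces no output and exit status 0; a mismatch prints an
ERROR line (see the script below) and exits 1, which fails the build.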
diff --git a/scripts/check_link_map.pl b/scripts/check_link_map.pl
new file mode 100755
index 000000000..3ba45d13f
--- /dev/null
+++ b/scripts/check_link_map.pl
@@ -0,0 +1,63 @@
+#!/usr/bin/perl
+use warnings;
+use strict;
+
+# Linker address generation validity checker. By default, GNU ld is
+# broken when faced with sections where the load address (i.e. the
+# spot in the XIP program binary where initialized data lives) differs
+# from the virtual address (i.e. the location in RAM where that data
+# will live at runtime). We need to be sure we're using the
+# ALIGN_WITH_INPUT feature correctly everywhere, which is hard --
+# especially so given that many of these bugs are semi-invisible at
+# runtime (most initialized data is still a bunch of zeros and often
+# "works" even if it's wrong).
+#
+# This quick test just checks the offsets between sequential segments
+# with separate VMA/LMA addresses and verifies that the size deltas
+# are identical.
+#
+# Note that this is assuming that the address generation is always
+# in-order and that there is only one "ROM" LMA block. It's possible
+# to write a valid linker script that will fail this script, but we
+# don't have such a use case and one isn't foreseen.
+
+# Skip the header stuff
+while(<>) { last if /Linker script and memory map/; }
+
+
+my ($last_sec, $last_vma, $last_lma);
+while(<>) {
+ next if ! /^([a-zA-Z0-9_\.]+) \s+ # name
+ (0x[0-9a-f]+) \s+ # addr
+ (0x[0-9a-f]+) \s+ # size
+ /x;
+
+ my ($sec, $vma, $sz) = ($1, $2, $3);
+
+ my $lma = "";
+ if(/load address (0x[0-9a-f]+)/) {
+ $lma = $1;
+ } else {
+ $last_sec = undef;
+ next;
+ }
+
+ $vma = eval $vma;
+ $lma = eval $lma;
+
+ if(defined $last_sec) {
+ my $dv = $vma - $last_vma;
+ my $dl = $lma - $last_lma;
+ if($dv != $dl) {
+ print STDERR
+ "ERROR: section $last_sec is $dv bytes "
+ . "in the virtual/runtime address space, "
+ . "but only $dl in the loaded/XIP section!\n";
+ exit 1;
+ }
+ }
+
+ $last_sec = $sec;
+ $last_vma = $vma;
+ $last_lma = $lma;
+}
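To make the check concrete, here is a hypothetical fragment of the "Linker
script and memory map" portion of a map file (section names follow the Zephyr
style; the addresses are invented for this example). The script only compares
consecutive sections that both carry a "load address" (LMA) field:

    datas           0x00400000      0x120  load address 0x00010000
    initlevel       0x00400140      0x40   load address 0x00010120

Here the VMA step from datas to initlevel is 0x140 bytes while the LMA step is
only 0x120, so the deltas disagree and the script would print

    ERROR: section datas is 320 bytes in the virtual/runtime address space, but only 288 in the loaded/XIP section!

and exit 1.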