author     Duncan Sands <baldrick@free.fr>  2009-10-12 10:15:02 +0000
committer  Duncan Sands <baldrick@free.fr>  2009-10-12 10:15:02 +0000
commit     bc61ab91dff2f07cc23f6ba8e9449f9b545381f1 (patch)
tree       06f5d75f0bc5724a2106cc50e0773440bf3d4256
parent     ef6fabb10f99c0299ea5b0a70d754c623f8233ca (diff)
parent     fb5ad0b54ecf42c4e1a3c6d1ae034ce07edaa9a7 (diff)
Rename the plugin dragonegg.so.
-rw-r--r--  dragonegg/Makefile                        70
-rw-r--r--  dragonegg/README                          90
-rw-r--r--  dragonegg/TODO                            64
-rw-r--r--  dragonegg/bits_and_bobs.cpp               27
-rw-r--r--  dragonegg/bits_and_bobs.h                 14
-rw-r--r--  dragonegg/darwin/llvm-os.h                45
-rw-r--r--  dragonegg/gcc-patches/i386_static.diff    40
-rw-r--r--  dragonegg/gt-llvm-cache.h                752
-rw-r--r--  dragonegg/linux/llvm-os.h                 31
-rw-r--r--  dragonegg/llvm-abi.h                    1148
-rw-r--r--  dragonegg/llvm-backend.cpp              2252
-rw-r--r--  dragonegg/llvm-cache.c                   133
-rw-r--r--  dragonegg/llvm-cache.h                    50
-rw-r--r--  dragonegg/llvm-convert.cpp              8211
-rw-r--r--  dragonegg/llvm-debug.cpp                 924
-rw-r--r--  dragonegg/llvm-debug.h                   140
-rw-r--r--  dragonegg/llvm-internal.h                765
-rw-r--r--  dragonegg/llvm-types.cpp                2210
-rw-r--r--  dragonegg/utils/target.cpp                68
-rw-r--r--  dragonegg/x86/llvm-target.cpp           1513
-rw-r--r--  dragonegg/x86/llvm-target.h              971
21 files changed, 19518 insertions, 0 deletions
diff --git a/dragonegg/Makefile b/dragonegg/Makefile
new file mode 100644
index 00000000000..16971f9216c
--- /dev/null
+++ b/dragonegg/Makefile
@@ -0,0 +1,70 @@
+GCCSOURCE_DIR=$(HOME)/GCC/src/
+GCCOBJECT_DIR=$(HOME)/GCC/objects/
+# Point LLVM_CONFIG to the just built llvm-config to use an LLVM build rather
+# than the installed version of LLVM.
+LLVM_CONFIG=llvm-config
+
+# Replace with an informative string when doing a release.
+REVISION:=$(shell svnversion -n .)
+TARGET_TRIPLE:=$(shell $(GCCOBJECT_DIR)/gcc/xgcc -v 2>&1 | grep "^Target:" | sed -e "s/^Target: *//")
+
+PLUGIN=dragonegg.so
+PLUGIN_C=llvm-cache.c
+PLUGIN_CPP=llvm-convert.cpp llvm-backend.cpp llvm-debug.cpp llvm-types.cpp \
+ bits_and_bobs.cpp
+PLUGIN_C_OBJECTS=$(PLUGIN_C:.c=.o)
+PLUGIN_CPP_OBJECTS=$(PLUGIN_CPP:.cpp=.o)
+PLUGIN_OBJECTS=$(PLUGIN_C_OBJECTS) $(PLUGIN_CPP_OBJECTS)
+
+TARGET_CPP=$(shell $(TARGET_UTIL) -p)/llvm-target.cpp
+TARGET_OBJECT=llvm-target.o
+
+TARGET_UTIL=./target
+TARGET_UTIL_OBJECTS=utils/target.o
+
+GENGTYPE_INPUT=$(PWD)/llvm-cache.c
+GENGTYPE_OUTPUT=$(PWD)/gt-llvm-cache.h
+
+CFLAGS+=-Wall -Werror -fPIC -g -O2
+CFLAGS+=-DIN_GCC -DREVISION=\"$(REVISION)\" -DTARGET_NAME=\"$(TARGET_TRIPLE)\"
+CXXFLAGS+=$(CFLAGS) $(shell $(LLVM_CONFIG) --cppflags)
+
+LDFLAGS+=$(shell $(LLVM_CONFIG) --libs analysis core ipo scalaropts target) \
+ $(shell $(LLVM_CONFIG) --ldflags)
+
+PLUGIN_CFLAGS+=-I$(GCCOBJECT_DIR)/gcc -I$(GCCOBJECT_DIR)/gcc/include \
+ -I$(GCCSOURCE_DIR)/gcc -I$(GCCSOURCE_DIR)/include \
+ -I$(GCCSOURCE_DIR)/libcpp/include -I$(GCCSOURCE_DIR)/libdecnumber \
+ -I$(GCCOBJECT_DIR)/libdecnumber -I$(shell $(TARGET_UTIL) -p) \
+ -I$(shell $(TARGET_UTIL) -o)
+PLUGIN_CXXFLAGS+=$(PLUGIN_CFLAGS)
+
+default: $(PLUGIN)
+
+$(TARGET_UTIL): $(TARGET_UTIL_OBJECTS)
+ $(CXX) $^ -o $@ $(CXXFLAGS) $(LDFLAGS)
+
+$(PLUGIN_C_OBJECTS): %.o : %.c $(TARGET_UTIL)
+ $(CC) -c $(CPPFLAGS) $(CFLAGS) $(PLUGIN_CFLAGS) $<
+
+$(PLUGIN_CPP_OBJECTS): %.o : %.cpp $(TARGET_UTIL)
+ $(CXX) -c $(CPPFLAGS) $(CXXFLAGS) $(PLUGIN_CXXFLAGS) $<
+
+$(TARGET_OBJECT): $(TARGET_UTIL)
+ $(CXX) -c $(TARGET_CPP) -o $@ $(CPPFLAGS) $(CXXFLAGS) $(PLUGIN_CXXFLAGS) -I.
+
+$(PLUGIN): $(TARGET_UTIL) $(PLUGIN_OBJECTS) $(TARGET_OBJECT)
+ $(CXX) -shared $(PLUGIN_OBJECTS) $(TARGET_OBJECT) -o $@ $(LDFLAGS) \
+ $(shell $(LLVM_CONFIG) --libs $(shell $(TARGET_UTIL) -p))
+
+llvm-cache.o: gt-llvm-cache.h
+
+gt-llvm-cache.h:
+ cd $(GCCOBJECT_DIR)/gcc && ./build/gengtype \
+ -P $(GENGTYPE_OUTPUT) $(GCCSOURCE_DIR) gtyp-input.list \
+ $(GENGTYPE_INPUT)
+ sed -i "s/ggc_cache_tab .*\[\]/ggc_cache_tab gt_ggc_rc__gt_llvm_cache_h[]/" $(GENGTYPE_OUTPUT)
+ sed -i "s/ggc_root_tab .*\[\]/ggc_root_tab gt_pch_rc__gt_llvm_cache_h[]/" $(GENGTYPE_OUTPUT)
+
+clean::
+ rm -f *.o */*.o $(PLUGIN) $(TARGET_UTIL)
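
The Makefile above is driven entirely by the GCCSOURCE_DIR, GCCOBJECT_DIR and
LLVM_CONFIG variables. Since these are ordinary make assignments, they can also
be overridden on the command line instead of being edited in place. A minimal
sketch of such an invocation (the paths are placeholders, not part of this
commit):

  make GCCSOURCE_DIR=$HOME/src/gcc \
       GCCOBJECT_DIR=$HOME/obj/gcc \
       LLVM_CONFIG=/path/to/llvm-build/bin/llvm-config
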
diff --git a/dragonegg/README b/dragonegg/README
new file mode 100644
index 00000000000..3019369ab6f
--- /dev/null
+++ b/dragonegg/README
@@ -0,0 +1,90 @@
+----------------------
+- BUILD INSTRUCTIONS -
+----------------------
+
+Step 0: Build and install llvm
+------------------------------
+
+I'm assuming anyone reading this knows how to build and install llvm.
+You need the latest llvm from the subversion repository.
+
+Step 1: Build gcc
+-----------------
+
+Check out gcc from the gcc subversion repository:
+ svn checkout svn://gcc.gnu.org/svn/gcc/trunk SomeLocalDir
+Apply the patches in the gcc-patches subdirectory, if any. Hopefully one day
+the plugin will work with an unpatched gcc, but for the moment a few small
+patches need to be applied. Configure gcc with your favorite options.
+Build gcc, and install it somewhere.
+
+Darwin special: the gcc configure script thinks darwin doesn't support dynamic
+libraries and concludes that plugins won't work. Delete or improve the check.
+If you improve it, please send your patch to the gcc developers!
+
+Step 2: Build the plugin
+------------------------
+
+In the Makefile, set the GCCSOURCE_DIR variable to point to the place you
+checked out the gcc repository, rather than to where I checked it out.
+Set the GCCOBJECT_DIR variable to point to the place where you built gcc.
+Admire the awfulness of the build system, and make a mental note to rewrite
+it properly.
+
+Build the plugin using "make". The end result of the build is a shared
+library, dragonegg.so.
+
+Darwin special: "-shared" doesn't result in a correct dynamic library on darwin;
+use "-Wl,-flat_namespace -Wl,-undefined -Wl,suppress -dynamiclib" instead.
+
+----------------------
+- USAGE INSTRUCTIONS -
+----------------------
+
+Run gcc as usual, but pass -fplugin=./dragonegg.so as an extra command line
+argument. Make sure you use the gcc you installed above, not the system gcc!
+
+Currently the plugin isn't capable of compiling much - you have been warned.
+
+
+------------------
+- USEFUL OPTIONS -
+------------------
+
+If you renamed dragonegg.so to something else, for example llvm.so, replace
+-fplugin-arg-dragonegg with -fplugin-arg-llvm in the options below.
+
+-fplugin-arg-dragonegg-emit-ir
+ Output LLVM IR rather than target assembler. You need to use -S with this,
+ since otherwise GCC will pass the output to the system assembler (these don't
+ usually understand LLVM IR). It would be nice to fix this and have the option
+ work with -c too but it's not clear how. If you plan to read the IR then you
+ probably want to use the -fverbose-asm flag as well (see below).
+
+-fverbose-asm
+ Annotate the target assembler with helpful comments. Turns on the generation
+ of helpful names (the same as in GCC tree dumps) in the LLVM IR.
+
+-fstats
+ Output both LLVM and GCC statistics.
+
+-ftime-report
+ Output both LLVM and GCC timing information.
+
+-fno-ident
+ If the ident global asm in the LLVM IR annoys you, use this to turn it off.
+
+-fdump-rtl-all
+ In the dump file, each function is output both as gimple and as LLVM IR.
+
+-fplugin-arg-dragonegg-disable-llvm-optzns
+ Do not perform any LLVM IR optimizations even if compiling at -O1, -O2 etc.
+
+-fplugin-arg-dragonegg-enable-gcc-optzns
+ Run the GCC tree optimizers as well as the LLVM IR optimizers. Normally the
+ GCC optimizers are disabled.
+
+-fplugin-arg-dragonegg-save-gcc-output
+ GCC assembler output is normally redirected to /dev/null so that it doesn't
+ clash with the LLVM output. This option causes GCC output to be written to
+ a file instead. Good for seeing which GCC output we've failed to turn off.
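
As a concrete illustration of the usage instructions and options described
above (the source file name is a placeholder, and "gcc" means the patched gcc
installed in step 1, not the system compiler):

  # compile as usual, but through the LLVM backend
  gcc -fplugin=./dragonegg.so -c hello.c
  # emit readable LLVM IR instead of target assembler
  gcc -fplugin=./dragonegg.so -fplugin-arg-dragonegg-emit-ir -fverbose-asm \
      -S hello.c -o hello.ll
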
diff --git a/dragonegg/TODO b/dragonegg/TODO
new file mode 100644
index 00000000000..b46b0749e4c
--- /dev/null
+++ b/dragonegg/TODO
@@ -0,0 +1,64 @@
+Build system
+------------
+
+The location of the gcc source and objects should be passed as options to a
+configure script rather than being hardwired into the Makefile.
+
+Determination of the target triple should be moved from the Makefile to a
+configure script.
+
+The plugin revision is created from the subversion revision. What if people
+are using git etc? Maybe it should be calculated in a configure script, but
+since that might not get run often perhaps the Makefile is the best place.
+
+Target subdirectories should have their own Makefiles, instead of assuming
+that there's only one source file and that it's called llvm-target.cpp.
+
+Currently the target directory (eg: i386) is calculated from the target triple
+(eg: x86_64-unknown-linux-gnu) using the "target" tool. This should be done
+from a configure script, rather than from the Makefile.
+
+Define LLVM_TARGET_NAME from the Makefile rather than being specified in
+llvm-target.h. Maybe LLVM_TARGET_INTRINSIC_PREFIX could go too. An annoyance
+is that the target tool returns "x86" while what is needed is "X86".
+
+Teach the build system that the plugin needs to be rebuilt if any of the bits of
+LLVM/gcc it depends on changes.
+
+
+Optimizations
+-------------
+
+After outputting global variables, maybe they can be deleted or marked somehow
+(eg: TREE_ASM_WRITTEN) so that GCC does not output them (such output gets sent
+to /dev/null, but it would be more efficient to teach GCC to not produce any in
+the first place). Investigate.
+
+Consider using separate caches for types and globals.
+
+Work out how to stop GCC from outputting debug info for global variables
+when compiling with -g. The output is all thrown away, so harmless, but it
+would be more efficient not to produce any in the first place.
+
+Correctness
+-----------
+
+If an ssa name refers to a global (can this happen?), the SSANames map might
+need to be updated if the target is altered by changeLLVMConstant.
+
+GCC now has per-function optimization levels. Add support for this.
+
+
+Code quality
+------------
+
+Check the effect on code speed of having complex numbers be first class
+structures, i.e. values rather than in-memory aggregates.
+
+
+Features
+--------
+
+Output proper debug info rather than throwing most of it away.
+
+Add support for exception handling.
diff --git a/dragonegg/bits_and_bobs.cpp b/dragonegg/bits_and_bobs.cpp
new file mode 100644
index 00000000000..ed7a06eb8be
--- /dev/null
+++ b/dragonegg/bits_and_bobs.cpp
@@ -0,0 +1,27 @@
+// LLVM headers
+#include "llvm/Constant.h"
+#include "llvm/Value.h"
+
+// System headers
+#include <gmp.h>
+
+// GCC headers
+#undef VISIBILITY_HIDDEN
+
+extern "C" {
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "target.h"
+#include "tree.h"
+}
+
+using namespace llvm;
+
+bool flag_odr = false;
+
+int ix86_regparm;
+
+extern "C" bool contains_aligned_value_p (tree type) {
+  abort();
+}
diff --git a/dragonegg/bits_and_bobs.h b/dragonegg/bits_and_bobs.h
new file mode 100644
index 00000000000..066b96f4cf9
--- /dev/null
+++ b/dragonegg/bits_and_bobs.h
@@ -0,0 +1,14 @@
+// Place to keep various things that will need to be sorted out someday.
+#ifndef BITS_AND_BOBS_H
+#define BITS_AND_BOBS_H
+
+union tree_node;
+
+// emit_global_to_llvm - Emit the specified VAR_DECL to LLVM as a global
+// variable.
+// FIXME: Should not be here
+void emit_global_to_llvm(union tree_node*);
+
+extern bool flag_odr;
+
+#endif
diff --git a/dragonegg/darwin/llvm-os.h b/dragonegg/darwin/llvm-os.h
new file mode 100644
index 00000000000..00aabac0cb2
--- /dev/null
+++ b/dragonegg/darwin/llvm-os.h
@@ -0,0 +1,45 @@
+/* Darwin specific definitions
+Copyright (C) 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+#ifndef LLVM_OS_H
+#define LLVM_OS_H
+
+/* Darwin X86-64 only supports PIC code generation. */
+#if defined (TARGET_386)
+#define LLVM_SET_TARGET_OPTIONS(argvec) \
+ if ((TARGET_64BIT) || flag_pic) \
+ argvec.push_back ("--relocation-model=pic"); \
+ else if (!MACHO_DYNAMIC_NO_PIC_P) \
+ argvec.push_back ("--relocation-model=static")
+#elif defined (TARGET_ARM)
+#define LLVM_SET_TARGET_OPTIONS(argvec) \
+ if (flag_pic) \
+ argvec.push_back ("--relocation-model=pic"); \
+ else if (!MACHO_DYNAMIC_NO_PIC_P) \
+    argvec.push_back ("--relocation-model=static");
+#else /* !TARGET_386 && !TARGET_ARM */
+#define LLVM_SET_TARGET_OPTIONS(argvec) \
+ if (flag_pic) \
+ argvec.push_back ("--relocation-model=pic"); \
+ else if (!MACHO_DYNAMIC_NO_PIC_P) \
+ argvec.push_back ("--relocation-model=static")
+#endif /* !TARGET_386 && !TARGET_ARM */
+
+#endif /* LLVM_OS_H */
diff --git a/dragonegg/gcc-patches/i386_static.diff b/dragonegg/gcc-patches/i386_static.diff
new file mode 100644
index 00000000000..452ee6444a3
--- /dev/null
+++ b/dragonegg/gcc-patches/i386_static.diff
@@ -0,0 +1,40 @@
+Index: mainline/gcc/config/i386/i386.c
+===================================================================
+--- mainline.orig/gcc/config/i386/i386.c 2009-09-28 10:25:38.639572451 +0200
++++ mainline/gcc/config/i386/i386.c 2009-09-28 10:58:43.498571902 +0200
+@@ -4898,7 +4898,7 @@
+ case, we return the original mode and warn ABI change if CUM isn't
+ NULL. */
+
+-static enum machine_mode
++enum machine_mode
+ type_natural_mode (const_tree type, CUMULATIVE_ARGS *cum)
+ {
+ enum machine_mode mode = TYPE_MODE (type);
+@@ -5029,7 +5029,7 @@
+ See the x86-64 PS ABI for details.
+ */
+
+-static int
++int
+ classify_argument (enum machine_mode mode, const_tree type,
+ enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
+ {
+@@ -5409,7 +5409,7 @@
+
+ /* Examine the argument and return set number of register required in each
+ class. Return 0 iff parameter should be passed in memory. */
+-static int
++int
+ examine_argument (enum machine_mode mode, const_tree type, int in_return,
+ int *int_nregs, int *sse_nregs)
+ {
+@@ -6089,7 +6089,7 @@
+
+ /* Return true when TYPE should be 128bit aligned for 32bit argument passing
+ ABI. */
+-static bool
++bool
+ contains_aligned_value_p (tree type)
+ {
+ enum machine_mode mode = TYPE_MODE (type);
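
This is the patch that the README's "Apply the patches in the gcc-patches
subdirectory" step refers to. One way to apply it from the top of the gcc
checkout might be the following (the -p level is inferred from the "mainline/"
prefix in the patch header, and the dragonegg path is a placeholder):

  cd SomeLocalDir
  patch -p1 < /path/to/dragonegg/gcc-patches/i386_static.diff
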
diff --git a/dragonegg/gt-llvm-cache.h b/dragonegg/gt-llvm-cache.h
new file mode 100644
index 00000000000..d12d173b5d3
--- /dev/null
+++ b/dragonegg/gt-llvm-cache.h
@@ -0,0 +1,752 @@
+/* Type information for GCC.
+ Copyright (C) 2004, 2007, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* This file is machine generated. Do not edit. */
+
+/* GC marker procedures. */
+#define gt_ggc_m_13tree_llvm_map(X) do { \
+ if (X != NULL) gt_ggc_mx_tree_llvm_map (X);\
+ } while (0)
+extern void gt_ggc_mx_tree_llvm_map (void *);
+
+void
+gt_ggc_mx_tree_llvm_map (void *x_p)
+{
+ struct tree_llvm_map * const x = (struct tree_llvm_map *)x_p;
+ if (ggc_test_and_set_mark (x))
+ {
+ gt_ggc_m_9tree_node ((*x).base.from);
+ }
+}
+#define gt_ggc_m_15interface_tuple(X) do { \
+ if (X != NULL) gt_ggc_mx_interface_tuple (X);\
+ } while (0)
+extern void gt_ggc_mx_interface_tuple (void *);
+#define gt_ggc_m_16volatilized_type(X) do { \
+ if (X != NULL) gt_ggc_mx_volatilized_type (X);\
+ } while (0)
+extern void gt_ggc_mx_volatilized_type (void *);
+#define gt_ggc_m_17string_descriptor(X) do { \
+ if (X != NULL) gt_ggc_mx_string_descriptor (X);\
+ } while (0)
+extern void gt_ggc_mx_string_descriptor (void *);
+#define gt_ggc_m_15c_inline_static(X) do { \
+ if (X != NULL) gt_ggc_mx_c_inline_static (X);\
+ } while (0)
+extern void gt_ggc_mx_c_inline_static (void *);
+#define gt_ggc_m_24VEC_c_goto_bindings_p_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_c_goto_bindings_p_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_c_goto_bindings_p_gc (void *);
+#define gt_ggc_m_15c_goto_bindings(X) do { \
+ if (X != NULL) gt_ggc_mx_c_goto_bindings (X);\
+ } while (0)
+extern void gt_ggc_mx_c_goto_bindings (void *);
+#define gt_ggc_m_7c_scope(X) do { \
+ if (X != NULL) gt_ggc_mx_c_scope (X);\
+ } while (0)
+extern void gt_ggc_mx_c_scope (void *);
+#define gt_ggc_m_9c_binding(X) do { \
+ if (X != NULL) gt_ggc_mx_c_binding (X);\
+ } while (0)
+extern void gt_ggc_mx_c_binding (void *);
+#define gt_ggc_m_12c_label_vars(X) do { \
+ if (X != NULL) gt_ggc_mx_c_label_vars (X);\
+ } while (0)
+extern void gt_ggc_mx_c_label_vars (void *);
+#define gt_ggc_m_8c_parser(X) do { \
+ if (X != NULL) gt_ggc_mx_c_parser (X);\
+ } while (0)
+extern void gt_ggc_mx_c_parser (void *);
+#define gt_ggc_m_9imp_entry(X) do { \
+ if (X != NULL) gt_ggc_mx_imp_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_imp_entry (void *);
+#define gt_ggc_m_16hashed_attribute(X) do { \
+ if (X != NULL) gt_ggc_mx_hashed_attribute (X);\
+ } while (0)
+extern void gt_ggc_mx_hashed_attribute (void *);
+#define gt_ggc_m_12hashed_entry(X) do { \
+ if (X != NULL) gt_ggc_mx_hashed_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_hashed_entry (void *);
+#define gt_ggc_m_14type_assertion(X) do { \
+ if (X != NULL) gt_ggc_mx_type_assertion (X);\
+ } while (0)
+extern void gt_ggc_mx_type_assertion (void *);
+#define gt_ggc_m_18treetreehash_entry(X) do { \
+ if (X != NULL) gt_ggc_mx_treetreehash_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_treetreehash_entry (void *);
+#define gt_ggc_m_5CPool(X) do { \
+ if (X != NULL) gt_ggc_mx_CPool (X);\
+ } while (0)
+extern void gt_ggc_mx_CPool (void *);
+#define gt_ggc_m_3JCF(X) do { \
+ if (X != NULL) gt_ggc_mx_JCF (X);\
+ } while (0)
+extern void gt_ggc_mx_JCF (void *);
+#define gt_ggc_m_17module_htab_entry(X) do { \
+ if (X != NULL) gt_ggc_mx_module_htab_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_module_htab_entry (void *);
+#define gt_ggc_m_13binding_level(X) do { \
+ if (X != NULL) gt_ggc_mx_binding_level (X);\
+ } while (0)
+extern void gt_ggc_mx_binding_level (void *);
+#define gt_ggc_m_9opt_stack(X) do { \
+ if (X != NULL) gt_ggc_mx_opt_stack (X);\
+ } while (0)
+extern void gt_ggc_mx_opt_stack (void *);
+#define gt_ggc_m_16def_pragma_macro(X) do { \
+ if (X != NULL) gt_ggc_mx_def_pragma_macro (X);\
+ } while (0)
+extern void gt_ggc_mx_def_pragma_macro (void *);
+#define gt_ggc_m_22def_pragma_macro_value(X) do { \
+ if (X != NULL) gt_ggc_mx_def_pragma_macro_value (X);\
+ } while (0)
+extern void gt_ggc_mx_def_pragma_macro_value (void *);
+#define gt_ggc_m_11align_stack(X) do { \
+ if (X != NULL) gt_ggc_mx_align_stack (X);\
+ } while (0)
+extern void gt_ggc_mx_align_stack (void *);
+#define gt_ggc_m_18VEC_tree_gc_vec_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_tree_gc_vec_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_tree_gc_vec_gc (void *);
+#define gt_ggc_m_19VEC_const_char_p_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_const_char_p_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_const_char_p_gc (void *);
+#define gt_ggc_m_21pending_abstract_type(X) do { \
+ if (X != NULL) gt_ggc_mx_pending_abstract_type (X);\
+ } while (0)
+extern void gt_ggc_mx_pending_abstract_type (void *);
+#define gt_ggc_m_9cp_parser(X) do { \
+ if (X != NULL) gt_ggc_mx_cp_parser (X);\
+ } while (0)
+extern void gt_ggc_mx_cp_parser (void *);
+#define gt_ggc_m_17cp_parser_context(X) do { \
+ if (X != NULL) gt_ggc_mx_cp_parser_context (X);\
+ } while (0)
+extern void gt_ggc_mx_cp_parser_context (void *);
+#define gt_ggc_m_8cp_lexer(X) do { \
+ if (X != NULL) gt_ggc_mx_cp_lexer (X);\
+ } while (0)
+extern void gt_ggc_mx_cp_lexer (void *);
+#define gt_ggc_m_10tree_check(X) do { \
+ if (X != NULL) gt_ggc_mx_tree_check (X);\
+ } while (0)
+extern void gt_ggc_mx_tree_check (void *);
+#define gt_ggc_m_22VEC_deferred_access_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_deferred_access_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_deferred_access_gc (void *);
+#define gt_ggc_m_10spec_entry(X) do { \
+ if (X != NULL) gt_ggc_mx_spec_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_spec_entry (void *);
+#define gt_ggc_m_16pending_template(X) do { \
+ if (X != NULL) gt_ggc_mx_pending_template (X);\
+ } while (0)
+extern void gt_ggc_mx_pending_template (void *);
+#define gt_ggc_m_21named_label_use_entry(X) do { \
+ if (X != NULL) gt_ggc_mx_named_label_use_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_named_label_use_entry (void *);
+#define gt_ggc_m_28VEC_deferred_access_check_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_deferred_access_check_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_deferred_access_check_gc (void *);
+#define gt_ggc_m_11tinst_level(X) do { \
+ if (X != NULL) gt_ggc_mx_tinst_level (X);\
+ } while (0)
+extern void gt_ggc_mx_tinst_level (void *);
+#define gt_ggc_m_18sorted_fields_type(X) do { \
+ if (X != NULL) gt_ggc_mx_sorted_fields_type (X);\
+ } while (0)
+extern void gt_ggc_mx_sorted_fields_type (void *);
+#define gt_ggc_m_18VEC_tree_pair_s_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_tree_pair_s_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_tree_pair_s_gc (void *);
+#define gt_ggc_m_17named_label_entry(X) do { \
+ if (X != NULL) gt_ggc_mx_named_label_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_named_label_entry (void *);
+#define gt_ggc_m_14cp_token_cache(X) do { \
+ if (X != NULL) gt_ggc_mx_cp_token_cache (X);\
+ } while (0)
+extern void gt_ggc_mx_cp_token_cache (void *);
+#define gt_ggc_m_11saved_scope(X) do { \
+ if (X != NULL) gt_ggc_mx_saved_scope (X);\
+ } while (0)
+extern void gt_ggc_mx_saved_scope (void *);
+#define gt_ggc_m_16cxx_int_tree_map(X) do { \
+ if (X != NULL) gt_ggc_mx_cxx_int_tree_map (X);\
+ } while (0)
+extern void gt_ggc_mx_cxx_int_tree_map (void *);
+#define gt_ggc_m_23VEC_cp_class_binding_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_cp_class_binding_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_cp_class_binding_gc (void *);
+#define gt_ggc_m_24VEC_cxx_saved_binding_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_cxx_saved_binding_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_cxx_saved_binding_gc (void *);
+#define gt_ggc_m_16cp_binding_level(X) do { \
+ if (X != NULL) gt_ggc_mx_cp_binding_level (X);\
+ } while (0)
+extern void gt_ggc_mx_cp_binding_level (void *);
+#define gt_ggc_m_11cxx_binding(X) do { \
+ if (X != NULL) gt_ggc_mx_cxx_binding (X);\
+ } while (0)
+extern void gt_ggc_mx_cxx_binding (void *);
+#define gt_ggc_m_15binding_entry_s(X) do { \
+ if (X != NULL) gt_ggc_mx_binding_entry_s (X);\
+ } while (0)
+extern void gt_ggc_mx_binding_entry_s (void *);
+#define gt_ggc_m_15binding_table_s(X) do { \
+ if (X != NULL) gt_ggc_mx_binding_table_s (X);\
+ } while (0)
+extern void gt_ggc_mx_binding_table_s (void *);
+#define gt_ggc_m_14VEC_tinfo_s_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_tinfo_s_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_tinfo_s_gc (void *);
+#define gt_ggc_m_18gnat_binding_level(X) do { \
+ if (X != NULL) gt_ggc_mx_gnat_binding_level (X);\
+ } while (0)
+extern void gt_ggc_mx_gnat_binding_level (void *);
+#define gt_ggc_m_9elab_info(X) do { \
+ if (X != NULL) gt_ggc_mx_elab_info (X);\
+ } while (0)
+extern void gt_ggc_mx_elab_info (void *);
+#define gt_ggc_m_10stmt_group(X) do { \
+ if (X != NULL) gt_ggc_mx_stmt_group (X);\
+ } while (0)
+extern void gt_ggc_mx_stmt_group (void *);
+#define gt_ggc_m_16VEC_parm_attr_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_parm_attr_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_parm_attr_gc (void *);
+#define gt_ggc_m_11parm_attr_d(X) do { \
+ if (X != NULL) gt_ggc_mx_parm_attr_d (X);\
+ } while (0)
+extern void gt_ggc_mx_parm_attr_d (void *);
+#define gt_ggc_m_20ssa_operand_memory_d(X) do { \
+ if (X != NULL) gt_ggc_mx_ssa_operand_memory_d (X);\
+ } while (0)
+extern void gt_ggc_mx_ssa_operand_memory_d (void *);
+#define gt_ggc_m_13scev_info_str(X) do { \
+ if (X != NULL) gt_ggc_mx_scev_info_str (X);\
+ } while (0)
+extern void gt_ggc_mx_scev_info_str (void *);
+#define gt_ggc_m_13VEC_gimple_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_gimple_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_gimple_gc (void *);
+#define gt_ggc_m_9type_hash(X) do { \
+ if (X != NULL) gt_ggc_mx_type_hash (X);\
+ } while (0)
+extern void gt_ggc_mx_type_hash (void *);
+#define gt_ggc_m_16string_pool_data(X) do { \
+ if (X != NULL) gt_ggc_mx_string_pool_data (X);\
+ } while (0)
+extern void gt_ggc_mx_string_pool_data (void *);
+#define gt_ggc_m_13libfunc_entry(X) do { \
+ if (X != NULL) gt_ggc_mx_libfunc_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_libfunc_entry (void *);
+#define gt_ggc_m_23temp_slot_address_entry(X) do { \
+ if (X != NULL) gt_ggc_mx_temp_slot_address_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_temp_slot_address_entry (void *);
+#define gt_ggc_m_15throw_stmt_node(X) do { \
+ if (X != NULL) gt_ggc_mx_throw_stmt_node (X);\
+ } while (0)
+extern void gt_ggc_mx_throw_stmt_node (void *);
+#define gt_ggc_m_21VEC_eh_landing_pad_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_eh_landing_pad_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_eh_landing_pad_gc (void *);
+#define gt_ggc_m_16VEC_eh_region_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_eh_region_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_eh_region_gc (void *);
+#define gt_ggc_m_10eh_catch_d(X) do { \
+ if (X != NULL) gt_ggc_mx_eh_catch_d (X);\
+ } while (0)
+extern void gt_ggc_mx_eh_catch_d (void *);
+#define gt_ggc_m_16eh_landing_pad_d(X) do { \
+ if (X != NULL) gt_ggc_mx_eh_landing_pad_d (X);\
+ } while (0)
+extern void gt_ggc_mx_eh_landing_pad_d (void *);
+#define gt_ggc_m_11eh_region_d(X) do { \
+ if (X != NULL) gt_ggc_mx_eh_region_d (X);\
+ } while (0)
+extern void gt_ggc_mx_eh_region_d (void *);
+#define gt_ggc_m_16var_loc_list_def(X) do { \
+ if (X != NULL) gt_ggc_mx_var_loc_list_def (X);\
+ } while (0)
+extern void gt_ggc_mx_var_loc_list_def (void *);
+#define gt_ggc_m_12var_loc_node(X) do { \
+ if (X != NULL) gt_ggc_mx_var_loc_node (X);\
+ } while (0)
+extern void gt_ggc_mx_var_loc_node (void *);
+#define gt_ggc_m_20VEC_die_arg_entry_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_die_arg_entry_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_die_arg_entry_gc (void *);
+#define gt_ggc_m_16limbo_die_struct(X) do { \
+ if (X != NULL) gt_ggc_mx_limbo_die_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_limbo_die_struct (void *);
+#define gt_ggc_m_20VEC_pubname_entry_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_pubname_entry_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_pubname_entry_gc (void *);
+#define gt_ggc_m_19VEC_dw_attr_node_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_dw_attr_node_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_dw_attr_node_gc (void *);
+#define gt_ggc_m_25dw_ranges_by_label_struct(X) do { \
+ if (X != NULL) gt_ggc_mx_dw_ranges_by_label_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_dw_ranges_by_label_struct (void *);
+#define gt_ggc_m_16dw_ranges_struct(X) do { \
+ if (X != NULL) gt_ggc_mx_dw_ranges_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_dw_ranges_struct (void *);
+#define gt_ggc_m_28dw_separate_line_info_struct(X) do { \
+ if (X != NULL) gt_ggc_mx_dw_separate_line_info_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_dw_separate_line_info_struct (void *);
+#define gt_ggc_m_19dw_line_info_struct(X) do { \
+ if (X != NULL) gt_ggc_mx_dw_line_info_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_dw_line_info_struct (void *);
+#define gt_ggc_m_25VEC_deferred_locations_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_deferred_locations_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_deferred_locations_gc (void *);
+#define gt_ggc_m_18dw_loc_list_struct(X) do { \
+ if (X != NULL) gt_ggc_mx_dw_loc_list_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_dw_loc_list_struct (void *);
+#define gt_ggc_m_15dwarf_file_data(X) do { \
+ if (X != NULL) gt_ggc_mx_dwarf_file_data (X);\
+ } while (0)
+extern void gt_ggc_mx_dwarf_file_data (void *);
+#define gt_ggc_m_15queued_reg_save(X) do { \
+ if (X != NULL) gt_ggc_mx_queued_reg_save (X);\
+ } while (0)
+extern void gt_ggc_mx_queued_reg_save (void *);
+#define gt_ggc_m_20indirect_string_node(X) do { \
+ if (X != NULL) gt_ggc_mx_indirect_string_node (X);\
+ } while (0)
+extern void gt_ggc_mx_indirect_string_node (void *);
+#define gt_ggc_m_19dw_loc_descr_struct(X) do { \
+ if (X != NULL) gt_ggc_mx_dw_loc_descr_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_dw_loc_descr_struct (void *);
+#define gt_ggc_m_13dw_fde_struct(X) do { \
+ if (X != NULL) gt_ggc_mx_dw_fde_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_dw_fde_struct (void *);
+#define gt_ggc_m_13dw_cfi_struct(X) do { \
+ if (X != NULL) gt_ggc_mx_dw_cfi_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_dw_cfi_struct (void *);
+#define gt_ggc_m_8typeinfo(X) do { \
+ if (X != NULL) gt_ggc_mx_typeinfo (X);\
+ } while (0)
+extern void gt_ggc_mx_typeinfo (void *);
+#define gt_ggc_m_22VEC_alias_set_entry_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_alias_set_entry_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_alias_set_entry_gc (void *);
+#define gt_ggc_m_17alias_set_entry_d(X) do { \
+ if (X != NULL) gt_ggc_mx_alias_set_entry_d (X);\
+ } while (0)
+extern void gt_ggc_mx_alias_set_entry_d (void *);
+#define gt_ggc_m_24constant_descriptor_tree(X) do { \
+ if (X != NULL) gt_ggc_mx_constant_descriptor_tree (X);\
+ } while (0)
+extern void gt_ggc_mx_constant_descriptor_tree (void *);
+#define gt_ggc_m_15cgraph_asm_node(X) do { \
+ if (X != NULL) gt_ggc_mx_cgraph_asm_node (X);\
+ } while (0)
+extern void gt_ggc_mx_cgraph_asm_node (void *);
+#define gt_ggc_m_12varpool_node(X) do { \
+ if (X != NULL) gt_ggc_mx_varpool_node (X);\
+ } while (0)
+extern void gt_ggc_mx_varpool_node (void *);
+#define gt_ggc_m_11cgraph_edge(X) do { \
+ if (X != NULL) gt_ggc_mx_cgraph_edge (X);\
+ } while (0)
+extern void gt_ggc_mx_cgraph_edge (void *);
+#define gt_ggc_m_24VEC_ipa_replace_map_p_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_ipa_replace_map_p_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_ipa_replace_map_p_gc (void *);
+#define gt_ggc_m_15ipa_replace_map(X) do { \
+ if (X != NULL) gt_ggc_mx_ipa_replace_map (X);\
+ } while (0)
+extern void gt_ggc_mx_ipa_replace_map (void *);
+#define gt_ggc_m_11cgraph_node(X) do { \
+ if (X != NULL) gt_ggc_mx_cgraph_node (X);\
+ } while (0)
+extern void gt_ggc_mx_cgraph_node (void *);
+#define gt_ggc_m_18VEC_basic_block_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_basic_block_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_basic_block_gc (void *);
+#define gt_ggc_m_14gimple_bb_info(X) do { \
+ if (X != NULL) gt_ggc_mx_gimple_bb_info (X);\
+ } while (0)
+extern void gt_ggc_mx_gimple_bb_info (void *);
+#define gt_ggc_m_11rtl_bb_info(X) do { \
+ if (X != NULL) gt_ggc_mx_rtl_bb_info (X);\
+ } while (0)
+extern void gt_ggc_mx_rtl_bb_info (void *);
+#define gt_ggc_m_11VEC_edge_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_edge_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_edge_gc (void *);
+#define gt_ggc_m_17cselib_val_struct(X) do { \
+ if (X != NULL) gt_ggc_mx_cselib_val_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_cselib_val_struct (void *);
+#define gt_ggc_m_12elt_loc_list(X) do { \
+ if (X != NULL) gt_ggc_mx_elt_loc_list (X);\
+ } while (0)
+extern void gt_ggc_mx_elt_loc_list (void *);
+#define gt_ggc_m_13VEC_loop_p_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_loop_p_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_loop_p_gc (void *);
+#define gt_ggc_m_4loop(X) do { \
+ if (X != NULL) gt_ggc_mx_loop (X);\
+ } while (0)
+extern void gt_ggc_mx_loop (void *);
+#define gt_ggc_m_9loop_exit(X) do { \
+ if (X != NULL) gt_ggc_mx_loop_exit (X);\
+ } while (0)
+extern void gt_ggc_mx_loop_exit (void *);
+#define gt_ggc_m_13nb_iter_bound(X) do { \
+ if (X != NULL) gt_ggc_mx_nb_iter_bound (X);\
+ } while (0)
+extern void gt_ggc_mx_nb_iter_bound (void *);
+#define gt_ggc_m_24types_used_by_vars_entry(X) do { \
+ if (X != NULL) gt_ggc_mx_types_used_by_vars_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_types_used_by_vars_entry (void *);
+#define gt_ggc_m_17language_function(X) do { \
+ if (X != NULL) gt_ggc_mx_language_function (X);\
+ } while (0)
+extern void gt_ggc_mx_language_function (void *);
+#define gt_ggc_m_5loops(X) do { \
+ if (X != NULL) gt_ggc_mx_loops (X);\
+ } while (0)
+extern void gt_ggc_mx_loops (void *);
+#define gt_ggc_m_18control_flow_graph(X) do { \
+ if (X != NULL) gt_ggc_mx_control_flow_graph (X);\
+ } while (0)
+extern void gt_ggc_mx_control_flow_graph (void *);
+#define gt_ggc_m_9eh_status(X) do { \
+ if (X != NULL) gt_ggc_mx_eh_status (X);\
+ } while (0)
+extern void gt_ggc_mx_eh_status (void *);
+#define gt_ggc_m_20initial_value_struct(X) do { \
+ if (X != NULL) gt_ggc_mx_initial_value_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_initial_value_struct (void *);
+#define gt_ggc_m_17rtx_constant_pool(X) do { \
+ if (X != NULL) gt_ggc_mx_rtx_constant_pool (X);\
+ } while (0)
+extern void gt_ggc_mx_rtx_constant_pool (void *);
+#define gt_ggc_m_18VEC_temp_slot_p_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_temp_slot_p_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_temp_slot_p_gc (void *);
+#define gt_ggc_m_9temp_slot(X) do { \
+ if (X != NULL) gt_ggc_mx_temp_slot (X);\
+ } while (0)
+extern void gt_ggc_mx_temp_slot (void *);
+#define gt_ggc_m_9gimple_df(X) do { \
+ if (X != NULL) gt_ggc_mx_gimple_df (X);\
+ } while (0)
+extern void gt_ggc_mx_gimple_df (void *);
+#define gt_ggc_m_23VEC_call_site_record_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_call_site_record_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_call_site_record_gc (void *);
+#define gt_ggc_m_18call_site_record_d(X) do { \
+ if (X != NULL) gt_ggc_mx_call_site_record_d (X);\
+ } while (0)
+extern void gt_ggc_mx_call_site_record_d (void *);
+#define gt_ggc_m_14sequence_stack(X) do { \
+ if (X != NULL) gt_ggc_mx_sequence_stack (X);\
+ } while (0)
+extern void gt_ggc_mx_sequence_stack (void *);
+#define gt_ggc_m_8elt_list(X) do { \
+ if (X != NULL) gt_ggc_mx_elt_list (X);\
+ } while (0)
+extern void gt_ggc_mx_elt_list (void *);
+#define gt_ggc_m_17tree_priority_map(X) do { \
+ if (X != NULL) gt_ggc_mx_tree_priority_map (X);\
+ } while (0)
+extern void gt_ggc_mx_tree_priority_map (void *);
+#define gt_ggc_m_12tree_int_map(X) do { \
+ if (X != NULL) gt_ggc_mx_tree_int_map (X);\
+ } while (0)
+extern void gt_ggc_mx_tree_int_map (void *);
+#define gt_ggc_m_8tree_map(X) do { \
+ if (X != NULL) gt_ggc_mx_tree_map (X);\
+ } while (0)
+extern void gt_ggc_mx_tree_map (void *);
+#define gt_ggc_m_14lang_tree_node(X) do { \
+ if (X != NULL) gt_ggc_mx_lang_tree_node (X);\
+ } while (0)
+extern void gt_ggc_mx_lang_tree_node (void *);
+#define gt_ggc_m_24tree_statement_list_node(X) do { \
+ if (X != NULL) gt_ggc_mx_tree_statement_list_node (X);\
+ } while (0)
+extern void gt_ggc_mx_tree_statement_list_node (void *);
+#define gt_ggc_m_9lang_decl(X) do { \
+ if (X != NULL) gt_ggc_mx_lang_decl (X);\
+ } while (0)
+extern void gt_ggc_mx_lang_decl (void *);
+#define gt_ggc_m_9lang_type(X) do { \
+ if (X != NULL) gt_ggc_mx_lang_type (X);\
+ } while (0)
+extern void gt_ggc_mx_lang_type (void *);
+#define gt_ggc_m_10die_struct(X) do { \
+ if (X != NULL) gt_ggc_mx_die_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_die_struct (void *);
+#define gt_ggc_m_15varray_head_tag(X) do { \
+ if (X != NULL) gt_ggc_mx_varray_head_tag (X);\
+ } while (0)
+extern void gt_ggc_mx_varray_head_tag (void *);
+#define gt_ggc_m_12ptr_info_def(X) do { \
+ if (X != NULL) gt_ggc_mx_ptr_info_def (X);\
+ } while (0)
+extern void gt_ggc_mx_ptr_info_def (void *);
+#define gt_ggc_m_22VEC_constructor_elt_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_constructor_elt_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_constructor_elt_gc (void *);
+#define gt_ggc_m_10tree_ann_d(X) do { \
+ if (X != NULL) gt_ggc_mx_tree_ann_d (X);\
+ } while (0)
+extern void gt_ggc_mx_tree_ann_d (void *);
+#define gt_ggc_m_17VEC_alias_pair_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_alias_pair_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_alias_pair_gc (void *);
+#define gt_ggc_m_11VEC_tree_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_tree_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_tree_gc (void *);
+#define gt_ggc_m_12VEC_uchar_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_uchar_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_uchar_gc (void *);
+#define gt_ggc_m_8function(X) do { \
+ if (X != NULL) gt_ggc_mx_function (X);\
+ } while (0)
+extern void gt_ggc_mx_function (void *);
+#define gt_ggc_m_23constant_descriptor_rtx(X) do { \
+ if (X != NULL) gt_ggc_mx_constant_descriptor_rtx (X);\
+ } while (0)
+extern void gt_ggc_mx_constant_descriptor_rtx (void *);
+#define gt_ggc_m_11fixed_value(X) do { \
+ if (X != NULL) gt_ggc_mx_fixed_value (X);\
+ } while (0)
+extern void gt_ggc_mx_fixed_value (void *);
+#define gt_ggc_m_10real_value(X) do { \
+ if (X != NULL) gt_ggc_mx_real_value (X);\
+ } while (0)
+extern void gt_ggc_mx_real_value (void *);
+#define gt_ggc_m_10VEC_rtx_gc(X) do { \
+ if (X != NULL) gt_ggc_mx_VEC_rtx_gc (X);\
+ } while (0)
+extern void gt_ggc_mx_VEC_rtx_gc (void *);
+#define gt_ggc_m_12object_block(X) do { \
+ if (X != NULL) gt_ggc_mx_object_block (X);\
+ } while (0)
+extern void gt_ggc_mx_object_block (void *);
+#define gt_ggc_m_9reg_attrs(X) do { \
+ if (X != NULL) gt_ggc_mx_reg_attrs (X);\
+ } while (0)
+extern void gt_ggc_mx_reg_attrs (void *);
+#define gt_ggc_m_9mem_attrs(X) do { \
+ if (X != NULL) gt_ggc_mx_mem_attrs (X);\
+ } while (0)
+extern void gt_ggc_mx_mem_attrs (void *);
+#define gt_ggc_m_14bitmap_obstack(X) do { \
+ if (X != NULL) gt_ggc_mx_bitmap_obstack (X);\
+ } while (0)
+extern void gt_ggc_mx_bitmap_obstack (void *);
+#define gt_ggc_m_18bitmap_element_def(X) do { \
+ if (X != NULL) gt_ggc_mx_bitmap_element_def (X);\
+ } while (0)
+extern void gt_ggc_mx_bitmap_element_def (void *);
+#define gt_ggc_m_16machine_function(X) do { \
+ if (X != NULL) gt_ggc_mx_machine_function (X);\
+ } while (0)
+extern void gt_ggc_mx_machine_function (void *);
+#define gt_ggc_m_17stack_local_entry(X) do { \
+ if (X != NULL) gt_ggc_mx_stack_local_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_stack_local_entry (void *);
+#define gt_ggc_m_15basic_block_def(X) do { \
+ if (X != NULL) gt_ggc_mx_basic_block_def (X);\
+ } while (0)
+extern void gt_ggc_mx_basic_block_def (void *);
+#define gt_ggc_m_8edge_def(X) do { \
+ if (X != NULL) gt_ggc_mx_edge_def (X);\
+ } while (0)
+extern void gt_ggc_mx_edge_def (void *);
+#define gt_ggc_m_17gimple_seq_node_d(X) do { \
+ if (X != NULL) gt_ggc_mx_gimple_seq_node_d (X);\
+ } while (0)
+extern void gt_ggc_mx_gimple_seq_node_d (void *);
+#define gt_ggc_m_12gimple_seq_d(X) do { \
+ if (X != NULL) gt_ggc_mx_gimple_seq_d (X);\
+ } while (0)
+extern void gt_ggc_mx_gimple_seq_d (void *);
+#define gt_ggc_m_7section(X) do { \
+ if (X != NULL) gt_ggc_mx_section (X);\
+ } while (0)
+extern void gt_ggc_mx_section (void *);
+#define gt_ggc_m_18gimple_statement_d(X) do { \
+ if (X != NULL) gt_ggc_mx_gimple_statement_d (X);\
+ } while (0)
+extern void gt_ggc_mx_gimple_statement_d (void *);
+#define gt_ggc_m_9rtvec_def(X) do { \
+ if (X != NULL) gt_ggc_mx_rtvec_def (X);\
+ } while (0)
+extern void gt_ggc_mx_rtvec_def (void *);
+#define gt_ggc_m_7rtx_def(X) do { \
+ if (X != NULL) gt_ggc_mx_rtx_def (X);\
+ } while (0)
+extern void gt_ggc_mx_rtx_def (void *);
+#define gt_ggc_m_15bitmap_head_def(X) do { \
+ if (X != NULL) gt_ggc_mx_bitmap_head_def (X);\
+ } while (0)
+extern void gt_ggc_mx_bitmap_head_def (void *);
+#define gt_ggc_m_9tree_node(X) do { \
+ if (X != NULL) gt_ggc_mx_tree_node (X);\
+ } while (0)
+#define gt_ggc_mx_tree_node gt_ggc_mx_lang_tree_node
+#define gt_ggc_m_6answer(X) do { \
+ if (X != NULL) gt_ggc_mx_answer (X);\
+ } while (0)
+extern void gt_ggc_mx_answer (void *);
+#define gt_ggc_m_9cpp_macro(X) do { \
+ if (X != NULL) gt_ggc_mx_cpp_macro (X);\
+ } while (0)
+extern void gt_ggc_mx_cpp_macro (void *);
+#define gt_ggc_m_9cpp_token(X) do { \
+ if (X != NULL) gt_ggc_mx_cpp_token (X);\
+ } while (0)
+extern void gt_ggc_mx_cpp_token (void *);
+#define gt_ggc_m_9line_maps(X) do { \
+ if (X != NULL) gt_ggc_mx_line_maps (X);\
+ } while (0)
+extern void gt_ggc_mx_line_maps (void *);
+extern void gt_ggc_m_II17splay_tree_node_s (void *);
+extern void gt_ggc_m_SP9tree_node17splay_tree_node_s (void *);
+extern void gt_ggc_m_P9tree_nodeP9tree_node17splay_tree_node_s (void *);
+extern void gt_ggc_m_IP9tree_node17splay_tree_node_s (void *);
+extern void gt_ggc_m_P13tree_llvm_map4htab (void *);
+
+void
+gt_ggc_m_P13tree_llvm_map4htab (void *x_p)
+{
+ struct htab * const x = (struct htab *)x_p;
+ if (ggc_test_and_set_mark (x))
+ {
+ if ((*x).entries != NULL) {
+ size_t i0;
+ for (i0 = 0; i0 != (size_t)(((*x)).size); i0++) {
+ gt_ggc_m_13tree_llvm_map ((*x).entries[i0]);
+ }
+ ggc_mark ((*x).entries);
+ }
+ }
+}
+extern void gt_ggc_m_P15interface_tuple4htab (void *);
+extern void gt_ggc_m_P16volatilized_type4htab (void *);
+extern void gt_ggc_m_P17string_descriptor4htab (void *);
+extern void gt_ggc_m_P14type_assertion4htab (void *);
+extern void gt_ggc_m_P18treetreehash_entry4htab (void *);
+extern void gt_ggc_m_P17module_htab_entry4htab (void *);
+extern void gt_ggc_m_P16def_pragma_macro4htab (void *);
+extern void gt_ggc_m_P21pending_abstract_type4htab (void *);
+extern void gt_ggc_m_P10spec_entry4htab (void *);
+extern void gt_ggc_m_P16cxx_int_tree_map4htab (void *);
+extern void gt_ggc_m_P17named_label_entry4htab (void *);
+extern void gt_ggc_m_P12tree_int_map4htab (void *);
+extern void gt_ggc_m_IP9tree_node12splay_tree_s (void *);
+extern void gt_ggc_m_P9tree_nodeP9tree_node12splay_tree_s (void *);
+extern void gt_ggc_m_P12varpool_node4htab (void *);
+extern void gt_ggc_m_P13scev_info_str4htab (void *);
+extern void gt_ggc_m_P23constant_descriptor_rtx4htab (void *);
+extern void gt_ggc_m_P24constant_descriptor_tree4htab (void *);
+extern void gt_ggc_m_P12object_block4htab (void *);
+extern void gt_ggc_m_P7section4htab (void *);
+extern void gt_ggc_m_P17tree_priority_map4htab (void *);
+extern void gt_ggc_m_P8tree_map4htab (void *);
+extern void gt_ggc_m_P9type_hash4htab (void *);
+extern void gt_ggc_m_P13libfunc_entry4htab (void *);
+extern void gt_ggc_m_P23temp_slot_address_entry4htab (void *);
+extern void gt_ggc_m_P15throw_stmt_node4htab (void *);
+extern void gt_ggc_m_P9reg_attrs4htab (void *);
+extern void gt_ggc_m_P9mem_attrs4htab (void *);
+extern void gt_ggc_m_P7rtx_def4htab (void *);
+extern void gt_ggc_m_SP9tree_node12splay_tree_s (void *);
+extern void gt_ggc_m_P16var_loc_list_def4htab (void *);
+extern void gt_ggc_m_P10die_struct4htab (void *);
+extern void gt_ggc_m_P15dwarf_file_data4htab (void *);
+extern void gt_ggc_m_P20indirect_string_node4htab (void *);
+extern void gt_ggc_m_P11cgraph_node4htab (void *);
+extern void gt_ggc_m_II12splay_tree_s (void *);
+extern void gt_ggc_m_P11cgraph_edge4htab (void *);
+extern void gt_ggc_m_P9loop_exit4htab (void *);
+extern void gt_ggc_m_P24types_used_by_vars_entry4htab (void *);
+extern void gt_ggc_m_P9tree_node4htab (void *);
+
+/* GC roots. */
+
+EXPORTED_CONST struct ggc_cache_tab gt_ggc_rc__gt_llvm_cache_h[] = {
+ {
+ &llvm_cache,
+ 1,
+ sizeof (llvm_cache),
+ &gt_ggc_mx_tree_llvm_map,
+ NULL,
+ &tree_llvm_map_marked_p
+ },
+ LAST_GGC_CACHE_TAB
+};
+
diff --git a/dragonegg/linux/llvm-os.h b/dragonegg/linux/llvm-os.h
new file mode 100644
index 00000000000..4f521d9d8a6
--- /dev/null
+++ b/dragonegg/linux/llvm-os.h
@@ -0,0 +1,31 @@
+/* Linux specific definitions
+Copyright (C) 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+#ifndef LLVM_OS_H
+#define LLVM_OS_H
+
+/* Yes, we support PIC codegen for linux targets! */
+#define LLVM_SET_TARGET_OPTIONS(argvec) \
+ if (flag_pic) \
+ argvec.push_back ("--relocation-model=pic"); \
+ else \
+ argvec.push_back ("--relocation-model=static");
+
+#endif /* LLVM_OS_H */
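
For illustration, flag_pic in the macro above is set by the usual -fpic/-fPIC
options, so on Linux the plugin would configure LLVM roughly as follows
(hypothetical invocations; the source file name is a placeholder):

  gcc -fPIC -fplugin=./dragonegg.so -c foo.c   # passes --relocation-model=pic
  gcc -fplugin=./dragonegg.so -c foo.c         # passes --relocation-model=static
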
diff --git a/dragonegg/llvm-abi.h b/dragonegg/llvm-abi.h
new file mode 100644
index 00000000000..90adb10d320
--- /dev/null
+++ b/dragonegg/llvm-abi.h
@@ -0,0 +1,1148 @@
+/* Processor ABI customization hooks
+Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
+Contributed by Chris Lattner (sabre@nondot.org)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+//===----------------------------------------------------------------------===//
+// This is a C++ header file that specifies how argument values are passed and
+// returned from function calls. This allows the target to specialize handling
+// of things like how structures are passed by-value.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ABI_H
+#define LLVM_ABI_H
+
+// LLVM headers
+#include "llvm/Attributes.h"
+#include "llvm/CallingConv.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Target/TargetData.h"
+
+// System headers
+#include <gmp.h>
+
+// GCC headers
+#undef VISIBILITY_HIDDEN
+
+extern "C" {
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tree.h"
+}
+
+// Plugin headers
+#include "llvm-internal.h"
+#include "llvm-target.h"
+
+namespace llvm {
+ class BasicBlock;
+}
+
+/// DefaultABIClient - This is a simple implementation of the ABI client
+/// interface that can be subclassed.
+struct DefaultABIClient {
+ bool isShadowReturn() { return false; }
+
+ /// HandleScalarResult - This callback is invoked if the function returns a
+ /// simple scalar result value, which is of type RetTy.
+ void HandleScalarResult(const Type *RetTy) {}
+
+ /// HandleAggregateResultAsScalar - This callback is invoked if the function
+ /// returns an aggregate value by bit converting it to the specified scalar
+ /// type and returning that. The bit conversion should start at byte Offset
+ /// within the struct, and ScalarTy is not necessarily big enough to cover
+ /// the entire struct.
+ void HandleAggregateResultAsScalar(const Type *ScalarTy, unsigned Offset=0) {}
+
+ /// HandleAggregateResultAsAggregate - This callback is invoked if the function
+ /// returns an aggregate value using multiple return values.
+ void HandleAggregateResultAsAggregate(const Type *AggrTy) {}
+
+ /// HandleAggregateShadowResult - This callback is invoked if the function
+ /// returns an aggregate value by using a "shadow" first parameter, which is
+ /// a pointer to the aggregate, of type PtrArgTy. If RetPtr is set to true,
+ /// the pointer argument itself is returned from the function.
+ void HandleAggregateShadowResult(const PointerType *PtrArgTy, bool RetPtr){}
+
+ /// HandleScalarShadowResult - This callback is invoked if the function
+ /// returns a scalar value by using a "shadow" first parameter, which is a
+ /// pointer to the scalar, of type PtrArgTy. If RetPtr is set to true,
+ /// the pointer argument itself is returned from the function.
+ void HandleScalarShadowResult(const PointerType *PtrArgTy, bool RetPtr) {}
+
+
+ /// HandleScalarArgument - This is the primary callback that specifies an
+ /// LLVM argument to pass. It is only used for first class types.
+  /// If RealSize is non-zero then it specifies the number of bytes to access
+ /// from LLVMTy.
+ void HandleScalarArgument(const llvm::Type *LLVMTy, tree type,
+ unsigned RealSize = 0) {}
+
+ /// HandleByInvisibleReferenceArgument - This callback is invoked if a pointer
+ /// (of type PtrTy) to the argument is passed rather than the argument itself.
+ void HandleByInvisibleReferenceArgument(const llvm::Type *PtrTy, tree type) {}
+
+ /// HandleByValArgument - This callback is invoked if the aggregate function
+ /// argument is passed by value.
+ void HandleByValArgument(const llvm::Type *LLVMTy, tree type) {}
+
+ /// HandleFCAArgument - This callback is invoked if the aggregate function
+ /// argument is passed by value as a first class aggregate.
+ void HandleFCAArgument(const llvm::Type *LLVMTy, tree type) {}
+
+  /// EnterField - Called when we're about to enter the field of a struct
+ /// or union. FieldNo is the number of the element we are entering in the
+ /// LLVM Struct, StructTy is the LLVM type of the struct we are entering.
+ void EnterField(unsigned FieldNo, const llvm::Type *StructTy) {}
+ void ExitField() {}
+};
+
+// LLVM_SHOULD_NOT_RETURN_COMPLEX_IN_MEMORY - A hook to allow
+// special _Complex handling. Return true if X should be returned using
+// a multiple value return instruction.
+#ifndef LLVM_SHOULD_NOT_RETURN_COMPLEX_IN_MEMORY
+#define LLVM_SHOULD_NOT_RETURN_COMPLEX_IN_MEMORY(X) \
+ false
+#endif
+
+// doNotUseShadowReturn - Return true if the specified GCC type
+// should not be returned using a pointer to struct parameter.
+static inline bool doNotUseShadowReturn(tree type, tree fndecl) {
+ if (!TYPE_SIZE(type))
+ return false;
+ if (TREE_CODE(TYPE_SIZE(type)) != INTEGER_CST)
+ return false;
+ // LLVM says do not use shadow argument.
+ if (LLVM_SHOULD_NOT_RETURN_COMPLEX_IN_MEMORY(type))
+ return true;
+ // GCC says use shadow argument.
+ if (aggregate_value_p(type, fndecl))
+ return false;
+ return true;
+}
+
+/// isSingleElementStructOrArray - If this is (recursively) a structure with one
+/// field or an array with one element, return the field type, otherwise return
+/// null. If ignoreZeroLength, the struct (recursively) may include zero-length
+/// fields in addition to the single element that has data. If
+/// rejectFatBitField, and the single element is a bitfield of a type that's
+/// bigger than the struct, return null anyway.
+static inline
+tree isSingleElementStructOrArray(tree type, bool ignoreZeroLength,
+ bool rejectFatBitfield) {
+ // Scalars are good.
+ if (!AGGREGATE_TYPE_P(type)) return type;
+
+ tree FoundField = 0;
+ switch (TREE_CODE(type)) {
+ case QUAL_UNION_TYPE:
+ case UNION_TYPE: // Single element unions don't count.
+ case COMPLEX_TYPE: // Complex values are like 2-element records.
+ default:
+ return 0;
+ case RECORD_TYPE:
+ // If this record has variable length, reject it.
+ if (TREE_CODE(TYPE_SIZE(type)) != INTEGER_CST)
+ return 0;
+
+ for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field))
+ if (TREE_CODE(Field) == FIELD_DECL) {
+ if (ignoreZeroLength) {
+ if (DECL_SIZE(Field) &&
+ TREE_CODE(DECL_SIZE(Field)) == INTEGER_CST &&
+ TREE_INT_CST_LOW(DECL_SIZE(Field)) == 0)
+ continue;
+ }
+ if (!FoundField) {
+ if (rejectFatBitfield &&
+ TREE_CODE(TYPE_SIZE(type)) == INTEGER_CST &&
+ TREE_INT_CST_LOW(TYPE_SIZE(getDeclaredType(Field))) >
+ TREE_INT_CST_LOW(TYPE_SIZE(type)))
+ return 0;
+ FoundField = getDeclaredType(Field);
+ } else {
+ return 0; // More than one field.
+ }
+ }
+ return FoundField ? isSingleElementStructOrArray(FoundField,
+ ignoreZeroLength, false)
+ : 0;
+ case ARRAY_TYPE:
+ const ArrayType *Ty = dyn_cast<ArrayType>(ConvertType(type));
+ if (!Ty || Ty->getNumElements() != 1)
+ return 0;
+ return isSingleElementStructOrArray(TREE_TYPE(type), false, false);
+ }
+}
+
+/// isZeroSizedStructOrUnion - Returns true if this is a struct or union
+/// which is zero bits wide.
+static inline bool isZeroSizedStructOrUnion(tree type) {
+ if (TREE_CODE(type) != RECORD_TYPE &&
+ TREE_CODE(type) != UNION_TYPE &&
+ TREE_CODE(type) != QUAL_UNION_TYPE)
+ return false;
+ return int_size_in_bytes(type) == 0;
+}
+
+// getLLVMScalarTypeForStructReturn - Return LLVM Type if TY can be
+// returned as a scalar, otherwise return NULL. This is the default
+// target independent implementation.
+static inline
+const Type* getLLVMScalarTypeForStructReturn(tree type, unsigned *Offset) {
+ const Type *Ty = ConvertType(type);
+ unsigned Size = getTargetData().getTypeAllocSize(Ty);
+ *Offset = 0;
+ if (Size == 0)
+ return Type::getVoidTy(getGlobalContext());
+ else if (Size == 1)
+ return Type::getInt8Ty(getGlobalContext());
+ else if (Size == 2)
+ return Type::getInt16Ty(getGlobalContext());
+ else if (Size <= 4)
+ return Type::getInt32Ty(getGlobalContext());
+ else if (Size <= 8)
+ return Type::getInt64Ty(getGlobalContext());
+ else if (Size <= 16)
+ return IntegerType::get(getGlobalContext(), 128);
+ else if (Size <= 32)
+ return IntegerType::get(getGlobalContext(), 256);
+
+ return NULL;
+}
+
+// getLLVMAggregateTypeForStructReturn - Return LLVM type if TY can be
+// returned as multiple values, otherwise return NULL. This is the default
+// target independent implementation.
+static inline const Type* getLLVMAggregateTypeForStructReturn(tree type) {
+ return NULL;
+}
+
+// LLVM_SHOULD_PASS_VECTOR_IN_INTEGER_REGS - Return true if this vector
+// type should be passed as integer registers. Generally vectors which are
+// not part of the target architecture should do this.
+#ifndef LLVM_SHOULD_PASS_VECTOR_IN_INTEGER_REGS
+#define LLVM_SHOULD_PASS_VECTOR_IN_INTEGER_REGS(TY) \
+ false
+#endif
+
+// LLVM_SHOULD_PASS_VECTOR_USING_BYVAL_ATTR - Return true if this vector
+// type should be passed byval. Used for generic vectors on x86-64.
+#ifndef LLVM_SHOULD_PASS_VECTOR_USING_BYVAL_ATTR
+#define LLVM_SHOULD_PASS_VECTOR_USING_BYVAL_ATTR(X) \
+ false
+#endif
+
+// LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR - Return true if this aggregate
+// value should be passed by value, i.e. passing its address with the byval
+// attribute bit set. The default is false.
+#ifndef LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR
+#define LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(X, TY) \
+ false
+#endif
+
+// LLVM_SHOULD_PASS_AGGREGATE_AS_FCA - Return true if this aggregate value
+// should be passed by value as a first class aggregate. The default is false.
+#ifndef LLVM_SHOULD_PASS_AGGREGATE_AS_FCA
+#define LLVM_SHOULD_PASS_AGGREGATE_AS_FCA(X, TY) \
+ false
+#endif
+
+// LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS - Return true if this aggregate
+// value should be passed in a mixture of integer, floating point, and vector
+// registers. The routine should also return by reference a vector of the
+// types of the registers being used. The default is false.
+#ifndef LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS
+#define LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(T, TY, CC, E) \
+ false
+#endif
+
+// LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS - Only called if
+// LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS returns true. This returns true if
+// there are only enough unused argument passing registers to pass a part of
+// the aggregate. Note, this routine should return false if none of the needed
+// registers are available.
+#ifndef LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS
+#define LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(E, SE, ISR, CC) \
+ false
+#endif
+
+// LLVM_BYVAL_ALIGNMENT - Returns the alignment of the type in bytes, if known,
+// in the context of its use as a function parameter.
+// Note that the alignment in the TYPE node is usually the alignment appropriate
+// when the type is used within a struct, which may or may not be appropriate
+// here.
+#ifndef LLVM_BYVAL_ALIGNMENT
+#define LLVM_BYVAL_ALIGNMENT(T) 0
+#endif
+
+// LLVM_SHOULD_PASS_AGGREGATE_IN_INTEGER_REGS - Return true if this aggregate
+// value should be passed in integer registers. By default, we do this for all
+// values that are not single-element structs. This ensures that things like
+// {short,short} are passed in one 32-bit chunk, not as two arguments (which
+// would often be 64-bits). We also do it for single-element structs when the
+// single element is a bitfield of a type bigger than the struct; the code
+// for field-by-field struct passing does not handle this one right.
+#ifndef LLVM_SHOULD_PASS_AGGREGATE_IN_INTEGER_REGS
+#define LLVM_SHOULD_PASS_AGGREGATE_IN_INTEGER_REGS(X, Y, Z) \
+ !isSingleElementStructOrArray((X), false, true)
+#endif
+
+// LLVM_SHOULD_RETURN_SELT_STRUCT_AS_SCALAR - Return a TYPE tree if this single
+// element struct should be returned using the convention for that scalar TYPE,
+// 0 otherwise.
+// The returned TYPE must be the same size as X for this to work; that is
+// checked elsewhere. (Structs where this is not the case can be constructed
+// by abusing the __aligned__ attribute.)
+#ifndef LLVM_SHOULD_RETURN_SELT_STRUCT_AS_SCALAR
+#define LLVM_SHOULD_RETURN_SELT_STRUCT_AS_SCALAR(X) \
+ isSingleElementStructOrArray(X, false, false)
+#endif
+
+// LLVM_SHOULD_RETURN_VECTOR_AS_SCALAR - Return a TYPE tree if this vector type
+// should be returned using the convention for that scalar TYPE, 0 otherwise.
+// X may be evaluated more than once.
+#ifndef LLVM_SHOULD_RETURN_VECTOR_AS_SCALAR
+#define LLVM_SHOULD_RETURN_VECTOR_AS_SCALAR(X,Y) 0
+#endif
+
+// LLVM_SHOULD_RETURN_VECTOR_AS_SHADOW - Return nonzero if this vector type
+// should be returned using the aggregate shadow (sret) convention, 0 otherwise.
+// X may be evaluated more than once.
+#ifndef LLVM_SHOULD_RETURN_VECTOR_AS_SHADOW
+#define LLVM_SHOULD_RETURN_VECTOR_AS_SHADOW(X,Y) 0
+#endif
+
+// LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN - Return LLVM Type if X can be
+// returned as a scalar, otherwise return NULL.
+#ifndef LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN
+#define LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN(X, Y) \
+ getLLVMScalarTypeForStructReturn((X), (Y))
+#endif
+
+// LLVM_AGGR_TYPE_FOR_STRUCT_RETURN - Return LLVM Type if X can be
+// returned as an aggregate, otherwise return NULL.
+#ifndef LLVM_AGGR_TYPE_FOR_STRUCT_RETURN
+#define LLVM_AGGR_TYPE_FOR_STRUCT_RETURN(X) \
+ getLLVMAggregateTypeForStructReturn(X)
+#endif
+
+// LLVM_EXTRACT_MULTIPLE_RETURN_VALUE - Extract multiple return values from
+// SRC and assign them to DEST. Each target that supports multiple return
+// values must implement this hook.
+#ifndef LLVM_EXTRACT_MULTIPLE_RETURN_VALUE
+#define LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(Src,Dest,V,B) \
+ llvm_default_extract_multiple_return_value((Src),(Dest),(V),(B))
+#endif
+static inline
+void llvm_default_extract_multiple_return_value(Value *Src, Value *Dest,
+ bool isVolatile,
+ LLVMBuilder &Builder) {
+ assert (0 && "LLVM_EXTRACT_MULTIPLE_RETURN_VALUE is not implemented!");
+}
+
+/// DefaultABI - This class implements the default LLVM ABI where structures are
+/// passed by decimating them into individual components and unions are passed
+/// by passing the largest member of the union.
+///
+template<typename Client>
+class DefaultABI {
+protected:
+ Client &C;
+public:
+ DefaultABI(Client &c) : C(c) {}
+
+ bool isShadowReturn() const { return C.isShadowReturn(); }
+
+ /// HandleReturnType - This is invoked by the target-independent code for the
+ /// return type. It potentially breaks down the argument and invokes methods
+ /// on the client that indicate how its pieces should be handled. This
+ /// handles things like returning structures via hidden parameters.
+ void HandleReturnType(tree type, tree fn, bool isBuiltin) {
+ unsigned Offset = 0;
+ const Type *Ty = ConvertType(type);
+ if (isa<VectorType>(Ty)) {
+ // Vector handling is weird on x86. In particular, builtin and
+ // non-builtin functions with the same return type can use different
+ // calling conventions.
+ tree ScalarType = LLVM_SHOULD_RETURN_VECTOR_AS_SCALAR(type, isBuiltin);
+ if (ScalarType)
+ C.HandleAggregateResultAsScalar(ConvertType(ScalarType));
+ else if (LLVM_SHOULD_RETURN_VECTOR_AS_SHADOW(type, isBuiltin))
+ C.HandleScalarShadowResult(Ty->getPointerTo(), false);
+ else
+ C.HandleScalarResult(Ty);
+ } else if (Ty->isSingleValueType() || Ty->isVoidTy()) {
+ // Return scalar values normally.
+ C.HandleScalarResult(Ty);
+ } else if (doNotUseShadowReturn(type, fn)) {
+ tree SingleElt = LLVM_SHOULD_RETURN_SELT_STRUCT_AS_SCALAR(type);
+ if (SingleElt && TYPE_SIZE(SingleElt) &&
+ TREE_CODE(TYPE_SIZE(SingleElt)) == INTEGER_CST &&
+ TREE_INT_CST_LOW(TYPE_SIZE_UNIT(type)) ==
+ TREE_INT_CST_LOW(TYPE_SIZE_UNIT(SingleElt))) {
+ C.HandleAggregateResultAsScalar(ConvertType(SingleElt));
+ } else {
+ // Otherwise return as an integer value large enough to hold the entire
+ // aggregate.
+ if (const Type *AggrTy = LLVM_AGGR_TYPE_FOR_STRUCT_RETURN(type))
+ C.HandleAggregateResultAsAggregate(AggrTy);
+ else if (const Type* ScalarTy =
+ LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN(type, &Offset))
+ C.HandleAggregateResultAsScalar(ScalarTy, Offset);
+ else {
+ assert(0 && "Unable to determine how to return this aggregate!");
+ abort();
+ }
+ }
+ } else {
+ // If the function is returning a struct or union, we pass the pointer to
+ // the struct as the first argument to the function.
+
+ // FIXME: should return the hidden first argument for some targets
+ // (e.g. ELF i386).
+ C.HandleAggregateShadowResult(Ty->getPointerTo(), false);
+ }
+ }
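+
+ // Illustrative walk-through (an assumption, not taken from the patch): with
+ // only the default hooks above, returning struct { int x; int y; } from a
+ // function for which doNotUseShadowReturn() is true ends up in
+ // HandleAggregateResultAsScalar(i64), while a type for which
+ // doNotUseShadowReturn() is false is returned through a hidden pointer via
+ // HandleAggregateShadowResult().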
+
+ /// HandleArgument - This is invoked by the target-independent code for each
+ /// argument type passed into the function. It potentially breaks down the
+ /// argument and invokes methods on the client that indicate how its pieces
+ /// should be handled. This handles things like decimating structures into
+ /// their fields.
+ void HandleArgument(tree type, std::vector<const Type*> &ScalarElts,
+ Attributes *Attributes = NULL) {
+ unsigned Size = 0;
+ bool DontCheckAlignment = false;
+ const Type *Ty = ConvertType(type);
+ // Figure out if this field is zero bits wide, e.g. {} or [0 x int]. Do
+ // not include variable sized fields here.
+ std::vector<const Type*> Elts;
+ if (Ty->isVoidTy()) {
+ // Handle void explicitly as an opaque type.
+ const Type *OpTy = OpaqueType::get(getGlobalContext());
+ C.HandleScalarArgument(OpTy, type);
+ ScalarElts.push_back(OpTy);
+ } else if (isPassedByInvisibleReference(type)) { // variable size -> by-ref.
+ const Type *PtrTy = Ty->getPointerTo();
+ C.HandleByInvisibleReferenceArgument(PtrTy, type);
+ ScalarElts.push_back(PtrTy);
+ } else if (isa<VectorType>(Ty)) {
+ if (LLVM_SHOULD_PASS_VECTOR_IN_INTEGER_REGS(type)) {
+ PassInIntegerRegisters(type, Ty, ScalarElts, 0, false);
+ } else if (LLVM_SHOULD_PASS_VECTOR_USING_BYVAL_ATTR(type)) {
+ C.HandleByValArgument(Ty, type);
+ if (Attributes) {
+ *Attributes |= Attribute::ByVal;
+ *Attributes |=
+ Attribute::constructAlignmentFromInt(LLVM_BYVAL_ALIGNMENT(type));
+ }
+ } else {
+ C.HandleScalarArgument(Ty, type);
+ ScalarElts.push_back(Ty);
+ }
+ } else if (Ty->isSingleValueType()) {
+ C.HandleScalarArgument(Ty, type);
+ ScalarElts.push_back(Ty);
+ } else if (LLVM_SHOULD_PASS_AGGREGATE_AS_FCA(type, Ty)) {
+ C.HandleFCAArgument(Ty, type);
+ } else if (LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(type, Ty,
+ C.getCallingConv(),
+ Elts)) {
+ if (!LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(Elts, ScalarElts,
+ C.isShadowReturn(),
+ C.getCallingConv()))
+ PassInMixedRegisters(type, Ty, Elts, ScalarElts);
+ else {
+ C.HandleByValArgument(Ty, type);
+ if (Attributes) {
+ *Attributes |= Attribute::ByVal;
+ *Attributes |=
+ Attribute::constructAlignmentFromInt(LLVM_BYVAL_ALIGNMENT(type));
+ }
+ }
+ } else if (LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(type, Ty)) {
+ C.HandleByValArgument(Ty, type);
+ if (Attributes) {
+ *Attributes |= Attribute::ByVal;
+ *Attributes |=
+ Attribute::constructAlignmentFromInt(LLVM_BYVAL_ALIGNMENT(type));
+ }
+ } else if (LLVM_SHOULD_PASS_AGGREGATE_IN_INTEGER_REGS(type, &Size,
+ &DontCheckAlignment)) {
+ PassInIntegerRegisters(type, Ty, ScalarElts, Size, DontCheckAlignment);
+ } else if (isZeroSizedStructOrUnion(type)) {
+ // Zero sized struct or union, just drop it!
+ ;
+ } else if (TREE_CODE(type) == RECORD_TYPE) {
+ for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field))
+ if (TREE_CODE(Field) == FIELD_DECL) {
+ const tree Ftype = getDeclaredType(Field);
+ const Type *FTy = ConvertType(Ftype);
+ unsigned FNo = GetFieldIndex(Field);
+ assert(FNo != ~0U && "Case not handled yet!");
+
+ // Currently, a byval type inside a non-byval struct is a zero-length
+ // object inside a bigger object on x86-64. This type should be
+ // skipped (but only when it is inside a bigger object).
+ // (We know there currently are no other such cases active because
+ // they would hit the assert in FunctionPrologArgumentConversion::
+ // HandleByValArgument.)
+ if (!LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(Ftype, FTy)) {
+ C.EnterField(FNo, Ty);
+ HandleArgument(getDeclaredType(Field), ScalarElts);
+ C.ExitField();
+ }
+ }
+ } else if (TREE_CODE(type) == COMPLEX_TYPE) {
+ C.EnterField(0, Ty);
+ HandleArgument(TREE_TYPE(type), ScalarElts);
+ C.ExitField();
+ C.EnterField(1, Ty);
+ HandleArgument(TREE_TYPE(type), ScalarElts);
+ C.ExitField();
+ } else if ((TREE_CODE(type) == UNION_TYPE) ||
+ (TREE_CODE(type) == QUAL_UNION_TYPE)) {
+ HandleUnion(type, ScalarElts);
+ } else if (TREE_CODE(type) == ARRAY_TYPE) {
+ const ArrayType *ATy = cast<ArrayType>(Ty);
+ for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
+ C.EnterField(i, Ty);
+ HandleArgument(TREE_TYPE(type), ScalarElts);
+ C.ExitField();
+ }
+ } else {
+ assert(0 && "unknown aggregate type!");
+ abort();
+ }
+ }
+
+ /// HandleUnion - Handle a UNION_TYPE or QUAL_UNION_TYPE tree.
+ ///
+ void HandleUnion(tree type, std::vector<const Type*> &ScalarElts) {
+ if (TYPE_TRANSPARENT_UNION(type)) {
+ tree Field = TYPE_FIELDS(type);
+ assert(Field && "Transparent union must have some elements!");
+ while (TREE_CODE(Field) != FIELD_DECL) {
+ Field = TREE_CHAIN(Field);
+ assert(Field && "Transparent union must have some elements!");
+ }
+
+ HandleArgument(TREE_TYPE(Field), ScalarElts);
+ } else {
+ // Unions pass the largest element.
+ unsigned MaxSize = 0;
+ tree MaxElt = 0;
+ for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
+ if (TREE_CODE(Field) == FIELD_DECL) {
+ // Skip fields that are known not to be present.
+ if (TREE_CODE(type) == QUAL_UNION_TYPE &&
+ integer_zerop(DECL_QUALIFIER(Field)))
+ continue;
+
+ tree SizeTree = TYPE_SIZE(TREE_TYPE(Field));
+ unsigned Size = ((unsigned)TREE_INT_CST_LOW(SizeTree)+7)/8;
+ if (Size > MaxSize) {
+ MaxSize = Size;
+ MaxElt = Field;
+ }
+
+ // Skip remaining fields if this one is known to be present.
+ if (TREE_CODE(type) == QUAL_UNION_TYPE &&
+ integer_onep(DECL_QUALIFIER(Field)))
+ break;
+ }
+ }
+
+ if (MaxElt)
+ HandleArgument(TREE_TYPE(MaxElt), ScalarElts);
+ }
+ }
+
+ /// PassInIntegerRegisters - Given an aggregate value that should be passed in
+ /// integer registers, convert it to a structure containing ints and pass all
+ /// of the struct elements in. If Size is set we pass only that many bytes.
+ void PassInIntegerRegisters(tree type, const Type *Ty,
+ std::vector<const Type*> &ScalarElts,
+ unsigned origSize, bool DontCheckAlignment) {
+ unsigned Size;
+ if (origSize)
+ Size = origSize;
+ else
+ Size = TREE_INT_CST_LOW(TYPE_SIZE(type))/8;
+
+ // FIXME: We should preserve all aggregate value alignment information.
+ // Work around to preserve some aggregate value alignment information:
+ // don't bitcast aggregate value to Int64 if its alignment is different
+ // from Int64 alignment. ARM backend needs this.
+ unsigned Align = TYPE_ALIGN(type)/8;
+ unsigned Int64Align =
+ getTargetData().getABITypeAlignment(Type::getInt64Ty(getGlobalContext()));
+ bool UseInt64 = DontCheckAlignment ? true : (Align >= Int64Align);
+
+ // FIXME: In cases where we can, we should use the original struct.
+ // Consider cases like { int, int } and {int, short} for example! This will
+ // produce far better LLVM code!
+ std::vector<const Type*> Elts;
+
+ unsigned ElementSize = UseInt64 ? 8:4;
+ unsigned ArraySize = Size / ElementSize;
+
+ const Type *ATy = NULL;
+ const Type *ArrayElementType = NULL;
+ if (ArraySize) {
+ Size = Size % ElementSize;
+ ArrayElementType = (UseInt64) ?
+ Type::getInt64Ty(getGlobalContext()) : Type::getInt32Ty(getGlobalContext());
+ ATy = ArrayType::get(ArrayElementType, ArraySize);
+ Elts.push_back(ATy);
+ }
+
+ if (Size >= 4) {
+ Elts.push_back(Type::getInt32Ty(getGlobalContext()));
+ Size -= 4;
+ }
+ if (Size >= 2) {
+ Elts.push_back(Type::getInt16Ty(getGlobalContext()));
+ Size -= 2;
+ }
+ if (Size >= 1) {
+ Elts.push_back(Type::getInt8Ty(getGlobalContext()));
+ Size -= 1;
+ }
+ assert(Size == 0 && "Didn't cover value?");
+ const StructType *STy = StructType::get(getGlobalContext(), Elts, false);
+
+ unsigned i = 0;
+ if (ArraySize) {
+ C.EnterField(0, STy);
+ for (unsigned j = 0; j < ArraySize; ++j) {
+ C.EnterField(j, ATy);
+ C.HandleScalarArgument(ArrayElementType, 0);
+ ScalarElts.push_back(ArrayElementType);
+ C.ExitField();
+ }
+ C.ExitField();
+ ++i;
+ }
+ for (unsigned e = Elts.size(); i != e; ++i) {
+ C.EnterField(i, STy);
+ C.HandleScalarArgument(Elts[i], 0);
+ ScalarElts.push_back(Elts[i]);
+ C.ExitField();
+ }
+ }
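+
+ // Worked example (illustrative assumption): for a 10 byte aggregate whose
+ // alignment is at least that of i64 and with DontCheckAlignment false,
+ // ElementSize is 8, ArraySize is 1 and 2 bytes remain, so STy becomes
+ // { [1 x i64], i16 } and the client is handed one i64 and one i16 scalar
+ // argument.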
+
+ /// PassInMixedRegisters - Given an aggregate value that should be passed in
+ /// mixed integer, floating point, and vector registers, convert it to a
+ /// structure containing the specified struct elements and pass them in.
+ void PassInMixedRegisters(tree type, const Type *Ty,
+ std::vector<const Type*> &OrigElts,
+ std::vector<const Type*> &ScalarElts) {
+ // We use VoidTy in OrigElts to mean "this is a word in the aggregate
+ // that occupies storage but has no useful information, and is not passed
+ // anywhere". Happens on x86-64.
+ std::vector<const Type*> Elts(OrigElts);
+ const Type* wordType = getTargetData().getPointerSize() == 4 ?
+ Type::getInt32Ty(getGlobalContext()) : Type::getInt64Ty(getGlobalContext());
+ for (unsigned i=0, e=Elts.size(); i!=e; ++i)
+ if (OrigElts[i]->isVoidTy())
+ Elts[i] = wordType;
+
+ const StructType *STy = StructType::get(getGlobalContext(), Elts, false);
+
+ unsigned Size = getTargetData().getTypeAllocSize(STy);
+ const StructType *InSTy = dyn_cast<StructType>(Ty);
+ unsigned InSize = 0;
+ // If the sizes of Ty and STy do not match then the last element is
+ // accessing extra bits.
+ unsigned LastEltSizeDiff = 0;
+ if (InSTy) {
+ InSize = getTargetData().getTypeAllocSize(InSTy);
+ if (InSize < Size) {
+ unsigned N = STy->getNumElements();
+ const llvm::Type *LastEltTy = STy->getElementType(N-1);
+ if (LastEltTy->isInteger())
+ LastEltSizeDiff =
+ getTargetData().getTypeAllocSize(LastEltTy) - (Size - InSize);
+ }
+ }
+ for (unsigned i = 0, e = Elts.size(); i != e; ++i) {
+ if (!OrigElts[i]->isVoidTy()) {
+ C.EnterField(i, STy);
+ unsigned RealSize = 0;
+ if (LastEltSizeDiff && i == (e - 1))
+ RealSize = LastEltSizeDiff;
+ C.HandleScalarArgument(Elts[i], 0, RealSize);
+ ScalarElts.push_back(Elts[i]);
+ C.ExitField();
+ }
+ }
+ }
+};
+
+// Make sure the SVR4 ABI is used on 32-bit PowerPC Linux.
+#if defined(POWERPC_LINUX) && (TARGET_64BIT == 0)
+#define TheLLVMABI SVR4ABI
+#endif
+
+/// TheLLVMABI - This can be defined by targets if they want total control over
+/// ABI decisions.
+///
+#ifndef TheLLVMABI
+#define TheLLVMABI DefaultABI
+#endif
+
+/// SVR4ABI - This class implements the System V Release 4 ABI for PowerPC. The
+/// SVR4 ABI is the ABI used on 32-bit PowerPC Linux.
+///
+template<typename Client>
+class SVR4ABI {
+ // Number of general purpose argument registers which have already been
+ // assigned.
+ unsigned NumGPR;
+protected:
+ Client &C;
+public:
+ SVR4ABI(Client &c) : NumGPR(0), C(c) {}
+
+ bool isShadowReturn() const { return C.isShadowReturn(); }
+
+ /// HandleReturnType - This is invoked by the target-independent code for the
+ /// return type. It potentially breaks down the argument and invokes methods
+ /// on the client that indicate how its pieces should be handled. This
+ /// handles things like returning structures via hidden parameters.
+ ///
+ /// This is the default implementation which was copied from DefaultABI.
+ void HandleReturnType(tree type, tree fn, bool isBuiltin) {
+ unsigned Offset = 0;
+ const Type *Ty = ConvertType(type);
+ if (isa<VectorType>(Ty)) {
+ // Vector handling is weird on x86. In particular, builtin and
+ // non-builtin functions with the same return type can use different
+ // calling conventions.
+ tree ScalarType = LLVM_SHOULD_RETURN_VECTOR_AS_SCALAR(type, isBuiltin);
+ if (ScalarType)
+ C.HandleAggregateResultAsScalar(ConvertType(ScalarType));
+ else if (LLVM_SHOULD_RETURN_VECTOR_AS_SHADOW(type, isBuiltin))
+ C.HandleScalarShadowResult(Ty->getPointerTo(), false);
+ else
+ C.HandleScalarResult(Ty);
+ } else if (Ty->isSingleValueType() || Ty->isVoidTy()) {
+ // Return scalar values normally.
+ C.HandleScalarResult(Ty);
+ } else if (doNotUseShadowReturn(type, fn)) {
+ tree SingleElt = LLVM_SHOULD_RETURN_SELT_STRUCT_AS_SCALAR(type);
+ if (SingleElt && TYPE_SIZE(SingleElt) &&
+ TREE_CODE(TYPE_SIZE(SingleElt)) == INTEGER_CST &&
+ TREE_INT_CST_LOW(TYPE_SIZE_UNIT(type)) ==
+ TREE_INT_CST_LOW(TYPE_SIZE_UNIT(SingleElt))) {
+ C.HandleAggregateResultAsScalar(ConvertType(SingleElt));
+ } else {
+ // Otherwise return as an integer value large enough to hold the entire
+ // aggregate.
+ if (const Type *AggrTy = LLVM_AGGR_TYPE_FOR_STRUCT_RETURN(type))
+ C.HandleAggregateResultAsAggregate(AggrTy);
+ else if (const Type* ScalarTy =
+ LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN(type, &Offset))
+ C.HandleAggregateResultAsScalar(ScalarTy, Offset);
+ else {
+ assert(0 && "Unable to determine how to return this aggregate!");
+ abort();
+ }
+ }
+ } else {
+ // If the function is returning a struct or union, we pass the pointer to
+ // the struct as the first argument to the function.
+
+ // FIXME: should return the hidden first argument for some targets
+ // (e.g. ELF i386).
+ C.HandleAggregateShadowResult(Ty->getPointerTo(), false);
+ }
+ }
+
+ /// HandleArgument - This is invoked by the target-independent code for each
+ /// argument type passed into the function. It potentially breaks down the
+ /// argument and invokes methods on the client that indicate how its pieces
+ /// should be handled. This handles things like decimating structures into
+ /// their fields.
+ ///
+ /// _Complex arguments are never split, thus their two scalars are either
+ /// passed both in argument registers or both on the stack. Also _Complex
+ /// arguments are always passed in general purpose registers, never in
+ /// Floating-point registers or vector registers. Arguments which should go
+ /// on the stack are marked with the inreg parameter attribute.
+ /// Giving inreg this target-dependent (and counter-intuitive) meaning
+ /// simplifies things, because function calls do not always come from the
+ /// frontend but are also created implicitly, e.g. for libcalls. If inreg
+ /// actually meant that the argument is passed in a register, then all places
+ /// which create function calls/function definitions implicitly would need to
+ /// be aware of this fact and would need to mark arguments accordingly. With
+ /// inreg meaning that the argument is passed on the stack, this is not an
+ /// issue, except for calls which involve _Complex types.
+ void HandleArgument(tree type, std::vector<const Type*> &ScalarElts,
+ Attributes *Attributes = NULL) {
+ // Eight GPRs are available for parameter passing.
+ const unsigned NumArgRegs = 8;
+ unsigned Size = 0;
+ bool DontCheckAlignment = false;
+ const Type *Ty = ConvertType(type);
+ // Figure out if this field is zero bits wide, e.g. {} or [0 x int]. Do
+ // not include variable sized fields here.
+ std::vector<const Type*> Elts;
+ if (isPassedByInvisibleReference(type)) { // variable size -> by-ref.
+ const Type *PtrTy = Ty->getPointerTo();
+ C.HandleByInvisibleReferenceArgument(PtrTy, type);
+ ScalarElts.push_back(PtrTy);
+
+ unsigned Attr = Attribute::None;
+
+ if (NumGPR < NumArgRegs) {
+ NumGPR++;
+ } else {
+ Attr |= Attribute::InReg;
+ }
+
+ if (Attributes) {
+ *Attributes |= Attr;
+ }
+ } else if (isa<VectorType>(Ty)) {
+ if (LLVM_SHOULD_PASS_VECTOR_IN_INTEGER_REGS(type)) {
+ PassInIntegerRegisters(type, Ty, ScalarElts, 0, false);
+ } else if (LLVM_SHOULD_PASS_VECTOR_USING_BYVAL_ATTR(type)) {
+ C.HandleByValArgument(Ty, type);
+ if (Attributes) {
+ *Attributes |= Attribute::ByVal;
+ *Attributes |=
+ Attribute::constructAlignmentFromInt(LLVM_BYVAL_ALIGNMENT(type));
+ }
+ } else {
+ C.HandleScalarArgument(Ty, type);
+ ScalarElts.push_back(Ty);
+ }
+ } else if (Ty->isSingleValueType()) {
+ C.HandleScalarArgument(Ty, type);
+ ScalarElts.push_back(Ty);
+
+ unsigned Attr = Attribute::None;
+
+ if (Ty->isInteger()) {
+ unsigned TypeSize = Ty->getPrimitiveSizeInBits();
+
+ // Determine how many general purpose registers are needed for the
+ // argument.
+ unsigned NumRegs = (TypeSize + 31) / 32;
+
+ // Make sure argument registers are aligned. 64-bit arguments are put in
+ // a register pair which starts with an odd register number.
+ if (TypeSize == 64 && (NumGPR % 2) == 1) {
+ NumGPR++;
+ }
+
+ if (NumGPR <= (NumArgRegs - NumRegs)) {
+ NumGPR += NumRegs;
+ } else {
+ Attr |= Attribute::InReg;
+ NumGPR = NumArgRegs;
+ }
+ } else if (isa<PointerType>(Ty)) {
+ if (NumGPR < NumArgRegs) {
+ NumGPR++;
+ } else {
+ Attr |= Attribute::InReg;
+ }
+ // We don't care about arguments passed in Floating-point or vector
+ // registers.
+ } else if (!(Ty->isFloatingPoint() || isa<VectorType>(Ty))) {
+ abort();
+ }
+
+ if (Attributes) {
+ *Attributes |= Attr;
+ }
+ } else if (LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(type, Ty,
+ C.getCallingConv(),
+ Elts)) {
+ HOST_WIDE_INT SrcSize = int_size_in_bytes(type);
+
+ // With the SVR4 ABI, the only aggregates which are passed in registers
+ // are _Complex aggregates.
+ assert(TREE_CODE(type) == COMPLEX_TYPE && "Not a _Complex type!");
+
+ unsigned Attr = Attribute::None;
+
+ switch (SrcSize) {
+ default:
+ abort();
+ break;
+ case 32:
+ // _Complex long double
+ if (NumGPR == 0) {
+ NumGPR += NumArgRegs;
+ } else {
+ Attr |= Attribute::InReg;
+ NumGPR = NumArgRegs;
+ }
+ break;
+ case 16:
+ // _Complex long long
+ // _Complex double
+ if (NumGPR <= (NumArgRegs - 4)) {
+ NumGPR += 4;
+ } else {
+ Attr |= Attribute::InReg;
+ NumGPR = NumArgRegs;
+ }
+ break;
+ case 8:
+ // _Complex int
+ // _Complex long
+ // _Complex float
+
+ // Make sure argument registers are aligned. 64-bit arguments are put in
+ // a register pair which starts with an odd register number.
+ if (NumGPR % 2 == 1) {
+ NumGPR++;
+ }
+
+ if (NumGPR <= (NumArgRegs - 2)) {
+ NumGPR += 2;
+ } else {
+ Attr |= Attribute::InReg;
+ NumGPR = NumArgRegs;
+ }
+ break;
+ case 4:
+ case 2:
+ // _Complex short
+ // _Complex char
+ if (NumGPR < NumArgRegs) {
+ NumGPR++;
+ } else {
+ Attr |= Attribute::InReg;
+ }
+ break;
+ }
+
+ if (Attributes) {
+ *Attributes |= Attr;
+ }
+
+ PassInMixedRegisters(type, Ty, Elts, ScalarElts);
+ } else if (LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(type, Ty)) {
+ C.HandleByValArgument(Ty, type);
+ if (Attributes) {
+ *Attributes |= Attribute::ByVal;
+ *Attributes |=
+ Attribute::constructAlignmentFromInt(LLVM_BYVAL_ALIGNMENT(type));
+ }
+
+ unsigned Attr = Attribute::None;
+
+ if (NumGPR < NumArgRegs) {
+ NumGPR++;
+ } else {
+ Attr |= Attribute::InReg;
+ }
+
+ if (Attributes) {
+ *Attributes |= Attr;
+ }
+ } else if (isZeroSizedStructOrUnion(type)) {
+ // Zero sized struct or union, just drop it!
+ ;
+ } else {
+ assert(0 && "unknown aggregate type!");
+ abort();
+ }
+ }
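+
+ // Illustrative GPR accounting for this SVR4 implementation (an assumption,
+ // not taken from the patch): starting from NumGPR == 0, an int argument
+ // uses one register (NumGPR -> 1), a following _Complex double uses four
+ // (NumGPR -> 5), a long long must start on an odd-numbered register so one
+ // register is skipped before its pair is used (NumGPR -> 8), and any
+ // further argument is marked inreg, i.e. passed on the stack.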
+
+ /// HandleUnion - Handle a UNION_TYPE or QUAL_UNION_TYPE tree.
+ ///
+ /// This is the default implementation which was copied from DefaultABI.
+ void HandleUnion(tree type, std::vector<const Type*> &ScalarElts) {
+ if (TYPE_TRANSPARENT_UNION(type)) {
+ tree Field = TYPE_FIELDS(type);
+ assert(Field && "Transparent union must have some elements!");
+ while (TREE_CODE(Field) != FIELD_DECL) {
+ Field = TREE_CHAIN(Field);
+ assert(Field && "Transparent union must have some elements!");
+ }
+
+ HandleArgument(TREE_TYPE(Field), ScalarElts);
+ } else {
+ // Unions pass the largest element.
+ unsigned MaxSize = 0;
+ tree MaxElt = 0;
+ for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
+ if (TREE_CODE(Field) == FIELD_DECL) {
+ // Skip fields that are known not to be present.
+ if (TREE_CODE(type) == QUAL_UNION_TYPE &&
+ integer_zerop(DECL_QUALIFIER(Field)))
+ continue;
+
+ tree SizeTree = TYPE_SIZE(TREE_TYPE(Field));
+ unsigned Size = ((unsigned)TREE_INT_CST_LOW(SizeTree)+7)/8;
+ if (Size > MaxSize) {
+ MaxSize = Size;
+ MaxElt = Field;
+ }
+
+ // Skip remaining fields if this one is known to be present.
+ if (TREE_CODE(type) == QUAL_UNION_TYPE &&
+ integer_onep(DECL_QUALIFIER(Field)))
+ break;
+ }
+ }
+
+ if (MaxElt)
+ HandleArgument(TREE_TYPE(MaxElt), ScalarElts);
+ }
+ }
+
+ /// PassInIntegerRegisters - Given an aggregate value that should be passed in
+ /// integer registers, convert it to a structure containing ints and pass all
+ /// of the struct elements in. If Size is set we pass only that many bytes.
+ ///
+ /// This is the default implementation which was copied from DefaultABI.
+ void PassInIntegerRegisters(tree type, const Type *Ty,
+ std::vector<const Type*> &ScalarElts,
+ unsigned origSize, bool DontCheckAlignment) {
+ unsigned Size;
+ if (origSize)
+ Size = origSize;
+ else
+ Size = TREE_INT_CST_LOW(TYPE_SIZE(type))/8;
+
+ // FIXME: We should preserve all aggregate value alignment information.
+ // Work around to preserve some aggregate value alignment information:
+ // don't bitcast aggregate value to Int64 if its alignment is different
+ // from Int64 alignment. ARM backend needs this.
+ unsigned Align = TYPE_ALIGN(type)/8;
+ unsigned Int64Align =
+ getTargetData().getABITypeAlignment(Type::getInt64Ty(getGlobalContext()));
+ bool UseInt64 = DontCheckAlignment ? true : (Align >= Int64Align);
+
+ // FIXME: In cases where we can, we should use the original struct.
+ // Consider cases like { int, int } and {int, short} for example! This will
+ // produce far better LLVM code!
+ std::vector<const Type*> Elts;
+
+ unsigned ElementSize = UseInt64 ? 8:4;
+ unsigned ArraySize = Size / ElementSize;
+
+ const Type *ATy = NULL;
+ const Type *ArrayElementType = NULL;
+ if (ArraySize) {
+ Size = Size % ElementSize;
+ ArrayElementType = (UseInt64) ?
+ Type::getInt64Ty(getGlobalContext()) : Type::getInt32Ty(getGlobalContext());
+ ATy = ArrayType::get(ArrayElementType, ArraySize);
+ Elts.push_back(ATy);
+ }
+
+ if (Size >= 4) {
+ Elts.push_back(Type::getInt32Ty(getGlobalContext()));
+ Size -= 4;
+ }
+ if (Size >= 2) {
+ Elts.push_back(Type::getInt16Ty(getGlobalContext()));
+ Size -= 2;
+ }
+ if (Size >= 1) {
+ Elts.push_back(Type::getInt8Ty(getGlobalContext()));
+ Size -= 1;
+ }
+ assert(Size == 0 && "Didn't cover value?");
+ const StructType *STy = StructType::get(getGlobalContext(), Elts, false);
+
+ unsigned i = 0;
+ if (ArraySize) {
+ C.EnterField(0, STy);
+ for (unsigned j = 0; j < ArraySize; ++j) {
+ C.EnterField(j, ATy);
+ C.HandleScalarArgument(ArrayElementType, 0);
+ ScalarElts.push_back(ArrayElementType);
+ C.ExitField();
+ }
+ C.ExitField();
+ ++i;
+ }
+ for (unsigned e = Elts.size(); i != e; ++i) {
+ C.EnterField(i, STy);
+ C.HandleScalarArgument(Elts[i], 0);
+ ScalarElts.push_back(Elts[i]);
+ C.ExitField();
+ }
+ }
+
+ /// PassInMixedRegisters - Given an aggregate value that should be passed in
+ /// mixed integer, floating point, and vector registers, convert it to a
+ /// structure containing the specified struct elements and pass them in.
+ ///
+ /// This is the default implementation which was copied from DefaultABI.
+ void PassInMixedRegisters(tree type, const Type *Ty,
+ std::vector<const Type*> &OrigElts,
+ std::vector<const Type*> &ScalarElts) {
+ // We use VoidTy in OrigElts to mean "this is a word in the aggregate
+ // that occupies storage but has no useful information, and is not passed
+ // anywhere". Happens on x86-64.
+ std::vector<const Type*> Elts(OrigElts);
+ const Type* wordType = getTargetData().getPointerSize() == 4
+ ? Type::getInt32Ty(getGlobalContext()) : Type::getInt64Ty(getGlobalContext());
+ for (unsigned i=0, e=Elts.size(); i!=e; ++i)
+ if (OrigElts[i]->isVoidTy())
+ Elts[i] = wordType;
+
+ const StructType *STy = StructType::get(getGlobalContext(), Elts, false);
+
+ unsigned Size = getTargetData().getTypeAllocSize(STy);
+ const StructType *InSTy = dyn_cast<StructType>(Ty);
+ unsigned InSize = 0;
+ // If the sizes of Ty and STy do not match then the last element is
+ // accessing extra bits.
+ unsigned LastEltSizeDiff = 0;
+ if (InSTy) {
+ InSize = getTargetData().getTypeAllocSize(InSTy);
+ if (InSize < Size) {
+ unsigned N = STy->getNumElements();
+ const llvm::Type *LastEltTy = STy->getElementType(N-1);
+ if (LastEltTy->isInteger())
+ LastEltSizeDiff =
+ getTargetData().getTypeAllocSize(LastEltTy) - (Size - InSize);
+ }
+ }
+ for (unsigned i = 0, e = Elts.size(); i != e; ++i) {
+ if (!OrigElts[i]->isVoidTy()) {
+ C.EnterField(i, STy);
+ unsigned RealSize = 0;
+ if (LastEltSizeDiff && i == (e - 1))
+ RealSize = LastEltSizeDiff;
+ C.HandleScalarArgument(Elts[i], 0, RealSize);
+ ScalarElts.push_back(Elts[i]);
+ C.ExitField();
+ }
+ }
+ }
+};
+
+#endif /* LLVM_ABI_H */
diff --git a/dragonegg/llvm-backend.cpp b/dragonegg/llvm-backend.cpp
new file mode 100644
index 00000000000..f28716fcfa5
--- /dev/null
+++ b/dragonegg/llvm-backend.cpp
@@ -0,0 +1,2252 @@
+/* High-level LLVM backend interface
+Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
+Contributed by Chris Lattner (sabre@nondot.org)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+// LLVM headers
+#define DEBUG_TYPE "plugin"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/ModuleProvider.h"
+#include "llvm/PassManager.h"
+#include "llvm/ValueSymbolTable.h"
+#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Assembly/Writer.h"
+#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/Target/SubtargetFeature.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegistry.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/StandardPasses.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/System/Program.h"
+
+// System headers
+#include <cassert>
+#include <gmp.h>
+
+// GCC headers
+#undef VISIBILITY_HIDDEN
+
+extern "C" {
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "target.h"
+#include "tree.h"
+
+#include "cgraph.h"
+#include "diagnostic.h"
+#include "except.h"
+#include "flags.h"
+#include "function.h"
+#include "gcc-plugin.h"
+#include "intl.h"
+#include "langhooks.h"
+#include "output.h"
+#include "params.h"
+#include "plugin-version.h"
+#include "toplev.h"
+#include "tree-inline.h"
+#include "tree-flow.h"
+#include "tree-pass.h"
+#include "version.h"
+}
+
+// Plugin headers
+#include "llvm-internal.h"
+#include "llvm-debug.h"
+#include "llvm-target.h"
+#include "llvm-os.h"
+#include "bits_and_bobs.h"
+extern "C" {
+#include "llvm-cache.h"
+}
+
+// Non-zero if bytecode from PCH is successfully read.
+int flag_llvm_pch_read;
+
+// Non-zero if libcalls should not be simplified.
+int flag_no_simplify_libcalls;
+
+// Non-zero if red-zone is disabled.
+//TODOstatic int flag_disable_red_zone = 0;
+
+// Non-zero if implicit floating point instructions are disabled.
+//TODOstatic int flag_no_implicit_float = 0;
+
+/// llvm_asm_file_name - Name of file to use for assembly code output.
+static const char *llvm_asm_file_name;
+
+// Global state for the LLVM backend.
+Module *TheModule = 0;
+DebugInfo *TheDebugInfo = 0;
+TargetMachine *TheTarget = 0;
+TargetFolder *TheFolder = 0;
+TypeConverter *TheTypeConverter = 0;
+raw_ostream *OutStream = 0; // Stream to write assembly code to.
+formatted_raw_ostream FormattedOutStream;
+
+static bool DisableLLVMOptimizations;
+static bool EnableGCCOptimizations;
+static bool EmitIR;
+static bool SaveGCCOutput;
+
+std::vector<std::pair<Constant*, int> > StaticCtors, StaticDtors;
+SmallSetVector<Constant*, 32> AttributeUsedGlobals;
+SmallSetVector<Constant*, 32> AttributeCompilerUsedGlobals;
+std::vector<Constant*> AttributeAnnotateGlobals;
+
+/// PerFunctionPasses - This is the list of cleanup passes run per-function
+/// as each is compiled. In cases where we are not doing IPO, it includes the
+/// code generator.
+static FunctionPassManager *PerFunctionPasses = 0;
+static PassManager *PerModulePasses = 0;
+static FunctionPassManager *CodeGenPasses = 0;
+
+static void createPerFunctionOptimizationPasses();
+static void createPerModuleOptimizationPasses();
+//TODOstatic void destroyOptimizationPasses();
+
+
+//===----------------------------------------------------------------------===//
+// Matching LLVM Values with GCC DECL trees
+//===----------------------------------------------------------------------===//
+
+/// set_decl_llvm - Remember the LLVM value for a GCC declaration.
+Value *set_decl_llvm (tree t, Value *V) {
+ assert(HAS_RTL_P(t) && "Expected a declaration with RTL!");
+ return (Value *)llvm_set_cached(t, V);
+}
+
+/// get_decl_llvm - Retrieve the LLVM value for a GCC declaration, or NULL.
+Value *get_decl_llvm(tree t) {
+ assert(HAS_RTL_P(t) && "Expected a declaration with RTL!");
+ return (Value *)llvm_get_cached(t);
+}
+
+/// changeLLVMConstant - Replace Old with New everywhere, updating all maps
+/// (except for AttributeAnnotateGlobals, which is a different kind of animal).
+/// At this point we know that New is not in any of these maps.
+void changeLLVMConstant(Constant *Old, Constant *New) {
+ assert(Old->use_empty() && "Old value has uses!");
+
+ if (AttributeUsedGlobals.count(Old)) {
+ AttributeUsedGlobals.remove(Old);
+ AttributeUsedGlobals.insert(New);
+ }
+
+ if (AttributeCompilerUsedGlobals.count(Old)) {
+ AttributeCompilerUsedGlobals.remove(Old);
+ AttributeCompilerUsedGlobals.insert(New);
+ }
+
+ for (unsigned i = 0, e = StaticCtors.size(); i != e; ++i) {
+ if (StaticCtors[i].first == Old)
+ StaticCtors[i].first = New;
+ }
+
+ for (unsigned i = 0, e = StaticDtors.size(); i != e; ++i) {
+ if (StaticDtors[i].first == Old)
+ StaticDtors[i].first = New;
+ }
+
+ llvm_replace_cached(Old, New);
+}
+
+//TODO/// readLLVMValues - Read LLVM Types string table
+//TODOvoid readLLVMValues() {
+//TODO GlobalValue *V = TheModule->getNamedGlobal("llvm.pch.values");
+//TODO if (!V)
+//TODO return;
+//TODO
+//TODO GlobalVariable *GV = cast<GlobalVariable>(V);
+//TODO ConstantStruct *ValuesFromPCH = cast<ConstantStruct>(GV->getOperand(0));
+//TODO
+//TODO for (unsigned i = 0; i < ValuesFromPCH->getNumOperands(); ++i) {
+//TODO Value *Va = ValuesFromPCH->getOperand(i);
+//TODO
+//TODO if (!Va) {
+//TODO // If V is empty then insert NULL to represent empty entries.
+//TODO LLVMValues.push_back(Va);
+//TODO continue;
+//TODO }
+//TODO if (ConstantArray *CA = dyn_cast<ConstantArray>(Va)) {
+//TODO std::string Str = CA->getAsString();
+//TODO Va = TheModule->getValueSymbolTable().lookup(Str);
+//TODO }
+//TODO assert (Va != NULL && "Invalid Value in LLVMValues string table");
+//TODO LLVMValues.push_back(Va);
+//TODO }
+//TODO
+//TODO // Now, llvm.pch.values is not required so remove it from the symbol table.
+//TODO GV->eraseFromParent();
+//TODO}
+//TODO
+//TODO/// writeLLVMValues - GCC trees use the LLVMValues vector's index to reach LLVM
+//TODO/// Values. Create a string table to hold these LLVM Values' names. This string
+//TODO/// table will be used to recreate the LTypes vector after loading PCH.
+//TODOvoid writeLLVMValues() {
+//TODO if (LLVMValues.empty())
+//TODO return;
+//TODO
+//TODO LLVMContext &Context = getGlobalContext();
+//TODO
+//TODO std::vector<Constant *> ValuesForPCH;
+//TODO for (std::vector<Value *>::iterator I = LLVMValues.begin(),
+//TODO E = LLVMValues.end(); I != E; ++I) {
+//TODO if (Constant *C = dyn_cast_or_null<Constant>(*I))
+//TODO ValuesForPCH.push_back(C);
+//TODO else
+//TODO // Non constant values, e.g. arguments, are not at global scope.
+//TODO // When PCH is read, only global scope values are used.
+//TODO ValuesForPCH.push_back(Constant::getNullValue(Type::getInt32Ty(Context)));
+//TODO }
+//TODO
+//TODO // Create string table.
+//TODO Constant *LLVMValuesTable = ConstantStruct::get(Context, ValuesForPCH, false);
+//TODO
+//TODO // Create variable to hold this string table.
+//TODO new GlobalVariable(*TheModule, LLVMValuesTable->getType(), true,
+//TODO GlobalValue::ExternalLinkage,
+//TODO LLVMValuesTable,
+//TODO "llvm.pch.values");
+//TODO}
+
+/// handleVisibility - Forward decl visibility style to global.
+void handleVisibility(tree decl, GlobalValue *GV) {
+ // If decl has visibility specified explicitly (via attribute) - honour
+ // it. Otherwise (e.g. visibility specified via -fvisibility=hidden) honour
+ // only if symbol is local.
+ if (TREE_PUBLIC(decl) &&
+ (DECL_VISIBILITY_SPECIFIED(decl) || !DECL_EXTERNAL(decl))) {
+ if (DECL_VISIBILITY(decl) == VISIBILITY_HIDDEN)
+ GV->setVisibility(GlobalValue::HiddenVisibility);
+ else if (DECL_VISIBILITY(decl) == VISIBILITY_PROTECTED)
+ GV->setVisibility(GlobalValue::ProtectedVisibility);
+ else if (DECL_VISIBILITY(decl) == VISIBILITY_DEFAULT)
+ GV->setVisibility(Function::DefaultVisibility);
+ }
+}
+
+// GuessAtInliningThreshold - Figure out a reasonable threshold to pass to
+// LLVM's inliner. GCC has many options that control inlining, but we have
+// decided not to support anything like that for llvm-gcc.
+static unsigned GuessAtInliningThreshold() {
+ unsigned threshold = 200;
+ if (optimize_size || optimize < 3)
+ // Reduce inline limit.
+ threshold = 50;
+ return threshold;
+}
+
+#ifndef LLVM_TARGET_NAME
+#error LLVM_TARGET_NAME macro not specified
+#endif
+
+namespace llvm {
+#define Declare2(TARG, MOD) extern "C" void LLVMInitialize ## TARG ## MOD()
+#define Declare(T, M) Declare2(T, M)
+ Declare(LLVM_TARGET_NAME, TargetInfo);
+ Declare(LLVM_TARGET_NAME, Target);
+ Declare(LLVM_TARGET_NAME, AsmPrinter);
+#undef Declare
+#undef Declare2
+}
+
+/// LazilyConfigureLLVM - Set LLVM configuration options, if not already set.
+static void LazilyConfigureLLVM(void) {
+ static bool Configured = false;
+ if (Configured)
+ return;
+
+ // Initialize the LLVM backend.
+#define DoInit2(TARG, MOD) LLVMInitialize ## TARG ## MOD()
+#define DoInit(T, M) DoInit2(T, M)
+ DoInit(LLVM_TARGET_NAME, TargetInfo);
+ DoInit(LLVM_TARGET_NAME, Target);
+ DoInit(LLVM_TARGET_NAME, AsmPrinter);
+#undef DoInit
+#undef DoInit2
+
+ // Initialize LLVM command line options.
+ std::vector<const char*> Args;
+ Args.push_back(progname); // program name
+
+//TODO // Allow targets to specify PIC options and other stuff to the corresponding
+//TODO // LLVM backends.
+//TODO#ifdef LLVM_SET_RED_ZONE_FLAG
+//TODO LLVM_SET_RED_ZONE_FLAG(flag_disable_red_zone)
+//TODO#endif
+#ifdef LLVM_SET_TARGET_OPTIONS
+ LLVM_SET_TARGET_OPTIONS(Args);
+#endif
+#ifdef LLVM_SET_MACHINE_OPTIONS
+ LLVM_SET_MACHINE_OPTIONS(Args);
+#endif
+//TODO#ifdef LLVM_SET_IMPLICIT_FLOAT
+//TODO LLVM_SET_IMPLICIT_FLOAT(flag_no_implicit_float)
+//TODO#endif
+
+ if (time_report || !quiet_flag || flag_detailed_statistics)
+ Args.push_back("--time-passes");
+ if (!quiet_flag || flag_detailed_statistics)
+ Args.push_back("--stats");
+ if (fast_math_flags_set_p())
+ Args.push_back("--enable-unsafe-fp-math");
+ if (!flag_omit_frame_pointer)
+ Args.push_back("--disable-fp-elim");
+ if (!flag_zero_initialized_in_bss)
+ Args.push_back("--nozero-initialized-in-bss");
+ if (flag_verbose_asm)
+ Args.push_back("--asm-verbose");
+//TODO if (flag_debug_pass_structure)
+//TODO Args.push_back("--debug-pass=Structure");
+//TODO if (flag_debug_pass_arguments)
+//TODO Args.push_back("--debug-pass=Arguments");
+ if (flag_unwind_tables)
+ Args.push_back("--unwind-tables");
+
+ // If there are options that should be passed through to the LLVM backend
+ // directly from the command line, do so now. This is mainly for debugging
+ // purposes, and shouldn't really be for general use.
+ std::vector<std::string> ArgStrings;
+
+ unsigned threshold = GuessAtInliningThreshold();
+ std::string Arg("--inline-threshold="+utostr(threshold));
+ ArgStrings.push_back(Arg);
+
+//TODO if (flag_limited_precision > 0) {
+//TODO std::string Arg("--limit-float-precision="+utostr(flag_limited_precision));
+//TODO ArgStrings.push_back(Arg);
+//TODO }
+
+ if (flag_stack_protect > 0) {
+ std::string Arg("--stack-protector-buffer-size=" +
+ utostr(PARAM_VALUE(PARAM_SSP_BUFFER_SIZE)));
+ ArgStrings.push_back(Arg);
+ }
+
+ for (unsigned i = 0, e = ArgStrings.size(); i != e; ++i)
+ Args.push_back(ArgStrings[i].c_str());
+
+//TODO std::vector<std::string> LLVM_Optns; // Avoid deallocation before opts parsed!
+//TODO if (llvm_optns) {
+//TODO SplitString(llvm_optns, LLVM_Optns);
+//TODO for(unsigned i = 0, e = LLVM_Optns.size(); i != e; ++i)
+//TODO Args.push_back(LLVM_Optns[i].c_str());
+//TODO }
+
+ Args.push_back(0); // Null terminator.
+ int pseudo_argc = Args.size()-1;
+ llvm::cl::ParseCommandLineOptions(pseudo_argc, (char**)&Args[0]);
+
+ Configured = true;
+}
+
+/// LazilyInitializeModule - Create a module to output LLVM IR to, if it wasn't
+/// already created.
+static void LazilyInitializeModule(void) {
+ static bool Initialized = false;
+ if (Initialized)
+ return;
+
+ LazilyConfigureLLVM();
+
+ TheModule = new Module("", getGlobalContext());
+
+ if (main_input_filename)
+ TheModule->setModuleIdentifier(main_input_filename);
+
+ // Insert a special .ident directive to identify the version of the plugin
+ // which compiled this code. The format of the .ident string is patterned
+ // after the ones produced by GCC.
+#ifdef IDENT_ASM_OP
+ if (!flag_no_ident) {
+ const char *pkg_version = "(GNU) ";
+
+ if (strcmp ("(GCC) ", pkgversion_string))
+ pkg_version = pkgversion_string;
+
+ std::string IdentString = IDENT_ASM_OP;
+ IdentString += "\"GCC: ";
+ IdentString += pkg_version;
+ IdentString += version_string;
+ IdentString += " LLVM: ";
+ IdentString += REVISION;
+ IdentString += '"';
+ TheModule->setModuleInlineAsm(IdentString);
+ }
+#endif
+
+ // If the target wants to override the architecture, e.g. turning
+ // powerpc-darwin-... into powerpc64-darwin-... when -m64 is enabled, do so
+ // now.
+ std::string TargetTriple = TARGET_NAME;
+#ifdef LLVM_OVERRIDE_TARGET_ARCH
+ std::string Arch = LLVM_OVERRIDE_TARGET_ARCH();
+ if (!Arch.empty()) {
+ std::string::size_type DashPos = TargetTriple.find('-');
+ if (DashPos != std::string::npos)// If we have a sane t-t, replace the arch.
+ TargetTriple = Arch + TargetTriple.substr(DashPos);
+ }
+#endif
+#ifdef LLVM_OVERRIDE_TARGET_VERSION
+ char *NewTriple;
+ bool OverRidden = LLVM_OVERRIDE_TARGET_VERSION(TargetTriple.c_str(),
+ &NewTriple);
+ if (OverRidden)
+ TargetTriple = std::string(NewTriple);
+#endif
+ TheModule->setTargetTriple(TargetTriple);
+
+ TheTypeConverter = new TypeConverter();
+
+ // Create the TargetMachine we will be generating code with.
+ // FIXME: Figure out how to select the target and pass down subtarget info.
+ std::string Err;
+ const Target *TME =
+ TargetRegistry::lookupTarget(TargetTriple, Err);
+ if (!TME)
+ llvm_report_error(Err);
+
+ // Figure out the subtarget feature string we pass to the target.
+ std::string FeatureStr;
+ // The target can set LLVM_SET_SUBTARGET_FEATURES to configure the LLVM
+ // backend.
+#ifdef LLVM_SET_SUBTARGET_FEATURES
+ SubtargetFeatures Features;
+ LLVM_SET_SUBTARGET_FEATURES(Features);
+ FeatureStr = Features.getString();
+#endif
+ TheTarget = TME->createTargetMachine(TargetTriple, FeatureStr);
+ assert(TheTarget->getTargetData()->isBigEndian() == BYTES_BIG_ENDIAN);
+
+ TheFolder = new TargetFolder(TheTarget->getTargetData(), getGlobalContext());
+
+ // Install information about target datalayout stuff into the module for
+ // optimizer use.
+ TheModule->setDataLayout(TheTarget->getTargetData()->
+ getStringRepresentation());
+
+ if (optimize)
+ RegisterRegAlloc::setDefault(createLinearScanRegisterAllocator);
+ else
+ RegisterRegAlloc::setDefault(createLocalRegisterAllocator);
+
+//TODO // FIXME - Do not disable debug info while writing pch.
+//TODO if (!flag_pch_file &&
+//TODO debug_info_level > DINFO_LEVEL_NONE)
+//TODO TheDebugInfo = new DebugInfo(TheModule);
+//TODO if (TheDebugInfo)
+//TODO TheDebugInfo->Initialize();
+//TODO}
+//TODO
+//TODO/// performLateBackendInitialization - Set backend options that may only be
+//TODO/// known at codegen time.
+//TODOvoid performLateBackendInitialization(void) {
+//TODO // The Ada front-end sets flag_exceptions only after processing the file.
+//TODO if (USING_SJLJ_EXCEPTIONS)
+//TODO SjLjExceptionHandling = flag_exceptions;
+//TODO else
+//TODO DwarfExceptionHandling = flag_exceptions;
+//TODO for (Module::iterator I = TheModule->begin(), E = TheModule->end();
+//TODO I != E; ++I)
+//TODO if (!I->isDeclaration()) {
+//TODO if (flag_disable_red_zone)
+//TODO I->addFnAttr(Attribute::NoRedZone);
+//TODO if (flag_no_implicit_float)
+//TODO I->addFnAttr(Attribute::NoImplicitFloat);
+//TODO }
+//TODO}
+ Initialized = true;
+}
+
+/// InitializeOutputStreams - Initialize the assembly code output streams.
+static void InitializeOutputStreams(bool Binary) {
+ assert(!OutStream && "Output stream already initialized!");
+ std::string Error;
+
+ OutStream = new raw_fd_ostream(llvm_asm_file_name, Error,
+ Binary ? raw_fd_ostream::F_Binary : 0);
+
+ if (!Error.empty())
+ llvm_report_error(Error);
+
+ FormattedOutStream.setStream(*OutStream,
+ formatted_raw_ostream::PRESERVE_STREAM);
+}
+
+//TODOoFILEstream *AsmIntermediateOutStream = 0;
+//TODO
+//TODO/// llvm_pch_read - Read bytecode from PCH file. Initialize TheModule and setup
+//TODO/// LTypes vector.
+//TODOvoid llvm_pch_read(const unsigned char *Buffer, unsigned Size) {
+//TODO std::string ModuleName = TheModule->getModuleIdentifier();
+//TODO
+//TODO delete TheModule;
+//TODO delete TheDebugInfo;
+//TODO
+//TODO clearTargetBuiltinCache();
+//TODO
+//TODO MemoryBuffer *MB = MemoryBuffer::getNewMemBuffer(Size, ModuleName.c_str());
+//TODO memcpy((char*)MB->getBufferStart(), Buffer, Size);
+//TODO
+//TODO std::string ErrMsg;
+//TODO TheModule = ParseBitcodeFile(MB, getGlobalContext(), &ErrMsg);
+//TODO delete MB;
+//TODO
+//TODO // FIXME - Do not disable debug info while writing pch.
+//TODO if (!flag_pch_file && debug_info_level > DINFO_LEVEL_NONE) {
+//TODO TheDebugInfo = new DebugInfo(TheModule);
+//TODO TheDebugInfo->Initialize();
+//TODO }
+//TODO
+//TODO if (!TheModule) {
+//TODO errs() << "Error reading bytecodes from PCH file\n";
+//TODO errs() << ErrMsg << "\n";
+//TODO exit(1);
+//TODO }
+//TODO
+//TODO if (PerFunctionPasses || PerModulePasses) {
+//TODO destroyOptimizationPasses();
+//TODO
+//TODO // Don't run codegen, when we should output PCH
+//TODO if (flag_pch_file)
+//TODO llvm_pch_write_init();
+//TODO }
+//TODO
+//TODO // Read LLVM Types string table
+//TODO readLLVMTypesStringTable();
+//TODO readLLVMValues();
+//TODO
+//TODO flag_llvm_pch_read = 1;
+//TODO}
+//TODO
+//TODO/// llvm_pch_write_init - Initialize PCH writing.
+//TODOvoid llvm_pch_write_init(void) {
+//TODO timevar_push(TV_LLVM_INIT);
+//TODO AsmOutStream = new oFILEstream(asm_out_file);
+//TODO // FIXME: disentangle ostream madness here. Kill off ostream and FILE.
+//TODO AsmOutRawStream =
+//TODO new formatted_raw_ostream(*new raw_os_ostream(*AsmOutStream),
+//TODO formatted_raw_ostream::DELETE_STREAM);
+//TODO
+//TODO PerModulePasses = new PassManager();
+//TODO PerModulePasses->add(new TargetData(*TheTarget->getTargetData()));
+//TODO
+//TODO // If writing to stdout, set binary mode.
+//TODO if (asm_out_file == stdout)
+//TODO sys::Program::ChangeStdoutToBinary();
+//TODO
+//TODO // Emit an LLVM .bc file to the output. This is used when passed
+//TODO // -emit-llvm -c to the GCC driver.
+//TODO PerModulePasses->add(createBitcodeWriterPass(*AsmOutStream));
+//TODO
+//TODO // Disable emission of .ident into the output file... which is completely
+//TODO // wrong for llvm/.bc emission cases.
+//TODO flag_no_ident = 1;
+//TODO
+//TODO flag_llvm_pch_read = 0;
+//TODO
+//TODO timevar_pop(TV_LLVM_INIT);
+//TODO}
+
+//TODOstatic void destroyOptimizationPasses() {
+//TODO delete PerFunctionPasses;
+//TODO delete PerModulePasses;
+//TODO delete CodeGenPasses;
+//TODO
+//TODO PerFunctionPasses = 0;
+//TODO PerModulePasses = 0;
+//TODO CodeGenPasses = 0;
+//TODO}
+
+static void createPerFunctionOptimizationPasses() {
+ if (PerFunctionPasses)
+ return;
+
+ // Create and set up the per-function pass manager.
+ // FIXME: Move the code generator to be function-at-a-time.
+ PerFunctionPasses =
+ new FunctionPassManager(new ExistingModuleProvider(TheModule));
+ PerFunctionPasses->add(new TargetData(*TheTarget->getTargetData()));
+
+ // In -O0 if checking is disabled, we don't even have per-function passes.
+ bool HasPerFunctionPasses = false;
+#ifdef ENABLE_CHECKING
+ PerFunctionPasses->add(createVerifierPass());
+ HasPerFunctionPasses = true;
+#endif
+
+ if (optimize > 0 && !DisableLLVMOptimizations) {
+ HasPerFunctionPasses = true;
+ PerFunctionPasses->add(createCFGSimplificationPass());
+ if (optimize == 1)
+ PerFunctionPasses->add(createPromoteMemoryToRegisterPass());
+ else
+ PerFunctionPasses->add(createScalarReplAggregatesPass());
+ PerFunctionPasses->add(createInstructionCombiningPass());
+ }
+
+ // If there are no module-level passes that have to be run, we codegen as
+ // each function is parsed.
+ // FIXME: We can't figure this out until we know there are no always-inline
+ // functions.
+ // FIXME: This is disabled right now until bugs can be worked out. Reenable
+ // this for fast -O0 compiles!
+ if (!EmitIR && 0) {
+ FunctionPassManager *PM = PerFunctionPasses;
+ HasPerFunctionPasses = true;
+
+ CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
+
+ switch (optimize) {
+ default: break;
+ case 0: OptLevel = CodeGenOpt::None; break;
+ case 3: OptLevel = CodeGenOpt::Aggressive; break;
+ }
+
+ // Normal mode, emit a .s file by running the code generator.
+ // Note, this also adds codegenerator level optimization passes.
+ InitializeOutputStreams(false);
+ switch (TheTarget->addPassesToEmitFile(*PM, FormattedOutStream,
+ TargetMachine::AssemblyFile,
+ OptLevel)) {
+ default:
+ case FileModel::Error:
+ errs() << "Error interfacing to target machine!\n";
+ exit(1);
+ case FileModel::AsmFile:
+ break;
+ }
+
+ if (TheTarget->addPassesToEmitFileFinish(*PM, (MachineCodeEmitter *)0,
+ OptLevel)) {
+ errs() << "Error interfacing to target machine!\n";
+ exit(1);
+ }
+ }
+
+ if (HasPerFunctionPasses) {
+ PerFunctionPasses->doInitialization();
+ } else {
+ delete PerFunctionPasses;
+ PerFunctionPasses = 0;
+ }
+}
+
+static void createPerModuleOptimizationPasses() {
+ if (PerModulePasses)
+ // llvm_pch_write_init has already created the per module passes.
+ return;
+
+ // FIXME: AT -O0/O1, we should stream out functions at a time.
+ PerModulePasses = new PassManager();
+ PerModulePasses->add(new TargetData(*TheTarget->getTargetData()));
+ bool HasPerModulePasses = false;
+
+ if (!DisableLLVMOptimizations) {
+ bool NeedAlwaysInliner = false;
+ llvm::Pass *InliningPass = 0;
+ if (optimize >= 2) {
+ InliningPass = createFunctionInliningPass(); // Inline small functions
+ } else {
+ // If full inliner is not run, check if always-inline is needed to handle
+ // functions that are marked as always_inline.
+ // TODO: Consider letting the GCC inliner do this.
+ for (Module::iterator I = TheModule->begin(), E = TheModule->end();
+ I != E; ++I)
+ if (I->hasFnAttr(Attribute::AlwaysInline)) {
+ NeedAlwaysInliner = true;
+ break;
+ }
+
+ if (NeedAlwaysInliner)
+ InliningPass = createAlwaysInlinerPass(); // Inline always_inline funcs
+ }
+
+ HasPerModulePasses = true;
+ createStandardModulePasses(PerModulePasses, optimize,
+ optimize_size || optimize < 3,
+ flag_unit_at_a_time, flag_unroll_loops,
+ !flag_no_simplify_libcalls, flag_exceptions,
+ InliningPass);
+ }
+
+ if (EmitIR && 0) {
+ // Emit an LLVM .bc file to the output. This is used when passed
+ // -emit-llvm -c to the GCC driver.
+ InitializeOutputStreams(true);
+ PerModulePasses->add(createBitcodeWriterPass(*OutStream));
+ HasPerModulePasses = true;
+ } else if (EmitIR) {
+ // Emit an LLVM .ll file to the output. This is used when passed
+ // -emit-llvm -S to the GCC driver.
+ InitializeOutputStreams(false);
+ PerModulePasses->add(createPrintModulePass(OutStream));
+ HasPerModulePasses = true;
+ } else {
+ // If there are passes we have to run on the entire module, we do codegen
+ // as a separate "pass" after that happens.
+ // However if there are no module-level passes that have to be run, we
+ // codegen as each function is parsed.
+ // FIXME: This is disabled right now until bugs can be worked out. Reenable
+ // this for fast -O0 compiles!
+ if (PerModulePasses || 1) {
+ FunctionPassManager *PM = CodeGenPasses =
+ new FunctionPassManager(new ExistingModuleProvider(TheModule));
+ PM->add(new TargetData(*TheTarget->getTargetData()));
+
+ CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
+
+ switch (optimize) {
+ default: break;
+ case 0: OptLevel = CodeGenOpt::None; break;
+ case 3: OptLevel = CodeGenOpt::Aggressive; break;
+ }
+
+ // Normal mode, emit a .s file by running the code generator.
+ // Note, this also adds codegenerator level optimization passes.
+ InitializeOutputStreams(false);
+ switch (TheTarget->addPassesToEmitFile(*PM, FormattedOutStream,
+ TargetMachine::AssemblyFile,
+ OptLevel)) {
+ default:
+ case FileModel::Error:
+ errs() << "Error interfacing to target machine!\n";
+ exit(1);
+ case FileModel::AsmFile:
+ break;
+ }
+
+ if (TheTarget->addPassesToEmitFileFinish(*PM, (MachineCodeEmitter *)0,
+ OptLevel)) {
+ errs() << "Error interfacing to target machine!\n";
+ exit(1);
+ }
+ }
+ }
+
+ if (!HasPerModulePasses) {
+ delete PerModulePasses;
+ PerModulePasses = 0;
+ }
+}
+
+//TODO/// llvm_asm_file_start - Start the .s file.
+//TODOvoid llvm_asm_file_start(void) {
+//TODO timevar_push(TV_LLVM_INIT);
+//TODO AsmOutStream = new oFILEstream(asm_out_file);
+//TODO // FIXME: disentangle ostream madness here. Kill off ostream and FILE.
+//TODO AsmOutRawStream =
+//TODO new formatted_raw_ostream(*new raw_os_ostream(*AsmOutStream),
+//TODO formatted_raw_ostream::DELETE_STREAM);
+//TODO
+//TODO flag_llvm_pch_read = 0;
+//TODO
+//TODO if (EmitIR)
+//TODO // Disable emission of .ident into the output file... which is completely
+//TODO // wrong for llvm/.bc emission cases.
+//TODO flag_no_ident = 1;
+//TODO
+//TODO // If writing to stdout, set binary mode.
+//TODO if (asm_out_file == stdout)
+//TODO sys::Program::ChangeStdoutToBinary();
+//TODO
+//TODO AttributeUsedGlobals.clear();
+//TODO AttributeCompilerUsedGlobals.clear();
+//TODO timevar_pop(TV_LLVM_INIT);
+//TODO}
+
+/// CreateStructorsList - Convert a list of static ctors/dtors to an
+/// initializer suitable for the llvm.global_[cd]tors globals.
+static void CreateStructorsList(std::vector<std::pair<Constant*, int> > &Tors,
+ const char *Name) {
+ std::vector<Constant*> InitList;
+ std::vector<Constant*> StructInit;
+ StructInit.resize(2);
+
+ LLVMContext &Context = getGlobalContext();
+
+ const Type *FPTy =
+ FunctionType::get(Type::getVoidTy(Context),
+ std::vector<const Type*>(), false);
+ FPTy = FPTy->getPointerTo();
+
+ for (unsigned i = 0, e = Tors.size(); i != e; ++i) {
+ StructInit[0] = ConstantInt::get(Type::getInt32Ty(Context), Tors[i].second);
+
+ // __attribute__(constructor) can be on a function with any type. Make sure
+ // the pointer is void()*.
+ StructInit[1] = TheFolder->CreateBitCast(Tors[i].first, FPTy);
+ InitList.push_back(ConstantStruct::get(Context, StructInit, false));
+ }
+ Constant *Array = ConstantArray::get(
+ ArrayType::get(InitList[0]->getType(), InitList.size()), InitList);
+ new GlobalVariable(*TheModule, Array->getType(), false,
+ GlobalValue::AppendingLinkage,
+ Array, Name);
+}
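For illustration, a minimal sketch (function name hypothetical) of what one registered constructor turns into here: StaticCtors collects {function, priority} pairs and CreateStructorsList wraps them in an appending global.

    /* Source as GCC sees it. */
    static void init_table(void) __attribute__((constructor));
    static void init_table(void) { /* runs before main() */ }

    /* CreateStructorsList(StaticCtors, "llvm.global_ctors") then appends
       approximately:
         @llvm.global_ctors = appending global [1 x { i32, void ()* }]
             [{ i32, void ()* } { i32 65535, void ()* @init_table }]
       where 65535 is GCC's default constructor priority.  */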
+
+/// emit_alias_to_llvm - Given decl and target emit alias to target.
+void emit_alias_to_llvm(tree decl, tree target, tree target_decl) {
+ if (errorcount || sorrycount) {
+ TREE_ASM_WRITTEN(decl) = 1;
+ return; // Do not process broken code.
+ }
+
+//TODO timevar_push(TV_LLVM_GLOBALS);
+
+ // Get or create LLVM global for our alias.
+ GlobalValue *V = cast<GlobalValue>(DECL_LLVM(decl));
+
+ GlobalValue *Aliasee = NULL;
+
+ if (target_decl)
+ Aliasee = cast<GlobalValue>(DECL_LLVM(target_decl));
+ else {
+    // This is something insane. Probably only LTHUNKs can be here.
+    // Try to grab the decl from the IDENTIFIER_NODE.
+
+ // Query SymTab for aliasee
+ const char* AliaseeName = IDENTIFIER_POINTER(target);
+ Aliasee =
+ dyn_cast_or_null<GlobalValue>(TheModule->
+ getValueSymbolTable().lookup(AliaseeName));
+
+ // Last resort. Query for name set via __asm__
+ if (!Aliasee) {
+ std::string starred = std::string("\001") + AliaseeName;
+ Aliasee =
+ dyn_cast_or_null<GlobalValue>(TheModule->
+ getValueSymbolTable().lookup(starred));
+ }
+
+ if (!Aliasee) {
+ if (lookup_attribute ("weakref", DECL_ATTRIBUTES (decl))) {
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+ Aliasee = new GlobalVariable(*TheModule, GV->getType(),
+ GV->isConstant(),
+ GlobalVariable::ExternalWeakLinkage,
+ NULL, AliaseeName);
+ else if (Function *F = dyn_cast<Function>(V))
+ Aliasee = Function::Create(F->getFunctionType(),
+ Function::ExternalWeakLinkage,
+ AliaseeName, TheModule);
+ else
+          assert(0 && "Unsupported global value");
+ } else {
+ error ("%J%qD aliased to undefined symbol %qs", decl, decl, AliaseeName);
+//TODO timevar_pop(TV_LLVM_GLOBALS);
+ return;
+ }
+ }
+ }
+
+ GlobalValue::LinkageTypes Linkage;
+
+ // A weak alias has TREE_PUBLIC set but not the other bits.
+ if (false)//FIXME DECL_LLVM_PRIVATE(decl))
+ Linkage = GlobalValue::PrivateLinkage;
+ else if (false)//FIXME DECL_LLVM_LINKER_PRIVATE(decl))
+ Linkage = GlobalValue::LinkerPrivateLinkage;
+ else if (DECL_WEAK(decl))
+ // The user may have explicitly asked for weak linkage - ignore flag_odr.
+ Linkage = GlobalValue::WeakAnyLinkage;
+ else if (!TREE_PUBLIC(decl))
+ Linkage = GlobalValue::InternalLinkage;
+ else
+ Linkage = GlobalValue::ExternalLinkage;
+
+ GlobalAlias* GA = new GlobalAlias(Aliasee->getType(), Linkage, "",
+ Aliasee, TheModule);
+
+ handleVisibility(decl, GA);
+
+ if (GA->getType()->canLosslesslyBitCastTo(V->getType()))
+ V->replaceAllUsesWith(ConstantExpr::getBitCast(GA, V->getType()));
+ else if (!V->use_empty()) {
+ error ("%J Alias %qD used with invalid type!", decl, decl);
+//TODO timevar_pop(TV_LLVM_GLOBALS);
+ return;
+ }
+
+ changeLLVMConstant(V, GA);
+ GA->takeName(V);
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+ GV->eraseFromParent();
+ else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
+ GA->eraseFromParent();
+ else if (Function *F = dyn_cast<Function>(V))
+ F->eraseFromParent();
+ else
+    assert(0 && "Unsupported global value");
+
+ TREE_ASM_WRITTEN(decl) = 1;
+
+//TODO timevar_pop(TV_LLVM_GLOBALS);
+ return;
+}
+
+/// ConvertMetadataStringToGV - Convert string to global value. Use existing
+/// global if possible.
+Constant* ConvertMetadataStringToGV(const char *str) {
+
+ Constant *Init = ConstantArray::get(getGlobalContext(), std::string(str));
+
+ // Use cached string if it exists.
+ static std::map<Constant*, GlobalVariable*> StringCSTCache;
+ GlobalVariable *&Slot = StringCSTCache[Init];
+ if (Slot) return Slot;
+
+ // Create a new string global.
+ GlobalVariable *GV = new GlobalVariable(*TheModule, Init->getType(), true,
+ GlobalVariable::PrivateLinkage,
+ Init, ".str");
+ GV->setSection("llvm.metadata");
+ Slot = GV;
+ return GV;
+
+}
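A rough sketch of the global this caches (file name hypothetical, IR spelling approximate):

    /* ConvertMetadataStringToGV("file.c") creates roughly:
         @.str = private constant [7 x i8] c"file.c\00", section "llvm.metadata"
       Because ConstantArray::get unifies identical strings, StringCSTCache hands
       back the same GlobalVariable on every later call with the same text.  */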
+
+/// AddAnnotateAttrsToGlobal - Adds decls that have an annotate attribute to a
+/// vector to be emitted later.
+void AddAnnotateAttrsToGlobal(GlobalValue *GV, tree decl) {
+ LLVMContext &Context = getGlobalContext();
+
+ // Handle annotate attribute on global.
+ tree annotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES (decl));
+ if (annotateAttr == 0)
+ return;
+
+ // Get file and line number
+ Constant *lineNo = ConstantInt::get(Type::getInt32Ty(Context),
+ DECL_SOURCE_LINE(decl));
+ Constant *file = ConvertMetadataStringToGV(DECL_SOURCE_FILE(decl));
+ const Type *SBP = Type::getInt8PtrTy(Context);
+ file = TheFolder->CreateBitCast(file, SBP);
+
+  // There may be multiple annotate attributes. Pass the return of
+  // lookup_attribute to successive lookups.
+ while (annotateAttr) {
+
+ // Each annotate attribute is a tree list.
+ // Get value of list which is our linked list of args.
+ tree args = TREE_VALUE(annotateAttr);
+
+ // Each annotate attribute may have multiple args.
+ // Treat each arg as if it were a separate annotate attribute.
+ for (tree a = args; a; a = TREE_CHAIN(a)) {
+ // Each element of the arg list is a tree list, so get value
+ tree val = TREE_VALUE(a);
+
+      // Assert it's a string, and then get that string.
+ assert(TREE_CODE(val) == STRING_CST &&
+ "Annotate attribute arg should always be a string");
+ Constant *strGV = TreeConstantToLLVM::EmitLV_STRING_CST(val);
+ Constant *Element[4] = {
+ TheFolder->CreateBitCast(GV,SBP),
+ TheFolder->CreateBitCast(strGV,SBP),
+ file,
+ lineNo
+ };
+
+ AttributeAnnotateGlobals.push_back(
+ ConstantStruct::get(Context, Element, 4, false));
+ }
+
+ // Get next annotate attribute.
+ annotateAttr = TREE_CHAIN(annotateAttr);
+ if (annotateAttr)
+ annotateAttr = lookup_attribute("annotate", annotateAttr);
+ }
+}
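A concrete, hypothetical example of the trees walked above:

    /* One global carrying two annotations. */
    int cache_size __attribute__((annotate("tunable"), annotate("hot")));

    /* Each string becomes a private global in the "llvm.metadata" section, and
       one { global, string, file, line } struct is pushed onto
       AttributeAnnotateGlobals per string; llvm_finish_unit later collects the
       structs into the llvm.global.annotations array.  */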
+
+/// reset_initializer_llvm - Change the initializer for a global variable.
+void reset_initializer_llvm(tree decl) {
+ // If there were earlier errors we can get here when DECL_LLVM has not
+ // been set. Don't crash.
+ // We can also get here when DECL_LLVM has not been set for some object
+ // referenced in the initializer. Don't crash then either.
+ if (errorcount || sorrycount)
+ return;
+
+ // Get or create the global variable now.
+ GlobalVariable *GV = cast<GlobalVariable>(DECL_LLVM(decl));
+
+ // Visibility may also have changed.
+ handleVisibility(decl, GV);
+
+ // Convert the initializer over.
+ Constant *Init = TreeConstantToLLVM::Convert(DECL_INITIAL(decl));
+
+ // Set the initializer.
+ GV->setInitializer(Init);
+}
+
+/// reset_type_and_initializer_llvm - Change the type and initializer for
+/// a global variable.
+void reset_type_and_initializer_llvm(tree decl) {
+ // If there were earlier errors we can get here when DECL_LLVM has not
+ // been set. Don't crash.
+ // We can also get here when DECL_LLVM has not been set for some object
+ // referenced in the initializer. Don't crash then either.
+ if (errorcount || sorrycount)
+ return;
+
+ // Get or create the global variable now.
+ GlobalVariable *GV = cast<GlobalVariable>(DECL_LLVM(decl));
+
+ // Visibility may also have changed.
+ handleVisibility(decl, GV);
+
+  // Temporary to avoid infinite recursion (see comments in emit_global_to_llvm).
+ GV->setInitializer(UndefValue::get(GV->getType()->getElementType()));
+
+ // Convert the initializer over.
+ Constant *Init = TreeConstantToLLVM::Convert(DECL_INITIAL(decl));
+
+ // If we had a forward definition that has a type that disagrees with our
+ // initializer, insert a cast now. This sort of thing occurs when we have a
+ // global union, and the LLVM type followed a union initializer that is
+ // different from the union element used for the type.
+ if (GV->getType()->getElementType() != Init->getType()) {
+ GV->removeFromParent();
+ GlobalVariable *NGV = new GlobalVariable(*TheModule, Init->getType(),
+ GV->isConstant(),
+ GV->getLinkage(), 0,
+ GV->getName());
+ NGV->setVisibility(GV->getVisibility());
+ NGV->setSection(GV->getSection());
+ NGV->setAlignment(GV->getAlignment());
+ NGV->setLinkage(GV->getLinkage());
+ GV->replaceAllUsesWith(TheFolder->CreateBitCast(NGV, GV->getType()));
+ changeLLVMConstant(GV, NGV);
+ delete GV;
+ SET_DECL_LLVM(decl, NGV);
+ GV = NGV;
+ }
+
+ // Set the initializer.
+ GV->setInitializer(Init);
+}
+
+/// emit_global_to_llvm - Emit the specified VAR_DECL or aggregate CONST_DECL to
+/// LLVM as a global variable. This function implements the end of
+/// assemble_variable.
+void emit_global_to_llvm(tree decl) {
+ if (errorcount || sorrycount) {
+ TREE_ASM_WRITTEN(decl) = 1;
+ return; // Do not process broken code.
+ }
+
+ // FIXME: Support alignment on globals: DECL_ALIGN.
+ // FIXME: DECL_PRESERVE_P indicates the var is marked with attribute 'used'.
+
+ // Global register variables don't turn into LLVM GlobalVariables.
+ if (TREE_CODE(decl) == VAR_DECL && DECL_REGISTER(decl))
+ return;
+
+  // If the tree node says defer output then do not emit the global yet.
+ if (CODE_CONTAINS_STRUCT (TREE_CODE (decl), TS_DECL_WITH_VIS)
+ && (DECL_DEFER_OUTPUT(decl)))
+ return;
+
+ // If we encounter a forward declaration then do not emit the global yet.
+ if (!TYPE_SIZE(TREE_TYPE(decl)))
+ return;
+
+//TODO timevar_push(TV_LLVM_GLOBALS);
+
+ // Get or create the global variable now.
+ GlobalVariable *GV = cast<GlobalVariable>(DECL_LLVM(decl));
+
+ // Convert the initializer over.
+ Constant *Init;
+ if (DECL_INITIAL(decl) == 0 || DECL_INITIAL(decl) == error_mark_node) {
+ // This global should be zero initialized. Reconvert the type in case the
+ // forward def of the global and the real def differ in type (e.g. declared
+ // as 'int A[]', and defined as 'int A[100]').
+ Init = Constant::getNullValue(ConvertType(TREE_TYPE(decl)));
+ } else {
+ assert((TREE_CONSTANT(DECL_INITIAL(decl)) ||
+ TREE_CODE(DECL_INITIAL(decl)) == STRING_CST) &&
+ "Global initializer should be constant!");
+
+ // Temporarily set an initializer for the global, so we don't infinitely
+ // recurse. If we don't do this, we can hit cases where we see "oh a global
+ // with an initializer hasn't been initialized yet, call emit_global_to_llvm
+    // on it". When constructing the initializer it might refer to itself;
+    // this can happen for things like void *G = &G;
+ //
+ GV->setInitializer(UndefValue::get(GV->getType()->getElementType()));
+ Init = TreeConstantToLLVM::Convert(DECL_INITIAL(decl));
+ }
+
+ // If we had a forward definition that has a type that disagrees with our
+ // initializer, insert a cast now. This sort of thing occurs when we have a
+ // global union, and the LLVM type followed a union initializer that is
+ // different from the union element used for the type.
+ if (GV->getType()->getElementType() != Init->getType()) {
+ GV->removeFromParent();
+ GlobalVariable *NGV = new GlobalVariable(*TheModule, Init->getType(),
+ GV->isConstant(),
+ GlobalValue::ExternalLinkage, 0,
+ GV->getName());
+ GV->replaceAllUsesWith(TheFolder->CreateBitCast(NGV, GV->getType()));
+ changeLLVMConstant(GV, NGV);
+ delete GV;
+ SET_DECL_LLVM(decl, NGV);
+ GV = NGV;
+ }
+
+ // Set the initializer.
+ GV->setInitializer(Init);
+
+ // Set thread local (TLS)
+ if (TREE_CODE(decl) == VAR_DECL && DECL_THREAD_LOCAL_P(decl))
+ GV->setThreadLocal(true);
+
+ // Set the linkage.
+ GlobalValue::LinkageTypes Linkage;
+
+ if (CODE_CONTAINS_STRUCT (TREE_CODE (decl), TS_DECL_WITH_VIS)
+ && false) {// FIXME DECL_LLVM_PRIVATE(decl)) {
+ Linkage = GlobalValue::PrivateLinkage;
+ } else if (CODE_CONTAINS_STRUCT (TREE_CODE (decl), TS_DECL_WITH_VIS)
+ && false) {//FIXME DECL_LLVM_LINKER_PRIVATE(decl)) {
+ Linkage = GlobalValue::LinkerPrivateLinkage;
+ } else if (!TREE_PUBLIC(decl)) {
+ Linkage = GlobalValue::InternalLinkage;
+ } else if (DECL_WEAK(decl)) {
+ // The user may have explicitly asked for weak linkage - ignore flag_odr.
+ Linkage = GlobalValue::WeakAnyLinkage;
+ } else if (DECL_ONE_ONLY(decl)) {
+ Linkage = GlobalValue::getWeakLinkage(flag_odr);
+ } else if (DECL_COMMON(decl) && // DECL_COMMON is only meaningful if no init
+ (!DECL_INITIAL(decl) || DECL_INITIAL(decl) == error_mark_node)) {
+ // llvm-gcc also includes DECL_VIRTUAL_P here.
+ Linkage = GlobalValue::CommonLinkage;
+ } else if (DECL_COMDAT(decl)) {
+ Linkage = GlobalValue::getLinkOnceLinkage(flag_odr);
+ } else {
+ Linkage = GV->getLinkage();
+ }
+
+ // Allow loads from constants to be folded even if the constant has weak
+ // linkage. Do this by giving the constant weak_odr linkage rather than
+ // weak linkage. It is not clear whether this optimization is valid (see
+ // gcc bug 36685), but mainline gcc chooses to do it, and fold may already
+ // have done it, so we might as well join in with gusto.
+ if (GV->isConstant()) {
+ if (Linkage == GlobalValue::WeakAnyLinkage)
+ Linkage = GlobalValue::WeakODRLinkage;
+ else if (Linkage == GlobalValue::LinkOnceAnyLinkage)
+ Linkage = GlobalValue::LinkOnceODRLinkage;
+ }
+ GV->setLinkage(Linkage);
+
+#ifdef TARGET_ADJUST_LLVM_LINKAGE
+ TARGET_ADJUST_LLVM_LINKAGE(GV, decl);
+#endif /* TARGET_ADJUST_LLVM_LINKAGE */
+
+ handleVisibility(decl, GV);
+
+ // Set the section for the global.
+ if (TREE_CODE(decl) == VAR_DECL) {
+ if (DECL_SECTION_NAME(decl)) {
+ GV->setSection(TREE_STRING_POINTER(DECL_SECTION_NAME(decl)));
+#ifdef LLVM_IMPLICIT_TARGET_GLOBAL_VAR_SECTION
+ } else if (const char *Section =
+ LLVM_IMPLICIT_TARGET_GLOBAL_VAR_SECTION(decl)) {
+ GV->setSection(Section);
+#endif
+ }
+
+    // Set the alignment for the global if one of the following conditions is met:
+    // 1) DECL_ALIGN is better than the alignment required by the ABI, or
+    // 2) DECL_ALIGN was set by the user.
+ if (DECL_ALIGN(decl)) {
+ unsigned TargetAlign =
+ getTargetData().getABITypeAlignment(GV->getType()->getElementType());
+ if (DECL_USER_ALIGN(decl) ||
+ 8 * TargetAlign < (unsigned)DECL_ALIGN(decl))
+ GV->setAlignment(DECL_ALIGN(decl) / 8);
+ }
+
+ // Handle used decls
+ if (DECL_PRESERVE_P (decl)) {
+ if (false)//FIXME DECL_LLVM_LINKER_PRIVATE (decl))
+ AttributeCompilerUsedGlobals.insert(GV);
+ else
+ AttributeUsedGlobals.insert(GV);
+ }
+
+ // Add annotate attributes for globals
+ if (DECL_ATTRIBUTES(decl))
+ AddAnnotateAttrsToGlobal(GV, decl);
+
+#ifdef LLVM_IMPLICIT_TARGET_GLOBAL_VAR_SECTION
+ } else if (TREE_CODE(decl) == CONST_DECL) {
+ if (const char *Section =
+ LLVM_IMPLICIT_TARGET_GLOBAL_VAR_SECTION(decl)) {
+ GV->setSection(Section);
+
+ /* LLVM LOCAL - begin radar 6389998 */
+#ifdef TARGET_ADJUST_CFSTRING_NAME
+ TARGET_ADJUST_CFSTRING_NAME(GV, Section);
+#endif
+ /* LLVM LOCAL - end radar 6389998 */
+ }
+#endif
+ }
+
+ // No debug info for globals when optimization is on. While this is
+ // something that would be accurate and useful to a user, it currently
+ // affects some optimizations that, e.g., count uses.
+ if (TheDebugInfo && !optimize)
+ TheDebugInfo->EmitGlobalVariable(GV, decl);
+
+ TREE_ASM_WRITTEN(decl) = 1;
+//TODO timevar_pop(TV_LLVM_GLOBALS);
+}
+
+
+/// ValidateRegisterVariable - Check that a static "asm" variable is
+/// well-formed. If not, emit error messages and return true. If so, return
+/// false.
+bool ValidateRegisterVariable(tree decl) {
+ int RegNumber = decode_reg_name(extractRegisterName(decl));
+ const Type *Ty = ConvertType(TREE_TYPE(decl));
+
+ if (errorcount || sorrycount)
+ return true; // Do not process broken code.
+
+ /* Detect errors in declaring global registers. */
+ if (RegNumber == -1)
+ error("%Jregister name not specified for %qD", decl, decl);
+ else if (RegNumber < 0)
+ error("%Jinvalid register name for %qD", decl, decl);
+ else if (TYPE_MODE(TREE_TYPE(decl)) == BLKmode)
+ error("%Jdata type of %qD isn%'t suitable for a register", decl, decl);
+#if 0 // FIXME: enable this.
+ else if (!HARD_REGNO_MODE_OK(RegNumber, TYPE_MODE(TREE_TYPE(decl))))
+ error("%Jregister specified for %qD isn%'t suitable for data type",
+ decl, decl);
+#endif
+ else if (DECL_INITIAL(decl) != 0 && TREE_STATIC(decl))
+ error("global register variable has initial value");
+ else if (!Ty->isSingleValueType())
+ sorry("%JLLVM cannot handle register variable %qD, report a bug",
+ decl, decl);
+ else {
+ if (TREE_THIS_VOLATILE(decl))
+ warning(0, "volatile register variables don%'t work as you might wish");
+
+ return false; // Everything ok.
+ }
+
+ return true;
+}
+
+
+/// make_decl_llvm - Create the DECL_LLVM for a VAR_DECL or FUNCTION_DECL. DECL
+/// should have static storage duration. In other words, it should not be an
+/// automatic variable, including PARM_DECLs.
+///
+/// There is, however, one exception: this function handles variables explicitly
+/// placed in a particular register by the user.
+///
+/// This function corresponds to make_decl_rtl in varasm.c, and is implicitly
+/// called by DECL_LLVM if a decl doesn't have an LLVM set.
+Value *make_decl_llvm(tree decl) {
+ // If we already made the LLVM, then return it.
+ if (Value *V = get_decl_llvm(decl))
+ return V;
+
+#ifdef ENABLE_CHECKING
+ // Check that we are not being given an automatic variable.
+ // A weak alias has TREE_PUBLIC set but not the other bits.
+ if (TREE_CODE(decl) == PARM_DECL || TREE_CODE(decl) == RESULT_DECL
+ || (TREE_CODE(decl) == VAR_DECL && !TREE_STATIC(decl) &&
+ !TREE_PUBLIC(decl) && !DECL_EXTERNAL(decl) && !DECL_REGISTER(decl)))
+ abort();
+  // And that we were not given a type or a label.
+ else if (TREE_CODE(decl) == TYPE_DECL || TREE_CODE(decl) == LABEL_DECL)
+ abort ();
+#endif
+
+ LLVMContext &Context = getGlobalContext();
+
+ if (errorcount || sorrycount)
+ return NULL; // Do not process broken code.
+
+ // Global register variable with asm name, e.g.:
+ // register unsigned long esp __asm__("ebp");
+ if (TREE_CODE(decl) != FUNCTION_DECL && DECL_REGISTER(decl)) {
+ // This just verifies that the variable is ok. The actual "load/store"
+ // code paths handle accesses to the variable.
+ ValidateRegisterVariable(decl);
+ return NULL;
+ }
+
+//TODO timevar_push(TV_LLVM_GLOBALS);
+
+ std::string Name;
+ if (TREE_CODE(decl) != CONST_DECL) // CONST_DECLs do not have assembler names.
+ Name = getLLVMAssemblerName(decl).str();
+
+  // Now handle ordinary static variables and functions (in memory).
+  // Also handle variables that were invalidly declared register.
+ if (!Name.empty() && Name[0] == 1) {
+#ifdef REGISTER_PREFIX
+ if (strlen (REGISTER_PREFIX) != 0) {
+ int reg_number = decode_reg_name(Name);
+ if (reg_number >= 0 || reg_number == -3)
+ error("%Jregister name given for non-register variable %qD",
+ decl, decl);
+ }
+#endif
+ }
+
+ // Specifying a section attribute on a variable forces it into a
+ // non-.bss section, and thus it cannot be common.
+ if (TREE_CODE(decl) == VAR_DECL && DECL_SECTION_NAME(decl) != NULL_TREE &&
+ DECL_INITIAL(decl) == NULL_TREE && DECL_COMMON(decl))
+ DECL_COMMON(decl) = 0;
+
+ // Variables can't be both common and weak.
+ if (TREE_CODE(decl) == VAR_DECL && DECL_WEAK(decl))
+ DECL_COMMON(decl) = 0;
+
+ // Okay, now we need to create an LLVM global variable or function for this
+ // object. Note that this is quite possibly a forward reference to the
+ // object, so its type may change later.
+ if (TREE_CODE(decl) == FUNCTION_DECL) {
+ assert(!Name.empty() && "Function with empty name!");
+ // If this function has already been created, reuse the decl. This happens
+ // when we have something like __builtin_memset and memset in the same file.
+ Function *FnEntry = TheModule->getFunction(Name);
+ if (FnEntry == 0) {
+ CallingConv::ID CC;
+ AttrListPtr PAL;
+ const FunctionType *Ty =
+ TheTypeConverter->ConvertFunctionType(TREE_TYPE(decl), decl, NULL,
+ CC, PAL);
+ FnEntry = Function::Create(Ty, Function::ExternalLinkage, Name, TheModule);
+ FnEntry->setCallingConv(CC);
+ FnEntry->setAttributes(PAL);
+
+ // Check for external weak linkage.
+ if (DECL_EXTERNAL(decl) && DECL_WEAK(decl))
+ FnEntry->setLinkage(Function::ExternalWeakLinkage);
+
+#ifdef TARGET_ADJUST_LLVM_LINKAGE
+ TARGET_ADJUST_LLVM_LINKAGE(FnEntry,decl);
+#endif /* TARGET_ADJUST_LLVM_LINKAGE */
+
+ handleVisibility(decl, FnEntry);
+
+ // If FnEntry got renamed, then there is already an object with this name
+ // in the symbol table. If this happens, the old one must be a forward
+ // decl, just replace it with a cast of the new one.
+ if (FnEntry->getName() != Name) {
+ GlobalVariable *G = TheModule->getGlobalVariable(Name, true);
+ assert(G && G->isDeclaration() && "A global turned into a function?");
+
+ // Replace any uses of "G" with uses of FnEntry.
+ Constant *GInNewType = TheFolder->CreateBitCast(FnEntry, G->getType());
+ G->replaceAllUsesWith(GInNewType);
+
+ // Update the decl that points to G.
+ changeLLVMConstant(G, GInNewType);
+
+      // Now we can give FnEntry the proper name.
+ FnEntry->takeName(G);
+
+ // G is now dead, nuke it.
+ G->eraseFromParent();
+ }
+ }
+ return SET_DECL_LLVM(decl, FnEntry);
+ } else {
+ assert((TREE_CODE(decl) == VAR_DECL ||
+ TREE_CODE(decl) == CONST_DECL) && "Not a function or var decl?");
+ const Type *Ty = ConvertType(TREE_TYPE(decl));
+    GlobalVariable *GV;
+
+ // If we have "extern void foo", make the global have type {} instead of
+ // type void.
+ if (Ty->isVoidTy())
+ Ty = StructType::get(Context);
+
+ if (Name.empty()) { // Global has no name.
+ GV = new GlobalVariable(*TheModule, Ty, false,
+ GlobalValue::ExternalLinkage, 0, "");
+
+ // Check for external weak linkage.
+ if (DECL_EXTERNAL(decl) && DECL_WEAK(decl))
+ GV->setLinkage(GlobalValue::ExternalWeakLinkage);
+
+#ifdef TARGET_ADJUST_LLVM_LINKAGE
+ TARGET_ADJUST_LLVM_LINKAGE(GV,decl);
+#endif /* TARGET_ADJUST_LLVM_LINKAGE */
+
+ handleVisibility(decl, GV);
+ } else {
+ // If the global has a name, prevent multiple vars with the same name from
+ // being created.
+ GlobalVariable *GVE = TheModule->getGlobalVariable(Name, true);
+
+ if (GVE == 0) {
+ GV = new GlobalVariable(*TheModule, Ty, false,
+ GlobalValue::ExternalLinkage, 0, Name);
+
+ // Check for external weak linkage.
+ if (DECL_EXTERNAL(decl) && DECL_WEAK(decl))
+ GV->setLinkage(GlobalValue::ExternalWeakLinkage);
+
+#ifdef TARGET_ADJUST_LLVM_LINKAGE
+ TARGET_ADJUST_LLVM_LINKAGE(GV,decl);
+#endif /* TARGET_ADJUST_LLVM_LINKAGE */
+
+ handleVisibility(decl, GV);
+
+ // If GV got renamed, then there is already an object with this name in
+ // the symbol table. If this happens, the old one must be a forward
+ // decl, just replace it with a cast of the new one.
+ if (GV->getName() != Name) {
+ Function *F = TheModule->getFunction(Name);
+ assert(F && F->isDeclaration() && "A function turned into a global?");
+
+ // Replace any uses of "F" with uses of GV.
+ Constant *FInNewType = TheFolder->CreateBitCast(GV, F->getType());
+ F->replaceAllUsesWith(FInNewType);
+
+ // Update the decl that points to F.
+ changeLLVMConstant(F, FInNewType);
+
+ // Now we can give GV the proper name.
+ GV->takeName(F);
+
+ // F is now dead, nuke it.
+ F->eraseFromParent();
+ }
+
+ } else {
+ GV = GVE; // Global already created, reuse it.
+ }
+ }
+
+ if ((TREE_READONLY(decl) && !TREE_SIDE_EFFECTS(decl)) ||
+ TREE_CODE(decl) == CONST_DECL) {
+ if (DECL_EXTERNAL(decl)) {
+ // Mark external globals constant even though they could be marked
+ // non-constant in the defining translation unit. The definition of the
+ // global determines whether the global is ultimately constant or not,
+ // marking this constant will allow us to do some extra (legal)
+ // optimizations that we would otherwise not be able to do. (In C++,
+ // any global that is 'C++ const' may not be readonly: it could have a
+        // dynamic initializer.)
+ //
+ GV->setConstant(true);
+ } else {
+ // Mark readonly globals with constant initializers constant.
+ if (DECL_INITIAL(decl) != error_mark_node && // uninitialized?
+ DECL_INITIAL(decl) &&
+ (TREE_CONSTANT(DECL_INITIAL(decl)) ||
+ TREE_CODE(DECL_INITIAL(decl)) == STRING_CST))
+ GV->setConstant(true);
+ }
+ }
+
+ // Set thread local (TLS)
+ if (TREE_CODE(decl) == VAR_DECL && DECL_THREAD_LOCAL_P(decl))
+ GV->setThreadLocal(true);
+
+ return SET_DECL_LLVM(decl, GV);
+ }
+//TODO timevar_pop(TV_LLVM_GLOBALS);
+}
+
+/// llvm_mark_decl_weak - Used by varasm.c, called when a decl is found to be
+/// weak, but it already had an llvm object created for it. This marks the LLVM
+/// object weak as well.
+void llvm_mark_decl_weak(tree decl) {
+ assert(DECL_LLVM_SET_P(decl) && DECL_WEAK(decl) &&
+ isa<GlobalValue>(DECL_LLVM(decl)) && "Decl isn't marked weak!");
+ GlobalValue *GV = cast<GlobalValue>(DECL_LLVM(decl));
+
+ // Do not mark something that is already known to be linkonce or internal.
+ // The user may have explicitly asked for weak linkage - ignore flag_odr.
+ if (GV->hasExternalLinkage()) {
+ GlobalValue::LinkageTypes Linkage;
+ if (GV->isDeclaration()) {
+ Linkage = GlobalValue::ExternalWeakLinkage;
+ } else {
+ Linkage = GlobalValue::WeakAnyLinkage;
+ // Allow loads from constants to be folded even if the constant has weak
+ // linkage. Do this by giving the constant weak_odr linkage rather than
+ // weak linkage. It is not clear whether this optimization is valid (see
+ // gcc bug 36685), but mainline gcc chooses to do it, and fold may already
+ // have done it, so we might as well join in with gusto.
+ if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
+ if (GVar->isConstant())
+ Linkage = GlobalValue::WeakODRLinkage;
+ }
+ GV->setLinkage(Linkage);
+ }
+}
+
+/// llvm_emit_ctor_dtor - Called to emit static ctors/dtors to LLVM code.
+/// FnDecl is a 'void()' FUNCTION_DECL for the code, InitPrio is the init
+/// priority, and isCtor indicates whether this is a ctor or dtor.
+void llvm_emit_ctor_dtor(tree FnDecl, int InitPrio, int isCtor) {
+ mark_decl_referenced(FnDecl); // Inform cgraph that we used the global.
+
+ if (errorcount || sorrycount) return;
+
+ Constant *C = cast<Constant>(DECL_LLVM(FnDecl));
+ (isCtor ? &StaticCtors:&StaticDtors)->push_back(std::make_pair(C, InitPrio));
+}
+
+void llvm_emit_typedef(tree decl) {
+ // Need hooks for debug info?
+ return;
+}
+
+/// llvm_emit_file_scope_asm - Emit the specified string as a file-scope inline
+/// asm block.
+void llvm_emit_file_scope_asm(const char *string) {
+ if (TheModule->getModuleInlineAsm().empty())
+ TheModule->setModuleInlineAsm(string);
+ else
+ TheModule->setModuleInlineAsm(TheModule->getModuleInlineAsm() + "\n" +
+ string);
+}
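A sketch of the input this handles (directives hypothetical); each file-scope asm statement is appended to the module-level inline assembly with a newline separator:

    __asm__(".globl helper_entry");
    __asm__(".set helper_entry, helper_impl");

    /* After both statements, TheModule->getModuleInlineAsm() holds the two
       strings joined by '\n'.  */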
+
+//FIXME/// print_llvm - Print the specified LLVM chunk like an operand, called by
+//FIXME/// print-tree.c for tree dumps.
+//FIXMEvoid print_llvm(FILE *file, void *LLVM) {
+//FIXME oFILEstream FS(file);
+//FIXME FS << "LLVM: ";
+//FIXME WriteAsOperand(FS, (Value*)LLVM, true, TheModule);
+//FIXME}
+//FIXME
+//FIXME/// print_llvm_type - Print the specified LLVM type symbolically, called by
+//FIXME/// print-tree.c for tree dumps.
+//FIXMEvoid print_llvm_type(FILE *file, void *LLVM) {
+//FIXME oFILEstream FS(file);
+//FIXME FS << "LLVM: ";
+//FIXME
+//FIXME // FIXME: oFILEstream can probably be removed in favor of a new raw_ostream
+//FIXME // adaptor which would be simpler and more efficient. In the meantime, just
+//FIXME // adapt the adaptor.
+//FIXME raw_os_ostream RO(FS);
+//FIXME WriteTypeSymbolic(RO, (const Type*)LLVM, TheModule);
+//FIXME}
+
+/// extractRegisterName - Get a register name given its decl. In 4.2, unlike
+/// 4.0, these names have been run through set_user_assembler_name, which means
+/// they may have a leading star at this point; compensate.
+const char* extractRegisterName(tree decl) {
+ const char* Name = IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(decl));
+ return (*Name == '*') ? Name + 1 : Name;
+}
+
+/// getLLVMAssemblerName - Get the assembler name (DECL_ASSEMBLER_NAME) for the
+/// declaration, with any leading star replaced by '\1'.
+Twine getLLVMAssemblerName(union tree_node *decl) {
+ tree Ident = DECL_ASSEMBLER_NAME(decl);
+ if (!Ident)
+ return "";
+
+ const char *Name = IDENTIFIER_POINTER(Ident);
+ if (*Name != '*')
+ return Name;
+
+ return "\1" + Twine(Name + 1);
+}
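For example (declaration hypothetical), a user-specified assembler name reaches this point with a leading star, which the two helpers above compensate for in different ways:

    int counter __asm__("real_counter");

    /* DECL_ASSEMBLER_NAME is "*real_counter": extractRegisterName strips the
       star, while getLLVMAssemblerName returns "\1real_counter" so that LLVM
       uses the name verbatim instead of applying the target's symbol prefix.  */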
+
+/// FinalizePlugin - Shutdown the plugin.
+static void FinalizePlugin(void) {
+ static bool Finalized = false;
+ if (Finalized)
+ return;
+
+ llvm_shutdown();
+
+ Finalized = true;
+}
+
+/// TakeoverAsmOutput - Obtain exclusive use of the assembly code output file.
+/// Any GCC output will be thrown away.
+static void TakeoverAsmOutput(void) {
+ // Calculate the output file name as in init_asm_output (toplev.c).
+ if (!dump_base_name && main_input_filename)
+ dump_base_name = main_input_filename[0] ? main_input_filename : "gccdump";
+
+ if (!main_input_filename && !asm_file_name) {
+ llvm_asm_file_name = "-";
+ } else if (!asm_file_name) {
+ int len = strlen(dump_base_name);
+ char *dumpname = XNEWVEC(char, len + 6);
+
+ memcpy(dumpname, dump_base_name, len + 1);
+ strip_off_ending(dumpname, len);
+ strcat(dumpname, ".s");
+ llvm_asm_file_name = dumpname;
+ } else {
+ llvm_asm_file_name = asm_file_name;
+ }
+
+ if (!SaveGCCOutput) {
+ // Redirect any GCC output to /dev/null.
+ asm_file_name = HOST_BIT_BUCKET;
+ } else {
+ // Save GCC output to a special file. Good for seeing how much pointless
+ // output gcc is producing.
+ int len = strlen(llvm_asm_file_name);
+ char *name = XNEWVEC(char, len + 5);
+ memcpy(name, llvm_asm_file_name, len + 1);
+ asm_file_name = strcat(name, ".gcc");
+ }
+}
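A worked example of the name juggling above (file names hypothetical):

    /* gcc invoked on test.c with no explicit name for the .s output:
       - llvm_asm_file_name = "test.s"  (dump_base_name, ending stripped, ".s")
       - by default, asm_file_name = HOST_BIT_BUCKET, so GCC's own assembly
         output is thrown away;
       - with the save-gcc-output flag, asm_file_name = "test.s.gcc" instead.  */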
+
+
+//===----------------------------------------------------------------------===//
+// Plugin interface
+//===----------------------------------------------------------------------===//
+
+// This plugin's code is licensed under the GPLv2. The LLVM libraries use
+// the GPL-compatible University of Illinois/NCSA Open Source License.
+int plugin_is_GPL_compatible; // This plugin is GPL compatible.
+
+
+/// llvm_start_unit - Perform late initialization. This is called by GCC just
+/// before processing the compilation unit.
+/// NOTE: called even when only doing syntax checking, so do not initialize the
+/// module etc here.
+static void llvm_start_unit(void *gcc_data, void *user_data) {
+ if (!quiet_flag)
+ errs() << "Starting compilation unit\n";
+
+#ifdef ENABLE_LTO
+ // Output LLVM IR if the user requested generation of lto data.
+ EmitIR |= flag_generate_lto != 0;
+ flag_generate_lto = 0;
+#endif
+}
+
+
+/// gate_emission - Whether to turn gimple into LLVM IR.
+static bool gate_emission(void) {
+ // Don't bother doing anything if the program has errors.
+ return !errorcount && !sorrycount;
+}
+
+
+/// emit_variables - Output GCC global variables to the LLVM IR.
+static unsigned int emit_variables(void) {
+ LazilyInitializeModule();
+
+ // Output all externally visible global variables, whether they are used in
+ // this compilation unit or not. Global variables that are not externally
+ // visible will be output when their user is, or discarded if unused.
+ struct varpool_node *vnode;
+ FOR_EACH_STATIC_VARIABLE (vnode) {
+ if (TREE_PUBLIC(vnode->decl))
+ // An externally visible global variable - output it.
+ emit_global_to_llvm(vnode->decl);
+
+ // Mark all variables as written so gcc doesn't waste time outputting them.
+ TREE_ASM_WRITTEN(vnode->decl) = 1;
+ }
+
+ return 0;
+}
+
+/// pass_emit_variables - IPA pass that turns GCC variables into LLVM IR.
+static struct simple_ipa_opt_pass pass_emit_variables =
+{
+ {
+ SIMPLE_IPA_PASS,
+ "emit_variables", /* name */
+ gate_emission, /* gate */
+ emit_variables, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0 /* todo_flags_finish */
+ }
+};
+
+
+/// emit_function - Turn a gimple function into LLVM IR. This is called by
+/// GCC once for each function in the compilation unit.
+static unsigned int emit_function (void) {
+ LazilyInitializeModule();
+
+//TODO Don't want to use sorry at this stage...
+//TODO if (cfun->nonlocal_goto_save_area)
+//TODO sorry("%Jnon-local gotos not supported by LLVM", fndecl);
+
+//TODO Do we want to do this? Will the warning set sorry_count etc?
+//TODO enum symbol_visibility vis = DECL_VISIBILITY (current_function_decl);
+//TODO
+//TODO if (vis != VISIBILITY_DEFAULT)
+//TODO // "asm_out.visibility" emits an important warning if we're using a
+//TODO // visibility that's not supported by the target.
+//TODO targetm.asm_out.visibility(current_function_decl, vis);
+
+ // There's no need to defer outputting this function any more; we
+ // know we want to output it.
+ DECL_DEFER_OUTPUT(current_function_decl) = 0;
+
+  // Provide the function converter with dominators.
+ calculate_dominance_info(CDI_DOMINATORS);
+
+ // Convert the AST to raw/ugly LLVM code.
+ Function *Fn;
+ {
+ TreeToLLVM Emitter(current_function_decl);
+ Fn = Emitter.EmitFunction();
+ }
+
+ // Free dominator and other ssa data structures.
+ execute_free_datastructures();
+
+//TODO performLateBackendInitialization();
+ createPerFunctionOptimizationPasses();
+
+ if (PerFunctionPasses)
+ PerFunctionPasses->run(*Fn);
+
+ // TODO: Nuke the .ll code for the function at -O[01] if we don't want to
+ // inline it or something else.
+
+ // Finally, we have written out this function!
+ TREE_ASM_WRITTEN(current_function_decl) = 1;
+
+ // When debugging, append the LLVM IR to the dump file.
+ if (dump_file) {
+ raw_fd_ostream dump_stream(fileno(dump_file), false);
+ Fn->print(dump_stream);
+ }
+
+ return 0;
+}
+
+/// pass_emit_functions - RTL pass that turns gimple functions into LLVM IR.
+static struct rtl_opt_pass pass_emit_functions =
+{
+ {
+ RTL_PASS,
+ "emit_functions", /* name */
+ gate_emission, /* gate */
+ emit_function, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ PROP_ssa | PROP_gimple_leh
+ | PROP_gimple_lomp | PROP_cfg, /* properties_required */
+ 0, /* properties_provided */
+ PROP_ssa | PROP_trees, /* properties_destroyed */
+ TODO_dump_func | TODO_verify_ssa
+ | TODO_verify_flow | TODO_verify_stmts, /* todo_flags_start */
+ TODO_ggc_collect /* todo_flags_finish */
+ }
+};
+
+
+/// llvm_finish - Run shutdown code when GCC exits.
+static void llvm_finish(void *gcc_data, void *user_data) {
+ FinalizePlugin();
+}
+
+/// llvm_finish_unit - Finish the .s file. This is called by GCC once the
+/// compilation unit has been completely processed.
+static void llvm_finish_unit(void *gcc_data, void *user_data) {
+ if (!quiet_flag)
+ errs() << "Finishing compilation unit\n";
+
+ LazilyInitializeModule();
+
+//TODO timevar_push(TV_LLVM_PERFILE);
+ LLVMContext &Context = getGlobalContext();
+
+//TODO performLateBackendInitialization();
+ createPerFunctionOptimizationPasses();
+//TODO
+//TODO if (flag_pch_file) {
+//TODO writeLLVMTypesStringTable();
+//TODO writeLLVMValues();
+//TODO }
+
+ // Add an llvm.global_ctors global if needed.
+ if (!StaticCtors.empty())
+ CreateStructorsList(StaticCtors, "llvm.global_ctors");
+ // Add an llvm.global_dtors global if needed.
+ if (!StaticDtors.empty())
+ CreateStructorsList(StaticDtors, "llvm.global_dtors");
+
+ if (!AttributeUsedGlobals.empty()) {
+ std::vector<Constant *> AUGs;
+ const Type *SBP = Type::getInt8PtrTy(Context);
+ for (SmallSetVector<Constant *,32>::iterator
+ AI = AttributeUsedGlobals.begin(),
+ AE = AttributeUsedGlobals.end(); AI != AE; ++AI) {
+ Constant *C = *AI;
+ AUGs.push_back(TheFolder->CreateBitCast(C, SBP));
+ }
+
+ ArrayType *AT = ArrayType::get(SBP, AUGs.size());
+ Constant *Init = ConstantArray::get(AT, AUGs);
+ GlobalValue *gv = new GlobalVariable(*TheModule, AT, false,
+ GlobalValue::AppendingLinkage, Init,
+ "llvm.used");
+ gv->setSection("llvm.metadata");
+ AttributeUsedGlobals.clear();
+ }
+
+ if (!AttributeCompilerUsedGlobals.empty()) {
+ std::vector<Constant *> ACUGs;
+ const Type *SBP = Type::getInt8PtrTy(Context);
+ for (SmallSetVector<Constant *,32>::iterator
+ AI = AttributeCompilerUsedGlobals.begin(),
+ AE = AttributeCompilerUsedGlobals.end(); AI != AE; ++AI) {
+ Constant *C = *AI;
+ ACUGs.push_back(TheFolder->CreateBitCast(C, SBP));
+ }
+
+ ArrayType *AT = ArrayType::get(SBP, ACUGs.size());
+ Constant *Init = ConstantArray::get(AT, ACUGs);
+ GlobalValue *gv = new GlobalVariable(*TheModule, AT, false,
+ GlobalValue::AppendingLinkage, Init,
+ "llvm.compiler.used");
+ gv->setSection("llvm.metadata");
+ AttributeCompilerUsedGlobals.clear();
+ }
+
+ // Add llvm.global.annotations
+ if (!AttributeAnnotateGlobals.empty()) {
+ Constant *Array = ConstantArray::get(
+ ArrayType::get(AttributeAnnotateGlobals[0]->getType(),
+ AttributeAnnotateGlobals.size()),
+ AttributeAnnotateGlobals);
+ GlobalValue *gv = new GlobalVariable(*TheModule, Array->getType(), false,
+ GlobalValue::AppendingLinkage, Array,
+ "llvm.global.annotations");
+ gv->setSection("llvm.metadata");
+ AttributeAnnotateGlobals.clear();
+ }
+
+ // Finish off the per-function pass.
+ if (PerFunctionPasses)
+ PerFunctionPasses->doFinalization();
+
+//TODO // Emit intermediate file before module level optimization passes are run.
+//TODO if (flag_debug_llvm_module_opt) {
+//TODO
+//TODO static PassManager *IntermediatePM = new PassManager();
+//TODO IntermediatePM->add(new TargetData(*TheTarget->getTargetData()));
+//TODO
+//TODO char asm_intermediate_out_filename[MAXPATHLEN];
+//TODO strcpy(&asm_intermediate_out_filename[0], llvm_asm_file_name);
+//TODO strcat(&asm_intermediate_out_filename[0],".0");
+//TODO FILE *asm_intermediate_out_file = fopen(asm_intermediate_out_filename, "w+b");
+//TODO AsmIntermediateOutStream = new oFILEstream(asm_intermediate_out_file);
+//TODO raw_ostream *AsmIntermediateRawOutStream =
+//TODO new raw_os_ostream(*AsmIntermediateOutStream);
+//TODO if (EmitIR && 0)
+//TODO IntermediatePM->add(createBitcodeWriterPass(*AsmIntermediateOutStream));
+//TODO if (EmitIR)
+//TODO IntermediatePM->add(createPrintModulePass(AsmIntermediateRawOutStream));
+//TODO IntermediatePM->run(*TheModule);
+//TODO AsmIntermediateRawOutStream->flush();
+//TODO delete AsmIntermediateRawOutStream;
+//TODO AsmIntermediateRawOutStream = 0;
+//TODO AsmIntermediateOutStream->flush();
+//TODO fflush(asm_intermediate_out_file);
+//TODO delete AsmIntermediateOutStream;
+//TODO AsmIntermediateOutStream = 0;
+//TODO }
+
+ // Run module-level optimizers, if any are present.
+ createPerModuleOptimizationPasses();
+ if (PerModulePasses)
+ PerModulePasses->run(*TheModule);
+
+ // Run the code generator, if present.
+ if (CodeGenPasses) {
+ CodeGenPasses->doInitialization();
+ for (Module::iterator I = TheModule->begin(), E = TheModule->end();
+ I != E; ++I)
+ if (!I->isDeclaration())
+ CodeGenPasses->run(*I);
+ CodeGenPasses->doFinalization();
+ }
+
+ FormattedOutStream.flush();
+ OutStream->flush();
+//TODO delete AsmOutRawStream;
+//TODO AsmOutRawStream = 0;
+//TODO delete AsmOutStream;
+//TODO AsmOutStream = 0;
+//TODO timevar_pop(TV_LLVM_PERFILE);
+
+ // We have finished - shutdown the plugin. Doing this here ensures that timer
+ // info and other statistics are not intermingled with those produced by GCC.
+ FinalizePlugin();
+}
+
+
+/// gate_null - Gate method for a pass that does nothing.
+static bool
+gate_null (void)
+{
+ return false;
+}
+
+/// pass_gimple_null - Gimple pass that does nothing.
+static struct gimple_opt_pass pass_gimple_null =
+{
+ {
+ GIMPLE_PASS,
+ NULL, /* name */
+ gate_null, /* gate */
+ NULL, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0 /* todo_flags_finish */
+ }
+};
+
+/// pass_ipa_null - IPA pass that does nothing.
+static struct ipa_opt_pass_d pass_ipa_null = {
+ {
+ IPA_PASS,
+ NULL, /* name */
+ gate_null, /* gate */
+ NULL, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0 /* todo_flags_finish */
+ },
+ NULL, /* generate_summary */
+ NULL, /* write_summary */
+ NULL, /* read_summary */
+ NULL, /* function_read_summary */
+ 0, /* TODOs */
+ NULL, /* function_transform */
+ NULL /* variable_transform */
+};
+
+/// pass_rtl_null - RTL pass that does nothing.
+static struct rtl_opt_pass pass_rtl_null =
+{
+ {
+ RTL_PASS,
+ NULL, /* name */
+ gate_null, /* gate */
+ NULL, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0 /* todo_flags_finish */
+ }
+};
+
+/// pass_simple_ipa_null - Simple IPA pass that does nothing.
+static struct simple_ipa_opt_pass pass_simple_ipa_null =
+{
+ {
+ SIMPLE_IPA_PASS,
+ NULL, /* name */
+ gate_null, /* gate */
+ NULL, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0 /* todo_flags_finish */
+ }
+};
+
+
+// Garbage collector roots.
+extern const struct ggc_cache_tab gt_ggc_rc__gt_llvm_cache_h[];
+
+
+/// PluginFlags - Flag arguments for the plugin.
+
+struct FlagDescriptor {
+ const char *Key; // The plugin argument is -fplugin-arg-llvm-KEY.
+ bool *Flag; // Set to true if the flag is seen.
+};
+
+static FlagDescriptor PluginFlags[] = {
+ { "disable-llvm-optzns", &DisableLLVMOptimizations },
+ { "enable-gcc-optzns", &EnableGCCOptimizations },
+ { "emit-ir", &EmitIR },
+ { "save-gcc-output", &SaveGCCOutput },
+ { NULL, NULL } // Terminator.
+};
+
+
+/// llvm_plugin_info - Information about this plugin. Users can access this
+/// using "gcc --help -v".
+static struct plugin_info llvm_plugin_info = {
+ REVISION, // version
+ // TODO provide something useful here
+ NULL // help
+};
+
+
+/// plugin_init - Plugin initialization routine, called by GCC. This is the
+/// first code executed in the plugin (except for constructors). Configure
+/// the plugin and setup GCC, taking over optimization and code generation.
+int plugin_init (struct plugin_name_args *plugin_info,
+ struct plugin_gcc_version *version) {
+ const char *plugin_name = plugin_info->base_name;
+ struct register_pass_info pass_info;
+
+ // Check that the running gcc is the same as the gcc we were built against.
+ // If not, refuse to load. This seems wise when developing against a fast
+ // moving gcc tree. TODO: Use a milder check if doing a "release build".
+ if (!plugin_default_version_check (version, &gcc_version)) {
+ errs() << "Incompatible plugin version\n";
+ return 1;
+ }
+
+ // Provide GCC with our version and help information.
+ register_callback (plugin_name, PLUGIN_INFO, NULL, &llvm_plugin_info);
+
+ // Process any plugin arguments.
+ {
+ struct plugin_argument *argv = plugin_info->argv;
+ int argc = plugin_info->argc;
+
+ for (int i = 0; i < argc; ++i) {
+ bool Found = false;
+
+ // Look for a matching flag.
+ for (FlagDescriptor *F = PluginFlags; F->Key; ++F) {
+ if (strcmp (argv[i].key, F->Key))
+ continue;
+
+ if (argv[i].value)
+ warning (0, G_("option '-fplugin-arg-%s-%s=%s' ignored"
+ " (superfluous '=%s')"),
+ plugin_name, argv[i].key, argv[i].value, argv[i].value);
+ else
+ *F->Flag = true;
+
+ Found = true;
+ break;
+ }
+
+ if (!Found)
+ warning (0, G_("plugin %qs: unrecognized argument %qs ignored"),
+ plugin_name, argv[i].key);
+ }
+ }
+
+ // Obtain exclusive use of the assembly code output file. This stops GCC from
+ // writing anything at all to the assembly file - only we get to write to it.
+ TakeoverAsmOutput();
+
+ // Register our garbage collector roots.
+ register_callback (plugin_name, PLUGIN_REGISTER_GGC_CACHES, NULL,
+ (void *)gt_ggc_rc__gt_llvm_cache_h);
+
+ // Perform late initialization just before processing the compilation unit.
+ register_callback (plugin_name, PLUGIN_START_UNIT, llvm_start_unit, NULL);
+
+ // Add an ipa pass that emits global variables, calling emit_global_to_llvm
+ // for each GCC static variable.
+ pass_info.pass = &pass_emit_variables.pass;
+ pass_info.reference_pass_name = "ipa_struct_reorg";
+ pass_info.ref_pass_instance_number = 0;
+ pass_info.pos_op = PASS_POS_INSERT_AFTER;
+ register_callback (plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
+
+ // Turn off all gcc optimization passes.
+ if (!EnableGCCOptimizations) {
+ // TODO: figure out a good way of turning off ipa optimization passes.
+ // Could just set optimize to zero (after taking a copy), but this would
+ // also impact front-end optimizations.
+
+ // Turn off pass_ipa_early_inline.
+ pass_info.pass = &pass_simple_ipa_null.pass;
+ pass_info.reference_pass_name = "einline_ipa";
+ pass_info.ref_pass_instance_number = 0;
+ pass_info.pos_op = PASS_POS_REPLACE;
+ register_callback (plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
+
+ // Turn off pass_all_early_optimizations.
+ pass_info.pass = &pass_gimple_null.pass;
+ pass_info.reference_pass_name = "early_optimizations";
+ pass_info.ref_pass_instance_number = 0;
+ pass_info.pos_op = PASS_POS_REPLACE;
+ register_callback (plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
+
+ // Turn off pass_ipa_increase_alignment.
+ pass_info.pass = &pass_simple_ipa_null.pass;
+ pass_info.reference_pass_name = "increase_alignment";
+ pass_info.ref_pass_instance_number = 0;
+ pass_info.pos_op = PASS_POS_REPLACE;
+ register_callback (plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
+
+ // Turn off pass_ipa_matrix_reorg.
+ pass_info.pass = &pass_simple_ipa_null.pass;
+ pass_info.reference_pass_name = "matrix-reorg";
+ pass_info.ref_pass_instance_number = 0;
+ pass_info.pos_op = PASS_POS_REPLACE;
+ register_callback (plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
+
+ // Turn off pass_ipa_cp.
+ pass_info.pass = &pass_ipa_null.pass;
+ pass_info.reference_pass_name = "cp";
+ pass_info.ref_pass_instance_number = 0;
+ pass_info.pos_op = PASS_POS_REPLACE;
+ register_callback (plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
+
+ // Turn off pass_ipa_inline.
+ pass_info.pass = &pass_ipa_null.pass;
+ pass_info.reference_pass_name = "inline";
+ pass_info.ref_pass_instance_number = 0;
+ pass_info.pos_op = PASS_POS_REPLACE;
+ register_callback (plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
+
+ // Turn off pass_ipa_reference.
+ pass_info.pass = &pass_ipa_null.pass;
+ pass_info.reference_pass_name = "static-var";
+ pass_info.ref_pass_instance_number = 0;
+ pass_info.pos_op = PASS_POS_REPLACE;
+ register_callback (plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
+
+ // Turn off pass_ipa_pure_const.
+ pass_info.pass = &pass_ipa_null.pass;
+ pass_info.reference_pass_name = "pure-const";
+ pass_info.ref_pass_instance_number = 0;
+ pass_info.pos_op = PASS_POS_REPLACE;
+ register_callback (plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
+
+ // Turn off pass_ipa_type_escape.
+ pass_info.pass = &pass_simple_ipa_null.pass;
+ pass_info.reference_pass_name = "type-escape-var";
+ pass_info.ref_pass_instance_number = 0;
+ pass_info.pos_op = PASS_POS_REPLACE;
+ register_callback (plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
+
+ // Turn off pass_ipa_pta.
+ pass_info.pass = &pass_simple_ipa_null.pass;
+ pass_info.reference_pass_name = "pta";
+ pass_info.ref_pass_instance_number = 0;
+ pass_info.pos_op = PASS_POS_REPLACE;
+ register_callback (plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
+
+ // Turn off pass_ipa_struct_reorg.
+ pass_info.pass = &pass_simple_ipa_null.pass;
+ pass_info.reference_pass_name = "ipa_struct_reorg";
+ pass_info.ref_pass_instance_number = 0;
+ pass_info.pos_op = PASS_POS_REPLACE;
+ register_callback (plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
+
+ // Turn off pass_all_optimizations.
+ pass_info.pass = &pass_gimple_null.pass;
+ pass_info.reference_pass_name = "*all_optimizations";
+ pass_info.ref_pass_instance_number = 0;
+ pass_info.pos_op = PASS_POS_REPLACE;
+ register_callback (plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
+ }
+
+ // Replace rtl expansion with gimple to LLVM conversion. This results in each
+ // GCC function in the compilation unit being passed to emit_function.
+ pass_info.pass = &pass_emit_functions.pass;
+ pass_info.reference_pass_name = "expand";
+ pass_info.ref_pass_instance_number = 0;
+ pass_info.pos_op = PASS_POS_REPLACE;
+ register_callback (plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
+
+ // Turn off all rtl passes.
+ pass_info.pass = &pass_gimple_null.pass;
+ pass_info.reference_pass_name = "*rest_of_compilation";
+ pass_info.ref_pass_instance_number = 0;
+ pass_info.pos_op = PASS_POS_REPLACE;
+ register_callback (plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
+
+ pass_info.pass = &pass_rtl_null.pass;
+ pass_info.reference_pass_name = "*clean_state";
+ pass_info.ref_pass_instance_number = 0;
+ pass_info.pos_op = PASS_POS_REPLACE;
+ register_callback (plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
+
+ // Finish the .s file once the compilation unit has been completely processed.
+ register_callback (plugin_name, PLUGIN_FINISH_UNIT, llvm_finish_unit, NULL);
+
+ // Run shutdown code when GCC exits.
+ register_callback (plugin_name, PLUGIN_FINISH, llvm_finish, NULL);
+
+ return 0;
+}
diff --git a/dragonegg/llvm-cache.c b/dragonegg/llvm-cache.c
new file mode 100644
index 00000000000..212ab2f0071
--- /dev/null
+++ b/dragonegg/llvm-cache.c
@@ -0,0 +1,133 @@
+/* Caching values "in" trees
+Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+//===----------------------------------------------------------------------===//
+// This code lets you associate a void* with a tree, as if it were cached
+// inside the tree: if the tree is garbage collected and reallocated, then the
+// cached value will have been cleared.
+//===----------------------------------------------------------------------===//
+
+// Plugin headers
+#include "llvm-cache.h"
+
+// GCC headers
+#include "ggc.h"
+
+struct GTY(()) tree_llvm_map {
+ struct tree_map_base base;
+ const void * GTY((skip)) val;
+};
+
+#define tree_llvm_map_eq tree_map_base_eq
+#define tree_llvm_map_hash tree_map_base_hash
+#define tree_llvm_map_marked_p tree_map_base_marked_p
+
+static GTY ((if_marked ("tree_llvm_map_marked_p"),
+ param_is(struct tree_llvm_map)))
+ htab_t llvm_cache;
+
+/// llvm_has_cached - Returns whether a value has been associated with the tree.
+bool llvm_has_cached(union tree_node *tree) {
+ struct tree_map_base in;
+
+ if (!llvm_cache)
+ return false;
+
+ in.from = tree;
+ return htab_find(llvm_cache, &in) != NULL;
+}
+
+/// llvm_get_cached - Returns the value associated with the tree, or NULL.
+const void *llvm_get_cached(union tree_node *tree) {
+ struct tree_llvm_map *h;
+ struct tree_map_base in;
+
+ if (!llvm_cache)
+ return NULL;
+
+ in.from = tree;
+ h = (struct tree_llvm_map *) htab_find(llvm_cache, &in);
+ return h ? h->val : NULL;
+}
+
+/// llvm_set_cached - Associates the given value with the tree (and returns it).
+/// To delete an association, pass a NULL value here.
+const void *llvm_set_cached(union tree_node *tree, const void *val) {
+ struct tree_llvm_map **slot;
+ struct tree_map_base in;
+
+ in.from = tree;
+
+ // If deleting, remove the slot.
+ if (val == NULL) {
+ if (llvm_cache)
+ htab_remove_elt(llvm_cache, &in);
+ return NULL;
+ }
+
+ if (!llvm_cache)
+ llvm_cache = htab_create_ggc(1024, tree_llvm_map_hash, tree_llvm_map_eq, NULL);
+
+ slot = (struct tree_llvm_map **) htab_find_slot(llvm_cache, &in, INSERT);
+ gcc_assert(slot);
+
+ if (!*slot) {
+ *slot = GGC_NEW(struct tree_llvm_map);
+ (*slot)->base.from = tree;
+ }
+
+ (*slot)->val = val;
+
+ return val;
+}
+
+struct update {
+ const void *old_val;
+ const void *new_val;
+};
+
+/// replace - If the current value for the slot matches old_val, then replace
+/// it with new_val, or delete it if new_val is NULL.
+static int replace(void **slot, void *data) {
+ struct tree_llvm_map *entry = *(struct tree_llvm_map **)slot;
+ struct update *u = (struct update *)data;
+
+ if (entry->val != u->old_val)
+ return 1;
+
+ if (u->new_val != NULL)
+ entry->val = u->new_val;
+ else
+ htab_clear_slot(llvm_cache, slot);
+
+ return 1;
+}
+
+/// llvm_replace_cached - Replaces all occurrences of old_val with new_val.
+void llvm_replace_cached(const void *old_val, const void *new_val) {
+ struct update u = { old_val, new_val };
+
+ if (!llvm_cache || old_val == NULL)
+ return;
+
+ htab_traverse(llvm_cache, replace, &u);
+}
+
+#include "gt-llvm-cache.h"
diff --git a/dragonegg/llvm-cache.h b/dragonegg/llvm-cache.h
new file mode 100644
index 00000000000..764566a8eb6
--- /dev/null
+++ b/dragonegg/llvm-cache.h
@@ -0,0 +1,50 @@
+/* Caching values "in" trees
+Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
+Contributed by Chris Lattner (sabre@nondot.org)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+//===----------------------------------------------------------------------===//
+// This code lets you associate a void* with a tree, as if it were cached
+// inside the tree: if the tree is garbage collected and reallocated, then the
+// cached value will have been cleared.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CACHE_H
+#define LLVM_CACHE_H
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "target.h"
+#include "tree.h"
+
+/// llvm_has_cached - Returns whether a value has been associated with the tree.
+extern bool llvm_has_cached(union tree_node *tree);
+
+/// llvm_get_cached - Returns the value associated with the tree, or NULL.
+extern const void *llvm_get_cached(union tree_node *tree);
+
+/// llvm_set_cached - Associates the given value with the tree (and returns it).
+/// To delete an association, pass NULL for the value.
+extern const void *llvm_set_cached(union tree_node *tree, const void *val);
+
+/// llvm_replace_cached - Replaces all occurrences of old_val with new_val.
+extern void llvm_replace_cached(const void *old_val, const void *new_val);
+
+#endif /* LLVM_CACHE_H */
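A minimal usage sketch of this interface, with hypothetical caller code; the association only survives as long as the tree itself is not garbage collected and reallocated:

    #include "llvm-cache.h"

    /* Hypothetical helpers inside the plugin. */
    static void remember(union tree_node *t, const void *v) {
      if (!llvm_has_cached(t))
        llvm_set_cached(t, v);        /* associate v with t */
    }

    static const void *recall(union tree_node *t) {
      return llvm_get_cached(t);      /* NULL if nothing is cached */
    }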
diff --git a/dragonegg/llvm-convert.cpp b/dragonegg/llvm-convert.cpp
new file mode 100644
index 00000000000..fe76bfcad19
--- /dev/null
+++ b/dragonegg/llvm-convert.cpp
@@ -0,0 +1,8211 @@
+/* High-level LLVM backend interface
+Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
+Contributed by Chris Lattner (sabre@nondot.org)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+//===----------------------------------------------------------------------===//
+// This is the code that converts GCC AST nodes into LLVM code.
+//===----------------------------------------------------------------------===//
+
+// LLVM headers
+#include "llvm/ValueSymbolTable.h"
+#include "llvm/CallingConv.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/Instructions.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/System/Host.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/ValueHandle.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/DenseMap.h"
+
+// System headers
+#include <gmp.h>
+
+// GCC headers
+#undef VISIBILITY_HIDDEN
+
+extern "C" {
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "target.h"
+#include "tree.h"
+
+#include "tm_p.h"
+//TODO#include "c-tree.h" // FIXME: eliminate.
+#include "tree-iterator.h"
+#include "output.h"
+#include "diagnostic.h"
+#include "real.h"
+#include "function.h"
+#include "toplev.h"
+#include "flags.h"
+#include "hard-reg-set.h"
+#include "except.h"
+#include "libfuncs.h"
+#include "tree-flow.h"
+#include "tree-pass.h"
+#include "rtl.h"
+#include "domwalk.h"
+
+extern int get_pointer_alignment (tree exp, unsigned int max_align);
+extern enum machine_mode reg_raw_mode[FIRST_PSEUDO_REGISTER];
+}
+
+// Plugin headers
+#include "llvm-abi.h"
+#include "llvm-internal.h"
+#include "llvm-debug.h"
+#include "bits_and_bobs.h"
+
+static LLVMContext &Context = getGlobalContext();
+
+STATISTIC(NumBasicBlocks, "Number of basic blocks converted");
+STATISTIC(NumStatements, "Number of gimple statements converted");
+
+/// dump - Print a gimple statement to standard error.
+void dump(gimple stmt) {
+ print_gimple_stmt(stderr, stmt, 0, TDF_RAW);
+}
+
+// Check for GCC bug 17347: C++ FE sometimes creates bogus ctor trees
+// which we should throw out
+#define BOGUS_CTOR(exp) \
+ (DECL_INITIAL(exp) && \
+ TREE_CODE(DECL_INITIAL(exp)) == CONSTRUCTOR && \
+ !TREE_TYPE(DECL_INITIAL(exp)))
+
+/// getINTEGER_CSTVal - Return the specified INTEGER_CST value as a uint64_t.
+///
+uint64_t getINTEGER_CSTVal(tree exp) {
+ unsigned HOST_WIDE_INT HI = (unsigned HOST_WIDE_INT)TREE_INT_CST_HIGH(exp);
+ unsigned HOST_WIDE_INT LO = (unsigned HOST_WIDE_INT)TREE_INT_CST_LOW(exp);
+ if (HOST_BITS_PER_WIDE_INT == 64) {
+ return (uint64_t)LO;
+ } else {
+ assert(HOST_BITS_PER_WIDE_INT == 32 &&
+ "Only 32- and 64-bit hosts supported!");
+ return ((uint64_t)HI << 32) | (uint64_t)LO;
+ }
+}
+
+/// isInt64 - Return true if t is an INTEGER_CST that fits in a 64 bit integer.
+/// If Unsigned is false, returns whether it fits in an int64_t.  If Unsigned is
+/// true, returns whether the value is non-negative and fits in a uint64_t.
+/// Always returns false for overflowed constants.
+bool isInt64(tree t, bool Unsigned) {
+ if (HOST_BITS_PER_WIDE_INT == 64)
+ return host_integerp(t, Unsigned) && !TREE_OVERFLOW (t);
+ else {
+ assert(HOST_BITS_PER_WIDE_INT == 32 &&
+ "Only 32- and 64-bit hosts supported!");
+ return
+ (TREE_CODE (t) == INTEGER_CST && !TREE_OVERFLOW (t))
+ && ((TYPE_UNSIGNED(TREE_TYPE(t)) == Unsigned) ||
+ // If the constant is signed and we want an unsigned result, check
+ // that the value is non-negative. If the constant is unsigned and
+ // we want a signed result, check it fits in 63 bits.
+ (HOST_WIDE_INT)TREE_INT_CST_HIGH(t) >= 0);
+ }
+}
+
+/// getInt64 - Extract the value of an INTEGER_CST as a 64 bit integer. If
+/// Unsigned is false, the value must fit in an int64_t.  If Unsigned is true,
+/// the value must be non-negative and fit in a uint64_t. Must not be used on
+/// overflowed constants. These conditions can be checked by calling isInt64.
+uint64_t getInt64(tree t, bool Unsigned) {
+ assert(isInt64(t, Unsigned) && "invalid constant!");
+ return getINTEGER_CSTVal(t);
+}
+
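+// For example, a hypothetical caller extracting a constant array length would
+// pair the check with the extraction ("len" is illustrative only):
+//   if (isInt64(len, /*Unsigned*/true)) {
+//     uint64_t Len = getInt64(len, /*Unsigned*/true);
+//     ...
+//   }
+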
+/// getPointerAlignment - Return the alignment in bytes of exp, a pointer valued
+/// expression, or 1 if the alignment is not known.
+static unsigned int getPointerAlignment(tree exp) {
+ assert(POINTER_TYPE_P (TREE_TYPE (exp)) && "Expected a pointer type!");
+ unsigned int align = get_pointer_alignment(exp, BIGGEST_ALIGNMENT) / 8;
+ return align ? align : 1;
+}
+
+/// NameValue - Try to name the given value after the given GCC tree node. If
+/// the GCC tree node has no sensible name then it does nothing. If the value
+/// already has a name then it is not changed.
+static void NameValue(Value *V, tree t, Twine Prefix = Twine(),
+ Twine Postfix = Twine()) {
+ // If the value already has a name, do not change it.
+ if (V->hasName())
+ return;
+
+ // No sensible name - give up, discarding any pre- and post-fixes.
+ if (!t)
+ return;
+
+ switch (TREE_CODE(t)) {
+ default:
+ // Unhandled case - give up.
+ return;
+
+ case CONST_DECL:
+ case FIELD_DECL:
+ case FUNCTION_DECL:
+ case NAMESPACE_DECL:
+ case PARM_DECL:
+ case VAR_DECL: {
+ if (DECL_NAME(t)) {
+ StringRef Ident(IDENTIFIER_POINTER(DECL_NAME(t)),
+ IDENTIFIER_LENGTH(DECL_NAME(t)));
+ V->setName(Prefix + Ident + Postfix);
+ return;
+ }
+ const char *Annotation = TREE_CODE(t) == CONST_DECL ? "C." : "D.";
+ Twine UID(DECL_UID(t));
+ V->setName(Prefix + Annotation + UID + Postfix);
+ return;
+ }
+
+ case RESULT_DECL:
+ V->setName(Prefix + "<retval>" + Postfix);
+ return;
+
+ case SSA_NAME:
+ Twine NameVersion(SSA_NAME_VERSION(t));
+ NameValue(V, SSA_NAME_VAR(t), Prefix, "_" + NameVersion + Postfix);
+ return;
+ }
+}
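+
+// For illustration (the decl names and UIDs below are hypothetical): a
+// PARM_DECL named "count" yields the value name "count"; an unnamed VAR_DECL
+// with DECL_UID 1234 yields "D.1234" (a CONST_DECL would yield "C.1234"); a
+// RESULT_DECL yields "<retval>"; and an SSA_NAME that is version 3 of "count"
+// yields "count_3".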
+
+//===----------------------------------------------------------------------===//
+// ... High-Level Methods ...
+//===----------------------------------------------------------------------===//
+
+/// TheTreeToLLVM - Keep track of the current function being compiled.
+static TreeToLLVM *TheTreeToLLVM = 0;
+
+const TargetData &getTargetData() {
+ return *TheTarget->getTargetData();
+}
+
+TreeToLLVM::TreeToLLVM(tree fndecl) :
+ TD(getTargetData()), Builder(Context, *TheFolder) {
+ FnDecl = fndecl;
+ Fn = 0;
+ ReturnBB = UnwindBB = 0;
+ ReturnOffset = 0;
+
+ if (TheDebugInfo) {
+ expanded_location Location = expand_location(DECL_SOURCE_LOCATION (fndecl));
+
+ if (Location.file) {
+ TheDebugInfo->setLocationFile(Location.file);
+ TheDebugInfo->setLocationLine(Location.line);
+ } else {
+ TheDebugInfo->setLocationFile("<unknown file>");
+ TheDebugInfo->setLocationLine(0);
+ }
+ }
+
+ AllocaInsertionPoint = 0;
+
+ ExceptionValue = 0;
+ ExceptionSelectorValue = 0;
+ FuncEHException = 0;
+ FuncEHSelector = 0;
+ FuncEHGetTypeID = 0;
+
+ NumAddressTakenBlocks = 0;
+ IndirectGotoBlock = 0;
+
+ assert(TheTreeToLLVM == 0 && "Reentering function creation?");
+ TheTreeToLLVM = this;
+}
+
+TreeToLLVM::~TreeToLLVM() {
+ TheTreeToLLVM = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// ... Local declarations ...
+//===----------------------------------------------------------------------===//
+
+/// isLocalDecl - Whether this declaration is local to the current function.
+static bool isLocalDecl(tree decl) {
+ assert(HAS_RTL_P(decl) && "Expected a declaration with RTL!");
+ return DECL_CONTEXT(decl) == current_function_decl &&
+ !TREE_STATIC(decl) && // Static variables not considered local.
+ TREE_CODE(decl) != FUNCTION_DECL; // Nested functions not considered local.
+}
+
+/// set_decl_local - Remember the LLVM value for a GCC declaration.
+Value *TreeToLLVM::set_decl_local(tree decl, Value *V) {
+ if (!isLocalDecl(decl))
+ return set_decl_llvm(decl, V);
+ if (V != NULL)
+ return LocalDecls[decl] = V;
+ LocalDecls.erase(decl);
+ return NULL;
+}
+
+/// get_decl_local - Retrieve the LLVM value for a GCC declaration, or NULL.
+Value *TreeToLLVM::get_decl_local(tree decl) {
+ if (!isLocalDecl(decl))
+ return get_decl_llvm(decl);
+ DenseMap<tree, AssertingVH<> >::iterator I = LocalDecls.find(decl);
+ if (I != LocalDecls.end())
+ return I->second;
+ return NULL;
+}
+
+/// make_decl_local - Return the LLVM value for a GCC declaration if it exists.
+/// Otherwise creates and returns an appropriate value.
+Value *TreeToLLVM::make_decl_local(tree decl) {
+ if (!isLocalDecl(decl))
+ return make_decl_llvm(decl);
+
+ DenseMap<tree, AssertingVH<> >::iterator I = LocalDecls.find(decl);
+ if (I != LocalDecls.end())
+ return I->second;
+
+ switch (TREE_CODE(decl)) {
+ default:
+ llvm_unreachable("Unhandled local declaration!");
+
+ case RESULT_DECL:
+ case VAR_DECL:
+ EmitAutomaticVariableDecl(decl);
+ I = LocalDecls.find(decl);
+ assert(I != LocalDecls.end() && "Not a local variable?");
+ return I->second;
+ }
+}
+
+/// llvm_store_scalar_argument - Store scalar argument ARGVAL of type
+/// LLVMTY at location LOC.
+static void llvm_store_scalar_argument(Value *Loc, Value *ArgVal,
+ const llvm::Type *LLVMTy,
+ unsigned RealSize,
+ LLVMBuilder &Builder) {
+ if (RealSize) {
+ // Not clear what this is supposed to do on big endian machines...
+ assert(!BYTES_BIG_ENDIAN && "Unsupported case - please report");
+    // Do a byte-wise store because the actual argument type does not match
+    // LLVMTy.
+ assert(isa<IntegerType>(ArgVal->getType()) && "Expected an integer value!");
+ const Type *StoreType = IntegerType::get(Context, RealSize * 8);
+ Loc = Builder.CreateBitCast(Loc, StoreType->getPointerTo());
+ if (ArgVal->getType()->getPrimitiveSizeInBits() >=
+ StoreType->getPrimitiveSizeInBits())
+ ArgVal = Builder.CreateTrunc(ArgVal, StoreType);
+ else
+ ArgVal = Builder.CreateZExt(ArgVal, StoreType);
+ Builder.CreateStore(ArgVal, Loc);
+ } else {
+ // This cast only involves pointers, therefore BitCast.
+ Loc = Builder.CreateBitCast(Loc, LLVMTy->getPointerTo());
+ Builder.CreateStore(ArgVal, Loc);
+ }
+}
+
+#ifndef LLVM_STORE_SCALAR_ARGUMENT
+#define LLVM_STORE_SCALAR_ARGUMENT(LOC,ARG,TYPE,SIZE,BUILDER) \
+ llvm_store_scalar_argument((LOC),(ARG),(TYPE),(SIZE),(BUILDER))
+#endif
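+// A target may override the macro above by providing its own definition of
+// LLVM_STORE_SCALAR_ARGUMENT; when no target-specific definition exists, the
+// generic llvm_store_scalar_argument helper above is used.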
+
+namespace {
+ /// FunctionPrologArgumentConversion - This helper class is driven by the ABI
+ /// definition for this target to figure out how to retrieve arguments from
+ /// the stack/regs coming into a function and store them into an appropriate
+ /// alloca for the argument.
+ struct FunctionPrologArgumentConversion : public DefaultABIClient {
+ tree FunctionDecl;
+ Function::arg_iterator &AI;
+ LLVMBuilder Builder;
+ std::vector<Value*> LocStack;
+ std::vector<std::string> NameStack;
+ unsigned Offset;
+ CallingConv::ID &CallingConv;
+ bool isShadowRet;
+ FunctionPrologArgumentConversion(tree FnDecl,
+ Function::arg_iterator &ai,
+ const LLVMBuilder &B, CallingConv::ID &CC)
+ : FunctionDecl(FnDecl), AI(ai), Builder(B), Offset(0), CallingConv(CC),
+ isShadowRet(false) {}
+
+ /// getCallingConv - This provides the desired CallingConv for the function.
+ CallingConv::ID& getCallingConv(void) { return CallingConv; }
+
+ bool isShadowReturn() {
+ return isShadowRet;
+ }
+ void setName(const std::string &Name) {
+ NameStack.push_back(Name);
+ }
+ void setLocation(Value *Loc) {
+ LocStack.push_back(Loc);
+ }
+ void clear() {
+ assert(NameStack.size() == 1 && LocStack.size() == 1 && "Imbalance!");
+ NameStack.clear();
+ LocStack.clear();
+ }
+
+ void HandleAggregateShadowResult(const PointerType *PtrArgTy,
+ bool RetPtr) {
+ // If the function returns a structure by value, we transform the function
+ // to take a pointer to the result as the first argument of the function
+ // instead.
+ assert(AI != Builder.GetInsertBlock()->getParent()->arg_end() &&
+ "No explicit return value?");
+ AI->setName("agg.result");
+
+ isShadowRet = true;
+ tree ResultDecl = DECL_RESULT(FunctionDecl);
+ tree RetTy = TREE_TYPE(TREE_TYPE(FunctionDecl));
+ if (TREE_CODE(RetTy) == TREE_CODE(TREE_TYPE(ResultDecl))) {
+ TheTreeToLLVM->set_decl_local(ResultDecl, AI);
+ ++AI;
+ return;
+ }
+
+ // Otherwise, this must be something returned with NRVO.
+ assert(TREE_CODE(TREE_TYPE(ResultDecl)) == REFERENCE_TYPE &&
+ "Not type match and not passing by reference?");
+ // Create an alloca for the ResultDecl.
+ Value *Tmp = TheTreeToLLVM->CreateTemporary(AI->getType());
+ Builder.CreateStore(AI, Tmp);
+
+ TheTreeToLLVM->set_decl_local(ResultDecl, Tmp);
+ if (TheDebugInfo) {
+ TheDebugInfo->EmitDeclare(ResultDecl,
+ dwarf::DW_TAG_return_variable,
+ "agg.result", RetTy, Tmp,
+ Builder.GetInsertBlock());
+ }
+ ++AI;
+ }
+
+ void HandleScalarShadowResult(const PointerType *PtrArgTy, bool RetPtr) {
+ assert(AI != Builder.GetInsertBlock()->getParent()->arg_end() &&
+ "No explicit return value?");
+ AI->setName("scalar.result");
+ isShadowRet = true;
+ TheTreeToLLVM->set_decl_local(DECL_RESULT(FunctionDecl), AI);
+ ++AI;
+ }
+
+ void HandleScalarArgument(const llvm::Type *LLVMTy, tree type,
+ unsigned RealSize = 0) {
+ Value *ArgVal = AI;
+ if (ArgVal->getType() != LLVMTy) {
+ if (isa<PointerType>(ArgVal->getType()) && isa<PointerType>(LLVMTy)) {
+ // If this is GCC being sloppy about pointer types, insert a bitcast.
+ // See PR1083 for an example.
+ ArgVal = Builder.CreateBitCast(ArgVal, LLVMTy);
+ } else if (ArgVal->getType()->isDoubleTy()) {
+ // If this is a K&R float parameter, it got promoted to double. Insert
+ // the truncation to float now.
+ ArgVal = Builder.CreateFPTrunc(ArgVal, LLVMTy,
+ NameStack.back().c_str());
+ } else {
+        // If this is just a mismatch between integer types, it is due to
+        // K&R prototypes, where the forward proto declares the arg as int
+        // and the actual implementation uses a short or char.
+ assert(ArgVal->getType() == Type::getInt32Ty(Context) &&
+ LLVMTy->isInteger() &&
+ "Lowerings don't match?");
+ ArgVal = Builder.CreateTrunc(ArgVal, LLVMTy,NameStack.back().c_str());
+ }
+ }
+ assert(!LocStack.empty());
+ Value *Loc = LocStack.back();
+ LLVM_STORE_SCALAR_ARGUMENT(Loc,ArgVal,LLVMTy,RealSize,Builder);
+ AI->setName(NameStack.back());
+ ++AI;
+ }
+
+ void HandleByValArgument(const llvm::Type *LLVMTy, tree type) {
+ // Should not get here.
+ abort();
+ }
+
+ void HandleFCAArgument(const llvm::Type *LLVMTy, tree type) {
+      // Store the FCA argument into the alloca.
+ assert(!LocStack.empty());
+ Value *Loc = LocStack.back();
+ Builder.CreateStore(AI, Loc);
+ AI->setName(NameStack.back());
+ ++AI;
+ }
+
+ void HandleAggregateResultAsScalar(const Type *ScalarTy, unsigned Offset=0){
+ this->Offset = Offset;
+ }
+
+ void EnterField(unsigned FieldNo, const llvm::Type *StructTy) {
+ NameStack.push_back(NameStack.back()+"."+utostr(FieldNo));
+
+ Value *Loc = LocStack.back();
+ // This cast only involves pointers, therefore BitCast.
+ Loc = Builder.CreateBitCast(Loc, StructTy->getPointerTo());
+
+ Loc = Builder.CreateStructGEP(Loc, FieldNo);
+ LocStack.push_back(Loc);
+ }
+ void ExitField() {
+ NameStack.pop_back();
+ LocStack.pop_back();
+ }
+ };
+}
+
+// isPassedByVal - Return true if an aggregate of the specified type will be
+// passed in memory byval.
+static bool isPassedByVal(tree type, const Type *Ty,
+ std::vector<const Type*> &ScalarArgs,
+ bool isShadowRet, CallingConv::ID &CC) {
+ if (LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(type, Ty))
+ return true;
+
+ std::vector<const Type*> Args;
+ if (LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(type, Ty, CC, Args) &&
+ LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(Args, ScalarArgs, isShadowRet,
+ CC))
+ // We want to pass the whole aggregate in registers but only some of the
+ // registers are available.
+ return true;
+ return false;
+}
+
+void TreeToLLVM::StartFunctionBody() {
+ std::string Name = getLLVMAssemblerName(FnDecl).str();
+ // TODO: Add support for dropping the leading '\1' in order to support
+ // unsigned bswap(unsigned) __asm__("llvm.bswap");
+ // This would also require adjustments in make_decl_llvm.
+
+ // Determine the FunctionType and calling convention for this function.
+ tree static_chain = cfun->static_chain_decl;
+ const FunctionType *FTy;
+ CallingConv::ID CallingConv;
+ AttrListPtr PAL;
+
+ // If the function has no arguments and is varargs (...), turn it into a
+ // non-varargs function by scanning the param list for the function. This
+ // allows C functions declared as "T foo() {}" to be treated like
+ // "T foo(void) {}" and allows us to handle functions with K&R-style
+ // definitions correctly.
+ if (TYPE_ARG_TYPES(TREE_TYPE(FnDecl)) == 0) {
+ FTy = TheTypeConverter->ConvertArgListToFnType(TREE_TYPE(FnDecl),
+ DECL_ARGUMENTS(FnDecl),
+ static_chain,
+ CallingConv, PAL);
+ } else {
+ // Otherwise, just get the type from the function itself.
+ FTy = TheTypeConverter->ConvertFunctionType(TREE_TYPE(FnDecl),
+ FnDecl,
+ static_chain,
+ CallingConv, PAL);
+ }
+
+ // If we've already seen this function and created a prototype, and if the
+ // proto has the right LLVM type, just use it.
+ if (DECL_LOCAL_SET_P(FnDecl) &&
+ cast<PointerType>(DECL_LOCAL(FnDecl)->getType())->getElementType()==FTy) {
+ Fn = cast<Function>(DECL_LOCAL(FnDecl));
+ assert(Fn->getCallingConv() == CallingConv &&
+ "Calling convention disagreement between prototype and impl!");
+    // The visibility may have changed since the last time we saw this
+    // function, so set it to the current value.
+ handleVisibility(FnDecl, Fn);
+ } else {
+ Function *FnEntry = TheModule->getFunction(Name);
+ if (FnEntry) {
+ assert(FnEntry->getName() == Name && "Same entry, different name?");
+ assert(FnEntry->isDeclaration() &&
+ "Multiple fns with same name and neither are external!");
+ FnEntry->setName(""); // Clear name to avoid conflicts.
+ assert(FnEntry->getCallingConv() == CallingConv &&
+ "Calling convention disagreement between prototype and impl!");
+ }
+
+ // Otherwise, either it exists with the wrong type or it doesn't exist. In
+ // either case create a new function.
+ Fn = Function::Create(FTy, Function::ExternalLinkage, Name, TheModule);
+ assert(Fn->getName() == Name && "Preexisting fn with the same name!");
+ Fn->setCallingConv(CallingConv);
+ Fn->setAttributes(PAL);
+
+ // If a previous proto existed with the wrong type, replace any uses of it
+ // with the actual function and delete the proto.
+ if (FnEntry) {
+ FnEntry->replaceAllUsesWith(
+ Builder.getFolder().CreateBitCast(Fn, FnEntry->getType())
+ );
+ changeLLVMConstant(FnEntry, Fn);
+ FnEntry->eraseFromParent();
+ }
+ SET_DECL_LOCAL(FnDecl, Fn);
+ }
+
+ // The function should not already have a body.
+ assert(Fn->empty() && "Function expanded multiple times!");
+
+ // Compute the linkage that the function should get.
+ if (false) {//FIXME DECL_LLVM_PRIVATE(FnDecl)) {
+ Fn->setLinkage(Function::PrivateLinkage);
+ } else if (false) {//FIXME DECL_LLVM_LINKER_PRIVATE(FnDecl)) {
+ Fn->setLinkage(Function::LinkerPrivateLinkage);
+ } else if (!TREE_PUBLIC(FnDecl) /*|| lang_hooks.llvm_is_in_anon(subr)*/) {
+ Fn->setLinkage(Function::InternalLinkage);
+ } else if (DECL_COMDAT(FnDecl)) {
+ Fn->setLinkage(Function::getLinkOnceLinkage(flag_odr));
+ } else if (DECL_WEAK(FnDecl)) {
+ // The user may have explicitly asked for weak linkage - ignore flag_odr.
+ Fn->setLinkage(Function::WeakAnyLinkage);
+ } else if (DECL_ONE_ONLY(FnDecl)) {
+ Fn->setLinkage(Function::getWeakLinkage(flag_odr));
+ } else if (DECL_EXTERNAL(FnDecl)) {
+ Fn->setLinkage(Function::AvailableExternallyLinkage);
+ }
+
+#ifdef TARGET_ADJUST_LLVM_LINKAGE
+ TARGET_ADJUST_LLVM_LINKAGE(Fn,FnDecl);
+#endif /* TARGET_ADJUST_LLVM_LINKAGE */
+
+ // Handle visibility style
+ handleVisibility(FnDecl, Fn);
+
+ // Handle functions in specified sections.
+ if (DECL_SECTION_NAME(FnDecl))
+ Fn->setSection(TREE_STRING_POINTER(DECL_SECTION_NAME(FnDecl)));
+
+ // Handle used Functions
+ if (lookup_attribute ("used", DECL_ATTRIBUTES (FnDecl)))
+ AttributeUsedGlobals.insert(Fn);
+
+ // Handle noinline Functions
+ if (lookup_attribute ("noinline", DECL_ATTRIBUTES (FnDecl)))
+ Fn->addFnAttr(Attribute::NoInline);
+
+ // Handle always_inline attribute
+ if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (FnDecl)))
+ Fn->addFnAttr(Attribute::AlwaysInline);
+
+ if (optimize_size)
+ Fn->addFnAttr(Attribute::OptimizeForSize);
+
+ // Handle stack smashing protection.
+ if (flag_stack_protect == 1)
+ Fn->addFnAttr(Attribute::StackProtect);
+ else if (flag_stack_protect == 2)
+ Fn->addFnAttr(Attribute::StackProtectReq);
+
+ // Handle naked attribute
+ if (lookup_attribute ("naked", DECL_ATTRIBUTES (FnDecl)))
+ Fn->addFnAttr(Attribute::Naked);
+
+ // Handle annotate attributes
+ if (DECL_ATTRIBUTES(FnDecl))
+ AddAnnotateAttrsToGlobal(Fn, FnDecl);
+
+ // Mark the function "nounwind" if not doing exception handling.
+ if (!flag_exceptions)
+ Fn->setDoesNotThrow();
+
+ // Create a new basic block for the function.
+ BasicBlock *EntryBlock = BasicBlock::Create(Context, "entry", Fn);
+ BasicBlocks[ENTRY_BLOCK_PTR] = EntryBlock;
+ Builder.SetInsertPoint(EntryBlock);
+
+ if (TheDebugInfo)
+ TheDebugInfo->EmitFunctionStart(FnDecl, Fn, Builder.GetInsertBlock());
+
+ // Loop over all of the arguments to the function, setting Argument names and
+ // creating argument alloca's for the PARM_DECLs in case their address is
+ // exposed.
+ Function::arg_iterator AI = Fn->arg_begin();
+
+ // Rename and alloca'ify real arguments.
+ FunctionPrologArgumentConversion Client(FnDecl, AI, Builder, CallingConv);
+ TheLLVMABI<FunctionPrologArgumentConversion> ABIConverter(Client);
+
+ // Handle the DECL_RESULT.
+ ABIConverter.HandleReturnType(TREE_TYPE(TREE_TYPE(FnDecl)), FnDecl,
+ DECL_BUILT_IN(FnDecl));
+ // Remember this for use by FinishFunctionBody.
+ TheTreeToLLVM->ReturnOffset = Client.Offset;
+
+ // Prepend the static chain (if any) to the list of arguments.
+ tree Args = static_chain ? static_chain : DECL_ARGUMENTS(FnDecl);
+
+ // Scalar arguments processed so far.
+ std::vector<const Type*> ScalarArgs;
+ while (Args) {
+ const char *Name = "unnamed_arg";
+ if (DECL_NAME(Args)) Name = IDENTIFIER_POINTER(DECL_NAME(Args));
+
+ const Type *ArgTy = ConvertType(TREE_TYPE(Args));
+ bool isInvRef = isPassedByInvisibleReference(TREE_TYPE(Args));
+ if (isInvRef ||
+ (isa<VectorType>(ArgTy) &&
+ LLVM_SHOULD_PASS_VECTOR_USING_BYVAL_ATTR(TREE_TYPE(Args))) ||
+ (!ArgTy->isSingleValueType() &&
+ isPassedByVal(TREE_TYPE(Args), ArgTy, ScalarArgs,
+ Client.isShadowReturn(), CallingConv))) {
+ // If the value is passed by 'invisible reference' or 'byval reference',
+ // the l-value for the argument IS the argument itself.
+ AI->setName(Name);
+ SET_DECL_LOCAL(Args, AI);
+ if (!isInvRef && TheDebugInfo)
+ TheDebugInfo->EmitDeclare(Args, dwarf::DW_TAG_arg_variable,
+ Name, TREE_TYPE(Args),
+ AI, Builder.GetInsertBlock());
+ ++AI;
+ } else {
+ // Otherwise, we create an alloca to hold the argument value and provide
+ // an l-value. On entry to the function, we copy formal argument values
+ // into the alloca.
+ Value *Tmp = CreateTemporary(ArgTy);
+ Tmp->setName(std::string(Name)+"_addr");
+ SET_DECL_LOCAL(Args, Tmp);
+ if (TheDebugInfo) {
+ TheDebugInfo->EmitDeclare(Args, dwarf::DW_TAG_arg_variable,
+ Name, TREE_TYPE(Args), Tmp,
+ Builder.GetInsertBlock());
+ }
+
+ // Emit annotate intrinsic if arg has annotate attr
+ if (DECL_ATTRIBUTES(Args))
+ EmitAnnotateIntrinsic(Tmp, Args);
+
+ // Emit gcroot intrinsic if arg has attribute
+ if (POINTER_TYPE_P(TREE_TYPE(Args))
+ && lookup_attribute ("gcroot", TYPE_ATTRIBUTES(TREE_TYPE(Args))))
+ EmitTypeGcroot(Tmp, Args);
+
+ Client.setName(Name);
+ Client.setLocation(Tmp);
+ ABIConverter.HandleArgument(TREE_TYPE(Args), ScalarArgs);
+ Client.clear();
+ }
+
+ Args = Args == static_chain ? DECL_ARGUMENTS(FnDecl) : TREE_CHAIN(Args);
+ }
+
+ // Loading the value of a PARM_DECL at this point yields its initial value.
+ // Remember this for use when materializing the reads implied by SSA default
+ // definitions.
+ SSAInsertionPoint = Builder.Insert(CastInst::Create(Instruction::BitCast,
+ Constant::getNullValue(Type::getInt32Ty(Context)),
+ Type::getInt32Ty(Context)), "ssa point");
+
+ // If this function has nested functions, we should handle a potential
+ // nonlocal_goto_save_area.
+ if (cfun->nonlocal_goto_save_area) {
+ // Not supported yet.
+ }
+
+ // Create a new block for the return node, but don't insert it yet.
+ ReturnBB = BasicBlock::Create(Context, "return");
+}
+
+typedef SmallVector<std::pair<BasicBlock*, unsigned>, 8> PredVector;
+typedef SmallVector<std::pair<BasicBlock*, tree>, 8> TreeVector;
+typedef SmallVector<std::pair<BasicBlock*, Value*>, 8> ValueVector;
+
+/// PopulatePhiNodes - Populate generated phi nodes with their operands.
+void TreeToLLVM::PopulatePhiNodes() {
+ PredVector Predecessors;
+ TreeVector IncomingValues;
+ ValueVector PhiArguments;
+
+ for (unsigned i = 0, e = PendingPhis.size(); i < e; ++i) {
+ // The phi node to process.
+ PhiRecord &P = PendingPhis[i];
+
+ // Extract the incoming value for each predecessor from the GCC phi node.
+ for (size_t i = 0, e = gimple_phi_num_args(P.gcc_phi); i != e; ++i) {
+ // The incoming GCC basic block.
+ basic_block bb = gimple_phi_arg_edge(P.gcc_phi, i)->src;
+
+ // If there is no corresponding LLVM basic block then the GCC basic block
+ // was unreachable - skip this phi argument.
+ DenseMap<basic_block, BasicBlock*>::iterator BI = BasicBlocks.find(bb);
+ if (BI == BasicBlocks.end())
+ continue;
+
+ // The incoming GCC expression.
+ tree val = gimple_phi_arg(P.gcc_phi, i)->def;
+
+ // Associate it with the LLVM basic block.
+ IncomingValues.push_back(std::make_pair(BI->second, val));
+
+ // Several LLVM basic blocks may be generated when emitting one GCC basic
+ // block. The additional blocks always occur immediately after the main
+ // basic block, and can be identified by the fact that they are nameless.
+ // Associate the incoming expression with all of them, since any of them
+ // may occur as a predecessor of the LLVM basic block containing the phi.
+ Function::iterator FI(BI->second), FE = Fn->end();
+ for (++FI; FI != FE && !FI->hasName(); ++FI)
+ IncomingValues.push_back(std::make_pair(FI, val));
+ }
+
+ // Sort the incoming values by basic block to help speed up queries.
+ std::sort(IncomingValues.begin(), IncomingValues.end());
+
+ // Get the LLVM predecessors for the basic block containing the phi node,
+ // and remember their positions in the list of predecessors (this is used
+ // to avoid adding phi operands in a non-deterministic order).
+ Predecessors.reserve(gimple_phi_num_args(P.gcc_phi)); // At least this many.
+ BasicBlock *PhiBB = P.PHI->getParent();
+ unsigned Index = 0;
+ for (pred_iterator PI = pred_begin(PhiBB), PE = pred_end(PhiBB); PI != PE;
+ ++PI, ++Index)
+ Predecessors.push_back(std::make_pair(*PI, Index));
+
+ // Sort the predecessors by basic block. In GCC, each predecessor occurs
+ // exactly once. However in LLVM a predecessor can occur several times,
+ // and then every copy of the predecessor must be associated with exactly
+ // the same incoming value in the phi node. Sorting the predecessors groups
+ // multiple occurrences together, making this easy to handle.
+ std::sort(Predecessors.begin(), Predecessors.end());
+
+ // Now iterate over the predecessors, setting phi operands as we go.
+ TreeVector::iterator VI = IncomingValues.begin(), VE = IncomingValues.end();
+ PredVector::iterator PI = Predecessors.begin(), PE = Predecessors.end();
+ PhiArguments.resize(Predecessors.size());
+ while (PI != PE) {
+ // The predecessor basic block.
+ BasicBlock *BB = PI->first;
+
+ // Find the incoming value for this predecessor.
+ while (VI != VE && VI->first != BB) ++VI;
+ assert(VI != VE && "No value for predecessor!");
+ Value *Val = EmitGimpleReg(VI->second);
+
+ // Need to bitcast to the right type (useless_type_conversion_p). Place
+ // the bitcast at the end of the predecessor, before the terminator.
+ if (Val->getType() != P.PHI->getType())
+ Val = new BitCastInst(Val, P.PHI->getType(), "", BB->getTerminator());
+
+ // Add the phi node arguments for all occurrences of this predecessor.
+ do {
+ // Place the argument at the position given by PI->second, which is the
+ // original position before sorting of the predecessor in the pred list.
+ // Since the predecessors were sorted non-deterministically (by pointer
+ // value), this ensures that the same bitcode is produced on any run.
+ PhiArguments[PI++->second] = std::make_pair(BB, Val);
+ } while (PI != PE && PI->first == BB);
+ }
+
+ // Add the operands to the phi node.
+ P.PHI->reserveOperandSpace(PhiArguments.size());
+ for (ValueVector::iterator I = PhiArguments.begin(), E = PhiArguments.end();
+ I != E; ++I)
+ P.PHI->addIncoming(I->second, I->first);
+
+ IncomingValues.clear();
+ PhiArguments.clear();
+ Predecessors.clear();
+ }
+ PendingPhis.clear();
+}
+
+Function *TreeToLLVM::FinishFunctionBody() {
+ // Insert the return block at the end of the function.
+ EmitBlock(ReturnBB);
+
+ SmallVector <Value *, 4> RetVals;
+
+ // If the function returns a value, get it into a register and return it now.
+ if (!Fn->getReturnType()->isVoidTy()) {
+ if (!AGGREGATE_TYPE_P(TREE_TYPE(DECL_RESULT(FnDecl)))) {
+ // If the DECL_RESULT is a scalar type, just load out the return value
+ // and return it.
+ tree TreeRetVal = DECL_RESULT(FnDecl);
+ Value *RetVal = Builder.CreateLoad(DECL_LOCAL(TreeRetVal), "retval");
+ RetVal = Builder.CreateBitCast(RetVal, Fn->getReturnType());
+ RetVals.push_back(RetVal);
+ } else {
+ Value *RetVal = DECL_LOCAL(DECL_RESULT(FnDecl));
+ if (const StructType *STy = dyn_cast<StructType>(Fn->getReturnType())) {
+ Value *R1 = Builder.CreateBitCast(RetVal, STy->getPointerTo());
+
+ llvm::Value *Idxs[2];
+ Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
+ for (unsigned ri = 0; ri < STy->getNumElements(); ++ri) {
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), ri);
+ Value *GEP = Builder.CreateGEP(R1, Idxs, Idxs+2, "mrv_gep");
+ Value *E = Builder.CreateLoad(GEP, "mrv");
+ RetVals.push_back(E);
+ }
+ } else {
+ // Otherwise, this aggregate result must be something that is returned
+ // in a scalar register for this target. We must bit convert the
+ // aggregate to the specified scalar type, which we do by casting the
+ // pointer and loading. The load does not necessarily start at the
+ // beginning of the aggregate (x86-64).
+ if (ReturnOffset) {
+ RetVal = Builder.CreateBitCast(RetVal, Type::getInt8PtrTy(Context));
+ RetVal = Builder.CreateGEP(RetVal,
+ ConstantInt::get(TD.getIntPtrType(Context), ReturnOffset));
+ }
+ RetVal = Builder.CreateBitCast(RetVal,
+ Fn->getReturnType()->getPointerTo());
+ RetVal = Builder.CreateLoad(RetVal, "retval");
+ RetVals.push_back(RetVal);
+ }
+ }
+ }
+ if (TheDebugInfo) {
+ TheDebugInfo->EmitStopPoint(Fn, Builder.GetInsertBlock());
+ TheDebugInfo->EmitRegionEnd(Builder.GetInsertBlock(), true);
+ }
+ if (RetVals.empty())
+ Builder.CreateRetVoid();
+ else if (RetVals.size() == 1 && RetVals[0]->getType() == Fn->getReturnType()){
+ Builder.CreateRet(RetVals[0]);
+ } else {
+ assert(Fn->getReturnType()->isAggregateType() && "Return type mismatch!");
+ Builder.CreateAggregateRet(RetVals.data(), RetVals.size());
+ }
+
+ // Emit pending exception handling code.
+ EmitLandingPads();
+ EmitPostPads();
+ EmitUnwindBlock();
+
+ // If this function takes the address of a label, emit the indirect goto
+ // block.
+ if (IndirectGotoBlock) {
+ EmitBlock(IndirectGotoBlock);
+
+ // Change the default destination to go to one of the other destinations, if
+ // there is any other dest.
+ SwitchInst *SI = cast<SwitchInst>(IndirectGotoBlock->getTerminator());
+ if (SI->getNumSuccessors() > 1)
+ SI->setSuccessor(0, SI->getSuccessor(1));
+ }
+
+ // Populate phi nodes with their operands now that all ssa names have been
+ // defined and all basic blocks output.
+ PopulatePhiNodes();
+
+ return Fn;
+}
+
+/// getBasicBlock - Find or create the LLVM basic block corresponding to BB.
+BasicBlock *TreeToLLVM::getBasicBlock(basic_block bb) {
+ // If we already associated an LLVM basic block with BB, then return it.
+ DenseMap<basic_block, BasicBlock*>::iterator I = BasicBlocks.find(bb);
+ if (I != BasicBlocks.end())
+ return I->second;
+
+ // Otherwise, create a new LLVM basic block.
+ BasicBlock *BB = BasicBlock::Create(Context);
+
+ // All basic blocks that directly correspond to GCC basic blocks (those
+ // created here) must have a name. All artificial basic blocks produced
+ // while generating code must be nameless. That way, artificial blocks
+ // can be easily identified.
+
+ // Give the basic block a name. If the user specified -fverbose-asm then
+ // use the same naming scheme as GCC.
+ if (flag_verbose_asm) {
+ // If BB contains labels, name the LLVM basic block after the first label.
+ gimple stmt = first_stmt(bb);
+ if (stmt && gimple_code(stmt) == GIMPLE_LABEL) {
+ tree label = gimple_label_label(stmt);
+ if (tree name = DECL_NAME(label)) {
+ // If the label has a name then use it.
+ StringRef Ident(IDENTIFIER_POINTER(name), IDENTIFIER_LENGTH(name));
+ BB->setName(Ident);
+ } else if (LABEL_DECL_UID(label) != -1) {
+ // If the label has a UID then use it.
+ Twine UID(LABEL_DECL_UID(label));
+ BB->setName("<L" + UID + ">");
+ } else {
+ // Otherwise use the generic UID.
+ Twine UID(DECL_UID(label));
+ BB->setName("<D." + UID + ">");
+ }
+ } else {
+ // When there is no label, use the same naming scheme as the GCC tree dumps.
+ Twine Index(bb->index);
+ BB->setName("<bb " + Index + ">");
+ }
+ } else {
+ Twine Index(bb->index);
+ BB->setName(Index);
+ }
+
+ return BasicBlocks[bb] = BB;
+}
+
+/// getLabelDeclBlock - Lazily get and create a basic block for the specified
+/// label.
+BasicBlock *TreeToLLVM::getLabelDeclBlock(tree LabelDecl) {
+ assert(TREE_CODE(LabelDecl) == LABEL_DECL && "Isn't a label!?");
+ if (DECL_LOCAL_SET_P(LabelDecl))
+ return cast<BasicBlock>(DECL_LOCAL(LabelDecl));
+
+ BasicBlock *BB = getBasicBlock(label_to_block(LabelDecl));
+ SET_DECL_LOCAL(LabelDecl, BB);
+ return BB;
+}
+
+void TreeToLLVM::EmitBasicBlock(basic_block bb) {
+ ++NumBasicBlocks;
+
+ // Avoid outputting a pointless branch at the end of the entry block.
+ if (bb != ENTRY_BLOCK_PTR)
+ EmitBlock(getBasicBlock(bb));
+
+ // Create an LLVM phi node for each GCC phi and define the associated ssa name
+ // using it. Do not populate with operands at this point since some ssa names
+ // the phi uses may not have been defined yet - phis are special this way.
+ for (gimple_stmt_iterator gsi = gsi_start_phis(bb); !gsi_end_p(gsi);
+ gsi_next(&gsi)) {
+ gimple gcc_phi = gsi_stmt(gsi);
+ // Skip virtual operands.
+ if (!is_gimple_reg(gimple_phi_result(gcc_phi)))
+ continue;
+
+ // Create the LLVM phi node.
+ const Type *Ty = ConvertType(TREE_TYPE(gimple_phi_result(gcc_phi)));
+ PHINode *PHI = Builder.CreatePHI(Ty);
+
+ // The phi defines the associated ssa name.
+ tree name = gimple_phi_result(gcc_phi);
+ assert(TREE_CODE(name) == SSA_NAME && "PHI result not an SSA name!");
+ assert(SSANames.find(name) == SSANames.end() &&
+ "Multiply defined SSA name!");
+ if (flag_verbose_asm)
+ NameValue(PHI, name);
+ SSANames[name] = PHI;
+
+ // The phi operands will be populated later - remember the phi node.
+ PhiRecord P = { gcc_phi, PHI };
+ PendingPhis.push_back(P);
+ }
+
+ // Render statements.
+ for (gimple_stmt_iterator gsi = gsi_start_bb(bb); !gsi_end_p(gsi);
+ gsi_next(&gsi)) {
+ gimple stmt = gsi_stmt(gsi);
+ ++NumStatements;
+
+ switch (gimple_code(stmt)) {
+ case GIMPLE_ASM:
+ RenderGIMPLE_ASM(stmt);
+ break;
+
+ case GIMPLE_ASSIGN:
+ RenderGIMPLE_ASSIGN(stmt);
+ break;
+
+ case GIMPLE_CALL:
+ RenderGIMPLE_CALL(stmt);
+ break;
+
+ case GIMPLE_COND:
+ RenderGIMPLE_COND(stmt);
+ break;
+
+ case GIMPLE_DEBUG:
+ // TODO: Output debug info rather than just discarding it.
+ break;
+
+ case GIMPLE_GOTO:
+ RenderGIMPLE_GOTO(stmt);
+ break;
+
+ case GIMPLE_LABEL:
+ case GIMPLE_NOP:
+ case GIMPLE_PREDICT:
+ break;
+
+ case GIMPLE_RESX:
+ RenderGIMPLE_RESX(stmt);
+ break;
+
+ case GIMPLE_RETURN:
+ RenderGIMPLE_RETURN(stmt);
+ break;
+
+ case GIMPLE_SWITCH:
+ RenderGIMPLE_SWITCH(stmt);
+ break;
+
+ default:
+ dump(stmt);
+ llvm_unreachable("Unhandled GIMPLE statement during LLVM emission!");
+ }
+ }
+
+ // Add a branch to the fallthru block.
+ edge e;
+ edge_iterator ei;
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (e->flags & EDGE_FALLTHRU) {
+ Builder.CreateBr(getBasicBlock(e->dest));
+ EmitBlock(BasicBlock::Create(Context));
+ break;
+ }
+}
+
+static void emit_basic_block(struct dom_walk_data *walk_data, basic_block bb) {
+ ((TreeToLLVM *)walk_data->global_data)->EmitBasicBlock(bb);
+}
+
+Function *TreeToLLVM::EmitFunction() {
+ // Set up parameters and prepare for return, for the function.
+ StartFunctionBody();
+
+ // Emit the body of the function by iterating over all BBs. To ensure that
+ // definitions of ssa names are seen before any uses, the iteration is done
+ // in dominator order.
+ struct dom_walk_data walk_data;
+ memset(&walk_data, 0, sizeof(struct dom_walk_data));
+ walk_data.dom_direction = CDI_DOMINATORS;
+ walk_data.before_dom_children = emit_basic_block;
+ walk_data.global_data = this;
+ init_walk_dominator_tree(&walk_data);
+ walk_dominator_tree(&walk_data, ENTRY_BLOCK_PTR);
+ fini_walk_dominator_tree(&walk_data);
+
+ // Wrap things up.
+ return FinishFunctionBody();
+}
+
+Value *TreeToLLVM::Emit(tree exp, const MemRef *DestLoc) {
+ assert((AGGREGATE_TYPE_P(TREE_TYPE(exp)) == (DestLoc != 0)) &&
+ "Didn't pass DestLoc to an aggregate expr, or passed it to scalar!");
+
+ Value *Result = 0;
+
+ if (TheDebugInfo) {
+ if (EXPR_HAS_LOCATION(exp)) {
+ // Set new location on the way up the tree.
+ TheDebugInfo->setLocationFile(EXPR_FILENAME(exp));
+ TheDebugInfo->setLocationLine(EXPR_LINENO(exp));
+ }
+
+ TheDebugInfo->EmitStopPoint(Fn, Builder.GetInsertBlock());
+ }
+
+ switch (TREE_CODE(exp)) {
+ default:
+ debug_tree(exp);
+ llvm_unreachable("Unhandled expression!");
+
+ // Exception handling.
+//FIXME case EXC_PTR_EXPR: Result = EmitEXC_PTR_EXPR(exp); break;
+//FIXME case FILTER_EXPR: Result = EmitFILTER_EXPR(exp); break;
+
+ // Expressions
+ case VAR_DECL:
+ case PARM_DECL:
+ case RESULT_DECL:
+ case INDIRECT_REF:
+ case ARRAY_REF:
+ case ARRAY_RANGE_REF:
+ case COMPONENT_REF:
+ case BIT_FIELD_REF:
+ case STRING_CST:
+ Result = EmitLoadOfLValue(exp, DestLoc);
+ break;
+ case SSA_NAME: Result = EmitSSA_NAME(exp); break;
+ case OBJ_TYPE_REF: Result = EmitOBJ_TYPE_REF(exp); break;
+ case ADDR_EXPR: Result = EmitADDR_EXPR(exp); break;
+
+ // Unary Operators
+ case REALPART_EXPR: Result = EmitXXXXPART_EXPR(exp, 0); break;
+ case IMAGPART_EXPR: Result = EmitXXXXPART_EXPR(exp, 1); break;
+ case VIEW_CONVERT_EXPR: Result = EmitVIEW_CONVERT_EXPR(exp, DestLoc); break;
+ case CONSTRUCTOR: Result = EmitCONSTRUCTOR(exp, DestLoc); break;
+
+ // Complex Math Expressions.
+ case COMPLEX_CST: Result = TreeConstantToLLVM::ConvertCOMPLEX_CST(exp); break;
+
+ // Constant Expressions
+ case INTEGER_CST:
+ Result = TreeConstantToLLVM::ConvertINTEGER_CST(exp);
+ break;
+ case REAL_CST:
+ Result = TreeConstantToLLVM::ConvertREAL_CST(exp);
+ break;
+ case VECTOR_CST:
+ Result = TreeConstantToLLVM::ConvertVECTOR_CST(exp);
+ break;
+ }
+
+ if (TheDebugInfo && EXPR_HAS_LOCATION(exp)) {
+ // Restore location back down the tree.
+ TheDebugInfo->setLocationFile(EXPR_FILENAME(exp));
+ TheDebugInfo->setLocationLine(EXPR_LINENO(exp));
+ }
+
+ assert(((DestLoc && Result == 0) || DestLoc == 0) &&
+ "Expected a scalar or aggregate but got the wrong thing!");
+ // Check that the type of the result matches that of the tree node. If the
+ // result is not used then GCC sometimes sets the tree type to VOID_TYPE, so
+ // don't take VOID_TYPE too seriously here.
+ assert((Result == 0 || VOID_TYPE_P(TREE_TYPE(exp)) ||
+          // FIXME: The vector stuff isn't straightforward. Sometimes X86 can
+ // pass it back as a scalar value. Disable checking if it's a
+ // vector. This should be made better, though.
+ isa<VectorType>(ConvertType(TREE_TYPE(exp))) ||
+ Result->getType() == ConvertType(TREE_TYPE(exp))) &&
+ "Value has wrong type!");
+ return Result;
+}
+
+/// EmitLV - Convert the specified l-value tree node to LLVM code, returning
+/// the address of the result.
+LValue TreeToLLVM::EmitLV(tree exp) {
+ // Needs to be in sync with EmitVIEW_CONVERT_EXPR.
+ LValue LV;
+
+ switch (TREE_CODE(exp)) {
+ default:
+ debug_tree(exp);
+ llvm_unreachable("Unhandled lvalue expression!");
+
+ case PARM_DECL:
+ case VAR_DECL:
+ case FUNCTION_DECL:
+ case CONST_DECL:
+ case RESULT_DECL:
+ LV = EmitLV_DECL(exp);
+ break;
+ case ARRAY_RANGE_REF:
+ case ARRAY_REF:
+ LV = EmitLV_ARRAY_REF(exp);
+ break;
+ case COMPONENT_REF:
+ LV = EmitLV_COMPONENT_REF(exp);
+ break;
+ case BIT_FIELD_REF:
+ LV = EmitLV_BIT_FIELD_REF(exp);
+ break;
+ case REALPART_EXPR:
+ LV = EmitLV_XXXXPART_EXPR(exp, 0);
+ break;
+ case IMAGPART_EXPR:
+ LV = EmitLV_XXXXPART_EXPR(exp, 1);
+ break;
+ case SSA_NAME:
+ LV = EmitLV_SSA_NAME(exp);
+ break;
+
+ // Constants.
+ case LABEL_DECL: {
+ Value *Ptr = TreeConstantToLLVM::EmitLV_LABEL_DECL(exp);
+ LV = LValue(Ptr, 1);
+ break;
+ }
+ case COMPLEX_CST: {
+ Value *Ptr = TreeConstantToLLVM::EmitLV_COMPLEX_CST(exp);
+ LV = LValue(Ptr, TYPE_ALIGN(TREE_TYPE(exp)) / 8);
+ break;
+ }
+ case STRING_CST: {
+ Value *Ptr = TreeConstantToLLVM::EmitLV_STRING_CST(exp);
+ LV = LValue(Ptr, TYPE_ALIGN(TREE_TYPE(exp)) / 8);
+ break;
+ }
+
+ // Type Conversion.
+ case VIEW_CONVERT_EXPR:
+ LV = EmitLV_VIEW_CONVERT_EXPR(exp);
+ break;
+
+ // Exception Handling.
+//FIXME case EXC_PTR_EXPR:
+//FIXME LV = EmitLV_EXC_PTR_EXPR(exp);
+//FIXME break;
+//FIXME case FILTER_EXPR:
+//FIXME LV = EmitLV_FILTER_EXPR(exp);
+//FIXME break;
+
+ // Trivial Cases.
+ case WITH_SIZE_EXPR:
+ LV = EmitLV_WITH_SIZE_EXPR(exp);
+ break;
+ case INDIRECT_REF:
+ LV = EmitLV_INDIRECT_REF(exp);
+ break;
+ }
+
+ // Check that the type of the lvalue is indeed that of a pointer to the tree
+ // node. This may not hold for bitfields because the type of a bitfield need
+ // not match the type of the value being loaded out of it. Since LLVM has no
+ // void* type, don't insist that void* be converted to a specific LLVM type.
+ assert((LV.isBitfield() || VOID_TYPE_P(TREE_TYPE(exp)) ||
+ LV.Ptr->getType() == ConvertType(TREE_TYPE(exp))->getPointerTo()) &&
+ "LValue has wrong type!");
+
+ return LV;
+}
+
+//===----------------------------------------------------------------------===//
+// ... Utility Functions ...
+//===----------------------------------------------------------------------===//
+
+void TreeToLLVM::TODO(tree exp) {
+ if (exp) debug_tree(exp);
+ llvm_unreachable("Unhandled tree node");
+}
+
+/// CastToAnyType - Cast the specified value to the specified type making no
+/// assumptions about the types of the arguments. This creates an inferred cast.
+Value *TreeToLLVM::CastToAnyType(Value *V, bool VisSigned,
+ const Type* Ty, bool TyIsSigned) {
+ // Eliminate useless casts of a type to itself.
+ if (V->getType() == Ty)
+ return V;
+
+ // The types are different so we must cast. Use getCastOpcode to create an
+ // inferred cast opcode.
+ Instruction::CastOps opc =
+ CastInst::getCastOpcode(V, VisSigned, Ty, TyIsSigned);
+
+ // Generate the cast and return it.
+ return Builder.CreateCast(opc, V, Ty);
+}
+
+/// CastToUIntType - Cast the specified value to the specified type assuming
+/// that the value and type are unsigned integer types.
+Value *TreeToLLVM::CastToUIntType(Value *V, const Type* Ty) {
+ // Eliminate useless casts of a type to itself.
+ if (V->getType() == Ty)
+ return V;
+
+ unsigned SrcBits = V->getType()->getPrimitiveSizeInBits();
+ unsigned DstBits = Ty->getPrimitiveSizeInBits();
+ assert(SrcBits != DstBits && "Types are different but have same #bits?");
+
+ Instruction::CastOps opcode =
+ (SrcBits > DstBits ? Instruction::Trunc : Instruction::ZExt);
+ return Builder.CreateCast(opcode, V, Ty);
+}
+
+/// CastToSIntType - Cast the specified value to the specified type assuming
+/// that the value and type are signed integer types.
+Value *TreeToLLVM::CastToSIntType(Value *V, const Type* Ty) {
+ // Eliminate useless casts of a type to itself.
+ if (V->getType() == Ty)
+ return V;
+
+ unsigned SrcBits = V->getType()->getPrimitiveSizeInBits();
+ unsigned DstBits = Ty->getPrimitiveSizeInBits();
+ assert(SrcBits != DstBits && "Types are different but have same #bits?");
+
+ Instruction::CastOps opcode =
+ (SrcBits > DstBits ? Instruction::Trunc : Instruction::SExt);
+ return Builder.CreateCast(opcode, V, Ty);
+}
+
+/// CastToFPType - Cast the specified value to the specified type assuming
+/// that the value and type are floating point.
+Value *TreeToLLVM::CastToFPType(Value *V, const Type* Ty) {
+ unsigned SrcBits = V->getType()->getPrimitiveSizeInBits();
+ unsigned DstBits = Ty->getPrimitiveSizeInBits();
+ if (SrcBits == DstBits)
+ return V;
+ Instruction::CastOps opcode = (SrcBits > DstBits ?
+ Instruction::FPTrunc : Instruction::FPExt);
+ return Builder.CreateCast(opcode, V, Ty);
+}
+
+/// CreateTemporary - Create a new alloca instruction of the specified type,
+/// inserting it into the entry block and returning it. The resulting
+/// instruction's type is a pointer to the specified type.
+AllocaInst *TreeToLLVM::CreateTemporary(const Type *Ty) {
+ if (AllocaInsertionPoint == 0) {
+    // Create a dummy instruction in the entry block as a marker to insert new
+    // alloca instructions before.  It doesn't matter what this instruction is,
+    // it is dead.  This allows us to insert allocas in order without having to
+    // scan for an insertion point.  An int -> int BitCast is used as the dummy.
+ AllocaInsertionPoint = CastInst::Create(Instruction::BitCast,
+ Constant::getNullValue(Type::getInt32Ty(Context)),
+ Type::getInt32Ty(Context), "alloca point");
+ // Insert it as the first instruction in the entry block.
+ Fn->begin()->getInstList().insert(Fn->begin()->begin(),
+ AllocaInsertionPoint);
+ }
+ return new AllocaInst(Ty, 0, "memtmp", AllocaInsertionPoint);
+}
+
+/// CreateTempLoc - Like CreateTemporary, but returns a MemRef.
+MemRef TreeToLLVM::CreateTempLoc(const Type *Ty) {
+ AllocaInst *AI = CreateTemporary(Ty);
+ // MemRefs do not allow alignment 0.
+ if (!AI->getAlignment())
+ AI->setAlignment(TD.getPrefTypeAlignment(Ty));
+ return MemRef(AI, AI->getAlignment(), false);
+}
+
+/// EmitBlock - Add the specified basic block to the end of the function. If
+/// the previous block falls through into it, add an explicit branch.
+void TreeToLLVM::EmitBlock(BasicBlock *BB) {
+ BasicBlock *CurBB = Builder.GetInsertBlock();
+ // If the previous block falls through to BB, add an explicit branch.
+ if (CurBB->getTerminator() == 0) {
+ // If the previous block has no label and is empty, remove it: it is a
+ // post-terminator block.
+ if (CurBB->getName().empty() && CurBB->begin() == CurBB->end())
+ CurBB->eraseFromParent();
+ else
+ // Otherwise, fall through to this block.
+ Builder.CreateBr(BB);
+ }
+
+ // Add this block.
+ Fn->getBasicBlockList().push_back(BB);
+ Builder.SetInsertPoint(BB); // It is now the current block.
+}
+
+/// CopyAggregate - Recursively traverse the potentially aggregate src/dest
+/// ptrs, copying all of the elements.
+static void CopyAggregate(MemRef DestLoc, MemRef SrcLoc,
+ LLVMBuilder &Builder, tree gccType){
+ assert(DestLoc.Ptr->getType() == SrcLoc.Ptr->getType() &&
+ "Cannot copy between two pointers of different type!");
+ const Type *ElTy =
+ cast<PointerType>(DestLoc.Ptr->getType())->getElementType();
+
+ unsigned Alignment = std::min(DestLoc.getAlignment(), SrcLoc.getAlignment());
+
+ if (ElTy->isSingleValueType()) {
+ LoadInst *V = Builder.CreateLoad(SrcLoc.Ptr, SrcLoc.Volatile);
+ StoreInst *S = Builder.CreateStore(V, DestLoc.Ptr, DestLoc.Volatile);
+ V->setAlignment(Alignment);
+ S->setAlignment(Alignment);
+ } else if (const StructType *STy = dyn_cast<StructType>(ElTy)) {
+ const StructLayout *SL = getTargetData().getStructLayout(STy);
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ if (gccType && isPaddingElement(gccType, i))
+ continue;
+ Value *DElPtr = Builder.CreateStructGEP(DestLoc.Ptr, i);
+ Value *SElPtr = Builder.CreateStructGEP(SrcLoc.Ptr, i);
+ unsigned Align = MinAlign(Alignment, SL->getElementOffset(i));
+ CopyAggregate(MemRef(DElPtr, Align, DestLoc.Volatile),
+ MemRef(SElPtr, Align, SrcLoc.Volatile),
+ Builder, 0);
+ }
+ } else {
+ const ArrayType *ATy = cast<ArrayType>(ElTy);
+ unsigned EltSize = getTargetData().getTypeAllocSize(ATy->getElementType());
+ for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
+ Value *DElPtr = Builder.CreateStructGEP(DestLoc.Ptr, i);
+ Value *SElPtr = Builder.CreateStructGEP(SrcLoc.Ptr, i);
+ unsigned Align = MinAlign(Alignment, i * EltSize);
+ CopyAggregate(MemRef(DElPtr, Align, DestLoc.Volatile),
+ MemRef(SElPtr, Align, SrcLoc.Volatile),
+ Builder, 0);
+ }
+ }
+}
+
+/// CountAggregateElements - Return the number of elements in the specified type
+/// that will need to be loaded/stored if we copy this by explicit accesses.
+static unsigned CountAggregateElements(const Type *Ty) {
+ if (Ty->isSingleValueType()) return 1;
+
+ if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+ unsigned NumElts = 0;
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
+ NumElts += CountAggregateElements(STy->getElementType(i));
+ return NumElts;
+ } else {
+ const ArrayType *ATy = cast<ArrayType>(Ty);
+ return ATy->getNumElements()*CountAggregateElements(ATy->getElementType());
+ }
+}
+
+/// containsFPField - Returns whether the given LLVM type contains any
+/// floating point elements.
+static bool containsFPField(const Type *LLVMTy) {
+ if (LLVMTy->isFloatingPoint())
+ return true;
+ const StructType* STy = dyn_cast<StructType>(LLVMTy);
+ if (STy) {
+ for (StructType::element_iterator I = STy->element_begin(),
+ E = STy->element_end(); I != E; I++) {
+ const Type *Ty = *I;
+ if (Ty->isFloatingPoint())
+ return true;
+ if (isa<StructType>(Ty) && containsFPField(Ty))
+ return true;
+ const ArrayType *ATy = dyn_cast<ArrayType>(Ty);
+ if (ATy && containsFPField(ATy->getElementType()))
+ return true;
+ const VectorType *VTy = dyn_cast<VectorType>(Ty);
+ if (VTy && containsFPField(VTy->getElementType()))
+ return true;
+ }
+ }
+ return false;
+}
+
+#ifndef TARGET_LLVM_MIN_BYTES_COPY_BY_MEMCPY
+#define TARGET_LLVM_MIN_BYTES_COPY_BY_MEMCPY 64
+#endif
+
+/// EmitAggregateCopy - Copy the elements from SrcLoc to DestLoc, using the
+/// GCC type specified by GCCType to know which elements to copy.
+void TreeToLLVM::EmitAggregateCopy(MemRef DestLoc, MemRef SrcLoc, tree type) {
+ if (DestLoc.Ptr == SrcLoc.Ptr && !DestLoc.Volatile && !SrcLoc.Volatile)
+ return; // noop copy.
+
+ // If the type is small, copy the elements instead of using a block copy.
+ if (TREE_CODE(TYPE_SIZE(type)) == INTEGER_CST &&
+ TREE_INT_CST_LOW(TYPE_SIZE_UNIT(type)) <
+ TARGET_LLVM_MIN_BYTES_COPY_BY_MEMCPY) {
+ const Type *LLVMTy = ConvertType(type);
+
+ // Some targets (x87) cannot pass non-floating-point values using FP
+ // instructions. The LLVM type for a union may include FP elements,
+ // even if some of the union fields do not; it is unsafe to pass such
+ // converted types element by element. PR 2680.
+
+ // If the GCC type is not fully covered by the LLVM type, use memcpy. This
+ // can occur with unions etc.
+ if ((TREE_CODE(type) != UNION_TYPE || !containsFPField(LLVMTy)) &&
+ !TheTypeConverter->GCCTypeOverlapsWithLLVMTypePadding(type, LLVMTy) &&
+ // Don't copy tons of tiny elements.
+ CountAggregateElements(LLVMTy) <= 8) {
+ DestLoc.Ptr = Builder.CreateBitCast(DestLoc.Ptr, LLVMTy->getPointerTo());
+ SrcLoc.Ptr = Builder.CreateBitCast(SrcLoc.Ptr, LLVMTy->getPointerTo());
+ CopyAggregate(DestLoc, SrcLoc, Builder, type);
+ return;
+ }
+ }
+
+ Value *TypeSize = Emit(TYPE_SIZE_UNIT(type), 0);
+ EmitMemCpy(DestLoc.Ptr, SrcLoc.Ptr, TypeSize,
+ std::min(DestLoc.getAlignment(), SrcLoc.getAlignment()));
+}
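+
+// For illustration (the types below are hypothetical): an 8 byte
+// "struct { int a; float b; }" is copied element by element through
+// CopyAggregate, while a 1024 byte array, or a union whose LLVM type contains
+// floating point fields, falls back to a block copy via EmitMemCpy
+// (the llvm.memcpy intrinsic).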
+
+/// ZeroAggregate - Recursively traverse the potentially aggregate DestLoc,
+/// zeroing all of the elements.
+static void ZeroAggregate(MemRef DestLoc, LLVMBuilder &Builder) {
+ const Type *ElTy =
+ cast<PointerType>(DestLoc.Ptr->getType())->getElementType();
+ if (ElTy->isSingleValueType()) {
+ StoreInst *St = Builder.CreateStore(Constant::getNullValue(ElTy),
+ DestLoc.Ptr, DestLoc.Volatile);
+ St->setAlignment(DestLoc.getAlignment());
+ } else if (const StructType *STy = dyn_cast<StructType>(ElTy)) {
+ const StructLayout *SL = getTargetData().getStructLayout(STy);
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ Value *Ptr = Builder.CreateStructGEP(DestLoc.Ptr, i);
+ unsigned Alignment = MinAlign(DestLoc.getAlignment(),
+ SL->getElementOffset(i));
+ ZeroAggregate(MemRef(Ptr, Alignment, DestLoc.Volatile), Builder);
+ }
+ } else {
+ const ArrayType *ATy = cast<ArrayType>(ElTy);
+ unsigned EltSize = getTargetData().getTypeAllocSize(ATy->getElementType());
+ for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
+ Value *Ptr = Builder.CreateStructGEP(DestLoc.Ptr, i);
+ unsigned Alignment = MinAlign(DestLoc.getAlignment(), i * EltSize);
+ ZeroAggregate(MemRef(Ptr, Alignment, DestLoc.Volatile), Builder);
+ }
+ }
+}
+
+/// EmitAggregateZero - Zero the elements of DestLoc.
+///
+void TreeToLLVM::EmitAggregateZero(MemRef DestLoc, tree type) {
+ // If the type is small, copy the elements instead of using a block copy.
+ if (TREE_CODE(TYPE_SIZE(type)) == INTEGER_CST &&
+ TREE_INT_CST_LOW(TYPE_SIZE_UNIT(type)) < 128) {
+ const Type *LLVMTy = ConvertType(type);
+
+ // If the GCC type is not fully covered by the LLVM type, use memset. This
+ // can occur with unions etc.
+ if (!TheTypeConverter->GCCTypeOverlapsWithLLVMTypePadding(type, LLVMTy) &&
+ // Don't zero tons of tiny elements.
+ CountAggregateElements(LLVMTy) <= 8) {
+ DestLoc.Ptr = Builder.CreateBitCast(DestLoc.Ptr, LLVMTy->getPointerTo());
+ ZeroAggregate(DestLoc, Builder);
+ return;
+ }
+ }
+
+ EmitMemSet(DestLoc.Ptr, ConstantInt::get(Type::getInt8Ty(Context), 0),
+ Emit(TYPE_SIZE_UNIT(type), 0), DestLoc.getAlignment());
+}
+
+Value *TreeToLLVM::EmitMemCpy(Value *DestPtr, Value *SrcPtr, Value *Size,
+ unsigned Align) {
+ const Type *SBP = Type::getInt8PtrTy(Context);
+ const Type *IntPtr = TD.getIntPtrType(Context);
+ Value *Ops[4] = {
+ Builder.CreateBitCast(DestPtr, SBP),
+ Builder.CreateBitCast(SrcPtr, SBP),
+ CastToSIntType(Size, IntPtr),
+ ConstantInt::get(Type::getInt32Ty(Context), Align)
+ };
+
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memcpy,
+ &IntPtr, 1), Ops, Ops+4);
+ return Ops[0];
+}
+
+Value *TreeToLLVM::EmitMemMove(Value *DestPtr, Value *SrcPtr, Value *Size,
+ unsigned Align) {
+ const Type *SBP = Type::getInt8PtrTy(Context);
+ const Type *IntPtr = TD.getIntPtrType(Context);
+ Value *Ops[4] = {
+ Builder.CreateBitCast(DestPtr, SBP),
+ Builder.CreateBitCast(SrcPtr, SBP),
+ CastToSIntType(Size, IntPtr),
+ ConstantInt::get(Type::getInt32Ty(Context), Align)
+ };
+
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memmove,
+ &IntPtr, 1), Ops, Ops+4);
+ return Ops[0];
+}
+
+Value *TreeToLLVM::EmitMemSet(Value *DestPtr, Value *SrcVal, Value *Size,
+ unsigned Align) {
+ const Type *SBP = Type::getInt8PtrTy(Context);
+ const Type *IntPtr = TD.getIntPtrType(Context);
+ Value *Ops[4] = {
+ Builder.CreateBitCast(DestPtr, SBP),
+ CastToSIntType(SrcVal, Type::getInt8Ty(Context)),
+ CastToSIntType(Size, IntPtr),
+ ConstantInt::get(Type::getInt32Ty(Context), Align)
+ };
+
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memset,
+ &IntPtr, 1), Ops, Ops+4);
+ return Ops[0];
+}
+
+
+// EmitTypeGcroot - Register the local V as a garbage collection root by
+// emitting a call to the llvm.gcroot intrinsic.  Called for locals whose
+// pointer type carries the "gcroot" attribute.
+void TreeToLLVM::EmitTypeGcroot(Value *V, tree decl) {
+ // GC intrinsics can only be used in functions which specify a collector.
+ Fn->setGC("shadow-stack");
+
+ Function *gcrootFun = Intrinsic::getDeclaration(TheModule,
+ Intrinsic::gcroot);
+
+  // Conceptually V is a pointer to an opaque value, but the llvm.gcroot
+  // intrinsic expects an i8** root slot and an i8* metadata pointer.
+ const PointerType *Ty = Type::getInt8PtrTy(Context);
+ V = Builder.CreateBitCast(V, Ty->getPointerTo());
+
+ Value *Ops[2] = {
+ V,
+ ConstantPointerNull::get(Ty)
+ };
+
+ Builder.CreateCall(gcrootFun, Ops, Ops+2);
+}
+
+// Emits annotate intrinsic if the decl has the annotate attribute set.
+void TreeToLLVM::EmitAnnotateIntrinsic(Value *V, tree decl) {
+
+ // Handle annotate attribute on global.
+ tree annotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES (decl));
+
+ if (!annotateAttr)
+ return;
+
+ Function *annotateFun = Intrinsic::getDeclaration(TheModule,
+ Intrinsic::var_annotation);
+
+ // Get file and line number
+ Constant *lineNo =
+ ConstantInt::get(Type::getInt32Ty(Context), DECL_SOURCE_LINE(decl));
+ Constant *file = ConvertMetadataStringToGV(DECL_SOURCE_FILE(decl));
+ const Type *SBP = Type::getInt8PtrTy(Context);
+ file = Builder.getFolder().CreateBitCast(file, SBP);
+
+ // There may be multiple annotate attributes. Pass return of lookup_attr
+ // to successive lookups.
+ while (annotateAttr) {
+
+ // Each annotate attribute is a tree list.
+ // Get value of list which is our linked list of args.
+ tree args = TREE_VALUE(annotateAttr);
+
+ // Each annotate attribute may have multiple args.
+ // Treat each arg as if it were a separate annotate attribute.
+ for (tree a = args; a; a = TREE_CHAIN(a)) {
+ // Each element of the arg list is a tree list, so get value
+ tree val = TREE_VALUE(a);
+
+      // Assert it's a string, and then get that string.
+ assert(TREE_CODE(val) == STRING_CST &&
+ "Annotate attribute arg should always be a string");
+ const Type *SBP = Type::getInt8PtrTy(Context);
+ Constant *strGV = TreeConstantToLLVM::EmitLV_STRING_CST(val);
+ Value *Ops[4] = {
+ Builder.CreateBitCast(V, SBP),
+ Builder.CreateBitCast(strGV, SBP),
+ file,
+ lineNo
+ };
+
+ Builder.CreateCall(annotateFun, Ops, Ops+4);
+ }
+
+ // Get next annotate attribute.
+ annotateAttr = TREE_CHAIN(annotateAttr);
+ if (annotateAttr)
+ annotateAttr = lookup_attribute("annotate", annotateAttr);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// ... Basic Lists and Binding Scopes ...
+//===----------------------------------------------------------------------===//
+
+/// EmitAutomaticVariableDecl - Emit the function-local decl to the current
+/// function and set DECL_LOCAL for the decl to the right pointer.
+void TreeToLLVM::EmitAutomaticVariableDecl(tree decl) {
+ // If this is just the rotten husk of a variable that the gimplifier
+ // eliminated all uses of, but is preserving for debug info, ignore it.
+ if (TREE_CODE(decl) == VAR_DECL && DECL_VALUE_EXPR(decl))
+ return;
+
+ tree type = TREE_TYPE(decl);
+ const Type *Ty; // Type to allocate
+ Value *Size = 0; // Amount to alloca (null for 1)
+
+ if (DECL_SIZE(decl) == 0) { // Variable with incomplete type.
+ if (DECL_INITIAL(decl) == 0)
+ return; // Error message was already done; now avoid a crash.
+ else {
+ // "An initializer is going to decide the size of this array."??
+ TODO(decl);
+ abort();
+ }
+ } else if (TREE_CODE(DECL_SIZE_UNIT(decl)) == INTEGER_CST) {
+ // Variable of fixed size that goes on the stack.
+ Ty = ConvertType(type);
+ } else {
+ // Dynamic-size object: must push space on the stack.
+ if (TREE_CODE(type) == ARRAY_TYPE
+ && isSequentialCompatible(type)
+ && TYPE_SIZE(type) == DECL_SIZE(decl)) {
+ Ty = ConvertType(TREE_TYPE(type)); // Get array element type.
+ // Compute the number of elements in the array.
+ Size = Emit(DECL_SIZE(decl), 0);
+ assert(!integer_zerop(TYPE_SIZE(TREE_TYPE(type)))
+ && "Array of positive size with elements of zero size!");
+ Value *EltSize = Emit(TYPE_SIZE(TREE_TYPE(type)), 0);
+ Size = Builder.CreateUDiv(Size, EltSize, "len");
+ } else {
+ // Compute the variable's size in bytes.
+ Size = Emit(DECL_SIZE_UNIT(decl), 0);
+ Ty = Type::getInt8Ty(Context);
+ }
+ Size = CastToUIntType(Size, Type::getInt32Ty(Context));
+ }
+
+ unsigned Alignment = 0; // Alignment in bytes.
+
+  // Set the alignment for the local if one of the following conditions is met:
+  // 1) DECL_ALIGN is better than the alignment required by the ABI, or
+  // 2) DECL_ALIGN was set by the user.
+ if (DECL_ALIGN(decl)) {
+ unsigned TargetAlign = getTargetData().getABITypeAlignment(Ty);
+ if (DECL_USER_ALIGN(decl) || 8 * TargetAlign < (unsigned)DECL_ALIGN(decl))
+ Alignment = DECL_ALIGN(decl) / 8;
+ }
+
+ // Insert an alloca for this variable.
+ AllocaInst *AI;
+ if (!Size) // Fixed size alloca -> entry block.
+ AI = CreateTemporary(Ty);
+ else
+ AI = Builder.CreateAlloca(Ty, Size);
+ NameValue(AI, decl);
+
+ AI->setAlignment(Alignment);
+
+ SET_DECL_LOCAL(decl, AI);
+
+ // Handle annotate attributes
+ if (DECL_ATTRIBUTES(decl))
+ EmitAnnotateIntrinsic(AI, decl);
+
+ // Handle gcroot attribute
+ if (POINTER_TYPE_P(TREE_TYPE (decl))
+ && lookup_attribute("gcroot", TYPE_ATTRIBUTES(TREE_TYPE (decl))))
+ {
+      // We should null out local variables so that a stack crawl
+      // before initialization doesn't follow garbage pointers.
+ const Type *T = cast<PointerType>(AI->getType())->getElementType();
+ EmitTypeGcroot(AI, decl);
+ Builder.CreateStore(Constant::getNullValue(T), AI);
+ }
+
+ if (TheDebugInfo) {
+ if (DECL_NAME(decl)) {
+ TheDebugInfo->EmitDeclare(decl, dwarf::DW_TAG_auto_variable,
+ AI->getName(), TREE_TYPE(decl), AI,
+ Builder.GetInsertBlock());
+ } else if (TREE_CODE(decl) == RESULT_DECL) {
+ TheDebugInfo->EmitDeclare(decl, dwarf::DW_TAG_return_variable,
+ AI->getName(), TREE_TYPE(decl), AI,
+ Builder.GetInsertBlock());
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// ... Address Of Labels Extension Support ...
+//===----------------------------------------------------------------------===//
+
+/// getIndirectGotoBlockNumber - Return the unique ID of the specified basic
+/// block for uses that take the address of it.
+Constant *TreeToLLVM::getIndirectGotoBlockNumber(BasicBlock *BB) {
+ ConstantInt *&Val = AddressTakenBBNumbers[BB];
+ if (Val) return Val;
+
+ // Assign the new ID, update AddressTakenBBNumbers to remember it.
+ uint64_t BlockNo = ++NumAddressTakenBlocks;
+ BlockNo &= ~0ULL >> (64-TD.getPointerSizeInBits());
+ Val = ConstantInt::get(TD.getIntPtrType(Context), BlockNo);
+
+ // Add it to the switch statement in the indirect goto block.
+ cast<SwitchInst>(getIndirectGotoBlock()->getTerminator())->addCase(Val, BB);
+ return Val;
+}
+
+/// getIndirectGotoBlock - Get (and potentially lazily create) the indirect
+/// goto block.
+BasicBlock *TreeToLLVM::getIndirectGotoBlock() {
+ if (IndirectGotoBlock) return IndirectGotoBlock;
+
+ // Create a temporary for the value to be switched on.
+ IndirectGotoValue = CreateTemporary(TD.getIntPtrType(Context));
+
+ // Create the block, emit a load, and emit the switch in the block.
+ IndirectGotoBlock = BasicBlock::Create(Context, "indirectgoto");
+ Value *Ld = new LoadInst(IndirectGotoValue, "gotodest", IndirectGotoBlock);
+ SwitchInst::Create(Ld, IndirectGotoBlock, 0, IndirectGotoBlock);
+
+ // Finally, return it.
+ return IndirectGotoBlock;
+}
+
+
+//===----------------------------------------------------------------------===//
+// ... Control Flow ...
+//===----------------------------------------------------------------------===//
+
+/// CreateExceptionValues - Create values used internally by exception handling.
+void TreeToLLVM::CreateExceptionValues() {
+ // Check to see if the exception values have been constructed.
+ if (ExceptionValue) return;
+
+ const Type *IntTy = ConvertType(integer_type_node);
+
+ ExceptionValue = CreateTemporary(Type::getInt8PtrTy(Context));
+ ExceptionValue->setName("eh_exception");
+
+ ExceptionSelectorValue = CreateTemporary(IntTy);
+ ExceptionSelectorValue->setName("eh_selector");
+
+ FuncEHException = Intrinsic::getDeclaration(TheModule,
+ Intrinsic::eh_exception);
+ FuncEHSelector = Intrinsic::getDeclaration(TheModule,
+ (IntTy == Type::getInt32Ty(Context) ?
+ Intrinsic::eh_selector_i32 :
+ Intrinsic::eh_selector_i64));
+ FuncEHGetTypeID = Intrinsic::getDeclaration(TheModule,
+ (IntTy == Type::getInt32Ty(Context) ?
+ Intrinsic::eh_typeid_for_i32 :
+ Intrinsic::eh_typeid_for_i64));
+}
+
+/// getPostPad - Return the post landing pad for the given exception handling
+/// region, creating it if necessary.
+BasicBlock *TreeToLLVM::getPostPad(unsigned RegionNo) {
+ PostPads.grow(RegionNo);
+ BasicBlock *&PostPad = PostPads[RegionNo];
+
+ if (!PostPad)
+ PostPad = BasicBlock::Create(Context, "ppad");
+
+ return PostPad;
+}
+
+/// AddHandler - Append the given region to a vector of exception handlers.
+/// A callback passed to foreach_reachable_handler.
+//FIXMEstatic void AddHandler (eh_region region, void *data) {
+//FIXME ((std::vector<eh_region> *)data)->push_back(region);
+//FIXME}
+
+/// EmitLandingPads - Emit EH landing pads.
+void TreeToLLVM::EmitLandingPads() {
+ std::vector<Value*> Args;
+ std::vector<eh_region> Handlers;
+
+ for (unsigned i = 1; i < LandingPads.size(); ++i) {
+ BasicBlock *LandingPad = LandingPads[i];
+
+ if (!LandingPad)
+ continue;
+
+ CreateExceptionValues();
+
+ EmitBlock(LandingPad);
+
+ // Fetch and store the exception.
+ Value *Ex = Builder.CreateCall(FuncEHException, "eh_ptr");
+ Builder.CreateStore(Ex, ExceptionValue);
+
+ // Fetch and store the exception selector.
+
+ // The exception and the personality function.
+ Args.push_back(Builder.CreateLoad(ExceptionValue, "eh_ptr"));
+abort();//FIXME
+//FIXME assert(llvm_eh_personality_libfunc
+//FIXME && "no exception handling personality function!");
+//FIXME Args.push_back(Builder.CreateBitCast(DECL_LOCAL(llvm_eh_personality_libfunc),
+//FIXME Type::getInt8PtrTy(Context)));
+//FIXME
+//FIXME // Add selections for each handler.
+//FIXME foreach_reachable_handler(i, false, false, AddHandler, &Handlers);
+//FIXME
+//FIXME for (std::vector<eh_region>::iterator I = Handlers.begin(),
+//FIXME E = Handlers.end(); I != E; ++I) {
+//FIXME eh_region region = *I;
+//FIXME
+//FIXME // Create a post landing pad for the handler.
+//FIXME getPostPad(get_eh_region_number(region));
+//FIXME
+//FIXME int RegionKind = classify_eh_handler(region);
+//FIXME if (RegionKind < 0) {
+//FIXME // Filter - note the length.
+//FIXME tree TypeList = get_eh_type_list(region);
+//FIXME unsigned Length = list_length(TypeList);
+//FIXME Args.reserve(Args.size() + Length + 1);
+//FIXME Args.push_back(ConstantInt::get(Type::getInt32Ty, Length + 1));
+//FIXME
+//FIXME // Add the type infos.
+//FIXME for (; TypeList; TypeList = TREE_CHAIN(TypeList)) {
+//FIXME tree TType = lookup_type_for_runtime(TREE_VALUE(TypeList));
+//FIXME Args.push_back(Emit(TType, 0));
+//FIXME }
+//FIXME } else if (RegionKind > 0) {
+//FIXME // Catch.
+//FIXME tree TypeList = get_eh_type_list(region);
+//FIXME
+//FIXME if (!TypeList) {
+//FIXME // Catch-all - push a null pointer.
+//FIXME Args.push_back(
+//FIXME Constant::getNullValue(Type::getInt8PtrTy(Context))
+//FIXME );
+//FIXME } else {
+//FIXME // Add the type infos.
+//FIXME for (; TypeList; TypeList = TREE_CHAIN(TypeList)) {
+//FIXME tree TType = lookup_type_for_runtime(TREE_VALUE(TypeList));
+//FIXME Args.push_back(Emit(TType, 0));
+//FIXME }
+//FIXME }
+//FIXME }
+//FIXME }
+//FIXME
+//FIXME if (can_throw_external_1(i, false, false)) {
+//FIXME // Some exceptions from this region may not be caught by any handler.
+//FIXME // Since invokes are required to branch to the unwind label no matter
+//FIXME // what exception is being unwound, append a catch-all.
+//FIXME
+//FIXME // The representation of a catch-all is language specific.
+//FIXME Value *CatchAll;
+//FIXME if (USING_SJLJ_EXCEPTIONS || !lang_eh_catch_all) {
+//FIXME // Use a "cleanup" - this should be good enough for most languages.
+//FIXME CatchAll = ConstantInt::get(Type::getInt32Ty, 0);
+//FIXME } else {
+//FIXME tree catch_all_type = lang_eh_catch_all();
+//FIXME if (catch_all_type == NULL_TREE)
+//FIXME // Use a C++ style null catch-all object.
+//FIXME CatchAll = Constant::getNullValue(
+//FIXME Type::getInt8PtrTy(Context));
+//FIXME else
+//FIXME // This language has a type that catches all others.
+//FIXME CatchAll = Emit(catch_all_type, 0);
+//FIXME }
+//FIXME Args.push_back(CatchAll);
+//FIXME }
+//FIXME
+//FIXME // Emit the selector call.
+//FIXME Value *Select = Builder.CreateCall(FuncEHSelector, Args.begin(), Args.end(),
+//FIXME "eh_select");
+//FIXME Builder.CreateStore(Select, ExceptionSelectorValue);
+//FIXME // Branch to the post landing pad for the first reachable handler.
+//FIXME assert(!Handlers.empty() && "Landing pad but no handler?");
+//FIXME Builder.CreateBr(getPostPad(get_eh_region_number(*Handlers.begin())));
+//FIXME
+//FIXME Handlers.clear();
+//FIXME Args.clear();
+ }
+}
+
+/// EmitPostPads - Emit EH post landing pads.
+void TreeToLLVM::EmitPostPads() {
+ std::vector<eh_region> Handlers;
+
+ for (unsigned i = 1; i < PostPads.size(); ++i) {
+ BasicBlock *PostPad = PostPads[i];
+
+ if (!PostPad)
+ continue;
+
+ CreateExceptionValues();
+
+ EmitBlock(PostPad);
+
+abort();//FIXME
+//FIXME eh_region region = get_eh_region(i);
+//FIXME BasicBlock *Dest = getLabelDeclBlock(get_eh_region_tree_label(region));
+//FIXME
+//FIXME int RegionKind = classify_eh_handler(region);
+//FIXME if (!RegionKind || !get_eh_type_list(region)) {
+//FIXME // Cleanup, catch-all or empty filter - no testing required.
+//FIXME Builder.CreateBr(Dest);
+//FIXME continue;
+//FIXME } else if (RegionKind < 0) {
+//FIXME // Filter - the result of a filter selection will be a negative index if
+//FIXME // there is a match.
+//FIXME Value *Select = Builder.CreateLoad(ExceptionSelectorValue);
+//FIXME
+//FIXME // Compare with the filter action value.
+//FIXME Value *Zero = ConstantInt::get(Select->getType(), 0);
+//FIXME Value *Compare = Builder.CreateICmpSLT(Select, Zero);
+//FIXME
+//FIXME // Branch on the compare.
+//FIXME BasicBlock *NoFilterBB = BasicBlock::Create(Context, "nofilter");
+//FIXME Builder.CreateCondBr(Compare, Dest, NoFilterBB);
+//FIXME EmitBlock(NoFilterBB);
+//FIXME } else if (RegionKind > 0) {
+//FIXME // Catch
+//FIXME tree TypeList = get_eh_type_list(region);
+//FIXME
+//FIXME Value *Cond = NULL;
+//FIXME for (; TypeList; TypeList = TREE_CHAIN (TypeList)) {
+//FIXME Value *TType = Emit(lookup_type_for_runtime(TREE_VALUE(TypeList)), 0);
+//FIXME TType = Builder.CreateBitCast(TType,
+//FIXME Type::getInt8PtrTy(Context));
+//FIXME
+//FIXME // Call get eh type id.
+//FIXME Value *TypeID = Builder.CreateCall(FuncEHGetTypeID, TType, "eh_typeid");
+//FIXME Value *Select = Builder.CreateLoad(ExceptionSelectorValue);
+//FIXME
+//FIXME // Compare with the exception selector.
+//FIXME Value *Compare = Builder.CreateICmpEQ(Select, TypeID);
+//FIXME
+//FIXME Cond = Cond ? Builder.CreateOr(Cond, Compare) : Compare;
+//FIXME }
+//FIXME
+//FIXME BasicBlock *NoCatchBB = NULL;
+//FIXME
+//FIXME      // If the comparison fails, branch to the next catch that has a
+//FIXME // post landing pad.
+//FIXME eh_region next_catch = get_eh_next_catch(region);
+//FIXME for (; next_catch; next_catch = get_eh_next_catch(next_catch)) {
+//FIXME unsigned CatchNo = get_eh_region_number(next_catch);
+//FIXME
+//FIXME if (CatchNo < PostPads.size())
+//FIXME NoCatchBB = PostPads[CatchNo];
+//FIXME
+//FIXME if (NoCatchBB)
+//FIXME break;
+//FIXME }
+//FIXME
+//FIXME if (NoCatchBB) {
+//FIXME // Branch on the compare.
+//FIXME Builder.CreateCondBr(Cond, Dest, NoCatchBB);
+//FIXME continue;
+//FIXME }
+//FIXME
+//FIXME // If there is no such catch, execute a RESX if the comparison fails.
+//FIXME NoCatchBB = BasicBlock::Create(Context, "nocatch");
+//FIXME // Branch on the compare.
+//FIXME Builder.CreateCondBr(Cond, Dest, NoCatchBB);
+//FIXME EmitBlock(NoCatchBB);
+//FIXME }
+//FIXME
+//FIXME // Emit a RESX_EXPR which skips handlers with no post landing pad.
+//FIXME foreach_reachable_handler(i, true, false, AddHandler, &Handlers);
+//FIXME
+//FIXME BasicBlock *TargetBB = NULL;
+//FIXME
+//FIXME for (std::vector<eh_region>::iterator I = Handlers.begin(),
+//FIXME E = Handlers.end(); I != E; ++I) {
+//FIXME unsigned UnwindNo = get_eh_region_number(*I);
+//FIXME
+//FIXME if (UnwindNo < PostPads.size())
+//FIXME TargetBB = PostPads[UnwindNo];
+//FIXME
+//FIXME if (TargetBB)
+//FIXME break;
+//FIXME }
+//FIXME
+//FIXME if (TargetBB) {
+//FIXME Builder.CreateBr(TargetBB);
+//FIXME } else {
+//FIXME assert(can_throw_external_1(i, true, false) &&
+//FIXME "Must-not-throw region handled by runtime?");
+//FIXME // Unwinding continues in the caller.
+//FIXME if (!UnwindBB)
+//FIXME UnwindBB = BasicBlock::Create(Context, "Unwind");
+//FIXME Builder.CreateBr(UnwindBB);
+//FIXME }
+
+ Handlers.clear();
+ }
+}
+
+/// EmitUnwindBlock - Emit the lazily created EH unwind block.
+void TreeToLLVM::EmitUnwindBlock() {
+ if (UnwindBB) {
+ CreateExceptionValues();
+ EmitBlock(UnwindBB);
+abort();//FIXME
+//FIXME // Fetch and store exception handler.
+//FIXME Value *Arg = Builder.CreateLoad(ExceptionValue, "eh_ptr");
+//FIXME assert(llvm_unwind_resume_libfunc && "no unwind resume function!");
+//FIXME
+//FIXME // As we're emitting a naked call (not an expression) going through
+//FIXME // EmitCallOf would be wasteful and incorrect. Manually adjust
+//FIXME // the calling convention for this call here if necessary.
+//FIXME#ifdef TARGET_ADJUST_LLVM_CC
+//FIXME tree fntype = TREE_TYPE(llvm_unwind_resume_libfunc);
+//FIXME CallingConv::ID CallingConvention = CallingConv::C;
+//FIXME
+//FIXME TARGET_ADJUST_LLVM_CC(CallingConvention, fntype);
+//FIXME CallInst *Call = Builder.CreateCall(DECL_LOCAL(llvm_unwind_resume_libfunc),
+//FIXME Arg);
+//FIXME Call->setCallingConv(CallingConvention);
+//FIXME#else
+//FIXME Builder.CreateCall(DECL_LOCAL(llvm_unwind_resume_libfunc), Arg);
+//FIXME#endif
+ Builder.CreateUnreachable();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// ... Expressions ...
+//===----------------------------------------------------------------------===//
+
+static bool canEmitRegisterVariable(tree exp) {
+ // Only variables can be marked as 'register'.
+ if (TREE_CODE(exp) != VAR_DECL || !DECL_REGISTER(exp))
+ return false;
+
+ // We can emit inline assembler for access to global register variables.
+ if (TREE_STATIC(exp) || DECL_EXTERNAL(exp) || TREE_PUBLIC(exp))
+ return true;
+
+  // Emit inline asm if this is a local variable with an assembler name on it.
+ if (DECL_ASSEMBLER_NAME_SET_P(exp))
+ return true;
+
+  // Otherwise it's a normal automatic variable.
+ return false;
+}
+
+/// EmitSSA_NAME - Return the defining value of the given SSA_NAME.
+/// Only creates code in the entry block.
+Value *TreeToLLVM::EmitSSA_NAME(tree reg) {
+ assert(TREE_CODE(reg) == SSA_NAME && "Expected an SSA name!");
+ assert(is_gimple_reg_type(TREE_TYPE(reg)) && "Not of register type!");
+
+ DenseMap<tree, AssertingVH<> >::iterator I = SSANames.find(reg);
+ if (I != SSANames.end())
+ return I->second;
+
+ // This SSA name is the default definition for the underlying symbol.
+ assert(SSA_NAME_IS_DEFAULT_DEF(reg) && "SSA name used before being defined!");
+
+ // The underlying symbol is an SSA variable.
+ tree var = SSA_NAME_VAR(reg);
+ assert(SSA_VAR_P(var) && "Not an SSA variable!");
+
+ // If the variable is itself an ssa name, use its LLVM value.
+ if (TREE_CODE (var) == SSA_NAME) {
+ Value *Val = EmitSSA_NAME(var);
+ return SSANames[reg] = Val;
+ }
+
+ // Otherwise the symbol is a VAR_DECL, PARM_DECL or RESULT_DECL. Since a
+ // default definition is only created if the very first reference to the
+ // variable in the function is a read operation, and refers to the value
+ // read, it has an undefined value except for PARM_DECLs.
+ if (TREE_CODE(var) != PARM_DECL)
+ return UndefValue::get(ConvertType(TREE_TYPE(reg)));
+
+ // Read the initial value of the parameter and associate it with the ssa name.
+ assert(DECL_LOCAL_IF_SET(var) && "Parameter not laid out?");
+
+ unsigned Alignment = DECL_ALIGN(var);
+ assert(Alignment != 0 && "Parameter with unknown alignment!");
+
+ const Type *Ty = ConvertType(TREE_TYPE(reg));
+
+ // Perform the load in the entry block, after all parameters have been set up
+ // with their initial values, and before any modifications to their values.
+ LoadInst *LI = new LoadInst(DECL_LOCAL_IF_SET(var), "", SSAInsertionPoint);
+ LI->setAlignment(Alignment);
+
+ // Potentially perform a useless type conversion (useless_type_conversion_p).
+ Value *Def = LI;
+ if (LI->getType() != Ty)
+ Def = new BitCastInst(Def, Ty, "", SSAInsertionPoint);
+ if (flag_verbose_asm)
+ NameValue(Def, reg);
+ return SSANames[reg] = Def;
+}
+
+/// EmitGimpleInvariantAddress - The given address is constant in this function.
+/// Return the corresponding LLVM value. Only creates code in the entry block.
+Value *TreeToLLVM::EmitGimpleInvariantAddress(tree reg) {
+ assert(is_gimple_invariant_address(reg) &&
+ "Expected a locally constant address!");
+ assert(is_gimple_reg_type(TREE_TYPE(reg)) && "Not of register type!");
+
+ // Any generated code goes in the entry block.
+ BasicBlock *EntryBlock = SSAInsertionPoint->getParent();
+
+ // Note the current builder position.
+ BasicBlock *SavedInsertBB = Builder.GetInsertBlock();
+ BasicBlock::iterator SavedInsertPoint = Builder.GetInsertPoint();
+
+ // Pop the entry block terminator. There may not be a terminator if we are
+ // recursing or if the entry block was not yet finished.
+ Instruction *Terminator = EntryBlock->getTerminator();
+ assert(((SavedInsertBB != EntryBlock && Terminator) ||
+ (SavedInsertPoint == EntryBlock->end() && !Terminator)) &&
+ "Insertion point doesn't make sense!");
+ if (Terminator)
+ Terminator->removeFromParent();
+
+ // Point the builder at the end of the entry block.
+ Builder.SetInsertPoint(EntryBlock);
+
+ // Calculate the address.
+ Value *Address = Emit(reg, 0);
+
+ // Restore the entry block terminator.
+ if (Terminator)
+ EntryBlock->getInstList().push_back(Terminator);
+
+ // Restore the builder insertion point.
+ if (SavedInsertBB != EntryBlock)
+ Builder.SetInsertPoint(SavedInsertBB, SavedInsertPoint);
+
+ return Address;
+}
+
+/// EmitGimpleConstant - Return the LLVM constant for this global constant.
+Constant *TreeToLLVM::EmitGimpleConstant(tree reg) {
+ assert(is_gimple_constant(reg) && "Not a gimple constant!");
+ assert(is_gimple_reg_type(TREE_TYPE(reg)) && "Not of register type!");
+ switch (TREE_CODE(reg)) {
+ default:
+ debug_tree(reg);
+ llvm_unreachable("Unhandled GIMPLE constant!");
+
+ case INTEGER_CST:
+ return TreeConstantToLLVM::ConvertINTEGER_CST(reg);
+ case REAL_CST:
+ return TreeConstantToLLVM::ConvertREAL_CST(reg);
+ case COMPLEX_CST:
+ return TreeConstantToLLVM::ConvertCOMPLEX_CST(reg);
+ case VECTOR_CST:
+ return TreeConstantToLLVM::ConvertVECTOR_CST(reg);
+ case CONSTRUCTOR:
+ return TreeConstantToLLVM::ConvertCONSTRUCTOR(reg);
+ }
+}
+
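+/// EmitGimpleAssignRHS - Emit the right-hand side of a GIMPLE_ASSIGN,
+/// dispatching on the rhs code for unary, binary and comparison expressions.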
+Value *TreeToLLVM::EmitGimpleAssignRHS(gimple stmt, const MemRef *DestLoc) {
+ if (get_gimple_rhs_class(gimple_expr_code(stmt)) == GIMPLE_SINGLE_RHS)
+ return Emit(gimple_assign_rhs1 (stmt), DestLoc);
+
+ tree type = TREE_TYPE(gimple_assign_lhs(stmt));
+ tree_code code = gimple_assign_rhs_code(stmt);
+ tree rhs1 = gimple_assign_rhs1(stmt);
+ tree rhs2 = gimple_assign_rhs2(stmt);
+
+ switch (code) {
+ default:
+ dump(stmt);
+ llvm_unreachable("Unhandled GIMPLE assignment!");
+
+ // Unary expressions.
+ case ABS_EXPR:
+ return EmitABS_EXPR(rhs1);
+ case BIT_NOT_EXPR:
+ return EmitBIT_NOT_EXPR(rhs1);
+ case CONJ_EXPR:
+ return EmitCONJ_EXPR(rhs1);
+ case CONVERT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FLOAT_EXPR:
+ return EmitCONVERT_EXPR(type, rhs1);
+ case NEGATE_EXPR:
+ return EmitNEGATE_EXPR(rhs1);
+ case NON_LVALUE_EXPR:
+ return Emit(rhs1, DestLoc);
+ case NOP_EXPR:
+ return EmitNOP_EXPR(type, rhs1, DestLoc);
+ case PAREN_EXPR:
+ return EmitPAREN_EXPR(rhs1);
+ case TRUTH_NOT_EXPR:
+ return EmitTRUTH_NOT_EXPR(type, rhs1);
+
+ // Comparisons.
+ case EQ_EXPR:
+ case GE_EXPR:
+ case GT_EXPR:
+ case LE_EXPR:
+ case LT_EXPR:
+ case LTGT_EXPR:
+ case NE_EXPR:
+ case ORDERED_EXPR:
+ case UNEQ_EXPR:
+ case UNGE_EXPR:
+ case UNGT_EXPR:
+ case UNLE_EXPR:
+ case UNLT_EXPR:
+ case UNORDERED_EXPR:
+ // The GCC result may be of any integer type.
+ return Builder.CreateZExt(EmitCompare(rhs1, rhs2, code), ConvertType(type));
+
+ // Binary expressions.
+ case BIT_AND_EXPR:
+ return EmitBinOp(type, code, rhs1, rhs2, Instruction::And);
+ case BIT_IOR_EXPR:
+ return EmitBinOp(type, code, rhs1, rhs2, Instruction::Or);
+ case BIT_XOR_EXPR:
+ return EmitBinOp(type, code, rhs1, rhs2, Instruction::Xor);
+ case CEIL_DIV_EXPR:
+ return EmitCEIL_DIV_EXPR(type, rhs1, rhs2);
+ case COMPLEX_EXPR:
+ return EmitCOMPLEX_EXPR(rhs1, rhs2);
+ case EXACT_DIV_EXPR:
+ return EmitBinOp(type, code, rhs1, rhs2, TYPE_UNSIGNED(type) ?
+ Instruction::UDiv : Instruction::SDiv);
+ case FLOOR_DIV_EXPR:
+ return EmitFLOOR_DIV_EXPR(type, rhs1, rhs2);
+ case FLOOR_MOD_EXPR:
+ return EmitFLOOR_MOD_EXPR(type, rhs1, rhs2);
+ case LROTATE_EXPR:
+ return EmitRotateOp(type, rhs1, rhs2, Instruction::Shl, Instruction::LShr);
+ case LSHIFT_EXPR:
+ return EmitShiftOp(rhs1, rhs2, Instruction::Shl);
+ case MAX_EXPR:
+ return EmitMinMaxExpr(type, rhs1, rhs2, ICmpInst::ICMP_UGE,
+ ICmpInst::ICMP_SGE, FCmpInst::FCMP_OGE, true);
+ case MIN_EXPR:
+ return EmitMinMaxExpr(type, rhs1, rhs2, ICmpInst::ICMP_ULE,
+ ICmpInst::ICMP_SLE, FCmpInst::FCMP_OLE, false);
+ case MINUS_EXPR:
+ return EmitBinOp(type, code, rhs1, rhs2, FLOAT_TYPE_P(type) ?
+ Instruction::FSub : Instruction::Sub);
+ case MULT_EXPR:
+ return EmitBinOp(type, code, rhs1, rhs2, FLOAT_TYPE_P(type) ?
+ Instruction::FMul : Instruction::Mul);
+ case PLUS_EXPR:
+ return EmitBinOp(type, code, rhs1, rhs2, FLOAT_TYPE_P(type) ?
+ Instruction::FAdd : Instruction::Add);
+ case POINTER_PLUS_EXPR:
+ return EmitPOINTER_PLUS_EXPR(type, rhs1, rhs2);
+ case RDIV_EXPR:
+ return EmitBinOp(type, code, rhs1, rhs2, Instruction::FDiv);
+ case ROUND_DIV_EXPR:
+ return EmitROUND_DIV_EXPR(type, rhs1, rhs2);
+ case RROTATE_EXPR:
+ return EmitRotateOp(type, rhs1, rhs2, Instruction::LShr, Instruction::Shl);
+ case RSHIFT_EXPR:
+ return EmitShiftOp(rhs1, rhs2, TYPE_UNSIGNED(type) ?
+ Instruction::LShr : Instruction::AShr);
+ case TRUNC_DIV_EXPR:
+ return EmitBinOp(type, code, rhs1, rhs2, TYPE_UNSIGNED(type) ?
+ Instruction::UDiv : Instruction::SDiv);
+ case TRUNC_MOD_EXPR:
+ return EmitBinOp(type, code, rhs1, rhs2, TYPE_UNSIGNED(type) ?
+ Instruction::URem : Instruction::SRem);
+ case TRUTH_AND_EXPR:
+ return EmitTruthOp(type, rhs1, rhs2, Instruction::And);
+ case TRUTH_OR_EXPR:
+ return EmitTruthOp(type, rhs1, rhs2, Instruction::Or);
+ case TRUTH_XOR_EXPR:
+ return EmitTruthOp(type, rhs1, rhs2, Instruction::Xor);
+ }
+}
+
+/// EmitLoadOfLValue - When an l-value expression is used in a context that
+/// requires an r-value, this method emits the lvalue computation, then loads
+/// the result.
+Value *TreeToLLVM::EmitLoadOfLValue(tree exp, const MemRef *DestLoc) {
+ if (canEmitRegisterVariable(exp))
+ // If this is a register variable, EmitLV can't handle it (there is no
+ // l-value of a register variable). Emit an inline asm node that copies the
+ // value out of the specified register.
+ return EmitReadOfRegisterVariable(exp, DestLoc);
+
+ LValue LV = EmitLV(exp);
+ bool isVolatile = TREE_THIS_VOLATILE(exp);
+ const Type *Ty = ConvertType(TREE_TYPE(exp));
+ unsigned Alignment = LV.getAlignment();
+ if (TREE_CODE(exp) == COMPONENT_REF)
+ if (const StructType *STy =
+ dyn_cast<StructType>(ConvertType(TREE_TYPE(TREE_OPERAND(exp, 0)))))
+ if (STy->isPacked())
+ // Packed struct members use 1 byte alignment
+ Alignment = 1;
+
+
+ if (!LV.isBitfield()) {
+ if (!DestLoc) {
+ // Scalar value: emit a load.
+ Value *Ptr = Builder.CreateBitCast(LV.Ptr, Ty->getPointerTo());
+ LoadInst *LI = Builder.CreateLoad(Ptr, isVolatile);
+ LI->setAlignment(Alignment);
+ return LI;
+ } else {
+ EmitAggregateCopy(*DestLoc, MemRef(LV.Ptr, Alignment, isVolatile),
+ TREE_TYPE(exp));
+ return 0;
+ }
+ } else {
+ // This is a bitfield reference.
+ if (!LV.BitSize)
+ return Constant::getNullValue(Ty);
+
+ const Type *ValTy = cast<PointerType>(LV.Ptr->getType())->getElementType();
+ unsigned ValSizeInBits = ValTy->getPrimitiveSizeInBits();
+
+ // The number of loads needed to read the entire bitfield.
+ unsigned Strides = 1 + (LV.BitStart + LV.BitSize - 1) / ValSizeInBits;
+
+ assert(ValTy->isInteger() && "Invalid bitfield lvalue!");
+ assert(ValSizeInBits > LV.BitStart && "Bad bitfield lvalue!");
+ assert(ValSizeInBits >= LV.BitSize && "Bad bitfield lvalue!");
+ assert(2*ValSizeInBits > LV.BitSize+LV.BitStart && "Bad bitfield lvalue!");
+
+ Value *Result = NULL;
+
+ for (unsigned I = 0; I < Strides; I++) {
+ unsigned Index = BYTES_BIG_ENDIAN ? I : Strides - I - 1; // MSB first
+ unsigned ThisFirstBit = Index * ValSizeInBits;
+ unsigned ThisLastBitPlusOne = ThisFirstBit + ValSizeInBits;
+ if (ThisFirstBit < LV.BitStart)
+ ThisFirstBit = LV.BitStart;
+ if (ThisLastBitPlusOne > LV.BitStart+LV.BitSize)
+ ThisLastBitPlusOne = LV.BitStart+LV.BitSize;
+
+ Value *Ptr = Index ?
+ Builder.CreateGEP(LV.Ptr,
+ ConstantInt::get(Type::getInt32Ty(Context), Index)) :
+ LV.Ptr;
+ LoadInst *LI = Builder.CreateLoad(Ptr, isVolatile);
+ LI->setAlignment(Alignment);
+ Value *Val = LI;
+
+ unsigned BitsInVal = ThisLastBitPlusOne - ThisFirstBit;
+ unsigned FirstBitInVal = ThisFirstBit % ValSizeInBits;
+
+ if (BYTES_BIG_ENDIAN)
+ FirstBitInVal = ValSizeInBits-FirstBitInVal-BitsInVal;
+
+ // Mask the bits out by shifting left first, then shifting right. The
+ // LLVM optimizer will turn this into an AND if this is an unsigned
+ // expression.
+
+ if (FirstBitInVal+BitsInVal != ValSizeInBits) {
+ Value *ShAmt = ConstantInt::get(ValTy, ValSizeInBits -
+ (FirstBitInVal+BitsInVal));
+ Val = Builder.CreateShl(Val, ShAmt);
+ }
+
+ // Shift right required?
+ if (ValSizeInBits != BitsInVal) {
+ bool AddSignBits = !TYPE_UNSIGNED(TREE_TYPE(exp)) && !Result;
+ Value *ShAmt = ConstantInt::get(ValTy, ValSizeInBits-BitsInVal);
+ Val = AddSignBits ?
+ Builder.CreateAShr(Val, ShAmt) : Builder.CreateLShr(Val, ShAmt);
+ }
+
+ if (Result) {
+ Value *ShAmt = ConstantInt::get(ValTy, BitsInVal);
+ Result = Builder.CreateShl(Result, ShAmt);
+ Result = Builder.CreateOr(Result, Val);
+ } else {
+ Result = Val;
+ }
+ }
+
+ if (TYPE_UNSIGNED(TREE_TYPE(exp)))
+ return CastToUIntType(Result, Ty);
+ else
+ return CastToSIntType(Result, Ty);
+ }
+}
+
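+/// EmitADDR_EXPR - Return the address of the operand, cast to the type of
+/// the ADDR_EXPR.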
+Value *TreeToLLVM::EmitADDR_EXPR(tree exp) {
+ LValue LV = EmitLV(TREE_OPERAND(exp, 0));
+ assert((!LV.isBitfield() || LV.BitStart == 0) &&
+ "It is illegal to take the address of a bitfield");
+ // Perform a cast here if necessary. For example, GCC sometimes forms an
+ // ADDR_EXPR where the operand is an array, and the ADDR_EXPR type is a
+ // pointer to the first element.
+ return Builder.CreateBitCast(LV.Ptr, ConvertType(TREE_TYPE(exp)));
+}
+
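+/// EmitOBJ_TYPE_REF - Emit the expression wrapped by the OBJ_TYPE_REF and
+/// cast it to the type of the reference.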
+Value *TreeToLLVM::EmitOBJ_TYPE_REF(tree exp) {
+ return Builder.CreateBitCast(Emit(OBJ_TYPE_REF_EXPR(exp), 0),
+ ConvertType(TREE_TYPE(exp)));
+}
+
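+/// EmitGimpleCallRHS - Emit the right-hand side of a GIMPLE_CALL: lower
+/// built-in functions directly where possible, otherwise emit an ordinary
+/// call via EmitCallOf.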
+Value *TreeToLLVM::EmitGimpleCallRHS(gimple stmt, const MemRef *DestLoc) {
+ // Check for a built-in function call. If we can lower it directly, do so
+ // now.
+ tree fndecl = gimple_call_fndecl(stmt);
+ if (fndecl && DECL_BUILT_IN(fndecl) &&
+ DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_FRONTEND) {
+ Value *Res = 0;
+ if (EmitBuiltinCall(stmt, fndecl, DestLoc, Res))
+ return Res;
+ }
+
+ tree call_expr = gimple_call_fn(stmt);
+ assert(TREE_TYPE (call_expr) &&
+ (TREE_CODE(TREE_TYPE (call_expr)) == POINTER_TYPE ||
+ TREE_CODE(TREE_TYPE (call_expr)) == REFERENCE_TYPE)
+ && "Not calling a function pointer?");
+
+ tree function_type = TREE_TYPE(TREE_TYPE (call_expr));
+ Value *Callee = Emit(call_expr, 0);
+ CallingConv::ID CallingConv;
+ AttrListPtr PAL;
+
+ const Type *Ty =
+ TheTypeConverter->ConvertFunctionType(function_type,
+ fndecl,
+ gimple_call_chain(stmt),
+ CallingConv, PAL);
+
+ // If this is a direct call to a function using a static chain then we need
+ // to ensure the function type is the one just calculated: it has an extra
+ // parameter for the chain.
+ Callee = Builder.CreateBitCast(Callee, Ty->getPointerTo());
+
+ Value *Result = EmitCallOf(Callee, stmt, DestLoc, PAL);
+
+ // When calling a "noreturn" function output an unreachable instruction right
+ // after the function to prevent LLVM from thinking that control flow will
+ // fall into the subsequent block.
+ if (gimple_call_flags(stmt) & ECF_NORETURN) {
+ Builder.CreateUnreachable();
+ EmitBlock(BasicBlock::Create(Context));
+ }
+ return Result;
+}
+
+/// llvm_load_scalar_argument - Load value located at LOC.
+static Value *llvm_load_scalar_argument(Value *L,
+ const llvm::Type *LLVMTy,
+ unsigned RealSize,
+ LLVMBuilder &Builder) {
+ if (!RealSize)
+ return UndefValue::get(LLVMTy);
+
+ // Not clear what this is supposed to do on big endian machines...
+ assert(!BYTES_BIG_ENDIAN && "Unsupported case - please report");
+ assert(isa<IntegerType>(LLVMTy) && "Expected an integer value!");
+ const Type *LoadType = IntegerType::get(Context, RealSize * 8);
+ L = Builder.CreateBitCast(L, LoadType->getPointerTo());
+ Value *Val = Builder.CreateLoad(L);
+ if (LoadType->getPrimitiveSizeInBits() >= LLVMTy->getPrimitiveSizeInBits())
+ Val = Builder.CreateTrunc(Val, LLVMTy);
+ else
+ Val = Builder.CreateZExt(Val, LLVMTy);
+ return Val;
+}
+
+#ifndef LLVM_LOAD_SCALAR_ARGUMENT
+#define LLVM_LOAD_SCALAR_ARGUMENT(LOC,TY,SIZE,BUILDER) \
+ llvm_load_scalar_argument((LOC),(TY),(SIZE),(BUILDER))
+#endif
+
+namespace {
+ /// FunctionCallArgumentConversion - This helper class is driven by the ABI
+ /// definition for this target to figure out how to pass arguments into the
+ /// stack/regs for a function call.
+ struct FunctionCallArgumentConversion : public DefaultABIClient {
+ SmallVector<Value*, 16> &CallOperands;
+ SmallVector<Value*, 2> LocStack;
+ const FunctionType *FTy;
+ const MemRef *DestLoc;
+ bool useReturnSlot;
+ LLVMBuilder &Builder;
+ Value *TheValue;
+ MemRef RetBuf;
+ CallingConv::ID &CallingConv;
+ bool isShadowRet;
+ bool isAggrRet;
+ unsigned Offset;
+
+ FunctionCallArgumentConversion(SmallVector<Value*, 16> &ops,
+ const FunctionType *FnTy,
+ const MemRef *destloc,
+ bool ReturnSlotOpt,
+ LLVMBuilder &b,
+ CallingConv::ID &CC)
+ : CallOperands(ops), FTy(FnTy), DestLoc(destloc),
+ useReturnSlot(ReturnSlotOpt), Builder(b), CallingConv(CC),
+ isShadowRet(false), isAggrRet(false), Offset(0) { }
+
+ /// getCallingConv - This provides the desired CallingConv for the function.
+ CallingConv::ID& getCallingConv(void) { return CallingConv; }
+
+ // Push the address of an argument.
+ void pushAddress(Value *Loc) {
+ assert(Loc && "Invalid location!");
+ LocStack.push_back(Loc);
+ }
+
+ // Push the value of an argument.
+ void pushValue(Value *V) {
+ assert(LocStack.empty() && "Value only allowed at top level!");
+ LocStack.push_back(NULL);
+ TheValue = V;
+ }
+
+ // Get the address of the current location.
+ Value *getAddress(void) {
+ assert(!LocStack.empty());
+ Value *&Loc = LocStack.back();
+ if (!Loc) {
+ // A value. Store to a temporary, and return the temporary's address.
+ // Any future access to this argument will reuse the same address.
+ Loc = TheTreeToLLVM->CreateTemporary(TheValue->getType());
+ Builder.CreateStore(TheValue, Loc);
+ }
+ return Loc;
+ }
+
+ // Get the value of the current location (of type Ty).
+ Value *getValue(const Type *Ty) {
+ assert(!LocStack.empty());
+ Value *Loc = LocStack.back();
+ if (Loc) {
+ // An address. Convert to the right type and load the value out.
+ Loc = Builder.CreateBitCast(Loc, Ty->getPointerTo());
+ return Builder.CreateLoad(Loc, "val");
+ } else {
+ // A value - just return it.
+ assert(TheValue->getType() == Ty && "Value not of expected type!");
+ return TheValue;
+ }
+ }
+
+ void clear() {
+ assert(LocStack.size() == 1 && "Imbalance!");
+ LocStack.clear();
+ }
+
+ bool isShadowReturn() { return isShadowRet; }
+ bool isAggrReturn() { return isAggrRet; }
+
+ // EmitShadowResult - If the return result was redirected to a buffer,
+ // emit it now.
+ Value *EmitShadowResult(tree type, const MemRef *DestLoc) {
+ if (!RetBuf.Ptr)
+ return 0;
+
+ if (DestLoc) {
+ // Copy out the aggregate return value now.
+ assert(ConvertType(type) ==
+ cast<PointerType>(RetBuf.Ptr->getType())->getElementType() &&
+ "Inconsistent result types!");
+ TheTreeToLLVM->EmitAggregateCopy(*DestLoc, RetBuf, type);
+ return 0;
+ } else {
+ // Read out the scalar return value now.
+ return Builder.CreateLoad(RetBuf.Ptr, "result");
+ }
+ }
+
+ /// HandleScalarResult - This callback is invoked if the function returns a
+ /// simple scalar result value.
+ void HandleScalarResult(const Type *RetTy) {
+ // There is nothing to do here if we return a scalar or void.
+ assert(DestLoc == 0 &&
+ "Call returns a scalar but caller expects aggregate!");
+ }
+
+ /// HandleAggregateResultAsScalar - This callback is invoked if the function
+ /// returns an aggregate value by bit converting it to the specified scalar
+ /// type and returning that.
+ void HandleAggregateResultAsScalar(const Type *ScalarTy,
+ unsigned Offset = 0) {
+ this->Offset = Offset;
+ }
+
+ /// HandleAggregateResultAsAggregate - This callback is invoked if the
+ /// function returns an aggregate value using multiple return values.
+ void HandleAggregateResultAsAggregate(const Type *AggrTy) {
+ // There is nothing to do here.
+ isAggrRet = true;
+ }
+
+ /// HandleAggregateShadowResult - This callback is invoked if the function
+ /// returns an aggregate value by using a "shadow" first parameter. If
+ /// RetPtr is set to true, the pointer argument itself is returned from the
+ /// function.
+ void HandleAggregateShadowResult(const PointerType *PtrArgTy,
+ bool RetPtr) {
+ // We need to pass memory to write the return value into.
+ // FIXME: alignment and volatility are being ignored!
+ assert(!DestLoc || PtrArgTy == DestLoc->Ptr->getType());
+
+ if (DestLoc == 0) {
+ // The result is unused, but still needs to be stored somewhere.
+ Value *Buf = TheTreeToLLVM->CreateTemporary(PtrArgTy->getElementType());
+ CallOperands.push_back(Buf);
+ } else if (useReturnSlot) {
+ // Letting the call write directly to the final destination is safe and
+ // may be required. Do not use a buffer.
+ CallOperands.push_back(DestLoc->Ptr);
+ } else {
+ // Letting the call write directly to the final destination may not be
+ // safe (eg: if DestLoc aliases a parameter) and is not required - pass
+ // a buffer and copy it to DestLoc after the call.
+ RetBuf = TheTreeToLLVM->CreateTempLoc(PtrArgTy->getElementType());
+ CallOperands.push_back(RetBuf.Ptr);
+ }
+
+ // Note the use of a shadow argument.
+ isShadowRet = true;
+ }
+
+ /// HandleScalarShadowResult - This callback is invoked if the function
+ /// returns a scalar value by using a "shadow" first parameter, which is a
+ /// pointer to the scalar, of type PtrArgTy. If RetPtr is set to true,
+ /// the pointer argument itself is returned from the function.
+ void HandleScalarShadowResult(const PointerType *PtrArgTy, bool RetPtr) {
+ assert(DestLoc == 0 &&
+ "Call returns a scalar but caller expects aggregate!");
+ // Create a buffer to hold the result. The result will be loaded out of
+ // it after the call.
+ RetBuf = TheTreeToLLVM->CreateTempLoc(PtrArgTy->getElementType());
+ CallOperands.push_back(RetBuf.Ptr);
+
+ // Note the use of a shadow argument.
+ isShadowRet = true;
+ }
+
+ /// HandleScalarArgument - This is the primary callback that specifies an
+ /// LLVM argument to pass. It is only used for first class types.
+ void HandleScalarArgument(const llvm::Type *LLVMTy, tree type,
+ unsigned RealSize = 0) {
+ Value *Loc = NULL;
+ if (RealSize) {
+ Value *L = getAddress();
+ Loc = LLVM_LOAD_SCALAR_ARGUMENT(L,LLVMTy,RealSize,Builder);
+ } else
+ Loc = getValue(LLVMTy);
+
+ // Perform any implicit type conversions.
+ if (CallOperands.size() < FTy->getNumParams()) {
+ const Type *CalledTy= FTy->getParamType(CallOperands.size());
+ if (Loc->getType() != CalledTy) {
+ assert(type && "Inconsistent parameter types?");
+ bool isSigned = !TYPE_UNSIGNED(type);
+ Loc = TheTreeToLLVM->CastToAnyType(Loc, isSigned, CalledTy, false);
+ }
+ }
+
+ CallOperands.push_back(Loc);
+ }
+
+ /// HandleByInvisibleReferenceArgument - This callback is invoked if a
+ /// pointer (of type PtrTy) to the argument is passed rather than the
+ /// argument itself.
+ void HandleByInvisibleReferenceArgument(const llvm::Type *PtrTy, tree type){
+ Value *Loc = getAddress();
+ Loc = Builder.CreateBitCast(Loc, PtrTy);
+ CallOperands.push_back(Loc);
+ }
+
+ /// HandleByValArgument - This callback is invoked if the aggregate function
+ /// argument is passed by value. It is lowered to a parameter passed by
+ /// reference with an additional parameter attribute "ByVal".
+ void HandleByValArgument(const llvm::Type *LLVMTy, tree type) {
+ Value *Loc = getAddress();
+ assert(LLVMTy->getPointerTo() == Loc->getType());
+ CallOperands.push_back(Loc);
+ }
+
+ /// HandleFCAArgument - This callback is invoked if the aggregate function
+ /// argument is passed as a first class aggregate.
+ void HandleFCAArgument(const llvm::Type *LLVMTy, tree type) {
+ Value *Loc = getAddress();
+ assert(LLVMTy->getPointerTo() == Loc->getType());
+ CallOperands.push_back(Builder.CreateLoad(Loc));
+ }
+
+    /// EnterField - Called when we're about to enter a field of a struct
+ /// or union. FieldNo is the number of the element we are entering in the
+ /// LLVM Struct, StructTy is the LLVM type of the struct we are entering.
+ void EnterField(unsigned FieldNo, const llvm::Type *StructTy) {
+ Value *Loc = getAddress();
+ Loc = Builder.CreateBitCast(Loc, StructTy->getPointerTo());
+ pushAddress(Builder.CreateStructGEP(Loc, FieldNo, "elt"));
+ }
+ void ExitField() {
+ assert(!LocStack.empty());
+ LocStack.pop_back();
+ }
+ };
+}
+
+/// EmitCallOf - Emit a call to the specified callee with the operands specified
+/// in the GIMPLE_CALL 'stmt'. If the result of the call is a scalar, return the
+/// result, otherwise store it in DestLoc.
+Value *TreeToLLVM::EmitCallOf(Value *Callee, gimple stmt, const MemRef *DestLoc,
+ const AttrListPtr &InPAL) {
+ BasicBlock *LandingPad = 0; // Non-zero indicates an invoke.
+
+ AttrListPtr PAL = InPAL;
+ if (PAL.isEmpty() && isa<Function>(Callee))
+ PAL = cast<Function>(Callee)->getAttributes();
+
+ // Work out whether to use an invoke or an ordinary call.
+ if (!stmt_could_throw_p(stmt))
+ // This call does not throw - mark it 'nounwind'.
+ PAL = PAL.addAttr(~0, Attribute::NoUnwind);
+
+ if (!PAL.paramHasAttr(~0, Attribute::NoUnwind)) {
+ // This call may throw. Determine if we need to generate
+ // an invoke rather than a simple call.
+//FIXME int RegionNo = lookup_stmt_eh_region(stmt);
+//FIXME
+//FIXME // Is the call contained in an exception handling region?
+//FIXME if (RegionNo > 0) {
+//FIXME // Are there any exception handlers for this region?
+//FIXME if (can_throw_internal_1(RegionNo, false, false)) {
+//FIXME // There are - turn the call into an invoke.
+//FIXME LandingPads.grow(RegionNo);
+//FIXME BasicBlock *&ThisPad = LandingPads[RegionNo];
+//FIXME
+//FIXME // Create a landing pad if one didn't exist already.
+//FIXME if (!ThisPad)
+//FIXME ThisPad = BasicBlock::Create(Context, "lpad");
+//FIXME
+//FIXME LandingPad = ThisPad;
+//FIXME } else {
+//FIXME assert(can_throw_external_1(RegionNo, false, false) &&
+//FIXME "Must-not-throw region handled by runtime?");
+//FIXME }
+//FIXME }
+ }
+
+ tree fndecl = gimple_call_fndecl(stmt);
+ tree fntype = fndecl ?
+ TREE_TYPE(fndecl) : TREE_TYPE (TREE_TYPE(gimple_call_fn(stmt)));
+
+ // Determine the calling convention.
+ CallingConv::ID CallingConvention = CallingConv::C;
+#ifdef TARGET_ADJUST_LLVM_CC
+ TARGET_ADJUST_LLVM_CC(CallingConvention, fntype);
+#endif
+
+ SmallVector<Value*, 16> CallOperands;
+ const PointerType *PFTy = cast<PointerType>(Callee->getType());
+ const FunctionType *FTy = cast<FunctionType>(PFTy->getElementType());
+ FunctionCallArgumentConversion Client(CallOperands, FTy, DestLoc,
+ gimple_call_return_slot_opt_p(stmt),
+ Builder, CallingConvention);
+ TheLLVMABI<FunctionCallArgumentConversion> ABIConverter(Client);
+
+ // Handle the result, including struct returns.
+ ABIConverter.HandleReturnType(gimple_call_return_type(stmt),
+ fndecl ? fndecl : fntype,
+ fndecl ? DECL_BUILT_IN(fndecl) : false);
+
+ // Pass the static chain, if any, as the first parameter.
+ if (gimple_call_chain(stmt))
+ CallOperands.push_back(Emit(gimple_call_chain(stmt), 0));
+
+ // Loop over the arguments, expanding them and adding them to the op list.
+ std::vector<const Type*> ScalarArgs;
+ for (unsigned i = 0, e = gimple_call_num_args(stmt); i != e; ++i) {
+ tree arg = gimple_call_arg(stmt, i);
+ tree type = TREE_TYPE(arg);
+ const Type *ArgTy = ConvertType(type);
+
+ // Push the argument.
+ if (ArgTy->isSingleValueType()) {
+ // A scalar - push the value.
+ Client.pushValue(Emit(arg, 0));
+ } else if (LLVM_SHOULD_PASS_AGGREGATE_AS_FCA(type, ArgTy)) {
+ if (AGGREGATE_TYPE_P(type)) {
+ // Pass the aggregate as a first class value.
+ LValue ArgVal = EmitLV(arg);
+ Client.pushValue(Builder.CreateLoad(ArgVal.Ptr));
+ } else {
+ // Already first class (eg: a complex number) - push the value.
+ Client.pushValue(Emit(arg, 0));
+ }
+ } else {
+ if (AGGREGATE_TYPE_P(type)) {
+ // An aggregate - push the address.
+ LValue ArgVal = EmitLV(arg);
+ assert(!ArgVal.isBitfield() && "Bitfields are first-class types!");
+ Client.pushAddress(ArgVal.Ptr);
+ } else {
+ // A first class value (eg: a complex number). Push the address of a
+ // temporary copy.
+ Value *Copy = CreateTemporary(ArgTy);
+ Builder.CreateStore(Emit(arg, 0), Copy);
+ Client.pushAddress(Copy);
+ }
+ }
+
+ Attributes Attrs = Attribute::None;
+
+ unsigned OldSize = CallOperands.size();
+
+ ABIConverter.HandleArgument(type, ScalarArgs, &Attrs);
+
+ if (Attrs != Attribute::None) {
+ // If the argument is split into multiple scalars, assign the
+ // attributes to all scalars of the aggregate.
+ for (unsigned i = OldSize + 1; i <= CallOperands.size(); ++i) {
+ PAL = PAL.addAttr(i, Attrs);
+ }
+ }
+
+ Client.clear();
+ }
+
+ // Compile stuff like:
+ // %tmp = call float (...)* bitcast (float ()* @foo to float (...)*)( )
+ // to:
+ // %tmp = call float @foo( )
+ // This commonly occurs due to C "implicit ..." semantics.
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Callee)) {
+ if (CallOperands.empty() && CE->getOpcode() == Instruction::BitCast) {
+ Constant *RealCallee = CE->getOperand(0);
+ assert(isa<PointerType>(RealCallee->getType()) &&
+ "Bitcast to ptr not from ptr?");
+ const PointerType *RealPT = cast<PointerType>(RealCallee->getType());
+ if (const FunctionType *RealFT =
+ dyn_cast<FunctionType>(RealPT->getElementType())) {
+ const PointerType *ActualPT = cast<PointerType>(Callee->getType());
+ const FunctionType *ActualFT =
+ cast<FunctionType>(ActualPT->getElementType());
+ if (RealFT->getReturnType() == ActualFT->getReturnType() &&
+ RealFT->getNumParams() == 0)
+ Callee = RealCallee;
+ }
+ }
+ }
+
+ Value *Call;
+ if (!LandingPad) {
+ Call = Builder.CreateCall(Callee, CallOperands.begin(), CallOperands.end());
+ cast<CallInst>(Call)->setCallingConv(CallingConvention);
+ cast<CallInst>(Call)->setAttributes(PAL);
+ } else {
+ BasicBlock *NextBlock = BasicBlock::Create(Context);
+ Call = Builder.CreateInvoke(Callee, NextBlock, LandingPad,
+ CallOperands.begin(), CallOperands.end());
+ cast<InvokeInst>(Call)->setCallingConv(CallingConvention);
+ cast<InvokeInst>(Call)->setAttributes(PAL);
+ EmitBlock(NextBlock);
+ }
+
+ if (Client.isShadowReturn())
+ return Client.EmitShadowResult(gimple_call_return_type(stmt), DestLoc);
+
+ if (Call->getType()->isVoidTy())
+ return 0;
+
+ if (Client.isAggrReturn()) {
+ Value *Dest = Builder.CreateBitCast(DestLoc->Ptr,
+ PointerType::getUnqual(Call->getType()));
+ LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(Call,Dest,DestLoc->Volatile,Builder);
+ return 0;
+ }
+
+ // If the caller expects an aggregate, we have a situation where the ABI for
+ // the current target specifies that the aggregate be returned in scalar
+ // registers even though it is an aggregate. We must bitconvert the scalar
+ // to the destination aggregate type. We do this by casting the DestLoc
+ // pointer and storing into it. The store does not necessarily start at the
+ // beginning of the aggregate (x86-64).
+ if (!DestLoc)
+ return Call; // Normal scalar return.
+
+ Value *Ptr = DestLoc->Ptr;
+ if (Client.Offset) {
+ Ptr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
+ Ptr = Builder.CreateGEP(Ptr,
+ ConstantInt::get(TD.getIntPtrType(Context), Client.Offset));
+ }
+ Ptr = Builder.CreateBitCast(Ptr, PointerType::getUnqual(Call->getType()));
+ StoreInst *St = Builder.CreateStore(Call, Ptr, DestLoc->Volatile);
+ St->setAlignment(DestLoc->getAlignment());
+ return 0;
+}
+
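+/// EmitNOP_EXPR - A NOP_EXPR is a type conversion.  Handle the scalar to
+/// scalar, aggregate to aggregate and scalar to aggregate cases.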
+Value *TreeToLLVM::EmitNOP_EXPR(tree type, tree op, const MemRef *DestLoc) {
+ const Type *Ty = ConvertType(type);
+ bool OpIsSigned = !TYPE_UNSIGNED(TREE_TYPE(op));
+ bool ExpIsSigned = !TYPE_UNSIGNED(type);
+ if (DestLoc == 0) {
+ // Scalar to scalar copy.
+ assert(!AGGREGATE_TYPE_P(TREE_TYPE(op))
+ && "Aggregate to scalar nop_expr!");
+ return CastToAnyType(Emit(op, 0), OpIsSigned, Ty, ExpIsSigned);
+ } else if (AGGREGATE_TYPE_P(TREE_TYPE(op))) {
+ // Aggregate to aggregate copy.
+ MemRef NewLoc = *DestLoc;
+ NewLoc.Ptr = Builder.CreateBitCast(DestLoc->Ptr,Ty->getPointerTo());
+ Value *OpVal = Emit(op, &NewLoc);
+ (void)OpVal;
+ assert(OpVal == 0 && "Shouldn't cast scalar to aggregate!");
+ return 0;
+ }
+
+ // Scalar to aggregate copy.
+ Value *OpVal = Emit(op, 0);
+ Value *Ptr = Builder.CreateBitCast(DestLoc->Ptr,
+ PointerType::getUnqual(OpVal->getType()));
+ StoreInst *St = Builder.CreateStore(OpVal, Ptr, DestLoc->Volatile);
+ St->setAlignment(DestLoc->getAlignment());
+ return 0;
+}
+
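+/// EmitCONVERT_EXPR - Convert the operand to the given type, respecting the
+/// signedness of both the source and destination types.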
+Value *TreeToLLVM::EmitCONVERT_EXPR(tree type, tree op) {
+ bool OpIsSigned = !TYPE_UNSIGNED(TREE_TYPE(op));
+ bool ExpIsSigned = !TYPE_UNSIGNED(type);
+ return CastToAnyType(Emit(op, 0), OpIsSigned, ConvertType(type), ExpIsSigned);
+}
+
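+/// EmitVIEW_CONVERT_EXPR - Reinterpret the bits of the operand as the given
+/// type without changing them, for both aggregate and scalar operands.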
+Value *TreeToLLVM::EmitVIEW_CONVERT_EXPR(tree exp, const MemRef *DestLoc) {
+ tree Op = TREE_OPERAND(exp, 0);
+
+ if (AGGREGATE_TYPE_P(TREE_TYPE(Op))) {
+ MemRef Target;
+ if (DestLoc)
+      // This is an aggregate-to-aggregate VIEW_CONVERT_EXPR, so just evaluate
+      // it in place.
+ Target = *DestLoc;
+ else
+ // This is an aggregate-to-scalar VIEW_CONVERT_EXPR, evaluate, then load.
+ Target = CreateTempLoc(ConvertType(TREE_TYPE(exp)));
+
+ // Make the destination look like the source type.
+ const Type *OpTy = ConvertType(TREE_TYPE(Op));
+ Target.Ptr = Builder.CreateBitCast(Target.Ptr, OpTy->getPointerTo());
+
+ // Needs to be in sync with EmitLV.
+ switch (TREE_CODE(Op)) {
+ default: {
+ Value *OpVal = Emit(Op, &Target);
+ (void)OpVal;
+ assert(OpVal == 0 && "Expected an aggregate operand!");
+ break;
+ }
+
+ // Lvalues
+ case VAR_DECL:
+ case PARM_DECL:
+ case RESULT_DECL:
+ case INDIRECT_REF:
+ case ARRAY_REF:
+ case ARRAY_RANGE_REF:
+ case COMPONENT_REF:
+ case BIT_FIELD_REF:
+ case STRING_CST:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ // Same as EmitLoadOfLValue but taking the size from TREE_TYPE(exp), since
+ // the size of TREE_TYPE(Op) may not be available.
+ LValue LV = EmitLV(Op);
+ assert(!LV.isBitfield() && "Expected an aggregate operand!");
+ bool isVolatile = TREE_THIS_VOLATILE(Op);
+ unsigned Alignment = LV.getAlignment();
+
+ EmitAggregateCopy(Target, MemRef(LV.Ptr, Alignment, isVolatile),
+ TREE_TYPE(exp));
+ break;
+ }
+
+ if (DestLoc)
+ return 0;
+
+ // Target holds the temporary created above.
+ const Type *ExpTy = ConvertType(TREE_TYPE(exp));
+ return Builder.CreateLoad(Builder.CreateBitCast(Target.Ptr,
+ ExpTy->getPointerTo()));
+ }
+
+ if (DestLoc) {
+ // The input is a scalar the output is an aggregate, just eval the input,
+ // then store into DestLoc.
+ Value *OpVal = Emit(Op, 0);
+ assert(OpVal && "Expected a scalar result!");
+ Value *Ptr = Builder.CreateBitCast(DestLoc->Ptr,
+ PointerType::getUnqual(OpVal->getType()));
+ StoreInst *St = Builder.CreateStore(OpVal, Ptr, DestLoc->Volatile);
+ St->setAlignment(DestLoc->getAlignment());
+ return 0;
+ }
+
+ // Otherwise, this is a scalar to scalar conversion.
+ Value *OpVal = Emit(Op, 0);
+ assert(OpVal && "Expected a scalar result!");
+ const Type *DestTy = ConvertType(TREE_TYPE(exp));
+
+ // If the source is a pointer, use ptrtoint to get it to something
+ // bitcast'able. This supports things like v_c_e(foo*, float).
+ if (isa<PointerType>(OpVal->getType())) {
+ if (isa<PointerType>(DestTy)) // ptr->ptr is a simple bitcast.
+ return Builder.CreateBitCast(OpVal, DestTy);
+ // Otherwise, ptrtoint to intptr_t first.
+ OpVal = Builder.CreatePtrToInt(OpVal, TD.getIntPtrType(Context));
+ }
+
+ // If the destination type is a pointer, use inttoptr.
+ if (isa<PointerType>(DestTy))
+ return Builder.CreateIntToPtr(OpVal, DestTy);
+
+ // Otherwise, use a bitcast.
+ return Builder.CreateBitCast(OpVal, DestTy);
+}
+
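+/// EmitNEGATE_EXPR - Negate the operand; complex values are negated
+/// componentwise.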
+Value *TreeToLLVM::EmitNEGATE_EXPR(tree op) {
+ Value *V = Emit(op, 0);
+
+ if (TREE_CODE(TREE_TYPE(op)) != COMPLEX_TYPE) {
+ if (V->getType()->isFPOrFPVector())
+ return Builder.CreateFNeg(V);
+ return Builder.CreateNeg(V);
+ }
+
+ // -(a+ib) = -a + i*-b
+ Value *R, *I;
+ SplitComplex(V, R, I);
+ if (R->getType()->isFloatingPoint()) {
+ R = Builder.CreateFNeg(R);
+ I = Builder.CreateFNeg(I);
+ } else {
+ R = Builder.CreateNeg(R);
+ I = Builder.CreateNeg(I);
+ }
+ return CreateComplex(R, I);
+}
+
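+/// EmitCONJ_EXPR - Emit the complex conjugate by negating the imaginary part.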
+Value *TreeToLLVM::EmitCONJ_EXPR(tree op) {
+ // ~(a+ib) = a + i*-b
+ Value *R, *I;
+ SplitComplex(Emit(op, 0), R, I);
+ if (I->getType()->isFloatingPoint())
+ I = Builder.CreateFNeg(I);
+ else
+ I = Builder.CreateNeg(I);
+ return CreateComplex(R, I);
+}
+
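+/// EmitABS_EXPR - Integer absolute value is emitted as a compare and select;
+/// floating point absolute value becomes a call to fabsf/fabs/fabsl.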
+Value *TreeToLLVM::EmitABS_EXPR(tree op) {
+ Value *Op = EmitGimpleReg(op);
+ if (!Op->getType()->isFloatingPoint()) {
+ Value *OpN = Builder.CreateNeg(Op, (Op->getNameStr()+"neg").c_str());
+ ICmpInst::Predicate pred = TYPE_UNSIGNED(TREE_TYPE(op)) ?
+ ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
+ Value *Cmp = Builder.CreateICmp(pred, Op,
+ Constant::getNullValue(Op->getType()), "abscond");
+ return Builder.CreateSelect(Cmp, Op, OpN, "abs");
+ }
+
+ // Turn FP abs into fabs/fabsf.
+ const char *Name = 0;
+
+ switch (Op->getType()->getTypeID()) {
+ default: assert(0 && "Unknown FP type!");
+ case Type::FloatTyID: Name = "fabsf"; break;
+ case Type::DoubleTyID: Name = "fabs"; break;
+ case Type::X86_FP80TyID:
+ case Type::PPC_FP128TyID:
+ case Type::FP128TyID: Name = "fabsl"; break;
+ }
+
+ Value *V = TheModule->getOrInsertFunction(Name, Op->getType(), Op->getType(),
+ NULL);
+ CallInst *Call = Builder.CreateCall(V, Op);
+ Call->setDoesNotThrow();
+ Call->setDoesNotAccessMemory();
+ return Call;
+}
+
+/// getSuitableBitCastIntType - Given Ty is a floating point type or a vector
+/// type with floating point elements, return an integer type to bitcast to.
+/// e.g. 4 x float -> 4 x i32
+static const Type *getSuitableBitCastIntType(const Type *Ty) {
+ if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+ unsigned NumElements = VTy->getNumElements();
+ const Type *EltTy = VTy->getElementType();
+ return VectorType::get(
+ IntegerType::get(Context, EltTy->getPrimitiveSizeInBits()), NumElements);
+ }
+ return IntegerType::get(Context, Ty->getPrimitiveSizeInBits());
+}
+
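+/// EmitBIT_NOT_EXPR - Emit the bitwise complement of the operand.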
+Value *TreeToLLVM::EmitBIT_NOT_EXPR(tree op) {
+ Value *Op = EmitGimpleReg(op);
+ return Builder.CreateNot(Op, (Op->getNameStr()+"not").c_str());
+}
+
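+/// EmitTRUTH_NOT_EXPR - Logically negate the operand: compare it against
+/// zero, invert the i1 result and cast it to the result type.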
+Value *TreeToLLVM::EmitTRUTH_NOT_EXPR(tree type, tree op) {
+ Value *V = EmitGimpleReg(op);
+ if (V->getType() != Type::getInt1Ty(Context))
+ V = Builder.CreateICmpNE(V,
+ Constant::getNullValue(V->getType()), "toBool");
+ V = Builder.CreateNot(V, (V->getNameStr()+"not").c_str());
+ return CastToUIntType(V, ConvertType(type));
+}
+
+/// EmitCompare - Compare LHS with RHS using the appropriate comparison code.
+/// The result is an i1 boolean.
+Value *TreeToLLVM::EmitCompare(tree lhs, tree rhs, tree_code code) {
+ Value *LHS = EmitGimpleReg(lhs);
+ Value *RHS = Builder.CreateBitCast(EmitGimpleReg(rhs), LHS->getType());
+
+ // Compute the LLVM opcodes corresponding to the GCC comparison.
+ CmpInst::Predicate UIPred = CmpInst::BAD_ICMP_PREDICATE;
+ CmpInst::Predicate SIPred = CmpInst::BAD_ICMP_PREDICATE;
+ CmpInst::Predicate FPPred = CmpInst::BAD_FCMP_PREDICATE;
+
+ switch (code) {
+ default:
+ assert(false && "Unhandled condition code!");
+ case LT_EXPR:
+ UIPred = CmpInst::ICMP_ULT;
+ SIPred = CmpInst::ICMP_SLT;
+ FPPred = CmpInst::FCMP_OLT;
+ break;
+ case LE_EXPR:
+ UIPred = CmpInst::ICMP_ULE;
+ SIPred = CmpInst::ICMP_SLE;
+ FPPred = CmpInst::FCMP_OLE;
+ break;
+ case GT_EXPR:
+ UIPred = CmpInst::ICMP_UGT;
+ SIPred = CmpInst::ICMP_SGT;
+ FPPred = CmpInst::FCMP_OGT;
+ break;
+ case GE_EXPR:
+ UIPred = CmpInst::ICMP_UGE;
+ SIPred = CmpInst::ICMP_SGE;
+ FPPred = CmpInst::FCMP_OGE;
+ break;
+ case EQ_EXPR:
+ UIPred = SIPred = CmpInst::ICMP_EQ;
+ FPPred = CmpInst::FCMP_OEQ;
+ break;
+ case NE_EXPR:
+ UIPred = SIPred = CmpInst::ICMP_NE;
+ FPPred = CmpInst::FCMP_UNE;
+ break;
+ case UNORDERED_EXPR: FPPred = CmpInst::FCMP_UNO; break;
+ case ORDERED_EXPR: FPPred = CmpInst::FCMP_ORD; break;
+ case UNLT_EXPR: FPPred = CmpInst::FCMP_ULT; break;
+ case UNLE_EXPR: FPPred = CmpInst::FCMP_ULE; break;
+ case UNGT_EXPR: FPPred = CmpInst::FCMP_UGT; break;
+ case UNGE_EXPR: FPPred = CmpInst::FCMP_UGE; break;
+ case UNEQ_EXPR: FPPred = CmpInst::FCMP_UEQ; break;
+ case LTGT_EXPR: FPPred = CmpInst::FCMP_ONE; break;
+ }
+
+ if (TREE_CODE(TREE_TYPE(lhs)) == COMPLEX_TYPE) {
+ Value *LHSr, *LHSi;
+ SplitComplex(LHS, LHSr, LHSi);
+ Value *RHSr, *RHSi;
+ SplitComplex(RHS, RHSr, RHSi);
+
+ Value *DSTr, *DSTi;
+ if (LHSr->getType()->isFloatingPoint()) {
+ DSTr = Builder.CreateFCmp(FPPred, LHSr, RHSr);
+ DSTi = Builder.CreateFCmp(FPPred, LHSi, RHSi);
+ if (FPPred == CmpInst::FCMP_OEQ)
+ return Builder.CreateAnd(DSTr, DSTi);
+ assert(FPPred == CmpInst::FCMP_UNE && "Unhandled complex comparison!");
+ return Builder.CreateOr(DSTr, DSTi);
+ }
+
+ assert(SIPred == UIPred && "(In)equality comparison depends on sign!");
+ DSTr = Builder.CreateICmp(UIPred, LHSr, RHSr);
+ DSTi = Builder.CreateICmp(UIPred, LHSi, RHSi);
+ if (UIPred == CmpInst::ICMP_EQ)
+ return Builder.CreateAnd(DSTr, DSTi);
+ assert(UIPred == CmpInst::ICMP_NE && "Unhandled complex comparison!");
+ return Builder.CreateOr(DSTr, DSTi);
+ }
+
+ if (LHS->getType()->isFPOrFPVector())
+ return Builder.CreateFCmp(FPPred, LHS, RHS);
+
+ // Determine which predicate to use based on signedness.
+ CmpInst::Predicate pred = TYPE_UNSIGNED(TREE_TYPE(lhs)) ? UIPred : SIPred;
+ return Builder.CreateICmp(pred, LHS, RHS);
+}
+
+/// EmitBinOp - 'exp' is a binary operator.
+///
+Value *TreeToLLVM::EmitBinOp(tree type, tree_code code, tree op0, tree op1,
+ unsigned Opc) {
+ if (TREE_CODE(type) == COMPLEX_TYPE)
+ return EmitComplexBinOp(type, code, op0, op1);
+
+ Value *LHS = EmitGimpleReg(op0);
+ Value *RHS = EmitGimpleReg(op1);
+
+ // GCC has no problem with things like "xor uint X, int 17", and X-Y, where
+ // X and Y are pointer types, but the result is an integer. As such, convert
+ // everything to the result type.
+ bool LHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(op0));
+ bool RHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(op1));
+ bool TyIsSigned = !TYPE_UNSIGNED(type);
+ bool IsExactDiv = code == EXACT_DIV_EXPR;
+ bool IsPlus = code == PLUS_EXPR;
+
+ const Type *Ty = ConvertType(type);
+ LHS = CastToAnyType(LHS, LHSIsSigned, Ty, TyIsSigned);
+ RHS = CastToAnyType(RHS, RHSIsSigned, Ty, TyIsSigned);
+
+ // If it's And, Or, or Xor, make sure the operands are casted to the right
+ // integer types first.
+ bool isLogicalOp = Opc == Instruction::And || Opc == Instruction::Or ||
+ Opc == Instruction::Xor;
+ const Type *ResTy = Ty;
+ if (isLogicalOp &&
+ (Ty->isFloatingPoint() ||
+ (isa<VectorType>(Ty) &&
+ cast<VectorType>(Ty)->getElementType()->isFloatingPoint()))) {
+ Ty = getSuitableBitCastIntType(Ty);
+ LHS = Builder.CreateBitCast(LHS, Ty);
+ RHS = Builder.CreateBitCast(RHS, Ty);
+ }
+
+ Value *V;
+ if (Opc == Instruction::SDiv && IsExactDiv)
+ V = Builder.CreateExactSDiv(LHS, RHS);
+ else if (Opc == Instruction::Add && IsPlus && TyIsSigned && !flag_wrapv)
+ V = Builder.CreateNSWAdd(LHS, RHS);
+ else
+ V = Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
+ if (ResTy != Ty)
+ V = Builder.CreateBitCast(V, ResTy);
+ return V;
+}
+
+Value *TreeToLLVM::EmitTruthOp(tree type, tree op0, tree op1, unsigned Opc) {
+ Value *LHS = EmitGimpleReg(op0);
+ Value *RHS = EmitGimpleReg(op1);
+
+ // This is a truth operation like the strict &&,||,^^. Convert to bool as
+ // a test against zero.
+ LHS = Builder.CreateICmpNE(LHS,
+ Constant::getNullValue(LHS->getType()),
+ "toBool");
+ RHS = Builder.CreateICmpNE(RHS,
+ Constant::getNullValue(RHS->getType()),
+ "toBool");
+
+ Value *Res = Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
+ return Builder.CreateZExt(Res, ConvertType(type));
+}
+
+
+Value *TreeToLLVM::EmitShiftOp(tree op0, tree op1, unsigned Opc) {
+ Value *LHS = EmitGimpleReg(op0);
+ Value *RHS = EmitGimpleReg(op1);
+ if (RHS->getType() != LHS->getType())
+ RHS = Builder.CreateIntCast(RHS, LHS->getType(), false,
+ (RHS->getNameStr()+".cast").c_str());
+
+ return Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
+}
+
+Value *TreeToLLVM::EmitRotateOp(tree type, tree op0, tree op1,
+ unsigned Opc1, unsigned Opc2) {
+ Value *In = EmitGimpleReg(op0);
+ Value *Amt = EmitGimpleReg(op1);
+
+ if (Amt->getType() != In->getType())
+ Amt = Builder.CreateIntCast(Amt, In->getType(), false,
+ (Amt->getNameStr()+".cast").c_str());
+
+ Value *TypeSize =
+ ConstantInt::get(In->getType(),
+ In->getType()->getPrimitiveSizeInBits());
+
+ // Do the two shifts.
+ Value *V1 = Builder.CreateBinOp((Instruction::BinaryOps)Opc1, In, Amt);
+ Value *OtherShift = Builder.CreateSub(TypeSize, Amt);
+ Value *V2 = Builder.CreateBinOp((Instruction::BinaryOps)Opc2, In, OtherShift);
+
+ // Or the two together to return them.
+ Value *Merge = Builder.CreateOr(V1, V2);
+ return CastToUIntType(Merge, ConvertType(type));
+}
+
+Value *TreeToLLVM::EmitMinMaxExpr(tree type, tree op0, tree op1,
+ unsigned UIPred, unsigned SIPred,
+ unsigned FPPred, bool isMax) {
+ Value *LHS = EmitGimpleReg(op0);
+ Value *RHS = EmitGimpleReg(op1);
+
+ const Type *Ty = ConvertType(type);
+
+ // The LHS, RHS and Ty could be integer, floating or pointer typed. We need
+ // to convert the LHS and RHS into the destination type before doing the
+ // comparison. Use CastInst::getCastOpcode to get this right.
+ bool TyIsSigned = !TYPE_UNSIGNED(type);
+ bool LHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(op0));
+ bool RHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(op1));
+ Instruction::CastOps opcode =
+ CastInst::getCastOpcode(LHS, LHSIsSigned, Ty, TyIsSigned);
+ LHS = Builder.CreateCast(opcode, LHS, Ty);
+ opcode = CastInst::getCastOpcode(RHS, RHSIsSigned, Ty, TyIsSigned);
+ RHS = Builder.CreateCast(opcode, RHS, Ty);
+
+ Value *Compare;
+ if (LHS->getType()->isFloatingPoint())
+ Compare = Builder.CreateFCmp(FCmpInst::Predicate(FPPred), LHS, RHS);
+ else if (TYPE_UNSIGNED(type))
+ Compare = Builder.CreateICmp(ICmpInst::Predicate(UIPred), LHS, RHS);
+ else
+ Compare = Builder.CreateICmp(ICmpInst::Predicate(SIPred), LHS, RHS);
+
+ return Builder.CreateSelect(Compare, LHS, RHS, isMax ? "max" : "min");
+}
+
+Value *TreeToLLVM::EmitFLOOR_MOD_EXPR(tree type, tree op0, tree op1) {
+ // Notation: FLOOR_MOD_EXPR <-> Mod, TRUNC_MOD_EXPR <-> Rem.
+
+ // We express Mod in terms of Rem as follows: if RHS exactly divides LHS,
+ // or the values of LHS and RHS have the same sign, then Mod equals Rem.
+ // Otherwise Mod equals Rem + RHS. This means that LHS Mod RHS traps iff
+ // LHS Rem RHS traps.
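+ // For example, -7 Mod 3: Rem is -1, the signs differ and Rem is non-zero,
+ // so Mod = Rem + RHS = 2, as expected for flooring division.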
+ if (TYPE_UNSIGNED(type))
+ // LHS and RHS values must have the same sign if their type is unsigned.
+ return EmitBinOp(type, FLOOR_MOD_EXPR, op0, op1, Instruction::URem);
+
+ const Type *Ty = ConvertType(type);
+ Constant *Zero = ConstantInt::get(Ty, 0);
+
+ Value *LHS = EmitGimpleReg(op0);
+ Value *RHS = EmitGimpleReg(op1);
+
+ // The two possible values for Mod.
+ Value *Rem = Builder.CreateSRem(LHS, RHS, "rem");
+ Value *RemPlusRHS = Builder.CreateAdd(Rem, RHS);
+
+ // HaveSameSign: (LHS >= 0) == (RHS >= 0).
+ Value *LHSIsPositive = Builder.CreateICmpSGE(LHS, Zero);
+ Value *RHSIsPositive = Builder.CreateICmpSGE(RHS, Zero);
+ Value *HaveSameSign = Builder.CreateICmpEQ(LHSIsPositive,RHSIsPositive);
+
+ // RHS exactly divides LHS iff Rem is zero.
+ Value *RemIsZero = Builder.CreateICmpEQ(Rem, Zero);
+
+ Value *SameAsRem = Builder.CreateOr(HaveSameSign, RemIsZero);
+ return Builder.CreateSelect(SameAsRem, Rem, RemPlusRHS, "mod");
+}
+
+Value *TreeToLLVM::EmitCEIL_DIV_EXPR(tree type, tree op0, tree op1) {
+ // Notation: CEIL_DIV_EXPR <-> CDiv, TRUNC_DIV_EXPR <-> Div.
+
+ // CDiv calculates LHS/RHS by rounding up to the nearest integer. In terms
+ // of Div this means if the values of LHS and RHS have opposite signs or if
+ // LHS is zero, then CDiv necessarily equals Div; and
+ // LHS CDiv RHS = (LHS - Sign(RHS)) Div RHS + 1
+ // otherwise.
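+ // For example, 7 CDiv 3 = (7 - 1) Div 3 + 1 = 3, while -7 CDiv 3 is just
+ // -7 Div 3 = -2 because the signs differ.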
+
+ const Type *Ty = ConvertType(type);
+ Constant *Zero = ConstantInt::get(Ty, 0);
+ Constant *One = ConstantInt::get(Ty, 1);
+ Constant *MinusOne = Constant::getAllOnesValue(Ty);
+
+ Value *LHS = EmitGimpleReg(op0);
+ Value *RHS = EmitGimpleReg(op1);
+
+ if (!TYPE_UNSIGNED(type)) {
+ // In the case of signed arithmetic, we calculate CDiv as follows:
+ // LHS CDiv RHS = (LHS - Sign(RHS) * Offset) Div RHS + Offset,
+ // where Offset is 1 if LHS and RHS have the same sign and LHS is
+ // not zero, and 0 otherwise.
+
+ // On some machines INT_MIN Div -1 traps. You might expect a trap for
+ // INT_MIN CDiv -1 too, but this implementation will not generate one.
+ // Quick quiz question: what value is returned for INT_MIN CDiv -1?
+
+ // Determine the signs of LHS and RHS, and whether they have the same sign.
+ Value *LHSIsPositive = Builder.CreateICmpSGE(LHS, Zero);
+ Value *RHSIsPositive = Builder.CreateICmpSGE(RHS, Zero);
+ Value *HaveSameSign = Builder.CreateICmpEQ(LHSIsPositive, RHSIsPositive);
+
+ // Offset equals 1 if LHS and RHS have the same sign and LHS is not zero.
+ Value *LHSNotZero = Builder.CreateICmpNE(LHS, Zero);
+ Value *OffsetOne = Builder.CreateAnd(HaveSameSign, LHSNotZero);
+ // ... otherwise it is 0.
+ Value *Offset = Builder.CreateSelect(OffsetOne, One, Zero);
+
+ // Calculate Sign(RHS) ...
+ Value *SignRHS = Builder.CreateSelect(RHSIsPositive, One, MinusOne);
+ // ... and Sign(RHS) * Offset
+ Value *SignedOffset = Builder.CreateSExt(OffsetOne, Ty);
+ SignedOffset = Builder.CreateAnd(SignRHS, SignedOffset);
+
+ // Return CDiv = (LHS - Sign(RHS) * Offset) Div RHS + Offset.
+ Value *CDiv = Builder.CreateSub(LHS, SignedOffset);
+ CDiv = Builder.CreateSDiv(CDiv, RHS);
+ return Builder.CreateAdd(CDiv, Offset, "cdiv");
+ }
+
+ // In the case of unsigned arithmetic, LHS and RHS necessarily have the
+ // same sign, so we can use
+ // LHS CDiv RHS = (LHS - 1) Div RHS + 1
+ // as long as LHS is non-zero.
+
+ // Offset is 1 if LHS is non-zero, 0 otherwise.
+ Value *LHSNotZero = Builder.CreateICmpNE(LHS, Zero);
+ Value *Offset = Builder.CreateSelect(LHSNotZero, One, Zero);
+
+ // Return CDiv = (LHS - Offset) Div RHS + Offset.
+ Value *CDiv = Builder.CreateSub(LHS, Offset);
+ CDiv = Builder.CreateUDiv(CDiv, RHS);
+ return Builder.CreateAdd(CDiv, Offset, "cdiv");
+}
+
+Value *TreeToLLVM::EmitFLOOR_DIV_EXPR(tree type, tree op0, tree op1) {
+ // Notation: FLOOR_DIV_EXPR <-> FDiv, TRUNC_DIV_EXPR <-> Div.
+ Value *LHS = EmitGimpleReg(op0);
+ Value *RHS = EmitGimpleReg(op1);
+
+ // FDiv calculates LHS/RHS by rounding down to the nearest integer. In terms
+ // of Div this means if the values of LHS and RHS have the same sign or if LHS
+ // is zero, then FDiv necessarily equals Div; and
+ // LHS FDiv RHS = (LHS + Sign(RHS)) Div RHS - 1
+ // otherwise.
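+ // For example, -7 FDiv 3 = (-7 + 1) Div 3 - 1 = -3, while 7 FDiv 3 is just
+ // 7 Div 3 = 2 because the signs agree.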
+
+ if (TYPE_UNSIGNED(type))
+ // In the case of unsigned arithmetic, LHS and RHS necessarily have the
+ // same sign, so FDiv is the same as Div.
+ return Builder.CreateUDiv(LHS, RHS, "fdiv");
+
+ const Type *Ty = ConvertType(type);
+ Constant *Zero = ConstantInt::get(Ty, 0);
+ Constant *One = ConstantInt::get(Ty, 1);
+ Constant *MinusOne = Constant::getAllOnesValue(Ty);
+
+ // In the case of signed arithmetic, we calculate FDiv as follows:
+ // LHS FDiv RHS = (LHS + Sign(RHS) * Offset) Div RHS - Offset,
+ // where Offset is 1 if LHS and RHS have opposite signs and LHS is
+ // not zero, and 0 otherwise.
+
+ // Determine the signs of LHS and RHS, and whether they have the same sign.
+ Value *LHSIsPositive = Builder.CreateICmpSGE(LHS, Zero);
+ Value *RHSIsPositive = Builder.CreateICmpSGE(RHS, Zero);
+ Value *SignsDiffer = Builder.CreateICmpNE(LHSIsPositive, RHSIsPositive);
+
+ // Offset equals 1 if LHS and RHS have opposite signs and LHS is not zero.
+ Value *LHSNotZero = Builder.CreateICmpNE(LHS, Zero);
+ Value *OffsetOne = Builder.CreateAnd(SignsDiffer, LHSNotZero);
+ // ... otherwise it is 0.
+ Value *Offset = Builder.CreateSelect(OffsetOne, One, Zero);
+
+ // Calculate Sign(RHS) ...
+ Value *SignRHS = Builder.CreateSelect(RHSIsPositive, One, MinusOne);
+ // ... and Sign(RHS) * Offset
+ Value *SignedOffset = Builder.CreateSExt(OffsetOne, Ty);
+ SignedOffset = Builder.CreateAnd(SignRHS, SignedOffset);
+
+ // Return FDiv = (LHS + Sign(RHS) * Offset) Div RHS - Offset.
+ Value *FDiv = Builder.CreateAdd(LHS, SignedOffset);
+ FDiv = Builder.CreateSDiv(FDiv, RHS);
+ return Builder.CreateSub(FDiv, Offset, "fdiv");
+}
+
+Value *TreeToLLVM::EmitROUND_DIV_EXPR(tree type, tree op0, tree op1) {
+ // Notation: ROUND_DIV_EXPR <-> RDiv, TRUNC_DIV_EXPR <-> Div.
+
+ // RDiv calculates LHS/RHS by rounding to the nearest integer. Ties
+ // are broken by rounding away from zero. In terms of Div this means:
+ // LHS RDiv RHS = (LHS + (RHS Div 2)) Div RHS
+ // if the values of LHS and RHS have the same sign; and
+ // LHS RDiv RHS = (LHS - (RHS Div 2)) Div RHS
+ // if the values of LHS and RHS differ in sign. The intermediate
+ // expressions in these formulae can overflow, so some tweaking is
+ // required to ensure correct results. The details depend on whether
+ // we are doing signed or unsigned arithmetic.
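+ // For example, 7 RDiv 3 = (7 + 1) Div 3 = 2, while -7 RDiv 2 =
+ // (-7 - 1) Div 2 = -4: the tie at -3.5 is broken away from zero.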
+
+ const Type *Ty = ConvertType(type);
+ Constant *Zero = ConstantInt::get(Ty, 0);
+ Constant *Two = ConstantInt::get(Ty, 2);
+
+ Value *LHS = EmitGimpleReg(op0);
+ Value *RHS = EmitGimpleReg(op1);
+
+ if (!TYPE_UNSIGNED(type)) {
+ // In the case of signed arithmetic, we calculate RDiv as follows:
+ // LHS RDiv RHS = (sign) ( (|LHS| + (|RHS| UDiv 2)) UDiv |RHS| ),
+ // where sign is +1 if LHS and RHS have the same sign, -1 if their
+ // signs differ. Doing the computation unsigned ensures that there
+ // is no overflow.
+
+ // On some machines INT_MIN Div -1 traps. You might expect a trap for
+ // INT_MIN RDiv -1 too, but this implementation will not generate one.
+ // Quick quiz question: what value is returned for INT_MIN RDiv -1?
+
+ // Determine the signs of LHS and RHS, and whether they have the same sign.
+ Value *LHSIsPositive = Builder.CreateICmpSGE(LHS, Zero);
+ Value *RHSIsPositive = Builder.CreateICmpSGE(RHS, Zero);
+ Value *HaveSameSign = Builder.CreateICmpEQ(LHSIsPositive, RHSIsPositive);
+
+ // Calculate |LHS| ...
+ Value *MinusLHS = Builder.CreateNeg(LHS);
+ Value *AbsLHS = Builder.CreateSelect(LHSIsPositive, LHS, MinusLHS,
+ (LHS->getNameStr()+".abs").c_str());
+ // ... and |RHS|
+ Value *MinusRHS = Builder.CreateNeg(RHS);
+ Value *AbsRHS = Builder.CreateSelect(RHSIsPositive, RHS, MinusRHS,
+ (RHS->getNameStr()+".abs").c_str());
+
+ // Calculate AbsRDiv = (|LHS| + (|RHS| UDiv 2)) UDiv |RHS|.
+ Value *HalfAbsRHS = Builder.CreateUDiv(AbsRHS, Two);
+ Value *Numerator = Builder.CreateAdd(AbsLHS, HalfAbsRHS);
+ Value *AbsRDiv = Builder.CreateUDiv(Numerator, AbsRHS);
+
+ // Return AbsRDiv or -AbsRDiv according to whether LHS and RHS have the
+ // same sign or not.
+ Value *MinusAbsRDiv = Builder.CreateNeg(AbsRDiv);
+ return Builder.CreateSelect(HaveSameSign, AbsRDiv, MinusAbsRDiv, "rdiv");
+ }
+
+ // In the case of unsigned arithmetic, LHS and RHS necessarily have the
+ // same sign, however overflow is a problem. We want to use the formula
+ // LHS RDiv RHS = (LHS + (RHS Div 2)) Div RHS,
+ // but if LHS + (RHS Div 2) overflows then we get the wrong result. Since
+ // the use of a conditional branch seems to be unavoidable, we choose the
+ // simple solution of explicitly checking for overflow, and using
+ // LHS RDiv RHS = ((LHS + (RHS Div 2)) - RHS) Div RHS + 1
+ // if it occurred.
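+ // For example, with 8-bit operands, 250 RDiv 100: 250 + 50 wraps to 44,
+ // which is less than 50, so the numerator becomes 44 - 100 = 200 (again
+ // wrapping), giving 200 Div 100 + 1 = 3, the correctly rounded value of 2.5.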
+
+ // Usually the numerator is LHS + (RHS Div 2); calculate this.
+ Value *HalfRHS = Builder.CreateUDiv(RHS, Two);
+ Value *Numerator = Builder.CreateAdd(LHS, HalfRHS);
+
+ // Did the calculation overflow?
+ Value *Overflowed = Builder.CreateICmpULT(Numerator, HalfRHS);
+
+ // If so, use (LHS + (RHS Div 2)) - RHS for the numerator instead.
+ Value *AltNumerator = Builder.CreateSub(Numerator, RHS);
+ Numerator = Builder.CreateSelect(Overflowed, AltNumerator, Numerator);
+
+ // Quotient = Numerator / RHS.
+ Value *Quotient = Builder.CreateUDiv(Numerator, RHS);
+
+ // Return Quotient unless we overflowed, in which case return Quotient + 1.
+ return Builder.CreateAdd(Quotient, CastToUIntType(Overflowed, Ty), "rdiv");
+}
+
+Value *TreeToLLVM::EmitPOINTER_PLUS_EXPR(tree type, tree op0, tree op1) {
+ Value *Ptr = EmitGimpleReg(op0); // The pointer.
+ Value *Idx = EmitGimpleReg(op1); // The offset in bytes.
+
+ // Convert the pointer into an i8* and add the offset to it.
+ Ptr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
+ Value *GEP = POINTER_TYPE_OVERFLOW_UNDEFINED ?
+ Builder.CreateInBoundsGEP(Ptr, Idx) : Builder.CreateGEP(Ptr, Idx);
+
+ // The result may be of a different pointer type.
+ return Builder.CreateBitCast(GEP, ConvertType(type));
+}
+
+Value *TreeToLLVM::EmitXXXXPART_EXPR(tree exp, unsigned Idx) {
+ return Builder.CreateExtractValue(Emit(TREE_OPERAND(exp, 0), 0), Idx);
+}
+
+Value *TreeToLLVM::EmitPAREN_EXPR(tree op) {
+ // TODO: Understand and correctly deal with this subtle expression.
+ return EmitGimpleReg(op);
+}
+
+
+//===----------------------------------------------------------------------===//
+// ... Exception Handling ...
+//===----------------------------------------------------------------------===//
+
+
+/// EmitEXC_PTR_EXPR - Handle EXC_PTR_EXPR.
+Value *TreeToLLVM::EmitEXC_PTR_EXPR(tree exp) {
+abort();
+//TODO CreateExceptionValues();
+//TODO // Load exception address.
+//TODO Value *V = Builder.CreateLoad(ExceptionValue, "eh_value");
+//TODO // Cast the address to the right pointer type.
+//TODO return Builder.CreateBitCast(V, ConvertType(TREE_TYPE(exp)));
+}
+
+/// EmitFILTER_EXPR - Handle FILTER_EXPR.
+Value *TreeToLLVM::EmitFILTER_EXPR(tree exp) {
+abort();
+//FIXME CreateExceptionValues();
+//FIXME // Load exception selector.
+//FIXME return Builder.CreateLoad(ExceptionSelectorValue, "eh_select");
+}
+
+//===----------------------------------------------------------------------===//
+// ... Inline Assembly and Register Variables ...
+//===----------------------------------------------------------------------===//
+
+
+/// Reads from register variables are handled by emitting an inline asm node
+/// that copies the value out of the specified register.
+Value *TreeToLLVM::EmitReadOfRegisterVariable(tree decl,
+ const MemRef *DestLoc) {
+ const Type *Ty = ConvertType(TREE_TYPE(decl));
+
+ // If there was an error, return something bogus.
+ if (ValidateRegisterVariable(decl)) {
+ if (Ty->isSingleValueType())
+ return UndefValue::get(Ty);
+ return 0; // Just don't copy something into DestLoc.
+ }
+
+ // Turn this into a 'tmp = call Ty asm "", "={reg}"()'.
+ FunctionType *FTy = FunctionType::get(Ty, std::vector<const Type*>(),false);
+
+ const char *Name = reg_names[decode_reg_name(extractRegisterName(decl))];
+
+ InlineAsm *IA = InlineAsm::get(FTy, "", "={"+std::string(Name)+"}", false);
+ CallInst *Call = Builder.CreateCall(IA);
+ Call->setDoesNotThrow();
+ return Call;
+}
+
+/// Stores to register variables are handled by emitting an inline asm node
+/// that copies the value into the specified register.
+void TreeToLLVM::EmitModifyOfRegisterVariable(tree decl, Value *RHS) {
+ // If there was an error, bail out.
+ if (ValidateRegisterVariable(decl))
+ return;
+
+ // Turn this into a 'call void asm sideeffect "", "{reg}"(Ty %RHS)'.
+ std::vector<const Type*> ArgTys;
+ ArgTys.push_back(ConvertType(TREE_TYPE(decl)));
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), ArgTys,
+ false);
+
+ const char *Name = reg_names[decode_reg_name(extractRegisterName(decl))];
+
+ InlineAsm *IA = InlineAsm::get(FTy, "", "{"+std::string(Name)+"}", true);
+ CallInst *Call = Builder.CreateCall(IA, RHS);
+ Call->setDoesNotThrow();
+}
+
+/// ConvertInlineAsmStr - Convert the specified inline asm string to an LLVM
+/// InlineAsm string. The GNU style inline asm template string has the
+/// following format:
+/// %N (for N a digit) means print operand N in usual manner.
+/// %= means a unique number for the inline asm.
+/// %lN means require operand N to be a CODE_LABEL or LABEL_REF
+/// and print the label name with no punctuation.
+/// %cN means require operand N to be a constant
+/// and print the constant expression with no punctuation.
+/// %aN means expect operand N to be a memory address
+/// (not a memory reference!) and print a reference to that address.
+/// %nN means expect operand N to be a constant and print a constant
+/// expression for minus the value of the operand, with no other
+/// punctuation.
+/// Other %xN expressions are turned into LLVM ${N:x} operands.
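+/// For example, "%0" becomes "$0", "%h2" becomes "${2:h}" and "%=" becomes
+/// "${:uid}".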
+///
+static std::string ConvertInlineAsmStr(gimple stmt, tree outputs, tree inputs,
+ tree labels, unsigned NumOperands) {
+ const char *AsmStr = gimple_asm_string(stmt);
+
+ // gimple_asm_input_p - This flag is set if this is a non-extended ASM,
+ // which means that the asm string should not be interpreted, other than
+ // to escape $'s.
+ if (gimple_asm_input_p(stmt)) {
+ const char *InStr = AsmStr;
+ std::string Result;
+ while (1) {
+ switch (*InStr++) {
+ case 0: return Result; // End of string.
+ default: Result += InStr[-1]; break; // Normal character.
+ case '$': Result += "$$"; break; // Escape '$' characters.
+ }
+ }
+ }
+
+ // Expand [name] symbolic operand names.
+ tree str = resolve_asm_operand_names(build_string (strlen (AsmStr), AsmStr),
+ outputs, inputs, labels);
+
+ const char *InStr = TREE_STRING_POINTER(str);
+
+ std::string Result;
+ while (1) {
+ switch (*InStr++) {
+ case 0: return Result; // End of string.
+ default: Result += InStr[-1]; break; // Normal character.
+ case '$': Result += "$$"; break; // Escape '$' characters.
+#ifdef ASSEMBLER_DIALECT
+ // Note that we can't escape to ${, because that is the syntax for vars.
+ case '{': Result += "$("; break; // Escape '{' character.
+ case '}': Result += "$)"; break; // Escape '}' character.
+ case '|': Result += "$|"; break; // Escape '|' character.
+#endif
+ case '%': // GCC escape character.
+ char EscapedChar = *InStr++;
+ if (EscapedChar == '%') { // Escaped '%' character
+ Result += '%';
+ } else if (EscapedChar == '=') { // Unique ID for the asm instance.
+ Result += "${:uid}";
+ }
+#ifdef LLVM_ASM_EXTENSIONS
+ LLVM_ASM_EXTENSIONS(EscapedChar, InStr, Result)
+#endif
+ else if (ISALPHA(EscapedChar)) {
+ // % followed by a letter and some digits. This outputs an operand in a
+ // special way depending on the letter. We turn this into LLVM ${N:o}
+ // syntax.
+ char *EndPtr;
+ unsigned long OpNum = strtoul(InStr, &EndPtr, 10);
+
+ if (InStr == EndPtr) {
+ error_at(gimple_location(stmt),
+ "operand number missing after %%-letter");
+ return Result;
+ } else if (OpNum >= NumOperands) {
+ error_at(gimple_location(stmt), "operand number out of range");
+ return Result;
+ }
+ Result += "${" + utostr(OpNum) + ":" + EscapedChar + "}";
+ InStr = EndPtr;
+ } else if (ISDIGIT(EscapedChar)) {
+ char *EndPtr;
+ unsigned long OpNum = strtoul(InStr-1, &EndPtr, 10);
+ InStr = EndPtr;
+ Result += "$" + utostr(OpNum);
+#ifdef PRINT_OPERAND_PUNCT_VALID_P
+ } else if (PRINT_OPERAND_PUNCT_VALID_P((unsigned char)EscapedChar)) {
+ Result += "${:";
+ Result += EscapedChar;
+ Result += "}";
+#endif
+ } else {
+ output_operand_lossage("invalid %%-code");
+ }
+ break;
+ }
+ }
+}
+
+/// CanonicalizeConstraint - If we can canonicalize the constraint into
+/// something simpler, do so now. This turns register classes with a single
+/// register into the register itself, expands builtin constraints to multiple
+/// alternatives, etc.
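+/// For example, 'g' expands to "imr", and a constraint whose register class
+/// contains exactly one register is replaced by that register in braces
+/// (e.g. "{ax}" on x86).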
+static std::string CanonicalizeConstraint(const char *Constraint) {
+ std::string Result;
+
+ // Skip over modifier characters.
+ bool DoneModifiers = false;
+ while (!DoneModifiers) {
+ switch (*Constraint) {
+ default: DoneModifiers = true; break;
+ case '=': assert(0 && "Should be after '='s");
+ case '+': assert(0 && "'+' should already be expanded");
+ case '*':
+ case '?':
+ case '!':
+ ++Constraint;
+ break;
+ case '&': // Pass earlyclobber to LLVM.
+ case '%': // Pass commutative to LLVM.
+ Result += *Constraint++;
+ break;
+ case '#': // No constraint letters left.
+ return Result;
+ }
+ }
+
+ while (*Constraint) {
+ char ConstraintChar = *Constraint++;
+
+ // 'g' is just short-hand for 'imr'.
+ if (ConstraintChar == 'g') {
+ Result += "imr";
+ continue;
+ }
+
+ // Translate 'p' to 'r'. This is supposed to check for a valid memory
+ // address, but for inline assembly there is no way to know the mode of
+ // the data being addressed. Assume that a general register is always
+ // a valid address.
+ if (ConstraintChar == 'p')
+ ConstraintChar = 'r';
+
+ // See if this is a regclass constraint.
+ unsigned RegClass;
+ if (ConstraintChar == 'r')
+ // REG_CLASS_FROM_CONSTRAINT doesn't support 'r' for some reason.
+ RegClass = GENERAL_REGS;
+ else
+ RegClass = REG_CLASS_FROM_CONSTRAINT(Constraint[-1], Constraint-1);
+
+ if (RegClass == NO_REGS) { // not a reg class.
+ Result += ConstraintChar;
+ continue;
+ }
+
+ // Look to see if the specified regclass has exactly one member, and if so,
+ // what it is. Cache this information in AnalyzedRegClasses once computed.
+ static std::map<unsigned, int> AnalyzedRegClasses;
+
+ std::map<unsigned, int>::iterator I =
+ AnalyzedRegClasses.lower_bound(RegClass);
+
+ int RegMember;
+ if (I != AnalyzedRegClasses.end() && I->first == RegClass) {
+ // We've already computed this; reuse the value.
+ RegMember = I->second;
+ } else {
+ // Otherwise, scan the regclass, looking for exactly one member.
+ RegMember = -1; // -1 => not a single-register class.
+ for (unsigned j = 0; j != FIRST_PSEUDO_REGISTER; ++j)
+ if (TEST_HARD_REG_BIT(reg_class_contents[RegClass], j)) {
+ if (RegMember == -1) {
+ RegMember = j;
+ } else {
+ RegMember = -1;
+ break;
+ }
+ }
+ // Remember this answer for the next query of this regclass.
+ AnalyzedRegClasses.insert(I, std::make_pair(RegClass, RegMember));
+ }
+
+ // If we found a single-register register class, return the register.
+ if (RegMember != -1) {
+ Result += '{';
+ Result += reg_names[RegMember];
+ Result += '}';
+ } else {
+ Result += ConstraintChar;
+ }
+ }
+
+ return Result;
+}
+
+/// See if the given Operand can use the indicated Constraint (which is
+/// terminated by a null or a comma).
+/// Returns: -1=no, 0=yes but auxiliary instructions needed, 1=yes and free
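+/// For example, an INTEGER_CST operand scores 1 against an "i" constraint,
+/// 0 against "r", and -1 against "m".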
+int MatchWeight(const char *Constraint, tree Operand, bool isInput) {
+ const char *p = Constraint;
+ int RetVal = 0;
+ // Look for hard register operand. This matches only a constraint of a
+ // register class that includes that hard register, and it matches that
+ // perfectly, so we never return 0 in this case.
+ if (TREE_CODE(Operand) == VAR_DECL && DECL_HARD_REGISTER(Operand)) {
+ int RegNum = decode_reg_name(extractRegisterName(Operand));
+ RetVal = -1;
+ if (RegNum >= 0) {
+ do {
+ unsigned RegClass;
+ if (*p == 'r')
+ RegClass = GENERAL_REGS;
+ else
+ RegClass = REG_CLASS_FROM_CONSTRAINT(*p, p);
+ if (RegClass != NO_REGS &&
+ TEST_HARD_REG_BIT(reg_class_contents[RegClass], RegNum)) {
+ RetVal = 1;
+ break;
+ }
+ ++p;
+ } while (*p != ',' && *p != 0);
+ }
+ }
+ // Look for integer constant operand. This cannot match "m", and "i" is
+ // better than "r". FIXME target-dependent immediate letters are not handled
+ // yet; in general they require looking at the value.
+ if (TREE_CODE(Operand) == INTEGER_CST) {
+ do {
+ RetVal = -1;
+ if (*p == 'i' || *p == 'n') { // integer constant
+ RetVal = 1;
+ break;
+ }
+ if (*p != 'm' && *p != 'o' && *p != 'V') // not memory
+ RetVal = 0;
+ ++p;
+ } while (*p != ',' && *p != 0);
+ }
+ /// TEMPORARY. This has the effect that alternative 0 is always chosen,
+ /// except in the cases handled above.
+ return RetVal;
+}
+
+/// ChooseConstraintTuple: we know each of the NumInputs+NumOutputs strings
+/// in Constraints[] is a comma-separated list of NumChoices different
+/// constraints. Look through the operands and constraint possibilities
+/// and pick a tuple where all the operands match. Replace the strings
+/// in Constraints[] with the shorter strings from that tuple (malloc'ed,
+/// the caller is responsible for cleaning them up). Later processing can alter
+/// what Constraints points to, so to make sure we delete everything, the
+/// addresses of everything we allocated are also returned in ReplacementStrings.
+/// Casting back and forth from char* to const char* is Ugly, but we have to
+/// interface with C code that expects const char*.
+///
+/// gcc's algorithm for picking "the best" tuple is quite complicated, and
+/// is performed after things like SROA, not before. At the moment we are
+/// just trying to pick one that will work. This may get refined.
+static void
+ChooseConstraintTuple(const char **Constraints, gimple stmt, tree outputs,
+ tree inputs, unsigned NumOutputs, unsigned NumInputs,
+ unsigned NumChoices, const char **ReplacementStrings)
+{
+ int MaxWeight = -1;
+ unsigned int CommasToSkip = 0;
+ int *Weights = (int *)alloca(NumChoices * sizeof(int));
+ // RunningConstraints is pointers into the Constraints strings which
+ // are incremented as we go to point to the beginning of each
+ // comma-separated alternative.
+ const char** RunningConstraints =
+ (const char**)alloca((NumInputs+NumOutputs)*sizeof(const char*));
+ memcpy(RunningConstraints, Constraints,
+ (NumInputs+NumOutputs) * sizeof(const char*));
+ // The entire point of this loop is to compute CommasToSkip.
+ for (unsigned int i=0; i<NumChoices; i++) {
+ Weights[i] = 0;
+ unsigned int j = 0;
+ for (tree Output = outputs; j<NumOutputs;
+ j++, Output = TREE_CHAIN(Output)) {
+ if (i==0)
+ RunningConstraints[j]++; // skip leading =
+ const char* p = RunningConstraints[j];
+ while (*p=='*' || *p=='&' || *p=='%') // skip modifiers
+ p++;
+ if (Weights[i] != -1) {
+ int w = MatchWeight(p, TREE_VALUE(Output), false);
+ // Nonmatch means the entire tuple doesn't match. However, we
+ // keep scanning to set up RunningConstraints correctly for the
+ // next tuple.
+ if (w < 0)
+ Weights[i] = -1;
+ else
+ Weights[i] += w;
+ }
+ while (*p!=0 && *p!=',')
+ p++;
+ if (*p!=0) {
+ p++; // skip comma
+ while (*p=='*' || *p=='&' || *p=='%')
+ p++; // skip modifiers
+ }
+ RunningConstraints[j] = p;
+ }
+ assert(j==NumOutputs);
+ for (tree Input = inputs; j<NumInputs+NumOutputs;
+ j++, Input = TREE_CHAIN(Input)) {
+ const char* p = RunningConstraints[j];
+ if (Weights[i] != -1) {
+ int w = MatchWeight(p, TREE_VALUE(Input), true);
+ if (w < 0)
+ Weights[i] = -1; // As above.
+ else
+ Weights[i] += w;
+ }
+ while (*p!=0 && *p!=',')
+ p++;
+ if (*p!=0)
+ p++;
+ RunningConstraints[j] = p;
+ }
+ if (Weights[i]>MaxWeight) {
+ CommasToSkip = i;
+ MaxWeight = Weights[i];
+ }
+ }
+ // We have picked an alternative (the CommasToSkip'th one).
+ // Change Constraints to point to malloc'd copies of the appropriate
+ // constraints picked out of the original strings.
+ for (unsigned int i=0; i<NumInputs+NumOutputs; i++) {
+ assert(*(RunningConstraints[i])==0); // sanity check
+ const char* start = Constraints[i];
+ if (i<NumOutputs)
+ start++; // skip '=' or '+'
+ const char* end = start;
+ while (*end != ',' && *end != 0)
+ end++;
+ for (unsigned int j=0; j<CommasToSkip; j++) {
+ start = end+1;
+ end = start;
+ while (*end != ',' && *end != 0)
+ end++;
+ }
+ // String we want is at start..end-1 inclusive.
+ // For outputs, copy the leading = or +.
+ char *newstring;
+ if (i<NumOutputs) {
+ newstring = (char *)xmalloc(end-start+1+1);
+ newstring[0] = *(Constraints[i]);
+ strncpy(newstring+1, start, end-start);
+ newstring[end-start+1] = 0;
+ } else {
+ newstring = (char *)xmalloc(end-start+1);
+ strncpy(newstring, start, end-start);
+ newstring[end-start] = 0;
+ }
+ Constraints[i] = (const char *)newstring;
+ ReplacementStrings[i] = (const char*)newstring;
+ }
+}
+
+static void FreeConstTupleStrings(const char **ReplacementStrings,
+ unsigned int Size) {
+ for (unsigned int i=0; i<Size; i++)
+ free((char *)ReplacementStrings[i]);
+}
+
+// When extracting a register name from a DECL_HARD_REGISTER variable,
+// we normally want to look up RegNum in reg_names. This works on most
+// targets, where ADDITIONAL_REGISTER_NAMES are true synonyms. It does not
+// work on x86, where ADDITIONAL_REGISTER_NAMES are overlapping subregisters;
+// in particular AH and AL can't be distinguished if we go through reg_names.
+static const char* getConstraintRegNameFromGccTables(const char *RegName,
+ unsigned int RegNum) {
+#ifdef LLVM_DO_NOT_USE_REG_NAMES
+ if (*RegName == '%')
+ RegName++;
+ return RegName;
+#else
+ return reg_names[RegNum];
+#endif
+}
+
+
+//===----------------------------------------------------------------------===//
+// ... Helpers for Builtin Function Expansion ...
+//===----------------------------------------------------------------------===//
+
+Value *TreeToLLVM::BuildVector(const std::vector<Value*> &Ops) {
+ assert((Ops.size() & (Ops.size()-1)) == 0 &&
+ "Not a power-of-two sized vector!");
+ bool AllConstants = true;
+ for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
+ AllConstants &= isa<Constant>(Ops[i]);
+
+ // If this is a constant vector, create a ConstantVector.
+ if (AllConstants) {
+ std::vector<Constant*> CstOps;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ CstOps.push_back(cast<Constant>(Ops[i]));
+ return ConstantVector::get(CstOps);
+ }
+
+ // Otherwise, insertelement the values to build the vector.
+ Value *Result =
+ UndefValue::get(VectorType::get(Ops[0]->getType(), Ops.size()));
+
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ Result = Builder.CreateInsertElement(Result, Ops[i],
+ ConstantInt::get(Type::getInt32Ty(Context), i));
+
+ return Result;
+}
+
+/// BuildVector - This varargs function builds a literal vector ({} syntax) with
+/// the specified null-terminated list of elements. The elements must be all
+/// the same element type and there must be a power of two of them.
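+/// For example, BuildVector(A, B, C, D, NULL) builds the four-element vector
+/// <A, B, C, D>.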
+Value *TreeToLLVM::BuildVector(Value *Elt, ...) {
+ std::vector<Value*> Ops;
+ va_list VA;
+ va_start(VA, Elt);
+
+ Ops.push_back(Elt);
+ while (Value *Arg = va_arg(VA, Value *))
+ Ops.push_back(Arg);
+ va_end(VA);
+
+ return BuildVector(Ops);
+}
+
+/// BuildVectorShuffle - Given two vectors and a variable length list of int
+/// constants, create a shuffle of the elements of the inputs, where each dest
+/// is specified by the indexes. The int constant list must be as long as the
+/// number of elements in the input vector.
+///
+/// Undef values may be specified by passing in -1 as the result value.
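+/// For example, with two <4 x i32> inputs, the indices 0, 4, 1, 5 produce the
+/// interleaving <V1[0], V2[0], V1[1], V2[1]>.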
+///
+Value *TreeToLLVM::BuildVectorShuffle(Value *InVec1, Value *InVec2, ...) {
+ assert(isa<VectorType>(InVec1->getType()) &&
+ InVec1->getType() == InVec2->getType() && "Invalid shuffle!");
+ unsigned NumElements = cast<VectorType>(InVec1->getType())->getNumElements();
+
+ // Get all the indexes from varargs.
+ std::vector<Constant*> Idxs;
+ va_list VA;
+ va_start(VA, InVec2);
+ for (unsigned i = 0; i != NumElements; ++i) {
+ int idx = va_arg(VA, int);
+ if (idx == -1)
+ Idxs.push_back(UndefValue::get(Type::getInt32Ty(Context)));
+ else {
+ assert((unsigned)idx < 2*NumElements && "Element index out of range!");
+ Idxs.push_back(ConstantInt::get(Type::getInt32Ty(Context), idx));
+ }
+ }
+ va_end(VA);
+
+ // Turn this into the appropriate shuffle operation.
+ return Builder.CreateShuffleVector(InVec1, InVec2,
+ ConstantVector::get(Idxs));
+}
+
+//===----------------------------------------------------------------------===//
+// ... Builtin Function Expansion ...
+//===----------------------------------------------------------------------===//
+
+/// EmitFrontendExpandedBuiltinCall - For MD builtins that do not have a
+/// directly corresponding LLVM intrinsic, we allow the target to do some amount
+/// of lowering. This allows us to avoid having intrinsics for operations that
+/// directly correspond to LLVM constructs.
+///
+/// This method returns true if the builtin is handled, otherwise false.
+///
+bool TreeToLLVM::EmitFrontendExpandedBuiltinCall(gimple stmt, tree fndecl,
+ const MemRef *DestLoc,
+ Value *&Result) {
+#ifdef LLVM_TARGET_INTRINSIC_LOWER
+ // Get the result type and operand list in an easy-to-consume format.
+ const Type *ResultType = ConvertType(TREE_TYPE(TREE_TYPE(fndecl)));
+ std::vector<Value*> Operands;
+ for (unsigned i = 0, e = gimple_call_num_args(stmt); i != e; ++i) {
+ tree OpVal = gimple_call_arg(stmt, i);
+ if (AGGREGATE_TYPE_P(TREE_TYPE(OpVal))) {
+ MemRef OpLoc = CreateTempLoc(ConvertType(TREE_TYPE(OpVal)));
+ Emit(OpVal, &OpLoc);
+ Operands.push_back(Builder.CreateLoad(OpLoc.Ptr));
+ } else {
+ Operands.push_back(Emit(OpVal, 0));
+ }
+ }
+
+ unsigned FnCode = DECL_FUNCTION_CODE(fndecl);
+ return LLVM_TARGET_INTRINSIC_LOWER(stmt, FnCode, DestLoc, Result, ResultType,
+ Operands);
+#endif
+ return false;
+}
+
+/// TargetBuiltinCache - A cache of builtin intrinsics indexed by the GCC
+/// builtin number.
+static std::vector<Constant*> TargetBuiltinCache;
+
+void clearTargetBuiltinCache() {
+ TargetBuiltinCache.clear();
+}
+
+void TreeToLLVM::EmitMemoryBarrier(bool ll, bool ls, bool sl, bool ss) {
+ Value* C[5];
+ C[0] = ConstantInt::get(Type::getInt1Ty(Context), ll);
+ C[1] = ConstantInt::get(Type::getInt1Ty(Context), ls);
+ C[2] = ConstantInt::get(Type::getInt1Ty(Context), sl);
+ C[3] = ConstantInt::get(Type::getInt1Ty(Context), ss);
+ // Be conservatively safe.
+ C[4] = ConstantInt::get(Type::getInt1Ty(Context), true);
+
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::memory_barrier),
+ C, C + 5);
+}
+
+Value *
+TreeToLLVM::BuildBinaryAtomicBuiltin(gimple stmt, Intrinsic::ID id) {
+ const Type *ResultTy = ConvertType(gimple_call_return_type(stmt));
+ Value* C[2] = {
+ Emit(gimple_call_arg(stmt, 0), 0),
+ Emit(gimple_call_arg(stmt, 1), 0)
+ };
+ const Type* Ty[2];
+ Ty[0] = ResultTy;
+ Ty[1] = ResultTy->getPointerTo();
+ C[0] = Builder.CreateBitCast(C[0], Ty[1]);
+ C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
+ // The gcc builtins are also full memory barriers.
+ // FIXME: __sync_lock_test_and_set and __sync_lock_release require less.
+ EmitMemoryBarrier(true, true, true, true);
+ Value *Result =
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule, id,
+ Ty, 2),
+ C, C + 2);
+ Result = Builder.CreateIntToPtr(Result, ResultTy);
+ return Result;
+}
+
+Value *
+TreeToLLVM::BuildCmpAndSwapAtomicBuiltin(gimple stmt, tree type, bool isBool) {
+ const Type *ResultTy = ConvertType(type);
+ Value* C[3] = {
+ Emit(gimple_call_arg(stmt, 0), 0),
+ Emit(gimple_call_arg(stmt, 1), 0),
+ Emit(gimple_call_arg(stmt, 2), 0)
+ };
+ const Type* Ty[2];
+ Ty[0] = ResultTy;
+ Ty[1] = ResultTy->getPointerTo();
+ C[0] = Builder.CreateBitCast(C[0], Ty[1]);
+ C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
+ C[2] = Builder.CreateIntCast(C[2], Ty[0], "cast");
+
+ // The gcc builtins are also full memory barriers.
+ // FIXME: __sync_lock_test_and_set and __sync_lock_release require less.
+ EmitMemoryBarrier(true, true, true, true);
+
+ Value *Result =
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::atomic_cmp_swap,
+ Ty, 2),
+ C, C + 3);
+
+ // The gcc builtins are also full memory barriers.
+ // FIXME: __sync_lock_test_and_set and __sync_lock_release require less.
+ EmitMemoryBarrier(true, true, true, true);
+
+ if (isBool)
+ Result = CastToUIntType(Builder.CreateICmpEQ(Result, C[1]),
+ ConvertType(boolean_type_node));
+ else
+ Result = Builder.CreateIntToPtr(Result, ResultTy);
+ return Result;
+}
+
+/// EmitBuiltinCall - stmt is a call to fndecl, a builtin function. Try to emit
+/// the call in a special way, setting Result to the scalar result if necessary.
+/// If we can't handle the builtin, return false, otherwise return true.
+bool TreeToLLVM::EmitBuiltinCall(gimple stmt, tree fndecl,
+ const MemRef *DestLoc, Value *&Result) {
+ if (DECL_BUILT_IN_CLASS(fndecl) == BUILT_IN_MD) {
+ unsigned FnCode = DECL_FUNCTION_CODE(fndecl);
+ if (TargetBuiltinCache.size() <= FnCode)
+ TargetBuiltinCache.resize(FnCode+1);
+
+ // If we haven't converted this intrinsic over yet, do so now.
+ if (TargetBuiltinCache[FnCode] == 0) {
+ const char *TargetPrefix = "";
+#ifdef LLVM_TARGET_INTRINSIC_PREFIX
+ TargetPrefix = LLVM_TARGET_INTRINSIC_PREFIX;
+#endif
+ // If this builtin directly corresponds to an LLVM intrinsic, get the
+ // IntrinsicID now.
+ const char *BuiltinName = IDENTIFIER_POINTER(DECL_NAME(fndecl));
+ Intrinsic::ID IntrinsicID =
+ Intrinsic::getIntrinsicForGCCBuiltin(TargetPrefix, BuiltinName);
+ if (IntrinsicID == Intrinsic::not_intrinsic) {
+ if (EmitFrontendExpandedBuiltinCall(stmt, fndecl, DestLoc, Result))
+ return true;
+
+ error_at(gimple_location(stmt),
+ "unsupported target builtin %<%s%> used", BuiltinName);
+ const Type *ResTy = ConvertType(gimple_call_return_type(stmt));
+ if (ResTy->isSingleValueType())
+ Result = UndefValue::get(ResTy);
+ return true;
+ }
+
+ // Finally, map the intrinsic ID back to a name.
+ TargetBuiltinCache[FnCode] =
+ Intrinsic::getDeclaration(TheModule, IntrinsicID);
+ }
+
+ Result = EmitCallOf(TargetBuiltinCache[FnCode], stmt, DestLoc,
+ AttrListPtr());
+ return true;
+ }
+
+ enum built_in_function fcode = DECL_FUNCTION_CODE(fndecl);
+ switch (fcode) {
+ default: return false;
+ // Varargs builtins.
+ case BUILT_IN_VA_START: return EmitBuiltinVAStart(stmt);
+ case BUILT_IN_VA_END: return EmitBuiltinVAEnd(stmt);
+ case BUILT_IN_VA_COPY: return EmitBuiltinVACopy(stmt);
+ case BUILT_IN_CONSTANT_P: return EmitBuiltinConstantP(stmt, Result);
+ case BUILT_IN_ALLOCA: return EmitBuiltinAlloca(stmt, Result);
+ case BUILT_IN_EXTEND_POINTER: return EmitBuiltinExtendPointer(stmt, Result);
+ case BUILT_IN_EXPECT: return EmitBuiltinExpect(stmt, DestLoc, Result);
+ case BUILT_IN_MEMCPY: return EmitBuiltinMemCopy(stmt, Result,
+ false, false);
+ case BUILT_IN_MEMCPY_CHK: return EmitBuiltinMemCopy(stmt, Result,
+ false, true);
+ case BUILT_IN_MEMMOVE: return EmitBuiltinMemCopy(stmt, Result,
+ true, false);
+ case BUILT_IN_MEMMOVE_CHK: return EmitBuiltinMemCopy(stmt, Result,
+ true, true);
+ case BUILT_IN_MEMSET: return EmitBuiltinMemSet(stmt, Result, false);
+ case BUILT_IN_MEMSET_CHK: return EmitBuiltinMemSet(stmt, Result, true);
+ case BUILT_IN_BZERO: return EmitBuiltinBZero(stmt, Result);
+ case BUILT_IN_PREFETCH: return EmitBuiltinPrefetch(stmt);
+ case BUILT_IN_FRAME_ADDRESS: return EmitBuiltinReturnAddr(stmt, Result,true);
+ case BUILT_IN_RETURN_ADDRESS:
+ return EmitBuiltinReturnAddr(stmt, Result,false);
+ case BUILT_IN_STACK_SAVE: return EmitBuiltinStackSave(stmt, Result);
+ case BUILT_IN_STACK_RESTORE: return EmitBuiltinStackRestore(stmt);
+ case BUILT_IN_EXTRACT_RETURN_ADDR:
+ return EmitBuiltinExtractReturnAddr(stmt, Result);
+ case BUILT_IN_FROB_RETURN_ADDR:
+ return EmitBuiltinFrobReturnAddr(stmt, Result);
+ case BUILT_IN_INIT_TRAMPOLINE:
+ return EmitBuiltinInitTrampoline(stmt, Result);
+
+ // Builtins used by the exception handling runtime.
+ case BUILT_IN_DWARF_CFA:
+ return EmitBuiltinDwarfCFA(stmt, Result);
+#ifdef DWARF2_UNWIND_INFO
+ case BUILT_IN_DWARF_SP_COLUMN:
+ return EmitBuiltinDwarfSPColumn(stmt, Result);
+ case BUILT_IN_INIT_DWARF_REG_SIZES:
+ return EmitBuiltinInitDwarfRegSizes(stmt, Result);
+#endif
+ case BUILT_IN_EH_RETURN:
+ return EmitBuiltinEHReturn(stmt, Result);
+#ifdef EH_RETURN_DATA_REGNO
+ case BUILT_IN_EH_RETURN_DATA_REGNO:
+ return EmitBuiltinEHReturnDataRegno(stmt, Result);
+#endif
+ case BUILT_IN_UNWIND_INIT:
+ return EmitBuiltinUnwindInit(stmt, Result);
+
+ case BUILT_IN_OBJECT_SIZE: {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) {
+ error("Invalid builtin_object_size argument types");
+ return false;
+ }
+ tree ObjSizeTree = gimple_call_arg(stmt, 1);
+ STRIP_NOPS (ObjSizeTree);
+ if (TREE_CODE (ObjSizeTree) != INTEGER_CST
+ || tree_int_cst_sgn (ObjSizeTree) < 0
+ || compare_tree_int (ObjSizeTree, 3) > 0) {
+ error("Invalid second builtin_object_size argument");
+ return false;
+ }
+
+ // This treats everything as unknown, and is minimally defensible as
+ // correct, although completely useless.
+ if (tree_low_cst (ObjSizeTree, 0) < 2)
+ Result = Constant::getAllOnesValue(TD.getIntPtrType(Context));
+ else
+ Result = ConstantInt::get(TD.getIntPtrType(Context), 0);
+ return true;
+ }
+ // Unary bit counting intrinsics.
+ // NOTE: do not merge these case statements. That will cause the memoized
+ // Function* to be incorrectly shared across the differently typed functions.
+ case BUILT_IN_CLZ: // These GCC builtins always return int.
+ case BUILT_IN_CLZL:
+ case BUILT_IN_CLZLL: {
+ Value *Amt = Emit(gimple_call_arg(stmt, 0), 0);
+ EmitBuiltinUnaryOp(Amt, Result, Intrinsic::ctlz);
+ const Type *DestTy = ConvertType(gimple_call_return_type(stmt));
+ Result = Builder.CreateIntCast(Result, DestTy, "cast");
+ return true;
+ }
+ case BUILT_IN_CTZ: // These GCC builtins always return int.
+ case BUILT_IN_CTZL:
+ case BUILT_IN_CTZLL: {
+ Value *Amt = Emit(gimple_call_arg(stmt, 0), 0);
+ EmitBuiltinUnaryOp(Amt, Result, Intrinsic::cttz);
+ const Type *DestTy = ConvertType(gimple_call_return_type(stmt));
+ Result = Builder.CreateIntCast(Result, DestTy, "cast");
+ return true;
+ }
+ case BUILT_IN_PARITYLL:
+ case BUILT_IN_PARITYL:
+ case BUILT_IN_PARITY: {
+ Value *Amt = Emit(gimple_call_arg(stmt, 0), 0);
+ EmitBuiltinUnaryOp(Amt, Result, Intrinsic::ctpop);
+ Result = Builder.CreateBinOp(Instruction::And, Result,
+ ConstantInt::get(Result->getType(), 1));
+ return true;
+ }
+ case BUILT_IN_POPCOUNT: // These GCC builtins always return int.
+ case BUILT_IN_POPCOUNTL:
+ case BUILT_IN_POPCOUNTLL: {
+ Value *Amt = Emit(gimple_call_arg(stmt, 0), 0);
+ EmitBuiltinUnaryOp(Amt, Result, Intrinsic::ctpop);
+ const Type *DestTy = ConvertType(gimple_call_return_type(stmt));
+ Result = Builder.CreateIntCast(Result, DestTy, "cast");
+ return true;
+ }
+ case BUILT_IN_BSWAP32:
+ case BUILT_IN_BSWAP64: {
+ Value *Amt = Emit(gimple_call_arg(stmt, 0), 0);
+ EmitBuiltinUnaryOp(Amt, Result, Intrinsic::bswap);
+ const Type *DestTy = ConvertType(gimple_call_return_type(stmt));
+ Result = Builder.CreateIntCast(Result, DestTy, "cast");
+ return true;
+ }
+
+ case BUILT_IN_SQRT:
+ case BUILT_IN_SQRTF:
+ case BUILT_IN_SQRTL:
+ // If errno math has been disabled, expand these to llvm.sqrt calls.
+ if (!flag_errno_math) {
+ Result = EmitBuiltinSQRT(stmt);
+ Result = CastToFPType(Result, ConvertType(gimple_call_return_type(stmt)));
+ return true;
+ }
+ break;
+ case BUILT_IN_POWI:
+ case BUILT_IN_POWIF:
+ case BUILT_IN_POWIL:
+ Result = EmitBuiltinPOWI(stmt);
+ return true;
+ case BUILT_IN_POW:
+ case BUILT_IN_POWF:
+ case BUILT_IN_POWL:
+ // If errno math has been disabled, expand these to llvm.pow calls.
+ if (!flag_errno_math) {
+ Result = EmitBuiltinPOW(stmt);
+ return true;
+ }
+ break;
+ case BUILT_IN_LOG:
+ case BUILT_IN_LOGF:
+ case BUILT_IN_LOGL:
+ // If errno math has been disabled, expand these to llvm.log calls.
+ if (!flag_errno_math) {
+ Value *Amt = Emit(gimple_call_arg(stmt, 0), 0);
+ EmitBuiltinUnaryOp(Amt, Result, Intrinsic::log);
+ Result = CastToFPType(Result, ConvertType(gimple_call_return_type(stmt)));
+ return true;
+ }
+ break;
+ case BUILT_IN_LOG2:
+ case BUILT_IN_LOG2F:
+ case BUILT_IN_LOG2L:
+ // If errno math has been disabled, expand these to llvm.log2 calls.
+ if (!flag_errno_math) {
+ Value *Amt = Emit(gimple_call_arg(stmt, 0), 0);
+ EmitBuiltinUnaryOp(Amt, Result, Intrinsic::log2);
+ Result = CastToFPType(Result, ConvertType(gimple_call_return_type(stmt)));
+ return true;
+ }
+ break;
+ case BUILT_IN_LOG10:
+ case BUILT_IN_LOG10F:
+ case BUILT_IN_LOG10L:
+ // If errno math has been disabled, expand these to llvm.log10 calls.
+ if (!flag_errno_math) {
+ Value *Amt = Emit(gimple_call_arg(stmt, 0), 0);
+ EmitBuiltinUnaryOp(Amt, Result, Intrinsic::log10);
+ Result = CastToFPType(Result, ConvertType(gimple_call_return_type(stmt)));
+ return true;
+ }
+ break;
+ case BUILT_IN_EXP:
+ case BUILT_IN_EXPF:
+ case BUILT_IN_EXPL:
+ // If errno math has been disabled, expand these to llvm.exp calls.
+ if (!flag_errno_math) {
+ Value *Amt = Emit(gimple_call_arg(stmt, 0), 0);
+ EmitBuiltinUnaryOp(Amt, Result, Intrinsic::exp);
+ Result = CastToFPType(Result, ConvertType(gimple_call_return_type(stmt)));
+ return true;
+ }
+ break;
+ case BUILT_IN_EXP2:
+ case BUILT_IN_EXP2F:
+ case BUILT_IN_EXP2L:
+ // If errno math has been disabled, expand these to llvm.exp2 calls.
+ if (!flag_errno_math) {
+ Value *Amt = Emit(gimple_call_arg(stmt, 0), 0);
+ EmitBuiltinUnaryOp(Amt, Result, Intrinsic::exp2);
+ Result = CastToFPType(Result, ConvertType(gimple_call_return_type(stmt)));
+ return true;
+ }
+ break;
+ case BUILT_IN_FFS: // These GCC builtins always return int.
+ case BUILT_IN_FFSL:
+ case BUILT_IN_FFSLL: { // FFS(X) -> (x == 0 ? 0 : CTTZ(x)+1)
+ // The argument and return type of cttz should match the argument type of
+ // the ffs, but should ignore the return type of ffs.
+ Value *Amt = Emit(gimple_call_arg(stmt, 0), 0);
+ EmitBuiltinUnaryOp(Amt, Result, Intrinsic::cttz);
+ Result = Builder.CreateAdd(Result,
+ ConstantInt::get(Result->getType(), 1));
+ Result = CastToUIntType(Result, ConvertType(gimple_call_return_type(stmt)));
+ Value *Cond =
+ Builder.CreateICmpEQ(Amt,
+ Constant::getNullValue(Amt->getType()));
+ Result = Builder.CreateSelect(Cond,
+ Constant::getNullValue(Result->getType()),
+ Result);
+ return true;
+ }
+//TODO case BUILT_IN_FLT_ROUNDS: {
+//TODO Result =
+//TODO Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+//TODO Intrinsic::flt_rounds));
+//TODO Result = Builder.CreateBitCast(Result, ConvertType(gimple_call_return_type(stmt)));
+//TODO return true;
+//TODO }
+ case BUILT_IN_TRAP:
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::trap));
+ // Emit an explicit unreachable instruction.
+ Builder.CreateUnreachable();
+ EmitBlock(BasicBlock::Create(Context));
+ return true;
+
+//TODO // Convert annotation built-in to llvm.annotation intrinsic.
+//TODO case BUILT_IN_ANNOTATION: {
+//TODO
+//TODO // Get file and line number
+//TODO location_t locus = gimple_location(stmt);
+//TODO Constant *lineNo = ConstantInt::get(Type::getInt32Ty, LOCATION_LINE(locus));
+//TODO Constant *file = ConvertMetadataStringToGV(LOCATION_FILE(locus));
+//TODO const Type *SBP= Type::getInt8PtrTy(Context);
+//TODO file = Builder.getFolder().CreateBitCast(file, SBP);
+//TODO
+//TODO // Get arguments.
+//TODO tree arglist = CALL_EXPR_ARGS(stmt);
+//TODO Value *ExprVal = Emit(gimple_call_arg(stmt, 0), 0);
+//TODO const Type *Ty = ExprVal->getType();
+//TODO Value *StrVal = Emit(gimple_call_arg(stmt, 1), 0);
+//TODO
+//TODO SmallVector<Value *, 4> Args;
+//TODO Args.push_back(ExprVal);
+//TODO Args.push_back(StrVal);
+//TODO Args.push_back(file);
+//TODO Args.push_back(lineNo);
+//TODO
+//TODO assert(Ty && "llvm.annotation arg type may not be null");
+//TODO Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+//TODO Intrinsic::annotation,
+//TODO &Ty,
+//TODO 1),
+//TODO Args.begin(), Args.end());
+//TODO return true;
+//TODO }
+
+ case BUILT_IN_SYNCHRONIZE: {
+ // We assume like gcc appears to, that this only applies to cached memory.
+ Value* C[5];
+ C[0] = C[1] = C[2] = C[3] = ConstantInt::get(Type::getInt1Ty(Context), 1);
+ C[4] = ConstantInt::get(Type::getInt1Ty(Context), 0);
+
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::memory_barrier),
+ C, C + 5);
+ return true;
+ }
+#if defined(TARGET_ALPHA) || defined(TARGET_386) || defined(TARGET_POWERPC)
+ // gcc uses many names for the sync intrinsics
+ // The type of the first argument is not reliable for choosing the
+ // right llvm function; if the original type is not volatile, gcc has
+ // helpfully changed it to "volatile void *" at this point. The
+ // original type can be recovered from the function type in most cases.
+ // For lock_release and bool_compare_and_swap even that is not good
+ // enough, we have to key off the opcode.
+ // Note that Intrinsic::getDeclaration expects the type list in reversed
+ // order, while CreateCall expects the parameter list in normal order.
+ case BUILT_IN_BOOL_COMPARE_AND_SWAP_1: {
+ Result = BuildCmpAndSwapAtomicBuiltin(stmt, unsigned_char_type_node, true);
+ return true;
+ }
+ case BUILT_IN_BOOL_COMPARE_AND_SWAP_2: {
+ Result = BuildCmpAndSwapAtomicBuiltin(stmt, short_unsigned_type_node, true);
+ return true;
+ }
+ case BUILT_IN_BOOL_COMPARE_AND_SWAP_4: {
+ Result = BuildCmpAndSwapAtomicBuiltin(stmt, unsigned_type_node, true);
+ return true;
+ }
+ case BUILT_IN_BOOL_COMPARE_AND_SWAP_8: {
+#if defined(TARGET_POWERPC)
+ if (!TARGET_64BIT)
+ return false;
+#endif
+ Result = BuildCmpAndSwapAtomicBuiltin(stmt, long_long_unsigned_type_node,
+ true);
+ return true;
+ }
+
+ case BUILT_IN_VAL_COMPARE_AND_SWAP_8:
+#if defined(TARGET_POWERPC)
+ if (!TARGET_64BIT)
+ return false;
+#endif
+ case BUILT_IN_VAL_COMPARE_AND_SWAP_1:
+ case BUILT_IN_VAL_COMPARE_AND_SWAP_2:
+ case BUILT_IN_VAL_COMPARE_AND_SWAP_4: {
+ tree type = gimple_call_return_type(stmt);
+ Result = BuildCmpAndSwapAtomicBuiltin(stmt, type, false);
+ return true;
+ }
+ case BUILT_IN_FETCH_AND_ADD_8:
+#if defined(TARGET_POWERPC)
+ if (!TARGET_64BIT)
+ return false;
+#endif
+ case BUILT_IN_FETCH_AND_ADD_1:
+ case BUILT_IN_FETCH_AND_ADD_2:
+ case BUILT_IN_FETCH_AND_ADD_4: {
+ Result = BuildBinaryAtomicBuiltin(stmt, Intrinsic::atomic_load_add);
+ return true;
+ }
+ case BUILT_IN_FETCH_AND_SUB_8:
+#if defined(TARGET_POWERPC)
+ if (!TARGET_64BIT)
+ return false;
+#endif
+ case BUILT_IN_FETCH_AND_SUB_1:
+ case BUILT_IN_FETCH_AND_SUB_2:
+ case BUILT_IN_FETCH_AND_SUB_4: {
+ Result = BuildBinaryAtomicBuiltin(stmt, Intrinsic::atomic_load_sub);
+ return true;
+ }
+ case BUILT_IN_FETCH_AND_OR_8:
+#if defined(TARGET_POWERPC)
+ if (!TARGET_64BIT)
+ return false;
+#endif
+ case BUILT_IN_FETCH_AND_OR_1:
+ case BUILT_IN_FETCH_AND_OR_2:
+ case BUILT_IN_FETCH_AND_OR_4: {
+ Result = BuildBinaryAtomicBuiltin(stmt, Intrinsic::atomic_load_or);
+ return true;
+ }
+ case BUILT_IN_FETCH_AND_AND_8:
+#if defined(TARGET_POWERPC)
+ if (!TARGET_64BIT)
+ return false;
+#endif
+ case BUILT_IN_FETCH_AND_AND_1:
+ case BUILT_IN_FETCH_AND_AND_2:
+ case BUILT_IN_FETCH_AND_AND_4: {
+ Result = BuildBinaryAtomicBuiltin(stmt, Intrinsic::atomic_load_and);
+ return true;
+ }
+ case BUILT_IN_FETCH_AND_XOR_8:
+#if defined(TARGET_POWERPC)
+ if (!TARGET_64BIT)
+ return false;
+#endif
+ case BUILT_IN_FETCH_AND_XOR_1:
+ case BUILT_IN_FETCH_AND_XOR_2:
+ case BUILT_IN_FETCH_AND_XOR_4: {
+ Result = BuildBinaryAtomicBuiltin(stmt, Intrinsic::atomic_load_xor);
+ return true;
+ }
+ case BUILT_IN_FETCH_AND_NAND_8:
+#if defined(TARGET_POWERPC)
+ if (!TARGET_64BIT)
+ return false;
+#endif
+ case BUILT_IN_FETCH_AND_NAND_1:
+ case BUILT_IN_FETCH_AND_NAND_2:
+ case BUILT_IN_FETCH_AND_NAND_4: {
+ Result = BuildBinaryAtomicBuiltin(stmt, Intrinsic::atomic_load_nand);
+ return true;
+ }
+ case BUILT_IN_LOCK_TEST_AND_SET_8:
+#if defined(TARGET_POWERPC)
+ if (!TARGET_64BIT)
+ return false;
+#endif
+ case BUILT_IN_LOCK_TEST_AND_SET_1:
+ case BUILT_IN_LOCK_TEST_AND_SET_2:
+ case BUILT_IN_LOCK_TEST_AND_SET_4: {
+ Result = BuildBinaryAtomicBuiltin(stmt, Intrinsic::atomic_swap);
+ return true;
+ }
+
+ case BUILT_IN_ADD_AND_FETCH_8:
+#if defined(TARGET_POWERPC)
+ if (!TARGET_64BIT)
+ return false;
+#endif
+ case BUILT_IN_ADD_AND_FETCH_1:
+ case BUILT_IN_ADD_AND_FETCH_2:
+ case BUILT_IN_ADD_AND_FETCH_4: {
+ const Type *ResultTy = ConvertType(gimple_call_return_type(stmt));
+ Value* C[2] = {
+ Emit(gimple_call_arg(stmt, 0), 0),
+ Emit(gimple_call_arg(stmt, 1), 0)
+ };
+ const Type* Ty[2];
+ Ty[0] = ResultTy;
+ Ty[1] = ResultTy->getPointerTo();
+ C[0] = Builder.CreateBitCast(C[0], Ty[1]);
+ C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
+
+ // The gcc builtins are also full memory barriers.
+ // FIXME: __sync_lock_test_and_set and __sync_lock_release require less.
+ EmitMemoryBarrier(true, true, true, true);
+
+ Result =
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::atomic_load_add,
+ Ty, 2),
+ C, C + 2);
+
+ // The gcc builtins are also full memory barriers.
+ // FIXME: __sync_lock_test_and_set and __sync_lock_release require less.
+ EmitMemoryBarrier(true, true, true, true);
+
+ Result = Builder.CreateAdd(Result, C[1]);
+ Result = Builder.CreateIntToPtr(Result, ResultTy);
+ return true;
+ }
+ case BUILT_IN_SUB_AND_FETCH_8:
+#if defined(TARGET_POWERPC)
+ if (!TARGET_64BIT)
+ return false;
+#endif
+ case BUILT_IN_SUB_AND_FETCH_1:
+ case BUILT_IN_SUB_AND_FETCH_2:
+ case BUILT_IN_SUB_AND_FETCH_4: {
+ const Type *ResultTy = ConvertType(gimple_call_return_type(stmt));
+ Value* C[2] = {
+ Emit(gimple_call_arg(stmt, 0), 0),
+ Emit(gimple_call_arg(stmt, 1), 0)
+ };
+ const Type* Ty[2];
+ Ty[0] = ResultTy;
+ Ty[1] = ResultTy->getPointerTo();
+ C[0] = Builder.CreateBitCast(C[0], Ty[1]);
+ C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
+
+ // The gcc builtins are also full memory barriers.
+ // FIXME: __sync_lock_test_and_set and __sync_lock_release require less.
+ EmitMemoryBarrier(true, true, true, true);
+
+ Result =
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::atomic_load_sub,
+ Ty, 2),
+ C, C + 2);
+
+ // The gcc builtins are also full memory barriers.
+ // FIXME: __sync_lock_test_and_set and __sync_lock_release require less.
+ EmitMemoryBarrier(true, true, true, true);
+
+ Result = Builder.CreateSub(Result, C[1]);
+ Result = Builder.CreateIntToPtr(Result, ResultTy);
+ return true;
+ }
+ case BUILT_IN_OR_AND_FETCH_8:
+#if defined(TARGET_POWERPC)
+ if (!TARGET_64BIT)
+ return false;
+#endif
+ case BUILT_IN_OR_AND_FETCH_1:
+ case BUILT_IN_OR_AND_FETCH_2:
+ case BUILT_IN_OR_AND_FETCH_4: {
+ const Type *ResultTy = ConvertType(gimple_call_return_type(stmt));
+ Value* C[2] = {
+ Emit(gimple_call_arg(stmt, 0), 0),
+ Emit(gimple_call_arg(stmt, 1), 0)
+ };
+ const Type* Ty[2];
+ Ty[0] = ResultTy;
+ Ty[1] = ResultTy->getPointerTo();
+ C[0] = Builder.CreateBitCast(C[0], Ty[1]);
+ C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
+
+ // The gcc builtins are also full memory barriers.
+ // FIXME: __sync_lock_test_and_set and __sync_lock_release require less.
+ EmitMemoryBarrier(true, true, true, true);
+
+ Result =
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::atomic_load_or,
+ Ty, 2),
+ C, C + 2);
+
+ // The gcc builtins are also full memory barriers.
+ // FIXME: __sync_lock_test_and_set and __sync_lock_release require less.
+ EmitMemoryBarrier(true, true, true, true);
+
+ Result = Builder.CreateOr(Result, C[1]);
+ Result = Builder.CreateIntToPtr(Result, ResultTy);
+ return true;
+ }
+ case BUILT_IN_AND_AND_FETCH_8:
+#if defined(TARGET_POWERPC)
+ if (!TARGET_64BIT)
+ return false;
+#endif
+ case BUILT_IN_AND_AND_FETCH_1:
+ case BUILT_IN_AND_AND_FETCH_2:
+ case BUILT_IN_AND_AND_FETCH_4: {
+ const Type *ResultTy = ConvertType(gimple_call_return_type(stmt));
+ Value* C[2] = {
+ Emit(gimple_call_arg(stmt, 0), 0),
+ Emit(gimple_call_arg(stmt, 1), 0)
+ };
+ const Type* Ty[2];
+ Ty[0] = ResultTy;
+ Ty[1] = ResultTy->getPointerTo();
+ C[0] = Builder.CreateBitCast(C[0], Ty[1]);
+ C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
+
+ // The gcc builtins are also full memory barriers.
+ // FIXME: __sync_lock_test_and_set and __sync_lock_release require less.
+ EmitMemoryBarrier(true, true, true, true);
+
+ Result =
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::atomic_load_and,
+ Ty, 2),
+ C, C + 2);
+
+ // The gcc builtins are also full memory barriers.
+ // FIXME: __sync_lock_test_and_set and __sync_lock_release require less.
+ EmitMemoryBarrier(true, true, true, true);
+
+ Result = Builder.CreateAnd(Result, C[1]);
+ Result = Builder.CreateIntToPtr(Result, ResultTy);
+ return true;
+ }
+ case BUILT_IN_XOR_AND_FETCH_8:
+#if defined(TARGET_POWERPC)
+ if (!TARGET_64BIT)
+ return false;
+#endif
+ case BUILT_IN_XOR_AND_FETCH_1:
+ case BUILT_IN_XOR_AND_FETCH_2:
+ case BUILT_IN_XOR_AND_FETCH_4: {
+ const Type *ResultTy = ConvertType(gimple_call_return_type(stmt));
+ Value* C[2] = {
+ Emit(gimple_call_arg(stmt, 0), 0),
+ Emit(gimple_call_arg(stmt, 1), 0)
+ };
+ const Type* Ty[2];
+ Ty[0] = ResultTy;
+ Ty[1] = ResultTy->getPointerTo();
+ C[0] = Builder.CreateBitCast(C[0], Ty[1]);
+ C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
+
+ // The gcc builtins are also full memory barriers.
+ // FIXME: __sync_lock_test_and_set and __sync_lock_release require less.
+ EmitMemoryBarrier(true, true, true, true);
+
+ Result =
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::atomic_load_xor,
+ Ty, 2),
+ C, C + 2);
+
+ // The gcc builtins are also full memory barriers.
+ // FIXME: __sync_lock_test_and_set and __sync_lock_release require less.
+ EmitMemoryBarrier(true, true, true, true);
+
+ Result = Builder.CreateXor(Result, C[1]);
+ Result = Builder.CreateIntToPtr(Result, ResultTy);
+ return true;
+ }
+ case BUILT_IN_NAND_AND_FETCH_8:
+#if defined(TARGET_POWERPC)
+ if (!TARGET_64BIT)
+ return false;
+#endif
+ case BUILT_IN_NAND_AND_FETCH_1:
+ case BUILT_IN_NAND_AND_FETCH_2:
+ case BUILT_IN_NAND_AND_FETCH_4: {
+ const Type *ResultTy = ConvertType(gimple_call_return_type(stmt));
+ Value* C[2] = {
+ Emit(gimple_call_arg(stmt, 0), 0),
+ Emit(gimple_call_arg(stmt, 1), 0)
+ };
+ const Type* Ty[2];
+ Ty[0] = ResultTy;
+ Ty[1] = ResultTy->getPointerTo();
+ C[0] = Builder.CreateBitCast(C[0], Ty[1]);
+ C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
+
+ // The gcc builtins are also full memory barriers.
+ // FIXME: __sync_lock_test_and_set and __sync_lock_release require less.
+ EmitMemoryBarrier(true, true, true, true);
+
+ Result =
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::atomic_load_nand,
+ Ty, 2),
+ C, C + 2);
+
+ // The gcc builtins are also full memory barriers.
+ // FIXME: __sync_lock_test_and_set and __sync_lock_release require less.
+ EmitMemoryBarrier(true, true, true, true);
+
+ Result = Builder.CreateAnd(Builder.CreateNot(Result), C[1]);
+ Result = Builder.CreateIntToPtr(Result, ResultTy);
+ return true;
+ }
+
+ case BUILT_IN_LOCK_RELEASE_1:
+ case BUILT_IN_LOCK_RELEASE_2:
+ case BUILT_IN_LOCK_RELEASE_4:
+ case BUILT_IN_LOCK_RELEASE_8:
+ case BUILT_IN_LOCK_RELEASE_16: {
+ // This is effectively a volatile store of 0, and has no return value.
+ // The argument has typically been coerced to "volatile void*"; the
+ // only way to find the size of the operation is from the builtin
+ // opcode.
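+    // For example (illustrative), __sync_lock_release(&flag) with a 4-byte
+    // flag arrives here as BUILT_IN_LOCK_RELEASE_4, so the store below is
+    // done through an i32*.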
+ const Type *Ty;
+ switch(DECL_FUNCTION_CODE(fndecl)) {
+ case BUILT_IN_LOCK_RELEASE_16: // not handled; should use SSE on x86
+ default:
+ abort();
+ case BUILT_IN_LOCK_RELEASE_1:
+ Ty = Type::getInt8Ty(Context); break;
+ case BUILT_IN_LOCK_RELEASE_2:
+ Ty = Type::getInt16Ty(Context); break;
+ case BUILT_IN_LOCK_RELEASE_4:
+ Ty = Type::getInt32Ty(Context); break;
+ case BUILT_IN_LOCK_RELEASE_8:
+ Ty = Type::getInt64Ty(Context); break;
+ }
+ Value *Ptr = Emit(gimple_call_arg(stmt, 0), 0);
+ Ptr = Builder.CreateBitCast(Ptr, Ty->getPointerTo());
+ Builder.CreateStore(Constant::getNullValue(Ty), Ptr, true);
+ Result = 0;
+ return true;
+ }
+
+#endif //FIXME: these break the build for backends that haven't implemented them
+
+
+#if 1 // FIXME: Should handle these GCC extensions eventually.
+ case BUILT_IN_LONGJMP: {
+ if (validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) {
+ tree value = gimple_call_arg(stmt, 1);
+
+ if (TREE_CODE(value) != INTEGER_CST ||
+ cast<ConstantInt>(Emit(value, 0))->getValue() != 1) {
+ error ("%<__builtin_longjmp%> second argument must be 1");
+ return false;
+ }
+ }
+ }
+ case BUILT_IN_APPLY_ARGS:
+ case BUILT_IN_APPLY:
+ case BUILT_IN_RETURN:
+ case BUILT_IN_SAVEREGS:
+ case BUILT_IN_ARGS_INFO:
+ case BUILT_IN_NEXT_ARG:
+ case BUILT_IN_CLASSIFY_TYPE:
+ case BUILT_IN_AGGREGATE_INCOMING_ADDRESS:
+ case BUILT_IN_SETJMP_SETUP:
+ case BUILT_IN_SETJMP_DISPATCHER:
+ case BUILT_IN_SETJMP_RECEIVER:
+ case BUILT_IN_UPDATE_SETJMP_BUF:
+
+ // FIXME: HACK: Just ignore these.
+ {
+ const Type *Ty = ConvertType(gimple_call_return_type(stmt));
+ if (!Ty->isVoidTy())
+ Result = Constant::getNullValue(Ty);
+ return true;
+ }
+#endif // FIXME: Should handle these GCC extensions eventually.
+ }
+ return false;
+}
+
+bool TreeToLLVM::EmitBuiltinUnaryOp(Value *InVal, Value *&Result,
+ Intrinsic::ID Id) {
+ // The intrinsic might be overloaded in which case the argument is of
+ // varying type. Make sure that we specify the actual type for "iAny"
+ // by passing it as the 3rd and 4th parameters. This isn't needed for
+ // most intrinsics, but is needed for ctpop, cttz, ctlz.
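+  // For example, a 32-bit __builtin_popcount operand selects the i32
+  // overload of the intrinsic (llvm.ctpop.i32) here.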
+ const Type *Ty = InVal->getType();
+ Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Id, &Ty, 1),
+ InVal);
+ return true;
+}
+
+Value *TreeToLLVM::EmitBuiltinSQRT(gimple stmt) {
+ Value *Amt = Emit(gimple_call_arg(stmt, 0), 0);
+ const Type* Ty = Amt->getType();
+
+ return Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::sqrt, &Ty, 1),
+ Amt);
+}
+
+Value *TreeToLLVM::EmitBuiltinPOWI(gimple stmt) {
+ if (!validate_gimple_arglist(stmt, REAL_TYPE, INTEGER_TYPE, VOID_TYPE))
+ return 0;
+
+ Value *Val = Emit(gimple_call_arg(stmt, 0), 0);
+ Value *Pow = Emit(gimple_call_arg(stmt, 1), 0);
+ const Type *Ty = Val->getType();
+ Pow = CastToSIntType(Pow, Type::getInt32Ty(Context));
+
+ SmallVector<Value *,2> Args;
+ Args.push_back(Val);
+ Args.push_back(Pow);
+ return Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::powi, &Ty, 1),
+ Args.begin(), Args.end());
+}
+
+Value *TreeToLLVM::EmitBuiltinPOW(gimple stmt) {
+ if (!validate_gimple_arglist(stmt, REAL_TYPE, REAL_TYPE, VOID_TYPE))
+ return 0;
+
+ Value *Val = Emit(gimple_call_arg(stmt, 0), 0);
+ Value *Pow = Emit(gimple_call_arg(stmt, 1), 0);
+ const Type *Ty = Val->getType();
+
+ SmallVector<Value *,2> Args;
+ Args.push_back(Val);
+ Args.push_back(Pow);
+ return Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::pow, &Ty, 1),
+ Args.begin(), Args.end());
+}
+
+bool TreeToLLVM::EmitBuiltinConstantP(gimple stmt, Value *&Result) {
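+  // Anything gcc could prove constant should already have been folded away,
+  // so conservatively answer 0 (not a constant) here.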
+ Result = Constant::getNullValue(ConvertType(gimple_call_return_type(stmt)));
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinExtendPointer(gimple stmt, Value *&Result) {
+ tree arg0 = gimple_call_arg(stmt, 0);
+ Value *Amt = Emit(arg0, 0);
+ bool AmtIsSigned = !TYPE_UNSIGNED(TREE_TYPE(arg0));
+ bool ExpIsSigned = !TYPE_UNSIGNED(gimple_call_return_type(stmt));
+ Result = CastToAnyType(Amt, AmtIsSigned,
+ ConvertType(gimple_call_return_type(stmt)),
+ ExpIsSigned);
+ return true;
+}
+
+/// OptimizeIntoPlainBuiltIn - Return true if it's safe to lower the object
+/// size checking builtin calls (e.g. __builtin___memcpy_chk) into the plain
+/// non-checking calls. If the size of the argument is either -1 (unknown) or
+/// large enough to ensure no overflow (>= len), then it's safe to do so.
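+/// For example, __builtin___memcpy_chk(dst, src, 16, 32) may be lowered to a
+/// plain memcpy, since the known object size (32) is at least the length (16).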
+static bool OptimizeIntoPlainBuiltIn(gimple stmt, Value *Len, Value *Size) {
+ if (BitCastInst *SizeBC = dyn_cast<BitCastInst>(Size))
+ Size = SizeBC->getOperand(0);
+ ConstantInt *SizeCI = dyn_cast<ConstantInt>(Size);
+ if (!SizeCI)
+ return false;
+ if (SizeCI->isAllOnesValue())
+ // If size is -1, convert to plain memcpy, etc.
+ return true;
+
+ if (BitCastInst *LenBC = dyn_cast<BitCastInst>(Len))
+ Len = LenBC->getOperand(0);
+ ConstantInt *LenCI = dyn_cast<ConstantInt>(Len);
+ if (!LenCI)
+ return false;
+ if (SizeCI->getValue().ult(LenCI->getValue())) {
+ warning_at (gimple_location(stmt), 0,
+ "call to %D will always overflow destination buffer",
+ gimple_call_fndecl(stmt));
+ return false;
+ }
+ return true;
+}
+
+/// EmitBuiltinMemCopy - Emit an llvm.memcpy or llvm.memmove intrinsic,
+/// depending on the value of isMemMove.
+bool TreeToLLVM::EmitBuiltinMemCopy(gimple stmt, Value *&Result, bool isMemMove,
+ bool SizeCheck) {
+ if (SizeCheck) {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, POINTER_TYPE,
+ INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE))
+ return false;
+ } else {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, POINTER_TYPE,
+ INTEGER_TYPE, VOID_TYPE))
+ return false;
+ }
+
+ tree Dst = gimple_call_arg(stmt, 0);
+ tree Src = gimple_call_arg(stmt, 1);
+ unsigned SrcAlign = getPointerAlignment(Src);
+ unsigned DstAlign = getPointerAlignment(Dst);
+
+ Value *DstV = Emit(Dst, 0);
+ Value *SrcV = Emit(Src, 0);
+ Value *Len = Emit(gimple_call_arg(stmt, 2), 0);
+ if (SizeCheck) {
+ tree SizeArg = gimple_call_arg(stmt, 3);
+ Value *Size = Emit(SizeArg, 0);
+ if (!OptimizeIntoPlainBuiltIn(stmt, Len, Size))
+ return false;
+ }
+
+ Result = isMemMove ?
+ EmitMemMove(DstV, SrcV, Len, std::min(SrcAlign, DstAlign)) :
+ EmitMemCpy(DstV, SrcV, Len, std::min(SrcAlign, DstAlign));
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinMemSet(gimple stmt, Value *&Result, bool SizeCheck){
+ if (SizeCheck) {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE,
+ INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE))
+ return false;
+ } else {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE,
+ INTEGER_TYPE, VOID_TYPE))
+ return false;
+ }
+
+ tree Dst = gimple_call_arg(stmt, 0);
+ unsigned DstAlign = getPointerAlignment(Dst);
+
+ Value *DstV = Emit(Dst, 0);
+ Value *Val = Emit(gimple_call_arg(stmt, 1), 0);
+ Value *Len = Emit(gimple_call_arg(stmt, 2), 0);
+ if (SizeCheck) {
+ tree SizeArg = gimple_call_arg(stmt, 3);
+ Value *Size = Emit(SizeArg, 0);
+ if (!OptimizeIntoPlainBuiltIn(stmt, Len, Size))
+ return false;
+ }
+ Result = EmitMemSet(DstV, Val, Len, DstAlign);
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinBZero(gimple stmt, Value *&Result) {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE))
+ return false;
+
+ tree Dst = gimple_call_arg(stmt, 0);
+ unsigned DstAlign = getPointerAlignment(Dst);
+
+ Value *DstV = Emit(Dst, 0);
+ Value *Val = Constant::getNullValue(Type::getInt32Ty(Context));
+ Value *Len = Emit(gimple_call_arg(stmt, 1), 0);
+ EmitMemSet(DstV, Val, Len, DstAlign);
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinPrefetch(gimple stmt) {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, 0))
+ return false;
+
+ Value *Ptr = Emit(gimple_call_arg(stmt, 0), 0);
+ Value *ReadWrite = 0;
+ Value *Locality = 0;
+
+ if (gimple_call_num_args(stmt) > 1) { // Args 1/2 are optional
+ ReadWrite = Emit(gimple_call_arg(stmt, 1), 0);
+ if (!isa<ConstantInt>(ReadWrite)) {
+ error("second argument to %<__builtin_prefetch%> must be a constant");
+ ReadWrite = 0;
+ } else if (cast<ConstantInt>(ReadWrite)->getZExtValue() > 1) {
+ warning (0, "invalid second argument to %<__builtin_prefetch%>;"
+ " using zero");
+ ReadWrite = 0;
+ } else {
+ ReadWrite = Builder.getFolder().CreateIntCast(cast<Constant>(ReadWrite),
+ Type::getInt32Ty(Context), false);
+ }
+
+ if (gimple_call_num_args(stmt) > 2) {
+ Locality = Emit(gimple_call_arg(stmt, 2), 0);
+ if (!isa<ConstantInt>(Locality)) {
+ error("third argument to %<__builtin_prefetch%> must be a constant");
+ Locality = 0;
+ } else if (cast<ConstantInt>(Locality)->getZExtValue() > 3) {
+ warning(0, "invalid third argument to %<__builtin_prefetch%>; using 3");
+ Locality = 0;
+ } else {
+ Locality = Builder.getFolder().CreateIntCast(cast<Constant>(Locality),
+ Type::getInt32Ty(Context), false);
+ }
+ }
+ }
+
+ // Default to highly local read.
+ if (ReadWrite == 0)
+ ReadWrite = Constant::getNullValue(Type::getInt32Ty(Context));
+ if (Locality == 0)
+ Locality = ConstantInt::get(Type::getInt32Ty(Context), 3);
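+  // Thus a bare __builtin_prefetch(p) is emitted as a read prefetch
+  // (ReadWrite 0) with maximal locality (3).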
+
+ Ptr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
+
+ Value *Ops[3] = { Ptr, ReadWrite, Locality };
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::prefetch),
+ Ops, Ops+3);
+ return true;
+}
+
+/// EmitBuiltinReturnAddr - Emit an llvm.returnaddress or llvm.frameaddress
+/// instruction, depending on whether isFrame is true or not.
+bool TreeToLLVM::EmitBuiltinReturnAddr(gimple stmt, Value *&Result,
+ bool isFrame) {
+ if (!validate_gimple_arglist(stmt, INTEGER_TYPE, VOID_TYPE))
+ return false;
+
+ ConstantInt *Level =
+ dyn_cast<ConstantInt>(Emit(gimple_call_arg(stmt, 0), 0));
+ if (!Level) {
+ if (isFrame)
+ error("invalid argument to %<__builtin_frame_address%>");
+ else
+ error("invalid argument to %<__builtin_return_address%>");
+ return false;
+ }
+
+ Intrinsic::ID IID =
+ !isFrame ? Intrinsic::returnaddress : Intrinsic::frameaddress;
+ Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule, IID), Level);
+ Result = Builder.CreateBitCast(Result,
+ ConvertType(gimple_call_return_type(stmt)));
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinExtractReturnAddr(gimple stmt, Value *&Result) {
+ Value *Ptr = Emit(gimple_call_arg(stmt, 0), 0);
+
+ // FIXME: Actually we should do something like this:
+ //
+ // Result = (Ptr & MASK_RETURN_ADDR) + RETURN_ADDR_OFFSET, if mask and
+ // offset are defined. This seems to be needed for: ARM, MIPS, Sparc.
+ // Unfortunately, these constants are defined as RTL expressions and
+ // should be handled separately.
+
+ Result = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
+
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinFrobReturnAddr(gimple stmt, Value *&Result) {
+ Value *Ptr = Emit(gimple_call_arg(stmt, 0), 0);
+
+ // FIXME: Actually we should do something like this:
+ //
+ // Result = Ptr - RETURN_ADDR_OFFSET, if offset is defined. This seems to be
+ // needed for: MIPS, Sparc. Unfortunately, these constants are defined
+ // as RTL expressions and should be handled separately.
+
+ Result = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
+
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinStackSave(gimple stmt, Value *&Result) {
+ if (!validate_gimple_arglist(stmt, VOID_TYPE))
+ return false;
+
+ Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::stacksave));
+ return true;
+}
+
+
+// Builtins used by the exception handling runtime.
+
+// On most machines, the CFA coincides with the first incoming parm.
+#ifndef ARG_POINTER_CFA_OFFSET
+#define ARG_POINTER_CFA_OFFSET(FNDECL) FIRST_PARM_OFFSET (FNDECL)
+#endif
+
+// The mapping from gcc register number to DWARF 2 CFA column number. By
+// default, we just provide columns for all registers.
+#ifndef DWARF_FRAME_REGNUM
+#define DWARF_FRAME_REGNUM(REG) DBX_REGISTER_NUMBER (REG)
+#endif
+
+// Map register numbers held in the call frame info that gcc has
+// collected using DWARF_FRAME_REGNUM to those that should be output in
+// .debug_frame and .eh_frame.
+#ifndef DWARF2_FRAME_REG_OUT
+#define DWARF2_FRAME_REG_OUT(REGNO, FOR_EH) (REGNO)
+#endif
+
+/* Registers that get partially clobbered by a call in a given mode.
+ These must not be call used registers. */
+#ifndef HARD_REGNO_CALL_PART_CLOBBERED
+#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) 0
+#endif
+
+bool TreeToLLVM::EmitBuiltinDwarfCFA(gimple stmt, Value *&Result) {
+ if (!validate_gimple_arglist(stmt, VOID_TYPE))
+ return false;
+
+ int cfa_offset = ARG_POINTER_CFA_OFFSET(exp);
+
+ // FIXME: is i32 always enough here?
+ Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::eh_dwarf_cfa),
+ ConstantInt::get(Type::getInt32Ty(Context), cfa_offset));
+
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinDwarfSPColumn(gimple stmt, Value *&Result) {
+ if (!validate_gimple_arglist(stmt, VOID_TYPE))
+ return false;
+
+ unsigned int dwarf_regnum = DWARF_FRAME_REGNUM(STACK_POINTER_REGNUM);
+ Result = ConstantInt::get(ConvertType(gimple_call_return_type(stmt)),
+ dwarf_regnum);
+
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinEHReturnDataRegno(gimple stmt, Value *&Result) {
+#ifdef EH_RETURN_DATA_REGNO
+ if (!validate_gimple_arglist(stmt, INTEGER_TYPE, VOID_TYPE))
+ return false;
+
+ tree which = gimple_call_arg(stmt, 0);
+ unsigned HOST_WIDE_INT iwhich;
+
+ if (TREE_CODE (which) != INTEGER_CST) {
+ error ("argument of %<__builtin_eh_return_regno%> must be constant");
+ return false;
+ }
+
+ iwhich = tree_low_cst (which, 1);
+ iwhich = EH_RETURN_DATA_REGNO (iwhich);
+ if (iwhich == INVALID_REGNUM)
+ return false;
+
+ iwhich = DWARF_FRAME_REGNUM (iwhich);
+
+ Result = ConstantInt::get(ConvertType(gimple_call_return_type(stmt)), iwhich);
+#endif
+
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinEHReturn(gimple stmt, Value *&Result) {
+ if (!validate_gimple_arglist(stmt, INTEGER_TYPE, POINTER_TYPE, VOID_TYPE))
+ return false;
+
+ const Type *IntPtr = TD.getIntPtrType(Context);
+ Value *Offset = Emit(gimple_call_arg(stmt, 0), 0);
+ Value *Handler = Emit(gimple_call_arg(stmt, 1), 0);
+
+ Intrinsic::ID IID = (IntPtr == Type::getInt32Ty(Context) ?
+ Intrinsic::eh_return_i32 : Intrinsic::eh_return_i64);
+
+ Offset = Builder.CreateIntCast(Offset, IntPtr, true);
+ Handler = Builder.CreateBitCast(Handler, Type::getInt8PtrTy(Context));
+
+ SmallVector<Value *, 2> Args;
+ Args.push_back(Offset);
+ Args.push_back(Handler);
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule, IID),
+ Args.begin(), Args.end());
+ Result = Builder.CreateUnreachable();
+ EmitBlock(BasicBlock::Create(Context));
+
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinInitDwarfRegSizes(gimple stmt, Value *&Result) {
+#ifdef DWARF2_UNWIND_INFO
+ unsigned int i;
+ bool wrote_return_column = false;
+ static bool reg_modes_initialized = false;
+
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, VOID_TYPE))
+ return false;
+
+ if (!reg_modes_initialized) {
+ init_reg_modes_target();
+ reg_modes_initialized = true;
+ }
+
+ Value *Addr =
+ Builder.CreateBitCast(Emit(gimple_call_arg(stmt, 0), 0),
+ Type::getInt8PtrTy(Context));
+ Constant *Size, *Idx;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) {
+ int rnum = DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), 1);
+
+ if (rnum < DWARF_FRAME_REGISTERS) {
+ enum machine_mode save_mode = reg_raw_mode[i];
+ HOST_WIDE_INT size;
+
+ if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
+ save_mode = choose_hard_reg_mode (i, 1, true);
+ if (DWARF_FRAME_REGNUM (i) == DWARF_FRAME_RETURN_COLUMN) {
+ if (save_mode == VOIDmode)
+ continue;
+ wrote_return_column = true;
+ }
+ size = GET_MODE_SIZE (save_mode);
+ if (rnum < 0)
+ continue;
+
+ Size = ConstantInt::get(Type::getInt8Ty(Context), size);
+ Idx = ConstantInt::get(Type::getInt32Ty(Context), rnum);
+ Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx), false);
+ }
+ }
+
+ if (!wrote_return_column) {
+ Size = ConstantInt::get(Type::getInt8Ty(Context),
+ GET_MODE_SIZE (Pmode));
+ Idx = ConstantInt::get(Type::getInt32Ty(Context),
+ DWARF_FRAME_RETURN_COLUMN);
+ Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx), false);
+ }
+
+#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
+ Size = ConstantInt::get(Type::getInt8Ty(Context),
+ GET_MODE_SIZE (Pmode));
+ Idx = ConstantInt::get(Type::getInt32Ty(Context),
+ DWARF_ALT_FRAME_RETURN_COLUMN);
+ Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx), false);
+#endif
+
+#endif /* DWARF2_UNWIND_INFO */
+
+ // TODO: the RS6000 target needs extra initialization [gcc changeset 122468].
+
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinUnwindInit(gimple stmt, Value *&Result) {
+ if (!validate_gimple_arglist(stmt, VOID_TYPE))
+ return false;
+
+ Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::eh_unwind_init));
+
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinStackRestore(gimple stmt) {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, VOID_TYPE))
+ return false;
+
+ Value *Ptr = Emit(gimple_call_arg(stmt, 0), 0);
+ Ptr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
+
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::stackrestore), Ptr);
+ return true;
+}
+
+
+bool TreeToLLVM::EmitBuiltinAlloca(gimple stmt, Value *&Result) {
+ if (!validate_gimple_arglist(stmt, INTEGER_TYPE, VOID_TYPE))
+ return false;
+ Value *Amt = Emit(gimple_call_arg(stmt, 0), 0);
+ Amt = CastToSIntType(Amt, Type::getInt32Ty(Context));
+ Result = Builder.CreateAlloca(Type::getInt8Ty(Context), Amt);
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinExpect(gimple stmt, const MemRef *DestLoc,
+ Value *&Result) {
+ // Ignore the hint for now, just expand the expr. This is safe, but not
+ // optimal.
+ if (gimple_call_num_args(stmt) < 2)
+ return true;
+ Result = Emit(gimple_call_arg(stmt, 0), DestLoc);
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinVAStart(gimple stmt) {
+ if (gimple_call_num_args(stmt) < 2) {
+ error_at (gimple_location(stmt),
+ "too few arguments to function %<va_start%>");
+ return true;
+ }
+
+ tree fntype = TREE_TYPE(current_function_decl);
+ if (TYPE_ARG_TYPES(fntype) == 0 ||
+ (tree_last(TYPE_ARG_TYPES(fntype)) == void_type_node)) {
+ error("%<va_start%> used in function with fixed args");
+ return true;
+ }
+
+ Constant *va_start = Intrinsic::getDeclaration(TheModule, Intrinsic::vastart);
+ Value *ArgVal = Emit(gimple_call_arg(stmt, 0), 0);
+ ArgVal = Builder.CreateBitCast(ArgVal, Type::getInt8PtrTy(Context));
+ Builder.CreateCall(va_start, ArgVal);
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinVAEnd(gimple stmt) {
+ Value *Arg = Emit(gimple_call_arg(stmt, 0), 0);
+ Arg = Builder.CreateBitCast(Arg, Type::getInt8PtrTy(Context));
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::vaend),
+ Arg);
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinVACopy(gimple stmt) {
+ tree Arg1T = gimple_call_arg(stmt, 0);
+ tree Arg2T = gimple_call_arg(stmt, 1);
+
+ Value *Arg1 = Emit(Arg1T, 0); // Emit the address of the destination.
+ // The second arg of llvm.va_copy is a pointer to a valist.
+ Value *Arg2;
+ if (!AGGREGATE_TYPE_P(va_list_type_node)) {
+ // Emit it as a value, then store it to a temporary slot.
+ Value *V2 = Emit(Arg2T, 0);
+ Arg2 = CreateTemporary(V2->getType());
+ Builder.CreateStore(V2, Arg2);
+ } else {
+ // If the target has aggregate valists, then the second argument
+ // from GCC is the address of the source valist and we don't
+ // need to do anything special.
+ Arg2 = Emit(Arg2T, 0);
+ }
+
+ static const Type *VPTy = Type::getInt8PtrTy(Context);
+
+ // FIXME: This ignores alignment and volatility of the arguments.
+ SmallVector<Value *, 2> Args;
+ Args.push_back(Builder.CreateBitCast(Arg1, VPTy));
+ Args.push_back(Builder.CreateBitCast(Arg2, VPTy));
+
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::vacopy),
+ Args.begin(), Args.end());
+ return true;
+}
+
+bool TreeToLLVM::EmitBuiltinInitTrampoline(gimple stmt, Value *&Result) {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, POINTER_TYPE, POINTER_TYPE,
+ VOID_TYPE))
+ return false;
+
+ static const Type *VPTy = Type::getInt8PtrTy(Context);
+
+ Value *Tramp = Emit(gimple_call_arg(stmt, 0), 0);
+ Tramp = Builder.CreateBitCast(Tramp, VPTy);
+
+ Value *Func = Emit(gimple_call_arg(stmt, 1), 0);
+ Func = Builder.CreateBitCast(Func, VPTy);
+
+ Value *Chain = Emit(gimple_call_arg(stmt, 2), 0);
+ Chain = Builder.CreateBitCast(Chain, VPTy);
+
+ Value *Ops[3] = { Tramp, Func, Chain };
+
+ Function *Intr = Intrinsic::getDeclaration(TheModule,
+ Intrinsic::init_trampoline);
+ Result = Builder.CreateCall(Intr, Ops, Ops+3, "tramp");
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// ... Complex Math Expressions ...
+//===----------------------------------------------------------------------===//
+
+Value *TreeToLLVM::CreateComplex(Value *Real, Value *Imag) {
+ assert(Real->getType() == Imag->getType() && "Component type mismatch!");
+ const Type *EltTy = Real->getType();
+ Value *Result = UndefValue::get(StructType::get(Context, EltTy, EltTy, NULL));
+ Result = Builder.CreateInsertValue(Result, Real, 0);
+ Result = Builder.CreateInsertValue(Result, Imag, 1);
+ return Result;
+}
+
+void TreeToLLVM::SplitComplex(Value *Complex, Value *&Real, Value *&Imag) {
+ Real = Builder.CreateExtractValue(Complex, 0);
+ Imag = Builder.CreateExtractValue(Complex, 1);
+}
+
+Value *TreeToLLVM::EmitCOMPLEX_EXPR(tree op0, tree op1) {
+ return CreateComplex(Emit(op0, 0), Emit(op1, 0));
+}
+
+// EmitComplexBinOp - Note that this operates on binops like ==/!=, which return
+// a bool, not a complex value.
+Value *TreeToLLVM::EmitComplexBinOp(tree type, tree_code code,
+ tree op0, tree op1) {
+ Value *LHSr, *LHSi;
+ SplitComplex(Emit(op0, 0), LHSr, LHSi);
+ Value *RHSr, *RHSi;
+ SplitComplex(Emit(op1, 0), RHSr, RHSi);
+
+ Value *DSTr, *DSTi;
+ switch (code) {
+ default: llvm_unreachable("Unhandled complex binop!");
+ case PLUS_EXPR: // (a+ib) + (c+id) = (a+c) + i(b+d)
+ if (LHSr->getType()->isFloatingPoint()) {
+ DSTr = Builder.CreateFAdd(LHSr, RHSr);
+ DSTi = Builder.CreateFAdd(LHSi, RHSi);
+ } else {
+ DSTr = Builder.CreateAdd(LHSr, RHSr);
+ DSTi = Builder.CreateAdd(LHSi, RHSi);
+ }
+ break;
+ case MINUS_EXPR: // (a+ib) - (c+id) = (a-c) + i(b-d)
+ if (LHSr->getType()->isFloatingPoint()) {
+ DSTr = Builder.CreateFSub(LHSr, RHSr);
+ DSTi = Builder.CreateFSub(LHSi, RHSi);
+ } else {
+ DSTr = Builder.CreateSub(LHSr, RHSr);
+ DSTi = Builder.CreateSub(LHSi, RHSi);
+ }
+ break;
+ case MULT_EXPR: { // (a+ib) * (c+id) = (ac-bd) + i(ad+cb)
+ if (LHSr->getType()->isFloatingPoint()) {
+ Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr); // a*c
+ Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi); // b*d
+ DSTr = Builder.CreateFSub(Tmp1, Tmp2); // ac-bd
+
+ Value *Tmp3 = Builder.CreateFMul(LHSr, RHSi); // a*d
+ Value *Tmp4 = Builder.CreateFMul(RHSr, LHSi); // c*b
+ DSTi = Builder.CreateFAdd(Tmp3, Tmp4); // ad+cb
+ } else {
+ Value *Tmp1 = Builder.CreateMul(LHSr, RHSr); // a*c
+ Value *Tmp2 = Builder.CreateMul(LHSi, RHSi); // b*d
+ DSTr = Builder.CreateSub(Tmp1, Tmp2); // ac-bd
+
+ Value *Tmp3 = Builder.CreateMul(LHSr, RHSi); // a*d
+ Value *Tmp4 = Builder.CreateMul(RHSr, LHSi); // c*b
+ DSTi = Builder.CreateAdd(Tmp3, Tmp4); // ad+cb
+ }
+ break;
+ }
+ case RDIV_EXPR: { // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+ // RDIV_EXPR should always be floating point.
+ assert (LHSr->getType()->isFloatingPoint());
+ Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr); // a*c
+ Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi); // b*d
+ Value *Tmp3 = Builder.CreateFAdd(Tmp1, Tmp2); // ac+bd
+
+ Value *Tmp4 = Builder.CreateFMul(RHSr, RHSr); // c*c
+ Value *Tmp5 = Builder.CreateFMul(RHSi, RHSi); // d*d
+ Value *Tmp6 = Builder.CreateFAdd(Tmp4, Tmp5); // cc+dd
+ DSTr = Builder.CreateFDiv(Tmp3, Tmp6);
+
+ Value *Tmp7 = Builder.CreateFMul(LHSi, RHSr); // b*c
+ Value *Tmp8 = Builder.CreateFMul(LHSr, RHSi); // a*d
+ Value *Tmp9 = Builder.CreateFSub(Tmp7, Tmp8); // bc-ad
+ DSTi = Builder.CreateFDiv(Tmp9, Tmp6);
+ break;
+ }
+ }
+
+ return CreateComplex(DSTr, DSTi);
+}
+
+
+//===----------------------------------------------------------------------===//
+// ... L-Value Expressions ...
+//===----------------------------------------------------------------------===//
+
+Value *TreeToLLVM::EmitFieldAnnotation(Value *FieldPtr, tree FieldDecl) {
+ tree AnnotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES(FieldDecl));
+
+ const Type *SBP = Type::getInt8PtrTy(Context);
+
+ Function *Fn = Intrinsic::getDeclaration(TheModule,
+ Intrinsic::ptr_annotation,
+ &SBP, 1);
+
+  // Get file and line number. FIXME: Should this be for the decl or the
+  // use? Is there location info for the use?
+ Constant *LineNo = ConstantInt::get(Type::getInt32Ty(Context),
+ DECL_SOURCE_LINE(FieldDecl));
+ Constant *File = ConvertMetadataStringToGV(DECL_SOURCE_FILE(FieldDecl));
+
+ File = TheFolder->CreateBitCast(File, SBP);
+
+ // There may be multiple annotate attributes. Pass return of lookup_attr
+ // to successive lookups.
+ while (AnnotateAttr) {
+ // Each annotate attribute is a tree list.
+ // Get value of list which is our linked list of args.
+ tree args = TREE_VALUE(AnnotateAttr);
+
+ // Each annotate attribute may have multiple args.
+ // Treat each arg as if it were a separate annotate attribute.
+ for (tree a = args; a; a = TREE_CHAIN(a)) {
+      // Each element of the arg list is a tree list, so get its value.
+ tree val = TREE_VALUE(a);
+
+      // Assert it's a string, and then get that string.
+ assert(TREE_CODE(val) == STRING_CST &&
+ "Annotate attribute arg should always be a string");
+
+ Constant *strGV = TreeConstantToLLVM::EmitLV_STRING_CST(val);
+
+      // We cannot use the IRBuilder because it will constant fold away
+ // the GEP that is critical to distinguish between an annotate
+ // attribute on a whole struct from one on the first element of the
+ // struct.
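+      // (A constant-folded GEP at offset zero would collapse to the struct
+      // pointer itself, losing that distinction.)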
+ BitCastInst *CastFieldPtr = new BitCastInst(FieldPtr, SBP,
+ FieldPtr->getName());
+ Builder.Insert(CastFieldPtr);
+
+ Value *Ops[4] = {
+ CastFieldPtr, Builder.CreateBitCast(strGV, SBP),
+ File, LineNo
+ };
+
+ const Type* FieldPtrType = FieldPtr->getType();
+ FieldPtr = Builder.CreateCall(Fn, Ops, Ops+4);
+ FieldPtr = Builder.CreateBitCast(FieldPtr, FieldPtrType);
+ }
+
+ // Get next annotate attribute.
+ AnnotateAttr = TREE_CHAIN(AnnotateAttr);
+ if (AnnotateAttr)
+ AnnotateAttr = lookup_attribute("annotate", AnnotateAttr);
+ }
+ return FieldPtr;
+}
+
+LValue TreeToLLVM::EmitLV_ARRAY_REF(tree exp) {
+ // The result type is an ElementTy* in the case of an ARRAY_REF, an array
+ // of ElementTy in the case of ARRAY_RANGE_REF.
+
+ tree Array = TREE_OPERAND(exp, 0);
+ tree ArrayTreeType = TREE_TYPE(Array);
+ tree Index = TREE_OPERAND(exp, 1);
+ tree IndexType = TREE_TYPE(Index);
+ tree ElementType = TREE_TYPE(ArrayTreeType);
+
+ assert(TREE_CODE (ArrayTreeType) == ARRAY_TYPE && "Unknown ARRAY_REF!");
+
+ Value *ArrayAddr;
+ unsigned ArrayAlign;
+
+ // First subtract the lower bound, if any, in the type of the index.
+ Value *IndexVal = Emit(Index, 0);
+ tree LowerBound = array_ref_low_bound(exp);
+ if (!integer_zerop(LowerBound))
+ IndexVal = TYPE_UNSIGNED(TREE_TYPE(Index)) ?
+ Builder.CreateSub(IndexVal, Emit(LowerBound, 0)) :
+ Builder.CreateNSWSub(IndexVal, Emit(LowerBound, 0));
+
+ LValue ArrayAddrLV = EmitLV(Array);
+ assert(!ArrayAddrLV.isBitfield() && "Arrays cannot be bitfields!");
+ ArrayAddr = ArrayAddrLV.Ptr;
+ ArrayAlign = ArrayAddrLV.getAlignment();
+
+ const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
+ if (TYPE_UNSIGNED(IndexType)) // if the index is unsigned
+ // ZExt it to retain its value in the larger type
+ IndexVal = CastToUIntType(IndexVal, IntPtrTy);
+ else
+ // SExt it to retain its value in the larger type
+ IndexVal = CastToSIntType(IndexVal, IntPtrTy);
+
+ // If we are indexing over a fixed-size type, just use a GEP.
+ if (isSequentialCompatible(ArrayTreeType)) {
+ Value *Idx[2];
+ Idx[0] = ConstantInt::get(IntPtrTy, 0);
+ Idx[1] = IndexVal;
+ Value *Ptr = POINTER_TYPE_OVERFLOW_UNDEFINED ?
+ Builder.CreateInBoundsGEP(ArrayAddr, Idx, Idx + 2) :
+ Builder.CreateGEP(ArrayAddr, Idx, Idx + 2);
+
+ const Type *ElementTy = ConvertType(ElementType);
+ unsigned Alignment = MinAlign(ArrayAlign, TD.getABITypeAlignment(ElementTy));
+ return LValue(Builder.CreateBitCast(Ptr,
+ PointerType::getUnqual(ConvertType(TREE_TYPE(exp)))),
+ Alignment);
+ }
+
+ // Otherwise, just do raw, low-level pointer arithmetic. FIXME: this could be
+ // much nicer in cases like:
+ // float foo(int w, float A[][w], int g) { return A[g][0]; }
+
+ ArrayAddr = Builder.CreateBitCast(ArrayAddr, Type::getInt8PtrTy(Context));
+ if (VOID_TYPE_P(TREE_TYPE(ArrayTreeType)))
+ return LValue(Builder.CreateGEP(ArrayAddr, IndexVal), 1);
+
+ Value *TypeSize = Emit(array_ref_element_size(exp), 0);
+ TypeSize = CastToUIntType(TypeSize, IntPtrTy);
+ IndexVal = Builder.CreateMul(IndexVal, TypeSize);
+ unsigned Alignment = 1;
+ if (isa<ConstantInt>(IndexVal))
+ Alignment = MinAlign(ArrayAlign,
+ cast<ConstantInt>(IndexVal)->getZExtValue());
+ Value *Ptr = POINTER_TYPE_OVERFLOW_UNDEFINED ?
+ Builder.CreateInBoundsGEP(ArrayAddr, IndexVal) :
+ Builder.CreateGEP(ArrayAddr, IndexVal);
+ return LValue(Builder.CreateBitCast(Ptr,
+ PointerType::getUnqual(ConvertType(TREE_TYPE(exp)))),
+ Alignment);
+}
+
+LValue TreeToLLVM::EmitLV_BIT_FIELD_REF(tree exp) {
+ LValue Ptr = EmitLV(TREE_OPERAND(exp, 0));
+ assert(!Ptr.isBitfield() && "BIT_FIELD_REF operands cannot be bitfields!");
+
+ unsigned BitStart = (unsigned)TREE_INT_CST_LOW(TREE_OPERAND(exp, 2));
+ unsigned BitSize = (unsigned)TREE_INT_CST_LOW(TREE_OPERAND(exp, 1));
+ const Type *ValTy = ConvertType(TREE_TYPE(exp));
+
+ unsigned ValueSizeInBits = TD.getTypeSizeInBits(ValTy);
+ assert(BitSize <= ValueSizeInBits &&
+ "ValTy isn't large enough to hold the value loaded!");
+
+ assert(ValueSizeInBits == TD.getTypeAllocSizeInBits(ValTy) &&
+ "FIXME: BIT_FIELD_REF logic is broken for non-round types");
+
+ // BIT_FIELD_REF values can have BitStart values that are quite large. We
+ // know that the thing we are loading is ValueSizeInBits large. If BitStart
+ // is larger than ValueSizeInBits, bump the pointer over to where it should
+ // be.
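+  // For example, with an i32 ValTy and a BitStart of 40, the pointer is
+  // bumped by one unit and BitStart becomes 8.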
+ if (unsigned UnitOffset = BitStart / ValueSizeInBits) {
+ // TODO: If Ptr.Ptr is a struct type or something, we can do much better
+ // than this. e.g. check out when compiling unwind-dw2-fde-darwin.c.
+ Ptr.Ptr = Builder.CreateBitCast(Ptr.Ptr, ValTy->getPointerTo());
+ Ptr.Ptr = Builder.CreateGEP(Ptr.Ptr,
+ ConstantInt::get(Type::getInt32Ty(Context),
+ UnitOffset));
+ BitStart -= UnitOffset*ValueSizeInBits;
+ }
+
+ // If this is referring to the whole field, return the whole thing.
+ if (BitStart == 0 && BitSize == ValueSizeInBits) {
+ return LValue(Builder.CreateBitCast(Ptr.Ptr, ValTy->getPointerTo()),
+ Ptr.getAlignment());
+ }
+
+ return LValue(Builder.CreateBitCast(Ptr.Ptr, ValTy->getPointerTo()),
+ 1, BitStart, BitSize);
+}
+
+LValue TreeToLLVM::EmitLV_COMPONENT_REF(tree exp) {
+ LValue StructAddrLV = EmitLV(TREE_OPERAND(exp, 0));
+ tree FieldDecl = TREE_OPERAND(exp, 1);
+ unsigned LVAlign = StructAddrLV.getAlignment();
+
+ assert((TREE_CODE(DECL_CONTEXT(FieldDecl)) == RECORD_TYPE ||
+ TREE_CODE(DECL_CONTEXT(FieldDecl)) == UNION_TYPE ||
+ TREE_CODE(DECL_CONTEXT(FieldDecl)) == QUAL_UNION_TYPE));
+
+ // Ensure that the struct type has been converted, so that the fielddecls
+ // are laid out. Note that we convert to the context of the Field, not to the
+ // type of Operand #0, because GCC doesn't always have the field match up with
+ // operand #0's type.
+ const Type *StructTy = ConvertType(DECL_CONTEXT(FieldDecl));
+
+ assert((!StructAddrLV.isBitfield() ||
+ StructAddrLV.BitStart == 0) && "structs cannot be bitfields!");
+
+ StructAddrLV.Ptr = Builder.CreateBitCast(StructAddrLV.Ptr,
+ StructTy->getPointerTo());
+ const Type *FieldTy = ConvertType(getDeclaredType(FieldDecl));
+
+ // BitStart - This is the actual offset of the field from the start of the
+ // struct, in bits. For bitfields this may be on a non-byte boundary.
+ unsigned BitStart = getFieldOffsetInBits(TREE_OPERAND(exp, 1));
+ Value *FieldPtr;
+
+ // If this is a normal field at a fixed offset from the start, handle it.
+ if (!TREE_OPERAND(exp, 2)) {
+ unsigned int MemberIndex = GetFieldIndex(FieldDecl);
+
+    // If the LLVM struct has zero fields, don't try to index into it, just use
+ // the current pointer.
+ FieldPtr = StructAddrLV.Ptr;
+ if (StructTy->getNumContainedTypes() != 0) {
+ assert(MemberIndex < StructTy->getNumContainedTypes() &&
+ "Field Idx out of range!");
+ FieldPtr = Builder.CreateStructGEP(FieldPtr, MemberIndex);
+ }
+
+    // Now that we have indexed in from the start of the struct, subtract that
+    // offset from BitStart.
+ if (MemberIndex) {
+ const StructLayout *SL = TD.getStructLayout(cast<StructType>(StructTy));
+ unsigned Offset = SL->getElementOffset(MemberIndex);
+ BitStart -= Offset * 8;
+
+ // If the base is known to be 8-byte aligned, and we're adding a 4-byte
+ // offset, the field is known to be 4-byte aligned.
+ LVAlign = MinAlign(LVAlign, Offset);
+ }
+
+    // There is debate about whether this is really safe or not; be conservative
+ // in the meantime.
+#if 0
+ // If this field is at a constant offset, if the LLVM pointer really points
+ // to it, then we know that the pointer is at least as aligned as the field
+ // is required to be. Try to round up our alignment info.
+ if (BitStart == 0 && // llvm pointer points to it.
+ !isBitfield(FieldDecl) && // bitfield computation might offset pointer.
+ DECL_ALIGN(FieldDecl))
+ LVAlign = std::max(LVAlign, unsigned(DECL_ALIGN(FieldDecl)) / 8);
+#endif
+
+ // If the FIELD_DECL has an annotate attribute on it, emit it.
+ if (lookup_attribute("annotate", DECL_ATTRIBUTES(FieldDecl)))
+ FieldPtr = EmitFieldAnnotation(FieldPtr, FieldDecl);
+ } else {
+ // Offset is the field offset in octets.
+ Value *Offset = Emit(TREE_OPERAND(exp, 2), 0);
+ if (BITS_PER_UNIT != 8) {
+ assert(!(BITS_PER_UNIT & 7) && "Unit size not a multiple of 8 bits!");
+ Offset = Builder.CreateMul(Offset, ConstantInt::get(Offset->getType(),
+ BITS_PER_UNIT / 8));
+ }
+
+ // Here BitStart gives the offset of the field in bits from Offset.
+ // Incorporate as much of it as possible into the pointer computation.
+ unsigned ByteOffset = BitStart/8;
+ if (ByteOffset > 0) {
+ Offset = Builder.CreateAdd(Offset,
+ ConstantInt::get(Offset->getType(), ByteOffset));
+ BitStart -= ByteOffset*8;
+ // If the base is known to be 8-byte aligned, and we're adding a 4-byte
+ // offset, the field is known to be 4-byte aligned.
+ LVAlign = MinAlign(LVAlign, ByteOffset);
+ }
+
+ Value *Ptr = Builder.CreatePtrToInt(StructAddrLV.Ptr, Offset->getType());
+ Ptr = Builder.CreateAdd(Ptr, Offset);
+ FieldPtr = Builder.CreateIntToPtr(Ptr, FieldTy->getPointerTo());
+ }
+
+ if (isBitfield(FieldDecl)) {
+ // If this is a bitfield, the declared type must be an integral type.
+ assert(FieldTy->isInteger() && "Invalid bitfield");
+
+ assert(DECL_SIZE(FieldDecl) &&
+ TREE_CODE(DECL_SIZE(FieldDecl)) == INTEGER_CST &&
+ "Variable sized bitfield?");
+ unsigned BitfieldSize = TREE_INT_CST_LOW(DECL_SIZE(FieldDecl));
+
+ const Type *LLVMFieldTy =
+ cast<PointerType>(FieldPtr->getType())->getElementType();
+
+ // If the LLVM notion of the field type contains the entire bitfield being
+ // accessed, use the LLVM type. This avoids pointer casts and other bad
+ // things that are difficult to clean up later. This occurs in cases like
+ // "struct X{ unsigned long long x:50; unsigned y:2; }" when accessing y.
+ // We want to access the field as a ulong, not as a uint with an offset.
+ if (LLVMFieldTy->isInteger() &&
+ LLVMFieldTy->getPrimitiveSizeInBits() >= BitStart + BitfieldSize &&
+ LLVMFieldTy->getPrimitiveSizeInBits() ==
+ TD.getTypeAllocSizeInBits(LLVMFieldTy))
+ FieldTy = LLVMFieldTy;
+ else
+ // If the field result type T is a bool or some other curiously sized
+ // integer type, then not all bits may be accessible by advancing a T*
+ // and loading through it. For example, if the result type is i1 then
+ // only the first bit in each byte would be loaded. Even if T is byte
+ // sized like an i24 there may be trouble: incrementing a T* will move
+ // the position by 32 bits not 24, leaving the upper 8 of those 32 bits
+ // inaccessible. Avoid this by rounding up the size appropriately.
+ FieldTy = IntegerType::get(Context, TD.getTypeAllocSizeInBits(FieldTy));
+
+ assert(FieldTy->getPrimitiveSizeInBits() ==
+ TD.getTypeAllocSizeInBits(FieldTy) && "Field type not sequential!");
+
+ // If this is a bitfield, the field may span multiple fields in the LLVM
+ // type. As such, cast the pointer to be a pointer to the declared type.
+ FieldPtr = Builder.CreateBitCast(FieldPtr, FieldTy->getPointerTo());
+
+ unsigned LLVMValueBitSize = FieldTy->getPrimitiveSizeInBits();
+ // Finally, because bitfields can span LLVM fields, and because the start
+ // of the first LLVM field (where FieldPtr currently points) may be up to
+    // 63 bits away from the start of the bitfield, it is possible that
+ // *FieldPtr doesn't contain any of the bits for this bitfield. If needed,
+ // adjust FieldPtr so that it is close enough to the bitfield that
+ // *FieldPtr contains the first needed bit. Be careful to make sure that
+ // the pointer remains appropriately aligned.
+ if (BitStart > LLVMValueBitSize) {
+ // In this case, we know that the alignment of the field is less than
+ // the size of the field. To get the pointer close enough, add some
+ // number of alignment units to the pointer.
+ unsigned ByteAlignment = TD.getABITypeAlignment(FieldTy);
+ // It is possible that an individual field is Packed. This information is
+ // not reflected in FieldTy. Check DECL_PACKED here.
+ if (DECL_PACKED(FieldDecl))
+ ByteAlignment = 1;
+ assert(ByteAlignment*8 <= LLVMValueBitSize && "Unknown overlap case!");
+ unsigned NumAlignmentUnits = BitStart/(ByteAlignment*8);
+ assert(NumAlignmentUnits && "Not adjusting pointer?");
+
+ // Compute the byte offset, and add it to the pointer.
+ unsigned ByteOffset = NumAlignmentUnits*ByteAlignment;
+ LVAlign = MinAlign(LVAlign, ByteOffset);
+
+ Constant *Offset = ConstantInt::get(TD.getIntPtrType(Context), ByteOffset);
+ FieldPtr = Builder.CreatePtrToInt(FieldPtr, Offset->getType());
+ FieldPtr = Builder.CreateAdd(FieldPtr, Offset);
+ FieldPtr = Builder.CreateIntToPtr(FieldPtr, FieldTy->getPointerTo());
+
+ // Adjust bitstart to account for the pointer movement.
+ BitStart -= ByteOffset*8;
+
+ // Check that this worked. Note that the bitfield may extend beyond
+ // the end of *FieldPtr, for example because BitfieldSize is the same
+ // as LLVMValueBitSize but BitStart > 0.
+ assert(BitStart < LLVMValueBitSize &&
+ BitStart+BitfieldSize < 2*LLVMValueBitSize &&
+ "Couldn't get bitfield into value!");
+ }
+
+ // Okay, everything is good. Return this as a bitfield if we can't
+ // return it as a normal l-value. (e.g. "struct X { int X : 32 };" ).
+ // Conservatively return LValue with alignment 1.
+ if (BitfieldSize != LLVMValueBitSize || BitStart != 0)
+ return LValue(FieldPtr, 1, BitStart, BitfieldSize);
+ } else {
+ // Make sure we return a pointer to the right type.
+ const Type *EltTy = ConvertType(TREE_TYPE(exp));
+ FieldPtr = Builder.CreateBitCast(FieldPtr, EltTy->getPointerTo());
+ }
+
+ assert(BitStart == 0 &&
+ "It's a bitfield reference or we didn't get to the field!");
+ return LValue(FieldPtr, LVAlign);
+}
+
+LValue TreeToLLVM::EmitLV_DECL(tree exp) {
+ if (TREE_CODE(exp) == PARM_DECL || TREE_CODE(exp) == VAR_DECL ||
+ TREE_CODE(exp) == CONST_DECL) {
+ // If a static var's type was incomplete when the decl was written,
+ // but the type is complete now, lay out the decl now.
+ if (DECL_SIZE(exp) == 0 && COMPLETE_OR_UNBOUND_ARRAY_TYPE_P(TREE_TYPE(exp))
+ && (TREE_STATIC(exp) || DECL_EXTERNAL(exp))) {
+ layout_decl(exp, 0);
+
+#if 0
+ // This mirrors code in layout_decl for munging the RTL. Here we actually
+ // emit a NEW declaration for the global variable, now that it has been
+ // laid out. We then tell the compiler to "forward" any uses of the old
+ // global to this new one.
+ if (Value *Val = DECL_LOCAL_IF_SET(exp)) {
+ //fprintf(stderr, "***\n*** SHOULD HANDLE GLOBAL VARIABLES!\n***\n");
+ //assert(0 && "Reimplement this with replace all uses!");
+ SET_DECL_LOCAL(exp, 0);
+ // Create a new global variable declaration
+ llvm_assemble_external(exp);
+ V2GV(Val)->ForwardedGlobal = V2GV(DECL_LOCAL(exp));
+ }
+#endif
+ }
+ }
+
+ Value *Decl = DECL_LOCAL(exp);
+ if (Decl == 0) {
+ if (errorcount || sorrycount) {
+ const Type *Ty = ConvertType(TREE_TYPE(exp));
+ const PointerType *PTy = Ty->getPointerTo();
+ LValue LV(ConstantPointerNull::get(PTy), 1);
+ return LV;
+ }
+ assert(0 && "INTERNAL ERROR: Referencing decl that hasn't been laid out");
+ abort();
+ }
+
+  // Ensure the variable is marked as used even if it doesn't go through a
+  // parser. If it hasn't been used yet, write out an external definition.
+ if (!TREE_USED(exp)) {
+ assemble_external(exp);
+ TREE_USED(exp) = 1;
+ Decl = DECL_LOCAL(exp);
+ }
+
+ if (GlobalValue *GV = dyn_cast<GlobalValue>(Decl)) {
+ // If this is an aggregate, emit it to LLVM now. GCC happens to
+ // get this case right by forcing the initializer into memory.
+ if (TREE_CODE(exp) == CONST_DECL || TREE_CODE(exp) == VAR_DECL) {
+ if ((DECL_INITIAL(exp) || !TREE_PUBLIC(exp)) && !DECL_EXTERNAL(exp) &&
+ GV->isDeclaration() &&
+ !BOGUS_CTOR(exp)) {
+ emit_global_to_llvm(exp);
+        Decl = DECL_LOCAL(exp); // Decl could have changed if it changed type.
+ }
+ } else {
+ // Otherwise, inform cgraph that we used the global.
+ mark_decl_referenced(exp);
+ if (tree ID = DECL_ASSEMBLER_NAME(exp))
+ mark_referenced(ID);
+ }
+ }
+
+ const Type *Ty = ConvertType(TREE_TYPE(exp));
+ // If we have "extern void foo", make the global have type {} instead of
+ // type void.
+ if (Ty->isVoidTy()) Ty = StructType::get(Context);
+ const PointerType *PTy = Ty->getPointerTo();
+ unsigned Alignment = Ty->isSized() ? TD.getABITypeAlignment(Ty) : 1;
+ if (DECL_ALIGN(exp)) {
+ if (DECL_USER_ALIGN(exp) || 8 * Alignment < (unsigned)DECL_ALIGN(exp))
+ Alignment = DECL_ALIGN(exp) / 8;
+ }
+
+ return LValue(Builder.CreateBitCast(Decl, PTy), Alignment);
+}
+
+LValue TreeToLLVM::EmitLV_EXC_PTR_EXPR(tree exp) {
+ CreateExceptionValues();
+ // Cast the address pointer to the expected type.
+ unsigned Alignment = TD.getABITypeAlignment(cast<PointerType>(ExceptionValue->
+ getType())->getElementType());
+ return LValue(Builder.CreateBitCast(ExceptionValue,
+ PointerType::getUnqual(ConvertType(TREE_TYPE(exp)))),
+ Alignment);
+}
+
+LValue TreeToLLVM::EmitLV_FILTER_EXPR(tree exp) {
+ CreateExceptionValues();
+ unsigned Alignment =
+ TD.getABITypeAlignment(cast<PointerType>(ExceptionSelectorValue->
+ getType())->getElementType());
+ return LValue(ExceptionSelectorValue, Alignment);
+}
+
+LValue TreeToLLVM::EmitLV_INDIRECT_REF(tree exp) {
+ // The lvalue is just the address.
+ LValue LV = LValue(Emit(TREE_OPERAND(exp, 0), 0), expr_align(exp) / 8);
+ // Correct for implicit type conversion: INDIRECT_REF can be applied to a
+ // void*, resulting in a non-void type.
+ LV.Ptr = Builder.CreateBitCast(LV.Ptr,
+ ConvertType(TREE_TYPE(exp))->getPointerTo());
+ return LV;
+}
+
+LValue TreeToLLVM::EmitLV_VIEW_CONVERT_EXPR(tree exp) {
+ tree Op = TREE_OPERAND(exp, 0);
+
+ if (AGGREGATE_TYPE_P(TREE_TYPE(Op))) {
+ // If the input is an aggregate, the address is the address of the operand.
+ LValue LV = EmitLV(Op);
+ // The type is the type of the expression.
+ LV.Ptr = Builder.CreateBitCast(LV.Ptr,
+ PointerType::getUnqual(ConvertType(TREE_TYPE(exp))));
+ return LV;
+ } else {
+ // TODO: Check the VCE is being used as an rvalue, see EmitLoadOfLValue.
+ // If the input is a scalar, emit to a temporary.
+ Value *Dest = CreateTemporary(ConvertType(TREE_TYPE(Op)));
+ Builder.CreateStore(Emit(Op, 0), Dest);
+ // The type is the type of the expression.
+ Dest = Builder.CreateBitCast(Dest,
+ PointerType::getUnqual(ConvertType(TREE_TYPE(exp))));
+ return LValue(Dest, 1);
+ }
+}
+
+LValue TreeToLLVM::EmitLV_WITH_SIZE_EXPR(tree exp) {
+ // The address is the address of the operand.
+ return EmitLV(TREE_OPERAND(exp, 0));
+}
+
+LValue TreeToLLVM::EmitLV_XXXXPART_EXPR(tree exp, unsigned Idx) {
+ LValue Ptr = EmitLV(TREE_OPERAND(exp, 0));
+ assert(!Ptr.isBitfield() &&
+ "REALPART_EXPR / IMAGPART_EXPR operands cannot be bitfields!");
+ unsigned Alignment;
+ if (Idx == 0)
+ // REALPART alignment is same as the complex operand.
+ Alignment = Ptr.getAlignment();
+ else
+ // IMAGPART alignment = MinAlign(Ptr.Alignment, sizeof field);
+ Alignment = MinAlign(Ptr.getAlignment(),
+ TD.getTypeAllocSize(Ptr.Ptr->getType()));
+ return LValue(Builder.CreateStructGEP(Ptr.Ptr, Idx), Alignment);
+}
+
+LValue TreeToLLVM::EmitLV_SSA_NAME(tree exp) {
+ // TODO: Check the ssa name is being used as an rvalue, see EmitLoadOfLValue.
+ Value *Temp = CreateTemporary(ConvertType(TREE_TYPE(exp)));
+ Builder.CreateStore(EmitSSA_NAME(exp), Temp);
+ return LValue(Temp, 1);
+}
+
+
+//===----------------------------------------------------------------------===//
+// ... Convert GIMPLE to LLVM ...
+//===----------------------------------------------------------------------===//
+
+void TreeToLLVM::RenderGIMPLE_ASM(gimple stmt) {
+ // Some of the GCC utilities we use still want lists and not gimple, so create
+ // input, output and clobber lists for their benefit.
+ unsigned NumOutputs = gimple_asm_noutputs (stmt);
+ tree outputs = NULL_TREE;
+ if (NumOutputs) {
+ tree t = outputs = gimple_asm_output_op (stmt, 0);
+ for (unsigned i = 1; i < NumOutputs; i++) {
+ TREE_CHAIN (t) = gimple_asm_output_op (stmt, i);
+ t = gimple_asm_output_op (stmt, i);
+ }
+ }
+
+ unsigned NumInputs = gimple_asm_ninputs(stmt);
+ tree inputs = NULL_TREE;
+ if (NumInputs) {
+ tree t = inputs = gimple_asm_input_op (stmt, 0);
+ for (unsigned i = 1; i < NumInputs; i++) {
+ TREE_CHAIN (t) = gimple_asm_input_op (stmt, i);
+ t = gimple_asm_input_op (stmt, i);
+ }
+ }
+
+ unsigned NumClobbers = gimple_asm_nclobbers (stmt);
+ tree clobbers = NULL_TREE;
+ if (NumClobbers) {
+ tree t = clobbers = gimple_asm_clobber_op (stmt, 0);
+ for (unsigned i = 1; i < NumClobbers; i++) {
+ TREE_CHAIN (t) = gimple_asm_clobber_op (stmt, i);
+ t = gimple_asm_clobber_op (stmt, i);
+ }
+ }
+
+ // TODO: Understand what these labels are about, and handle them properly.
+ unsigned NumLabels = gimple_asm_nlabels (stmt);
+ tree labels = NULL_TREE;
+ if (NumLabels) {
+ tree t = labels = gimple_asm_label_op (stmt, 0);
+ for (unsigned i = 1; i < NumLabels; i++) {
+ TREE_CHAIN (t) = gimple_asm_label_op (stmt, i);
+ t = gimple_asm_label_op (stmt, i);
+ }
+ }
+
+ unsigned NumInOut = 0;
+
+  // Look for multiple alternative constraints: the alternatives within a
+  // constraint string are separated by commas.
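+  // For example, a constraint string of "r,m" gives two alternatives for the
+  // operand.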
+  unsigned NumChoices = 0; // sentinel; real value is always at least 1.
+ const char* p;
+ for (tree t = inputs; t; t = TREE_CHAIN(t)) {
+ unsigned NumInputChoices = 1;
+ for (p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(t))); *p; p++) {
+ if (*p == ',')
+ NumInputChoices++;
+ }
+ if (NumChoices==0)
+ NumChoices = NumInputChoices;
+ else if (NumChoices != NumInputChoices)
+ abort(); // invalid constraints
+ }
+ for (tree t = outputs; t; t = TREE_CHAIN(t)) {
+ unsigned NumOutputChoices = 1;
+ for (p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(t))); *p; p++) {
+ if (*p == ',')
+ NumOutputChoices++;
+ }
+ if (NumChoices==0)
+ NumChoices = NumOutputChoices;
+ else if (NumChoices != NumOutputChoices)
+ abort(); // invalid constraints
+ }
+
+ /// Constraints - The output/input constraints, concatenated together in array
+ /// form instead of list form.
+ const char **Constraints =
+ (const char **)alloca((NumOutputs + NumInputs) * sizeof(const char *));
+
+ // Process outputs.
+ int ValNum = 0;
+ for (tree Output = outputs; Output; Output = TREE_CHAIN(Output), ++ValNum) {
+ tree Operand = TREE_VALUE(Output);
+ tree type = TREE_TYPE(Operand);
+ // If there's an erroneous arg, emit no insn.
+ if (type == error_mark_node) return;
+
+ // Parse the output constraint.
+ const char *Constraint =
+ TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Output)));
+ Constraints[ValNum] = Constraint;
+ }
+ // Process inputs.
+ for (tree Input = inputs; Input; Input = TREE_CHAIN(Input),++ValNum) {
+ tree Val = TREE_VALUE(Input);
+ tree type = TREE_TYPE(Val);
+ // If there's an erroneous arg, emit no insn.
+ if (type == error_mark_node) return;
+
+ const char *Constraint =
+ TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Input)));
+ Constraints[ValNum] = Constraint;
+ }
+
+ // If there are multiple constraint tuples, pick one. Constraints is
+ // altered to point to shorter strings (which are malloc'ed), and everything
+ // below Just Works as in the NumChoices==1 case.
+ const char** ReplacementStrings = 0;
+ if (NumChoices>1) {
+ ReplacementStrings =
+ (const char **)alloca((NumOutputs + NumInputs) * sizeof(const char *));
+ ChooseConstraintTuple(Constraints, stmt, outputs, inputs, NumOutputs,
+ NumInputs, NumChoices, ReplacementStrings);
+ }
+
+ std::vector<Value*> CallOps;
+ std::vector<const Type*> CallArgTypes;
+ std::string NewAsmStr = ConvertInlineAsmStr(stmt, outputs, inputs, labels,
+ NumOutputs+NumInputs);
+ std::string ConstraintStr;
+
+ // StoreCallResultAddr - The pointer to store the result of the call through.
+ SmallVector<Value *, 4> StoreCallResultAddrs;
+ SmallVector<const Type *, 4> CallResultTypes;
+ SmallVector<bool, 4> CallResultIsSigned;
+ SmallVector<tree, 4> CallResultSSANames;
+ SmallVector<Value *, 4> CallResultSSATemps;
+
+ // Process outputs.
+ ValNum = 0;
+ for (tree Output = outputs; Output; Output = TREE_CHAIN(Output), ++ValNum) {
+ tree Operand = TREE_VALUE(Output);
+
+ // Parse the output constraint.
+ const char *Constraint = Constraints[ValNum];
+ bool IsInOut, AllowsReg, AllowsMem;
+ if (!parse_output_constraint(&Constraint, ValNum, NumInputs, NumOutputs,
+ &AllowsMem, &AllowsReg, &IsInOut)) {
+ if (NumChoices>1)
+ FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+ return;
+ }
+ assert(Constraint[0] == '=' && "Not an output constraint?");
+
+ // Output constraints must be addressable if they aren't simple register
+ // constraints (this emits "address of register var" errors, etc).
+ if (!AllowsReg && (AllowsMem || IsInOut))
+ mark_addressable(Operand);
+
+ // Count the number of "+" constraints.
+ if (IsInOut)
+ ++NumInOut, ++NumInputs;
+
+ std::string SimplifiedConstraint;
+ // If this output register is pinned to a machine register, use that machine
+ // register instead of the specified constraint.
+ if (TREE_CODE(Operand) == VAR_DECL && DECL_HARD_REGISTER(Operand)) {
+ const char* RegName = extractRegisterName(Operand);
+ int RegNum = decode_reg_name(RegName);
+ if (RegNum >= 0) {
+ RegName = getConstraintRegNameFromGccTables(RegName, RegNum);
+ unsigned RegNameLen = strlen(RegName);
+ char *NewConstraint = (char*)alloca(RegNameLen+4);
+ NewConstraint[0] = '=';
+ NewConstraint[1] = '{';
+ memcpy(NewConstraint+2, RegName, RegNameLen);
+ NewConstraint[RegNameLen+2] = '}';
+ NewConstraint[RegNameLen+3] = 0;
+ SimplifiedConstraint = NewConstraint;
+ // We should no longer consider mem constraints.
+ AllowsMem = false;
+ } else {
+ // If we can simplify the constraint into something else, do so now.
+ // This avoids LLVM having to know about all the (redundant) GCC
+ // constraints.
+ SimplifiedConstraint = CanonicalizeConstraint(Constraint+1);
+ }
+ } else {
+ SimplifiedConstraint = CanonicalizeConstraint(Constraint+1);
+ }
+
+ LValue Dest;
+ const Type *DestValTy;
+ if (TREE_CODE(Operand) == SSA_NAME) {
+ // The ASM is defining an ssa name. Store the output to a temporary, then
+ // load it out again later as the ssa name.
+ DestValTy = ConvertType(TREE_TYPE(Operand));
+ Dest.Ptr = CreateTemporary(DestValTy);
+ CallResultSSANames.push_back(Operand);
+ CallResultSSATemps.push_back(Dest.Ptr);
+ } else {
+ Dest = EmitLV(Operand);
+ DestValTy = cast<PointerType>(Dest.Ptr->getType())->getElementType();
+ }
+
+ assert(!Dest.isBitfield() && "Cannot assign into a bitfield!");
+ if (!AllowsMem && DestValTy->isSingleValueType()) {// Reg dest -> asm return
+ StoreCallResultAddrs.push_back(Dest.Ptr);
+ ConstraintStr += ",=";
+ ConstraintStr += SimplifiedConstraint;
+ CallResultTypes.push_back(DestValTy);
+ CallResultIsSigned.push_back(!TYPE_UNSIGNED(TREE_TYPE(Operand)));
+ } else {
+ ConstraintStr += ",=*";
+ ConstraintStr += SimplifiedConstraint;
+ CallOps.push_back(Dest.Ptr);
+ CallArgTypes.push_back(Dest.Ptr->getType());
+ }
+ }
+
+ // Process inputs.
+ for (tree Input = inputs; Input; Input = TREE_CHAIN(Input),++ValNum) {
+ tree Val = TREE_VALUE(Input);
+ tree type = TREE_TYPE(Val);
+
+ const char *Constraint = Constraints[ValNum];
+
+ bool AllowsReg, AllowsMem;
+ if (!parse_input_constraint(Constraints+ValNum, ValNum-NumOutputs,
+ NumInputs, NumOutputs, NumInOut,
+ Constraints, &AllowsMem, &AllowsReg)) {
+ if (NumChoices>1)
+ FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+ return;
+ }
+ bool isIndirect = false;
+ if (AllowsReg || !AllowsMem) { // Register operand.
+ const Type *LLVMTy = ConvertType(type);
+
+ Value *Op = 0;
+ if (LLVMTy->isSingleValueType()) {
+ if (TREE_CODE(Val)==ADDR_EXPR &&
+ TREE_CODE(TREE_OPERAND(Val,0))==LABEL_DECL) {
+ // Emit the label, but do not assume it is going to be the target
+ // of an indirect branch. Having this logic here is a hack; there
+ // should be a bit in the label identifying it as in an asm.
+ Op = getLabelDeclBlock(TREE_OPERAND(Val, 0));
+ } else
+ Op = Emit(Val, 0);
+ } else {
+ LValue LV = EmitLV(Val);
+ assert(!LV.isBitfield() && "Inline asm can't have bitfield operand");
+
+ // Structs and unions are permitted here, as long as they're the
+ // same size as a register.
+ uint64_t TySize = TD.getTypeSizeInBits(LLVMTy);
+ if (TySize == 1 || TySize == 8 || TySize == 16 ||
+ TySize == 32 || TySize == 64) {
+ LLVMTy = IntegerType::get(Context, TySize);
+ Op = Builder.CreateLoad(Builder.CreateBitCast(LV.Ptr,
+ LLVMTy->getPointerTo()));
+ } else {
+          // Otherwise, emit our value as an lvalue and let the codegen deal
+          // with it.
+ isIndirect = true;
+ Op = LV.Ptr;
+ }
+ }
+
+ const Type *OpTy = Op->getType();
+      // If this input operand matches an output operand, e.g. '0', check
+      // whether this is something that llvm supports. If the operand types
+      // differ, then emit an error if 1) one of the types is not integer or
+      // pointer, or 2) the size of the input type is larger than that of the
+      // output type. If the integer input is smaller than the integer output
+      // type, then cast it to the larger type and shift the value if the
+      // target is big endian.
+ if (ISDIGIT(Constraint[0])) {
+ unsigned Match = atoi(Constraint);
+ const Type *OTy = (Match < CallResultTypes.size())
+ ? CallResultTypes[Match] : 0;
+ if (OTy && OTy != OpTy) {
+ if (!(isa<IntegerType>(OTy) || isa<PointerType>(OTy)) ||
+ !(isa<IntegerType>(OpTy) || isa<PointerType>(OpTy))) {
+ error_at(gimple_location(stmt),
+ "unsupported inline asm: input constraint with a matching "
+ "output constraint of incompatible type!");
+ if (NumChoices>1)
+ FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+ return;
+ }
+ unsigned OTyBits = TD.getTypeSizeInBits(OTy);
+ unsigned OpTyBits = TD.getTypeSizeInBits(OpTy);
+ if (OTyBits == 0 || OpTyBits == 0 || OTyBits < OpTyBits) {
+ // It's tempting to implement the OTyBits < OpTyBits case by truncating
+ // Op down to OTy, however that breaks in the case of an inline asm
+ // constraint that corresponds to a single register, because the
+ // user can write code that assumes the whole register is defined,
+ // despite the output operand being only a subset of the register. For
+ // example:
+ //
+ // asm ("sarl $10, %%eax" : "=a"(c) : "0"(1000000));
+ //
+ // The expected behavior is for %eax to be fully defined with the value
+ // 1000000 immediately before the asm.
+ error_at(gimple_location(stmt),
+ "unsupported inline asm: input constraint with a matching "
+ "output constraint of incompatible type!");
+ return;
+ } else if (OTyBits > OpTyBits) {
+ Op = CastToAnyType(Op, !TYPE_UNSIGNED(type),
+ OTy, CallResultIsSigned[Match]);
+ if (BYTES_BIG_ENDIAN) {
+ Constant *ShAmt = ConstantInt::get(Op->getType(),
+ OTyBits-OpTyBits);
+ Op = Builder.CreateLShr(Op, ShAmt);
+ }
+ OpTy = Op->getType();
+ }
+ }
+ }
+
+ CallOps.push_back(Op);
+ CallArgTypes.push_back(OpTy);
+ } else { // Memory operand.
+ mark_addressable(TREE_VALUE(Input));
+ isIndirect = true;
+ LValue Src = EmitLV(Val);
+ assert(!Src.isBitfield() && "Cannot read from a bitfield!");
+ CallOps.push_back(Src.Ptr);
+ CallArgTypes.push_back(Src.Ptr->getType());
+ }
+
+ ConstraintStr += ',';
+ if (isIndirect)
+ ConstraintStr += '*';
+
+    // If this input operand is pinned to a machine register, use that machine
+    // register instead of the specified constraint.
+ if (TREE_CODE(Val) == VAR_DECL && DECL_HARD_REGISTER(Val)) {
+ const char *RegName = extractRegisterName(Val);
+ int RegNum = decode_reg_name(RegName);
+ if (RegNum >= 0) {
+ RegName = getConstraintRegNameFromGccTables(RegName, RegNum);
+ ConstraintStr += '{';
+ ConstraintStr += RegName;
+ ConstraintStr += '}';
+ continue;
+ }
+ }
+
+ // If there is a simpler form for the register constraint, use it.
+ std::string Simplified = CanonicalizeConstraint(Constraint);
+ ConstraintStr += Simplified;
+ }
+
+ // Process clobbers.
+
+ // Some targets automatically clobber registers across an asm.
+ tree Clobbers = targetm.md_asm_clobbers(outputs, inputs, clobbers);
+ for (; Clobbers; Clobbers = TREE_CHAIN(Clobbers)) {
+ const char *RegName = TREE_STRING_POINTER(TREE_VALUE(Clobbers));
+ int RegCode = decode_reg_name(RegName);
+
+ switch (RegCode) {
+ case -1: // Nothing specified?
+ case -2: // Invalid.
+ error_at(gimple_location(stmt), "unknown register name %qs in %<asm%>",
+ RegName);
+ if (NumChoices>1)
+ FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+ return;
+ case -3: // cc
+ ConstraintStr += ",~{cc}";
+ break;
+ case -4: // memory
+ ConstraintStr += ",~{memory}";
+ break;
+ default: // Normal register name.
+ RegName = getConstraintRegNameFromGccTables(RegName, RegCode);
+ ConstraintStr += ",~{";
+ ConstraintStr += RegName;
+ ConstraintStr += "}";
+ break;
+ }
+ }
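+  // As a rough illustration, assuming the GCC register tables map "eax" to
+  // itself: a clobber list of "eax", "memory" appends ",~{eax},~{memory}"
+  // here, in addition to whatever registers the target adds via
+  // targetm.md_asm_clobbers.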
+
+ const Type *CallResultType;
+ switch (CallResultTypes.size()) {
+ case 0: CallResultType = Type::getVoidTy(Context); break;
+ case 1: CallResultType = CallResultTypes[0]; break;
+ default:
+ std::vector<const Type*> TmpVec(CallResultTypes.begin(),
+ CallResultTypes.end());
+ CallResultType = StructType::get(Context, TmpVec);
+ break;
+ }
+
+ const FunctionType *FTy =
+ FunctionType::get(CallResultType, CallArgTypes, false);
+
+ // Remove the leading comma if we have operands.
+ if (!ConstraintStr.empty())
+ ConstraintStr.erase(ConstraintStr.begin());
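+  // Sketch of the net effect, assuming CanonicalizeConstraint leaves the
+  // plain "r" constraint unchanged: for
+  //   asm("movl %1, %0" : "=r"(dst) : "r"(src));
+  // the outputs loop contributed ",=r", the inputs loop ",r", and stripping
+  // the leading comma leaves the final constraint string "=r,r".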
+
+  // Make sure we've created a valid inline asm expression.
+ if (!InlineAsm::Verify(FTy, ConstraintStr)) {
+ error_at(gimple_location(stmt), "Invalid or unsupported inline assembly!");
+ if (NumChoices>1)
+ FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+ return;
+ }
+
+ Value *Asm = InlineAsm::get(FTy, NewAsmStr, ConstraintStr,
+ gimple_asm_volatile_p(stmt) || !outputs);
+ CallInst *CV = Builder.CreateCall(Asm, CallOps.begin(), CallOps.end(),
+ CallResultTypes.empty() ? "" : "asmtmp");
+ CV->setDoesNotThrow();
+
+ // If the call produces a value, store it into the destination.
+ if (StoreCallResultAddrs.size() == 1)
+ Builder.CreateStore(CV, StoreCallResultAddrs[0]);
+ else if (unsigned NumResults = StoreCallResultAddrs.size()) {
+ for (unsigned i = 0; i != NumResults; ++i) {
+ Value *ValI = Builder.CreateExtractValue(CV, i, "asmresult");
+ Builder.CreateStore(ValI, StoreCallResultAddrs[i]);
+ }
+ }
+
+ // If the call defined any ssa names, associate them with their value.
+ for (unsigned i = 0, e = CallResultSSANames.size(); i != e; ++i)
+ SSANames[CallResultSSANames[i]] = Builder.CreateLoad(CallResultSSATemps[i]);
+
+ // Give the backend a chance to upgrade the inline asm to LLVM code. This
+ // handles some common cases that LLVM has intrinsics for, e.g. x86 bswap ->
+ // llvm.bswap.
+ if (const TargetLowering *TLI = TheTarget->getTargetLowering())
+ TLI->ExpandInlineAsm(CV);
+
+ if (NumChoices>1)
+ FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+}
+
+void TreeToLLVM::RenderGIMPLE_ASSIGN(gimple stmt) {
+ tree lhs = gimple_assign_lhs(stmt);
+ if (AGGREGATE_TYPE_P(TREE_TYPE(lhs))) {
+ LValue LV = EmitLV(lhs);
+ MemRef NewLoc(LV.Ptr, LV.getAlignment(), TREE_THIS_VOLATILE(lhs));
+ // TODO: This case can presumably only happen with special gimple
+ // assign right-hand-sides. Try to simplify by exploiting this.
+ EmitGimpleAssignRHS(stmt, &NewLoc);
+ return;
+ }
+ WriteScalarToLHS(lhs, EmitGimpleAssignRHS(stmt, 0));
+}
+
+void TreeToLLVM::RenderGIMPLE_CALL(gimple stmt) {
+ tree lhs = gimple_call_lhs(stmt);
+ if (!lhs) {
+ // The returned value is not used.
+ if (!AGGREGATE_TYPE_P(gimple_call_return_type(stmt))) {
+ EmitGimpleCallRHS(stmt, 0);
+ return;
+ }
+ // Create a temporary to hold the returned value.
+ // TODO: Figure out how to avoid creating this temporary and the
+ // associated useless code that stores the returned value into it.
+ MemRef Loc = CreateTempLoc(ConvertType(gimple_call_return_type(stmt)));
+ EmitGimpleCallRHS(stmt, &Loc);
+ return;
+ }
+
+ if (AGGREGATE_TYPE_P(TREE_TYPE(lhs))) {
+ LValue LV = EmitLV(lhs);
+ MemRef NewLoc(LV.Ptr, LV.getAlignment(), TREE_THIS_VOLATILE(lhs));
+ EmitGimpleCallRHS(stmt, &NewLoc);
+ return;
+ }
+ WriteScalarToLHS(lhs, EmitGimpleCallRHS(stmt, 0));
+}
+
+void TreeToLLVM::RenderGIMPLE_COND(gimple stmt) {
+ // Emit the comparison.
+ Value *Cond = EmitCompare(gimple_cond_lhs(stmt), gimple_cond_rhs(stmt),
+ gimple_cond_code(stmt));
+
+ // Extract the target basic blocks.
+ edge true_edge, false_edge;
+ extract_true_false_edges_from_block(gimple_bb(stmt), &true_edge, &false_edge);
+ BasicBlock *IfTrue = getBasicBlock(true_edge->dest);
+ BasicBlock *IfFalse = getBasicBlock(false_edge->dest);
+
+ // Branch based on the condition.
+ Builder.CreateCondBr(Cond, IfTrue, IfFalse);
+}
+
+void TreeToLLVM::RenderGIMPLE_GOTO(gimple stmt) {
+ tree dest = gimple_goto_dest(stmt);
+
+ if (TREE_CODE(dest) == LABEL_DECL) {
+ // Direct branch.
+ Builder.CreateBr(getLabelDeclBlock(dest));
+ return;
+ }
+
+ // Otherwise we have an indirect goto.
+ BasicBlock *DestBB = getIndirectGotoBlock();
+
+ // Store the destination block to the GotoValue alloca.
+ Value *V = Builder.CreatePtrToInt(Emit(dest, 0), TD.getIntPtrType(Context));
+ Builder.CreateStore(V, IndirectGotoValue);
+
+ // FIXME: This is HORRIBLY INCORRECT in the presence of exception handlers.
+ // There should be one collector block per cleanup level!
+ Builder.CreateBr(DestBB);
+}
+
+void TreeToLLVM::RenderGIMPLE_RESX(gimple stmt) {
+  abort();
+//FIXME int RegionNo = gimple_resx_region(stmt);
+//FIXME std::vector<eh_region> Handlers;
+//FIXME
+//FIXME foreach_reachable_handler(RegionNo, true, false, AddHandler, &Handlers);
+//FIXME
+//FIXME if (!Handlers.empty()) {
+//FIXME for (std::vector<eh_region>::iterator I = Handlers.begin(),
+//FIXME E = Handlers.end(); I != E; ++I)
+//FIXME // Create a post landing pad for the handler.
+//FIXME getPostPad(get_eh_region_number(*I));
+//FIXME
+//FIXME Builder.CreateBr(getPostPad(get_eh_region_number(*Handlers.begin())));
+//FIXME } else {
+//FIXME assert(can_throw_external_1(RegionNo, true, false) &&
+//FIXME "Must-not-throw region handled by runtime?");
+//FIXME // Unwinding continues in the caller.
+//FIXME if (!UnwindBB)
+//FIXME UnwindBB = BasicBlock::Create(Context, "Unwind");
+//FIXME Builder.CreateBr(UnwindBB);
+//FIXME }
+}
+
+void TreeToLLVM::RenderGIMPLE_RETURN(gimple stmt) {
+ tree retval = gimple_return_retval(stmt);
+ tree result = DECL_RESULT(current_function_decl);
+
+ if (retval && retval != error_mark_node && retval != result) {
+ // Store the return value to the function's DECL_RESULT.
+ if (AGGREGATE_TYPE_P(TREE_TYPE(result))) {
+ MemRef DestLoc(DECL_LOCAL(result), 1, false); // FIXME: What alignment?
+ Emit(retval, &DestLoc);
+ } else {
+ Value *Val = Builder.CreateBitCast(Emit(retval, 0),
+ ConvertType(TREE_TYPE(result)));
+ Builder.CreateStore(Val, DECL_LOCAL(result));
+ }
+ }
+
+ // Emit a branch to the exit label.
+ Builder.CreateBr(ReturnBB);
+}
+
+void TreeToLLVM::RenderGIMPLE_SWITCH(gimple stmt) {
+ // Emit the condition.
+ Value *Index = Emit(gimple_switch_index(stmt), 0);
+ bool IndexIsSigned = !TYPE_UNSIGNED(TREE_TYPE(gimple_switch_index(stmt)));
+
+ // Create the switch instruction.
+ tree default_label = CASE_LABEL(gimple_switch_label(stmt, 0));
+ SwitchInst *SI = Builder.CreateSwitch(Index, getLabelDeclBlock(default_label),
+ gimple_switch_num_labels(stmt));
+
+ // Add the switch cases.
+ BasicBlock *IfBlock = 0; // Set if a range was output as an "if".
+ for (size_t i = 1, e = gimple_switch_num_labels(stmt); i != e; ++i) {
+ tree label = gimple_switch_label(stmt, i);
+ BasicBlock *Dest = getLabelDeclBlock(CASE_LABEL(label));
+
+ // Convert the integer to the right type.
+ Value *Val = Emit(CASE_LOW(label), 0);
+ Val = CastToAnyType(Val, !TYPE_UNSIGNED(TREE_TYPE(CASE_LOW(label))),
+ Index->getType(), IndexIsSigned);
+ ConstantInt *LowC = cast<ConstantInt>(Val);
+
+ if (!CASE_HIGH(label)) {
+ SI->addCase(LowC, Dest); // Single destination.
+ continue;
+ }
+
+ // Otherwise, we have a range, like 'case 1 ... 17'.
+ Val = Emit(CASE_HIGH(label), 0);
+ // Make sure the case value is the same type as the switch expression
+ Val = CastToAnyType(Val, !TYPE_UNSIGNED(TREE_TYPE(CASE_HIGH(label))),
+ Index->getType(), IndexIsSigned);
+ ConstantInt *HighC = cast<ConstantInt>(Val);
+
+ APInt Range = HighC->getValue() - LowC->getValue();
+ if (Range.ult(APInt(Range.getBitWidth(), 64))) {
+ // Add all of the necessary successors to the switch.
+ APInt CurrentValue = LowC->getValue();
+ while (1) {
+ SI->addCase(LowC, Dest);
+ if (LowC == HighC) break; // Emitted the last one.
+ CurrentValue++;
+ LowC = ConstantInt::get(Context, CurrentValue);
+ }
+ } else {
+ // The range is too big to add to the switch - emit an "if".
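+      // Roughly, for 'case 1 ... 100' (whose range of 99 is not below the 64
+      // limit checked above) this emits:
+      //   %diff = sub %Index, 1
+      //   br i1 (icmp ule %diff, 99), label %dest, label %next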
+ if (!IfBlock) {
+ IfBlock = BasicBlock::Create(Context);
+ EmitBlock(IfBlock);
+ }
+ Value *Diff = Builder.CreateSub(Index, LowC);
+ Value *Cond = Builder.CreateICmpULE(Diff,
+ ConstantInt::get(Context, Range));
+ BasicBlock *False_Block = BasicBlock::Create(Context);
+ Builder.CreateCondBr(Cond, Dest, False_Block);
+ EmitBlock(False_Block);
+ }
+ }
+
+ if (IfBlock) {
+ Builder.CreateBr(SI->getDefaultDest());
+ SI->setSuccessor(0, IfBlock);
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// ... Constant Expressions ...
+//===----------------------------------------------------------------------===//
+
+/// EmitCONSTRUCTOR - emit the constructor into the location specified by
+/// DestLoc.
+Value *TreeToLLVM::EmitCONSTRUCTOR(tree exp, const MemRef *DestLoc) {
+ tree type = TREE_TYPE(exp);
+ const Type *Ty = ConvertType(type);
+ if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+ assert(DestLoc == 0 && "Dest location for vector value?");
+ std::vector<Value *> BuildVecOps;
+ BuildVecOps.reserve(VTy->getNumElements());
+
+ // Insert all of the elements here.
+ unsigned HOST_WIDE_INT idx;
+ tree value;
+ FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value) {
+ Value *Elt = Emit(value, 0);
+
+ if (const VectorType *EltTy = dyn_cast<VectorType>(Elt->getType())) {
+ // GCC allows vectors to be built up from vectors. Extract all of the
+ // vector elements and add them to the list of build vector operands.
+ for (unsigned i = 0, e = EltTy->getNumElements(); i != e; ++i) {
+ Value *Index = ConstantInt::get(llvm::Type::getInt32Ty(Context), i);
+ BuildVecOps.push_back(Builder.CreateExtractElement(Elt, Index));
+ }
+ } else {
+ assert(Elt->getType() == VTy->getElementType() &&
+ "Unexpected type for vector constructor!");
+ BuildVecOps.push_back(Elt);
+ }
+ }
+
+ // Insert zero for any unspecified values.
+ while (BuildVecOps.size() < VTy->getNumElements())
+ BuildVecOps.push_back(Constant::getNullValue(VTy->getElementType()));
+ assert(BuildVecOps.size() == VTy->getNumElements() &&
+ "Vector constructor specified too many values!");
+
+ return BuildVector(BuildVecOps);
+ }
+
+ assert(!Ty->isSingleValueType() && "Constructor for scalar type??");
+
+ // Start out with the value zero'd out.
+ EmitAggregateZero(*DestLoc, type);
+
+ VEC(constructor_elt, gc) *elt = CONSTRUCTOR_ELTS(exp);
+ switch (TREE_CODE(TREE_TYPE(exp))) {
+ case ARRAY_TYPE:
+ case RECORD_TYPE:
+ default:
+ if (elt && VEC_length(constructor_elt, elt)) {
+ // We don't handle elements yet.
+
+ TODO(exp);
+ }
+ return 0;
+ case QUAL_UNION_TYPE:
+ case UNION_TYPE:
+ // Store each element of the constructor into the corresponding field of
+ // DEST.
+ if (!elt || VEC_empty(constructor_elt, elt)) return 0; // no elements
+ assert(VEC_length(constructor_elt, elt) == 1
+ && "Union CONSTRUCTOR should have one element!");
+ tree tree_purpose = VEC_index(constructor_elt, elt, 0)->index;
+ tree tree_value = VEC_index(constructor_elt, elt, 0)->value;
+ if (!tree_purpose)
+ return 0; // Not actually initialized?
+
+ if (!ConvertType(TREE_TYPE(tree_purpose))->isSingleValueType()) {
+ Value *V = Emit(tree_value, DestLoc);
+ (void)V;
+ assert(V == 0 && "Aggregate value returned in a register?");
+ } else {
+ // Scalar value. Evaluate to a register, then do the store.
+ Value *V = Emit(tree_value, 0);
+ Value *Ptr = Builder.CreateBitCast(DestLoc->Ptr,
+ PointerType::getUnqual(V->getType()));
+ StoreInst *St = Builder.CreateStore(V, Ptr, DestLoc->Volatile);
+ St->setAlignment(DestLoc->getAlignment());
+ }
+ break;
+ }
+ return 0;
+}
+
+Constant *TreeConstantToLLVM::Convert(tree exp) {
+ assert((TREE_CONSTANT(exp) || TREE_CODE(exp) == STRING_CST) &&
+ "Isn't a constant!");
+ switch (TREE_CODE(exp)) {
+ case FDESC_EXPR: // Needed on itanium
+ default:
+ debug_tree(exp);
+ assert(0 && "Unknown constant to convert!");
+ abort();
+ case INTEGER_CST: return ConvertINTEGER_CST(exp);
+ case REAL_CST: return ConvertREAL_CST(exp);
+ case VECTOR_CST: return ConvertVECTOR_CST(exp);
+ case STRING_CST: return ConvertSTRING_CST(exp);
+ case COMPLEX_CST: return ConvertCOMPLEX_CST(exp);
+ case NOP_EXPR: return ConvertNOP_EXPR(exp);
+ case CONVERT_EXPR: return ConvertCONVERT_EXPR(exp);
+ case PLUS_EXPR:
+ case MINUS_EXPR: return ConvertBinOp_CST(exp);
+ case CONSTRUCTOR: return ConvertCONSTRUCTOR(exp);
+ case VIEW_CONVERT_EXPR: return Convert(TREE_OPERAND(exp, 0));
+ case POINTER_PLUS_EXPR: return ConvertPOINTER_PLUS_EXPR(exp);
+ case ADDR_EXPR:
+ return TheFolder->CreateBitCast(EmitLV(TREE_OPERAND(exp, 0)),
+ ConvertType(TREE_TYPE(exp)));
+ }
+}
+
+Constant *TreeConstantToLLVM::ConvertINTEGER_CST(tree exp) {
+ const Type *Ty = ConvertType(TREE_TYPE(exp));
+
+ // Handle i128 specially.
+ if (const IntegerType *IT = dyn_cast<IntegerType>(Ty)) {
+ if (IT->getBitWidth() == 128) {
+ // GCC only supports i128 on 64-bit systems.
+ assert(HOST_BITS_PER_WIDE_INT == 64 &&
+ "i128 only supported on 64-bit system");
+ uint64_t Bits[] = { TREE_INT_CST_LOW(exp), TREE_INT_CST_HIGH(exp) };
+ return ConstantInt::get(Context, APInt(128, 2, Bits));
+ }
+ }
+
+ // Build the value as a ulong constant, then constant fold it to the right
+ // type. This handles overflow and other things appropriately.
+ uint64_t IntValue = getINTEGER_CSTVal(exp);
+ ConstantInt *C = ConstantInt::get(Type::getInt64Ty(Context), IntValue);
+ // The destination type can be a pointer, integer or floating point
+ // so we need a generalized cast here
+ Instruction::CastOps opcode = CastInst::getCastOpcode(C, false, Ty,
+ !TYPE_UNSIGNED(TREE_TYPE(exp)));
+ return TheFolder->CreateCast(opcode, C, Ty);
+}
+
+Constant *TreeConstantToLLVM::ConvertREAL_CST(tree exp) {
+ const Type *Ty = ConvertType(TREE_TYPE(exp));
+ assert(Ty->isFloatingPoint() && "Integer REAL_CST?");
+ long RealArr[2];
+ union {
+ int UArr[2];
+ double V;
+ };
+ if (Ty->isFloatTy() || Ty->isDoubleTy()) {
+ REAL_VALUE_TO_TARGET_DOUBLE(TREE_REAL_CST(exp), RealArr);
+
+ // Here's how this works:
+ // REAL_VALUE_TO_TARGET_DOUBLE() will generate the floating point number
+ // as an array of integers in the target's representation. Each integer
+ // in the array will hold 32 bits of the result REGARDLESS OF THE HOST'S
+ // INTEGER SIZE.
+ //
+    // This, then, makes the conversion pretty simple. The tricky part is
+    // getting the byte ordering correct and making sure you don't print any
+    // more than 32 bits per integer on platforms with ints > 32 bits.
+ //
+ // We want to switch the words of UArr if host and target endianness
+ // do not match. FLOAT_WORDS_BIG_ENDIAN describes the target endianness.
+    // The host's endianness used to be available in HOST_WORDS_BIG_ENDIAN,
+    // but the gcc maintainers removed this in a fit of cleanliness between
+    // 4.0 and 4.2. llvm::sys has a substitute.
+
+ UArr[0] = RealArr[0]; // Long -> int convert
+ UArr[1] = RealArr[1];
+
+ if (llvm::sys::isBigEndianHost() != FLOAT_WORDS_BIG_ENDIAN)
+ std::swap(UArr[0], UArr[1]);
+
+ return
+ ConstantFP::get(Context, Ty->isFloatTy() ? APFloat((float)V) : APFloat(V));
+ } else if (Ty->isX86_FP80Ty()) {
+ long RealArr[4];
+ uint64_t UArr[2];
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE(TREE_REAL_CST(exp), RealArr);
+ UArr[0] = ((uint64_t)((uint32_t)RealArr[0])) |
+ ((uint64_t)((uint32_t)RealArr[1]) << 32);
+ UArr[1] = (uint16_t)RealArr[2];
+ return ConstantFP::get(Context, APFloat(APInt(80, 2, UArr)));
+ } else if (Ty->isPPC_FP128Ty()) {
+ long RealArr[4];
+ uint64_t UArr[2];
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE(TREE_REAL_CST(exp), RealArr);
+
+ UArr[0] = ((uint64_t)((uint32_t)RealArr[0]) << 32) |
+ ((uint64_t)((uint32_t)RealArr[1]));
+ UArr[1] = ((uint64_t)((uint32_t)RealArr[2]) << 32) |
+ ((uint64_t)((uint32_t)RealArr[3]));
+ return ConstantFP::get(Context, APFloat(APInt(128, 2, UArr)));
+ }
+ assert(0 && "Floating point type not handled yet");
+ return 0; // outwit compiler warning
+}
+
+Constant *TreeConstantToLLVM::ConvertVECTOR_CST(tree exp) {
+ if (!TREE_VECTOR_CST_ELTS(exp))
+ return Constant::getNullValue(ConvertType(TREE_TYPE(exp)));
+
+ std::vector<Constant*> Elts;
+ for (tree elt = TREE_VECTOR_CST_ELTS(exp); elt; elt = TREE_CHAIN(elt))
+ Elts.push_back(Convert(TREE_VALUE(elt)));
+
+ // The vector should be zero filled if insufficient elements are provided.
+ if (Elts.size() < TYPE_VECTOR_SUBPARTS(TREE_TYPE(exp))) {
+ tree EltType = TREE_TYPE(TREE_TYPE(exp));
+ Constant *Zero = Constant::getNullValue(ConvertType(EltType));
+ while (Elts.size() < TYPE_VECTOR_SUBPARTS(TREE_TYPE(exp)))
+ Elts.push_back(Zero);
+ }
+
+ return ConstantVector::get(Elts);
+}
+
+Constant *TreeConstantToLLVM::ConvertSTRING_CST(tree exp) {
+ const ArrayType *StrTy = cast<ArrayType>(ConvertType(TREE_TYPE(exp)));
+ const Type *ElTy = StrTy->getElementType();
+
+ unsigned Len = (unsigned)TREE_STRING_LENGTH(exp);
+
+ std::vector<Constant*> Elts;
+ if (ElTy == Type::getInt8Ty(Context)) {
+ const unsigned char *InStr =(const unsigned char *)TREE_STRING_POINTER(exp);
+ for (unsigned i = 0; i != Len; ++i)
+ Elts.push_back(ConstantInt::get(Type::getInt8Ty(Context), InStr[i]));
+ } else if (ElTy == Type::getInt16Ty(Context)) {
+ assert((Len&1) == 0 &&
+ "Length in bytes should be a multiple of element size");
+ const uint16_t *InStr =
+ (const unsigned short *)TREE_STRING_POINTER(exp);
+ for (unsigned i = 0; i != Len/2; ++i) {
+ // gcc has constructed the initializer elements in the target endianness,
+ // but we're going to treat them as ordinary shorts from here, with
+ // host endianness. Adjust if necessary.
+ if (llvm::sys::isBigEndianHost() == BYTES_BIG_ENDIAN)
+ Elts.push_back(ConstantInt::get(Type::getInt16Ty(Context), InStr[i]));
+ else
+ Elts.push_back(ConstantInt::get(Type::getInt16Ty(Context), ByteSwap_16(InStr[i])));
+ }
+ } else if (ElTy == Type::getInt32Ty(Context)) {
+ assert((Len&3) == 0 &&
+ "Length in bytes should be a multiple of element size");
+ const uint32_t *InStr = (const uint32_t *)TREE_STRING_POINTER(exp);
+ for (unsigned i = 0; i != Len/4; ++i) {
+ // gcc has constructed the initializer elements in the target endianness,
+ // but we're going to treat them as ordinary ints from here, with
+ // host endianness. Adjust if necessary.
+ if (llvm::sys::isBigEndianHost() == BYTES_BIG_ENDIAN)
+ Elts.push_back(ConstantInt::get(Type::getInt32Ty(Context), InStr[i]));
+ else
+ Elts.push_back(ConstantInt::get(Type::getInt32Ty(Context), ByteSwap_32(InStr[i])));
+ }
+ } else {
+ assert(0 && "Unknown character type!");
+ }
+
+ unsigned LenInElts = Len /
+ TREE_INT_CST_LOW(TYPE_SIZE_UNIT(TREE_TYPE(TREE_TYPE(exp))));
+ unsigned ConstantSize = StrTy->getNumElements();
+
+ if (LenInElts != ConstantSize) {
+ // If this is a variable sized array type, set the length to LenInElts.
+ if (ConstantSize == 0) {
+ tree Domain = TYPE_DOMAIN(TREE_TYPE(exp));
+ if (!Domain || !TYPE_MAX_VALUE(Domain)) {
+ ConstantSize = LenInElts;
+ StrTy = ArrayType::get(ElTy, LenInElts);
+ }
+ }
+
+ if (ConstantSize < LenInElts) {
+ // Only some chars are being used, truncate the string: char X[2] = "foo";
+ Elts.resize(ConstantSize);
+ } else {
+ // Fill the end of the string with nulls.
+ Constant *C = Constant::getNullValue(ElTy);
+ for (; LenInElts != ConstantSize; ++LenInElts)
+ Elts.push_back(C);
+ }
+ }
+ return ConstantArray::get(StrTy, Elts);
+}
+
+Constant *TreeConstantToLLVM::ConvertCOMPLEX_CST(tree exp) {
+ Constant *Elts[2] = {
+ Convert(TREE_REALPART(exp)),
+ Convert(TREE_IMAGPART(exp))
+ };
+ return ConstantStruct::get(Context, Elts, 2, false);
+}
+
+Constant *TreeConstantToLLVM::ConvertNOP_EXPR(tree exp) {
+ Constant *Elt = Convert(TREE_OPERAND(exp, 0));
+ const Type *Ty = ConvertType(TREE_TYPE(exp));
+ bool EltIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
+ bool TyIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
+
+ // If this is a structure-to-structure cast, just return the uncasted value.
+ if (!Elt->getType()->isSingleValueType() || !Ty->isSingleValueType())
+ return Elt;
+
+ // Elt and Ty can be integer, float or pointer here: need generalized cast
+ Instruction::CastOps opcode = CastInst::getCastOpcode(Elt, EltIsSigned,
+ Ty, TyIsSigned);
+ return TheFolder->CreateCast(opcode, Elt, Ty);
+}
+
+Constant *TreeConstantToLLVM::ConvertCONVERT_EXPR(tree exp) {
+ Constant *Elt = Convert(TREE_OPERAND(exp, 0));
+ bool EltIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
+ const Type *Ty = ConvertType(TREE_TYPE(exp));
+ bool TyIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
+ Instruction::CastOps opcode = CastInst::getCastOpcode(Elt, EltIsSigned, Ty,
+ TyIsSigned);
+ return TheFolder->CreateCast(opcode, Elt, Ty);
+}
+
+Constant *TreeConstantToLLVM::ConvertPOINTER_PLUS_EXPR(tree exp) {
+ Constant *Ptr = Convert(TREE_OPERAND(exp, 0)); // The pointer.
+ Constant *Idx = Convert(TREE_OPERAND(exp, 1)); // The offset in bytes.
+
+ // Convert the pointer into an i8* and add the offset to it.
+ Ptr = TheFolder->CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
+ Constant *GEP = POINTER_TYPE_OVERFLOW_UNDEFINED ?
+ TheFolder->CreateInBoundsGetElementPtr(Ptr, &Idx, 1) :
+ TheFolder->CreateGetElementPtr(Ptr, &Idx, 1);
+
+ // The result may be of a different pointer type.
+ return TheFolder->CreateBitCast(GEP, ConvertType(TREE_TYPE(exp)));
+}
+
+Constant *TreeConstantToLLVM::ConvertBinOp_CST(tree exp) {
+ Constant *LHS = Convert(TREE_OPERAND(exp, 0));
+ bool LHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp,0)));
+ Constant *RHS = Convert(TREE_OPERAND(exp, 1));
+ bool RHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp,1)));
+ Instruction::CastOps opcode;
+ if (isa<PointerType>(LHS->getType())) {
+ const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
+ opcode = CastInst::getCastOpcode(LHS, LHSIsSigned, IntPtrTy, false);
+ LHS = TheFolder->CreateCast(opcode, LHS, IntPtrTy);
+ opcode = CastInst::getCastOpcode(RHS, RHSIsSigned, IntPtrTy, false);
+ RHS = TheFolder->CreateCast(opcode, RHS, IntPtrTy);
+ }
+
+ Constant *Result;
+ switch (TREE_CODE(exp)) {
+ default: assert(0 && "Unexpected case!");
+ case PLUS_EXPR: Result = TheFolder->CreateAdd(LHS, RHS); break;
+ case MINUS_EXPR: Result = TheFolder->CreateSub(LHS, RHS); break;
+ }
+
+ const Type *Ty = ConvertType(TREE_TYPE(exp));
+ bool TyIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
+ opcode = CastInst::getCastOpcode(Result, LHSIsSigned, Ty, TyIsSigned);
+ return TheFolder->CreateCast(opcode, Result, Ty);
+}
+
+Constant *TreeConstantToLLVM::ConvertCONSTRUCTOR(tree exp) {
+  // Note that we can have an empty ctor even if the array is non-trivial
+  // (has a nonzero number of entries). This situation is typical for static
+  // ctors, where the array is filled during program initialization.
+ if (CONSTRUCTOR_ELTS(exp) == 0 ||
+ VEC_length(constructor_elt, CONSTRUCTOR_ELTS(exp)) == 0) // All zeros?
+ return Constant::getNullValue(ConvertType(TREE_TYPE(exp)));
+
+ switch (TREE_CODE(TREE_TYPE(exp))) {
+ default:
+ debug_tree(exp);
+ assert(0 && "Unknown ctor!");
+ case VECTOR_TYPE:
+ case ARRAY_TYPE: return ConvertArrayCONSTRUCTOR(exp);
+ case RECORD_TYPE: return ConvertRecordCONSTRUCTOR(exp);
+ case QUAL_UNION_TYPE:
+ case UNION_TYPE: return ConvertUnionCONSTRUCTOR(exp);
+ }
+}
+
+Constant *TreeConstantToLLVM::ConvertArrayCONSTRUCTOR(tree exp) {
+ // Vectors are like arrays, but the domain is stored via an array
+ // type indirectly.
+
+ // If we have a lower bound for the range of the type, get it.
+ tree InitType = TREE_TYPE(exp);
+ tree min_element = size_zero_node;
+ std::vector<Constant*> ResultElts;
+
+ if (TREE_CODE(InitType) == VECTOR_TYPE) {
+ ResultElts.resize(TYPE_VECTOR_SUBPARTS(InitType));
+ } else {
+ assert(TREE_CODE(InitType) == ARRAY_TYPE && "Unknown type for init");
+ tree Domain = TYPE_DOMAIN(InitType);
+ if (Domain && TYPE_MIN_VALUE(Domain))
+ min_element = fold_convert(sizetype, TYPE_MIN_VALUE(Domain));
+
+ if (Domain && TYPE_MAX_VALUE(Domain)) {
+ tree max_element = fold_convert(sizetype, TYPE_MAX_VALUE(Domain));
+ tree size = size_binop (MINUS_EXPR, max_element, min_element);
+ size = size_binop (PLUS_EXPR, size, size_one_node);
+
+ if (host_integerp(size, 1))
+ ResultElts.resize(tree_low_cst(size, 1));
+ }
+ }
+
+ unsigned NextFieldToFill = 0;
+ unsigned HOST_WIDE_INT ix;
+ tree elt_index, elt_value;
+ Constant *SomeVal = 0;
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), ix, elt_index, elt_value) {
+ // Find and decode the constructor's value.
+ Constant *Val = Convert(elt_value);
+ SomeVal = Val;
+
+ // Get the index position of the element within the array. Note that this
+ // can be NULL_TREE, which means that it belongs in the next available slot.
+ tree index = elt_index;
+
+ // The first and last field to fill in, inclusive.
+ unsigned FieldOffset, FieldLastOffset;
+ if (index && TREE_CODE(index) == RANGE_EXPR) {
+ tree first = fold_convert (sizetype, TREE_OPERAND(index, 0));
+ tree last = fold_convert (sizetype, TREE_OPERAND(index, 1));
+
+ first = size_binop (MINUS_EXPR, first, min_element);
+ last = size_binop (MINUS_EXPR, last, min_element);
+
+ assert(host_integerp(first, 1) && host_integerp(last, 1) &&
+ "Unknown range_expr!");
+ FieldOffset = tree_low_cst(first, 1);
+ FieldLastOffset = tree_low_cst(last, 1);
+ } else if (index) {
+ index = size_binop (MINUS_EXPR, fold_convert (sizetype, index),
+ min_element);
+ assert(host_integerp(index, 1));
+ FieldOffset = tree_low_cst(index, 1);
+ FieldLastOffset = FieldOffset;
+ } else {
+ FieldOffset = NextFieldToFill;
+ FieldLastOffset = FieldOffset;
+ }
+
+ // Process all of the elements in the range.
+ for (--FieldOffset; FieldOffset != FieldLastOffset; ) {
+ ++FieldOffset;
+ if (FieldOffset == ResultElts.size())
+ ResultElts.push_back(Val);
+ else {
+ if (FieldOffset >= ResultElts.size())
+ ResultElts.resize(FieldOffset+1);
+ ResultElts[FieldOffset] = Val;
+ }
+
+ NextFieldToFill = FieldOffset+1;
+ }
+ }
+
+ // Zero length array.
+ if (ResultElts.empty())
+ return ConstantArray::get(
+ cast<ArrayType>(ConvertType(TREE_TYPE(exp))), ResultElts);
+ assert(SomeVal && "If we had some initializer, we should have some value!");
+
+ // Do a post-pass over all of the elements. We're taking care of two things
+ // here:
+ // #1. If any elements did not have initializers specified, provide them
+ // with a null init.
+ // #2. If any of the elements have different types, return a struct instead
+ // of an array. This can occur in cases where we have an array of
+ // unions, and the various unions had different pieces init'd.
+ const Type *ElTy = SomeVal->getType();
+ Constant *Filler = Constant::getNullValue(ElTy);
+ bool AllEltsSameType = true;
+ for (unsigned i = 0, e = ResultElts.size(); i != e; ++i) {
+ if (ResultElts[i] == 0)
+ ResultElts[i] = Filler;
+ else if (ResultElts[i]->getType() != ElTy)
+ AllEltsSameType = false;
+ }
+
+ if (TREE_CODE(InitType) == VECTOR_TYPE) {
+ assert(AllEltsSameType && "Vector of heterogeneous element types?");
+ return ConstantVector::get(ResultElts);
+ }
+
+ if (AllEltsSameType)
+ return ConstantArray::get(
+ ArrayType::get(ElTy, ResultElts.size()), ResultElts);
+ return ConstantStruct::get(Context, ResultElts, false);
+}
+
+
+namespace {
+/// ConstantLayoutInfo - A helper class used by ConvertRecordCONSTRUCTOR to
+/// lay out struct inits.
+struct ConstantLayoutInfo {
+ const TargetData &TD;
+
+ /// ResultElts - The initializer elements so far.
+ std::vector<Constant*> ResultElts;
+
+ /// StructIsPacked - This is set to true if we find out that we have to emit
+ /// the ConstantStruct as a Packed LLVM struct type (because the LLVM
+ /// alignment rules would prevent laying out the struct correctly).
+ bool StructIsPacked;
+
+ /// NextFieldByteStart - This field indicates the *byte* that the next field
+ /// will start at. Put another way, this is the size of the struct as
+ /// currently laid out, but without any tail padding considered.
+ uint64_t NextFieldByteStart;
+
+ /// MaxLLVMFieldAlignment - This is the largest alignment of any IR field,
+ /// which is the alignment that the ConstantStruct will get.
+ unsigned MaxLLVMFieldAlignment;
+
+
+ ConstantLayoutInfo(const TargetData &TD) : TD(TD) {
+ StructIsPacked = false;
+ NextFieldByteStart = 0;
+ MaxLLVMFieldAlignment = 1;
+ }
+
+ void ConvertToPacked();
+ void AddFieldToRecordConstant(Constant *Val, uint64_t GCCFieldOffsetInBits);
+ void AddBitFieldToRecordConstant(ConstantInt *Val,
+ uint64_t GCCFieldOffsetInBits);
+ void HandleTailPadding(uint64_t GCCStructBitSize);
+};
+
+}
+
+/// ConvertToPacked - Given a partially constructed initializer for an LLVM
+/// struct constant, change it to make all the implicit padding between elements
+/// be fully explicit.
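+/// For example, assuming a target where i32 has 4-byte ABI alignment, a
+/// non-packed layout of { i8, i32 } gets a [3 x i8] zero filler inserted
+/// before the i32, giving the packed equivalent <{ i8, [3 x i8], i32 }> so
+/// the i32 still lands at byte offset 4.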
+void ConstantLayoutInfo::ConvertToPacked() {
+ assert(!StructIsPacked && "Struct is already packed");
+ uint64_t EltOffs = 0;
+ for (unsigned i = 0, e = ResultElts.size(); i != e; ++i) {
+ Constant *Val = ResultElts[i];
+
+ // Check to see if this element has an alignment that would cause it to get
+ // offset. If so, insert explicit padding for the offset.
+ unsigned ValAlign = TD.getABITypeAlignment(Val->getType());
+ uint64_t AlignedEltOffs = TargetData::RoundUpAlignment(EltOffs, ValAlign);
+
+ // If the alignment doesn't affect the element offset, then the value is ok.
+ // Accept the field and keep moving.
+ if (AlignedEltOffs == EltOffs) {
+ EltOffs += TD.getTypeAllocSize(Val->getType());
+ continue;
+ }
+
+ // Otherwise, there is padding here. Insert explicit zeros.
+ const Type *PadTy = Type::getInt8Ty(Context);
+ if (AlignedEltOffs-EltOffs != 1)
+ PadTy = ArrayType::get(PadTy, AlignedEltOffs-EltOffs);
+ ResultElts.insert(ResultElts.begin()+i,
+ Constant::getNullValue(PadTy));
+
+ // The padding is now element "i" and just bumped us up to "AlignedEltOffs".
+ EltOffs = AlignedEltOffs;
+ ++e; // One extra element to scan.
+ }
+
+ // Packed now!
+ MaxLLVMFieldAlignment = 1;
+ StructIsPacked = true;
+}
+
+
+/// AddFieldToRecordConstant - As ConvertRecordCONSTRUCTOR builds up an LLVM
+/// constant to represent a GCC CONSTRUCTOR node, it calls this method to add
+/// fields. The design of this is that it adds leading/trailing padding as
+/// needed to make the pieces fit together and honor the GCC layout. This does
+/// not handle bitfields.
+///
+/// The arguments are:
+/// Val: The value to add to the struct, with a size that matches the size of
+/// the corresponding GCC field.
+/// GCCFieldOffsetInBits: The offset that we have to put Val in the result.
+///
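+/// For example, if the fields laid out so far end at byte 5 and GCC places
+/// the next byte-aligned field at byte 8, a [3 x i8] zero filler is pushed
+/// first; if instead LLVM's natural placement would overshoot the GCC offset,
+/// the struct is converted to packed and the field is added again.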
+void ConstantLayoutInfo::
+AddFieldToRecordConstant(Constant *Val, uint64_t GCCFieldOffsetInBits) {
+ // Figure out how to add this non-bitfield value to our constant struct so
+ // that it ends up at the right offset. There are four cases we have to
+ // think about:
+ // 1. We may be able to just slap it onto the end of our struct and have
+ // everything be ok.
+ // 2. We may have to insert explicit padding into the LLVM struct to get
+ // the initializer over into the right space. This is needed when the
+ // GCC field has a larger alignment than the LLVM field.
+ // 3. The LLVM field may be too far over and we may be forced to convert
+ // this to an LLVM packed struct. This is required when the LLVM
+ // alignment is larger than the GCC alignment.
+ // 4. We may have a bitfield that needs to be merged into a previous
+ // field.
+ // Start by determining which case we have by looking at where LLVM and GCC
+ // would place the field.
+
+  // Verify that we haven't already laid out bytes that will overlap with
+  // this new field.
+ assert(NextFieldByteStart*8 <= GCCFieldOffsetInBits &&
+ "Overlapping LLVM fields!");
+
+ // Compute the offset the field would get if we just stuck 'Val' onto the
+ // end of our structure right now. It is NextFieldByteStart rounded up to
+ // the LLVM alignment of Val's type.
+ unsigned ValLLVMAlign = 1;
+
+ if (!StructIsPacked) { // Packed structs ignore the alignment of members.
+ ValLLVMAlign = TD.getABITypeAlignment(Val->getType());
+ MaxLLVMFieldAlignment = std::max(MaxLLVMFieldAlignment, ValLLVMAlign);
+ }
+
+ // LLVMNaturalByteOffset - This is where LLVM would drop the field if we
+ // slap it onto the end of the struct.
+ uint64_t LLVMNaturalByteOffset
+ = TargetData::RoundUpAlignment(NextFieldByteStart, ValLLVMAlign);
+
+ // If adding the LLVM field would push it over too far, then we must have a
+ // case that requires the LLVM struct to be packed. Do it now if so.
+ if (LLVMNaturalByteOffset*8 > GCCFieldOffsetInBits) {
+ // Switch to packed.
+ ConvertToPacked();
+ assert(NextFieldByteStart*8 <= GCCFieldOffsetInBits &&
+ "Packing didn't fix the problem!");
+
+ // Recurse to add the field after converting to packed.
+ return AddFieldToRecordConstant(Val, GCCFieldOffsetInBits);
+ }
+
+ // If the LLVM offset is not large enough, we need to insert explicit
+ // padding in the LLVM struct between the fields.
+ if (LLVMNaturalByteOffset*8 < GCCFieldOffsetInBits) {
+ // Insert enough padding to fully fill in the hole. Insert padding from
+ // NextFieldByteStart (not LLVMNaturalByteOffset) because the padding will
+ // not get the same alignment as "Val".
+ const Type *FillTy = Type::getInt8Ty(Context);
+ if (GCCFieldOffsetInBits/8-NextFieldByteStart != 1)
+ FillTy = ArrayType::get(FillTy,
+ GCCFieldOffsetInBits/8-NextFieldByteStart);
+ ResultElts.push_back(Constant::getNullValue(FillTy));
+
+ NextFieldByteStart = GCCFieldOffsetInBits/8;
+
+ // Recurse to add the field. This handles the case when the LLVM struct
+ // needs to be converted to packed after inserting tail padding.
+ return AddFieldToRecordConstant(Val, GCCFieldOffsetInBits);
+ }
+
+ // Slap 'Val' onto the end of our ConstantStruct, it must be known to land
+ // at the right offset now.
+ assert(LLVMNaturalByteOffset*8 == GCCFieldOffsetInBits);
+ ResultElts.push_back(Val);
+ NextFieldByteStart = LLVMNaturalByteOffset;
+ NextFieldByteStart += TD.getTypeAllocSize(Val->getType());
+}
+
+/// AddBitFieldToRecordConstant - Bitfields can span multiple LLVM fields and
+/// have other annoying properties, thus requiring extra layout rules. This
+/// routine handles the extra complexity and then forwards to
+/// AddFieldToRecordConstant.
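+/// As a little-endian sketch: a 16-bit bitfield starting on a byte boundary
+/// with value 0xABCD is appended as two i8 elements, 0xCD then 0xAB; on a
+/// big-endian target the high byte 0xAB would be emitted first.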
+void ConstantLayoutInfo::
+AddBitFieldToRecordConstant(ConstantInt *ValC, uint64_t GCCFieldOffsetInBits) {
+ // If the GCC field starts after our current LLVM field then there must have
+ // been an anonymous bitfield or other thing that shoved it over. No matter,
+ // just insert some i8 padding until there are bits to fill in.
+ while (GCCFieldOffsetInBits > NextFieldByteStart*8) {
+ ResultElts.push_back(ConstantInt::get(Type::getInt8Ty(Context), 0));
+ ++NextFieldByteStart;
+ }
+
+ // If the field is a bitfield, it could partially go in a previously
+ // laid out structure member, and may add elements to the end of the currently
+ // laid out structure.
+ //
+ // Since bitfields can only partially overlap other bitfields, because we
+ // always emit components of bitfields as i8, and because we never emit tail
+ // padding until we know it exists, this boils down to merging pieces of the
+ // bitfield values into i8's. This is also simplified by the fact that
+ // bitfields can only be initialized by ConstantInts. An interesting case is
+ // sharing of tail padding in C++ structures. Because this can only happen
+ // in inheritance cases, and those are non-POD, we should never see them here.
+
+ // First handle any part of Val that overlaps an already laid out field by
+ // merging it into it. By the above invariants, we know that it is an i8 that
+ // we are merging into. Note that we may be inserting *all* of Val into the
+ // previous field.
+ if (GCCFieldOffsetInBits < NextFieldByteStart*8) {
+ unsigned ValBitSize = ValC->getBitWidth();
+ assert(!ResultElts.empty() && "Bitfield starts before first element?");
+ assert(ResultElts.back()->getType() == Type::getInt8Ty(Context) &&
+ isa<ConstantInt>(ResultElts.back()) &&
+ "Merging bitfield with non-bitfield value?");
+ assert(NextFieldByteStart*8 - GCCFieldOffsetInBits < 8 &&
+ "Bitfield overlaps backwards more than one field?");
+
+ // Figure out how many bits can fit into the previous field given the
+ // starting point in that field.
+ unsigned BitsInPreviousField =
+ unsigned(NextFieldByteStart*8 - GCCFieldOffsetInBits);
+ assert(BitsInPreviousField != 0 && "Previous field should not be null!");
+
+ // Split the bits that will be inserted into the previous element out of
+ // Val into a new constant. If Val is completely contained in the previous
+ // element, this sets Val to null, otherwise we shrink Val to contain the
+ // bits to insert in the next element.
+ APInt ValForPrevField(ValC->getValue());
+ if (BitsInPreviousField >= ValBitSize) {
+ // The whole field fits into the previous field.
+ ValC = 0;
+ } else if (!BYTES_BIG_ENDIAN) {
+ // Little endian, take bits from the bottom of the field value.
+ ValForPrevField.trunc(BitsInPreviousField);
+ APInt Tmp = ValC->getValue();
+ Tmp = Tmp.lshr(BitsInPreviousField);
+ Tmp = Tmp.trunc(ValBitSize-BitsInPreviousField);
+ ValC = ConstantInt::get(Context, Tmp);
+ } else {
+ // Big endian, take bits from the top of the field value.
+ ValForPrevField = ValForPrevField.lshr(ValBitSize-BitsInPreviousField);
+ ValForPrevField.trunc(BitsInPreviousField);
+
+ APInt Tmp = ValC->getValue();
+ Tmp = Tmp.trunc(ValBitSize-BitsInPreviousField);
+ ValC = ConstantInt::get(Context, Tmp);
+ }
+
+ // Okay, we're going to insert ValForPrevField into the previous i8, extend
+ // it and shift into place.
+ ValForPrevField.zext(8);
+ if (!BYTES_BIG_ENDIAN) {
+ ValForPrevField = ValForPrevField.shl(8-BitsInPreviousField);
+ } else {
+ // On big endian, if the entire field fits into the remaining space, shift
+ // over to not take part of the next field's bits.
+ if (BitsInPreviousField > ValBitSize)
+ ValForPrevField = ValForPrevField.shl(BitsInPreviousField-ValBitSize);
+ }
+
+ // "or" in the previous value and install it.
+ const APInt &LastElt = cast<ConstantInt>(ResultElts.back())->getValue();
+ ResultElts.back() = ConstantInt::get(Context, ValForPrevField | LastElt);
+
+ // If the whole bit-field fit into the previous field, we're done.
+ if (ValC == 0) return;
+ GCCFieldOffsetInBits = NextFieldByteStart*8;
+ }
+
+ APInt Val = ValC->getValue();
+
+ // Okay, we know that we're plopping bytes onto the end of the struct.
+ // Iterate while there is stuff to do.
+ while (1) {
+ ConstantInt *ValToAppend;
+ if (Val.getBitWidth() > 8) {
+ if (!BYTES_BIG_ENDIAN) {
+ // Little endian lays out low bits first.
+ APInt Tmp = Val;
+ Tmp.trunc(8);
+ ValToAppend = ConstantInt::get(Context, Tmp);
+
+ Val = Val.lshr(8);
+ } else {
+ // Big endian lays out high bits first.
+ APInt Tmp = Val;
+ Tmp = Tmp.lshr(Tmp.getBitWidth()-8);
+ Tmp.trunc(8);
+ ValToAppend = ConstantInt::get(Context, Tmp);
+ }
+ } else if (Val.getBitWidth() == 8) {
+ ValToAppend = ConstantInt::get(Context, Val);
+ } else {
+ APInt Tmp = Val;
+ Tmp.zext(8);
+
+ if (BYTES_BIG_ENDIAN)
+ Tmp = Tmp << 8-Val.getBitWidth();
+ ValToAppend = ConstantInt::get(Context, Tmp);
+ }
+
+ ResultElts.push_back(ValToAppend);
+ ++NextFieldByteStart;
+
+ if (Val.getBitWidth() <= 8)
+ break;
+ Val.trunc(Val.getBitWidth()-8);
+ }
+}
+
+
+/// HandleTailPadding - Check to see if the struct fields, as laid out so far,
+/// will be large enough to make the generated constant struct have the right
+/// size. If not, add explicit tail padding. If rounding up based on the LLVM
+/// IR alignment would make the struct too large, convert it to a packed LLVM
+/// struct.
+void ConstantLayoutInfo::HandleTailPadding(uint64_t GCCStructBitSize) {
+ uint64_t GCCStructSize = (GCCStructBitSize+7)/8;
+ uint64_t LLVMNaturalSize =
+ TargetData::RoundUpAlignment(NextFieldByteStart, MaxLLVMFieldAlignment);
+
+ // If the total size of the laid out data is within the size of the GCC type
+ // but the rounded-up size (including the tail padding induced by LLVM
+ // alignment) is too big, convert to a packed struct type. We don't do this
+ // if the size of the laid out fields is too large because initializers like
+ //
+ // struct X { int A; char C[]; } x = { 4, "foo" };
+ //
+ // can occur and no amount of packing will help.
+ if (NextFieldByteStart <= GCCStructSize && // Not flexible init case.
+ LLVMNaturalSize > GCCStructSize) { // Tail pad will overflow type.
+ assert(!StructIsPacked && "LLVM Struct type overflow!");
+
+ // Switch to packed.
+ ConvertToPacked();
+ LLVMNaturalSize = NextFieldByteStart;
+
+ // Verify that packing solved the problem.
+ assert(LLVMNaturalSize <= GCCStructSize &&
+ "Oversized should be handled by packing");
+ }
+
+ // If the LLVM Size is too small, add some tail padding to fill it in.
+ if (LLVMNaturalSize < GCCStructSize) {
+ const Type *FillTy = Type::getInt8Ty(Context);
+ if (GCCStructSize - NextFieldByteStart != 1)
+ FillTy = ArrayType::get(FillTy, GCCStructSize - NextFieldByteStart);
+ ResultElts.push_back(Constant::getNullValue(FillTy));
+ NextFieldByteStart = GCCStructSize;
+
+ // At this point, we know that our struct should have the right size.
+ // However, if the size of the struct is not a multiple of the largest
+ // element alignment, the rounding could bump up the struct more. In this
+ // case, we have to convert the struct to being packed.
+ LLVMNaturalSize =
+ TargetData::RoundUpAlignment(NextFieldByteStart, MaxLLVMFieldAlignment);
+
+ // If the alignment will make the struct too big, convert it to being
+ // packed.
+ if (LLVMNaturalSize > GCCStructSize) {
+ assert(!StructIsPacked && "LLVM Struct type overflow!");
+ ConvertToPacked();
+ }
+ }
+}
+
+Constant *TreeConstantToLLVM::ConvertRecordCONSTRUCTOR(tree exp) {
+ ConstantLayoutInfo LayoutInfo(getTargetData());
+
+ tree NextField = TYPE_FIELDS(TREE_TYPE(exp));
+ unsigned HOST_WIDE_INT CtorIndex;
+ tree FieldValue;
+ tree Field; // The FIELD_DECL for the field.
+ FOR_EACH_CONSTRUCTOR_ELT(CONSTRUCTOR_ELTS(exp), CtorIndex, Field, FieldValue){
+    // If no field is explicitly specified, fall back on the next FIELD_DECL.
+ if (Field == 0) {
+ Field = NextField;
+ // Advance to the next FIELD_DECL, skipping over other structure members
+ // (e.g. enums).
+ while (1) {
+ assert(Field && "Fell off end of record!");
+ if (TREE_CODE(Field) == FIELD_DECL) break;
+ Field = TREE_CHAIN(Field);
+ }
+ }
+
+ // Decode the field's value.
+ Constant *Val = Convert(FieldValue);
+
+ // GCCFieldOffsetInBits is where GCC is telling us to put the current field.
+ uint64_t GCCFieldOffsetInBits = getFieldOffsetInBits(Field);
+ NextField = TREE_CHAIN(Field);
+
+ // If this is a non-bitfield value, just slap it onto the end of the struct
+ // with the appropriate padding etc. If it is a bitfield, we have more
+ // processing to do.
+ if (!isBitfield(Field))
+ LayoutInfo.AddFieldToRecordConstant(Val, GCCFieldOffsetInBits);
+ else {
+ // Bitfields can only be initialized with constants (integer constant
+ // expressions).
+ ConstantInt *ValC = cast<ConstantInt>(Val);
+ uint64_t FieldSizeInBits = getInt64(DECL_SIZE(Field), true);
+ uint64_t ValueSizeInBits = Val->getType()->getPrimitiveSizeInBits();
+
+ // G++ has various bugs handling {} initializers where it doesn't
+ // synthesize a zero node of the right type. Instead of figuring out G++,
+ // just hack around it by special casing zero and allowing it to be the
+ // wrong size.
+ if (ValueSizeInBits < FieldSizeInBits && ValC->isZero()) {
+ APInt ValAsInt = ValC->getValue();
+ ValC = ConstantInt::get(Context, ValAsInt.zext(FieldSizeInBits));
+ ValueSizeInBits = FieldSizeInBits;
+ }
+
+ assert(ValueSizeInBits >= FieldSizeInBits &&
+ "disagreement between LLVM and GCC on bitfield size");
+ if (ValueSizeInBits != FieldSizeInBits) {
+ // Fields are allowed to be smaller than their type. Simply discard
+ // the unwanted upper bits in the field value.
+ APInt ValAsInt = ValC->getValue();
+ ValC = ConstantInt::get(Context, ValAsInt.trunc(FieldSizeInBits));
+ }
+ LayoutInfo.AddBitFieldToRecordConstant(ValC, GCCFieldOffsetInBits);
+ }
+ }
+
+ // Check to see if the struct fields, as laid out so far, will be large enough
+ // to make the generated constant struct have the right size. If not, add
+ // explicit tail padding. If rounding up based on the LLVM IR alignment would
+ // make the struct too large, convert it to a packed LLVM struct.
+ tree StructTypeSizeTree = TYPE_SIZE(TREE_TYPE(exp));
+ if (StructTypeSizeTree && TREE_CODE(StructTypeSizeTree) == INTEGER_CST)
+ LayoutInfo.HandleTailPadding(getInt64(StructTypeSizeTree, true));
+
+ // Okay, we're done, return the computed elements.
+ return ConstantStruct::get(Context, LayoutInfo.ResultElts,
+ LayoutInfo.StructIsPacked);
+}
+
+Constant *TreeConstantToLLVM::ConvertUnionCONSTRUCTOR(tree exp) {
+ assert(!VEC_empty(constructor_elt, CONSTRUCTOR_ELTS(exp))
+ && "Union CONSTRUCTOR has no elements? Zero?");
+
+ VEC(constructor_elt, gc) *elt = CONSTRUCTOR_ELTS(exp);
+ assert(VEC_length(constructor_elt, elt) == 1
+ && "Union CONSTRUCTOR with multiple elements?");
+
+ std::vector<Constant*> Elts;
+ // Convert the constant itself.
+ Elts.push_back(Convert(VEC_index(constructor_elt, elt, 0)->value));
+
+ // If the union has a fixed size, and if the value we converted isn't large
+ // enough to fill all the bits, add a zero initialized array at the end to pad
+ // it out.
+ tree UnionType = TREE_TYPE(exp);
+ if (TYPE_SIZE(UnionType) && TREE_CODE(TYPE_SIZE(UnionType)) == INTEGER_CST) {
+ uint64_t UnionSize = ((uint64_t)TREE_INT_CST_LOW(TYPE_SIZE(UnionType))+7)/8;
+ uint64_t InitSize = getTargetData().getTypeAllocSize(Elts[0]->getType());
+ if (UnionSize != InitSize) {
+ const Type *FillTy;
+ assert(UnionSize > InitSize && "Init shouldn't be larger than union!");
+ if (UnionSize - InitSize == 1)
+ FillTy = Type::getInt8Ty(Context);
+ else
+ FillTy = ArrayType::get(Type::getInt8Ty(Context), UnionSize - InitSize);
+ Elts.push_back(Constant::getNullValue(FillTy));
+ }
+ }
+ return ConstantStruct::get(Context, Elts, false);
+}
+
+//===----------------------------------------------------------------------===//
+// ... Constant Expressions L-Values ...
+//===----------------------------------------------------------------------===//
+
+Constant *TreeConstantToLLVM::EmitLV(tree exp) {
+ Constant *LV;
+
+ switch (TREE_CODE(exp)) {
+ default:
+ debug_tree(exp);
+ assert(0 && "Unknown constant lvalue to convert!");
+ abort();
+ case FUNCTION_DECL:
+ case CONST_DECL:
+ case VAR_DECL:
+ LV = EmitLV_Decl(exp);
+ break;
+ case LABEL_DECL:
+ LV = EmitLV_LABEL_DECL(exp);
+ break;
+ case COMPLEX_CST:
+ LV = EmitLV_COMPLEX_CST(exp);
+ break;
+ case STRING_CST:
+ LV = EmitLV_STRING_CST(exp);
+ break;
+ case COMPONENT_REF:
+ LV = EmitLV_COMPONENT_REF(exp);
+ break;
+ case ARRAY_RANGE_REF:
+ case ARRAY_REF:
+ LV = EmitLV_ARRAY_REF(exp);
+ break;
+ case INDIRECT_REF:
+ // The lvalue is just the address.
+ LV = Convert(TREE_OPERAND(exp, 0));
+ break;
+ case COMPOUND_LITERAL_EXPR: // FIXME: not gimple - defined by C front-end
+ /* This used to read
+ return EmitLV(COMPOUND_LITERAL_EXPR_DECL(exp));
+ but gcc warns about that and there doesn't seem to be any way to stop it
+ with casts or the like. The following is equivalent with no checking
+ (since we know TREE_CODE(exp) is COMPOUND_LITERAL_EXPR the checking
+ doesn't accomplish anything anyway). */
+ LV = EmitLV(DECL_EXPR_DECL (TREE_OPERAND (exp, 0)));
+ break;
+ }
+
+ // Check that the type of the lvalue is indeed that of a pointer to the tree
+ // node. Since LLVM has no void* type, don't insist that void* be converted
+ // to a specific LLVM type.
+ assert((VOID_TYPE_P(TREE_TYPE(exp)) ||
+ LV->getType() == ConvertType(TREE_TYPE(exp))->getPointerTo()) &&
+ "LValue of constant has wrong type!");
+
+ return LV;
+}
+
+Constant *TreeConstantToLLVM::EmitLV_Decl(tree exp) {
+ GlobalValue *Val = cast<GlobalValue>(DECL_LLVM(exp));
+
+  // Ensure the variable is marked as used even if it doesn't go through the
+  // parser. If it hasn't been used yet, write out an external definition.
+ if (!TREE_USED(exp)) {
+ assemble_external(exp);
+ TREE_USED(exp) = 1;
+ Val = cast<GlobalValue>(DECL_LLVM(exp));
+ }
+
+ // If this is an aggregate, emit it to LLVM now. GCC happens to
+ // get this case right by forcing the initializer into memory.
+ if (TREE_CODE(exp) == CONST_DECL || TREE_CODE(exp) == VAR_DECL) {
+ if ((DECL_INITIAL(exp) || !TREE_PUBLIC(exp)) && !DECL_EXTERNAL(exp) &&
+ Val->isDeclaration() &&
+ !BOGUS_CTOR(exp)) {
+ emit_global_to_llvm(exp);
+      // The decl could have changed if it changed type.
+ Val = cast<GlobalValue>(DECL_LLVM(exp));
+ }
+ } else {
+ // Otherwise, inform cgraph that we used the global.
+ mark_decl_referenced(exp);
+ if (tree ID = DECL_ASSEMBLER_NAME(exp))
+ mark_referenced(ID);
+ }
+
+ // The type of the global value output for exp need not match that of exp.
+ // For example if the global's initializer has a different type to the global
+ // itself (allowed in GCC but not in LLVM) then the global is changed to have
+ // the type of the initializer. Correct for this now.
+ const Type *Ty = ConvertType(TREE_TYPE(exp));
+ if (Ty->isVoidTy()) Ty = Type::getInt8Ty(Context); // void* -> i8*.
+
+ return TheFolder->CreateBitCast(Val, Ty->getPointerTo());
+}
+
+/// EmitLV_LABEL_DECL - Someone took the address of a label.
+Constant *TreeConstantToLLVM::EmitLV_LABEL_DECL(tree exp) {
+ assert(TheTreeToLLVM &&
+ "taking the address of a label while not compiling the function!");
+
+ // Figure out which function this is for, verify it's the one we're compiling.
+ if (DECL_CONTEXT(exp)) {
+ assert(TREE_CODE(DECL_CONTEXT(exp)) == FUNCTION_DECL &&
+ "Address of label in nested function?");
+ assert(TheTreeToLLVM->getFUNCTION_DECL() == DECL_CONTEXT(exp) &&
+ "Taking the address of a label that isn't in the current fn!?");
+ }
+
+ BasicBlock *BB = TheTreeToLLVM->getLabelDeclBlock(exp);
+ Constant *C = TheTreeToLLVM->getIndirectGotoBlockNumber(BB);
+ return
+ TheFolder->CreateIntToPtr(C, Type::getInt8PtrTy(Context));
+}
+
+Constant *TreeConstantToLLVM::EmitLV_COMPLEX_CST(tree exp) {
+ Constant *Init = TreeConstantToLLVM::ConvertCOMPLEX_CST(exp);
+
+ // Cache the constants to avoid making obvious duplicates that have to be
+ // folded by the optimizer.
+ static std::map<Constant*, GlobalVariable*> ComplexCSTCache;
+ GlobalVariable *&Slot = ComplexCSTCache[Init];
+ if (Slot) return Slot;
+
+ // Create a new complex global.
+ Slot = new GlobalVariable(*TheModule, Init->getType(), true,
+ GlobalVariable::PrivateLinkage, Init, ".cpx");
+ return Slot;
+}
+
+Constant *TreeConstantToLLVM::EmitLV_STRING_CST(tree exp) {
+ Constant *Init = TreeConstantToLLVM::ConvertSTRING_CST(exp);
+
+ GlobalVariable **SlotP = 0;
+
+ // Cache the string constants to avoid making obvious duplicate strings that
+ // have to be folded by the optimizer.
+ static std::map<Constant*, GlobalVariable*> StringCSTCache;
+ GlobalVariable *&Slot = StringCSTCache[Init];
+ if (Slot) return Slot;
+ SlotP = &Slot;
+
+ // Create a new string global.
+ GlobalVariable *GV = new GlobalVariable(*TheModule, Init->getType(), true,
+ GlobalVariable::PrivateLinkage, Init,
+ ".str");
+
+ GV->setAlignment(TYPE_ALIGN(TREE_TYPE(exp)) / 8);
+
+ if (SlotP) *SlotP = GV;
+ return GV;
+}
+
+Constant *TreeConstantToLLVM::EmitLV_ARRAY_REF(tree exp) {
+ tree Array = TREE_OPERAND(exp, 0);
+ tree Index = TREE_OPERAND(exp, 1);
+ tree IndexType = TREE_TYPE(Index);
+ assert(TREE_CODE(TREE_TYPE(Array)) == ARRAY_TYPE && "Unknown ARRAY_REF!");
+
+ // Check for variable sized reference.
+ // FIXME: add support for array types where the size doesn't fit into 64 bits
+ assert(isSequentialCompatible(TREE_TYPE(Array)) &&
+ "Global with variable size?");
+
+ Constant *ArrayAddr;
+
+ // First subtract the lower bound, if any, in the type of the index.
+ Constant *IndexVal = Convert(Index);
+ tree LowerBound = array_ref_low_bound(exp);
+ if (!integer_zerop(LowerBound))
+ IndexVal = TYPE_UNSIGNED(TREE_TYPE(Index)) ?
+ TheFolder->CreateSub(IndexVal, Convert(LowerBound)) :
+ TheFolder->CreateNSWSub(IndexVal, Convert(LowerBound));
+
+ ArrayAddr = EmitLV(Array);
+
+ const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
+ if (IndexVal->getType() != IntPtrTy)
+ IndexVal = TheFolder->CreateIntCast(IndexVal, IntPtrTy,
+ !TYPE_UNSIGNED(IndexType));
+
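+  // Index with a leading zero to step through the pointer to the array, then
+  // index into the array itself; the result is the address of element IndexVal.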
+ Value *Idx[2];
+ Idx[0] = ConstantInt::get(IntPtrTy, 0);
+ Idx[1] = IndexVal;
+
+ return TheFolder->CreateGetElementPtr(ArrayAddr, Idx, 2);
+}
+
+Constant *TreeConstantToLLVM::EmitLV_COMPONENT_REF(tree exp) {
+ Constant *StructAddrLV = EmitLV(TREE_OPERAND(exp, 0));
+
+ // Ensure that the struct type has been converted, so that the fielddecls
+ // are laid out.
+ const Type *StructTy = ConvertType(TREE_TYPE(TREE_OPERAND(exp, 0)));
+
+ tree FieldDecl = TREE_OPERAND(exp, 1);
+
+ StructAddrLV = TheFolder->CreateBitCast(StructAddrLV,
+ StructTy->getPointerTo());
+ const Type *FieldTy = ConvertType(getDeclaredType(FieldDecl));
+
+ // BitStart - This is the actual offset of the field from the start of the
+ // struct, in bits. For bitfields this may be on a non-byte boundary.
+ unsigned BitStart = getFieldOffsetInBits(TREE_OPERAND(exp, 1));
+ Constant *FieldPtr;
+ const TargetData &TD = getTargetData();
+
+ // If this is a normal field at a fixed offset from the start, handle it.
+ if (!TREE_OPERAND(exp, 2)) {
+ unsigned int MemberIndex = GetFieldIndex(FieldDecl);
+
+ Constant *Ops[] = {
+ StructAddrLV,
+ Constant::getNullValue(Type::getInt32Ty(Context)),
+ ConstantInt::get(Type::getInt32Ty(Context), MemberIndex)
+ };
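+    // The GEP created by the folder below is only used for its result type;
+    // the target-data-aware fold that follows computes the actual address.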
+ FieldPtr = TheFolder->CreateGetElementPtr(StructAddrLV, Ops+1, 2);
+
+ FieldPtr = ConstantFoldInstOperands(Instruction::GetElementPtr,
+ FieldPtr->getType(), Ops,
+ 3, Context, &TD);
+
+    // The GEP above addresses the LLVM field directly, so make BitStart
+    // relative to that field by subtracting the field's offset within the
+    // struct.
+ if (MemberIndex) {
+ const StructLayout *SL = TD.getStructLayout(cast<StructType>(StructTy));
+ BitStart -= SL->getElementOffset(MemberIndex) * 8;
+ }
+
+ } else {
+ // Offset is the field offset in octets.
+ Constant *Offset = Convert(TREE_OPERAND(exp, 2));
+ if (BITS_PER_UNIT != 8) {
+ assert(!(BITS_PER_UNIT & 7) && "Unit size not a multiple of 8 bits!");
+ Offset = TheFolder->CreateMul(Offset,
+ ConstantInt::get(Offset->getType(),
+ BITS_PER_UNIT / 8));
+ }
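+    // Compute the field address with integer arithmetic: cast the struct
+    // address to an integer, add the byte offset, then cast the result back
+    // to a pointer to the field type.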
+ Constant *Ptr = TheFolder->CreatePtrToInt(StructAddrLV, Offset->getType());
+ Ptr = TheFolder->CreateAdd(Ptr, Offset);
+ FieldPtr = TheFolder->CreateIntToPtr(Ptr, FieldTy->getPointerTo());
+ }
+
+ // Make sure we return a result of the right type.
+ if (FieldTy->getPointerTo() != FieldPtr->getType())
+ FieldPtr = TheFolder->CreateBitCast(FieldPtr, FieldTy->getPointerTo());
+
+ assert(BitStart == 0 &&
+ "It's a bitfield reference or we didn't get to the field!");
+ return FieldPtr;
+}
+
+//===----------------------------------------------------------------------===//
+// ... GIMPLE conversion helpers ...
+//===----------------------------------------------------------------------===//
+
+/// WriteScalarToLHS - Store RHS, a non-aggregate value, into the given LHS.
+void TreeToLLVM::WriteScalarToLHS(tree lhs, Value *RHS) {
+ // Perform a useless type conversion (useless_type_conversion_p).
+ RHS = Builder.CreateBitCast(RHS, ConvertType(TREE_TYPE(lhs)));
+
+ // If this is the definition of an ssa name, record it in the SSANames map.
+ if (TREE_CODE(lhs) == SSA_NAME) {
+ assert(SSANames.find(lhs) == SSANames.end() &&"Multiply defined SSA name!");
+ if (flag_verbose_asm)
+ NameValue(RHS, lhs);
+ SSANames[lhs] = RHS;
+ return;
+ }
+
+ if (canEmitRegisterVariable(lhs)) {
+ // If this is a store to a register variable, EmitLV can't handle the dest
+ // (there is no l-value of a register variable). Emit an inline asm node
+ // that copies the value into the specified register.
+ EmitModifyOfRegisterVariable(lhs, RHS);
+ return;
+ }
+
+ LValue LV = EmitLV(lhs);
+ bool isVolatile = TREE_THIS_VOLATILE(lhs);
+ unsigned Alignment = LV.getAlignment();
+
+ if (!LV.isBitfield()) {
+ // Non-bitfield, scalar value. Just emit a store.
+ StoreInst *SI = Builder.CreateStore(RHS, LV.Ptr, isVolatile);
+ SI->setAlignment(Alignment);
+ return;
+ }
+
+ // Last case, this is a store to a bitfield, so we have to emit a
+ // read/modify/write sequence.
+
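+  // A store to a zero-width bitfield writes nothing.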
+ if (!LV.BitSize)
+ return;
+
+ const Type *ValTy = cast<PointerType>(LV.Ptr->getType())->getElementType();
+ unsigned ValSizeInBits = ValTy->getPrimitiveSizeInBits();
+
+ // The number of stores needed to write the entire bitfield.
+ unsigned Strides = 1 + (LV.BitStart + LV.BitSize - 1) / ValSizeInBits;
+
+ assert(ValTy->isInteger() && "Invalid bitfield lvalue!");
+ assert(ValSizeInBits > LV.BitStart && "Bad bitfield lvalue!");
+ assert(ValSizeInBits >= LV.BitSize && "Bad bitfield lvalue!");
+ assert(2*ValSizeInBits > LV.BitSize+LV.BitStart && "Bad bitfield lvalue!");
+
+ bool Signed = !TYPE_UNSIGNED(TREE_TYPE(lhs));
+ RHS = CastToAnyType(RHS, Signed, ValTy, Signed);
+
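+  // Write the field one memory unit at a time. For example (assuming an i32
+  // unit), a field with BitStart 28 and BitSize 20 takes two strides: the
+  // first writes bits 28-31 of word 0, the second bits 0-15 of word 1, with
+  // RHS shifted right by the number of bits written after each stride.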
+ for (unsigned I = 0; I < Strides; I++) {
+ unsigned Index = BYTES_BIG_ENDIAN ? Strides - I - 1 : I; // LSB first
+ unsigned ThisFirstBit = Index * ValSizeInBits;
+ unsigned ThisLastBitPlusOne = ThisFirstBit + ValSizeInBits;
+ if (ThisFirstBit < LV.BitStart)
+ ThisFirstBit = LV.BitStart;
+ if (ThisLastBitPlusOne > LV.BitStart+LV.BitSize)
+ ThisLastBitPlusOne = LV.BitStart+LV.BitSize;
+
+ Value *Ptr = Index ?
+ Builder.CreateGEP(LV.Ptr, ConstantInt::get(Type::getInt32Ty(Context), Index)) :
+ LV.Ptr;
+ LoadInst *LI = Builder.CreateLoad(Ptr, isVolatile);
+ LI->setAlignment(Alignment);
+ Value *OldVal = LI;
+ Value *NewVal = RHS;
+
+ unsigned BitsInVal = ThisLastBitPlusOne - ThisFirstBit;
+ unsigned FirstBitInVal = ThisFirstBit % ValSizeInBits;
+
+ if (BYTES_BIG_ENDIAN)
+ FirstBitInVal = ValSizeInBits-FirstBitInVal-BitsInVal;
+
+    // If not storing into the zeroth bit, shift the new value left into place.
+ if (FirstBitInVal) {
+ Value *ShAmt = ConstantInt::get(ValTy, FirstBitInVal);
+ NewVal = Builder.CreateShl(NewVal, ShAmt);
+ }
+
+ // Next, if this doesn't touch the top bit, mask out any bits that shouldn't
+ // be set in the result.
+ uint64_t MaskVal = ((1ULL << BitsInVal)-1) << FirstBitInVal;
+ Constant *Mask = ConstantInt::get(Type::getInt64Ty(Context), MaskVal);
+ Mask = Builder.getFolder().CreateTruncOrBitCast(Mask, ValTy);
+
+ if (FirstBitInVal+BitsInVal != ValSizeInBits)
+ NewVal = Builder.CreateAnd(NewVal, Mask);
+
+    // Next, clear the bits covered by this bitfield in the old value.
+ Mask = Builder.getFolder().CreateNot(Mask);
+ OldVal = Builder.CreateAnd(OldVal, Mask);
+
+ // Finally, merge the two together and store it.
+ NewVal = Builder.CreateOr(OldVal, NewVal);
+
+ StoreInst *SI = Builder.CreateStore(NewVal, Ptr, isVolatile);
+ SI->setAlignment(Alignment);
+
+ if (I + 1 < Strides) {
+ Value *ShAmt = ConstantInt::get(ValTy, BitsInVal);
+ RHS = Builder.CreateLShr(RHS, ShAmt);
+ }
+ }
+}
diff --git a/dragonegg/llvm-debug.cpp b/dragonegg/llvm-debug.cpp
new file mode 100644
index 00000000000..6e6a01b829a
--- /dev/null
+++ b/dragonegg/llvm-debug.cpp
@@ -0,0 +1,924 @@
+/* High-level LLVM backend interface
+Copyright (C) 2005 Free Software Foundation, Inc.
+Contributed by Jim Laskey (jlaskey@apple.com)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+//===----------------------------------------------------------------------===//
+// This is a C++ source file that implements the debug information gathering.
+//===----------------------------------------------------------------------===//
+
+// LLVM headers
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallVector.h"
+
+// System headers
+#include <gmp.h>
+
+// GCC headers
+#undef VISIBILITY_HIDDEN
+
+extern "C" {
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "target.h"
+#include "tree.h"
+
+#include "flags.h"
+#include "langhooks.h"
+#include "toplev.h"
+#include "version.h"
+}
+
+// Plugin headers
+#include "llvm-abi.h"
+#include "llvm-debug.h"
+#include "llvm-internal.h"
+#include "bits_and_bobs.h"
+
+using namespace llvm;
+using namespace llvm::dwarf;
+
+#ifndef LLVMTESTDEBUG
+#define DEBUGASSERT(S) ((void)0)
+#else
+#define DEBUGASSERT(S) assert(S)
+#endif
+
+
+/// DirectoryAndFile - Extract the directory and file name from a path. If no
+/// directory is specified, then use the source working directory.
+static void DirectoryAndFile(const std::string &FullPath,
+ std::string &Directory, std::string &FileName) {
+ // Look for the directory slash.
+ size_t Slash = FullPath.rfind('/');
+
+  // If there is no slash then the entire path is the file name.
+  if (Slash == std::string::npos) {
+ Directory = "";
+ FileName = FullPath;
+ } else {
+ // Separate the directory from the file name.
+ Directory = FullPath.substr(0, Slash);
+ FileName = FullPath.substr(Slash + 1);
+ }
+
+  // If the directory is absent or relative, prepend the source working directory.
+ if (Directory.empty() || Directory[0] != '/') {
+ Directory = std::string(get_src_pwd()) + "/" + Directory;
+ }
+}
+
+/// NodeSizeInBits - Returns the size in bits stored in a tree node regardless
+/// of whether the node is a TYPE or DECL.
+static uint64_t NodeSizeInBits(tree Node) {
+ if (TREE_CODE(Node) == ERROR_MARK) {
+ return BITS_PER_WORD;
+ } else if (TYPE_P(Node)) {
+ if (TYPE_SIZE(Node) == NULL_TREE)
+ return 0;
+ else if (isInt64(TYPE_SIZE(Node), 1))
+ return getINTEGER_CSTVal(TYPE_SIZE(Node));
+ else
+ return TYPE_ALIGN(Node);
+ } else if (DECL_P(Node)) {
+ if (DECL_SIZE(Node) == NULL_TREE)
+ return 0;
+ else if (isInt64(DECL_SIZE(Node), 1))
+ return getINTEGER_CSTVal(DECL_SIZE(Node));
+ else
+ return DECL_ALIGN(Node);
+ }
+
+ return 0;
+}
+
+/// NodeAlignInBits - Returns the alignment in bits stored in a tree node
+/// regardless of whether the node is a TYPE or DECL.
+static uint64_t NodeAlignInBits(tree Node) {
+ if (TREE_CODE(Node) == ERROR_MARK) return BITS_PER_WORD;
+ if (TYPE_P(Node)) return TYPE_ALIGN(Node);
+ if (DECL_P(Node)) return DECL_ALIGN(Node);
+ return BITS_PER_WORD;
+}
+
+/// FieldType - Returns the type node of a structure member field.
+///
+static tree FieldType(tree Field) {
+ if (TREE_CODE (Field) == ERROR_MARK) return integer_type_node;
+ return getDeclaredType(Field);
+}
+
+/// GetNodeName - Returns the name stored in a node regardless of whether the
+/// node is a TYPE or DECL.
+static const char *GetNodeName(tree Node) {
+ tree Name = NULL;
+
+ if (DECL_P(Node)) {
+ Name = DECL_NAME(Node);
+ } else if (TYPE_P(Node)) {
+ Name = TYPE_NAME(Node);
+ }
+
+ if (Name) {
+ if (TREE_CODE(Name) == IDENTIFIER_NODE) {
+ return IDENTIFIER_POINTER(Name);
+ } else if (TREE_CODE(Name) == TYPE_DECL && DECL_NAME(Name) &&
+ !DECL_IGNORED_P(Name)) {
+ return IDENTIFIER_POINTER(DECL_NAME(Name));
+ }
+ }
+
+ return "";
+}
+
+/// GetNodeLocation - Returns the location stored in a node regardless of
+/// whether the node is a TYPE or DECL. UseStub is true if we should consider
+/// the type stub as the actual location (ignored in structs/unions/enums).
+static expanded_location GetNodeLocation(tree Node, bool UseStub = true) {
+ expanded_location Location = { NULL, 0 };
+
+ if (Node == NULL_TREE)
+ return Location;
+
+ tree Name = NULL;
+
+ if (DECL_P(Node)) {
+ Name = DECL_NAME(Node);
+ } else if (TYPE_P(Node)) {
+ Name = TYPE_NAME(Node);
+ }
+
+ if (Name) {
+ if (TYPE_STUB_DECL(Name)) {
+ tree Stub = TYPE_STUB_DECL(Name);
+ Location = expand_location(DECL_SOURCE_LOCATION(Stub));
+ } else if (DECL_P(Name)) {
+ Location = expand_location(DECL_SOURCE_LOCATION(Name));
+ }
+ }
+
+ if (!Location.line) {
+ if (UseStub && TYPE_STUB_DECL(Node)) {
+ tree Stub = TYPE_STUB_DECL(Node);
+ Location = expand_location(DECL_SOURCE_LOCATION(Stub));
+ } else if (DECL_P(Node)) {
+ Location = expand_location(DECL_SOURCE_LOCATION(Node));
+ }
+ }
+
+ return Location;
+}
+
+static const char *getLinkageName(tree Node) {
+
+  // Use the LLVM value name as the linkage name if it is available.
+ if (DECL_LLVM_SET_P(Node)) {
+ Value *V = DECL_LLVM(Node);
+ return V->getName().data();
+ }
+
+ tree decl_name = DECL_NAME(Node);
+ if (decl_name != NULL && IDENTIFIER_POINTER (decl_name) != NULL) {
+ if (TREE_PUBLIC(Node) &&
+ DECL_ASSEMBLER_NAME(Node) != DECL_NAME(Node) &&
+ !DECL_ABSTRACT(Node)) {
+ return IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(Node));
+ }
+ }
+ return "";
+}
+
+DebugInfo::DebugInfo(Module *m)
+: M(m)
+, DebugFactory(*m)
+, CurFullPath("")
+, CurLineNo(0)
+, PrevFullPath("")
+, PrevLineNo(0)
+, PrevBB(NULL)
+, RegionStack()
+{}
+
+/// EmitFunctionStart - Constructs the debug code for entering a function -
+/// "llvm.dbg.func.start."
+void DebugInfo::EmitFunctionStart(tree FnDecl, Function *Fn,
+ BasicBlock *CurBB) {
+ // Gather location information.
+ expanded_location Loc = GetNodeLocation(FnDecl, false);
+ const char *LinkageName = getLinkageName(FnDecl);
+
+ DISubprogram SP =
+ DebugFactory.CreateSubprogram(findRegion(FnDecl),
+ lang_hooks.dwarf_name(FnDecl, 0),
+ lang_hooks.dwarf_name(FnDecl, 0),
+ LinkageName,
+ getOrCreateCompileUnit(Loc.file), CurLineNo,
+ getOrCreateType(TREE_TYPE(FnDecl)),
+ Fn->hasInternalLinkage(),
+ true /*definition*/);
+
+ DebugFactory.InsertSubprogramStart(SP, CurBB);
+
+ // Push function on region stack.
+ RegionStack.push_back(SP);
+ RegionMap[FnDecl] = SP;
+}
+
+/// findRegion - Find tree_node N's region.
+DIDescriptor DebugInfo::findRegion(tree Node) {
+ if (Node == NULL_TREE)
+ return getOrCreateCompileUnit(main_input_filename);
+
+ std::map<tree_node *, DIDescriptor>::iterator I = RegionMap.find(Node);
+ if (I != RegionMap.end())
+ return I->second;
+
+ if (TYPE_P (Node)) {
+ if (TYPE_CONTEXT (Node))
+ return findRegion (TYPE_CONTEXT(Node));
+ } else if (DECL_P (Node)) {
+ tree decl = Node;
+ tree context = NULL_TREE;
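+    // For virtual methods use the class of 'this' (the pointee of the first
+    // argument type) as the context; otherwise use the decl's own context.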
+ if (TREE_CODE (decl) != FUNCTION_DECL || ! DECL_VINDEX (decl))
+ context = DECL_CONTEXT (decl);
+ else
+ context = TYPE_MAIN_VARIANT
+ (TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (decl)))));
+
+ if (context && !TYPE_P (context))
+ context = NULL_TREE;
+ if (context != NULL_TREE)
+ return findRegion(context);
+ }
+
+ // Otherwise main compile unit covers everything.
+ return getOrCreateCompileUnit(main_input_filename);
+}
+
+/// EmitRegionStart- Constructs the debug code for entering a declarative
+/// region - "llvm.dbg.region.start."
+void DebugInfo::EmitRegionStart(BasicBlock *CurBB) {
+ llvm::DIDescriptor D;
+ if (!RegionStack.empty())
+ D = RegionStack.back();
+ D = DebugFactory.CreateLexicalBlock(D);
+ RegionStack.push_back(D);
+ DebugFactory.InsertRegionStart(D, CurBB);
+}
+
+/// EmitRegionEnd - Constructs the debug code for exiting a declarative
+/// region - "llvm.dbg.region.end."
+void DebugInfo::EmitRegionEnd(BasicBlock *CurBB, bool EndFunction) {
+ assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+ DebugFactory.InsertRegionEnd(RegionStack.back(), CurBB);
+ RegionStack.pop_back();
+ // Blocks get erased; clearing these is needed for determinism, and also
+ // a good idea if the next function gets inlined.
+ if (EndFunction) {
+ PrevBB = NULL;
+ PrevLineNo = 0;
+ PrevFullPath = NULL;
+ }
+}
+
+/// EmitDeclare - Constructs the debug code for the allocation of a new
+/// variable - "llvm.dbg.declare."
+void DebugInfo::EmitDeclare(tree decl, unsigned Tag, StringRef Name,
+ tree type, Value *AI, BasicBlock *CurBB) {
+
+ // Do not emit variable declaration info, for now.
+ if (optimize)
+ return;
+
+ // Ignore compiler generated temporaries.
+ if (DECL_IGNORED_P(decl))
+ return;
+
+ assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+
+ expanded_location Loc = GetNodeLocation(decl, false);
+
+ // Construct variable.
+ llvm::DIVariable D =
+ DebugFactory.CreateVariable(Tag, RegionStack.back(), Name,
+ getOrCreateCompileUnit(Loc.file),
+ Loc.line, getOrCreateType(type));
+
+ // Insert an llvm.dbg.declare into the current block.
+ DebugFactory.InsertDeclare(AI, D, CurBB);
+}
+
+/// EmitStopPoint - Emit a call to llvm.dbg.stoppoint to indicate a change of
+/// source line. Now also emitted when optimizing.
+void DebugInfo::EmitStopPoint(Function *Fn, BasicBlock *CurBB) {
+
+ // Don't bother if things are the same as last time.
+ if (PrevLineNo == CurLineNo &&
+ PrevBB == CurBB &&
+ (PrevFullPath == CurFullPath ||
+ !strcmp(PrevFullPath, CurFullPath))) return;
+ if (!CurFullPath[0] || CurLineNo == 0) return;
+
+ // Update last state.
+ PrevFullPath = CurFullPath;
+ PrevLineNo = CurLineNo;
+ PrevBB = CurBB;
+
+ DebugFactory.InsertStopPoint(getOrCreateCompileUnit(CurFullPath),
+ CurLineNo, 0 /*column no. */,
+ CurBB);
+}
+
+/// EmitGlobalVariable - Emit information about a global variable.
+///
+void DebugInfo::EmitGlobalVariable(GlobalVariable *GV, tree decl) {
+ // Gather location information.
+ expanded_location Loc = expand_location(DECL_SOURCE_LOCATION(decl));
+ DIType TyD = getOrCreateType(TREE_TYPE(decl));
+ std::string DispName = GV->getNameStr();
+ if (DECL_NAME(decl)) {
+ if (IDENTIFIER_POINTER(DECL_NAME(decl)))
+ DispName = IDENTIFIER_POINTER(DECL_NAME(decl));
+ }
+
+ DebugFactory.CreateGlobalVariable(getOrCreateCompileUnit(Loc.file),
+ GV->getNameStr(),
+ DispName,
+ getLinkageName(decl),
+ getOrCreateCompileUnit(Loc.file), Loc.line,
+ TyD, GV->hasInternalLinkage(),
+ true/*definition*/, GV);
+}
+
+/// createBasicType - Create BasicType.
+DIType DebugInfo::createBasicType(tree type) {
+
+ const char *TypeName = GetNodeName(type);
+ uint64_t Size = NodeSizeInBits(type);
+ uint64_t Align = NodeAlignInBits(type);
+
+ unsigned Encoding = 0;
+
+ switch (TREE_CODE(type)) {
+ case INTEGER_TYPE:
+ if (TYPE_STRING_FLAG (type)) {
+ if (TYPE_UNSIGNED (type))
+ Encoding = DW_ATE_unsigned_char;
+ else
+ Encoding = DW_ATE_signed_char;
+ }
+ else if (TYPE_UNSIGNED (type))
+ Encoding = DW_ATE_unsigned;
+ else
+ Encoding = DW_ATE_signed;
+ break;
+ case REAL_TYPE:
+ Encoding = DW_ATE_float;
+ break;
+ case COMPLEX_TYPE:
+ Encoding = TREE_CODE(TREE_TYPE(type)) == REAL_TYPE ?
+ DW_ATE_complex_float : DW_ATE_lo_user;
+ break;
+ case BOOLEAN_TYPE:
+ Encoding = DW_ATE_boolean;
+ break;
+ default: {
+ DEBUGASSERT(0 && "Basic type case missing");
+ Encoding = DW_ATE_signed;
+ Size = BITS_PER_WORD;
+ Align = BITS_PER_WORD;
+ break;
+ }
+ }
+
+ return
+ DebugFactory.CreateBasicType(getOrCreateCompileUnit(main_input_filename),
+ TypeName,
+ getOrCreateCompileUnit(main_input_filename),
+ 0, Size, Align,
+ 0, 0, Encoding);
+}
+
+/// createMethodType - Create MethodType.
+DIType DebugInfo::createMethodType(tree type) {
+
+ llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+ // Add the result type at least.
+ EltTys.push_back(getOrCreateType(TREE_TYPE(type)));
+
+ // Set up remainder of arguments.
+ for (tree arg = TYPE_ARG_TYPES(type); arg; arg = TREE_CHAIN(arg)) {
+ tree formal_type = TREE_VALUE(arg);
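+    // A trailing void type terminates a prototyped argument list; stop there.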
+ if (formal_type == void_type_node) break;
+ EltTys.push_back(getOrCreateType(formal_type));
+ }
+
+ llvm::DIArray EltTypeArray =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
+ findRegion(type), "",
+ getOrCreateCompileUnit(NULL),
+ 0, 0, 0, 0, 0,
+ llvm::DIType(), EltTypeArray);
+}
+
+/// createPointerType - Create PointerType.
+DIType DebugInfo::createPointerType(tree type) {
+
+ DIType FromTy = getOrCreateType(TREE_TYPE(type));
+ // type* and type&
+  // FIXME: Should BLOCK_POINTER_TYPE have its own DW_TAG?
+ unsigned Tag = TREE_CODE(type) == POINTER_TYPE ?
+ DW_TAG_pointer_type :
+ DW_TAG_reference_type;
+ expanded_location Loc = GetNodeLocation(type);
+
+ const char *PName = FromTy.getName();
+ return DebugFactory.CreateDerivedType(Tag, findRegion(type), PName,
+ getOrCreateCompileUnit(NULL),
+ 0 /*line no*/,
+ NodeSizeInBits(type),
+ NodeAlignInBits(type),
+ 0 /*offset */,
+ 0,
+ FromTy);
+}
+
+/// createArrayType - Create ArrayType.
+DIType DebugInfo::createArrayType(tree type) {
+
+ // type[n][m]...[p]
+ if (TYPE_STRING_FLAG(type) && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE){
+ DEBUGASSERT(0 && "Don't support pascal strings");
+ return DIType();
+ }
+
+ unsigned Tag = 0;
+
+ if (TREE_CODE(type) == VECTOR_TYPE)
+ Tag = DW_TAG_vector_type;
+ else
+ Tag = DW_TAG_array_type;
+
+ // Add the dimensions of the array. FIXME: This loses CV qualifiers from
+ // interior arrays, do we care? Why aren't nested arrays represented the
+ // obvious/recursive way?
+ llvm::SmallVector<llvm::DIDescriptor, 8> Subscripts;
+
+  // There will be an ARRAY_TYPE node for each rank, followed by the element
+  // type.
+ tree atype = type;
+ tree EltTy = TREE_TYPE(atype);
+ for (; TREE_CODE(atype) == ARRAY_TYPE; atype = TREE_TYPE(atype)) {
+ tree Domain = TYPE_DOMAIN(atype);
+ if (Domain) {
+ // FIXME - handle dynamic ranges
+ tree MinValue = TYPE_MIN_VALUE(Domain);
+ tree MaxValue = TYPE_MAX_VALUE(Domain);
+ uint64_t Low = 0;
+ uint64_t Hi = 0;
+ if (MinValue && isInt64(MinValue, 0))
+ Low = getINTEGER_CSTVal(MinValue);
+ if (MaxValue && isInt64(MaxValue, 0))
+ Hi = getINTEGER_CSTVal(MaxValue);
+ Subscripts.push_back(DebugFactory.GetOrCreateSubrange(Low, Hi));
+ }
+ EltTy = TREE_TYPE(atype);
+ }
+
+ llvm::DIArray SubscriptArray =
+ DebugFactory.GetOrCreateArray(Subscripts.data(), Subscripts.size());
+ expanded_location Loc = GetNodeLocation(type);
+  return DebugFactory.CreateCompositeType(Tag,
+ findRegion(type), "",
+ getOrCreateCompileUnit(Loc.file), 0,
+ NodeSizeInBits(type),
+ NodeAlignInBits(type), 0, 0,
+ getOrCreateType(EltTy),
+ SubscriptArray);
+}
+
+/// createEnumType - Create EnumType.
+DIType DebugInfo::createEnumType(tree type) {
+ // enum { a, b, ..., z };
+ llvm::SmallVector<llvm::DIDescriptor, 32> Elements;
+
+ if (TYPE_SIZE(type)) {
+ for (tree Link = TYPE_VALUES(type); Link; Link = TREE_CHAIN(Link)) {
+ tree EnumValue = TREE_VALUE(Link);
+ int64_t Value = getINTEGER_CSTVal(EnumValue);
+ const char *EnumName = IDENTIFIER_POINTER(TREE_PURPOSE(Link));
+ Elements.push_back(DebugFactory.CreateEnumerator(EnumName, Value));
+ }
+ }
+
+ llvm::DIArray EltArray =
+ DebugFactory.GetOrCreateArray(Elements.data(), Elements.size());
+
+ expanded_location Loc = { NULL, 0 };
+ if (TYPE_SIZE(type))
+ // Incomplete enums do not have any location info.
+ Loc = GetNodeLocation(TREE_CHAIN(type), false);
+
+ return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_enumeration_type,
+ findRegion(type), GetNodeName(type),
+ getOrCreateCompileUnit(Loc.file),
+ Loc.line,
+ NodeSizeInBits(type),
+ NodeAlignInBits(type), 0, 0,
+ llvm::DIType(), EltArray);
+}
+
+/// createStructType - Create StructType for struct or union or class.
+DIType DebugInfo::createStructType(tree type) {
+
+ // struct { a; b; ... z; }; | union { a; b; ... z; };
+ unsigned Tag = TREE_CODE(type) == RECORD_TYPE ? DW_TAG_structure_type :
+ DW_TAG_union_type;
+
+ unsigned RunTimeLang = 0;
+//TODO if (TYPE_LANG_SPECIFIC (type)
+//TODO && lang_hooks.types.is_runtime_specific_type (type))
+//TODO {
+//TODO DICompileUnit CU = getOrCreateCompileUnit(main_input_filename);
+//TODO unsigned CULang = CU.getLanguage();
+//TODO switch (CULang) {
+//TODO case DW_LANG_ObjC_plus_plus :
+//TODO RunTimeLang = DW_LANG_ObjC_plus_plus;
+//TODO break;
+//TODO case DW_LANG_ObjC :
+//TODO RunTimeLang = DW_LANG_ObjC;
+//TODO break;
+//TODO case DW_LANG_C_plus_plus :
+//TODO RunTimeLang = DW_LANG_C_plus_plus;
+//TODO break;
+//TODO default:
+//TODO break;
+//TODO }
+//TODO }
+
+ // Records and classes and unions can all be recursive. To handle them,
+ // we first generate a debug descriptor for the struct as a forward
+ // declaration. Then (if it is a definition) we go through and get debug
+ // info for all of its members. Finally, we create a descriptor for the
+ // complete type (which may refer to the forward decl if the struct is
+ // recursive) and replace all uses of the forward declaration with the
+ // final definition.
+ expanded_location Loc = GetNodeLocation(TREE_CHAIN(type), false);
+  // FIXME: findRegion() is not always able to find the context. This means
+  // that when type names in different contexts match, the FwdDecl is reused
+  // because MDNodes are uniqued. To avoid this, also use the type context
+  // while creating FwdDecl, for now.
+ std::string FwdName;
+ if (TYPE_CONTEXT(type))
+ FwdName = GetNodeName(TYPE_CONTEXT(type));
+ FwdName = FwdName + GetNodeName(type);
+ unsigned Flags = llvm::DIType::FlagFwdDecl;
+ llvm::DICompositeType FwdDecl =
+ DebugFactory.CreateCompositeType(Tag,
+ findRegion(type),
+ FwdName,
+ getOrCreateCompileUnit(Loc.file),
+ Loc.line,
+ 0, 0, 0, Flags,
+ llvm::DIType(), llvm::DIArray(),
+ RunTimeLang);
+
+  // If this is just a forward declaration, we are done.
+ if (TYPE_SIZE(type) == 0)
+ return FwdDecl;
+
+ // Insert into the TypeCache so that recursive uses will find it.
+ TypeCache[type] = FwdDecl.getNode();
+
+ // Convert all the elements.
+ llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+ if (tree binfo = TYPE_BINFO(type)) {
+ for (unsigned i = 0, e = BINFO_N_BASE_BINFOS(binfo); i != e; ++i) {
+ tree BInfo = BINFO_BASE_BINFO(binfo, i);
+ tree BInfoType = BINFO_TYPE (BInfo);
+ DIType BaseClass = getOrCreateType(BInfoType);
+
+ // FIXME : name, size, align etc...
+ DIType DTy =
+ DebugFactory.CreateDerivedType(DW_TAG_inheritance,
+ findRegion(type), "",
+ llvm::DICompileUnit(), 0,0,0,
+ getINTEGER_CSTVal(BINFO_OFFSET(BInfo)),
+ 0, BaseClass);
+ EltTys.push_back(DTy);
+ }
+ }
+
+ // Now add members of this class.
+ for (tree Member = TYPE_FIELDS(type); Member;
+ Member = TREE_CHAIN(Member)) {
+    // Should we skip this member?
+ if (DECL_P(Member) && DECL_IGNORED_P(Member)) continue;
+
+ if (TREE_CODE(Member) == FIELD_DECL) {
+
+ if (DECL_FIELD_OFFSET(Member) == 0)
+ // FIXME: field with variable position, skip it for now.
+ continue;
+
+ /* Ignore nameless fields. */
+ if (DECL_NAME (Member) == NULL_TREE)
+ continue;
+
+ // Get the location of the member.
+ expanded_location MemLoc = GetNodeLocation(Member, false);
+
+ // Field type is the declared type of the field.
+ tree FieldNodeType = FieldType(Member);
+ DIType MemberType = getOrCreateType(FieldNodeType);
+ const char *MemberName = GetNodeName(Member);
+ unsigned Flags = 0;
+ if (TREE_PROTECTED(Member))
+ Flags = llvm::DIType::FlagProtected;
+ else if (TREE_PRIVATE(Member))
+ Flags = llvm::DIType::FlagPrivate;
+
+ DIType DTy =
+ DebugFactory.CreateDerivedType(DW_TAG_member, findRegion(Member),
+ MemberName,
+ getOrCreateCompileUnit(MemLoc.file),
+ MemLoc.line, NodeSizeInBits(Member),
+ NodeAlignInBits(FieldNodeType),
+ int_bit_position(Member),
+ Flags, MemberType);
+ EltTys.push_back(DTy);
+ } else {
+ DEBUGASSERT(0 && "Unsupported member tree code!");
+ }
+ }
+
+ for (tree Member = TYPE_METHODS(type); Member;
+ Member = TREE_CHAIN(Member)) {
+
+ if (DECL_ABSTRACT_ORIGIN (Member)) continue;
+ if (DECL_ARTIFICIAL (Member)) continue;
+ // In C++, TEMPLATE_DECLs are marked Ignored, and should be.
+ if (DECL_P (Member) && DECL_IGNORED_P (Member)) continue;
+
+ // Get the location of the member.
+ expanded_location MemLoc = GetNodeLocation(Member, false);
+
+ const char *MemberName = lang_hooks.dwarf_name(Member, 0);
+ const char *LinkageName = getLinkageName(Member);
+ DIType SPTy = getOrCreateType(TREE_TYPE(Member));
+ DISubprogram SP =
+ DebugFactory.CreateSubprogram(findRegion(Member), MemberName, MemberName,
+ LinkageName,
+ getOrCreateCompileUnit(MemLoc.file),
+ MemLoc.line, SPTy, false, false);
+ EltTys.push_back(SP);
+ }
+
+ llvm::DIArray Elements =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ llvm::DICompositeType RealDecl =
+ DebugFactory.CreateCompositeType(Tag, findRegion(type),
+ GetNodeName(type),
+ getOrCreateCompileUnit(Loc.file),
+ Loc.line,
+ NodeSizeInBits(type), NodeAlignInBits(type),
+ 0, Flags, llvm::DIType(), Elements,
+ RunTimeLang);
+
+ // Now that we have a real decl for the struct, replace anything using the
+ // old decl with the new one. This will recursively update the debug info.
+ FwdDecl.replaceAllUsesWith(RealDecl);
+ return RealDecl;
+}
+
+/// createVariantType - Create variant type or return MainTy.
+DIType DebugInfo::createVariantType(tree type, DIType MainTy) {
+
+ DIType Ty;
+ if (tree TyDef = TYPE_NAME(type)) {
+ std::map<tree_node *, MDNode *>::iterator I = TypeCache.find(TyDef);
+ if (I != TypeCache.end())
+ return DIType(I->second);
+ if (TREE_CODE(TyDef) == TYPE_DECL && DECL_ORIGINAL_TYPE(TyDef)) {
+ expanded_location TypeDefLoc = GetNodeLocation(TyDef);
+ Ty = DebugFactory.CreateDerivedType(DW_TAG_typedef, findRegion(TyDef),
+ GetNodeName(TyDef),
+ getOrCreateCompileUnit(TypeDefLoc.file),
+ TypeDefLoc.line,
+ 0 /*size*/,
+ 0 /*align*/,
+ 0 /*offset */,
+ 0 /*flags*/,
+ MainTy);
+ TypeCache[TyDef] = Ty.getNode();
+ return Ty;
+ }
+ }
+
+ if (TYPE_VOLATILE(type)) {
+ Ty = DebugFactory.CreateDerivedType(DW_TAG_volatile_type,
+ findRegion(type), "",
+ getOrCreateCompileUnit(NULL),
+ 0 /*line no*/,
+ NodeSizeInBits(type),
+ NodeAlignInBits(type),
+ 0 /*offset */,
+ 0 /* flags */,
+ MainTy);
+ MainTy = Ty;
+ }
+
+ if (TYPE_READONLY(type))
+ Ty = DebugFactory.CreateDerivedType(DW_TAG_const_type,
+ findRegion(type), "",
+ getOrCreateCompileUnit(NULL),
+ 0 /*line no*/,
+ NodeSizeInBits(type),
+ NodeAlignInBits(type),
+ 0 /*offset */,
+ 0 /* flags */,
+ MainTy);
+
+ if (TYPE_VOLATILE(type) || TYPE_READONLY(type)) {
+ TypeCache[type] = Ty.getNode();
+ return Ty;
+ }
+
+  // If, for some reason, an unqualified variant is seen, use the main type.
+ return MainTy;
+}
+
+/// getOrCreateType - Get the type from the cache or create a new type if
+/// necessary.
+DIType DebugInfo::getOrCreateType(tree type) {
+ DEBUGASSERT(type != NULL_TREE && type != error_mark_node &&
+ "Not a type.");
+ if (type == NULL_TREE || type == error_mark_node) return DIType();
+
+ // Should only be void if a pointer/reference/return type. Returning NULL
+ // allows the caller to produce a non-derived type.
+ if (TREE_CODE(type) == VOID_TYPE) return DIType();
+
+ // Check to see if the compile unit already has created this type.
+ std::map<tree_node *, MDNode *>::iterator I = TypeCache.find(type);
+ if (I != TypeCache.end())
+ return DIType(I->second);
+
+ DIType MainTy;
+ if (type != TYPE_MAIN_VARIANT(type) && TYPE_MAIN_VARIANT(type))
+ MainTy = getOrCreateType(TYPE_MAIN_VARIANT(type));
+
+ DIType Ty = createVariantType(type, MainTy);
+ if (!Ty.isNull())
+ return Ty;
+
+ // Work out details of type.
+ switch (TREE_CODE(type)) {
+ case ERROR_MARK:
+ case LANG_TYPE:
+ case TRANSLATION_UNIT_DECL:
+ default: {
+ DEBUGASSERT(0 && "Unsupported type");
+ return DIType();
+ }
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ Ty = createPointerType(type);
+ break;
+
+ case OFFSET_TYPE: {
+ // gen_type_die(TYPE_OFFSET_BASETYPE(type), context_die);
+ // gen_type_die(TREE_TYPE(type), context_die);
+ // gen_ptr_to_mbr_type_die(type, context_die);
+ break;
+ }
+
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ Ty = createMethodType(type);
+ break;
+
+ case VECTOR_TYPE:
+ case ARRAY_TYPE:
+ Ty = createArrayType(type);
+ break;
+
+ case ENUMERAL_TYPE:
+ Ty = createEnumType(type);
+ break;
+
+ case RECORD_TYPE:
+ case QUAL_UNION_TYPE:
+ case UNION_TYPE:
+ Ty = createStructType(type);
+ break;
+
+ case INTEGER_TYPE:
+ case REAL_TYPE:
+ case COMPLEX_TYPE:
+ case BOOLEAN_TYPE:
+ Ty = createBasicType(type);
+ break;
+ }
+ TypeCache[type] = Ty.getNode();
+ return Ty;
+}
+
+/// Initialize - Initialize debug info by creating compile unit for
+/// main_input_filename. This must be invoked after language dependent
+/// initialization is done.
+void DebugInfo::Initialize() {
+
+ // Each input file is encoded as a separate compile unit in LLVM
+ // debugging information output. However, many target specific tool chains
+ // prefer to encode only one compile unit in an object file. In this
+ // situation, the LLVM code generator will include debugging information
+ // entities in the compile unit that is marked as main compile unit. The
+  // code generator accepts at most one main compile unit per module. If a
+ // module does not contain any main compile unit then the code generator
+ // will emit multiple compile units in the output object file.
+ getOrCreateCompileUnit(main_input_filename, true);
+}
+
+/// getOrCreateCompileUnit - Get the compile unit from the cache or
+/// create a new one if necessary.
+DICompileUnit DebugInfo::getOrCreateCompileUnit(const char *FullPath,
+ bool isMain) {
+ if (!FullPath)
+ FullPath = main_input_filename;
+ MDNode *&CU = CUCache[FullPath];
+ if (CU)
+ return DICompileUnit(CU);
+
+ // Get source file information.
+ std::string Directory;
+ std::string FileName;
+ DirectoryAndFile(FullPath, Directory, FileName);
+
+ // Set up Language number.
+ unsigned LangTag;
+ const std::string LanguageName(lang_hooks.name);
+ if (LanguageName == "GNU C")
+ LangTag = DW_LANG_C89;
+ else if (LanguageName == "GNU C++")
+ LangTag = DW_LANG_C_plus_plus;
+ else if (LanguageName == "GNU Ada")
+ LangTag = DW_LANG_Ada95;
+ else if (LanguageName == "GNU F77")
+ LangTag = DW_LANG_Fortran77;
+ else if (LanguageName == "GNU Pascal")
+ LangTag = DW_LANG_Pascal83;
+ else if (LanguageName == "GNU Java")
+ LangTag = DW_LANG_Java;
+ else if (LanguageName == "GNU Objective-C")
+ LangTag = DW_LANG_ObjC;
+ else if (LanguageName == "GNU Objective-C++")
+ LangTag = DW_LANG_ObjC_plus_plus;
+ else
+ LangTag = DW_LANG_C89;
+
+  // flag_objc_abi represents the Objective-C runtime version number. It is
+  // zero for all other languages.
+ unsigned ObjcRunTimeVer = 0;
+//TODO if (flag_objc_abi != 0 && flag_objc_abi != -1)
+//TODO ObjcRunTimeVer = flag_objc_abi;
+ DICompileUnit NewCU = DebugFactory.CreateCompileUnit(LangTag, FileName,
+ Directory,
+ version_string, isMain,
+ optimize, "",
+ ObjcRunTimeVer);
+ CU = NewCU.getNode();
+ return NewCU;
+}
diff --git a/dragonegg/llvm-debug.h b/dragonegg/llvm-debug.h
new file mode 100644
index 00000000000..f53e201e1e9
--- /dev/null
+++ b/dragonegg/llvm-debug.h
@@ -0,0 +1,140 @@
+/* Internal interfaces between the LLVM backend components
+Copyright (C) 2006 Free Software Foundation, Inc.
+Contributed by Jim Laskey (jlaskey@apple.com)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+//===----------------------------------------------------------------------===//
+// This is a C++ header file that defines the debug interfaces shared among
+// the llvm-*.cpp files.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUG_H
+#define LLVM_DEBUG_H
+
+// LLVM headers
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Support/Dwarf.h"
+
+// System headers
+#include <string>
+#include <map>
+#include <vector>
+
+namespace llvm {
+
+// Forward declarations
+class AllocaInst;
+class BasicBlock;
+class CallInst;
+class Function;
+class Module;
+
+/// DebugInfo - This class gathers all debug information during compilation
+/// and is responsible for emitting it to LLVM globals or passing it directly
+/// to the backend.
+class DebugInfo {
+private:
+ Module *M; // The current module.
+ DIFactory DebugFactory;
+  const char *CurFullPath;              // Current location file encountered.
+  int CurLineNo;                        // Current location line# encountered.
+ const char *PrevFullPath; // Previous location file encountered.
+ int PrevLineNo; // Previous location line# encountered.
+ BasicBlock *PrevBB; // Last basic block encountered.
+ std::map<std::string, MDNode *> CUCache;
+ std::map<tree_node *, MDNode *> TypeCache;
+ // Cache of previously constructed
+ // Types.
+ std::vector<DIDescriptor> RegionStack;
+ // Stack to track declarative scopes.
+
+ std::map<tree_node *, DIDescriptor> RegionMap;
+public:
+ DebugInfo(Module *m);
+
+ /// Initialize - Initialize debug info by creating compile unit for
+ /// main_input_filename. This must be invoked after language dependent
+ /// initialization is done.
+ void Initialize();
+
+ // Accessors.
+ void setLocationFile(const char *FullPath) { CurFullPath = FullPath; }
+ void setLocationLine(int LineNo) { CurLineNo = LineNo; }
+
+ /// EmitFunctionStart - Constructs the debug code for entering a function -
+ /// "llvm.dbg.func.start."
+ void EmitFunctionStart(tree_node *FnDecl, Function *Fn, BasicBlock *CurBB);
+
+ /// EmitRegionStart- Constructs the debug code for entering a declarative
+ /// region - "llvm.dbg.region.start."
+ void EmitRegionStart(BasicBlock *CurBB);
+
+ /// EmitRegionEnd - Constructs the debug code for exiting a declarative
+ /// region - "llvm.dbg.region.end."
+ void EmitRegionEnd(BasicBlock *CurBB, bool EndFunction);
+
+  /// EmitDeclare - Constructs the debug code for the allocation of a new
+  /// variable - "llvm.dbg.declare."
+ void EmitDeclare(tree_node *decl, unsigned Tag, StringRef Name,
+ tree_node *type, Value *AI,
+ BasicBlock *CurBB);
+
+ /// EmitStopPoint - Emit a call to llvm.dbg.stoppoint to indicate a change of
+ /// source line.
+ void EmitStopPoint(Function *Fn, BasicBlock *CurBB);
+
+ /// EmitGlobalVariable - Emit information about a global variable.
+ ///
+ void EmitGlobalVariable(GlobalVariable *GV, tree_node *decl);
+
+ /// getOrCreateType - Get the type from the cache or create a new type if
+ /// necessary.
+ DIType getOrCreateType(tree_node *type);
+
+ /// createBasicType - Create BasicType.
+ DIType createBasicType(tree_node *type);
+
+ /// createMethodType - Create MethodType.
+ DIType createMethodType(tree_node *type);
+
+ /// createPointerType - Create PointerType.
+ DIType createPointerType(tree_node *type);
+
+ /// createArrayType - Create ArrayType.
+ DIType createArrayType(tree_node *type);
+
+ /// createEnumType - Create EnumType.
+ DIType createEnumType(tree_node *type);
+
+ /// createStructType - Create StructType for struct or union or class.
+ DIType createStructType(tree_node *type);
+
+  /// createVariantType - Create variant type or return MainTy.
+ DIType createVariantType(tree_node *type, DIType MainTy);
+
+  /// getOrCreateCompileUnit - Get the compile unit from the cache or create a
+  /// new one if necessary.
+ DICompileUnit getOrCreateCompileUnit(const char *FullPath,
+ bool isMain = false);
+
+ /// findRegion - Find tree_node N's region.
+ DIDescriptor findRegion(tree_node *n);
+};
+
+} // end namespace llvm
+
+#endif /* LLVM_DEBUG_H */
diff --git a/dragonegg/llvm-internal.h b/dragonegg/llvm-internal.h
new file mode 100644
index 00000000000..ed82a4eacc7
--- /dev/null
+++ b/dragonegg/llvm-internal.h
@@ -0,0 +1,765 @@
+/* Internal interfaces between the LLVM backend components
+Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
+Contributed by Chris Lattner (sabre@nondot.org)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+//===----------------------------------------------------------------------===//
+// This is a C++ header file that defines the internal interfaces shared among
+// the llvm-*.cpp files.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_INTERNAL_H
+#define LLVM_INTERNAL_H
+
+// LLVM headers
+#include "llvm/CallingConv.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/IRBuilder.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/TargetFolder.h"
+#include "llvm/Support/raw_os_ostream.h"
+
+// System headers
+#include <vector>
+#include <cassert>
+#include <map>
+#include <string>
+
+namespace llvm {
+ class Module;
+ class GlobalVariable;
+ class Function;
+ class GlobalValue;
+ class BasicBlock;
+ class Instruction;
+ class AllocaInst;
+ class BranchInst;
+ class Value;
+ class Constant;
+ class ConstantInt;
+ class Type;
+ class FunctionType;
+ class TargetMachine;
+ class TargetData;
+ class DebugInfo;
+ template<typename> class AssertingVH;
+}
+using namespace llvm;
+
+typedef IRBuilder<true, TargetFolder> LLVMBuilder;
+
+// Global state.
+
+/// TheModule - This is the current global module that we are compiling into.
+///
+extern llvm::Module *TheModule;
+
+/// TheDebugInfo - This object is responsible for gathering all debug
+/// information. If its value is NULL then no debug information should be
+/// gathered.
+extern llvm::DebugInfo *TheDebugInfo;
+
+/// TheTarget - The current target being compiled for.
+///
+extern llvm::TargetMachine *TheTarget;
+
+/// TheFolder - The constant folder to use.
+extern TargetFolder *TheFolder;
+
+/// getTargetData - Return the current TargetData object from TheTarget.
+const TargetData &getTargetData();
+
+/// AttributeUsedGlobals - The list of globals that are marked attribute(used).
+extern SmallSetVector<Constant *,32> AttributeUsedGlobals;
+
+extern Constant* ConvertMetadataStringToGV(const char* str);
+
+/// AddAnnotateAttrsToGlobal - Adds decls that have an annotate attribute to a
+/// vector to be emitted later.
+extern void AddAnnotateAttrsToGlobal(GlobalValue *GV, union tree_node* decl);
+
+// Mapping between GCC declarations and LLVM values.
+
+/// DECL_LLVM - Holds the LLVM expression for the value of a global variable or
+/// function. This value can be evaluated lazily for functions and variables
+/// with static storage duration.
+extern Value *make_decl_llvm(union tree_node *);
+#define DECL_LLVM(NODE) make_decl_llvm(NODE)
+
+/// SET_DECL_LLVM - Set the DECL_LLVM for NODE to LLVM.
+extern Value *set_decl_llvm(union tree_node *, Value *);
+#define SET_DECL_LLVM(NODE, LLVM) set_decl_llvm(NODE, LLVM)
+
+/// DECL_LLVM_IF_SET - The DECL_LLVM for NODE, if it is set, or NULL, if it is
+/// not set.
+extern Value *get_decl_llvm(union tree_node *);
+#define DECL_LLVM_IF_SET(NODE) (HAS_RTL_P(NODE) ? get_decl_llvm(NODE) : NULL)
+
+/// DECL_LLVM_SET_P - Returns nonzero if the DECL_LLVM for NODE has already
+/// been set.
+#define DECL_LLVM_SET_P(NODE) (DECL_LLVM_IF_SET(NODE) != NULL)
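+// For example, a caller can test DECL_LLVM_SET_P(decl) to see whether a value
+// already exists, or use DECL_LLVM(decl) to create one lazily if needed.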
+
+void changeLLVMConstant(Constant *Old, Constant *New);
+void readLLVMTypesStringTable();
+void writeLLVMTypesStringTable();
+void readLLVMValues();
+void writeLLVMValues();
+void clearTargetBuiltinCache();
+const char* extractRegisterName(union tree_node*);
+void handleVisibility(union tree_node* decl, GlobalValue *GV);
+Twine getLLVMAssemblerName(union tree_node *);
+
+struct StructTypeConversionInfo;
+
+/// Return true if and only if field no. N from struct type T is a padding
+/// element added so that the LLVM struct type size matches the GCC struct
+/// type size.
+bool isPaddingElement(union tree_node*, unsigned N);
+
+/// TypeConverter - Implement the converter from GCC types to LLVM types.
+///
+class TypeConverter {
+ /// ConvertingStruct - If we are converting a RECORD or UNION to an LLVM type
+ /// we set this flag to true.
+ bool ConvertingStruct;
+
+  /// PointersToReresolve - When ConvertingStruct is true, the handling of
+  /// POINTER_TYPE and REFERENCE_TYPE is changed to return
+ /// opaque*'s instead of recursively calling ConvertType. When this happens,
+ /// we add the POINTER_TYPE to this list.
+ ///
+ std::vector<tree_node*> PointersToReresolve;
+
+ /// FieldIndexMap - Holds the mapping from a FIELD_DECL to the index of the
+ /// corresponding LLVM field.
+ std::map<tree_node *, unsigned int> FieldIndexMap;
+public:
+ TypeConverter() : ConvertingStruct(false) {}
+
+ const Type *ConvertType(tree_node *type);
+
+ /// GetFieldIndex - Returns the index of the LLVM field corresponding to
+ /// this FIELD_DECL.
+ unsigned int GetFieldIndex(tree_node *field_decl);
+
+ /// GCCTypeOverlapsWithLLVMTypePadding - Return true if the specified GCC type
+ /// has any data that overlaps with structure padding in the specified LLVM
+ /// type.
+ static bool GCCTypeOverlapsWithLLVMTypePadding(tree_node *t, const Type *Ty);
+
+
+ /// ConvertFunctionType - Convert the specified FUNCTION_TYPE or METHOD_TYPE
+ /// tree to an LLVM type. This does the same thing that ConvertType does, but
+ /// it also returns the function's LLVM calling convention and attributes.
+ const FunctionType *ConvertFunctionType(tree_node *type,
+ tree_node *decl,
+ tree_node *static_chain,
+ CallingConv::ID &CallingConv,
+ AttrListPtr &PAL);
+
+  /// ConvertArgListToFnType - Given a DECL_ARGUMENTS list on a GCC tree,
+ /// return the LLVM type corresponding to the function. This is useful for
+ /// turning "T foo(...)" functions into "T foo(void)" functions.
+ const FunctionType *ConvertArgListToFnType(tree_node *type,
+ tree_node *arglist,
+ tree_node *static_chain,
+ CallingConv::ID &CallingConv,
+ AttrListPtr &PAL);
+
+private:
+ const Type *ConvertRECORD(tree_node *type, tree_node *orig_type);
+ const Type *ConvertUNION(tree_node *type, tree_node *orig_type);
+ void SetFieldIndex(tree_node *field_decl, unsigned int Index);
+ bool DecodeStructFields(tree_node *Field, StructTypeConversionInfo &Info);
+ void DecodeStructBitField(tree_node *Field, StructTypeConversionInfo &Info);
+};
+
+extern TypeConverter *TheTypeConverter;
+
+/// ConvertType - Convert the specified tree type to an LLVM type.
+///
+inline const Type *ConvertType(tree_node *type) {
+ return TheTypeConverter->ConvertType(type);
+}
+
+/// GetFieldIndex - Given FIELD_DECL obtain its index.
+///
+inline unsigned int GetFieldIndex(tree_node *field_decl) {
+ return TheTypeConverter->GetFieldIndex(field_decl);
+}
+
+/// getINTEGER_CSTVal - Return the specified INTEGER_CST value as a uint64_t.
+///
+uint64_t getINTEGER_CSTVal(tree_node *exp);
+
+/// isInt64 - Return true if t is an INTEGER_CST that fits in a 64 bit integer.
+/// If Unsigned is false, returns whether it fits in a int64_t. If Unsigned is
+/// true, returns whether the value is non-negative and fits in a uint64_t.
+/// Always returns false for overflowed constants.
+bool isInt64(tree_node *t, bool Unsigned);
+
+/// getInt64 - Extract the value of an INTEGER_CST as a 64 bit integer. If
+/// Unsigned is false, the value must fit in a int64_t. If Unsigned is true,
+/// the value must be non-negative and fit in a uint64_t. Must not be used on
+/// overflowed constants. These conditions can be checked by calling isInt64.
+uint64_t getInt64(tree_node *t, bool Unsigned);
+
+/// isPassedByInvisibleReference - Return true if the specified type should be
+/// passed by 'invisible reference'. In other words, instead of passing the
+/// thing by value, pass the address of a temporary.
+bool isPassedByInvisibleReference(tree_node *type);
+
+/// isSequentialCompatible - Return true if the specified gcc array or pointer
+/// type and the corresponding LLVM SequentialType lay out their components
+/// identically in memory, so doing a GEP accesses the right memory location.
+/// We assume that objects without a known size do not.
+bool isSequentialCompatible(tree_node *type);
+
+/// isBitfield - Returns whether to treat the specified field as a bitfield.
+bool isBitfield(tree_node *field_decl);
+
+/// getFieldOffsetInBits - Return the bit offset of a FIELD_DECL in a structure.
+inline uint64_t getFieldOffsetInBits(tree_node *field) {
+ assert(DECL_FIELD_BIT_OFFSET(field) != 0);
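+  // The total offset is DECL_FIELD_BIT_OFFSET plus DECL_FIELD_OFFSET (a byte
+  // offset within the record), converted to bits.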
+ uint64_t Result = getInt64(DECL_FIELD_BIT_OFFSET(field), true);
+ if (DECL_FIELD_OFFSET(field))
+ Result += getInt64(DECL_FIELD_OFFSET(field), true) * BITS_PER_UNIT;
+ return Result;
+}
+
+/// getDeclaredType - Get the declared type for the specified field, and
+/// not the shrunk-to-fit type that GCC gives us in TREE_TYPE.
+tree_node *getDeclaredType(tree_node *field_decl);
+
+/// ValidateRegisterVariable - Check that a static "asm" variable is
+/// well-formed. If not, emit error messages and return true. If so, return
+/// false.
+bool ValidateRegisterVariable(tree_node *decl);
+
+/// MemRef - This struct holds the information needed for a memory access:
+/// a pointer to the memory, its alignment and whether the access is volatile.
+struct MemRef {
+ Value *Ptr;
+ bool Volatile;
+private:
+ unsigned char LogAlign;
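+  // The alignment is stored as its base-2 logarithm so it fits in one byte.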
+
+public:
+ MemRef() : Ptr(0), Volatile(false), LogAlign(0) {}
+ MemRef(Value *P, uint32_t A, bool V) : Ptr(P), Volatile(V) {
+ // Forbid alignment 0 along with non-power-of-2 alignment values.
+ assert(isPowerOf2_32(A) && "Alignment not a power of 2!");
+ LogAlign = Log2_32(A);
+ }
+
+ uint32_t getAlignment() const {
+ return 1U << LogAlign;
+ }
+};
+
+/// LValue - This struct represents an lvalue in the program. In particular,
+/// the Ptr member indicates the memory that the lvalue lives in. Alignment
+/// is the alignment of the memory (in bytes). If this is a bitfield reference,
+/// BitStart indicates the first bit in the memory that is part of the field
+/// and BitSize indicates the extent.
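+/// For example, a reference to an 8-bit field starting at bit 3 of the
+/// addressed memory would have BitStart == 3 and BitSize == 8.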
+///
+/// "LValue" is intended to be a light-weight object passed around by-value.
+struct LValue {
+ Value *Ptr;
+ unsigned char BitStart;
+ unsigned char BitSize;
+private:
+ unsigned char LogAlign;
+
+public:
+ LValue() : Ptr(0), BitStart(255), BitSize(255), LogAlign(0) {}
+ LValue(Value *P, uint32_t A) : Ptr(P), BitStart(255), BitSize(255) {
+ // Forbid alignment 0 along with non-power-of-2 alignment values.
+ assert(isPowerOf2_32(A) && "Alignment not a power of 2!");
+ LogAlign = Log2_32(A);
+ }
+ LValue(Value *P, uint32_t A, unsigned BSt, unsigned BSi)
+ : Ptr(P), BitStart(BSt), BitSize(BSi) {
+ assert(BitStart == BSt && BitSize == BSi &&
+ "Bit values larger than 256?");
+ // Forbid alignment 0 along with non-power-of-2 alignment values.
+ assert(isPowerOf2_32(A) && "Alignment not a power of 2!");
+ LogAlign = Log2_32(A);
+ }
+
+ uint32_t getAlignment() const {
+ return 1U << LogAlign;
+ }
+ bool isBitfield() const { return BitStart != 255; }
+};
+
+/// PhiRecord - This struct holds the LLVM PHI node associated with a GCC phi.
+struct PhiRecord {
+ gimple gcc_phi;
+ PHINode *PHI;
+};
+
+/// TreeToLLVM - An instance of this class is created and used to convert the
+/// body of each function to LLVM.
+///
+class TreeToLLVM {
+ // State that is initialized when the function starts.
+ const TargetData &TD;
+ tree_node *FnDecl;
+ Function *Fn;
+ BasicBlock *ReturnBB;
+ BasicBlock *UnwindBB;
+ unsigned ReturnOffset;
+
+ // State that changes as the function is emitted.
+
+ /// Builder - Instruction creator, the location to insert into is always the
+ /// same as &Fn->back().
+ LLVMBuilder Builder;
+
+ // AllocaInsertionPoint - Place to insert alloca instructions. Lazily created
+ // and managed by CreateTemporary.
+ Instruction *AllocaInsertionPoint;
+
+ // SSAInsertionPoint - Place to insert reads corresponding to SSA default
+ // definitions.
+ Instruction *SSAInsertionPoint;
+
+ /// BasicBlocks - Map from GCC to LLVM basic blocks.
+ DenseMap<basic_block, BasicBlock*> BasicBlocks;
+
+ /// LocalDecls - Map from local declarations to their associated LLVM values.
+ DenseMap<tree, AssertingVH<> > LocalDecls;
+
+ /// PendingPhis - Phi nodes which have not yet been populated with operands.
+ SmallVector<PhiRecord, 16> PendingPhis;
+
+ // SSANames - Map from GCC ssa names to the defining LLVM value.
+ DenseMap<tree, AssertingVH<> > SSANames;
+
+public:
+
+ //===---------------------- Local Declarations --------------------------===//
+
+ /// DECL_LOCAL - Like DECL_LLVM, returns the LLVM expression for the value of
+ /// a variable or function. However DECL_LOCAL can be used with declarations
+ /// local to the current function as well as with global declarations.
+ Value *make_decl_local(union tree_node *);
+ #define DECL_LOCAL(NODE) make_decl_local(NODE)
+
+ /// SET_DECL_LOCAL - Set the DECL_LOCAL for NODE to LLVM.
+ Value *set_decl_local(union tree_node *, Value *);
+ #define SET_DECL_LOCAL(NODE, LLVM) set_decl_local(NODE, LLVM)
+
+ /// DECL_LOCAL_IF_SET - The DECL_LOCAL for NODE, if it is set, or NULL, if it
+ /// is not set.
+ Value *get_decl_local(union tree_node *);
+ #define DECL_LOCAL_IF_SET(NODE) (HAS_RTL_P(NODE) ? get_decl_local(NODE) : NULL)
+
+ /// DECL_LOCAL_SET_P - Returns nonzero if the DECL_LOCAL for NODE has already
+ /// been set.
+ #define DECL_LOCAL_SET_P(NODE) (DECL_LOCAL_IF_SET(NODE) != NULL)
+
+
+private:
+
+ //===---------------------- Exception Handling --------------------------===//
+
+ /// LandingPads - The landing pad for a given EH region.
+ IndexedMap<BasicBlock *> LandingPads;
+
+ /// PostPads - The post landing pad for a given EH region.
+ IndexedMap<BasicBlock *> PostPads;
+
+ /// ExceptionValue - Is the local to receive the current exception.
+ Value *ExceptionValue;
+
+ /// ExceptionSelectorValue - Is the local to receive the current exception
+ /// selector.
+ Value *ExceptionSelectorValue;
+
+ /// FuncEHException - Function used to receive the exception.
+ Function *FuncEHException;
+
+ /// FuncEHSelector - Function used to receive the exception selector.
+ Function *FuncEHSelector;
+
+  /// FuncEHGetTypeID - Function used to return the type id for a given typeinfo.
+ Function *FuncEHGetTypeID;
+
+ /// NumAddressTakenBlocks - Count the number of labels whose addresses are
+ /// taken.
+ uint64_t NumAddressTakenBlocks;
+
+ /// AddressTakenBBNumbers - For each label with its address taken, we keep
+ /// track of its unique ID.
+ std::map<BasicBlock*, ConstantInt*> AddressTakenBBNumbers;
+
+ /// IndirectGotoBlock - If non-null, the block that indirect goto's in this
+ /// function branch to.
+ BasicBlock *IndirectGotoBlock;
+
+ /// IndirectGotoValue - This is set to be the alloca temporary that the
+ /// indirect goto block switches on.
+ Value *IndirectGotoValue;
+
+public:
+ TreeToLLVM(tree_node *fndecl);
+ ~TreeToLLVM();
+
+ /// getFUNCTION_DECL - Return the FUNCTION_DECL node for the current function
+ /// being compiled.
+ tree_node *getFUNCTION_DECL() const { return FnDecl; }
+
+ /// EmitFunction - Convert 'fndecl' to LLVM code.
+ Function *EmitFunction();
+
+ /// EmitBasicBlock - Convert the given basic block.
+ void EmitBasicBlock(basic_block bb);
+
+ /// EmitLV - Convert the specified l-value tree node to LLVM code, returning
+ /// the address of the result.
+ LValue EmitLV(tree_node *exp);
+
+ /// getIndirectGotoBlockNumber - Return the unique ID of the specified basic
+ /// block for uses that take the address of it.
+ Constant *getIndirectGotoBlockNumber(BasicBlock *BB);
+
+ /// getIndirectGotoBlock - Get (and potentially lazily create) the indirect
+ /// goto block.
+ BasicBlock *getIndirectGotoBlock();
+
+ void TODO(tree_node *exp = 0);
+
+ /// CastToAnyType - Cast the specified value to the specified type regardless
+ /// of the types involved. This is an inferred cast.
+ Value *CastToAnyType (Value *V, bool VSigned, const Type* Ty, bool TySigned);
+
+ /// CastToUIntType - Cast the specified value to the specified type assuming
+ /// that V's type and Ty are integral types. This arbitrates between BitCast,
+ /// Trunc and ZExt.
+ Value *CastToUIntType(Value *V, const Type* Ty);
+
+ /// CastToSIntType - Cast the specified value to the specified type assuming
+ /// that V's type and Ty are integral types. This arbitrates between BitCast,
+ /// Trunc and SExt.
+ Value *CastToSIntType(Value *V, const Type* Ty);
+
+ /// CastToFPType - Cast the specified value to the specified type assuming
+ /// that V's type and Ty are floating point types. This arbitrates between
+ /// BitCast, FPTrunc and FPExt.
+ Value *CastToFPType(Value *V, const Type* Ty);
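+
+ // Editorial illustration (a sketch, not part of the plugin): CastToUIntType
+ // chooses the operation from the relative bit widths, e.g. casting an i8
+ // value to i32 uses ZExt, casting i32 to i8 uses Trunc, and a same-width
+ // cast degenerates to a no-op/BitCast. CastToSIntType is analogous with
+ // SExt, and CastToFPType with FPExt/FPTrunc.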
+
+ /// CreateTemporary - Create a new alloca instruction of the specified type,
+ /// inserting it into the entry block and returning it. The resulting
+ /// instruction's type is a pointer to the specified type.
+ AllocaInst *CreateTemporary(const Type *Ty);
+
+ /// CreateTempLoc - Like CreateTemporary, but returns a MemRef.
+ MemRef CreateTempLoc(const Type *Ty);
+
+ /// EmitAggregateCopy - Copy the elements from SrcLoc to DestLoc, using the
+ /// GCC type specified by GCCType to know which elements to copy.
+ void EmitAggregateCopy(MemRef DestLoc, MemRef SrcLoc, tree_node *GCCType);
+
+private: // Helper functions.
+
+ /// StartFunctionBody - Start the emission of 'fndecl', outputting all
+ /// declarations for parameters and setting things up.
+ void StartFunctionBody();
+
+ /// FinishFunctionBody - Once the body of the function has been emitted, this
+ /// cleans up and returns the result function.
+ Function *FinishFunctionBody();
+
+ /// PopulatePhiNodes - Populate generated phi nodes with their operands.
+ void PopulatePhiNodes();
+
+ /// getBasicBlock - Find or create the LLVM basic block corresponding to BB.
+ BasicBlock *getBasicBlock(basic_block bb);
+
+public:
+ /// getLabelDeclBlock - Lazily get and create a basic block for the specified
+ /// label.
+ BasicBlock *getLabelDeclBlock(tree_node *LabelDecl);
+
+private:
+ /// EmitSSA_NAME - Return the defining value of the given SSA_NAME.
+ /// Only creates code in the entry block.
+ Value *EmitSSA_NAME(tree_node *reg);
+
+ /// EmitGimpleInvariantAddress - The given address is constant in this
+ /// function. Return the corresponding LLVM value. Only creates code in
+ /// the entry block.
+ Value *EmitGimpleInvariantAddress(tree_node *reg);
+
+ /// EmitGimpleConstant - Convert the given global constant of register type to
+ /// an LLVM constant. Creates no code, only constants.
+ Constant *EmitGimpleConstant(tree_node *reg);
+
+ /// EmitGimpleMinInvariant - The given value is constant in this function.
+ /// Return the corresponding LLVM value. Only creates code in the entry block.
+ Value *EmitGimpleMinInvariant(tree_node *reg) {
+ if (TREE_CODE(reg) == ADDR_EXPR)
+ return EmitGimpleInvariantAddress(reg);
+ return EmitGimpleConstant(reg);
+ }
+
+ /// EmitGimpleReg - Convert the specified gimple register or local constant of
+ /// register type to an LLVM value. Only creates code in the entry block.
+ Value *EmitGimpleReg(tree_node *reg) {
+ if (TREE_CODE(reg) == SSA_NAME)
+ return EmitSSA_NAME(reg);
+ return EmitGimpleMinInvariant(reg);
+ }
+
+ /// Emit - Convert the specified tree node to LLVM code. If the node is an
+ /// expression that fits into an LLVM scalar value, the result is returned. If
+ /// the result is an aggregate, it is stored into the location specified by
+ /// DestLoc.
+ Value *Emit(tree_node *exp, const MemRef *DestLoc);
+
+ /// EmitBlock - Add the specified basic block to the end of the function. If
+ /// the previous block falls through into it, add an explicit branch.
+ void EmitBlock(BasicBlock *BB);
+
+ /// EmitAggregateZero - Zero the elements of DestLoc.
+ ///
+ void EmitAggregateZero(MemRef DestLoc, tree_node *GCCType);
+
+ /// EmitMemCpy/EmitMemMove/EmitMemSet - Emit an llvm.memcpy/llvm.memmove or
+ /// llvm.memset call with the specified operands. Returns DestPtr bitcast
+ /// to i8*.
+ Value *EmitMemCpy(Value *DestPtr, Value *SrcPtr, Value *Size, unsigned Align);
+ Value *EmitMemMove(Value *DestPtr, Value *SrcPtr, Value *Size, unsigned Align);
+ Value *EmitMemSet(Value *DestPtr, Value *SrcVal, Value *Size, unsigned Align);
+
+ /// EmitLandingPads - Emit EH landing pads.
+ void EmitLandingPads();
+
+ /// EmitPostPads - Emit EH post landing pads.
+ void EmitPostPads();
+
+ /// EmitUnwindBlock - Emit the lazily created EH unwind block.
+ void EmitUnwindBlock();
+
+private: // Helpers for exception handling.
+
+ /// CreateExceptionValues - Create values used internally by exception
+ /// handling.
+ void CreateExceptionValues();
+
+ /// getPostPad - Return the post landing pad for the given exception handling
+ /// region, creating it if necessary.
+ BasicBlock *getPostPad(unsigned RegionNo);
+
+private:
+
+ // Render* - Convert GIMPLE to LLVM.
+ void RenderGIMPLE_ASM(gimple stmt);
+ void RenderGIMPLE_ASSIGN(gimple stmt);
+ void RenderGIMPLE_CALL(gimple stmt);
+ void RenderGIMPLE_COND(gimple stmt);
+ void RenderGIMPLE_GOTO(gimple stmt);
+ void RenderGIMPLE_RESX(gimple stmt);
+ void RenderGIMPLE_RETURN(gimple stmt);
+ void RenderGIMPLE_SWITCH(gimple stmt);
+
+ // Render helpers.
+ void WriteScalarToLHS(tree lhs, Value *Scalar);
+
+private:
+ void EmitAutomaticVariableDecl(tree_node *decl);
+
+ /// isNoopCast - Return true if a cast from V to Ty does not change any bits.
+ ///
+ static bool isNoopCast(Value *V, const Type *Ty);
+
+ void HandleMultiplyDefinedGimpleTemporary(tree_node *var);
+
+ /// EmitAnnotateIntrinsic - Emits a call to the annotate attribute intrinsic.
+ void EmitAnnotateIntrinsic(Value *V, tree_node *decl);
+
+ /// EmitTypeGcroot - Emits the call that registers the given value as a gcroot.
+ void EmitTypeGcroot(Value *V, tree_node *decl);
+private:
+
+ // Emit* - These are delegates from Emit, and have the same parameter
+ // characteristics.
+
+ // Expressions.
+ Value *EmitGimpleAssignRHS(gimple stmt, const MemRef *DestLoc);
+ Value *EmitGimpleCallRHS(gimple stmt, const MemRef *DestLoc);
+ Value *EmitLoadOfLValue(tree_node *exp, const MemRef *DestLoc);
+ Value *EmitOBJ_TYPE_REF(tree_node *exp, const MemRef *DestLoc);
+ Value *EmitADDR_EXPR(tree_node *exp);
+ Value *EmitOBJ_TYPE_REF(tree_node *exp);
+ Value *EmitCallOf(Value *Callee, gimple stmt, const MemRef *DestLoc,
+ const AttrListPtr &PAL);
+ Value *EmitNOP_EXPR(tree_node *type, tree_node *op, const MemRef *DestLoc);
+ Value *EmitCONVERT_EXPR(tree_node *type, tree_node *op);
+ Value *EmitVIEW_CONVERT_EXPR(tree_node *exp, const MemRef *DestLoc);
+ Value *EmitNEGATE_EXPR(tree_node *op);
+ Value *EmitCONJ_EXPR(tree_node *op);
+ Value *EmitABS_EXPR(tree_node *op);
+ Value *EmitBIT_NOT_EXPR(tree_node *op);
+ Value *EmitTRUTH_NOT_EXPR(tree_node *type, tree_node *op);
+ Value *EmitCompare(tree_node *lhs, tree_node *rhs, tree_code code);
+ Value *EmitBinOp(tree_node *type, tree_code code, tree_node *op0,
+ tree_node *op1, unsigned Opc);
+ Value *EmitTruthOp(tree_node *type, tree_node *op0, tree_node *op1,
+ unsigned Opc);
+ Value *EmitShiftOp(tree_node *op0, tree_node* op1, unsigned Opc);
+ Value *EmitRotateOp(tree_node *type, tree_node *op0, tree_node *op1,
+ unsigned Opc1, unsigned Opc2);
+ Value *EmitMinMaxExpr(tree_node *type, tree_node *op0, tree_node* op1,
+ unsigned UIPred, unsigned SIPred, unsigned Opc,
+ bool isMax);
+ Value *EmitFLOOR_MOD_EXPR(tree_node *type, tree_node *op0, tree_node *op1);
+ Value *EmitCEIL_DIV_EXPR(tree_node *type, tree_node *op0, tree_node *op1);
+ Value *EmitFLOOR_DIV_EXPR(tree_node *type, tree_node *op0, tree_node *op1);
+ Value *EmitROUND_DIV_EXPR(tree_node *type, tree_node *op0, tree_node *op1);
+ Value *EmitFieldAnnotation(Value *FieldPtr, tree_node *FieldDecl);
+ Value *EmitPOINTER_PLUS_EXPR(tree_node *type, tree_node *op0, tree_node *op1);
+ Value *EmitXXXXPART_EXPR(tree_node *exp, unsigned Idx);
+ Value *EmitPAREN_EXPR(tree_node *exp);
+
+ // Exception Handling.
+ Value *EmitEXC_PTR_EXPR(tree_node *exp);
+ Value *EmitFILTER_EXPR(tree_node *exp);
+
+ // Inline Assembly and Register Variables.
+ Value *EmitReadOfRegisterVariable(tree_node *vardecl, const MemRef *DestLoc);
+ void EmitModifyOfRegisterVariable(tree_node *vardecl, Value *RHS);
+
+ // Helpers for Builtin Function Expansion.
+ void EmitMemoryBarrier(bool ll, bool ls, bool sl, bool ss);
+ Value *BuildVector(const std::vector<Value*> &Elts);
+ Value *BuildVector(Value *Elt, ...);
+ Value *BuildVectorShuffle(Value *InVec1, Value *InVec2, ...);
+ Value *BuildBinaryAtomicBuiltin(gimple stmt, Intrinsic::ID id);
+ Value *BuildCmpAndSwapAtomicBuiltin(gimple stmt, tree_node *type,
+ bool isBool);
+
+ // Builtin Function Expansion.
+ bool EmitBuiltinCall(gimple stmt, tree_node *fndecl,
+ const MemRef *DestLoc, Value *&Result);
+ bool EmitFrontendExpandedBuiltinCall(gimple stmt, tree_node *fndecl,
+ const MemRef *DestLoc, Value *&Result);
+ bool EmitBuiltinUnaryOp(Value *InVal, Value *&Result, Intrinsic::ID Id);
+ Value *EmitBuiltinSQRT(gimple stmt);
+ Value *EmitBuiltinPOWI(gimple stmt);
+ Value *EmitBuiltinPOW(gimple stmt);
+
+ bool EmitBuiltinConstantP(gimple stmt, Value *&Result);
+ bool EmitBuiltinAlloca(gimple stmt, Value *&Result);
+ bool EmitBuiltinExpect(gimple stmt, const MemRef *DestLoc, Value *&Result);
+ bool EmitBuiltinExtendPointer(gimple stmt, Value *&Result);
+ bool EmitBuiltinVAStart(gimple stmt);
+ bool EmitBuiltinVAEnd(gimple stmt);
+ bool EmitBuiltinVACopy(gimple stmt);
+ bool EmitBuiltinMemCopy(gimple stmt, Value *&Result,
+ bool isMemMove, bool SizeCheck);
+ bool EmitBuiltinMemSet(gimple stmt, Value *&Result, bool SizeCheck);
+ bool EmitBuiltinBZero(gimple stmt, Value *&Result);
+ bool EmitBuiltinPrefetch(gimple stmt);
+ bool EmitBuiltinReturnAddr(gimple stmt, Value *&Result, bool isFrame);
+ bool EmitBuiltinExtractReturnAddr(gimple stmt, Value *&Result);
+ bool EmitBuiltinFrobReturnAddr(gimple stmt, Value *&Result);
+ bool EmitBuiltinStackSave(gimple stmt, Value *&Result);
+ bool EmitBuiltinStackRestore(gimple stmt);
+ bool EmitBuiltinDwarfCFA(gimple stmt, Value *&Result);
+ bool EmitBuiltinDwarfSPColumn(gimple stmt, Value *&Result);
+ bool EmitBuiltinEHReturnDataRegno(gimple stmt, Value *&Result);
+ bool EmitBuiltinEHReturn(gimple stmt, Value *&Result);
+ bool EmitBuiltinInitDwarfRegSizes(gimple stmt, Value *&Result);
+ bool EmitBuiltinUnwindInit(gimple stmt, Value *&Result);
+ bool EmitBuiltinInitTrampoline(gimple stmt, Value *&Result);
+
+ // Complex Math Expressions.
+ Value *CreateComplex(Value *Real, Value *Imag);
+ void SplitComplex(Value *Complex, Value *&Real, Value *&Imag);
+ Value *EmitCOMPLEX_EXPR(tree op0, tree op1);
+ Value *EmitComplexBinOp(tree_node *type, tree_code code, tree_node *op0,
+ tree_node *op1);
+
+ // L-Value Expressions.
+ LValue EmitLV_ARRAY_REF(tree_node *exp);
+ LValue EmitLV_BIT_FIELD_REF(tree_node *exp);
+ LValue EmitLV_COMPONENT_REF(tree_node *exp);
+ LValue EmitLV_DECL(tree_node *exp);
+ LValue EmitLV_EXC_PTR_EXPR(tree_node *exp);
+ LValue EmitLV_FILTER_EXPR(tree_node *exp);
+ LValue EmitLV_INDIRECT_REF(tree_node *exp);
+ LValue EmitLV_VIEW_CONVERT_EXPR(tree_node *exp);
+ LValue EmitLV_WITH_SIZE_EXPR(tree_node *exp);
+ LValue EmitLV_XXXXPART_EXPR(tree_node *exp, unsigned Idx);
+ LValue EmitLV_SSA_NAME(tree_node *exp);
+
+ // Constant Expressions.
+ Value *EmitINTEGER_CST(tree_node *exp);
+ Value *EmitREAL_CST(tree_node *exp);
+ Value *EmitCONSTRUCTOR(tree_node *exp, const MemRef *DestLoc);
+
+ // Optional target defined builtin intrinsic expanding function.
+ bool TargetIntrinsicLower(gimple stmt,
+ unsigned FnCode,
+ const MemRef *DestLoc,
+ Value *&Result,
+ const Type *ResultType,
+ std::vector<Value*> &Ops);
+};
+
+/// TreeConstantToLLVM - An instance of this class is created and used to
+/// convert tree constant values to LLVM. This is primarily for things like
+/// global variable initializers.
+///
+class TreeConstantToLLVM {
+public:
+ // Constant Expressions
+ static Constant *Convert(tree_node *exp);
+ static Constant *ConvertINTEGER_CST(tree_node *exp);
+ static Constant *ConvertREAL_CST(tree_node *exp);
+ static Constant *ConvertVECTOR_CST(tree_node *exp);
+ static Constant *ConvertSTRING_CST(tree_node *exp);
+ static Constant *ConvertCOMPLEX_CST(tree_node *exp);
+ static Constant *ConvertNOP_EXPR(tree_node *exp);
+ static Constant *ConvertCONVERT_EXPR(tree_node *exp);
+ static Constant *ConvertBinOp_CST(tree_node *exp);
+ static Constant *ConvertCONSTRUCTOR(tree_node *exp);
+ static Constant *ConvertArrayCONSTRUCTOR(tree_node *exp);
+ static Constant *ConvertRecordCONSTRUCTOR(tree_node *exp);
+ static Constant *ConvertUnionCONSTRUCTOR(tree_node *exp);
+ static Constant *ConvertPOINTER_PLUS_EXPR(tree_node *exp);
+
+ // Constant Expression l-values.
+ static Constant *EmitLV(tree_node *exp);
+ static Constant *EmitLV_Decl(tree_node *exp);
+ static Constant *EmitLV_LABEL_DECL(tree_node *exp);
+ static Constant *EmitLV_COMPLEX_CST(tree_node *exp);
+ static Constant *EmitLV_STRING_CST(tree_node *exp);
+ static Constant *EmitLV_COMPONENT_REF(tree_node *exp);
+ static Constant *EmitLV_ARRAY_REF(tree_node *exp);
+
+};
+
+#endif /* LLVM_INTERNAL_H */
diff --git a/dragonegg/llvm-types.cpp b/dragonegg/llvm-types.cpp
new file mode 100644
index 00000000000..4b097b9a5e4
--- /dev/null
+++ b/dragonegg/llvm-types.cpp
@@ -0,0 +1,2210 @@
+/* Tree type to LLVM type converter
+Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
+Contributed by Chris Lattner (sabre@nondot.org)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+//===----------------------------------------------------------------------===//
+// This is the code that converts GCC tree types into LLVM types.
+//===----------------------------------------------------------------------===//
+
+// LLVM headers
+#include "llvm/CallingConv.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/TypeSymbolTable.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Assembly/Writer.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+
+// System headers
+#include <gmp.h>
+#include <map>
+
+// GCC headers
+#undef VISIBILITY_HIDDEN
+
+extern "C" {
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+}
+
+// Plugin headers
+#include "llvm-abi.h"
+extern "C" {
+#include "llvm-cache.h"
+}
+#include "bits_and_bobs.h"
+
+static LLVMContext &Context = getGlobalContext();
+
+//===----------------------------------------------------------------------===//
+// Matching LLVM types with GCC trees
+//===----------------------------------------------------------------------===//
+
+// GET_TYPE_LLVM/SET_TYPE_LLVM - Associate an LLVM type with each TREE type.
+// These are lazily computed by ConvertType.
+
+const Type *llvm_set_type(tree Tr, const Type *Ty) {
+ assert(TYPE_P(Tr) && "Expected a gcc type!");
+ return (const Type *)llvm_set_cached(Tr, Ty);
+}
+
+#define SET_TYPE_LLVM(NODE, TYPE) llvm_set_type(NODE, TYPE)
+
+const Type *llvm_get_type(tree Tr) {
+ assert(TYPE_P(Tr) && "Expected a gcc type!");
+ return (const Type *)llvm_get_cached(Tr);
+}
+
+#define GET_TYPE_LLVM(NODE) llvm_get_type(NODE)
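+
+// Editorial illustration (a sketch, not part of the plugin): the conversion
+// routines below use this cache in a check-then-fill pattern, roughly
+//   if (const Type *Cached = GET_TYPE_LLVM(type))
+//     return Cached;
+//   const Type *Ty = /* compute the LLVM type */;
+//   return SET_TYPE_LLVM(type, Ty);
+// (abstract types go through TypeDB.setType instead, as noted further down).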
+
+//TODO// Read LLVM Types string table
+//TODOvoid readLLVMTypesStringTable() {
+//TODO
+//TODO GlobalValue *V = TheModule->getNamedGlobal("llvm.pch.types");
+//TODO if (!V)
+//TODO return;
+//TODO
+//TODO // Value *GV = TheModule->getValueSymbolTable().lookup("llvm.pch.types");
+//TODO GlobalVariable *GV = cast<GlobalVariable>(V);
+//TODO ConstantStruct *LTypesNames = cast<ConstantStruct>(GV->getOperand(0));
+//TODO
+//TODO for (unsigned i = 0; i < LTypesNames->getNumOperands(); ++i) {
+//TODO const Type *Ty = NULL;
+//TODO
+//TODO if (ConstantArray *CA =
+//TODO dyn_cast<ConstantArray>(LTypesNames->getOperand(i))) {
+//TODO std::string Str = CA->getAsString();
+//TODO Ty = TheModule->getTypeByName(Str);
+//TODO assert (Ty != NULL && "Invalid Type in LTypes string table");
+//TODO }
+//TODO // If V is not a string then it is empty. Insert NULL to represent
+//TODO // empty entries.
+//TODO LTypes.push_back(Ty);
+//TODO }
+//TODO
+//TODO // Now, llvm.pch.types value is not required so remove it from the symbol
+//TODO // table.
+//TODO GV->eraseFromParent();
+//TODO}
+//TODO
+//TODO
+//TODO// GCC trees use the LTypes vector's index to reach LLVM types.
+//TODO// Create a string table to hold these LLVM types' names. This string
+//TODO// table will be used to recreate LTypes vector after loading PCH.
+//TODOvoid writeLLVMTypesStringTable() {
+//TODO
+//TODO if (LTypes.empty())
+//TODO return;
+//TODO
+//TODO std::vector<Constant *> LTypesNames;
+//TODO std::map < const Type *, std::string > TypeNameMap;
+//TODO
+//TODO // Collect Type Names in advance.
+//TODO const TypeSymbolTable &ST = TheModule->getTypeSymbolTable();
+//TODO TypeSymbolTable::const_iterator TI = ST.begin();
+//TODO for (; TI != ST.end(); ++TI) {
+//TODO TypeNameMap[TI->second] = TI->first;
+//TODO }
+//TODO
+//TODO // Populate LTypesNames vector.
+//TODO for (std::vector<const Type *>::iterator I = LTypes.begin(),
+//TODO E = LTypes.end(); I != E; ++I) {
+//TODO const Type *Ty = *I;
+//TODO
+//TODO // Give names to nameless types.
+//TODO if (Ty && TypeNameMap[Ty].empty()) {
+//TODO std::string NewName =
+//TODO TheModule->getTypeSymbolTable().getUniqueName("llvm.fe.ty");
+//TODO TheModule->addTypeName(NewName, Ty);
+//TODO TypeNameMap[*I] = NewName;
+//TODO }
+//TODO
+//TODO const std::string &TypeName = TypeNameMap[*I];
+//TODO LTypesNames.push_back(ConstantArray::get(Context, TypeName, false));
+//TODO }
+//TODO
+//TODO // Create string table.
+//TODO Constant *LTypesNameTable = ConstantStruct::get(Context, LTypesNames, false);
+//TODO
+//TODO // Create variable to hold this string table.
+//TODO GlobalVariable *GV = new GlobalVariable(*TheModule,
+//TODO LTypesNameTable->getType(), true,
+//TODO GlobalValue::ExternalLinkage,
+//TODO LTypesNameTable,
+//TODO "llvm.pch.types");
+//TODO}
+
+//===----------------------------------------------------------------------===//
+// Recursive Type Handling Code and Data
+//===----------------------------------------------------------------------===//
+
+// Recursive types are a major pain to handle for a couple of reasons. Because
+// of this, when we start parsing a struct or a union, we globally change how
+// POINTER_TYPE and REFERENCE_TYPE are handled. In particular, instead of
+// actually recursing and computing the type they point to, they will return an
+// opaque*, and remember that they did this in PointersToReresolve.
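+//
+// Editorial illustration (not part of the plugin): for a self-referential
+// struct such as
+//   struct S { struct S *next; };
+// the 'next' field is initially converted as a pointer to an opaque type;
+// once the struct body is known, the placeholder is refined (via
+// refineAbstractTypeTo) to point at the struct itself.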
+
+
+/// GetFunctionType - This is just a helper like FunctionType::get but that
+/// takes PATypeHolders.
+static FunctionType *GetFunctionType(const PATypeHolder &Res,
+ std::vector<PATypeHolder> &ArgTys,
+ bool isVarArg) {
+ std::vector<const Type*> ArgTysP;
+ ArgTysP.reserve(ArgTys.size());
+ for (unsigned i = 0, e = ArgTys.size(); i != e; ++i)
+ ArgTysP.push_back(ArgTys[i]);
+
+ return FunctionType::get(Res, ArgTysP, isVarArg);
+}
+
+//===----------------------------------------------------------------------===//
+// Type Conversion Utilities
+//===----------------------------------------------------------------------===//
+
+// isPassedByInvisibleReference - Return true if an argument of the specified
+// type should be passed in by invisible reference.
+//
+bool isPassedByInvisibleReference(tree Type) {
+ // Don't crash in this case.
+ if (Type == error_mark_node)
+ return false;
+
+ // FIXME: Search for TREE_ADDRESSABLE in calls.c, and see if there are other
+ // cases that make arguments automatically passed in by reference.
+ return TREE_ADDRESSABLE(Type) || TYPE_SIZE(Type) == 0 ||
+ TREE_CODE(TYPE_SIZE(Type)) != INTEGER_CST;
+}
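+
+// Editorial example (hedged, not a statement about every front-end): the C++
+// front-end marks class types that must be passed by reference under the ABI,
+// e.g. ones with a non-trivial copy constructor, as TREE_ADDRESSABLE, so such
+// arguments take the invisible-reference path above.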
+
+/// NameType - Try to name the given type after the given GCC tree node. If
+/// the GCC tree node has no sensible name then it does nothing.
+static void NameType(const Type *Ty, tree t, Twine Prefix = Twine(),
+ Twine Postfix = Twine()) {
+ // No sensible name - give up, discarding any pre- and post-fixes.
+ if (!t)
+ return;
+
+ switch (TREE_CODE(t)) {
+ default:
+ // Unhandled case - give up.
+ return;
+
+ case ARRAY_TYPE:
+ // If the element type is E, name the array E[] (regardless of the number
+ // of dimensions).
+ for (; TREE_CODE(t) == ARRAY_TYPE; t = TREE_TYPE(t)) ;
+ NameType(Ty, t, Prefix, "[]" + Postfix);
+ return;
+
+ case BOOLEAN_TYPE:
+ case COMPLEX_TYPE:
+ case ENUMERAL_TYPE:
+ case FIXED_POINT_TYPE:
+ case FUNCTION_TYPE:
+ case INTEGER_TYPE:
+ case METHOD_TYPE:
+ case QUAL_UNION_TYPE:
+ case REAL_TYPE:
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case VECTOR_TYPE: {
+ // If the type has a name then use that, otherwise bail out.
+ if (!TYPE_NAME(t))
+ return; // Unnamed type.
+
+ tree identifier = NULL_TREE;
+ if (TREE_CODE(TYPE_NAME(t)) == IDENTIFIER_NODE)
+ identifier = TYPE_NAME(t);
+ else if (TREE_CODE(TYPE_NAME(t)) == TYPE_DECL)
+ identifier = DECL_NAME(TYPE_NAME(t));
+
+ if (identifier) {
+ const char *Class = "";
+ if (TREE_CODE(t) == ENUMERAL_TYPE)
+ Class = "enum ";
+ else if (TREE_CODE(t) == RECORD_TYPE)
+ Class = "struct ";
+ else if (TREE_CODE(t) == UNION_TYPE)
+ Class = "union ";
+ StringRef Ident(IDENTIFIER_POINTER(identifier),
+ IDENTIFIER_LENGTH(identifier));
+ TheModule->addTypeName((Prefix + Class + Ident + Postfix).str(), Ty);
+ }
+ return;
+ }
+
+ case POINTER_TYPE:
+ // If the element type is E, LLVM already calls this E*.
+ return;
+
+ case REFERENCE_TYPE:
+ // If the element type is E, name the reference E&.
+ NameType(Ty, TREE_TYPE(t), Prefix, "&" + Postfix);
+ return;
+ }
+}
+
+/// isSequentialCompatible - Return true if the specified gcc array or pointer
+/// type and the corresponding LLVM SequentialType lay out their components
+/// identically in memory, so doing a GEP accesses the right memory location.
+/// We assume that objects without a known size do not.
+bool isSequentialCompatible(tree_node *type) {
+ assert((TREE_CODE(type) == ARRAY_TYPE ||
+ TREE_CODE(type) == POINTER_TYPE ||
+ TREE_CODE(type) == REFERENCE_TYPE) && "not a sequential type!");
+ // This relies on gcc types with constant size mapping to LLVM types with the
+ // same size. It is possible for the component type not to have a size:
+ // struct foo; extern foo bar[];
+ return TYPE_SIZE(TREE_TYPE(type)) &&
+ isInt64(TYPE_SIZE(TREE_TYPE(type)), true);
+}
+
+/// isBitfield - Returns whether to treat the specified field as a bitfield.
+bool isBitfield(tree_node *field_decl) {
+ tree type = DECL_BIT_FIELD_TYPE(field_decl);
+ if (!type)
+ return false;
+
+ // A bitfield. But do we need to treat it as one?
+
+ assert(DECL_FIELD_BIT_OFFSET(field_decl) && "Bitfield with no bit offset!");
+ if (TREE_INT_CST_LOW(DECL_FIELD_BIT_OFFSET(field_decl)) & 7)
+ // Does not start on a byte boundary - must treat as a bitfield.
+ return true;
+
+ if (!TYPE_SIZE(type) || !isInt64(TYPE_SIZE (type), true))
+ // No size or variable sized - play safe, treat as a bitfield.
+ return true;
+
+ uint64_t TypeSizeInBits = getInt64(TYPE_SIZE (type), true);
+ assert(!(TypeSizeInBits & 7) && "A type with a non-byte size!");
+
+ assert(DECL_SIZE(field_decl) && "Bitfield with no bit size!");
+ uint64_t FieldSizeInBits = getInt64(DECL_SIZE(field_decl), true);
+ if (FieldSizeInBits < TypeSizeInBits)
+ // Not wide enough to hold the entire type - treat as a bitfield.
+ return true;
+
+ return false;
+}
+
+/// getDeclaredType - Get the declared type for the specified field_decl, and
+/// not the shrunk-to-fit type that GCC gives us in TREE_TYPE.
+tree getDeclaredType(tree_node *field_decl) {
+ return DECL_BIT_FIELD_TYPE(field_decl) ?
+ DECL_BIT_FIELD_TYPE(field_decl) : TREE_TYPE (field_decl);
+}
+
+/// refine_type_to - Cause all users of the opaque type old_type to switch
+/// to the more concrete type new_type.
+void refine_type_to(tree old_type, tree new_type)
+{
+ const OpaqueType *OldTy = cast_or_null<OpaqueType>(GET_TYPE_LLVM(old_type));
+ if (OldTy) {
+ const Type *NewTy = ConvertType (new_type);
+ const_cast<OpaqueType*>(OldTy)->refineAbstractTypeTo(NewTy);
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// Abstract Type Refinement Helpers
+//===----------------------------------------------------------------------===//
+//
+// This code is built to make sure that the TYPE_LLVM field on tree types is
+// updated when LLVM types are refined. This prevents dangling pointers from
+// occurring due to type coalescing.
+//
+namespace {
+ class TypeRefinementDatabase : public AbstractTypeUser {
+ virtual void refineAbstractType(const DerivedType *OldTy,
+ const Type *NewTy);
+ virtual void typeBecameConcrete(const DerivedType *AbsTy);
+
+ // TypeUsers - For each abstract LLVM type, we keep track of all of the GCC
+ // types that point to it.
+ std::map<const Type*, std::vector<tree> > TypeUsers;
+ public:
+ /// setType - call SET_TYPE_LLVM(type, Ty), associating the type with the
+ /// specified tree type. In addition, if the LLVM type is an abstract type,
+ /// we add it to our data structure to track it.
+ inline const Type *setType(tree type, const Type *Ty) {
+ if (GET_TYPE_LLVM(type))
+ RemoveTypeFromTable(type);
+
+ if (Ty->isAbstract()) {
+ std::vector<tree> &Users = TypeUsers[Ty];
+ if (Users.empty()) Ty->addAbstractTypeUser(this);
+ Users.push_back(type);
+ }
+ return SET_TYPE_LLVM(type, Ty);
+ }
+
+ void RemoveTypeFromTable(tree type);
+ void dump() const;
+ };
+
+ /// TypeDB - The main global type database.
+ TypeRefinementDatabase TypeDB;
+}
+
+/// RemoveTypeFromTable - We're about to change the LLVM type of 'type', so
+/// remove its entry from our tracking table first.
+///
+void TypeRefinementDatabase::RemoveTypeFromTable(tree type) {
+ const Type *Ty = GET_TYPE_LLVM(type);
+ if (!Ty->isAbstract()) return;
+ std::map<const Type*, std::vector<tree> >::iterator I = TypeUsers.find(Ty);
+ assert(I != TypeUsers.end() && "Using an abstract type but not in table?");
+
+ bool FoundIt = false;
+ for (unsigned i = 0, e = I->second.size(); i != e; ++i)
+ if (I->second[i] == type) {
+ FoundIt = true;
+ std::swap(I->second[i], I->second.back());
+ I->second.pop_back();
+ break;
+ }
+ assert(FoundIt && "Using an abstract type but not in table?");
+
+ // If the type plane is now empty, nuke it.
+ if (I->second.empty()) {
+ TypeUsers.erase(I);
+ Ty->removeAbstractTypeUser(this);
+ }
+}
+
+/// refineAbstractType - The callback method invoked when an abstract type is
+/// resolved to another type. An object must override this method to update
+/// its internal state to reference NewType instead of OldType.
+///
+void TypeRefinementDatabase::refineAbstractType(const DerivedType *OldTy,
+ const Type *NewTy) {
+ if (OldTy == NewTy && OldTy->isAbstract()) return; // Nothing to do.
+
+ std::map<const Type*, std::vector<tree> >::iterator I = TypeUsers.find(OldTy);
+ assert(I != TypeUsers.end() && "Using an abstract type but not in table?");
+
+ if (!NewTy->isAbstract()) {
+ // If the type became concrete, update everything pointing to it, and remove
+ // all of our entries from the map.
+ if (OldTy != NewTy)
+ for (unsigned i = 0, e = I->second.size(); i != e; ++i)
+ SET_TYPE_LLVM(I->second[i], NewTy);
+ } else {
+ // Otherwise, it was refined to another instance of an abstract type. Move
+ // everything over and stop monitoring OldTy.
+ std::vector<tree> &NewSlot = TypeUsers[NewTy];
+ if (NewSlot.empty()) NewTy->addAbstractTypeUser(this);
+
+ for (unsigned i = 0, e = I->second.size(); i != e; ++i) {
+ NewSlot.push_back(I->second[i]);
+ SET_TYPE_LLVM(I->second[i], NewTy);
+ }
+ }
+
+ TypeUsers.erase(I);
+
+ // Next, remove OldTy's entry in the TargetData object if it has one.
+ if (const StructType *STy = dyn_cast<StructType>(OldTy))
+ getTargetData().InvalidateStructLayoutInfo(STy);
+
+ OldTy->removeAbstractTypeUser(this);
+}
+
+/// The other case which AbstractTypeUsers must be aware of is when a type
+/// makes the transition from being abstract (where it has clients on its
+/// AbstractTypeUsers list) to concrete (where it does not). This method
+/// notifies ATUs when this occurs for a type.
+///
+void TypeRefinementDatabase::typeBecameConcrete(const DerivedType *AbsTy) {
+ assert(TypeUsers.count(AbsTy) && "Not using this type!");
+ // Remove the type from our collection of tracked types.
+ TypeUsers.erase(AbsTy);
+ AbsTy->removeAbstractTypeUser(this);
+}
+void TypeRefinementDatabase::dump() const {
+ outs() << "TypeRefinementDatabase\n";
+ outs().flush();
+}
+
+//===----------------------------------------------------------------------===//
+// Helper Routines
+//===----------------------------------------------------------------------===//
+
+/// FindLLVMTypePadding - If the specified struct has any inter-element padding,
+/// add it to the Padding array.
+static void FindLLVMTypePadding(const Type *Ty, tree type, uint64_t BitOffset,
+ SmallVector<std::pair<uint64_t,uint64_t>, 16> &Padding) {
+ if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+ const TargetData &TD = getTargetData();
+ const StructLayout *SL = TD.getStructLayout(STy);
+ uint64_t PrevFieldEnd = 0;
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ // If this field is marked as being padding, then pretend it is not there.
+ // This results in it (or something bigger) being added to Padding. This
+ // matches the logic in CopyAggregate.
+ if (type && isPaddingElement(type, i))
+ continue;
+
+ uint64_t FieldBitOffset = SL->getElementOffset(i)*8;
+
+ // Get padding of sub-elements.
+ FindLLVMTypePadding(STy->getElementType(i), 0,
+ BitOffset+FieldBitOffset, Padding);
+ // Check to see if there is any padding between this element and the
+ // previous one.
+ if (PrevFieldEnd < FieldBitOffset)
+ Padding.push_back(std::make_pair(PrevFieldEnd+BitOffset,
+ FieldBitOffset-PrevFieldEnd));
+ PrevFieldEnd =
+ FieldBitOffset + TD.getTypeSizeInBits(STy->getElementType(i));
+ }
+
+ // Check for tail padding.
+ if (PrevFieldEnd < SL->getSizeInBits())
+ Padding.push_back(std::make_pair(PrevFieldEnd,
+ SL->getSizeInBits()-PrevFieldEnd));
+ } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ uint64_t EltSize = getTargetData().getTypeSizeInBits(ATy->getElementType());
+ for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
+ FindLLVMTypePadding(ATy->getElementType(), 0, BitOffset+i*EltSize,
+ Padding);
+ }
+
+ // primitive and vector types have no padding.
+}
+
+/// GCCTypeOverlapsWithPadding - Return true if the specified gcc type overlaps
+/// with the specified region of padding. This only needs to handle types with
+/// a constant size.
+static bool GCCTypeOverlapsWithPadding(tree type, int PadStartBits,
+ int PadSizeBits) {
+ assert(type != error_mark_node);
+ // LLVM doesn't care about variants such as const, volatile, or restrict.
+ type = TYPE_MAIN_VARIANT(type);
+
+ // If the type does not overlap, don't bother checking below.
+
+ if (!TYPE_SIZE(type))
+ // C-style variable length array? Be conservative.
+ return true;
+
+ if (!isInt64(TYPE_SIZE(type), true))
+ // Negative size (!) or huge - be conservative.
+ return true;
+
+ if (!getInt64(TYPE_SIZE(type), true) ||
+ PadStartBits >= (int64_t)getInt64(TYPE_SIZE(type), false) ||
+ PadStartBits+PadSizeBits <= 0)
+ return false;
+
+
+ switch (TREE_CODE(type)) {
+ default:
+ fprintf(stderr, "Unknown type to compare:\n");
+ debug_tree(type);
+ abort();
+ case VOID_TYPE:
+ case BOOLEAN_TYPE:
+ case ENUMERAL_TYPE:
+ case INTEGER_TYPE:
+ case REAL_TYPE:
+ case COMPLEX_TYPE:
+ case VECTOR_TYPE:
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ case OFFSET_TYPE:
+ // These types have no holes.
+ return true;
+
+ case ARRAY_TYPE: {
+ unsigned EltSizeBits = TREE_INT_CST_LOW(TYPE_SIZE(TREE_TYPE(type)));
+ unsigned NumElts = cast<ArrayType>(ConvertType(type))->getNumElements();
+
+ // Check each element for overlap. This is inelegant, but effective.
+ for (unsigned i = 0; i != NumElts; ++i)
+ if (GCCTypeOverlapsWithPadding(TREE_TYPE(type),
+ PadStartBits- i*EltSizeBits, PadSizeBits))
+ return true;
+ return false;
+ }
+ case QUAL_UNION_TYPE:
+ case UNION_TYPE: {
+ // If this is a union with the transparent_union attribute set, it is
+ // treated as if it were just the same as its first type.
+ if (TYPE_TRANSPARENT_UNION(type)) {
+ tree Field = TYPE_FIELDS(type);
+ assert(Field && "Transparent union must have some elements!");
+ while (TREE_CODE(Field) != FIELD_DECL) {
+ Field = TREE_CHAIN(Field);
+ assert(Field && "Transparent union must have some elements!");
+ }
+ return GCCTypeOverlapsWithPadding(TREE_TYPE(Field),
+ PadStartBits, PadSizeBits);
+ }
+
+ // See if any elements overlap.
+ for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
+ if (TREE_CODE(Field) != FIELD_DECL) continue;
+ assert(getFieldOffsetInBits(Field) == 0 && "Union with non-zero offset?");
+ // Skip fields that are known not to be present.
+ if (TREE_CODE(type) == QUAL_UNION_TYPE &&
+ integer_zerop(DECL_QUALIFIER(Field)))
+ continue;
+
+ if (GCCTypeOverlapsWithPadding(TREE_TYPE(Field),
+ PadStartBits, PadSizeBits))
+ return true;
+
+ // Skip remaining fields if this one is known to be present.
+ if (TREE_CODE(type) == QUAL_UNION_TYPE &&
+ integer_onep(DECL_QUALIFIER(Field)))
+ break;
+ }
+
+ return false;
+ }
+
+ case RECORD_TYPE:
+ for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
+ if (TREE_CODE(Field) != FIELD_DECL) continue;
+
+ if (!DECL_FIELD_OFFSET(Field))
+ return true;
+
+ uint64_t FieldBitOffset = getFieldOffsetInBits(Field);
+ if (GCCTypeOverlapsWithPadding(getDeclaredType(Field),
+ PadStartBits-FieldBitOffset, PadSizeBits))
+ return true;
+ }
+ return false;
+ }
+}
+
+/// GetFieldIndex - Returns the index of the LLVM field corresponding to
+/// this FIELD_DECL, or ~0U if the type the field belongs to has not yet
+/// been converted.
+unsigned int TypeConverter::GetFieldIndex(tree_node *field_decl) {
+ assert(TREE_CODE(field_decl) == FIELD_DECL && "Not a FIELD_DECL!");
+ std::map<tree, unsigned int>::iterator I = FieldIndexMap.find(field_decl);
+ if (I != FieldIndexMap.end()) {
+ return I->second;
+ } else {
+ assert(false && "Type not laid out for LLVM?");
+ return ~0U;
+ }
+}
+
+/// SetFieldIndex - Set the index of the LLVM field corresponding to
+/// this FIELD_DECL.
+void TypeConverter::SetFieldIndex(tree_node *field_decl, unsigned int Index) {
+ assert(TREE_CODE(field_decl) == FIELD_DECL && "Not a FIELD_DECL!");
+ FieldIndexMap[field_decl] = Index;
+}
+
+bool TypeConverter::GCCTypeOverlapsWithLLVMTypePadding(tree type,
+ const Type *Ty) {
+
+ // Start by finding all of the padding in the LLVM Type.
+ SmallVector<std::pair<uint64_t,uint64_t>, 16> StructPadding;
+ FindLLVMTypePadding(Ty, type, 0, StructPadding);
+
+ for (unsigned i = 0, e = StructPadding.size(); i != e; ++i)
+ if (GCCTypeOverlapsWithPadding(type, StructPadding[i].first,
+ StructPadding[i].second))
+ return true;
+ return false;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Main Type Conversion Routines
+//===----------------------------------------------------------------------===//
+
+const Type *TypeConverter::ConvertType(tree orig_type) {
+ if (orig_type == error_mark_node) return Type::getInt32Ty(Context);
+
+ // LLVM doesn't care about variants such as const, volatile, or restrict.
+ tree type = TYPE_MAIN_VARIANT(orig_type);
+ const Type *Ty;
+
+ switch (TREE_CODE(type)) {
+ default:
+ debug_tree(type);
+ llvm_unreachable("Unknown type to convert!");
+
+ case VOID_TYPE:
+ Ty = SET_TYPE_LLVM(type, Type::getVoidTy(Context));
+ break;
+
+ case RECORD_TYPE:
+ Ty = ConvertRECORD(type, orig_type);
+ break;
+
+ case QUAL_UNION_TYPE:
+ case UNION_TYPE:
+ Ty = ConvertUNION(type, orig_type);
+ break;
+
+ case BOOLEAN_TYPE: {
+ if ((Ty = GET_TYPE_LLVM(type)))
+ return Ty;
+ Ty = SET_TYPE_LLVM(type, IntegerType::get(Context, TYPE_PRECISION(type)));
+ break;
+ }
+
+ case ENUMERAL_TYPE:
+ // Use of an enum that is implicitly declared?
+ if (TYPE_SIZE(orig_type) == 0) {
+ // If we already compiled this type, use the old type.
+ if ((Ty = GET_TYPE_LLVM(orig_type)))
+ return Ty;
+
+ Ty = OpaqueType::get(Context);
+ Ty = TypeDB.setType(orig_type, Ty);
+ break;
+ }
+ // FALL THROUGH.
+ type = orig_type;
+ case INTEGER_TYPE: {
+ if ((Ty = GET_TYPE_LLVM(type))) return Ty;
+ // The ARM port defines __builtin_neon_xi as a 511-bit type because GCC's
+ // type precision field has only 9 bits. Treat this as a special case.
+ int precision = TYPE_PRECISION(type) == 511 ? 512 : TYPE_PRECISION(type);
+ Ty = SET_TYPE_LLVM(type, IntegerType::get(Context, precision));
+ break;
+ }
+
+ case REAL_TYPE:
+ if ((Ty = GET_TYPE_LLVM(type))) return Ty;
+ switch (TYPE_PRECISION(type)) {
+ default:
+ debug_tree(type);
+ llvm_unreachable("Unknown FP type!");
+ case 32: Ty = SET_TYPE_LLVM(type, Type::getFloatTy(Context)); break;
+ case 64: Ty = SET_TYPE_LLVM(type, Type::getDoubleTy(Context)); break;
+ case 80: Ty = SET_TYPE_LLVM(type, Type::getX86_FP80Ty(Context)); break;
+ case 128:
+#ifdef TARGET_POWERPC
+ Ty = SET_TYPE_LLVM(type, Type::getPPC_FP128Ty(Context));
+#elif defined(TARGET_ZARCH) || defined(TARGET_CPU_sparc) // FIXME: Use some generic define.
+ // This is for IEEE double extended, e.g. Sparc
+ Ty = SET_TYPE_LLVM(type, Type::getFP128Ty(Context));
+#else
+ // 128-bit long doubles map onto { double, double }.
+ Ty = SET_TYPE_LLVM(type,
+ StructType::get(Context, Type::getDoubleTy(Context),
+ Type::getDoubleTy(Context), NULL));
+#endif
+ break;
+ }
+ break;
+
+ case COMPLEX_TYPE: {
+ if ((Ty = GET_TYPE_LLVM(type))) return Ty;
+ Ty = ConvertType(TREE_TYPE(type));
+ assert(!Ty->isAbstract() && "should use TypeDB.setType()");
+ Ty = StructType::get(Context, Ty, Ty, NULL);
+ Ty = SET_TYPE_LLVM(type, Ty);
+ break;
+ }
+
+ case VECTOR_TYPE: {
+ if ((Ty = GET_TYPE_LLVM(type))) return Ty;
+ Ty = ConvertType(TREE_TYPE(type));
+ assert(!Ty->isAbstract() && "should use TypeDB.setType()");
+ Ty = VectorType::get(Ty, TYPE_VECTOR_SUBPARTS(type));
+ Ty = SET_TYPE_LLVM(type, Ty);
+ break;
+ }
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ if (const PointerType *PTy = cast_or_null<PointerType>(GET_TYPE_LLVM(type))){
+ // We already converted this type. If this isn't a case where we have to
+ // reparse it, just return it.
+ if (PointersToReresolve.empty() || PointersToReresolve.back() != type ||
+ ConvertingStruct)
+ return PTy;
+
+ // Okay, we know that we're !ConvertingStruct and that type is on the end
+ // of the vector. Remove this entry from the PointersToReresolve list and
+ // get the pointee type. Note that this order is important in case the
+ // pointee type uses this pointer.
+ assert(isa<OpaqueType>(PTy->getElementType()) && "Not a deferred ref!");
+
+ // We are actively resolving this pointer. We want to pop this value from
+ // the stack, as we are no longer resolving it. However, we don't want to
+ // make it look like we are now resolving the previous pointer on the
+ // stack, so pop this value and push a null.
+ PointersToReresolve.back() = 0;
+
+
+ // Do not do any nested resolution. We know that there is a higher-level
+ // loop processing deferred pointers, let it handle anything new.
+ ConvertingStruct = true;
+
+ // Note that we know that PTy cannot be resolved or invalidated here.
+ const Type *Actual = ConvertType(TREE_TYPE(type));
+ assert(GET_TYPE_LLVM(type) == PTy && "Pointer invalidated!");
+
+ // Restore ConvertingStruct for the caller.
+ ConvertingStruct = false;
+
+ if (Actual->isVoidTy())
+ Actual = Type::getInt8Ty(Context); // void* -> sbyte*
+
+ // Update the type, potentially updating TYPE_LLVM(type).
+ const OpaqueType *OT = cast<OpaqueType>(PTy->getElementType());
+ const_cast<OpaqueType*>(OT)->refineAbstractTypeTo(Actual);
+ Ty = GET_TYPE_LLVM(type);
+ break;
+ } else {
+ // If we are converting a struct, and if we haven't converted the pointee
+ // type, add this pointer to PointersToReresolve and return an opaque*.
+ if (ConvertingStruct) {
+ // If the pointee type has not already been converted to LLVM, create
+ // a new opaque type and remember it in the database.
+ Ty = GET_TYPE_LLVM(TYPE_MAIN_VARIANT(TREE_TYPE(type)));
+ if (Ty == 0) {
+ PointersToReresolve.push_back(type);
+ Ty = TypeDB.setType(type,
+ PointerType::getUnqual(OpaqueType::get(Context)));
+ break;
+ }
+
+ // A type has already been computed. However, this may be some sort of
+ // recursive struct. We don't want to call ConvertType on it, because
+ // this will try to resolve it, and not adding the type to the
+ // PointerToReresolve collection is just an optimization. Instead,
+ // we'll use the type returned by GET_TYPE_LLVM directly, even if this
+ // may be resolved further in the future.
+ } else {
+ // If we're not in a struct, just call ConvertType. If it has already
+ // been converted, this will return the precomputed value, otherwise
+ // this will compute and return the new type.
+ Ty = ConvertType(TREE_TYPE(type));
+ }
+
+ if (Ty->isVoidTy())
+ Ty = Type::getInt8Ty(Context); // void* -> sbyte*
+ Ty = TypeDB.setType(type, Ty->getPointerTo());
+ break;
+ }
+
+ case METHOD_TYPE:
+ case FUNCTION_TYPE: {
+ if ((Ty = GET_TYPE_LLVM(type)))
+ return Ty;
+
+ // No declaration to pass through, passing NULL.
+ CallingConv::ID CallingConv;
+ AttrListPtr PAL;
+ Ty = TypeDB.setType(type, ConvertFunctionType(type, NULL, NULL,
+ CallingConv, PAL));
+ break;
+ }
+
+ case ARRAY_TYPE: {
+ if ((Ty = GET_TYPE_LLVM(type)))
+ return Ty;
+
+ uint64_t ElementSize;
+ const Type *ElementTy;
+ if (isSequentialCompatible(type)) {
+ // The gcc element type maps to an LLVM type of the same size.
+ // Convert to an LLVM array of the converted element type.
+ ElementSize = getInt64(TYPE_SIZE(TREE_TYPE(type)), true);
+ ElementTy = ConvertType(TREE_TYPE(type));
+ } else {
+ // The gcc element type has no size, or has variable size. Convert to an
+ // LLVM array of bytes. In the unlikely but theoretically possible case
+ // that the gcc array type has constant size, using an i8 for the element
+ // type ensures we can produce an LLVM array of the right size.
+ ElementSize = 8;
+ ElementTy = Type::getInt8Ty(Context);
+ }
+
+ uint64_t NumElements;
+ if (!TYPE_SIZE(type)) {
+ // We get here if we have something that is declared to be an array with
+ // no dimension. This just becomes a zero length array of the element
+ // type, so 'int X[]' becomes '%X = external global [0 x i32]'.
+ //
+ // Note that this also affects new expressions, which return a pointer
+ // to an unsized array of elements.
+ NumElements = 0;
+ } else if (!isInt64(TYPE_SIZE(type), true)) {
+ // This handles cases like "int A[n]", where the number of elements is
+ // only known at runtime. Since these are variable sized, we represent
+ // them as [0 x type].
+ NumElements = 0;
+ } else if (integer_zerop(TYPE_SIZE(type))) {
+ // An array of zero length, or with an element type of zero size.
+ // Turn it into a zero length array of the element type.
+ NumElements = 0;
+ } else {
+ // Normal constant-size array.
+ assert(ElementSize
+ && "Array of positive size with elements of zero size!");
+ NumElements = getInt64(TYPE_SIZE(type), true);
+ assert(!(NumElements % ElementSize)
+ && "Array size is not a multiple of the element size!");
+ NumElements /= ElementSize;
+ }
+
+ Ty = TypeDB.setType(type, ArrayType::get(ElementTy, NumElements));
+ break;
+ }
+
+ case OFFSET_TYPE:
+ // Handle OFFSET_TYPE specially. This is used for pointers to members,
+ // which are really just integer offsets. As such, return the appropriate
+ // integer directly.
+ switch (getTargetData().getPointerSize()) {
+ default: assert(0 && "Unknown pointer size!");
+ case 4: Ty = Type::getInt32Ty(Context); break;
+ case 8: Ty = Type::getInt64Ty(Context); break;
+ }
+ }
+
+ NameType(Ty, orig_type);
+ return Ty;
+}
+
+//===----------------------------------------------------------------------===//
+// FUNCTION/METHOD_TYPE Conversion Routines
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class FunctionTypeConversion : public DefaultABIClient {
+ PATypeHolder &RetTy;
+ std::vector<PATypeHolder> &ArgTypes;
+ CallingConv::ID &CallingConv;
+ bool isShadowRet;
+ bool KNRPromotion;
+ unsigned Offset;
+ public:
+ FunctionTypeConversion(PATypeHolder &retty, std::vector<PATypeHolder> &AT,
+ CallingConv::ID &CC, bool KNR)
+ : RetTy(retty), ArgTypes(AT), CallingConv(CC), KNRPromotion(KNR), Offset(0) {
+ CallingConv = CallingConv::C;
+ isShadowRet = false;
+ }
+
+ /// getCallingConv - This provides the desired CallingConv for the function.
+ CallingConv::ID& getCallingConv(void) { return CallingConv; }
+
+ bool isShadowReturn() const { return isShadowRet; }
+
+ /// HandleScalarResult - This callback is invoked if the function returns a
+ /// simple scalar result value.
+ void HandleScalarResult(const Type *RetTy) {
+ this->RetTy = RetTy;
+ }
+
+ /// HandleAggregateResultAsScalar - This callback is invoked if the function
+ /// returns an aggregate value by bit converting it to the specified scalar
+ /// type and returning that.
+ void HandleAggregateResultAsScalar(const Type *ScalarTy, unsigned Offset=0) {
+ RetTy = ScalarTy;
+ this->Offset = Offset;
+ }
+
+ /// HandleAggregateResultAsAggregate - This callback is invoked if the function
+ /// returns an aggregate value using multiple return values.
+ void HandleAggregateResultAsAggregate(const Type *AggrTy) {
+ RetTy = AggrTy;
+ }
+
+ /// HandleShadowResult - Handle an aggregate or scalar shadow argument.
+ void HandleShadowResult(const PointerType *PtrArgTy, bool RetPtr) {
+ // This function either returns void or the shadow argument,
+ // depending on the target.
+ RetTy = RetPtr ? PtrArgTy : Type::getVoidTy(Context);
+
+ // In any case, there is a dummy shadow argument though!
+ ArgTypes.push_back(PtrArgTy);
+
+ // Also, note the use of a shadow argument.
+ isShadowRet = true;
+ }
+
+ /// HandleAggregateShadowResult - This callback is invoked if the function
+ /// returns an aggregate value by using a "shadow" first parameter, which is
+ /// a pointer to the aggregate, of type PtrArgTy. If RetPtr is set to true,
+ /// the pointer argument itself is returned from the function.
+ void HandleAggregateShadowResult(const PointerType *PtrArgTy,
+ bool RetPtr) {
+ HandleShadowResult(PtrArgTy, RetPtr);
+ }
+
+ /// HandleScalarShadowResult - This callback is invoked if the function
+ /// returns a scalar value by using a "shadow" first parameter, which is a
+ /// pointer to the scalar, of type PtrArgTy. If RetPtr is set to true,
+ /// the pointer argument itself is returned from the function.
+ void HandleScalarShadowResult(const PointerType *PtrArgTy, bool RetPtr) {
+ HandleShadowResult(PtrArgTy, RetPtr);
+ }
+
+ void HandleScalarArgument(const llvm::Type *LLVMTy, tree type,
+ unsigned RealSize = 0) {
+ if (KNRPromotion) {
+ if (type == float_type_node)
+ LLVMTy = ConvertType(double_type_node);
+ else if (LLVMTy == Type::getInt16Ty(Context) || LLVMTy == Type::getInt8Ty(Context) ||
+ LLVMTy == Type::getInt1Ty(Context))
+ LLVMTy = Type::getInt32Ty(Context);
+ }
+ ArgTypes.push_back(LLVMTy);
+ }
+
+ /// HandleByInvisibleReferenceArgument - This callback is invoked if a pointer
+ /// (of type PtrTy) to the argument is passed rather than the argument itself.
+ void HandleByInvisibleReferenceArgument(const llvm::Type *PtrTy, tree type) {
+ ArgTypes.push_back(PtrTy);
+ }
+
+ /// HandleByValArgument - This callback is invoked if the aggregate function
+ /// argument is passed by value. It is lowered to a parameter passed by
+ /// reference with an additional parameter attribute "ByVal".
+ void HandleByValArgument(const llvm::Type *LLVMTy, tree type) {
+ HandleScalarArgument(LLVMTy->getPointerTo(), type);
+ }
+
+ /// HandleFCAArgument - This callback is invoked if the aggregate function
+ /// argument is a first class aggregate passed by value.
+ void HandleFCAArgument(const llvm::Type *LLVMTy, tree type) {
+ ArgTypes.push_back(LLVMTy);
+ }
+ };
+}
+
+
+static Attributes HandleArgumentExtension(tree ArgTy) {
+ if (TREE_CODE(ArgTy) == BOOLEAN_TYPE) {
+ if (TREE_INT_CST_LOW(TYPE_SIZE(ArgTy)) < INT_TYPE_SIZE)
+ return Attribute::ZExt;
+ } else if (TREE_CODE(ArgTy) == INTEGER_TYPE &&
+ TREE_INT_CST_LOW(TYPE_SIZE(ArgTy)) < INT_TYPE_SIZE) {
+ if (TYPE_UNSIGNED(ArgTy))
+ return Attribute::ZExt;
+ else
+ return Attribute::SExt;
+ }
+
+ return Attribute::None;
+}
+
+/// ConvertArgListToFnType - This method is used to build the argument type
+/// list for unprototyped (K&R style) functions. In this case, we have to
+/// figure out the type list (to build a FunctionType) from the actual
+/// DECL_ARGUMENTS list of the function rather than from a prototype. It
+/// walks the DECL_ARGUMENTS list (Args), collects the argument types, and
+/// returns the resulting LLVM function type.
+const FunctionType *TypeConverter::
+ConvertArgListToFnType(tree type, tree Args, tree static_chain,
+ CallingConv::ID &CallingConv, AttrListPtr &PAL) {
+ tree ReturnType = TREE_TYPE(type);
+ std::vector<PATypeHolder> ArgTys;
+ PATypeHolder RetTy(Type::getVoidTy(Context));
+
+ FunctionTypeConversion Client(RetTy, ArgTys, CallingConv, true /*K&R*/);
+ TheLLVMABI<FunctionTypeConversion> ABIConverter(Client);
+
+ // Builtins are always prototyped, so this isn't one.
+ ABIConverter.HandleReturnType(ReturnType, current_function_decl, false);
+
+#ifdef TARGET_ADJUST_LLVM_CC
+ TARGET_ADJUST_LLVM_CC(CallingConv, type);
+#endif
+
+ SmallVector<AttributeWithIndex, 8> Attrs;
+
+ // Compute whether the result needs to be zext or sext'd.
+ Attributes RAttributes = HandleArgumentExtension(ReturnType);
+
+ // Allow the target to change the attributes.
+#ifdef TARGET_ADJUST_LLVM_RETATTR
+ TARGET_ADJUST_LLVM_RETATTR(RAttributes, type);
+#endif
+
+ if (RAttributes != Attribute::None)
+ Attrs.push_back(AttributeWithIndex::get(0, RAttributes));
+
+ // If this function returns via a shadow argument, the dest loc is passed
+ // in as a pointer. Mark that pointer as struct-ret and noalias.
+ if (ABIConverter.isShadowReturn())
+ Attrs.push_back(AttributeWithIndex::get(ArgTys.size(),
+ Attribute::StructRet | Attribute::NoAlias));
+
+ std::vector<const Type*> ScalarArgs;
+ if (static_chain) {
+ // Pass the static chain as the first parameter.
+ ABIConverter.HandleArgument(TREE_TYPE(static_chain), ScalarArgs);
+ // Mark it as the chain argument.
+ Attrs.push_back(AttributeWithIndex::get(ArgTys.size(),
+ Attribute::Nest));
+ }
+
+ for (; Args && TREE_TYPE(Args) != void_type_node; Args = TREE_CHAIN(Args)) {
+ tree ArgTy = TREE_TYPE(Args);
+
+ // Determine if there are any attributes for this param.
+ Attributes PAttributes = Attribute::None;
+
+ ABIConverter.HandleArgument(ArgTy, ScalarArgs, &PAttributes);
+
+ // Compute zext/sext attributes.
+ PAttributes |= HandleArgumentExtension(ArgTy);
+
+ if (PAttributes != Attribute::None)
+ Attrs.push_back(AttributeWithIndex::get(ArgTys.size(), PAttributes));
+ }
+
+ PAL = AttrListPtr::get(Attrs.begin(), Attrs.end());
+ return GetFunctionType(RetTy, ArgTys, false);
+}
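+
+// Editorial illustration (a sketch, not part of the plugin): for an old-style
+// definition such as
+//   int f(c) char c; { return c; }
+// the K&R promotion in FunctionTypeConversion widens the char parameter to
+// int, so the resulting LLVM type is roughly 'i32 (i32)', and
+// HandleArgumentExtension would typically add a sign-extension attribute for
+// the narrow original type.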
+
+const FunctionType *TypeConverter::
+ConvertFunctionType(tree type, tree decl, tree static_chain,
+ CallingConv::ID &CallingConv, AttrListPtr &PAL) {
+ PATypeHolder RetTy = Type::getVoidTy(Context);
+ std::vector<PATypeHolder> ArgTypes;
+ bool isVarArg = false;
+ FunctionTypeConversion Client(RetTy, ArgTypes, CallingConv, false/*not K&R*/);
+ TheLLVMABI<FunctionTypeConversion> ABIConverter(Client);
+
+ ABIConverter.HandleReturnType(TREE_TYPE(type), current_function_decl,
+ decl ? DECL_BUILT_IN(decl) : false);
+
+ // Allow the target to set the CC for things like fastcall etc.
+#ifdef TARGET_ADJUST_LLVM_CC
+ TARGET_ADJUST_LLVM_CC(CallingConv, type);
+#endif
+
+ // Compute attributes for return type (and function attributes).
+ SmallVector<AttributeWithIndex, 8> Attrs;
+ Attributes FnAttributes = Attribute::None;
+
+ int flags = flags_from_decl_or_type(decl ? decl : type);
+
+ // Check for 'noreturn' function attribute.
+ if (flags & ECF_NORETURN)
+ FnAttributes |= Attribute::NoReturn;
+
+ // Check for 'nounwind' function attribute.
+ if (flags & ECF_NOTHROW)
+ FnAttributes |= Attribute::NoUnwind;
+
+ // Check for 'readnone' function attribute.
+ // Both PURE and CONST will be set if the user applied
+ // __attribute__((const)) to a function the compiler
+ // knows to be pure, such as log. A user or (more
+ // likely) libm implementor might know their local log
+ // is in fact const, so this should be valid (and gcc
+ // accepts it). But llvm IR does not allow both, so
+ // set only ReadNone.
+ if (flags & ECF_CONST)
+ FnAttributes |= Attribute::ReadNone;
+
+ // Check for 'readonly' function attribute.
+ if (flags & ECF_PURE && !(flags & ECF_CONST))
+ FnAttributes |= Attribute::ReadOnly;
+
+ // Since they write the return value through a pointer,
+ // 'sret' functions cannot be 'readnone' or 'readonly'.
+ if (ABIConverter.isShadowReturn())
+ FnAttributes &= ~(Attribute::ReadNone|Attribute::ReadOnly);
+
+ // Demote 'readnone' nested functions to 'readonly' since
+ // they may need to read through the static chain.
+ if (static_chain && (FnAttributes & Attribute::ReadNone)) {
+ FnAttributes &= ~Attribute::ReadNone;
+ FnAttributes |= Attribute::ReadOnly;
+ }
+
+ // Compute whether the result needs to be zext or sext'd.
+ Attributes RAttributes = Attribute::None;
+ RAttributes |= HandleArgumentExtension(TREE_TYPE(type));
+
+ // Allow the target to change the attributes.
+#ifdef TARGET_ADJUST_LLVM_RETATTR
+ TARGET_ADJUST_LLVM_RETATTR(RAttributes, type);
+#endif
+
+ // The value returned by a 'malloc' function does not alias anything.
+ if (flags & ECF_MALLOC)
+ RAttributes |= Attribute::NoAlias;
+
+ if (RAttributes != Attribute::None)
+ Attrs.push_back(AttributeWithIndex::get(0, RAttributes));
+
+ // If this function returns via a shadow argument, the dest loc is passed
+ // in as a pointer. Mark that pointer as struct-ret and noalias.
+ if (ABIConverter.isShadowReturn())
+ Attrs.push_back(AttributeWithIndex::get(ArgTypes.size(),
+ Attribute::StructRet | Attribute::NoAlias));
+
+ std::vector<const Type*> ScalarArgs;
+ if (static_chain) {
+ // Pass the static chain as the first parameter.
+ ABIConverter.HandleArgument(TREE_TYPE(static_chain), ScalarArgs);
+ // Mark it as the chain argument.
+ Attrs.push_back(AttributeWithIndex::get(ArgTypes.size(),
+ Attribute::Nest));
+ }
+
+ // If the target has regparam parameters, allow it to inspect the function
+ // type.
+ int local_regparam = 0;
+ int local_fp_regparam = 0;
+#ifdef LLVM_TARGET_ENABLE_REGPARM
+ LLVM_TARGET_INIT_REGPARM(local_regparam, local_fp_regparam, type);
+#endif // LLVM_TARGET_ENABLE_REGPARM
+
+ // Keep track of whether we see a byval argument.
+ bool HasByVal = false;
+
+ // Check if we have a corresponding decl to inspect.
+ tree DeclArgs = (decl) ? DECL_ARGUMENTS(decl) : NULL;
+ // Loop over all of the arguments, adding them as we go.
+ tree Args = TYPE_ARG_TYPES(type);
+ for (; Args && TREE_VALUE(Args) != void_type_node; Args = TREE_CHAIN(Args)){
+ tree ArgTy = TREE_VALUE(Args);
+ if (!isPassedByInvisibleReference(ArgTy) &&
+ isa<OpaqueType>(ConvertType(ArgTy))) {
+ // If we are passing an opaque struct by value, we don't know how many
+ // arguments it will turn into. Because we can't handle this yet,
+ // codegen the prototype as (...).
+ if (CallingConv == CallingConv::C)
+ ArgTypes.clear();
+ else
+ // Don't nuke last argument.
+ ArgTypes.erase(ArgTypes.begin()+1, ArgTypes.end());
+ Args = 0;
+ break;
+ }
+
+ // Determine if there are any attributes for this param.
+ Attributes PAttributes = Attribute::None;
+
+ unsigned OldSize = ArgTypes.size();
+
+ ABIConverter.HandleArgument(ArgTy, ScalarArgs, &PAttributes);
+
+ // Compute zext/sext attributes.
+ PAttributes |= HandleArgumentExtension(ArgTy);
+
+ // Compute noalias attributes. If we have a decl for the function
+ // inspect it for restrict qualifiers, otherwise try the argument
+ // types.
+ tree RestrictArgTy = (DeclArgs) ? TREE_TYPE(DeclArgs) : ArgTy;
+ if (TREE_CODE(RestrictArgTy) == POINTER_TYPE ||
+ TREE_CODE(RestrictArgTy) == REFERENCE_TYPE) {
+ if (TYPE_RESTRICT(RestrictArgTy))
+ PAttributes |= Attribute::NoAlias;
+ }
+
+#ifdef LLVM_TARGET_ENABLE_REGPARM
+ // Allow the target to mark this as inreg.
+ if (INTEGRAL_TYPE_P(ArgTy) || POINTER_TYPE_P(ArgTy) ||
+ SCALAR_FLOAT_TYPE_P(ArgTy))
+ LLVM_ADJUST_REGPARM_ATTRIBUTE(PAttributes, ArgTy,
+ TREE_INT_CST_LOW(TYPE_SIZE(ArgTy)),
+ local_regparam, local_fp_regparam);
+#endif // LLVM_TARGET_ENABLE_REGPARM
+
+ if (PAttributes != Attribute::None) {
+ HasByVal |= PAttributes & Attribute::ByVal;
+
+ // If the argument is split into multiple scalars, assign the
+ // attributes to all scalars of the aggregate.
+ for (unsigned i = OldSize + 1; i <= ArgTypes.size(); ++i) {
+ Attrs.push_back(AttributeWithIndex::get(i, PAttributes));
+ }
+ }
+
+ if (DeclArgs)
+ DeclArgs = TREE_CHAIN(DeclArgs);
+ }
+
+ // If there is a byval argument then it is not safe to mark the function
+ // 'readnone' or 'readonly': gcc permits a 'const' or 'pure' function to
+ // write to struct arguments passed by value, but in LLVM this becomes a
+ // write through the byval pointer argument, which LLVM does not allow for
+ // readonly/readnone functions.
+ if (HasByVal)
+ FnAttributes &= ~(Attribute::ReadNone | Attribute::ReadOnly);
+
+ // If the argument list ends with a void type node, it isn't vararg.
+ isVarArg = (Args == 0);
+ assert(RetTy && "Return type not specified!");
+
+ if (FnAttributes != Attribute::None)
+ Attrs.push_back(AttributeWithIndex::get(~0, FnAttributes));
+
+ // Finally, make the function type and result attributes.
+ PAL = AttrListPtr::get(Attrs.begin(), Attrs.end());
+ return GetFunctionType(RetTy, ArgTypes, isVarArg);
+}
+
+//===----------------------------------------------------------------------===//
+// RECORD/Struct Conversion Routines
+//===----------------------------------------------------------------------===//
+
+/// StructTypeConversionInfo - A temporary structure that is used when
+/// translating a RECORD_TYPE to an LLVM type.
+struct StructTypeConversionInfo {
+ std::vector<const Type*> Elements;
+ std::vector<uint64_t> ElementOffsetInBytes;
+ std::vector<uint64_t> ElementSizeInBytes;
+ std::vector<bool> PaddingElement; // True if field is used for padding
+ const TargetData &TD;
+ unsigned GCCStructAlignmentInBytes;
+ bool Packed; // True if struct is packed
+ bool AllBitFields; // True if all struct fields are bit fields
+ bool LastFieldStartsAtNonByteBoundry;
+ unsigned ExtraBitsAvailable; // Non-zero if last field is bit field and it
+ // does not use all allocated bits
+
+ StructTypeConversionInfo(TargetMachine &TM, unsigned GCCAlign, bool P)
+ : TD(*TM.getTargetData()), GCCStructAlignmentInBytes(GCCAlign),
+ Packed(P), AllBitFields(true), LastFieldStartsAtNonByteBoundry(false),
+ ExtraBitsAvailable(0) {}
+
+ void lastFieldStartsAtNonByteBoundry(bool value) {
+ LastFieldStartsAtNonByteBoundry = value;
+ }
+
+ void extraBitsAvailable (unsigned E) {
+ ExtraBitsAvailable = E;
+ }
+
+ bool isPacked() { return Packed; }
+
+ void markAsPacked() {
+ Packed = true;
+ }
+
+ void allFieldsAreNotBitFields() {
+ AllBitFields = false;
+ // Next field is not a bitfield.
+ LastFieldStartsAtNonByteBoundry = false;
+ }
+
+ unsigned getGCCStructAlignmentInBytes() const {
+ return GCCStructAlignmentInBytes;
+ }
+
+ /// getTypeAlignment - Return the alignment of the specified type in bytes.
+ ///
+ unsigned getTypeAlignment(const Type *Ty) const {
+ return Packed ? 1 : TD.getABITypeAlignment(Ty);
+ }
+
+ /// getTypeSize - Return the size of the specified type in bytes.
+ ///
+ uint64_t getTypeSize(const Type *Ty) const {
+ return TD.getTypeAllocSize(Ty);
+ }
+
+ /// getLLVMType - Return the LLVM type for the specified object.
+ ///
+ const Type *getLLVMType() const {
+    // Use a packed type if Packed is set or all struct fields are bitfields.
+    // An empty struct is not packed unless Packed is set.
+ return StructType::get(Context, Elements,
+ Packed || (!Elements.empty() && AllBitFields));
+ }
+
+ /// getAlignmentAsLLVMStruct - Return the alignment of this struct if it were
+ /// converted to an LLVM type.
+ uint64_t getAlignmentAsLLVMStruct() const {
+ if (Packed || AllBitFields) return 1;
+ unsigned MaxAlign = 1;
+ for (unsigned i = 0, e = Elements.size(); i != e; ++i)
+ MaxAlign = std::max(MaxAlign, getTypeAlignment(Elements[i]));
+ return MaxAlign;
+ }
+
+ /// getSizeAsLLVMStruct - Return the size of this struct if it were converted
+  /// to an LLVM type. This is the end of the last element plus an alignment
+  /// pad at the end.
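+  /// For example, for a non-packed {i32, i8} on a target where i32 has 4-byte
+  /// ABI alignment, the last element ends at byte 5 and the size rounds up
+  /// to 8.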
+ uint64_t getSizeAsLLVMStruct() const {
+ if (Elements.empty()) return 0;
+ unsigned MaxAlign = getAlignmentAsLLVMStruct();
+ uint64_t Size = ElementOffsetInBytes.back()+ElementSizeInBytes.back();
+ return (Size+MaxAlign-1) & ~(MaxAlign-1);
+ }
+
+  // If this struct is packed or all of its fields are bitfields, and the
+  // last field does not use all of its allocated bits, then remove the
+  // unneeded trailing bytes (ExtraBitsAvailable / 8) from the last element.
+ void RemoveExtraBytes () {
+
+ unsigned NoOfBytesToRemove = ExtraBitsAvailable/8;
+
+ if (!Packed && !AllBitFields)
+ return;
+
+ if (NoOfBytesToRemove == 0)
+ return;
+
+ const Type *LastType = Elements.back();
+ unsigned PadBytes = 0;
+
+ if (LastType == Type::getInt8Ty(Context))
+ PadBytes = 1 - NoOfBytesToRemove;
+ else if (LastType == Type::getInt16Ty(Context))
+ PadBytes = 2 - NoOfBytesToRemove;
+ else if (LastType == Type::getInt32Ty(Context))
+ PadBytes = 4 - NoOfBytesToRemove;
+ else if (LastType == Type::getInt64Ty(Context))
+ PadBytes = 8 - NoOfBytesToRemove;
+ else
+ return;
+
+ assert (PadBytes > 0 && "Unable to remove extra bytes");
+
+ // Update last element type and size, element offset is unchanged.
+ const Type *Pad = ArrayType::get(Type::getInt8Ty(Context), PadBytes);
+ unsigned OriginalSize = ElementSizeInBytes.back();
+ Elements.pop_back();
+ Elements.push_back(Pad);
+
+ ElementSizeInBytes.pop_back();
+ ElementSizeInBytes.push_back(OriginalSize - NoOfBytesToRemove);
+ }
+
+ /// ResizeLastElementIfOverlapsWith - If the last element in the struct
+  /// includes the specified byte, remove it. Return true if the struct
+  /// layout is sized properly. Return false if unable to handle ByteOffset.
+  /// In this case the caller should redo this struct as a packed structure.
+ bool ResizeLastElementIfOverlapsWith(uint64_t ByteOffset, tree Field,
+ const Type *Ty) {
+ const Type *SavedTy = NULL;
+
+ if (!Elements.empty()) {
+ assert(ElementOffsetInBytes.back() <= ByteOffset &&
+ "Cannot go backwards in struct");
+
+ SavedTy = Elements.back();
+ if (ElementOffsetInBytes.back()+ElementSizeInBytes.back() > ByteOffset) {
+ // The last element overlapped with this one, remove it.
+ uint64_t PoppedOffset = ElementOffsetInBytes.back();
+ Elements.pop_back();
+ ElementOffsetInBytes.pop_back();
+ ElementSizeInBytes.pop_back();
+ PaddingElement.pop_back();
+ uint64_t EndOffset = getNewElementByteOffset(1);
+ if (EndOffset < PoppedOffset) {
+ // Make sure that some field starts at the position of the
+ // field we just popped. Otherwise we might end up with a
+ // gcc non-bitfield being mapped to an LLVM field with a
+ // different offset.
+ const Type *Pad = Type::getInt8Ty(Context);
+ if (PoppedOffset != EndOffset + 1)
+ Pad = ArrayType::get(Pad, PoppedOffset - EndOffset);
+ addElement(Pad, EndOffset, PoppedOffset - EndOffset);
+ }
+ }
+ }
+
+ // Get the LLVM type for the field. If this field is a bitfield, use the
+ // declared type, not the shrunk-to-fit type that GCC gives us in TREE_TYPE.
+ unsigned ByteAlignment = getTypeAlignment(Ty);
+ uint64_t NextByteOffset = getNewElementByteOffset(ByteAlignment);
+ if (NextByteOffset > ByteOffset ||
+ ByteAlignment > getGCCStructAlignmentInBytes()) {
+ // LLVM disagrees as to where this field should go in the natural field
+ // ordering. Therefore convert to a packed struct and try again.
+ return false;
+ }
+
+ // If alignment won't round us up to the right boundary, insert explicit
+ // padding.
+ if (NextByteOffset < ByteOffset) {
+ uint64_t CurOffset = getNewElementByteOffset(1);
+ const Type *Pad = Type::getInt8Ty(Context);
+ if (SavedTy && LastFieldStartsAtNonByteBoundry)
+ // We want to reuse SavedType to access this bit field.
+ // e.g. struct __attribute__((packed)) {
+ // unsigned int A,
+ // unsigned short B : 6,
+ // C : 15;
+ // char D; };
+ // In this example, previous field is C and D is current field.
+ addElement(SavedTy, CurOffset, ByteOffset - CurOffset);
+ else if (ByteOffset - CurOffset != 1)
+ Pad = ArrayType::get(Pad, ByteOffset - CurOffset);
+ addElement(Pad, CurOffset, ByteOffset - CurOffset);
+ }
+ return true;
+ }
+
+  /// RemoveFieldsAfter - Remove the specified field and all of the fields
+  /// that come after it.
+ void RemoveFieldsAfter(unsigned FieldNo) {
+ Elements.erase(Elements.begin()+FieldNo, Elements.end());
+ ElementOffsetInBytes.erase(ElementOffsetInBytes.begin()+FieldNo,
+ ElementOffsetInBytes.end());
+ ElementSizeInBytes.erase(ElementSizeInBytes.begin()+FieldNo,
+ ElementSizeInBytes.end());
+ PaddingElement.erase(PaddingElement.begin()+FieldNo,
+ PaddingElement.end());
+ }
+
+ /// getNewElementByteOffset - If we add a new element with the specified
+ /// alignment, what byte offset will it land at?
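+  /// For example, if the last element ends at byte 5 and ByteAlignment is 4,
+  /// the new element will land at byte offset 8.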
+ uint64_t getNewElementByteOffset(unsigned ByteAlignment) {
+ if (Elements.empty()) return 0;
+ uint64_t LastElementEnd =
+ ElementOffsetInBytes.back() + ElementSizeInBytes.back();
+
+ return (LastElementEnd+ByteAlignment-1) & ~(ByteAlignment-1);
+ }
+
+ /// addElement - Add an element to the structure with the specified type,
+ /// offset and size.
+ void addElement(const Type *Ty, uint64_t Offset, uint64_t Size,
+ bool ExtraPadding = false) {
+ Elements.push_back(Ty);
+ ElementOffsetInBytes.push_back(Offset);
+ ElementSizeInBytes.push_back(Size);
+ PaddingElement.push_back(ExtraPadding);
+ lastFieldStartsAtNonByteBoundry(false);
+ ExtraBitsAvailable = 0;
+ }
+
+ /// getFieldEndOffsetInBytes - Return the byte offset of the byte immediately
+ /// after the specified field. For example, if FieldNo is 0 and the field
+ /// is 4 bytes in size, this will return 4.
+ uint64_t getFieldEndOffsetInBytes(unsigned FieldNo) const {
+ assert(FieldNo < ElementOffsetInBytes.size() && "Invalid field #!");
+ return ElementOffsetInBytes[FieldNo]+ElementSizeInBytes[FieldNo];
+ }
+
+ /// getEndUnallocatedByte - Return the first byte that isn't allocated at the
+ /// end of a structure. For example, for {}, it's 0, for {int} it is 4, for
+ /// {int,short}, it is 6.
+ uint64_t getEndUnallocatedByte() const {
+ if (ElementOffsetInBytes.empty()) return 0;
+ return getFieldEndOffsetInBytes(ElementOffsetInBytes.size()-1);
+ }
+
+ /// getLLVMFieldFor - When we are assigning indices to FieldDecls, this
+ /// method determines which struct element to use. Since the offset of
+ /// the fields cannot go backwards, CurFieldNo retains the last element we
+ /// looked at, to keep this a nice fast linear process. If isZeroSizeField
+ /// is true, this should return some zero sized field that starts at the
+ /// specified offset.
+ ///
+ /// This returns the first field that contains the specified bit.
+ ///
+ unsigned getLLVMFieldFor(uint64_t FieldOffsetInBits, unsigned &CurFieldNo,
+ bool isZeroSizeField) {
+ if (!isZeroSizeField) {
+ // Skip over LLVM fields that start and end before the GCC field starts.
+ while (CurFieldNo < ElementOffsetInBytes.size() &&
+ getFieldEndOffsetInBytes(CurFieldNo)*8 <= FieldOffsetInBits)
+ ++CurFieldNo;
+ if (CurFieldNo < ElementOffsetInBytes.size())
+ return CurFieldNo;
+ // Otherwise, we couldn't find the field!
+ // FIXME: this works around a latent bug!
+ //assert(0 && "Could not find field!");
+ return ~0U;
+ }
+
+ // Handle zero sized fields now.
+
+ // Skip over LLVM fields that start and end before the GCC field starts.
+ // Such fields are always nonzero sized, and we don't want to skip past
+ // zero sized ones as well, which happens if you use only the Offset
+ // comparison.
+ while (CurFieldNo < ElementOffsetInBytes.size() &&
+ getFieldEndOffsetInBytes(CurFieldNo)*8 <
+ FieldOffsetInBits + (ElementSizeInBytes[CurFieldNo] != 0))
+ ++CurFieldNo;
+
+ // If the next field is zero sized, advance past this one. This is a nicety
+ // that causes us to assign C fields different LLVM fields in cases like
+ // struct X {}; struct Y { struct X a, b, c };
+ if (CurFieldNo+1 < ElementOffsetInBytes.size() &&
+ ElementSizeInBytes[CurFieldNo+1] == 0) {
+ return CurFieldNo++;
+ }
+
+ // Otherwise, if this is a zero sized field, return it.
+ if (CurFieldNo < ElementOffsetInBytes.size() &&
+ ElementSizeInBytes[CurFieldNo] == 0) {
+ return CurFieldNo;
+ }
+
+ // Otherwise, we couldn't find the field!
+ assert(0 && "Could not find field!");
+ return ~0U;
+ }
+
+ void addNewBitField(uint64_t Size, uint64_t FirstUnallocatedByte);
+
+ void dump() const;
+};
+
+// Add a new element which is a bit field. Size is not the size of the bit
+// field, but the number of bits required to determine the type of the new
+// field which will be used to access this bit field.
+void StructTypeConversionInfo::addNewBitField(uint64_t Size,
+ uint64_t FirstUnallocatedByte) {
+
+ // Figure out the LLVM type that we will use for the new field.
+  // Note, Size is not necessarily the size of the new field. It indicates the
+  // additional bits required after FirstUnallocatedByte to cover the new
+  // field.
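+  // For example, if 12 more bits are needed an i16 is chosen, or, if its
+  // alignment would leave a gap at FirstUnallocatedByte, a [2 x i8] array
+  // instead.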
+ const Type *NewFieldTy;
+ if (Size <= 8)
+ NewFieldTy = Type::getInt8Ty(Context);
+ else if (Size <= 16)
+ NewFieldTy = Type::getInt16Ty(Context);
+ else if (Size <= 32)
+ NewFieldTy = Type::getInt32Ty(Context);
+ else {
+ assert(Size <= 64 && "Bitfield too large!");
+ NewFieldTy = Type::getInt64Ty(Context);
+ }
+
+ // Check that the alignment of NewFieldTy won't cause a gap in the structure!
+ unsigned ByteAlignment = getTypeAlignment(NewFieldTy);
+ if (FirstUnallocatedByte & (ByteAlignment-1)) {
+ // Instead of inserting a nice whole field, insert a small array of ubytes.
+ NewFieldTy = ArrayType::get(Type::getInt8Ty(Context), (Size+7)/8);
+ }
+
+ // Finally, add the new field.
+ addElement(NewFieldTy, FirstUnallocatedByte, getTypeSize(NewFieldTy));
+ ExtraBitsAvailable = NewFieldTy->getPrimitiveSizeInBits() - Size;
+}
+
+void StructTypeConversionInfo::dump() const {
+ raw_ostream &OS = outs();
+ OS << "Info has " << Elements.size() << " fields:\n";
+ for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
+ OS << " Offset = " << ElementOffsetInBytes[i]
+ << " Size = " << ElementSizeInBytes[i]
+ << " Type = ";
+ WriteTypeSymbolic(OS, Elements[i], TheModule);
+ OS << "\n";
+ }
+ OS.flush();
+}
+
+std::map<tree, StructTypeConversionInfo *> StructTypeInfoMap;
+
+/// isPaddingElement - Return true if and only if the field with the given
+/// index in struct type 'type' is a padding element added to match the llvm
+/// struct type size and the gcc struct type size.
+bool isPaddingElement(tree type, unsigned index) {
+
+ StructTypeConversionInfo *Info = StructTypeInfoMap[type];
+
+ // If info is not available then be conservative and return false.
+ if (!Info)
+ return false;
+
+ assert ( Info->Elements.size() == Info->PaddingElement.size()
+ && "Invalid StructTypeConversionInfo");
+ assert ( index < Info->PaddingElement.size()
+ && "Invalid PaddingElement index");
+ return Info->PaddingElement[index];
+}
+
+/// adjustPaddingElement - oldtree and newtree are union members. If they
+/// represent structs then adjust their PaddingElement bits: a padding
+/// field in one struct may not be a padding field in another struct.
+void adjustPaddingElement(tree oldtree, tree newtree) {
+
+ StructTypeConversionInfo *OldInfo = StructTypeInfoMap[oldtree];
+ StructTypeConversionInfo *NewInfo = StructTypeInfoMap[newtree];
+
+ if (!OldInfo || !NewInfo)
+ return;
+
+ /// FIXME : Find overlapping padding fields and preserve their
+ /// isPaddingElement bit. For now, clear all isPaddingElement bits.
+ for (unsigned i = 0, size = NewInfo->PaddingElement.size(); i != size; ++i)
+ NewInfo->PaddingElement[i] = false;
+
+ for (unsigned i = 0, size = OldInfo->PaddingElement.size(); i != size; ++i)
+ OldInfo->PaddingElement[i] = false;
+
+}
+
+/// DecodeStructFields - This method decodes the specified field, if it is a
+/// FIELD_DECL, adding or updating the specified StructTypeConversionInfo to
+/// reflect it. Return true if the field is decoded correctly; otherwise
+/// return false.
+bool TypeConverter::DecodeStructFields(tree Field,
+ StructTypeConversionInfo &Info) {
+ if (TREE_CODE(Field) != FIELD_DECL || !DECL_FIELD_OFFSET(Field))
+ return true;
+
+ // Handle bit-fields specially.
+ if (isBitfield(Field)) {
+    // If this field forces the llvm struct to be packed then retry the
+    // entire struct layout.
+ if (!Info.isPacked()) {
+      // An unnamed bitfield type does not contribute to struct alignment
+      // computations. Use a packed llvm structure in such cases.
+ if (!DECL_NAME(Field))
+ return false;
+ // If this field is packed then the struct may need padding fields
+ // before this field.
+ if (DECL_PACKED(Field))
+ return false;
+ // If Field has user defined alignment and it does not match Ty alignment
+ // then convert to a packed struct and try again.
+ if (TYPE_USER_ALIGN(DECL_BIT_FIELD_TYPE(Field))) {
+ const Type *Ty = ConvertType(getDeclaredType(Field));
+ if (TYPE_ALIGN(DECL_BIT_FIELD_TYPE(Field)) !=
+ 8 * Info.getTypeAlignment(Ty))
+ return false;
+ }
+ }
+ DecodeStructBitField(Field, Info);
+ return true;
+ }
+
+ Info.allFieldsAreNotBitFields();
+
+ // Get the starting offset in the record.
+ uint64_t StartOffsetInBits = getFieldOffsetInBits(Field);
+ assert((StartOffsetInBits & 7) == 0 && "Non-bit-field has non-byte offset!");
+ uint64_t StartOffsetInBytes = StartOffsetInBits/8;
+
+ const Type *Ty = ConvertType(getDeclaredType(Field));
+
+ // If this field is packed then the struct may need padding fields
+ // before this field.
+ if (DECL_PACKED(Field) && !Info.isPacked())
+ return false;
+ // Pop any previous elements out of the struct if they overlap with this one.
+ // This can happen when the C++ front-end overlaps fields with tail padding in
+ // C++ classes.
+ else if (!Info.ResizeLastElementIfOverlapsWith(StartOffsetInBytes, Field, Ty)) {
+ // LLVM disagrees as to where this field should go in the natural field
+ // ordering. Therefore convert to a packed struct and try again.
+ return false;
+ }
+ else if (TYPE_USER_ALIGN(TREE_TYPE(Field))
+ && (unsigned)DECL_ALIGN(Field) != 8 * Info.getTypeAlignment(Ty)
+ && !Info.isPacked()) {
+ // If Field has user defined alignment and it does not match Ty alignment
+ // then convert to a packed struct and try again.
+ return false;
+ } else
+ // At this point, we know that adding the element will happen at the right
+ // offset. Add it.
+ Info.addElement(Ty, StartOffsetInBytes, Info.getTypeSize(Ty));
+ return true;
+}
+
+/// DecodeStructBitField - This method decodes the specified bit-field, adding
+/// or updating the specified StructTypeConversionInfo to reflect it.
+///
+/// Note that in general, we cannot produce a good covering of struct fields for
+/// bitfields. As such, we only make sure that all bits in a struct that
+/// correspond to a bitfield are represented in the LLVM struct with
+/// (potentially multiple) integer fields of integer type. This ensures that
+/// initialized globals with bitfields can have the initializers for the
+/// bitfields specified.
+void TypeConverter::DecodeStructBitField(tree_node *Field,
+ StructTypeConversionInfo &Info) {
+ unsigned FieldSizeInBits = TREE_INT_CST_LOW(DECL_SIZE(Field));
+
+ if (FieldSizeInBits == 0) // Ignore 'int:0', which just affects layout.
+ return;
+
+ // Get the starting offset in the record.
+ uint64_t StartOffsetInBits = getFieldOffsetInBits(Field);
+ uint64_t EndBitOffset = FieldSizeInBits+StartOffsetInBits;
+
+ // If the last inserted LLVM field completely contains this bitfield, just
+ // ignore this field.
+ if (!Info.Elements.empty()) {
+ uint64_t LastFieldBitOffset = Info.ElementOffsetInBytes.back()*8;
+ unsigned LastFieldBitSize = Info.ElementSizeInBytes.back()*8;
+ assert(LastFieldBitOffset <= StartOffsetInBits &&
+ "This bitfield isn't part of the last field!");
+ if (EndBitOffset <= LastFieldBitOffset+LastFieldBitSize &&
+ LastFieldBitOffset+LastFieldBitSize >= StartOffsetInBits) {
+ // Already contained in previous field. Update remaining extra bits that
+ // are available.
+ Info.extraBitsAvailable(Info.getEndUnallocatedByte()*8 - EndBitOffset);
+ return;
+ }
+ }
+
+  // Otherwise, this bitfield lives (potentially) partially in the preceding
+ // field and in fields that exist after it. Add integer-typed fields to the
+ // LLVM struct such that there are no holes in the struct where the bitfield
+ // is: these holes would make it impossible to statically initialize a global
+ // of this type that has an initializer for the bitfield.
+
+ // Compute the number of bits that we need to add to this struct to cover
+ // this field.
+ uint64_t FirstUnallocatedByte = Info.getEndUnallocatedByte();
+ uint64_t StartOffsetFromByteBoundry = StartOffsetInBits & 7;
+
+ if (StartOffsetInBits < FirstUnallocatedByte*8) {
+
+ uint64_t AvailableBits = FirstUnallocatedByte * 8 - StartOffsetInBits;
+ // This field's starting point is already allocated.
+ if (StartOffsetFromByteBoundry == 0) {
+      // This field starts at a byte boundary. Need to allocate space
+ // for additional bytes not yet allocated.
+ unsigned NumBitsToAdd = FieldSizeInBits - AvailableBits;
+ Info.addNewBitField(NumBitsToAdd, FirstUnallocatedByte);
+ return;
+ }
+
+    // Otherwise, this field's starting point is inside a previously used
+    // byte. This happens with packed bit fields. In this case one LLVM field
+    // is used to access both the previous field and the current field.
+ unsigned prevFieldTypeSizeInBits =
+ Info.ElementSizeInBytes[Info.Elements.size() - 1] * 8;
+
+ unsigned NumBitsRequired = prevFieldTypeSizeInBits
+ + (FieldSizeInBits - AvailableBits);
+
+ if (NumBitsRequired > 64) {
+ // Use bits from previous field.
+ NumBitsRequired = FieldSizeInBits - AvailableBits;
+ } else {
+ // If type used to access previous field is not large enough then
+ // remove previous field and insert new field that is large enough to
+ // hold both fields.
+ Info.RemoveFieldsAfter(Info.Elements.size() - 1);
+      FirstUnallocatedByte -= prevFieldTypeSizeInBits/8;
+ }
+ Info.addNewBitField(NumBitsRequired, FirstUnallocatedByte);
+ // Do this after adding Field.
+ Info.lastFieldStartsAtNonByteBoundry(true);
+ return;
+ }
+
+ if (StartOffsetInBits > FirstUnallocatedByte*8) {
+    // If there is padding between the last field and this field, insert
+    // explicit pad bytes into the struct to represent it.
+ unsigned PadBytes = 0;
+ unsigned PadBits = 0;
+ if (StartOffsetFromByteBoundry != 0) {
+      // The new field does not start at a byte boundary.
+ PadBits = StartOffsetInBits - (FirstUnallocatedByte*8);
+ PadBytes = PadBits/8;
+ PadBits = PadBits - PadBytes*8;
+ } else
+ PadBytes = StartOffsetInBits/8-FirstUnallocatedByte;
+
+ if (PadBytes) {
+ const Type *Pad = Type::getInt8Ty(Context);
+ if (PadBytes != 1)
+ Pad = ArrayType::get(Pad, PadBytes);
+ Info.addElement(Pad, FirstUnallocatedByte, PadBytes);
+ }
+
+ FirstUnallocatedByte = StartOffsetInBits/8;
+    // This field will use some of these pad bytes if its starting offset
+    // is not at a byte boundary.
+ if (StartOffsetFromByteBoundry != 0)
+ FieldSizeInBits += PadBits;
+ }
+
+ // Now, Field starts at FirstUnallocatedByte and everything is aligned.
+ Info.addNewBitField(FieldSizeInBits, FirstUnallocatedByte);
+}
+
+
+/// ConvertRECORD - We know that 'type' is a RECORD_TYPE: convert it to an LLVM
+/// type.
+// A note on C++ virtual base class layout. Consider the following example:
+// class A { public: int i0; };
+// class B : public virtual A { public: int i1; };
+// class C : public virtual A { public: int i2; };
+// class D : public virtual B, public virtual C { public: int i3; };
+//
+// The TYPE nodes gcc builds for classes represent that class as it looks
+// standing alone.  Thus B is size 12 and looks like { vptr; i1; baseclass A; }
+// However, this is not the layout used when that class is a base class for
+// some other class, yet the same TYPE node is still used. D in the above has
+// both a BINFO list entry and a FIELD that reference type B, but the virtual
+// base class A within B is not allocated in that case; B-within-D is only
+// size 8. The correct size is in the FIELD node (does not match the size
+// in its child TYPE node.) The fields to be omitted from the child TYPE,
+// as far as I can tell, are always the last ones; but also, there is a
+// TYPE_DECL node sitting in the middle of the FIELD list separating virtual
+// base classes from everything else.
+//
+// Similarly, a nonvirtual base class which has virtual base classes might
+// not contain those virtual base classes when used as a nonvirtual base class.
+// There is seemingly no way to detect this except for the size differential.
+//
+// For LLVM purposes, we build a new type for B-within-D that
+// has the correct size and layout for that usage.
+
+const Type *TypeConverter::ConvertRECORD(tree type, tree orig_type) {
+ if (const Type *Ty = GET_TYPE_LLVM(type)) {
+ // If we already compiled this type, and if it was not a forward
+ // definition that is now defined, use the old type.
+ if (!isa<OpaqueType>(Ty) || TYPE_SIZE(type) == 0)
+ return Ty;
+ }
+
+ if (TYPE_SIZE(type) == 0) { // Forward declaration?
+ const Type *Ty = OpaqueType::get(Context);
+ return TypeDB.setType(type, Ty);
+ }
+
+ // Note that we are compiling a struct now.
+ bool OldConvertingStruct = ConvertingStruct;
+ ConvertingStruct = true;
+
+ StructTypeConversionInfo *Info =
+ new StructTypeConversionInfo(*TheTarget, TYPE_ALIGN(type) / 8,
+ TYPE_PACKED(type));
+
+ // Convert over all of the elements of the struct.
+ bool retryAsPackedStruct = false;
+ for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
+ if (DecodeStructFields(Field, *Info) == false) {
+ retryAsPackedStruct = true;
+ break;
+ }
+ }
+
+ if (retryAsPackedStruct) {
+ delete Info;
+ Info = new StructTypeConversionInfo(*TheTarget, TYPE_ALIGN(type) / 8, true);
+ for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
+ if (DecodeStructFields(Field, *Info) == false) {
+ assert(0 && "Unable to decode struct fields.");
+ }
+ }
+ }
+
+ // If the LLVM struct requires explicit tail padding to be the same size as
+ // the GCC struct, insert tail padding now. This handles, e.g., "{}" in C++.
+ if (TYPE_SIZE(type) && TREE_CODE(TYPE_SIZE(type)) == INTEGER_CST) {
+ uint64_t GCCTypeSize = getInt64(TYPE_SIZE_UNIT(type), true);
+ uint64_t LLVMStructSize = Info->getSizeAsLLVMStruct();
+
+ if (LLVMStructSize > GCCTypeSize) {
+ Info->RemoveExtraBytes();
+ LLVMStructSize = Info->getSizeAsLLVMStruct();
+ }
+
+ if (LLVMStructSize != GCCTypeSize) {
+ assert(LLVMStructSize < GCCTypeSize &&
+ "LLVM type size doesn't match GCC type size!");
+ uint64_t LLVMLastElementEnd = Info->getNewElementByteOffset(1);
+
+ // If only one byte is needed then insert i8.
+ if (GCCTypeSize-LLVMLastElementEnd == 1)
+ Info->addElement(Type::getInt8Ty(Context), 1, 1);
+ else {
+ if (((GCCTypeSize-LLVMStructSize) % 4) == 0 &&
+ (Info->getAlignmentAsLLVMStruct() %
+ Info->getTypeAlignment(Type::getInt32Ty(Context))) == 0) {
+ // insert array of i32
+ unsigned Int32ArraySize = (GCCTypeSize-LLVMStructSize)/4;
+ const Type *PadTy =
+ ArrayType::get(Type::getInt32Ty(Context), Int32ArraySize);
+ Info->addElement(PadTy, GCCTypeSize - LLVMLastElementEnd,
+ Int32ArraySize, true /* Padding Element */);
+ } else {
+ const Type *PadTy =
+ ArrayType::get(Type::getInt8Ty(Context), GCCTypeSize-LLVMStructSize);
+ Info->addElement(PadTy, GCCTypeSize - LLVMLastElementEnd,
+ GCCTypeSize - LLVMLastElementEnd,
+ true /* Padding Element */);
+
+ }
+ }
+ }
+ } else
+ Info->RemoveExtraBytes();
+
+
+ // Now that the LLVM struct is finalized, figure out a safe place to index to
+ // and set index values for each FieldDecl that doesn't start at a variable
+ // offset.
+ unsigned CurFieldNo = 0;
+ for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field))
+ if (TREE_CODE(Field) == FIELD_DECL && DECL_FIELD_OFFSET(Field)) {
+ uint64_t FieldOffsetInBits = getFieldOffsetInBits(Field);
+ tree FieldType = getDeclaredType(Field);
+ const Type *FieldTy = ConvertType(FieldType);
+
+ // If this is a bitfield, we may want to adjust the FieldOffsetInBits to
+ // produce safe code. In particular, bitfields will be loaded/stored as
+ // their *declared* type, not the smallest integer type that contains
+ // them. As such, we need to respect the alignment of the declared type.
+ if (isBitfield(Field)) {
+ // If this is a bitfield, the declared type must be an integral type.
+ unsigned BitAlignment = Info->getTypeAlignment(FieldTy)*8;
+
+ FieldOffsetInBits &= ~(BitAlignment-1ULL);
+ // When we fix the field alignment, we must restart the FieldNo search
+ // because the FieldOffsetInBits can be lower than it was in the
+ // previous iteration.
+ CurFieldNo = 0;
+
+ // Skip 'int:0', which just affects layout.
+ if (integer_zerop(DECL_SIZE(Field)))
+ continue;
+ }
+
+ // Figure out if this field is zero bits wide, e.g. {} or [0 x int].
+ bool isZeroSizeField = FieldTy->isSized() &&
+ getTargetData().getTypeSizeInBits(FieldTy) == 0;
+
+ unsigned FieldNo =
+ Info->getLLVMFieldFor(FieldOffsetInBits, CurFieldNo, isZeroSizeField);
+ SetFieldIndex(Field, FieldNo);
+
+ assert((isBitfield(Field) || FieldNo == ~0U ||
+ FieldOffsetInBits == 8*Info->ElementOffsetInBytes[FieldNo]) &&
+ "Wrong LLVM field offset!");
+ }
+
+ const Type *ResultTy = Info->getLLVMType();
+ StructTypeInfoMap[type] = Info;
+
+ const OpaqueType *OldTy = cast_or_null<OpaqueType>(GET_TYPE_LLVM(type));
+ TypeDB.setType(type, ResultTy);
+
+ // If there was a forward declaration for this type that is now resolved,
+ // refine anything that used it to the new type.
+ if (OldTy)
+ const_cast<OpaqueType*>(OldTy)->refineAbstractTypeTo(ResultTy);
+
+  // We have finished converting this struct. See if this is the outer-most
+ // struct being converted by ConvertType.
+ ConvertingStruct = OldConvertingStruct;
+ if (!ConvertingStruct) {
+
+ // If this is the outer-most level of structness, resolve any pointers
+ // that were deferred.
+ while (!PointersToReresolve.empty()) {
+ if (tree PtrTy = PointersToReresolve.back()) {
+ ConvertType(PtrTy); // Reresolve this pointer type.
+ assert((PointersToReresolve.empty() ||
+ PointersToReresolve.back() != PtrTy) &&
+ "Something went wrong with pointer resolution!");
+ } else {
+ // Null marker element.
+ PointersToReresolve.pop_back();
+ }
+ }
+ }
+
+ return GET_TYPE_LLVM(type);
+}
+
+
+/// ConvertUNION - We know that 'type' is a UNION_TYPE or a QUAL_UNION_TYPE:
+/// convert it to an LLVM type.
+const Type *TypeConverter::ConvertUNION(tree type, tree orig_type) {
+ if (const Type *Ty = GET_TYPE_LLVM(type)) {
+ // If we already compiled this type, and if it was not a forward
+ // definition that is now defined, use the old type.
+ if (!isa<OpaqueType>(Ty) || TYPE_SIZE(type) == 0)
+ return Ty;
+ }
+
+  if (TYPE_SIZE(type) == 0) { // Forward declaration?
+ const Type *Ty = OpaqueType::get(Context);
+ return TypeDB.setType(type, Ty);
+ }
+
+ // Note that we are compiling a struct now.
+ bool OldConvertingStruct = ConvertingStruct;
+ ConvertingStruct = true;
+
+  // Find the type with the largest alignment, and if we have multiple types
+  // with the same alignment, select the one with the largest size. If the
+  // type with the maximum alignment is smaller than the other types then we
+  // will add padding later on anyway to match the union size.
+ const TargetData &TD = getTargetData();
+ const Type *UnionTy = 0;
+ tree GccUnionTy = 0;
+ unsigned MaxAlignSize = 0, MaxAlign = 0;
+ for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
+ if (TREE_CODE(Field) != FIELD_DECL) continue;
+// assert(getFieldOffsetInBits(Field) == 0 && "Union with non-zero offset?");
+ // Workaround to get Fortran EQUIVALENCE working.
+ // TODO: Unify record and union logic and handle this optimally.
+ if (getFieldOffsetInBits(Field) != 0) {
+ ConvertingStruct = OldConvertingStruct;
+ return ConvertRECORD(type, orig_type);
+ }
+
+ // Set the field idx to zero for all fields.
+ SetFieldIndex(Field, 0);
+
+ // Skip fields that are known not to be present.
+ if (TREE_CODE(type) == QUAL_UNION_TYPE &&
+ integer_zerop(DECL_QUALIFIER(Field)))
+ continue;
+
+ tree TheGccTy = TREE_TYPE(Field);
+
+ // Skip zero-length fields; ConvertType refuses to construct a type
+ // of size 0.
+ if (DECL_SIZE(Field) &&
+ TREE_CODE(DECL_SIZE(Field))==INTEGER_CST &&
+ TREE_INT_CST_LOW(DECL_SIZE(Field))==0)
+ continue;
+#ifdef TARGET_POWERPC
+ // Normally gcc reduces the size of bitfields to the size necessary
+ // to hold the bits, e.g. a 1-bit field becomes QI. It does not do
+ // this for bool, which is no problem on most targets because
+ // sizeof(bool)==1. On darwin ppc32, however, sizeof(bool)==4, so
+ // we can have field types bigger than the union type here. Evade
+ // this by creating an appropriate int type here.
+ //
+ // It's possible this issue is not specific to ppc, but I doubt it.
+
+ if (TREE_CODE(TheGccTy) == BOOLEAN_TYPE &&
+ TYPE_SIZE_UNIT(TheGccTy) &&
+ DECL_SIZE_UNIT(Field) &&
+ TREE_CODE(DECL_SIZE_UNIT(Field)) == INTEGER_CST &&
+ TREE_CODE(TYPE_SIZE_UNIT(TheGccTy)) == INTEGER_CST &&
+ TREE_INT_CST_LOW(TYPE_SIZE_UNIT(TheGccTy)) >
+ TREE_INT_CST_LOW(DECL_SIZE_UNIT(Field))) {
+ bool sign = DECL_UNSIGNED(Field);
+ switch(TREE_INT_CST_LOW(DECL_SIZE_UNIT(Field))) {
+ case 1: TheGccTy = sign ? intQI_type_node : unsigned_intQI_type_node;
+ break;
+ case 2: TheGccTy = sign ? intHI_type_node : unsigned_intHI_type_node;
+ break;
+ case 4: TheGccTy = sign ? intSI_type_node : unsigned_intSI_type_node;
+ break;
+ case 8: TheGccTy = sign ? intDI_type_node : unsigned_intDI_type_node;
+ break;
+ default: assert(0 && "Unexpected field size"); break;
+ }
+ }
+#endif
+ const Type *TheTy = ConvertType(TheGccTy);
+ unsigned Size = TD.getTypeAllocSize(TheTy);
+ unsigned Align = TD.getABITypeAlignment(TheTy);
+
+ adjustPaddingElement(GccUnionTy, TheGccTy);
+
+ // Select TheTy as union type if it is more aligned than any other. If more
+ // than one field achieves the maximum alignment then choose the biggest.
+ bool useTheTy;
+ if (UnionTy == 0)
+ useTheTy = true;
+ else if (Align < MaxAlign)
+ useTheTy = false;
+ else if (Align > MaxAlign)
+ useTheTy = true;
+ else if (Size > MaxAlignSize)
+ useTheTy = true;
+ else
+ useTheTy = false;
+
+ if (useTheTy) {
+ UnionTy = TheTy;
+ GccUnionTy = TheGccTy;
+ MaxAlignSize = Size;
+ MaxAlign = Align;
+ }
+
+ // Skip remaining fields if this one is known to be present.
+ if (TREE_CODE(type) == QUAL_UNION_TYPE &&
+ integer_onep(DECL_QUALIFIER(Field)))
+ break;
+ }
+
+ std::vector<const Type*> UnionElts;
+ unsigned EltAlign = 0;
+ unsigned EltSize = 0;
+ if (UnionTy) { // Not an empty union.
+ EltAlign = TD.getABITypeAlignment(UnionTy);
+ EltSize = TD.getTypeAllocSize(UnionTy);
+ UnionElts.push_back(UnionTy);
+ }
+
+ // If the LLVM struct requires explicit tail padding to be the same size as
+ // the GCC union, insert tail padding now. This handles cases where the union
+  // has larger alignment than the largest member does, and thus requires
+  // tail padding.
+ if (TYPE_SIZE(type) && TREE_CODE(TYPE_SIZE(type)) == INTEGER_CST) {
+ uint64_t GCCTypeSize = getInt64(TYPE_SIZE_UNIT(type), true);
+
+ if (EltSize != GCCTypeSize) {
+ assert(EltSize < GCCTypeSize &&
+ "LLVM type size doesn't match GCC type size!");
+ const Type *PadTy = Type::getInt8Ty(Context);
+ if (GCCTypeSize-EltSize != 1)
+ PadTy = ArrayType::get(PadTy, GCCTypeSize-EltSize);
+ UnionElts.push_back(PadTy);
+ }
+ }
+
+ bool isPacked = 8 * EltAlign > TYPE_ALIGN(type);
+ const Type *ResultTy = StructType::get(Context, UnionElts, isPacked);
+ const OpaqueType *OldTy = cast_or_null<OpaqueType>(GET_TYPE_LLVM(type));
+ TypeDB.setType(type, ResultTy);
+
+ // If there was a forward declaration for this type that is now resolved,
+ // refine anything that used it to the new type.
+ if (OldTy)
+ const_cast<OpaqueType*>(OldTy)->refineAbstractTypeTo(ResultTy);
+
+  // We have finished converting this union. See if this is the outer-most
+ // union being converted by ConvertType.
+ ConvertingStruct = OldConvertingStruct;
+ if (!ConvertingStruct) {
+ // If this is the outer-most level of structness, resolve any pointers
+ // that were deferred.
+ while (!PointersToReresolve.empty()) {
+ if (tree PtrTy = PointersToReresolve.back()) {
+ ConvertType(PtrTy); // Reresolve this pointer type.
+ assert((PointersToReresolve.empty() ||
+ PointersToReresolve.back() != PtrTy) &&
+ "Something went wrong with pointer resolution!");
+ } else {
+ // Null marker element.
+ PointersToReresolve.pop_back();
+ }
+ }
+ }
+
+ return GET_TYPE_LLVM(type);
+}
diff --git a/dragonegg/utils/target.cpp b/dragonegg/utils/target.cpp
new file mode 100644
index 00000000000..91a2116b7e2
--- /dev/null
+++ b/dragonegg/utils/target.cpp
@@ -0,0 +1,68 @@
+#include <cstring>
+#include <iostream>
+
+#include <llvm/ADT/Triple.h>
+
+using namespace llvm;
+
+static void PrintTriple(Triple &T) {
+ std::cout << T.getTriple() << "\n";
+}
+static void PrintArchName(Triple &T) {
+ std::cout << T.getArchName().str() << "\n";
+}
+static void PrintVendorName(Triple &T) {
+ std::cout << T.getVendorName().str() << "\n";
+}
+static void PrintOSTypeName(Triple &T) {
+ std::cout << T.getOSTypeName(T.getOS()) << "\n";
+}
+static void PrintEnvironmentName(Triple &T) {
+ std::cout << T.getEnvironmentName().str() << "\n";
+}
+static void PrintOSAndEnvironmentName(Triple &T) {
+ std::cout << T.getOSAndEnvironmentName().str() << "\n";
+}
+static void PrintArchTypePrefix(Triple &T) {
+ std::cout << T.getArchTypePrefix(T.getArch()) << "\n";
+}
+
+struct Option {
+ const char *Name;
+ void (*Action)(Triple &);
+};
+
+static Option Options[] = {
+ { "-t", PrintTriple },
+ { "-a", PrintArchName },
+ { "-v", PrintVendorName },
+ { "-o", PrintOSTypeName },
+ { "-e", PrintEnvironmentName },
+ { "-oe", PrintOSAndEnvironmentName },
+ { "-p", PrintArchTypePrefix },
+ { NULL, NULL }
+};
+
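+// A small command line utility for querying properties of the compile target.
+// For example, "-t" prints the full target triple and "-p" prints the
+// architecture prefix (such as "x86").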
+int main(int argc, char **argv) {
+ Triple T(TARGET_NAME);
+
+ for (int i = 1; i < argc; ++i) {
+ bool Found = false;
+ for (Option *O = Options; O->Name; ++O)
+ if (!strcmp(argv[i], O->Name)) {
+ Found = true;
+ O->Action(T);
+ break;
+ }
+ if (!Found) {
+ std::cerr << "Unknown option \"" << argv[i] << "\"\n";
+ std::cerr << "Usage: " << argv[0];
+ for (Option *O = Options; O->Name; ++O)
+ std::cerr << " " << O->Name;
+ std::cerr << "\n";
+ return 1;
+ }
+ }
+
+ return 0;
+}
diff --git a/dragonegg/x86/llvm-target.cpp b/dragonegg/x86/llvm-target.cpp
new file mode 100644
index 00000000000..a6c78b08f11
--- /dev/null
+++ b/dragonegg/x86/llvm-target.cpp
@@ -0,0 +1,1513 @@
+/* High-level LLVM backend interface
+Copyright (C) 2005 Free Software Foundation, Inc.
+Contributed by Evan Cheng (evan.cheng@apple.com)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+//===----------------------------------------------------------------------===//
+// This is a C++ source file that implements the LLVM IA-32 specific ABI.
+//===----------------------------------------------------------------------===//
+
+// LLVM headers
+#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+
+// System headers
+#include <gmp.h>
+
+// GCC headers
+#undef VISIBILITY_HIDDEN
+
+extern "C" {
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "target.h"
+#include "toplev.h"
+#include "tree.h"
+#include "gimple.h"
+}
+
+// Plugin headers
+#include "llvm-abi.h"
+#include "llvm-internal.h"
+#include "llvm-target.h"
+
+static LLVMContext &Context = getGlobalContext();
+
+/* TargetIntrinsicLower - For builtins that we want to expand to normal LLVM
+ * code, emit the code now.  If we can handle the code, this function should
+ * emit the code and return true.
+ */
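+// For example, the IX86_BUILTIN_PADDD builtin is lowered below to an ordinary
+// LLVM vector add rather than to a target-specific intrinsic call.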
+bool TreeToLLVM::TargetIntrinsicLower(gimple stmt,
+ unsigned FnCode,
+ const MemRef *DestLoc,
+ Value *&Result,
+ const Type *ResultType,
+ std::vector<Value*> &Ops) {
+ switch (FnCode) {
+ default: break;
+ case IX86_BUILTIN_ADDPS:
+ case IX86_BUILTIN_ADDPD:
+ case IX86_BUILTIN_PADDB:
+ case IX86_BUILTIN_PADDW:
+ case IX86_BUILTIN_PADDD:
+ case IX86_BUILTIN_PADDQ:
+ case IX86_BUILTIN_PADDB128:
+ case IX86_BUILTIN_PADDW128:
+ case IX86_BUILTIN_PADDD128:
+ case IX86_BUILTIN_PADDQ128:
+ Result = Builder.CreateAdd(Ops[0], Ops[1]);
+ return true;
+ case IX86_BUILTIN_SUBPS:
+ case IX86_BUILTIN_SUBPD:
+ case IX86_BUILTIN_PSUBB:
+ case IX86_BUILTIN_PSUBW:
+ case IX86_BUILTIN_PSUBD:
+ case IX86_BUILTIN_PSUBQ:
+ case IX86_BUILTIN_PSUBB128:
+ case IX86_BUILTIN_PSUBW128:
+ case IX86_BUILTIN_PSUBD128:
+ case IX86_BUILTIN_PSUBQ128:
+ Result = Builder.CreateSub(Ops[0], Ops[1]);
+ return true;
+ case IX86_BUILTIN_MULPS:
+ case IX86_BUILTIN_MULPD:
+ case IX86_BUILTIN_PMULLW:
+ case IX86_BUILTIN_PMULLW128:
+ Result = Builder.CreateMul(Ops[0], Ops[1]);
+ return true;
+ case IX86_BUILTIN_DIVPS:
+ case IX86_BUILTIN_DIVPD:
+ Result = Builder.CreateFDiv(Ops[0], Ops[1]);
+ return true;
+ case IX86_BUILTIN_PAND:
+ case IX86_BUILTIN_PAND128:
+ Result = Builder.CreateAnd(Ops[0], Ops[1]);
+ return true;
+ case IX86_BUILTIN_PANDN:
+ case IX86_BUILTIN_PANDN128:
+ Ops[0] = Builder.CreateNot(Ops[0]);
+ Result = Builder.CreateAnd(Ops[0], Ops[1]);
+ return true;
+ case IX86_BUILTIN_POR:
+ case IX86_BUILTIN_POR128:
+ Result = Builder.CreateOr(Ops[0], Ops[1]);
+ return true;
+ case IX86_BUILTIN_PXOR:
+ case IX86_BUILTIN_PXOR128:
+ Result = Builder.CreateXor(Ops[0], Ops[1]);
+ return true;
+ case IX86_BUILTIN_ANDPS:
+ case IX86_BUILTIN_ORPS:
+ case IX86_BUILTIN_XORPS:
+ case IX86_BUILTIN_ANDNPS:
+ case IX86_BUILTIN_ANDPD:
+ case IX86_BUILTIN_ORPD:
+ case IX86_BUILTIN_XORPD:
+ case IX86_BUILTIN_ANDNPD:
+ if (cast<VectorType>(ResultType)->getNumElements() == 4) // v4f32
+ Ops[0] = Builder.CreateBitCast(Ops[0],
+ VectorType::get(Type::getInt32Ty(Context), 4),
+ "tmp");
+ else // v2f64
+ Ops[0] = Builder.CreateBitCast(Ops[0],
+ VectorType::get(Type::getInt64Ty(Context), 2),
+ "tmp");
+
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType());
+ switch (FnCode) {
+ case IX86_BUILTIN_ANDPS:
+ case IX86_BUILTIN_ANDPD:
+ Result = Builder.CreateAnd(Ops[0], Ops[1]);
+ break;
+ case IX86_BUILTIN_ORPS:
+ case IX86_BUILTIN_ORPD:
+ Result = Builder.CreateOr (Ops[0], Ops[1]);
+ break;
+ case IX86_BUILTIN_XORPS:
+ case IX86_BUILTIN_XORPD:
+ Result = Builder.CreateXor(Ops[0], Ops[1]);
+ break;
+ case IX86_BUILTIN_ANDNPS:
+ case IX86_BUILTIN_ANDNPD:
+ Ops[0] = Builder.CreateNot(Ops[0]);
+ Result = Builder.CreateAnd(Ops[0], Ops[1]);
+ break;
+ }
+ Result = Builder.CreateBitCast(Result, ResultType);
+ return true;
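+  // The SHUFPS mask encodes a 2-bit element index for each result lane: the
+  // low two results come from the first operand and the high two from the
+  // second.  For example, a mask of 0x1B yields the shuffle <3, 2, 5, 4>.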
+ case IX86_BUILTIN_SHUFPS:
+ if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[2])) {
+ int EV = Elt->getZExtValue();
+ Result = BuildVectorShuffle(Ops[0], Ops[1],
+ ((EV & 0x03) >> 0), ((EV & 0x0c) >> 2),
+ ((EV & 0x30) >> 4)+4, ((EV & 0xc0) >> 6)+4);
+ } else {
+ error_at(gimple_location(stmt), "mask must be an immediate");
+ Result = Ops[0];
+ }
+ return true;
+ case IX86_BUILTIN_SHUFPD:
+ if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[2])) {
+ int EV = Elt->getZExtValue();
+ Result = BuildVectorShuffle(Ops[0], Ops[1],
+ ((EV & 0x01) >> 0), ((EV & 0x02) >> 1)+2);
+ } else {
+ error_at(gimple_location(stmt), "mask must be an immediate");
+ Result = Ops[0];
+ }
+ return true;
+ case IX86_BUILTIN_PSHUFW:
+ case IX86_BUILTIN_PSHUFD:
+ if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[1])) {
+ int EV = Elt->getZExtValue();
+ Result = BuildVectorShuffle(Ops[0], Ops[0],
+ ((EV & 0x03) >> 0), ((EV & 0x0c) >> 2),
+ ((EV & 0x30) >> 4), ((EV & 0xc0) >> 6));
+ } else {
+ error_at(gimple_location(stmt), "mask must be an immediate");
+ Result = Ops[0];
+ }
+ return true;
+ case IX86_BUILTIN_PSHUFHW:
+ if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[1])) {
+ int EV = Elt->getZExtValue();
+ Result = BuildVectorShuffle(Ops[0], Ops[0],
+ 0, 1, 2, 3,
+ ((EV & 0x03) >> 0)+4, ((EV & 0x0c) >> 2)+4,
+ ((EV & 0x30) >> 4)+4, ((EV & 0xc0) >> 6)+4);
+ return true;
+ }
+ return false;
+ case IX86_BUILTIN_PSHUFLW:
+ if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[1])) {
+ int EV = Elt->getZExtValue();
+ Result = BuildVectorShuffle(Ops[0], Ops[0],
+ ((EV & 0x03) >> 0), ((EV & 0x0c) >> 2),
+ ((EV & 0x30) >> 4), ((EV & 0xc0) >> 6),
+ 4, 5, 6, 7);
+ } else {
+ error_at(gimple_location(stmt), "mask must be an immediate");
+ Result = Ops[0];
+ }
+
+ return true;
+ case IX86_BUILTIN_PUNPCKHBW:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 4, 12, 5, 13,
+ 6, 14, 7, 15);
+ return true;
+ case IX86_BUILTIN_PUNPCKHWD:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 6, 3, 7);
+ return true;
+ case IX86_BUILTIN_PUNPCKHDQ:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 1, 3);
+ return true;
+ case IX86_BUILTIN_PUNPCKLBW:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 8, 1, 9,
+ 2, 10, 3, 11);
+ return true;
+ case IX86_BUILTIN_PUNPCKLWD:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 4, 1, 5);
+ return true;
+ case IX86_BUILTIN_PUNPCKLDQ:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 2);
+ return true;
+ case IX86_BUILTIN_PUNPCKHBW128:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 8, 24, 9, 25,
+ 10, 26, 11, 27,
+ 12, 28, 13, 29,
+ 14, 30, 15, 31);
+ return true;
+ case IX86_BUILTIN_PUNPCKHWD128:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 4, 12, 5, 13, 6, 14, 7, 15);
+ return true;
+ case IX86_BUILTIN_PUNPCKHDQ128:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 6, 3, 7);
+ return true;
+ case IX86_BUILTIN_PUNPCKHQDQ128:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 1, 3);
+ return true;
+ case IX86_BUILTIN_PUNPCKLBW128:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 16, 1, 17,
+ 2, 18, 3, 19,
+ 4, 20, 5, 21,
+ 6, 22, 7, 23);
+ return true;
+ case IX86_BUILTIN_PUNPCKLWD128:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 8, 1, 9, 2, 10, 3, 11);
+ return true;
+ case IX86_BUILTIN_PUNPCKLDQ128:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 4, 1, 5);
+ return true;
+ case IX86_BUILTIN_PUNPCKLQDQ128:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 2);
+ return true;
+ case IX86_BUILTIN_UNPCKHPS:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 6, 3, 7);
+ return true;
+ case IX86_BUILTIN_UNPCKHPD:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 1, 3);
+ return true;
+ case IX86_BUILTIN_UNPCKLPS:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 4, 1, 5);
+ return true;
+ case IX86_BUILTIN_UNPCKLPD:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 2);
+ return true;
+ case IX86_BUILTIN_MOVHLPS:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 6, 7, 2, 3);
+ return true;
+ case IX86_BUILTIN_MOVLHPS:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 1, 4, 5);
+ return true;
+ case IX86_BUILTIN_MOVSS:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 4, 1, 2, 3);
+ return true;
+ case IX86_BUILTIN_MOVSD:
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 1);
+ return true;
+ case IX86_BUILTIN_MOVQ: {
+ Value *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
+ Result = BuildVector(Zero, Zero, Zero, Zero, NULL);
+ Result = BuildVectorShuffle(Result, Ops[0], 4, 5, 2, 3);
+ return true;
+ }
+ case IX86_BUILTIN_LOADQ: {
+ const PointerType *i64Ptr = Type::getInt64PtrTy(Context);
+ Ops[0] = Builder.CreateBitCast(Ops[0], i64Ptr);
+ Ops[0] = Builder.CreateLoad(Ops[0]);
+ Value *Zero = ConstantInt::get(Type::getInt64Ty(Context), 0);
+ Result = BuildVector(Zero, Zero, NULL);
+ Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 0);
+ Result = Builder.CreateInsertElement(Result, Ops[0], Idx);
+ Result = Builder.CreateBitCast(Result, ResultType);
+ return true;
+ }
+ case IX86_BUILTIN_LOADUPS: {
+ VectorType *v4f32 = VectorType::get(Type::getFloatTy(Context), 4);
+ const PointerType *v4f32Ptr = v4f32->getPointerTo();
+ Value *BC = Builder.CreateBitCast(Ops[0], v4f32Ptr);
+ LoadInst *LI = Builder.CreateLoad(BC);
+ LI->setAlignment(1);
+ Result = LI;
+ return true;
+ }
+ case IX86_BUILTIN_LOADUPD: {
+ VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
+ const PointerType *v2f64Ptr = v2f64->getPointerTo();
+ Value *BC = Builder.CreateBitCast(Ops[0], v2f64Ptr);
+ LoadInst *LI = Builder.CreateLoad(BC);
+ LI->setAlignment(1);
+ Result = LI;
+ return true;
+ }
+ case IX86_BUILTIN_LOADDQU: {
+ VectorType *v16i8 = VectorType::get(Type::getInt8Ty(Context), 16);
+ const PointerType *v16i8Ptr = v16i8->getPointerTo();
+ Value *BC = Builder.CreateBitCast(Ops[0], v16i8Ptr);
+ LoadInst *LI = Builder.CreateLoad(BC);
+ LI->setAlignment(1);
+ Result = LI;
+ return true;
+ }
+ case IX86_BUILTIN_STOREUPS: {
+ VectorType *v4f32 = VectorType::get(Type::getFloatTy(Context), 4);
+ const PointerType *v4f32Ptr = v4f32->getPointerTo();
+ Value *BC = Builder.CreateBitCast(Ops[0], v4f32Ptr);
+ StoreInst *SI = Builder.CreateStore(Ops[1], BC);
+ SI->setAlignment(1);
+ Result = SI;
+ return true;
+ }
+ case IX86_BUILTIN_STOREUPD: {
+ VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
+ const PointerType *v2f64Ptr = v2f64->getPointerTo();
+ Value *BC = Builder.CreateBitCast(Ops[0], v2f64Ptr);
+ StoreInst *SI = Builder.CreateStore(Ops[1], BC);
+ SI->setAlignment(1);
+ Result = SI;
+ return true;
+ }
+ case IX86_BUILTIN_STOREDQU: {
+ VectorType *v16i8 = VectorType::get(Type::getInt8Ty(Context), 16);
+ const PointerType *v16i8Ptr = v16i8->getPointerTo();
+ Value *BC = Builder.CreateBitCast(Ops[0], v16i8Ptr);
+ StoreInst *SI = Builder.CreateStore(Ops[1], BC);
+ SI->setAlignment(1);
+ Result = SI;
+ return true;
+ }
+ case IX86_BUILTIN_LOADHPS: {
+ const PointerType *f64Ptr = Type::getDoublePtrTy(Context);
+ Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr);
+ Value *Load = Builder.CreateLoad(Ops[1]);
+ Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
+ Ops[1] = Builder.CreateBitCast(Ops[1], ResultType);
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 1, 4, 5);
+ Result = Builder.CreateBitCast(Result, ResultType);
+ return true;
+ }
+ case IX86_BUILTIN_LOADLPS: {
+ const PointerType *f64Ptr = Type::getDoublePtrTy(Context);
+ Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr);
+ Value *Load = Builder.CreateLoad(Ops[1]);
+ Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
+ Ops[1] = Builder.CreateBitCast(Ops[1], ResultType);
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 4, 5, 2, 3);
+ Result = Builder.CreateBitCast(Result, ResultType);
+ return true;
+ }
+ case IX86_BUILTIN_LOADHPD: {
+ Value *Load = Builder.CreateLoad(Ops[1]);
+ Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
+ Ops[1] = Builder.CreateBitCast(Ops[1], ResultType);
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 2);
+ Result = Builder.CreateBitCast(Result, ResultType);
+ return true;
+ }
+ case IX86_BUILTIN_LOADLPD: {
+ Value *Load = Builder.CreateLoad(Ops[1]);
+ Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
+ Ops[1] = Builder.CreateBitCast(Ops[1], ResultType);
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 1);
+ Result = Builder.CreateBitCast(Result, ResultType);
+ return true;
+ }
+ case IX86_BUILTIN_STOREHPS: {
+ VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
+ const PointerType *f64Ptr = Type::getDoublePtrTy(Context);
+ Ops[0] = Builder.CreateBitCast(Ops[0], f64Ptr);
+ Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 1);
+ Ops[1] = Builder.CreateBitCast(Ops[1], v2f64);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Idx);
+ Result = Builder.CreateStore(Ops[1], Ops[0]);
+ return true;
+ }
+ case IX86_BUILTIN_STORELPS: {
+ VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
+ const PointerType *f64Ptr = Type::getDoublePtrTy(Context);
+ Ops[0] = Builder.CreateBitCast(Ops[0], f64Ptr);
+ Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 0);
+ Ops[1] = Builder.CreateBitCast(Ops[1], v2f64);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Idx);
+ Result = Builder.CreateStore(Ops[1], Ops[0]);
+ return true;
+ }
+ case IX86_BUILTIN_MOVSHDUP:
+ Result = BuildVectorShuffle(Ops[0], Ops[0], 1, 1, 3, 3);
+ return true;
+ case IX86_BUILTIN_MOVSLDUP:
+ Result = BuildVectorShuffle(Ops[0], Ops[0], 0, 0, 2, 2);
+ return true;
+ case IX86_BUILTIN_VEC_INIT_V2SI:
+ Result = BuildVector(Ops[0], Ops[1], NULL);
+ return true;
+ case IX86_BUILTIN_VEC_INIT_V4HI:
+ // Sometimes G++ promotes arguments to int.
+ for (unsigned i = 0; i != 4; ++i)
+ Ops[i] = Builder.CreateIntCast(Ops[i], Type::getInt16Ty(Context), false);
+ Result = BuildVector(Ops[0], Ops[1], Ops[2], Ops[3], NULL);
+ return true;
+ case IX86_BUILTIN_VEC_INIT_V8QI:
+ // Sometimes G++ promotes arguments to int.
+ for (unsigned i = 0; i != 8; ++i)
+ Ops[i] = Builder.CreateIntCast(Ops[i], Type::getInt8Ty(Context), false);
+ Result = BuildVector(Ops[0], Ops[1], Ops[2], Ops[3],
+ Ops[4], Ops[5], Ops[6], Ops[7], NULL);
+ return true;
+ case IX86_BUILTIN_VEC_EXT_V2SI:
+ case IX86_BUILTIN_VEC_EXT_V4HI:
+ case IX86_BUILTIN_VEC_EXT_V2DF:
+ case IX86_BUILTIN_VEC_EXT_V2DI:
+ case IX86_BUILTIN_VEC_EXT_V4SI:
+ case IX86_BUILTIN_VEC_EXT_V4SF:
+ case IX86_BUILTIN_VEC_EXT_V8HI:
+ case IX86_BUILTIN_VEC_EXT_V16QI:
+ Result = Builder.CreateExtractElement(Ops[0], Ops[1]);
+ return true;
+ case IX86_BUILTIN_VEC_SET_V16QI:
+ // Sometimes G++ promotes arguments to int.
+ Ops[1] = Builder.CreateIntCast(Ops[1], Type::getInt8Ty(Context), false);
+ Result = Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2]);
+ return true;
+ case IX86_BUILTIN_VEC_SET_V4HI:
+ case IX86_BUILTIN_VEC_SET_V8HI:
+ // GCC sometimes doesn't produce the right element type.
+ Ops[1] = Builder.CreateIntCast(Ops[1], Type::getInt16Ty(Context), false);
+ Result = Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2]);
+ return true;
+ case IX86_BUILTIN_VEC_SET_V4SI:
+ Result = Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2]);
+ return true;
+ case IX86_BUILTIN_VEC_SET_V2DI:
+ Result = Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2]);
+ return true;
+ case IX86_BUILTIN_CMPEQPS:
+ case IX86_BUILTIN_CMPLTPS:
+ case IX86_BUILTIN_CMPLEPS:
+ case IX86_BUILTIN_CMPGTPS:
+ case IX86_BUILTIN_CMPGEPS:
+ case IX86_BUILTIN_CMPNEQPS:
+ case IX86_BUILTIN_CMPNLTPS:
+ case IX86_BUILTIN_CMPNLEPS:
+ case IX86_BUILTIN_CMPNGTPS:
+ case IX86_BUILTIN_CMPNGEPS:
+ case IX86_BUILTIN_CMPORDPS:
+ case IX86_BUILTIN_CMPUNORDPS: {
+ Function *cmpps =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_cmp_ps);
+ bool flip = false;
+ unsigned PredCode;
+ switch (FnCode) {
+ default: assert(0 && "Unknown fncode!");
+ case IX86_BUILTIN_CMPEQPS: PredCode = 0; break;
+ case IX86_BUILTIN_CMPLTPS: PredCode = 1; break;
+ case IX86_BUILTIN_CMPGTPS: PredCode = 1; flip = true; break;
+ case IX86_BUILTIN_CMPLEPS: PredCode = 2; break;
+ case IX86_BUILTIN_CMPGEPS: PredCode = 2; flip = true; break;
+ case IX86_BUILTIN_CMPUNORDPS: PredCode = 3; break;
+ case IX86_BUILTIN_CMPNEQPS: PredCode = 4; break;
+ case IX86_BUILTIN_CMPNLTPS: PredCode = 5; break;
+ case IX86_BUILTIN_CMPNGTPS: PredCode = 5; flip = true; break;
+ case IX86_BUILTIN_CMPNLEPS: PredCode = 6; break;
+ case IX86_BUILTIN_CMPNGEPS: PredCode = 6; flip = true; break;
+ case IX86_BUILTIN_CMPORDPS: PredCode = 7; break;
+ }
+ Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
+ Value *Arg0 = Ops[0];
+ Value *Arg1 = Ops[1];
+ if (flip) std::swap(Arg0, Arg1);
+ Value *CallOps[3] = { Arg0, Arg1, Pred };
+ Result = Builder.CreateCall(cmpps, CallOps, CallOps+3);
+ Result = Builder.CreateBitCast(Result, ResultType);
+ return true;
+ }
+ case IX86_BUILTIN_CMPEQSS:
+ case IX86_BUILTIN_CMPLTSS:
+ case IX86_BUILTIN_CMPLESS:
+ case IX86_BUILTIN_CMPNEQSS:
+ case IX86_BUILTIN_CMPNLTSS:
+ case IX86_BUILTIN_CMPNLESS:
+ case IX86_BUILTIN_CMPNGTSS:
+ case IX86_BUILTIN_CMPNGESS:
+ case IX86_BUILTIN_CMPORDSS:
+ case IX86_BUILTIN_CMPUNORDSS: {
+ Function *cmpss =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_cmp_ss);
+ unsigned PredCode;
+ switch (FnCode) {
+ default: assert(0 && "Unknown fncode");
+ case IX86_BUILTIN_CMPEQSS: PredCode = 0; break;
+ case IX86_BUILTIN_CMPLTSS: PredCode = 1; break;
+ case IX86_BUILTIN_CMPLESS: PredCode = 2; break;
+ case IX86_BUILTIN_CMPUNORDSS: PredCode = 3; break;
+ case IX86_BUILTIN_CMPNEQSS: PredCode = 4; break;
+ case IX86_BUILTIN_CMPNLTSS: PredCode = 5; break;
+ case IX86_BUILTIN_CMPNLESS: PredCode = 6; break;
+ case IX86_BUILTIN_CMPORDSS: PredCode = 7; break;
+ }
+ Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
+ Value *CallOps[3] = { Ops[0], Ops[1], Pred };
+ Result = Builder.CreateCall(cmpss, CallOps, CallOps+3);
+ Result = Builder.CreateBitCast(Result, ResultType);
+ return true;
+ }
+ case IX86_BUILTIN_CMPEQPD:
+ case IX86_BUILTIN_CMPLTPD:
+ case IX86_BUILTIN_CMPLEPD:
+ case IX86_BUILTIN_CMPGTPD:
+ case IX86_BUILTIN_CMPGEPD:
+ case IX86_BUILTIN_CMPNEQPD:
+ case IX86_BUILTIN_CMPNLTPD:
+ case IX86_BUILTIN_CMPNLEPD:
+ case IX86_BUILTIN_CMPNGTPD:
+ case IX86_BUILTIN_CMPNGEPD:
+ case IX86_BUILTIN_CMPORDPD:
+ case IX86_BUILTIN_CMPUNORDPD: {
+ Function *cmppd =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse2_cmp_pd);
+ bool flip = false;
+ unsigned PredCode;
+ switch (FnCode) {
+ default: assert(0 && "Unknown fncode!");
+ case IX86_BUILTIN_CMPEQPD: PredCode = 0; break;
+ case IX86_BUILTIN_CMPLTPD: PredCode = 1; break;
+ case IX86_BUILTIN_CMPGTPD: PredCode = 1; flip = true; break;
+ case IX86_BUILTIN_CMPLEPD: PredCode = 2; break;
+ case IX86_BUILTIN_CMPGEPD: PredCode = 2; flip = true; break;
+ case IX86_BUILTIN_CMPUNORDPD: PredCode = 3; break;
+ case IX86_BUILTIN_CMPNEQPD: PredCode = 4; break;
+ case IX86_BUILTIN_CMPNLTPD: PredCode = 5; break;
+ case IX86_BUILTIN_CMPNGTPD: PredCode = 5; flip = true; break;
+ case IX86_BUILTIN_CMPNLEPD: PredCode = 6; break;
+ case IX86_BUILTIN_CMPNGEPD: PredCode = 6; flip = true; break;
+ case IX86_BUILTIN_CMPORDPD: PredCode = 7; break;
+ }
+ Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
+ Value *Arg0 = Ops[0];
+ Value *Arg1 = Ops[1];
+ if (flip) std::swap(Arg0, Arg1);
+
+ Value *CallOps[3] = { Arg0, Arg1, Pred };
+ Result = Builder.CreateCall(cmppd, CallOps, CallOps+3);
+ Result = Builder.CreateBitCast(Result, ResultType);
+ return true;
+ }
+ case IX86_BUILTIN_CMPEQSD:
+ case IX86_BUILTIN_CMPLTSD:
+ case IX86_BUILTIN_CMPLESD:
+ case IX86_BUILTIN_CMPNEQSD:
+ case IX86_BUILTIN_CMPNLTSD:
+ case IX86_BUILTIN_CMPNLESD:
+ case IX86_BUILTIN_CMPORDSD:
+ case IX86_BUILTIN_CMPUNORDSD: {
+ Function *cmpsd =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse2_cmp_sd);
+ unsigned PredCode;
+ switch (FnCode) {
+ default: assert(0 && "Unknown fncode");
+ case IX86_BUILTIN_CMPEQSD: PredCode = 0; break;
+ case IX86_BUILTIN_CMPLTSD: PredCode = 1; break;
+ case IX86_BUILTIN_CMPLESD: PredCode = 2; break;
+ case IX86_BUILTIN_CMPUNORDSD: PredCode = 3; break;
+ case IX86_BUILTIN_CMPNEQSD: PredCode = 4; break;
+ case IX86_BUILTIN_CMPNLTSD: PredCode = 5; break;
+ case IX86_BUILTIN_CMPNLESD: PredCode = 6; break;
+ case IX86_BUILTIN_CMPORDSD: PredCode = 7; break;
+ }
+ Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
+ Value *CallOps[3] = { Ops[0], Ops[1], Pred };
+ Result = Builder.CreateCall(cmpsd, CallOps, CallOps+3);
+ Result = Builder.CreateBitCast(Result, ResultType);
+ return true;
+ }
+ case IX86_BUILTIN_LDMXCSR: {
+ Function *ldmxcsr =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_ldmxcsr);
+ Value *Ptr = CreateTemporary(Type::getInt32Ty(Context));
+ Builder.CreateStore(Ops[0], Ptr);
+ Ptr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
+ Result = Builder.CreateCall(ldmxcsr, Ptr);
+ return true;
+ }
+ case IX86_BUILTIN_STMXCSR: {
+ Function *stmxcsr =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_stmxcsr);
+ Value *Ptr = CreateTemporary(Type::getInt32Ty(Context));
+ Value *BPtr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
+ Builder.CreateCall(stmxcsr, BPtr);
+
+ Result = Builder.CreateLoad(Ptr);
+ return true;
+ }
+ }
+
+ return false;
+}
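
Editorial note on the compare lowering above: the SSE cmpps/cmpss/cmppd/cmpsd intrinsics only encode the "less" family of predicates (0-7), so the greater-than builtins are lowered by reusing the less-than predicate with the operands swapped. A minimal standalone sketch of the identity being relied on (illustrative only, not part of the patch):

// Illustrative only: the "flip" trick above relies on a > b being the same
// comparison as b < a, and a >= b the same as b <= a, so CMPGTPS/CMPGEPS can
// reuse the CMPPS predicates 1 (LT) and 2 (LE) with swapped operands.
#include <cassert>

int main() {
  float a = 2.0f, b = 1.0f;
  assert((a > b) == (b < a));    // CMPGTPS(a, b) -> cmpps(b, a, /*pred=*/1)
  assert((a >= b) == (b <= a));  // CMPGEPS(a, b) -> cmpps(b, a, /*pred=*/2)
  return 0;
}
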
+
+/* These are defined in i386.c */
+#define MAX_CLASSES 4
+extern "C" enum machine_mode type_natural_mode(tree, CUMULATIVE_ARGS *);
+extern "C" int examine_argument(enum machine_mode, const_tree, int, int*, int*);
+extern "C" int classify_argument(enum machine_mode, const_tree,
+ enum x86_64_reg_class classes[MAX_CLASSES], int);
+
+/* Target hook for llvm-abi.h. It returns true if an aggregate of the
+ specified type should be passed in memory. This is only called for
+ x86-64. */
+static bool llvm_x86_64_should_pass_aggregate_in_memory(tree TreeType,
+ enum machine_mode Mode){
+ int IntRegs, SSERegs;
+  /* If examine_argument returns 0, then it's passed byval in memory. */
+ int ret = examine_argument(Mode, TreeType, 0, &IntRegs, &SSERegs);
+ if (ret==0)
+ return true;
+ if (ret==1 && IntRegs==0 && SSERegs==0) // zero-sized struct
+ return true;
+ return false;
+}
+
+/* Returns true if all elements of the type are integer types. */
+static bool llvm_x86_is_all_integer_types(const Type *Ty) {
+ for (Type::subtype_iterator I = Ty->subtype_begin(), E = Ty->subtype_end();
+ I != E; ++I) {
+ const Type *STy = I->get();
+ if (!STy->isIntOrIntVector() && !isa<PointerType>(STy))
+ return false;
+ }
+ return true;
+}
+
+/* Target hook for llvm-abi.h. It returns true if an aggregate of the
+ specified type should be passed in a number of registers of mixed types.
+ It also returns a vector of types that correspond to the registers used
+ for parameter passing. This is only called for x86-32. */
+bool
+llvm_x86_32_should_pass_aggregate_in_mixed_regs(tree TreeType, const Type *Ty,
+ std::vector<const Type*> &Elts){
+ // If this is a small fixed size type, investigate it.
+ HOST_WIDE_INT SrcSize = int_size_in_bytes(TreeType);
+ if (SrcSize <= 0 || SrcSize > 16)
+ return false;
+
+ // X86-32 passes aggregates on the stack. If this is an extremely simple
+ // aggregate whose elements would be passed the same if passed as scalars,
+ // pass them that way in order to promote SROA on the caller and callee side.
+ // Note that we can't support passing all structs this way. For example,
+  // {i16, i16} should be passed in one 32-bit unit, which is not how "i16, i16"
+ // would be passed as stand-alone arguments.
+ const StructType *STy = dyn_cast<StructType>(Ty);
+ if (!STy || STy->isPacked()) return false;
+
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ const Type *EltTy = STy->getElementType(i);
+ // 32 and 64-bit integers are fine, as are float and double. Long double
+ // (which can be picked as the type for a union of 16 bytes) is not fine,
+ // as loads and stores of it get only 10 bytes.
+ if (EltTy == Type::getInt32Ty(Context) ||
+ EltTy == Type::getInt64Ty(Context) ||
+ EltTy == Type::getFloatTy(Context) ||
+ EltTy == Type::getDoubleTy(Context) ||
+ isa<PointerType>(EltTy)) {
+ Elts.push_back(EltTy);
+ continue;
+ }
+
+ // TODO: Vectors are also ok to pass if they don't require extra alignment.
+ // TODO: We can also pass structs like {i8, i32}.
+
+ Elts.clear();
+ return false;
+ }
+
+ return true;
+}
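
To make the rule above concrete, here are a few hypothetical C structs (not taken from the patch) and how the x86-32 hook would treat them, assuming the usual LLVM lowering of their field types:

// Illustrative structs only; the comments reflect the whitelist logic above.
struct OkPair   { int a; float b; };  // elements i32, float -> passed as [i32, float]
struct OkPtr    { void *p; int i; };  // elements ptr, i32   -> passed as [ptr, i32]
struct TooSmall { short a, b; };      // i16 elements        -> rejected (not whitelisted)
struct LongDbl  { long double v; };   // x86_fp80 element    -> rejected (see comment above)
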
+
+/* It returns true if an aggregate of the specified type should be passed as a
+ first class aggregate. */
+bool llvm_x86_should_pass_aggregate_as_fca(tree type, const Type *Ty) {
+ if (TREE_CODE(type) != COMPLEX_TYPE)
+ return false;
+ const StructType *STy = dyn_cast<StructType>(Ty);
+ if (!STy || STy->isPacked()) return false;
+
+ // FIXME: Currently codegen isn't lowering most _Complex types in a way that
+ // makes it ABI compatible for x86-64. Same for _Complex char and _Complex
+ // short in 32-bit.
+ const Type *EltTy = STy->getElementType(0);
+ return !((TARGET_64BIT && (EltTy->isInteger() ||
+ EltTy == Type::getFloatTy(Context) ||
+ EltTy == Type::getDoubleTy(Context))) ||
+ EltTy == Type::getInt16Ty(Context) ||
+ EltTy == Type::getInt8Ty(Context));
+}
+
+/* Target hook for llvm-abi.h. It returns true if an aggregate of the
+ specified type should be passed in memory. */
+bool llvm_x86_should_pass_aggregate_in_memory(tree TreeType, const Type *Ty) {
+ if (llvm_x86_should_pass_aggregate_as_fca(TreeType, Ty))
+ return false;
+
+ enum machine_mode Mode = type_natural_mode(TreeType, NULL);
+ HOST_WIDE_INT Bytes =
+ (Mode == BLKmode) ? int_size_in_bytes(TreeType) : (int) GET_MODE_SIZE(Mode);
+
+  // Zero-sized arrays, structs and classes are not passed in memory.
+ if (Bytes == 0)
+ return false;
+
+ if (!TARGET_64BIT) {
+ std::vector<const Type*> Elts;
+ return !llvm_x86_32_should_pass_aggregate_in_mixed_regs(TreeType, Ty, Elts);
+ }
+ return llvm_x86_64_should_pass_aggregate_in_memory(TreeType, Mode);
+}
+
+/* count_num_registers_uses - Count the number of GPR and XMM parameter
+   registers used so far. Caller is responsible for initializing outputs. */
+static void count_num_registers_uses(std::vector<const Type*> &ScalarElts,
+ unsigned &NumGPRs, unsigned &NumXMMs) {
+ for (unsigned i = 0, e = ScalarElts.size(); i != e; ++i) {
+ const Type *Ty = ScalarElts[i];
+ if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+ if (!TARGET_MACHO)
+ continue;
+ if (VTy->getNumElements() == 1)
+ // v1i64 is passed in GPRs on Darwin.
+ ++NumGPRs;
+ else
+ // All other vector scalar values are passed in XMM registers.
+ ++NumXMMs;
+ } else if (Ty->isInteger() || isa<PointerType>(Ty)) {
+ ++NumGPRs;
+ } else if (Ty==Type::getVoidTy(Context)) {
+ // Padding bytes that are not passed anywhere
+ ;
+ } else {
+ // Floating point scalar argument.
+ assert(Ty->isFloatingPoint() && Ty->isPrimitiveType() &&
+ "Expecting a floating point primitive type!");
+ if (Ty->getTypeID() == Type::FloatTyID
+ || Ty->getTypeID() == Type::DoubleTyID)
+ ++NumXMMs;
+ }
+ }
+}
+
+/* Target hook for llvm-abi.h. This is called when an aggregate is being passed
+ in registers. If there are only enough available parameter registers to pass
+ part of the aggregate, return true. That means the aggregate should instead
+ be passed in memory. */
+bool
+llvm_x86_64_aggregate_partially_passed_in_regs(std::vector<const Type*> &Elts,
+ std::vector<const Type*> &ScalarElts,
+ bool isShadowReturn) {
+ // Counting number of GPRs and XMMs used so far. According to AMD64 ABI
+ // document: "If there are no registers available for any eightbyte of an
+  // argument, the whole argument is passed on the stack." X86-64 has 6
+  // integer registers and 8 XMM registers available for parameter passing.
+  // For example, if two GPRs are required but only one is available, then
+ // both parts will be in memory.
+ // FIXME: This is a temporary solution. To be removed when llvm has first
+ // class aggregate values.
+ unsigned NumGPRs = isShadowReturn ? 1 : 0;
+ unsigned NumXMMs = 0;
+ count_num_registers_uses(ScalarElts, NumGPRs, NumXMMs);
+
+ unsigned NumGPRsNeeded = 0;
+ unsigned NumXMMsNeeded = 0;
+ count_num_registers_uses(Elts, NumGPRsNeeded, NumXMMsNeeded);
+
+ bool GPRsSatisfied = true;
+ if (NumGPRsNeeded) {
+ if (NumGPRs < 6) {
+ if ((NumGPRs + NumGPRsNeeded) > 6)
+ // Only partially satisfied.
+ return true;
+ } else
+ GPRsSatisfied = false;
+ }
+
+ bool XMMsSatisfied = true;
+ if (NumXMMsNeeded) {
+ if (NumXMMs < 8) {
+ if ((NumXMMs + NumXMMsNeeded) > 8)
+ // Only partially satisfied.
+ return true;
+ } else
+ XMMsSatisfied = false;
+ }
+
+ return !GPRsSatisfied || !XMMsSatisfied;
+}
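
As a worked example of the budget check above, here is a simplified sketch (hypothetical names; assumes the 6 GPR / 8 XMM limits used in the code and covers only the "partially satisfied" branch, not the already-exhausted one):

#include <cassert>

// Simplified model of the partial-fit test above: an aggregate needing more
// registers than remain free (while some are still free) is "partially
// passed" and must therefore go to memory in its entirety.
static bool partiallyPassedInRegs(unsigned GPRsUsed, unsigned GPRsNeeded,
                                  unsigned XMMsUsed, unsigned XMMsNeeded) {
  bool GPRsPartial = GPRsNeeded && GPRsUsed < 6 && GPRsUsed + GPRsNeeded > 6;
  bool XMMsPartial = XMMsNeeded && XMMsUsed < 8 && XMMsUsed + XMMsNeeded > 8;
  return GPRsPartial || XMMsPartial;
}

int main() {
  // Five integer words already assigned plus a two-eightbyte struct: only one
  // GPR is left, so the whole struct goes to the stack.
  assert(partiallyPassedInRegs(5, 2, 0, 0));
  // With four GPRs used, both eightbytes still fit, so registers are used.
  assert(!partiallyPassedInRegs(4, 2, 0, 0));
  return 0;
}
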
+
+/* Target hook for llvm-abi.h. It returns true if an aggregate of the
+ specified type should be passed in a number of registers of mixed types.
+ It also returns a vector of types that correspond to the registers used
+ for parameter passing. This is only called for x86-64. */
+bool
+llvm_x86_64_should_pass_aggregate_in_mixed_regs(tree TreeType, const Type *Ty,
+ std::vector<const Type*> &Elts){
+ if (llvm_x86_should_pass_aggregate_as_fca(TreeType, Ty))
+ return false;
+
+ enum x86_64_reg_class Class[MAX_CLASSES];
+ enum machine_mode Mode = type_natural_mode(TreeType, NULL);
+ bool totallyEmpty = true;
+ HOST_WIDE_INT Bytes =
+ (Mode == BLKmode) ? int_size_in_bytes(TreeType) : (int) GET_MODE_SIZE(Mode);
+ int NumClasses = classify_argument(Mode, TreeType, Class, 0);
+ if (!NumClasses)
+ return false;
+
+ if (NumClasses == 1 && Class[0] == X86_64_INTEGERSI_CLASS)
+ // This will fit in one i32 register.
+ return false;
+
+ for (int i = 0; i < NumClasses; ++i) {
+ switch (Class[i]) {
+ case X86_64_INTEGER_CLASS:
+ case X86_64_INTEGERSI_CLASS:
+ Elts.push_back(Type::getInt64Ty(Context));
+ totallyEmpty = false;
+ Bytes -= 8;
+ break;
+ case X86_64_SSE_CLASS:
+ totallyEmpty = false;
+      // If it's an SSE class argument, then one of the following is possible:
+ // 1. 1 x SSE, size is 8: 1 x Double.
+ // 2. 1 x SSE, size is 4: 1 x Float.
+ // 3. 1 x SSE + 1 x SSEUP, size is 16: 1 x <4 x i32>, <4 x f32>,
+ // <2 x i64>, or <2 x f64>.
+ // 4. 1 x SSE + 1 x SSESF, size is 12: 1 x Double, 1 x Float.
+ // 5. 2 x SSE, size is 16: 2 x Double.
+ if ((NumClasses-i) == 1) {
+ if (Bytes == 8) {
+ Elts.push_back(Type::getDoubleTy(Context));
+ Bytes -= 8;
+ } else if (Bytes == 4) {
+ Elts.push_back (Type::getFloatTy(Context));
+ Bytes -= 4;
+ } else
+ assert(0 && "Not yet handled!");
+ } else if ((NumClasses-i) == 2) {
+ if (Class[i+1] == X86_64_SSEUP_CLASS) {
+ const Type *Ty = ConvertType(TreeType);
+ if (const StructType *STy = dyn_cast<StructType>(Ty))
+          // Look past the struct wrapper.
+ if (STy->getNumElements() == 1)
+ Ty = STy->getElementType(0);
+ if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+ if (VTy->getNumElements() == 2) {
+ if (VTy->getElementType()->isInteger()) {
+ Elts.push_back(VectorType::get(Type::getInt64Ty(Context), 2));
+ } else {
+ Elts.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
+ }
+ Bytes -= 8;
+ } else {
+ assert(VTy->getNumElements() == 4);
+ if (VTy->getElementType()->isInteger()) {
+ Elts.push_back(VectorType::get(Type::getInt32Ty(Context), 4));
+ } else {
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
+ }
+ Bytes -= 4;
+ }
+ } else if (llvm_x86_is_all_integer_types(Ty)) {
+ Elts.push_back(VectorType::get(Type::getInt32Ty(Context), 4));
+ Bytes -= 4;
+ } else {
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
+ Bytes -= 4;
+ }
+ } else if (Class[i+1] == X86_64_SSESF_CLASS) {
+ assert(Bytes == 12 && "Not yet handled!");
+ Elts.push_back(Type::getDoubleTy(Context));
+ Elts.push_back(Type::getFloatTy(Context));
+ Bytes -= 12;
+ } else if (Class[i+1] == X86_64_SSE_CLASS) {
+ Elts.push_back(Type::getDoubleTy(Context));
+ Elts.push_back(Type::getDoubleTy(Context));
+ Bytes -= 16;
+ } else if (Class[i+1] == X86_64_SSEDF_CLASS && Bytes == 16) {
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
+ Elts.push_back(Type::getDoubleTy(Context));
+ } else if (Class[i+1] == X86_64_INTEGER_CLASS) {
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
+ Elts.push_back(Type::getInt64Ty(Context));
+ } else if (Class[i+1] == X86_64_NO_CLASS) {
+ // padding bytes, don't pass
+ Elts.push_back(Type::getDoubleTy(Context));
+ Elts.push_back(Type::getVoidTy(Context));
+ Bytes -= 16;
+ } else
+ assert(0 && "Not yet handled!");
+ ++i; // Already handled the next one.
+ } else
+ assert(0 && "Not yet handled!");
+ break;
+ case X86_64_SSESF_CLASS:
+ totallyEmpty = false;
+ Elts.push_back(Type::getFloatTy(Context));
+ Bytes -= 4;
+ break;
+ case X86_64_SSEDF_CLASS:
+ totallyEmpty = false;
+ Elts.push_back(Type::getDoubleTy(Context));
+ Bytes -= 8;
+ break;
+ case X86_64_X87_CLASS:
+ case X86_64_X87UP_CLASS:
+ case X86_64_COMPLEX_X87_CLASS:
+ return false;
+ case X86_64_NO_CLASS:
+ // Padding bytes that are not passed (unless the entire object consists
+ // of padding)
+ Elts.push_back(Type::getVoidTy(Context));
+ Bytes -= 8;
+ break;
+ default: assert(0 && "Unexpected register class!");
+ }
+ }
+
+ return !totallyEmpty;
+}
+
+/* On Darwin x86-32, vectors which are not MMX nor SSE should be passed as
+ integers. On Darwin x86-64, such vectors bigger than 128 bits should be
+ passed in memory (byval). */
+bool llvm_x86_should_pass_vector_in_integer_regs(tree type) {
+ if (!TARGET_MACHO)
+ return false;
+ if (TREE_CODE(type) == VECTOR_TYPE &&
+ TYPE_SIZE(type) &&
+ TREE_CODE(TYPE_SIZE(type))==INTEGER_CST) {
+ if (TREE_INT_CST_LOW(TYPE_SIZE(type))==64 && TARGET_MMX)
+ return false;
+ if (TREE_INT_CST_LOW(TYPE_SIZE(type))==128 && TARGET_SSE)
+ return false;
+ if (TARGET_64BIT && TREE_INT_CST_LOW(TYPE_SIZE(type)) > 128)
+ return false;
+ }
+ return true;
+}
+
+/* On Darwin x86-64, vectors which are bigger than 128 bits should be passed
+ byval (in memory). */
+bool llvm_x86_should_pass_vector_using_byval_attr(tree type) {
+ if (!TARGET_MACHO)
+ return false;
+ if (!TARGET_64BIT)
+ return false;
+ if (TREE_CODE(type) == VECTOR_TYPE &&
+ TYPE_SIZE(type) &&
+ TREE_CODE(TYPE_SIZE(type))==INTEGER_CST) {
+ if (TREE_INT_CST_LOW(TYPE_SIZE(type))<=128)
+ return false;
+ }
+ return true;
+}
+
+/* The MMX vector v1i64 is returned in EAX and EDX on Darwin. Communicate
+ this by returning i64 here. Likewise, (generic) vectors such as v2i16
+ are returned in EAX.
+ On Darwin x86-64, v1i64 is returned in RAX and other MMX vectors are
+ returned in XMM0. Judging from comments, this would not be right for
+ Win64. Don't know about Linux. */
+tree llvm_x86_should_return_vector_as_scalar(tree type, bool isBuiltin) {
+ if (TARGET_MACHO &&
+ !isBuiltin &&
+ TREE_CODE(type) == VECTOR_TYPE &&
+ TYPE_SIZE(type) &&
+ TREE_CODE(TYPE_SIZE(type))==INTEGER_CST) {
+ if (TREE_INT_CST_LOW(TYPE_SIZE(type))==64 &&
+ TYPE_VECTOR_SUBPARTS(type)==1)
+ return uint64_type_node;
+ if (TARGET_64BIT && TREE_INT_CST_LOW(TYPE_SIZE(type))==64)
+ return double_type_node;
+ if (TREE_INT_CST_LOW(TYPE_SIZE(type))==32)
+ return uint32_type_node;
+ }
+ return 0;
+}
+
+/* MMX vectors are returned in XMM0 on x86-64 Darwin. The easiest way to
+ communicate this is pretend they're doubles.
+ Judging from comments, this would not be right for Win64. Don't know
+ about Linux. */
+tree llvm_x86_should_return_selt_struct_as_scalar(tree type) {
+ tree retType = isSingleElementStructOrArray(type, true, false);
+ if (!retType || !TARGET_64BIT || !TARGET_MACHO)
+ return retType;
+ if (TREE_CODE(retType) == VECTOR_TYPE &&
+ TYPE_SIZE(retType) &&
+ TREE_CODE(TYPE_SIZE(retType))==INTEGER_CST &&
+ TREE_INT_CST_LOW(TYPE_SIZE(retType))==64)
+ return double_type_node;
+ return retType;
+}
+
+/* MMX vectors v2i32, v4i16, v8i8, v2f32 are returned using sret on Darwin
+   32-bit. Vectors bigger than 128 bits are returned using sret. */
+bool llvm_x86_should_return_vector_as_shadow(tree type, bool isBuiltin) {
+ if (TARGET_MACHO &&
+ !isBuiltin &&
+ !TARGET_64BIT &&
+ TREE_CODE(type) == VECTOR_TYPE &&
+ TYPE_SIZE(type) &&
+ TREE_CODE(TYPE_SIZE(type))==INTEGER_CST) {
+ if (TREE_INT_CST_LOW(TYPE_SIZE(type))==64 &&
+ TYPE_VECTOR_SUBPARTS(type)>1)
+ return true;
+ }
+ if (TREE_INT_CST_LOW(TYPE_SIZE(type))>128)
+ return true;
+ return false;
+}
+
+// llvm_x86_should_not_return_complex_in_memory - Return true if TYPE
+// should be returned using a multiple value return instruction.
+bool llvm_x86_should_not_return_complex_in_memory(tree type) {
+
+ if (!TARGET_64BIT)
+ return false;
+
+ if (TREE_CODE(type) == COMPLEX_TYPE &&
+ TREE_INT_CST_LOW(TYPE_SIZE_UNIT(type)) == 32)
+ return true;
+
+ return false;
+}
+
+// llvm_suitable_multiple_ret_value_type - Return TRUE if return value
+// of type TY should be returned using a multiple value return instruction.
+static bool llvm_suitable_multiple_ret_value_type(const Type *Ty,
+ tree TreeType) {
+
+ if (!TARGET_64BIT)
+ return false;
+
+ const StructType *STy = dyn_cast<StructType>(Ty);
+ if (!STy)
+ return false;
+
+ if (llvm_x86_should_not_return_complex_in_memory(TreeType))
+ return true;
+
+ // Let gcc specific routine answer the question.
+ enum x86_64_reg_class Class[MAX_CLASSES];
+ enum machine_mode Mode = type_natural_mode(TreeType, NULL);
+ int NumClasses = classify_argument(Mode, TreeType, Class, 0);
+ if (NumClasses == 0)
+ return false;
+
+ if (NumClasses == 1 &&
+ (Class[0] == X86_64_INTEGERSI_CLASS || Class[0] == X86_64_INTEGER_CLASS))
+ // This will fit in one i64 register.
+ return false;
+
+ if (NumClasses == 2 &&
+ (Class[0] == X86_64_NO_CLASS || Class[1] == X86_64_NO_CLASS))
+ // One word is padding which is not passed at all; treat this as returning
+ // the scalar type of the other word.
+ return false;
+
+ // Otherwise, use of multiple value return is OK.
+ return true;
+}
+
+// llvm_x86_scalar_type_for_struct_return - Return LLVM type if TYPE
+// can be returned as a scalar, otherwise return NULL.
+const Type *llvm_x86_scalar_type_for_struct_return(tree type, unsigned *Offset) {
+ *Offset = 0;
+ const Type *Ty = ConvertType(type);
+ unsigned Size = getTargetData().getTypeAllocSize(Ty);
+ if (Size == 0)
+ return Type::getVoidTy(Context);
+ else if (Size == 1)
+ return Type::getInt8Ty(Context);
+ else if (Size == 2)
+ return Type::getInt16Ty(Context);
+ else if (Size <= 4)
+ return Type::getInt32Ty(Context);
+
+  // Check if Ty should be returned using a multiple value return instruction.
+ if (llvm_suitable_multiple_ret_value_type(Ty, type))
+ return NULL;
+
+ if (TARGET_64BIT) {
+ // This logic relies on llvm_suitable_multiple_ret_value_type to have
+ // removed anything not expected here.
+ enum x86_64_reg_class Class[MAX_CLASSES];
+ enum machine_mode Mode = type_natural_mode(type, NULL);
+ int NumClasses = classify_argument(Mode, type, Class, 0);
+ if (NumClasses == 0)
+ return Type::getInt64Ty(Context);
+
+ if (NumClasses == 1) {
+ if (Class[0] == X86_64_INTEGERSI_CLASS ||
+ Class[0] == X86_64_INTEGER_CLASS) {
+ // one int register
+ HOST_WIDE_INT Bytes =
+ (Mode == BLKmode) ? int_size_in_bytes(type) :
+ (int) GET_MODE_SIZE(Mode);
+ if (Bytes>4)
+ return Type::getInt64Ty(Context);
+ else if (Bytes>2)
+ return Type::getInt32Ty(Context);
+ else if (Bytes>1)
+ return Type::getInt16Ty(Context);
+ else
+ return Type::getInt8Ty(Context);
+ }
+ assert(0 && "Unexpected type!");
+ }
+ if (NumClasses == 2) {
+ if (Class[1] == X86_64_NO_CLASS) {
+ if (Class[0] == X86_64_INTEGER_CLASS ||
+ Class[0] == X86_64_NO_CLASS ||
+ Class[0] == X86_64_INTEGERSI_CLASS)
+ return Type::getInt64Ty(Context);
+ else if (Class[0] == X86_64_SSE_CLASS || Class[0] == X86_64_SSEDF_CLASS)
+ return Type::getDoubleTy(Context);
+ else if (Class[0] == X86_64_SSESF_CLASS)
+ return Type::getFloatTy(Context);
+ assert(0 && "Unexpected type!");
+ }
+ if (Class[0] == X86_64_NO_CLASS) {
+ *Offset = 8;
+ if (Class[1] == X86_64_INTEGERSI_CLASS ||
+ Class[1] == X86_64_INTEGER_CLASS)
+ return Type::getInt64Ty(Context);
+ else if (Class[1] == X86_64_SSE_CLASS || Class[1] == X86_64_SSEDF_CLASS)
+ return Type::getDoubleTy(Context);
+ else if (Class[1] == X86_64_SSESF_CLASS)
+ return Type::getFloatTy(Context);
+ assert(0 && "Unexpected type!");
+ }
+ assert(0 && "Unexpected type!");
+ }
+ assert(0 && "Unexpected type!");
+ } else {
+ if (Size <= 8)
+ return Type::getInt64Ty(Context);
+ else if (Size <= 16)
+ return IntegerType::get(Context, 128);
+ else if (Size <= 32)
+ return IntegerType::get(Context, 256);
+ }
+ return NULL;
+}
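
Some hypothetical examples of the scalar struct-return mapping implemented above (illustrative structs, not from the patch; x86-32 path, where *Offset stays 0 and small aggregates are simply widened to the next integer size):

// Illustrative x86-32 examples of llvm_x86_scalar_type_for_struct_return:
struct OneByte { char c; };          // size 1  -> returned as i8
struct Rgb     { char r, g, b; };    // size 3  -> returned as i32
struct Pair    { int a, b; };        // size 8  -> returned as i64
struct Quad    { int a, b, c, d; };  // size 16 -> returned as i128
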
+
+/// llvm_x86_64_get_multiple_return_reg_classes - Find register classes used
+/// to return Ty. It is expected that Ty requires multiple return values.
+/// This routine uses the GCC implementation to find the required register
+/// classes.
+/// The original implementation of this routine is based on
+/// llvm_x86_64_should_pass_aggregate_in_mixed_regs code.
+void
+llvm_x86_64_get_multiple_return_reg_classes(tree TreeType, const Type *Ty,
+ std::vector<const Type*> &Elts){
+ enum x86_64_reg_class Class[MAX_CLASSES];
+ enum machine_mode Mode = type_natural_mode(TreeType, NULL);
+ HOST_WIDE_INT Bytes =
+ (Mode == BLKmode) ? int_size_in_bytes(TreeType) : (int) GET_MODE_SIZE(Mode);
+ int NumClasses = classify_argument(Mode, TreeType, Class, 0);
+ if (!NumClasses)
+ assert(0 && "This type does not need multiple return registers!");
+
+ if (NumClasses == 1 && Class[0] == X86_64_INTEGERSI_CLASS)
+ // This will fit in one i32 register.
+ assert(0 && "This type does not need multiple return registers!");
+
+ if (NumClasses == 1 && Class[0] == X86_64_INTEGER_CLASS)
+ assert(0 && "This type does not need multiple return registers!");
+
+ // classify_argument uses a single X86_64_NO_CLASS as a special case for
+ // empty structs. Recognize it and don't add any return values in that
+ // case.
+ if (NumClasses == 1 && Class[0] == X86_64_NO_CLASS)
+ return;
+
+ for (int i = 0; i < NumClasses; ++i) {
+ switch (Class[i]) {
+ case X86_64_INTEGER_CLASS:
+ case X86_64_INTEGERSI_CLASS:
+ Elts.push_back(Type::getInt64Ty(Context));
+ Bytes -= 8;
+ break;
+ case X86_64_SSE_CLASS:
+      // If it's an SSE class argument, then one of the following is possible:
+ // 1. 1 x SSE, size is 8: 1 x Double.
+ // 2. 1 x SSE, size is 4: 1 x Float.
+ // 3. 1 x SSE + 1 x SSEUP, size is 16: 1 x <4 x i32>, <4 x f32>,
+ // <2 x i64>, or <2 x f64>.
+ // 4. 1 x SSE + 1 x SSESF, size is 12: 1 x Double, 1 x Float.
+ // 5. 2 x SSE, size is 16: 2 x Double.
+ // 6. 1 x SSE, 1 x NO: Second is padding, pass as double.
+ if ((NumClasses-i) == 1) {
+ if (Bytes == 8) {
+ Elts.push_back(Type::getDoubleTy(Context));
+ Bytes -= 8;
+ } else if (Bytes == 4) {
+ Elts.push_back(Type::getFloatTy(Context));
+ Bytes -= 4;
+ } else
+ assert(0 && "Not yet handled!");
+ } else if ((NumClasses-i) == 2) {
+ if (Class[i+1] == X86_64_SSEUP_CLASS) {
+ const Type *Ty = ConvertType(TreeType);
+ if (const StructType *STy = dyn_cast<StructType>(Ty))
+          // Look past the struct wrapper.
+ if (STy->getNumElements() == 1)
+ Ty = STy->getElementType(0);
+ if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+ if (VTy->getNumElements() == 2) {
+ if (VTy->getElementType()->isInteger())
+ Elts.push_back(VectorType::get(Type::getInt64Ty(Context), 2));
+ else
+ Elts.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
+ Bytes -= 8;
+ } else {
+ assert(VTy->getNumElements() == 4);
+ if (VTy->getElementType()->isInteger())
+ Elts.push_back(VectorType::get(Type::getInt32Ty(Context), 4));
+ else
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
+ Bytes -= 4;
+ }
+ } else if (llvm_x86_is_all_integer_types(Ty)) {
+ Elts.push_back(VectorType::get(Type::getInt32Ty(Context), 4));
+ Bytes -= 4;
+ } else {
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
+ Bytes -= 4;
+ }
+ } else if (Class[i+1] == X86_64_SSESF_CLASS) {
+ assert(Bytes == 12 && "Not yet handled!");
+ Elts.push_back(Type::getDoubleTy(Context));
+ Elts.push_back(Type::getFloatTy(Context));
+ Bytes -= 12;
+ } else if (Class[i+1] == X86_64_SSE_CLASS) {
+ Elts.push_back(Type::getDoubleTy(Context));
+ Elts.push_back(Type::getDoubleTy(Context));
+ Bytes -= 16;
+ } else if (Class[i+1] == X86_64_SSEDF_CLASS && Bytes == 16) {
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
+ Elts.push_back(Type::getDoubleTy(Context));
+ } else if (Class[i+1] == X86_64_INTEGER_CLASS) {
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
+ Elts.push_back(Type::getInt64Ty(Context));
+ } else if (Class[i+1] == X86_64_NO_CLASS) {
+ Elts.push_back(Type::getDoubleTy(Context));
+ Bytes -= 16;
+ } else {
+ assert(0 && "Not yet handled!");
+ }
+ ++i; // Already handled the next one.
+ } else
+ assert(0 && "Not yet handled!");
+ break;
+ case X86_64_SSESF_CLASS:
+ Elts.push_back(Type::getFloatTy(Context));
+ Bytes -= 4;
+ break;
+ case X86_64_SSEDF_CLASS:
+ Elts.push_back(Type::getDoubleTy(Context));
+ Bytes -= 8;
+ break;
+ case X86_64_X87_CLASS:
+ case X86_64_X87UP_CLASS:
+ case X86_64_COMPLEX_X87_CLASS:
+ Elts.push_back(Type::getX86_FP80Ty(Context));
+ break;
+ case X86_64_NO_CLASS:
+ // padding bytes.
+ Elts.push_back(Type::getInt64Ty(Context));
+ break;
+ default: assert(0 && "Unexpected register class!");
+ }
+ }
+}
+
+// Return LLVM Type if TYPE can be returned as an aggregate,
+// otherwise return NULL.
+const Type *llvm_x86_aggr_type_for_struct_return(tree type) {
+ const Type *Ty = ConvertType(type);
+ if (!llvm_suitable_multiple_ret_value_type(Ty, type))
+ return NULL;
+
+ const StructType *STy = cast<StructType>(Ty);
+ std::vector<const Type *> ElementTypes;
+
+ // Special handling for _Complex.
+ if (llvm_x86_should_not_return_complex_in_memory(type)) {
+ ElementTypes.push_back(Type::getX86_FP80Ty(Context));
+ ElementTypes.push_back(Type::getX86_FP80Ty(Context));
+ return StructType::get(Context, ElementTypes, STy->isPacked());
+ }
+
+ std::vector<const Type*> GCCElts;
+ llvm_x86_64_get_multiple_return_reg_classes(type, Ty, GCCElts);
+ return StructType::get(Context, GCCElts, false);
+}
+
+// llvm_x86_extract_mrv_array_element - Helper function that helps extract
+// an array element from a multiple return value.
+//
+// Here, SRC is returning multiple values. DEST's DESTFIELDNO field is an
+// array. Extract SRCFIELDNO's SRCELEMNO value and store it in DEST's
+// DESTFIELDNO field at index DESTELEMNO.
+//
+static void llvm_x86_extract_mrv_array_element(Value *Src, Value *Dest,
+ unsigned SrcFieldNo,
+ unsigned SrcElemNo,
+ unsigned DestFieldNo,
+ unsigned DestElemNo,
+ LLVMBuilder &Builder,
+ bool isVolatile) {
+ Value *EVI = Builder.CreateExtractValue(Src, SrcFieldNo, "mrv_gr");
+ const StructType *STy = cast<StructType>(Src->getType());
+ llvm::Value *Idxs[3];
+ Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), DestFieldNo);
+ Idxs[2] = ConstantInt::get(llvm::Type::getInt32Ty(Context), DestElemNo);
+ Value *GEP = Builder.CreateGEP(Dest, Idxs, Idxs+3, "mrv_gep");
+ if (isa<VectorType>(STy->getElementType(SrcFieldNo))) {
+ Value *ElemIndex = ConstantInt::get(Type::getInt32Ty(Context), SrcElemNo);
+ Value *EVIElem = Builder.CreateExtractElement(EVI, ElemIndex, "mrv");
+ Builder.CreateStore(EVIElem, GEP, isVolatile);
+ } else {
+ Builder.CreateStore(EVI, GEP, isVolatile);
+ }
+}
+
+// llvm_x86_extract_multiple_return_value - Extract multiple values returned
+// by SRC and store them in DEST. It is expected that the SRC and
+// DEST types are StructTypes, but they may not match.
+void llvm_x86_extract_multiple_return_value(Value *Src, Value *Dest,
+ bool isVolatile,
+ LLVMBuilder &Builder) {
+
+ const StructType *STy = cast<StructType>(Src->getType());
+ unsigned NumElements = STy->getNumElements();
+
+ const PointerType *PTy = cast<PointerType>(Dest->getType());
+ const StructType *DestTy = cast<StructType>(PTy->getElementType());
+
+ unsigned SNO = 0;
+ unsigned DNO = 0;
+
+ if (DestTy->getNumElements() == 3
+ && DestTy->getElementType(0)->getTypeID() == Type::FloatTyID
+ && DestTy->getElementType(1)->getTypeID() == Type::FloatTyID
+ && DestTy->getElementType(2)->getTypeID() == Type::FloatTyID) {
+ // DestTy is { float, float, float }
+    // STy is { <4 x float>, float }
+
+ Value *EVI = Builder.CreateExtractValue(Src, 0, "mrv_gr");
+
+ Value *E0Index = ConstantInt::get(Type::getInt32Ty(Context), 0);
+ Value *EVI0 = Builder.CreateExtractElement(EVI, E0Index, "mrv.v");
+ Value *GEP0 = Builder.CreateStructGEP(Dest, 0, "mrv_gep");
+ Builder.CreateStore(EVI0, GEP0, isVolatile);
+
+ Value *E1Index = ConstantInt::get(Type::getInt32Ty(Context), 1);
+ Value *EVI1 = Builder.CreateExtractElement(EVI, E1Index, "mrv.v");
+ Value *GEP1 = Builder.CreateStructGEP(Dest, 1, "mrv_gep");
+ Builder.CreateStore(EVI1, GEP1, isVolatile);
+
+ Value *GEP2 = Builder.CreateStructGEP(Dest, 2, "mrv_gep");
+ Value *EVI2 = Builder.CreateExtractValue(Src, 1, "mrv_gr");
+ Builder.CreateStore(EVI2, GEP2, isVolatile);
+ return;
+ }
+
+ while (SNO < NumElements) {
+
+ const Type *DestElemType = DestTy->getElementType(DNO);
+
+    // Directly access first-class values using extractvalue.
+ if (DestElemType->isSingleValueType()) {
+ Value *GEP = Builder.CreateStructGEP(Dest, DNO, "mrv_gep");
+ Value *EVI = Builder.CreateExtractValue(Src, SNO, "mrv_gr");
+ Builder.CreateStore(EVI, GEP, isVolatile);
+ ++DNO; ++SNO;
+ continue;
+ }
+
+    // Special treatment for _Complex.
+ if (isa<StructType>(DestElemType)) {
+ llvm::Value *Idxs[3];
+ Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), DNO);
+
+ Idxs[2] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
+ Value *GEP = Builder.CreateGEP(Dest, Idxs, Idxs+3, "mrv_gep");
+ Value *EVI = Builder.CreateExtractValue(Src, 0, "mrv_gr");
+ Builder.CreateStore(EVI, GEP, isVolatile);
+ ++SNO;
+
+ Idxs[2] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 1);
+ GEP = Builder.CreateGEP(Dest, Idxs, Idxs+3, "mrv_gep");
+ EVI = Builder.CreateExtractValue(Src, 1, "mrv_gr");
+ Builder.CreateStore(EVI, GEP, isVolatile);
+ ++DNO; ++SNO;
+ continue;
+ }
+
+    // Access array elements individually. Note that the Src and Dest types
+    // may not match. For example, { <2 x float>, float } and { float[3]; }.
+ const ArrayType *ATy = cast<ArrayType>(DestElemType);
+ unsigned ArraySize = ATy->getNumElements();
+ unsigned DElemNo = 0; // DestTy's DNO field's element number
+ while (DElemNo < ArraySize) {
+ unsigned i = 0;
+ unsigned Size = 1;
+
+ if (const VectorType *SElemTy =
+ dyn_cast<VectorType>(STy->getElementType(SNO))) {
+ Size = SElemTy->getNumElements();
+ if (SElemTy->getElementType()->getTypeID() == Type::FloatTyID
+ && Size == 4)
+ // Ignore last two <4 x float> elements.
+ Size = 2;
+ }
+ while (i < Size) {
+ llvm_x86_extract_mrv_array_element(Src, Dest, SNO, i++,
+ DNO, DElemNo++,
+ Builder, isVolatile);
+ }
+ // Consumed this src field. Try next one.
+ ++SNO;
+ }
+ // Finished building current dest field.
+ ++DNO;
+ }
+}
+
+/// llvm_x86_should_pass_aggregate_in_integer_regs - x86-32 is the same as the
+/// default. x86-64 detects the case where a type is 16 bytes long but
+/// only 8 of them are passed, the rest being padding (*size is set to 8
+/// to identify this case). It also pads out the size to that of a full
+/// register. This means we'll be loading bytes off the end of the object
+/// in some cases. That's what gcc does, so it must be OK, right? Right?
+bool llvm_x86_should_pass_aggregate_in_integer_regs(tree type, unsigned *size,
+ bool *DontCheckAlignment) {
+ *size = 0;
+ if (TARGET_64BIT) {
+ enum x86_64_reg_class Class[MAX_CLASSES];
+ enum machine_mode Mode = type_natural_mode(type, NULL);
+ int NumClasses = classify_argument(Mode, type, Class, 0);
+ *DontCheckAlignment= true;
+ if (NumClasses == 1 && (Class[0] == X86_64_INTEGER_CLASS ||
+ Class[0] == X86_64_INTEGERSI_CLASS)) {
+ // one int register
+ HOST_WIDE_INT Bytes =
+ (Mode == BLKmode) ? int_size_in_bytes(type) : (int) GET_MODE_SIZE(Mode);
+ if (Bytes>4)
+ *size = 8;
+ else if (Bytes>2)
+ *size = 4;
+ else
+ *size = Bytes;
+ return true;
+ }
+ if (NumClasses == 2 && (Class[0] == X86_64_INTEGERSI_CLASS ||
+ Class[0] == X86_64_INTEGER_CLASS)) {
+ if (Class[1] == X86_64_INTEGER_CLASS) {
+ // 16 byte object, 2 int registers
+ *size = 16;
+ return true;
+ }
+ // IntegerSI can occur only as element 0.
+ if (Class[1] == X86_64_NO_CLASS) {
+ // 16 byte object, only 1st register has information
+ *size = 8;
+ return true;
+ }
+ }
+ return false;
+ }
+ else
+ return !isSingleElementStructOrArray(type, false, true);
+}
diff --git a/dragonegg/x86/llvm-target.h b/dragonegg/x86/llvm-target.h
new file mode 100644
index 00000000000..bd5ce7bc969
--- /dev/null
+++ b/dragonegg/x86/llvm-target.h
@@ -0,0 +1,971 @@
+/* Some target-specific hooks for gcc->llvm conversion
+Copyright (C) 2007 Free Software Foundation, Inc.
+Contributed by Anton Korobeynikov (asl@math.spbu.ru)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+#ifndef LLVM_TARGET_H
+#define LLVM_TARGET_H
+
+/* LLVM specific stuff for supporting calling convention output */
+#define TARGET_ADJUST_LLVM_CC(CC, type) \
+ { \
+ tree type_attributes = TYPE_ATTRIBUTES (type); \
+ if (lookup_attribute ("stdcall", type_attributes)) { \
+ CC = CallingConv::X86_StdCall; \
+ } else if (lookup_attribute("fastcall", type_attributes)) { \
+ CC = CallingConv::X86_FastCall; \
+ } \
+ }
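
For reference, the kind of C declarations that trigger the calling-convention switch above (hypothetical examples; 32-bit x86 with a GCC-compatible compiler assumed):

// Function types carrying these attributes get their LLVM calling convention
// adjusted by TARGET_ADJUST_LLVM_CC; a plain declaration keeps the default.
int __attribute__((stdcall))  add_std(int a, int b);   // -> CallingConv::X86_StdCall
int __attribute__((fastcall)) add_fast(int a, int b);  // -> CallingConv::X86_FastCall
int                           add_plain(int a, int b); // -> CC left unchanged (C default)
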
+
+#define TARGET_ADJUST_LLVM_RETATTR(RAttributes, type)             \
+ { \
+ tree type_attributes = TYPE_ATTRIBUTES (type); \
+ if (!TARGET_64BIT && (TARGET_SSEREGPARM || \
+ lookup_attribute("sseregparm", type_attributes)))\
+ RAttributes |= Attribute::InReg; \
+ }
+
+/* LLVM specific stuff for converting gcc's `regparm` attribute to LLVM's
+ `inreg` parameter attribute */
+#define LLVM_TARGET_ENABLE_REGPARM
+
+extern "C" int ix86_regparm;
+
+#define LLVM_TARGET_INIT_REGPARM(local_regparm, local_fp_regparm, type) \
+ { \
+ tree attr; \
+ local_regparm = ix86_regparm; \
+ local_fp_regparm = TARGET_SSEREGPARM ? 3 : 0; \
+ attr = lookup_attribute ("regparm", \
+ TYPE_ATTRIBUTES (type)); \
+ if (attr) { \
+ local_regparm = TREE_INT_CST_LOW (TREE_VALUE \
+ (TREE_VALUE (attr))); \
+ } \
+ attr = lookup_attribute("sseregparm", \
+ TYPE_ATTRIBUTES (type)); \
+ if (attr) \
+ local_fp_regparm = 3; \
+ }
+
+#define LLVM_ADJUST_REGPARM_ATTRIBUTE(PAttribute, Type, Size, \
+ local_regparm, \
+ local_fp_regparm) \
+ { \
+ if (!TARGET_64BIT) { \
+ if (TREE_CODE(Type) == REAL_TYPE && \
+ (TYPE_PRECISION(Type)==32 || \
+ TYPE_PRECISION(Type)==64)) { \
+ local_fp_regparm -= 1; \
+ if (local_fp_regparm >= 0) \
+ PAttribute |= Attribute::InReg; \
+ else \
+ local_fp_regparm = 0; \
+ } else if (INTEGRAL_TYPE_P(Type) || \
+ POINTER_TYPE_P(Type)) { \
+ int words = \
+ (Size + BITS_PER_WORD - 1) / BITS_PER_WORD; \
+ local_regparm -= words; \
+ if (local_regparm>=0) \
+ PAttribute |= Attribute::InReg; \
+ else \
+ local_regparm = 0; \
+ } \
+ } \
+ }
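
A hypothetical example of what the regparm bookkeeping above amounts to: with regparm(3) on 32-bit x86, GCC passes the first three integer words in EAX, EDX and ECX, so the first three parameters below would be marked InReg and the fourth falls back to the stack.

// Illustrative declaration only (not part of the patch); assumes 32-bit x86.
int __attribute__((regparm(3))) sum4(int a, int b, int c, int d);
// a, b, c -> InReg (EAX, EDX, ECX); d -> stack.
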
+
+#define LLVM_SET_RED_ZONE_FLAG(disable_red_zone) \
+ if (TARGET_64BIT && TARGET_NO_RED_ZONE) \
+ disable_red_zone = 1;
+
+#ifdef LLVM_ABI_H
+
+/* On x86-32, objects containing SSE vectors are 16-byte aligned; everything
+   else is 4-byte aligned.  On x86-64, vectors are 8-byte aligned; everything
+   else can be figured out by the back end. */
+extern "C" bool contains_aligned_value_p(tree);
+#define LLVM_BYVAL_ALIGNMENT(T) \
+ (TARGET_64BIT ? (TREE_CODE(T)==VECTOR_TYPE ? 8 : 0) : \
+ TARGET_SSE && contains_aligned_value_p(T) ? 16 : 4)
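
A hedged illustration of the byval alignment choice above (hypothetical structs; x86-32 with SSE enabled assumed):

#include <xmmintrin.h>

// contains_aligned_value_p is true for the first struct, so it gets byval
// align 16; the second gets the default 4.  On x86-64, only vector types get
// an explicit alignment (8); returning 0 leaves the choice to the backend.
struct HasVec { __m128 v; int tag; };  // byval align 16 on x86-32 with SSE
struct NoVec  { int a; double d; };    // byval align 4 on x86-32
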
+
+extern tree llvm_x86_should_return_selt_struct_as_scalar(tree);
+
+/* Structs containing a single data field plus zero-length fields are
+ considered as if they were the type of the data field. On x86-64,
+ if the element type is an MMX vector, return it as double (which will
+ get it into XMM0). */
+
+#define LLVM_SHOULD_RETURN_SELT_STRUCT_AS_SCALAR(X) \
+ llvm_x86_should_return_selt_struct_as_scalar((X))
+
+extern bool llvm_x86_should_pass_aggregate_in_integer_regs(tree,
+ unsigned*, bool*);
+
+/* LLVM_SHOULD_PASS_AGGREGATE_IN_INTEGER_REGS - Return true if this aggregate
+ value should be passed in integer registers. This differs from the usual
+ handling in that x86-64 passes 128-bit structs and unions which only
+ contain data in the first 64 bits, as 64-bit objects. (These can be
+   created by abusing __attribute__((aligned)).) */
+#define LLVM_SHOULD_PASS_AGGREGATE_IN_INTEGER_REGS(X, Y, Z) \
+ llvm_x86_should_pass_aggregate_in_integer_regs((X), (Y), (Z))
+
+extern const Type *llvm_x86_scalar_type_for_struct_return(tree type,
+ unsigned *Offset);
+
+/* LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN - Return LLVM Type if X can be
+ returned as a scalar, otherwise return NULL. */
+#define LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN(X, Y) \
+ llvm_x86_scalar_type_for_struct_return((X), (Y))
+
+extern const Type *llvm_x86_aggr_type_for_struct_return(tree type);
+
+/* LLVM_AGGR_TYPE_FOR_STRUCT_RETURN - Return LLVM Type if X can be
+ returned as an aggregate, otherwise return NULL. */
+#define LLVM_AGGR_TYPE_FOR_STRUCT_RETURN(X) \
+ llvm_x86_aggr_type_for_struct_return(X)
+
+extern void llvm_x86_extract_multiple_return_value(Value *Src, Value *Dest,
+ bool isVolatile,
+ LLVMBuilder &B);
+
+/* LLVM_EXTRACT_MULTIPLE_RETURN_VALUE - Extract multiple return value from
+ SRC and assign it to DEST. */
+#define LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(Src,Dest,V,B) \
+ llvm_x86_extract_multiple_return_value((Src),(Dest),(V),(B))
+
+extern bool llvm_x86_should_pass_vector_using_byval_attr(tree);
+
+/* On Darwin x86-64, vectors bigger than 128 bits should be passed byval
+   (in memory). */
+#define LLVM_SHOULD_PASS_VECTOR_USING_BYVAL_ATTR(X) \
+ llvm_x86_should_pass_vector_using_byval_attr((X))
+
+extern bool llvm_x86_should_pass_vector_in_integer_regs(tree);
+
+/* On Darwin x86-32, vectors which are neither MMX nor SSE should be passed as
+   integers. */
+#define LLVM_SHOULD_PASS_VECTOR_IN_INTEGER_REGS(X) \
+ llvm_x86_should_pass_vector_in_integer_regs((X))
+
+extern tree llvm_x86_should_return_vector_as_scalar(tree, bool);
+
+/* The MMX vector v1i64 is returned in EAX and EDX on Darwin. Communicate
+ this by returning i64 here. Likewise, (generic) vectors such as v2i16
+ are returned in EAX.
+ On Darwin x86-64, MMX vectors are returned in XMM0. Communicate this by
+ returning f64. */
+#define LLVM_SHOULD_RETURN_VECTOR_AS_SCALAR(X,isBuiltin)\
+ llvm_x86_should_return_vector_as_scalar((X), (isBuiltin))
+
+extern bool llvm_x86_should_return_vector_as_shadow(tree, bool);
+
+/* MMX vectors v2i32, v4i16, v8i8, v2f32 are returned using sret on Darwin
+   32-bit. Vectors bigger than 128 bits are returned using sret. */
+#define LLVM_SHOULD_RETURN_VECTOR_AS_SHADOW(X,isBuiltin)\
+ llvm_x86_should_return_vector_as_shadow((X),(isBuiltin))
+
+extern bool
+llvm_x86_should_not_return_complex_in_memory(tree type);
+
+/* LLVM_SHOULD_NOT_RETURN_COMPLEX_IN_MEMORY - A hook to allow
+ special _Complex handling. Return true if X should be returned using
+ multiple value return instruction. */
+#define LLVM_SHOULD_NOT_RETURN_COMPLEX_IN_MEMORY(X) \
+ llvm_x86_should_not_return_complex_in_memory((X))
+
+extern bool
+llvm_x86_should_pass_aggregate_as_fca(tree type, const Type *);
+
+/* LLVM_SHOULD_PASS_AGGREGATE_AS_FCA - Return true if an aggregate of the
+ specified type should be passed as a first-class aggregate. */
+#ifndef LLVM_SHOULD_PASS_AGGREGATE_AS_FCA
+#define LLVM_SHOULD_PASS_AGGREGATE_AS_FCA(X, TY) \
+ llvm_x86_should_pass_aggregate_as_fca(X, TY)
+#endif
+
+extern bool llvm_x86_should_pass_aggregate_in_memory(tree, const Type *);
+
+#define LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(X, TY) \
+ llvm_x86_should_pass_aggregate_in_memory(X, TY)
+
+
+extern bool
+llvm_x86_64_should_pass_aggregate_in_mixed_regs(tree, const Type *Ty,
+ std::vector<const Type*>&);
+extern bool
+llvm_x86_32_should_pass_aggregate_in_mixed_regs(tree, const Type *Ty,
+ std::vector<const Type*>&);
+
+#define LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(T, TY, CC, E) \
+ (TARGET_64BIT ? \
+ llvm_x86_64_should_pass_aggregate_in_mixed_regs((T), (TY), (E)) : \
+ llvm_x86_32_should_pass_aggregate_in_mixed_regs((T), (TY), (E)))
+
+extern
+bool llvm_x86_64_aggregate_partially_passed_in_regs(std::vector<const Type*>&,
+ std::vector<const Type*>&,
+ bool);
+
+#define LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(E, SE, ISR, CC) \
+ (TARGET_64BIT ? \
+ llvm_x86_64_aggregate_partially_passed_in_regs((E), (SE), (ISR)) : \
+ false)
+
+#endif /* LLVM_ABI_H */
+
+/* Register class used for passing given 64bit part of the argument.
+ These represent classes as documented by the PS ABI, with the exception
+ of SSESF, SSEDF classes, that are basically SSE class, just gcc will
+ use SF or DFmode move instead of DImode to avoid reformatting penalties.
+
+ Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
+ whenever possible (upper half does contain padding).
+ */
+enum x86_64_reg_class
+ {
+ X86_64_NO_CLASS,
+ X86_64_INTEGER_CLASS,
+ X86_64_INTEGERSI_CLASS,
+ X86_64_SSE_CLASS,
+ X86_64_SSESF_CLASS,
+ X86_64_SSEDF_CLASS,
+ X86_64_SSEUP_CLASS,
+ X86_64_X87_CLASS,
+ X86_64_X87UP_CLASS,
+ X86_64_COMPLEX_X87_CLASS,
+ X86_64_MEMORY_CLASS
+ };
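
To tie the enum above to concrete types, here is how some simple C aggregates classify per eightbyte under the standard x86-64 psABI (hypothetical examples, not from the patch):

// Per-eightbyte classification examples for the register classes above.
struct IntPair  { long a, b; };        // INTEGER, INTEGER -> two GPRs
struct Mixed    { double d; int i; };  // SSE, INTEGERSI   -> one XMM + one GPR
struct AllFloat { float x, y, z, w; }; // SSE, SSE         -> two XMM registers
struct LongDbl  { long double v; };    // X87, X87UP       -> passed in memory
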
+
+/* Codes for all the SSE/MMX builtins. */
+enum ix86_builtins
+{
+ IX86_BUILTIN_ADDPS,
+ IX86_BUILTIN_ADDSS,
+ IX86_BUILTIN_DIVPS,
+ IX86_BUILTIN_DIVSS,
+ IX86_BUILTIN_MULPS,
+ IX86_BUILTIN_MULSS,
+ IX86_BUILTIN_SUBPS,
+ IX86_BUILTIN_SUBSS,
+
+ IX86_BUILTIN_CMPEQPS,
+ IX86_BUILTIN_CMPLTPS,
+ IX86_BUILTIN_CMPLEPS,
+ IX86_BUILTIN_CMPGTPS,
+ IX86_BUILTIN_CMPGEPS,
+ IX86_BUILTIN_CMPNEQPS,
+ IX86_BUILTIN_CMPNLTPS,
+ IX86_BUILTIN_CMPNLEPS,
+ IX86_BUILTIN_CMPNGTPS,
+ IX86_BUILTIN_CMPNGEPS,
+ IX86_BUILTIN_CMPORDPS,
+ IX86_BUILTIN_CMPUNORDPS,
+ IX86_BUILTIN_CMPNEPS,
+ IX86_BUILTIN_CMPEQSS,
+ IX86_BUILTIN_CMPLTSS,
+ IX86_BUILTIN_CMPLESS,
+ IX86_BUILTIN_CMPNEQSS,
+ IX86_BUILTIN_CMPNLTSS,
+ IX86_BUILTIN_CMPNLESS,
+ IX86_BUILTIN_CMPNGTSS,
+ IX86_BUILTIN_CMPNGESS,
+ IX86_BUILTIN_CMPORDSS,
+ IX86_BUILTIN_CMPUNORDSS,
+ IX86_BUILTIN_CMPNESS,
+
+ IX86_BUILTIN_COMIEQSS,
+ IX86_BUILTIN_COMILTSS,
+ IX86_BUILTIN_COMILESS,
+ IX86_BUILTIN_COMIGTSS,
+ IX86_BUILTIN_COMIGESS,
+ IX86_BUILTIN_COMINEQSS,
+ IX86_BUILTIN_UCOMIEQSS,
+ IX86_BUILTIN_UCOMILTSS,
+ IX86_BUILTIN_UCOMILESS,
+ IX86_BUILTIN_UCOMIGTSS,
+ IX86_BUILTIN_UCOMIGESS,
+ IX86_BUILTIN_UCOMINEQSS,
+
+ IX86_BUILTIN_CVTPI2PS,
+ IX86_BUILTIN_CVTPS2PI,
+ IX86_BUILTIN_CVTSI2SS,
+ IX86_BUILTIN_CVTSI642SS,
+ IX86_BUILTIN_CVTSS2SI,
+ IX86_BUILTIN_CVTSS2SI64,
+ IX86_BUILTIN_CVTTPS2PI,
+ IX86_BUILTIN_CVTTSS2SI,
+ IX86_BUILTIN_CVTTSS2SI64,
+
+ IX86_BUILTIN_MAXPS,
+ IX86_BUILTIN_MAXSS,
+ IX86_BUILTIN_MINPS,
+ IX86_BUILTIN_MINSS,
+
+ IX86_BUILTIN_LOADUPS,
+ IX86_BUILTIN_STOREUPS,
+ IX86_BUILTIN_MOVSS,
+
+ IX86_BUILTIN_MOVHLPS,
+ IX86_BUILTIN_MOVLHPS,
+ IX86_BUILTIN_LOADHPS,
+ IX86_BUILTIN_LOADLPS,
+ IX86_BUILTIN_STOREHPS,
+ IX86_BUILTIN_STORELPS,
+
+ IX86_BUILTIN_MASKMOVQ,
+ IX86_BUILTIN_MOVMSKPS,
+ IX86_BUILTIN_PMOVMSKB,
+
+ IX86_BUILTIN_MOVNTPS,
+ IX86_BUILTIN_MOVNTQ,
+
+ IX86_BUILTIN_LOADDQU,
+ IX86_BUILTIN_STOREDQU,
+
+ IX86_BUILTIN_PACKSSWB,
+ IX86_BUILTIN_PACKSSDW,
+ IX86_BUILTIN_PACKUSWB,
+
+ IX86_BUILTIN_PADDB,
+ IX86_BUILTIN_PADDW,
+ IX86_BUILTIN_PADDD,
+ IX86_BUILTIN_PADDQ,
+ IX86_BUILTIN_PADDSB,
+ IX86_BUILTIN_PADDSW,
+ IX86_BUILTIN_PADDUSB,
+ IX86_BUILTIN_PADDUSW,
+ IX86_BUILTIN_PSUBB,
+ IX86_BUILTIN_PSUBW,
+ IX86_BUILTIN_PSUBD,
+ IX86_BUILTIN_PSUBQ,
+ IX86_BUILTIN_PSUBSB,
+ IX86_BUILTIN_PSUBSW,
+ IX86_BUILTIN_PSUBUSB,
+ IX86_BUILTIN_PSUBUSW,
+
+ IX86_BUILTIN_PAND,
+ IX86_BUILTIN_PANDN,
+ IX86_BUILTIN_POR,
+ IX86_BUILTIN_PXOR,
+
+ IX86_BUILTIN_PAVGB,
+ IX86_BUILTIN_PAVGW,
+
+ IX86_BUILTIN_PCMPEQB,
+ IX86_BUILTIN_PCMPEQW,
+ IX86_BUILTIN_PCMPEQD,
+ IX86_BUILTIN_PCMPGTB,
+ IX86_BUILTIN_PCMPGTW,
+ IX86_BUILTIN_PCMPGTD,
+
+ IX86_BUILTIN_PMADDWD,
+
+ IX86_BUILTIN_PMAXSW,
+ IX86_BUILTIN_PMAXUB,
+ IX86_BUILTIN_PMINSW,
+ IX86_BUILTIN_PMINUB,
+
+ IX86_BUILTIN_PMULHUW,
+ IX86_BUILTIN_PMULHW,
+ IX86_BUILTIN_PMULLW,
+
+ IX86_BUILTIN_PSADBW,
+ IX86_BUILTIN_PSHUFW,
+
+ IX86_BUILTIN_PSLLW,
+ IX86_BUILTIN_PSLLD,
+ IX86_BUILTIN_PSLLQ,
+ IX86_BUILTIN_PSRAW,
+ IX86_BUILTIN_PSRAD,
+ IX86_BUILTIN_PSRLW,
+ IX86_BUILTIN_PSRLD,
+ IX86_BUILTIN_PSRLQ,
+ IX86_BUILTIN_PSLLWI,
+ IX86_BUILTIN_PSLLDI,
+ IX86_BUILTIN_PSLLQI,
+ IX86_BUILTIN_PSRAWI,
+ IX86_BUILTIN_PSRADI,
+ IX86_BUILTIN_PSRLWI,
+ IX86_BUILTIN_PSRLDI,
+ IX86_BUILTIN_PSRLQI,
+
+ IX86_BUILTIN_PUNPCKHBW,
+ IX86_BUILTIN_PUNPCKHWD,
+ IX86_BUILTIN_PUNPCKHDQ,
+ IX86_BUILTIN_PUNPCKLBW,
+ IX86_BUILTIN_PUNPCKLWD,
+ IX86_BUILTIN_PUNPCKLDQ,
+
+ IX86_BUILTIN_SHUFPS,
+
+ IX86_BUILTIN_RCPPS,
+ IX86_BUILTIN_RCPSS,
+ IX86_BUILTIN_RSQRTPS,
+ IX86_BUILTIN_RSQRTSS,
+ IX86_BUILTIN_SQRTPS,
+ IX86_BUILTIN_SQRTSS,
+
+ IX86_BUILTIN_UNPCKHPS,
+ IX86_BUILTIN_UNPCKLPS,
+
+ IX86_BUILTIN_ANDPS,
+ IX86_BUILTIN_ANDNPS,
+ IX86_BUILTIN_ORPS,
+ IX86_BUILTIN_XORPS,
+
+ IX86_BUILTIN_EMMS,
+ IX86_BUILTIN_LDMXCSR,
+ IX86_BUILTIN_STMXCSR,
+ IX86_BUILTIN_SFENCE,
+
+ /* 3DNow! Original */
+ IX86_BUILTIN_FEMMS,
+ IX86_BUILTIN_PAVGUSB,
+ IX86_BUILTIN_PF2ID,
+ IX86_BUILTIN_PFACC,
+ IX86_BUILTIN_PFADD,
+ IX86_BUILTIN_PFCMPEQ,
+ IX86_BUILTIN_PFCMPGE,
+ IX86_BUILTIN_PFCMPGT,
+ IX86_BUILTIN_PFMAX,
+ IX86_BUILTIN_PFMIN,
+ IX86_BUILTIN_PFMUL,
+ IX86_BUILTIN_PFRCP,
+ IX86_BUILTIN_PFRCPIT1,
+ IX86_BUILTIN_PFRCPIT2,
+ IX86_BUILTIN_PFRSQIT1,
+ IX86_BUILTIN_PFRSQRT,
+ IX86_BUILTIN_PFSUB,
+ IX86_BUILTIN_PFSUBR,
+ IX86_BUILTIN_PI2FD,
+ IX86_BUILTIN_PMULHRW,
+
+ /* 3DNow! Athlon Extensions */
+ IX86_BUILTIN_PF2IW,
+ IX86_BUILTIN_PFNACC,
+ IX86_BUILTIN_PFPNACC,
+ IX86_BUILTIN_PI2FW,
+ IX86_BUILTIN_PSWAPDSI,
+ IX86_BUILTIN_PSWAPDSF,
+
+ /* SSE2 */
+ IX86_BUILTIN_ADDPD,
+ IX86_BUILTIN_ADDSD,
+ IX86_BUILTIN_DIVPD,
+ IX86_BUILTIN_DIVSD,
+ IX86_BUILTIN_MULPD,
+ IX86_BUILTIN_MULSD,
+ IX86_BUILTIN_SUBPD,
+ IX86_BUILTIN_SUBSD,
+
+ IX86_BUILTIN_CMPEQPD,
+ IX86_BUILTIN_CMPLTPD,
+ IX86_BUILTIN_CMPLEPD,
+ IX86_BUILTIN_CMPGTPD,
+ IX86_BUILTIN_CMPGEPD,
+ IX86_BUILTIN_CMPNEQPD,
+ IX86_BUILTIN_CMPNLTPD,
+ IX86_BUILTIN_CMPNLEPD,
+ IX86_BUILTIN_CMPNGTPD,
+ IX86_BUILTIN_CMPNGEPD,
+ IX86_BUILTIN_CMPORDPD,
+ IX86_BUILTIN_CMPUNORDPD,
+ IX86_BUILTIN_CMPNEPD,
+ IX86_BUILTIN_CMPEQSD,
+ IX86_BUILTIN_CMPLTSD,
+ IX86_BUILTIN_CMPLESD,
+ IX86_BUILTIN_CMPNEQSD,
+ IX86_BUILTIN_CMPNLTSD,
+ IX86_BUILTIN_CMPNLESD,
+ IX86_BUILTIN_CMPORDSD,
+ IX86_BUILTIN_CMPUNORDSD,
+ IX86_BUILTIN_CMPNESD,
+
+ IX86_BUILTIN_COMIEQSD,
+ IX86_BUILTIN_COMILTSD,
+ IX86_BUILTIN_COMILESD,
+ IX86_BUILTIN_COMIGTSD,
+ IX86_BUILTIN_COMIGESD,
+ IX86_BUILTIN_COMINEQSD,
+ IX86_BUILTIN_UCOMIEQSD,
+ IX86_BUILTIN_UCOMILTSD,
+ IX86_BUILTIN_UCOMILESD,
+ IX86_BUILTIN_UCOMIGTSD,
+ IX86_BUILTIN_UCOMIGESD,
+ IX86_BUILTIN_UCOMINEQSD,
+
+ IX86_BUILTIN_MAXPD,
+ IX86_BUILTIN_MAXSD,
+ IX86_BUILTIN_MINPD,
+ IX86_BUILTIN_MINSD,
+
+ IX86_BUILTIN_ANDPD,
+ IX86_BUILTIN_ANDNPD,
+ IX86_BUILTIN_ORPD,
+ IX86_BUILTIN_XORPD,
+
+ IX86_BUILTIN_SQRTPD,
+ IX86_BUILTIN_SQRTSD,
+
+ IX86_BUILTIN_UNPCKHPD,
+ IX86_BUILTIN_UNPCKLPD,
+
+ IX86_BUILTIN_SHUFPD,
+
+ IX86_BUILTIN_LOADUPD,
+ IX86_BUILTIN_STOREUPD,
+ IX86_BUILTIN_MOVSD,
+
+ IX86_BUILTIN_LOADHPD,
+ IX86_BUILTIN_LOADLPD,
+
+ IX86_BUILTIN_CVTDQ2PD,
+ IX86_BUILTIN_CVTDQ2PS,
+
+ IX86_BUILTIN_CVTPD2DQ,
+ IX86_BUILTIN_CVTPD2PI,
+ IX86_BUILTIN_CVTPD2PS,
+ IX86_BUILTIN_CVTTPD2DQ,
+ IX86_BUILTIN_CVTTPD2PI,
+
+ IX86_BUILTIN_CVTPI2PD,
+ IX86_BUILTIN_CVTSI2SD,
+ IX86_BUILTIN_CVTSI642SD,
+
+ IX86_BUILTIN_CVTSD2SI,
+ IX86_BUILTIN_CVTSD2SI64,
+ IX86_BUILTIN_CVTSD2SS,
+ IX86_BUILTIN_CVTSS2SD,
+ IX86_BUILTIN_CVTTSD2SI,
+ IX86_BUILTIN_CVTTSD2SI64,
+
+ IX86_BUILTIN_CVTPS2DQ,
+ IX86_BUILTIN_CVTPS2PD,
+ IX86_BUILTIN_CVTTPS2DQ,
+
+ IX86_BUILTIN_MOVNTI,
+ IX86_BUILTIN_MOVNTPD,
+ IX86_BUILTIN_MOVNTDQ,
+
+ /* SSE2 MMX */
+ IX86_BUILTIN_MASKMOVDQU,
+ IX86_BUILTIN_MOVMSKPD,
+ IX86_BUILTIN_PMOVMSKB128,
+
+ /* APPLE LOCAL begin 4099020 */
+ IX86_BUILTIN_MOVQ,
+ IX86_BUILTIN_LOADQ,
+ IX86_BUILTIN_STOREQ,
+ /* APPLE LOCAL end 4099020 */
+
+ IX86_BUILTIN_PACKSSWB128,
+ IX86_BUILTIN_PACKSSDW128,
+ IX86_BUILTIN_PACKUSWB128,
+
+ IX86_BUILTIN_PADDB128,
+ IX86_BUILTIN_PADDW128,
+ IX86_BUILTIN_PADDD128,
+ IX86_BUILTIN_PADDQ128,
+ IX86_BUILTIN_PADDSB128,
+ IX86_BUILTIN_PADDSW128,
+ IX86_BUILTIN_PADDUSB128,
+ IX86_BUILTIN_PADDUSW128,
+ IX86_BUILTIN_PSUBB128,
+ IX86_BUILTIN_PSUBW128,
+ IX86_BUILTIN_PSUBD128,
+ IX86_BUILTIN_PSUBQ128,
+ IX86_BUILTIN_PSUBSB128,
+ IX86_BUILTIN_PSUBSW128,
+ IX86_BUILTIN_PSUBUSB128,
+ IX86_BUILTIN_PSUBUSW128,
+
+ IX86_BUILTIN_PAND128,
+ IX86_BUILTIN_PANDN128,
+ IX86_BUILTIN_POR128,
+ IX86_BUILTIN_PXOR128,
+
+ IX86_BUILTIN_PAVGB128,
+ IX86_BUILTIN_PAVGW128,
+
+ IX86_BUILTIN_PCMPEQB128,
+ IX86_BUILTIN_PCMPEQW128,
+ IX86_BUILTIN_PCMPEQD128,
+ IX86_BUILTIN_PCMPGTB128,
+ IX86_BUILTIN_PCMPGTW128,
+ IX86_BUILTIN_PCMPGTD128,
+
+ IX86_BUILTIN_PMADDWD128,
+
+ IX86_BUILTIN_PMAXSW128,
+ IX86_BUILTIN_PMAXUB128,
+ IX86_BUILTIN_PMINSW128,
+ IX86_BUILTIN_PMINUB128,
+
+ IX86_BUILTIN_PMULUDQ,
+ IX86_BUILTIN_PMULUDQ128,
+ IX86_BUILTIN_PMULHUW128,
+ IX86_BUILTIN_PMULHW128,
+ IX86_BUILTIN_PMULLW128,
+
+ IX86_BUILTIN_PSADBW128,
+ IX86_BUILTIN_PSHUFHW,
+ IX86_BUILTIN_PSHUFLW,
+ IX86_BUILTIN_PSHUFD,
+
+ IX86_BUILTIN_PSLLW128,
+ IX86_BUILTIN_PSLLD128,
+ IX86_BUILTIN_PSLLQ128,
+ IX86_BUILTIN_PSRAW128,
+ IX86_BUILTIN_PSRAD128,
+ IX86_BUILTIN_PSRLW128,
+ IX86_BUILTIN_PSRLD128,
+ IX86_BUILTIN_PSRLQ128,
+ IX86_BUILTIN_PSLLDQI128,
+ /* APPLE LOCAL 591583 */
+ IX86_BUILTIN_PSLLDQI128_BYTESHIFT,
+ IX86_BUILTIN_PSLLWI128,
+ IX86_BUILTIN_PSLLDI128,
+ IX86_BUILTIN_PSLLQI128,
+ IX86_BUILTIN_PSRAWI128,
+ IX86_BUILTIN_PSRADI128,
+ IX86_BUILTIN_PSRLDQI128,
+ /* APPLE LOCAL 591583 */
+ IX86_BUILTIN_PSRLDQI128_BYTESHIFT,
+ IX86_BUILTIN_PSRLWI128,
+ IX86_BUILTIN_PSRLDI128,
+ IX86_BUILTIN_PSRLQI128,
+
+ IX86_BUILTIN_PUNPCKHBW128,
+ IX86_BUILTIN_PUNPCKHWD128,
+ IX86_BUILTIN_PUNPCKHDQ128,
+ IX86_BUILTIN_PUNPCKHQDQ128,
+ IX86_BUILTIN_PUNPCKLBW128,
+ IX86_BUILTIN_PUNPCKLWD128,
+ IX86_BUILTIN_PUNPCKLDQ128,
+ IX86_BUILTIN_PUNPCKLQDQ128,
+
+ IX86_BUILTIN_CLFLUSH,
+ IX86_BUILTIN_MFENCE,
+ IX86_BUILTIN_LFENCE,
+
+ /* Prescott New Instructions. */
+ IX86_BUILTIN_ADDSUBPS,
+ IX86_BUILTIN_HADDPS,
+ IX86_BUILTIN_HSUBPS,
+ IX86_BUILTIN_MOVSHDUP,
+ IX86_BUILTIN_MOVSLDUP,
+ IX86_BUILTIN_ADDSUBPD,
+ IX86_BUILTIN_HADDPD,
+ IX86_BUILTIN_HSUBPD,
+ IX86_BUILTIN_LDDQU,
+
+ IX86_BUILTIN_MONITOR,
+ IX86_BUILTIN_MWAIT,
+
+ /* Merom New Instructions. */
+ IX86_BUILTIN_PHADDW,
+ IX86_BUILTIN_PHADDD,
+ IX86_BUILTIN_PHADDSW,
+ IX86_BUILTIN_PHSUBW,
+ IX86_BUILTIN_PHSUBD,
+ IX86_BUILTIN_PHSUBSW,
+ IX86_BUILTIN_PMADDUBSW,
+ IX86_BUILTIN_PMULHRSW,
+ IX86_BUILTIN_PSHUFB,
+ IX86_BUILTIN_PSIGNB,
+ IX86_BUILTIN_PSIGNW,
+ IX86_BUILTIN_PSIGND,
+ IX86_BUILTIN_PALIGNR,
+ IX86_BUILTIN_PABSB,
+ IX86_BUILTIN_PABSW,
+ IX86_BUILTIN_PABSD,
+
+ IX86_BUILTIN_PHADDW128,
+ IX86_BUILTIN_PHADDD128,
+ IX86_BUILTIN_PHADDSW128,
+ IX86_BUILTIN_PHSUBW128,
+ IX86_BUILTIN_PHSUBD128,
+ IX86_BUILTIN_PHSUBSW128,
+ IX86_BUILTIN_PMADDUBSW128,
+ IX86_BUILTIN_PMULHRSW128,
+ IX86_BUILTIN_PSHUFB128,
+ IX86_BUILTIN_PSIGNB128,
+ IX86_BUILTIN_PSIGNW128,
+ IX86_BUILTIN_PSIGND128,
+ IX86_BUILTIN_PALIGNR128,
+ IX86_BUILTIN_PABSB128,
+ IX86_BUILTIN_PABSW128,
+ IX86_BUILTIN_PABSD128,
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ /* AMDFAM10 - SSE4A New Instructions. */
+ IX86_BUILTIN_MOVNTSD,
+ IX86_BUILTIN_MOVNTSS,
+ IX86_BUILTIN_EXTRQI,
+ IX86_BUILTIN_EXTRQ,
+ IX86_BUILTIN_INSERTQI,
+ IX86_BUILTIN_INSERTQ,
+
+ /* SSE4.1. */
+ IX86_BUILTIN_BLENDPD,
+ IX86_BUILTIN_BLENDPS,
+ IX86_BUILTIN_BLENDVPD,
+ IX86_BUILTIN_BLENDVPS,
+ IX86_BUILTIN_PBLENDVB128,
+ IX86_BUILTIN_PBLENDW128,
+
+ IX86_BUILTIN_DPPD,
+ IX86_BUILTIN_DPPS,
+
+ IX86_BUILTIN_INSERTPS128,
+
+ IX86_BUILTIN_MOVNTDQA,
+ IX86_BUILTIN_MPSADBW128,
+ IX86_BUILTIN_PACKUSDW128,
+ IX86_BUILTIN_PCMPEQQ,
+ IX86_BUILTIN_PHMINPOSUW128,
+
+ IX86_BUILTIN_PMAXSB128,
+ IX86_BUILTIN_PMAXSD128,
+ IX86_BUILTIN_PMAXUD128,
+ IX86_BUILTIN_PMAXUW128,
+
+ IX86_BUILTIN_PMINSB128,
+ IX86_BUILTIN_PMINSD128,
+ IX86_BUILTIN_PMINUD128,
+ IX86_BUILTIN_PMINUW128,
+
+ IX86_BUILTIN_PMOVSXBW128,
+ IX86_BUILTIN_PMOVSXBD128,
+ IX86_BUILTIN_PMOVSXBQ128,
+ IX86_BUILTIN_PMOVSXWD128,
+ IX86_BUILTIN_PMOVSXWQ128,
+ IX86_BUILTIN_PMOVSXDQ128,
+
+ IX86_BUILTIN_PMOVZXBW128,
+ IX86_BUILTIN_PMOVZXBD128,
+ IX86_BUILTIN_PMOVZXBQ128,
+ IX86_BUILTIN_PMOVZXWD128,
+ IX86_BUILTIN_PMOVZXWQ128,
+ IX86_BUILTIN_PMOVZXDQ128,
+
+ IX86_BUILTIN_PMULDQ128,
+ IX86_BUILTIN_PMULLD128,
+
+ IX86_BUILTIN_ROUNDPD,
+ IX86_BUILTIN_ROUNDPS,
+ IX86_BUILTIN_ROUNDSD,
+ IX86_BUILTIN_ROUNDSS,
+
+ IX86_BUILTIN_PTESTZ,
+ IX86_BUILTIN_PTESTC,
+ IX86_BUILTIN_PTESTNZC,
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ /* APPLE LOCAL end mainline */
+ IX86_BUILTIN_VEC_INIT_V2SI,
+ IX86_BUILTIN_VEC_INIT_V4HI,
+ IX86_BUILTIN_VEC_INIT_V8QI,
+ IX86_BUILTIN_VEC_EXT_V2DF,
+ IX86_BUILTIN_VEC_EXT_V2DI,
+ IX86_BUILTIN_VEC_EXT_V4SF,
+ IX86_BUILTIN_VEC_EXT_V4SI,
+ IX86_BUILTIN_VEC_EXT_V8HI,
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ /* deletion */
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ IX86_BUILTIN_VEC_EXT_V2SI,
+ IX86_BUILTIN_VEC_EXT_V4HI,
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ IX86_BUILTIN_VEC_EXT_V16QI,
+ IX86_BUILTIN_VEC_SET_V2DI,
+ IX86_BUILTIN_VEC_SET_V4SF,
+ IX86_BUILTIN_VEC_SET_V4SI,
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ IX86_BUILTIN_VEC_SET_V8HI,
+ IX86_BUILTIN_VEC_SET_V4HI,
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ IX86_BUILTIN_VEC_SET_V16QI,
+
+ IX86_BUILTIN_VEC_PACK_SFIX,
+
+ /* SSE4.2. */
+ IX86_BUILTIN_CRC32QI,
+ IX86_BUILTIN_CRC32HI,
+ IX86_BUILTIN_CRC32SI,
+ IX86_BUILTIN_CRC32DI,
+
+ IX86_BUILTIN_PCMPESTRI128,
+ IX86_BUILTIN_PCMPESTRM128,
+ IX86_BUILTIN_PCMPESTRA128,
+ IX86_BUILTIN_PCMPESTRC128,
+ IX86_BUILTIN_PCMPESTRO128,
+ IX86_BUILTIN_PCMPESTRS128,
+ IX86_BUILTIN_PCMPESTRZ128,
+ IX86_BUILTIN_PCMPISTRI128,
+ IX86_BUILTIN_PCMPISTRM128,
+ IX86_BUILTIN_PCMPISTRA128,
+ IX86_BUILTIN_PCMPISTRC128,
+ IX86_BUILTIN_PCMPISTRO128,
+ IX86_BUILTIN_PCMPISTRS128,
+ IX86_BUILTIN_PCMPISTRZ128,
+
+ IX86_BUILTIN_PCMPGTQ,
+
+ /* TFmode support builtins. */
+ IX86_BUILTIN_INFQ,
+ IX86_BUILTIN_FABSQ,
+ IX86_BUILTIN_COPYSIGNQ,
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+
+ IX86_BUILTIN_MAX
+};
+
+/* LLVM_TARGET_INTRINSIC_PREFIX - Specify what prefix this target uses for its
+ * intrinsics.
+ */
+#define LLVM_TARGET_INTRINSIC_PREFIX "x86"
+
+/* LLVM_TARGET_NAME - This specifies the name of the target, which correlates to
+ * the llvm::InitializeXXXTarget() function.
+ */
+#define LLVM_TARGET_NAME X86
+
+/* Turn -march=xx into a CPU type.
+ */
+#define LLVM_SET_SUBTARGET_FEATURES(F) \
+ { if (TARGET_MACHO && ! strcmp (ix86_arch_string, "apple")) \
+ F.setCPU(TARGET_64BIT ? "core2" : "yonah"); \
+ else \
+ F.setCPU(ix86_arch_string); \
+ \
+ if (TARGET_64BIT) \
+ F.AddFeature("64bit"); \
+ else if (target_flags_explicit & OPTION_MASK_ISA_64BIT) \
+ F.AddFeature("64bit", false); \
+ \
+ if (TARGET_MMX) \
+ F.AddFeature("mmx"); \
+ else if (target_flags_explicit & OPTION_MASK_ISA_MMX) \
+ F.AddFeature("mmx", false); \
+ \
+ if (TARGET_3DNOW) \
+ F.AddFeature("3dnow"); \
+ else if (target_flags_explicit & OPTION_MASK_ISA_3DNOW) \
+ F.AddFeature("3dnow", false); \
+ \
+ if (TARGET_3DNOW_A) \
+ F.AddFeature("3dnowa"); \
+ else if (target_flags_explicit & OPTION_MASK_ISA_3DNOW_A) \
+ F.AddFeature("3dnowa", false); \
+ \
+ if (TARGET_SSE) \
+ F.AddFeature("sse"); \
+ else if (target_flags_explicit & OPTION_MASK_ISA_SSE) \
+ F.AddFeature("sse", false); \
+ \
+ if (TARGET_SSE2) \
+ F.AddFeature("sse2"); \
+ else if (target_flags_explicit & OPTION_MASK_ISA_SSE2) \
+ F.AddFeature("sse2", false); \
+ \
+ if (TARGET_SSE3) \
+ F.AddFeature("sse3"); \
+ else if (target_flags_explicit & OPTION_MASK_ISA_SSE3) \
+ F.AddFeature("sse3", false); \
+ \
+ if (TARGET_SSSE3) \
+ F.AddFeature("ssse3"); \
+ else if (target_flags_explicit & OPTION_MASK_ISA_SSSE3) \
+ F.AddFeature("ssse3", false); \
+ \
+ if (TARGET_SSE4_1) \
+ F.AddFeature("sse41"); \
+ else if (target_flags_explicit & OPTION_MASK_ISA_SSE4_1) \
+ F.AddFeature("sse41", false); \
+ \
+ if (TARGET_SSE4_2) \
+ F.AddFeature("sse42"); \
+ else if (target_flags_explicit & OPTION_MASK_ISA_SSE4_2) \
+ F.AddFeature("sse42", false); \
+ \
+ if (TARGET_AVX) \
+ F.AddFeature("avx"); \
+ else if (target_flags_explicit & OPTION_MASK_ISA_AVX) \
+ F.AddFeature("avx", false); \
+ \
+ if (TARGET_FMA) \
+ F.AddFeature("fma3"); \
+ else if (target_flags_explicit & OPTION_MASK_ISA_FMA) \
+ F.AddFeature("fma3", false); \
+ \
+ if (TARGET_SSE4A) \
+ F.AddFeature("sse4a"); \
+ else if (target_flags_explicit & OPTION_MASK_ISA_SSE4A) \
+ F.AddFeature("sse4a", false); \
+ }
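As a usage sketch only: assuming the plugin hands this macro an llvm::SubtargetFeatures object and then reads the accumulated string back. The helper name and the example output below are illustrative, not taken from this patch:

    #include "llvm/Target/SubtargetFeature.h"
    #include <string>

    static std::string ComputeFeatureString(void) {
      llvm::SubtargetFeatures Features;
      LLVM_SET_SUBTARGET_FEATURES(Features);
      /* For, say, "-m64 -march=core2" with SSE4.2 explicitly disabled, the
         accumulated string would look roughly like
         "core2,+64bit,+mmx,+sse,+sse2,+sse3,+ssse3,-sse42". */
      return Features.getString();
    }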
+
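+/* LLVM_SET_IMPLICIT_FLOAT - If the x87 unit is disabled then LLVM may not
+ * use floating point implicitly (for example when expanding block
+ * operations).
+ */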
+#define LLVM_SET_IMPLICIT_FLOAT(flag_no_implicit_float) \
+ if (!TARGET_80387) \
+ flag_no_implicit_float = 1; \
+ else \
+ flag_no_implicit_float = 0;
+
+/* LLVM ABI definition macros. */
+
+/* When -m64 is specified, set the architecture to x86_64-os-blah even if the
+ * compiler was configured for i[3456]86-os-blah.
+ */
+#define LLVM_OVERRIDE_TARGET_ARCH() \
+ (TARGET_64BIT ? "x86_64" : "i386")
+
+/* LLVM_TARGET_INTRINSIC_LOWER - To handle builtins, we want to expand the
+ * invocation into normal LLVM code. If the target can handle the builtin, this
+ * macro should call the target TreeToLLVM::TargetIntrinsicLower method and
+ * return true. This macro is invoked from a method in the TreeToLLVM class.
+ */
+#define LLVM_TARGET_INTRINSIC_LOWER(STMT, BUILTIN_CODE, DESTLOC, RESULT, \
+ DESTTY, OPS) \
+ TargetIntrinsicLower(STMT, BUILTIN_CODE, DESTLOC, RESULT, DESTTY, OPS);
+
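For reference, what the macro forwards to at a call site; the argument names below are placeholders rather than identifiers from the plugin:

    /* Written at some call site inside TreeToLLVM ... */
    LLVM_TARGET_INTRINSIC_LOWER(stmt, fncode, DestLoc, Result, ResultType, Ops);
    /* ...the line above expands to the member call: */
    TargetIntrinsicLower(stmt, fncode, DestLoc, Result, ResultType, Ops);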
+/* When extracting a register name for a constraint, use the string extracted
+ from the magic symbol built for that register, rather than reg_names.
+ The latter maps both AH and AL to the same thing, which means we can't
+ distinguish them. */
+#define LLVM_DO_NOT_USE_REG_NAMES
+
+/* Propagate the code model setting to the LLVM backend. */
+#define LLVM_SET_MACHINE_OPTIONS(argvec) \
+ switch (ix86_cmodel) { \
+ default: \
+ sorry ("code model %<%s%> not supported yet", ix86_cmodel_string); \
+ break; \
+ case CM_SMALL: \
+ case CM_SMALL_PIC: \
+ argvec.push_back("--code-model=small"); \
+ break; \
+ case CM_KERNEL: \
+ argvec.push_back("--code-model=kernel"); \
+ break; \
+ case CM_MEDIUM: \
+ case CM_MEDIUM_PIC: \
+ argvec.push_back("--code-model=medium"); \
+ break; \
+ case CM_32: \
+ argvec.push_back("--code-model=default"); \
+ break; \
+ }
+
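A minimal sketch of a plausible consumer, assuming the collected switches are forwarded to LLVM's command-line parser. The function name, the fake argv[0] string, and the vector type are assumptions for illustration, not part of the patch:

    #include "llvm/Support/CommandLine.h"
    #include <vector>

    static void ForwardMachineOptionsToLLVM(void) {
      std::vector<const char *> Args;
      Args.push_back("dragonegg");     /* fake argv[0] expected by the parser */
      LLVM_SET_MACHINE_OPTIONS(Args);  /* e.g. appends "--code-model=kernel"  */
      llvm::cl::ParseCommandLineOptions(Args.size(),
                                        const_cast<char **>(&Args[0]));
    }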
+#endif /* LLVM_TARGET_H */