diff options
Diffstat (limited to 'gcc/config/riscv')
-rw-r--r-- | gcc/config/riscv/linux.h | 13 | ||||
-rw-r--r-- | gcc/config/riscv/pic.md | 11 | ||||
-rw-r--r-- | gcc/config/riscv/riscv.c | 60 | ||||
-rw-r--r-- | gcc/config/riscv/riscv.h | 10 | ||||
-rw-r--r-- | gcc/config/riscv/riscv.md | 9 | ||||
-rw-r--r-- | gcc/config/riscv/riscv.opt | 4 |
6 files changed, 81 insertions, 26 deletions
diff --git a/gcc/config/riscv/linux.h b/gcc/config/riscv/linux.h index ecf424d1a2b..4b2f7b6e1fd 100644 --- a/gcc/config/riscv/linux.h +++ b/gcc/config/riscv/linux.h @@ -24,6 +24,17 @@ along with GCC; see the file COPYING3. If not see #define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-riscv" XLEN_SPEC "-" ABI_SPEC ".so.1" +#define MUSL_ABI_SUFFIX \ + "%{mabi=ilp32:-sf}" \ + "%{mabi=ilp32f:-sp}" \ + "%{mabi=ilp32d:}" \ + "%{mabi=lp64:-sf}" \ + "%{mabi=lp64f:-sp}" \ + "%{mabi=lp64d:}" \ + +#undef MUSL_DYNAMIC_LINKER +#define MUSL_DYNAMIC_LINKER "/lib/ld-musl-riscv" XLEN_SPEC MUSL_ABI_SUFFIX ".so.1" + /* Because RISC-V only has word-sized atomics, it requries libatomic where others do not. So link libatomic by default, as needed. */ #undef LIB_SPEC @@ -34,6 +45,8 @@ along with GCC; see the file COPYING3. If not see #define LIB_SPEC GNU_USER_TARGET_LIB_SPEC " -latomic " #endif +#define ICACHE_FLUSH_FUNC "__riscv_flush_icache" + #define LINK_SPEC "\ -melf" XLEN_SPEC "lriscv \ %{shared} \ diff --git a/gcc/config/riscv/pic.md b/gcc/config/riscv/pic.md index 6a29ead32d3..03b8f9bc669 100644 --- a/gcc/config/riscv/pic.md +++ b/gcc/config/riscv/pic.md @@ -22,13 +22,20 @@ ;; Simplify PIC loads to static variables. ;; These should go away once we figure out how to emit auipc discretely. 
-(define_insn "*local_pic_load<mode>" +(define_insn "*local_pic_load_s<mode>" [(set (match_operand:ANYI 0 "register_operand" "=r") - (mem:ANYI (match_operand 1 "absolute_symbolic_operand" "")))] + (sign_extend:ANYI (mem:ANYI (match_operand 1 "absolute_symbolic_operand" ""))))] "USE_LOAD_ADDRESS_MACRO (operands[1])" "<load>\t%0,%1" [(set (attr "length") (const_int 8))]) +(define_insn "*local_pic_load_u<mode>" + [(set (match_operand:ZERO_EXTEND_LOAD 0 "register_operand" "=r") + (zero_extend:ZERO_EXTEND_LOAD (mem:ZERO_EXTEND_LOAD (match_operand 1 "absolute_symbolic_operand" ""))))] + "USE_LOAD_ADDRESS_MACRO (operands[1])" + "<load>u\t%0,%1" + [(set (attr "length") (const_int 8))]) + (define_insn "*local_pic_load<mode>" [(set (match_operand:ANYF 0 "register_operand" "=f") (mem:ANYF (match_operand 1 "absolute_symbolic_operand" ""))) diff --git a/gcc/config/riscv/riscv.c b/gcc/config/riscv/riscv.c index d5928c334de..5f53819eb36 100644 --- a/gcc/config/riscv/riscv.c +++ b/gcc/config/riscv/riscv.c @@ -1,5 +1,5 @@ /* Subroutines used for code generation for RISC-V. - Copyright (C) 2011-2017 Free Software Foundation, Inc. + Copyright (C) 2011-2018 Free Software Foundation, Inc. Contributed by Andrew Waterman (andrew@sifive.com). Based on MIPS target for GNU compiler. @@ -177,9 +177,6 @@ struct GTY(()) machine_function { This area is allocated by the callee at the very top of the frame. */ int varargs_size; - /* Memoized return value of leaf_function_p. <0 if false, >0 if true. */ - int is_leaf; - /* The current frame information, calculated by riscv_compute_frame_info. */ struct riscv_frame_info frame; }; @@ -255,6 +252,7 @@ struct riscv_tune_info unsigned short issue_rate; unsigned short branch_cost; unsigned short memory_cost; + bool slow_unaligned_access; }; /* Information about one CPU we know about. */ @@ -268,6 +266,9 @@ struct riscv_cpu_info { /* Global variables for machine-dependent things. */ +/* Whether unaligned accesses execute very slowly. 
*/ +bool riscv_slow_unaligned_access; + /* Which tuning parameters to use. */ static const struct riscv_tune_info *tune_info; @@ -301,7 +302,8 @@ static const struct riscv_tune_info rocket_tune_info = { {COSTS_N_INSNS (6), COSTS_N_INSNS (6)}, /* int_div */ 1, /* issue_rate */ 3, /* branch_cost */ - 5 /* memory_cost */ + 5, /* memory_cost */ + true, /* slow_unaligned_access */ }; /* Costs to use when optimizing for size. */ @@ -313,12 +315,14 @@ static const struct riscv_tune_info optimize_size_tune_info = { {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_div */ 1, /* issue_rate */ 1, /* branch_cost */ - 2 /* memory_cost */ + 2, /* memory_cost */ + false, /* slow_unaligned_access */ }; /* A table describing all the processors GCC knows about. */ static const struct riscv_cpu_info riscv_cpu_info_table[] = { { "rocket", &rocket_tune_info }, + { "size", &optimize_size_tune_info }, }; /* Return the riscv_cpu_info entry for the given name string. */ @@ -726,7 +730,8 @@ riscv_valid_lo_sum_p (enum riscv_symbol_type sym_type, enum machine_mode mode) /* We may need to split multiword moves, so make sure that each word can be accessed without inducing a carry. */ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD - && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode)) + && (!TARGET_STRICT_ALIGN + || GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))) return false; return true; @@ -1377,6 +1382,22 @@ riscv_legitimize_move (enum machine_mode mode, rtx dest, rtx src) return true; } + /* RISC-V GCC can generate non-legitimate addresses because we provide + patterns that optimize access to PIC local symbols, which can make GCC + emit unrecognizable instructions during optimization. 
*/ + + if (MEM_P (dest) && !riscv_legitimate_address_p (mode, XEXP (dest, 0), + reload_completed)) + { + XEXP (dest, 0) = riscv_force_address (XEXP (dest, 0), mode); + } + + if (MEM_P (src) && !riscv_legitimate_address_p (mode, XEXP (src, 0), + reload_completed)) + { + XEXP (src, 0) = riscv_force_address (XEXP (src, 0), mode); + } + return false; } @@ -3773,6 +3794,16 @@ riscv_option_override (void) RISCV_TUNE_STRING_DEFAULT); tune_info = optimize_size ? &optimize_size_tune_info : cpu->tune_info; + /* Use -mtune's setting for slow_unaligned_access, even when optimizing + for size. For architectures that trap and emulate unaligned accesses, + the performance cost is too great, even for -Os. Similarly, if + -m[no-]strict-align is left unspecified, heed -mtune's advice. */ + riscv_slow_unaligned_access = (cpu->tune_info->slow_unaligned_access + || TARGET_STRICT_ALIGN); + if ((target_flags_explicit & MASK_STRICT_ALIGN) == 0 + && cpu->tune_info->slow_unaligned_access) + target_flags |= MASK_STRICT_ALIGN; + /* If the user hasn't specified a branch cost, use the processor's default. */ if (riscv_branch_cost == 0) @@ -3960,26 +3991,15 @@ riscv_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value) emit_insn (gen_clear_cache (addr, end_addr)); } -/* Return leaf_function_p () and memoize the result. */ - -static bool -riscv_leaf_function_p (void) -{ - if (cfun->machine->is_leaf == 0) - cfun->machine->is_leaf = leaf_function_p () ? 1 : -1; - - return cfun->machine->is_leaf > 0; -} - /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */ static bool riscv_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED, tree exp ATTRIBUTE_UNUSED) { - /* When optimzing for size, don't use sibcalls in non-leaf routines */ + /* Don't use sibcalls when using the save-restore routines. 
*/ if (TARGET_SAVE_RESTORE) - return riscv_leaf_function_p (); + return false; return true; } diff --git a/gcc/config/riscv/riscv.h b/gcc/config/riscv/riscv.h index 8d4c75e6770..c5d134cbe57 100644 --- a/gcc/config/riscv/riscv.h +++ b/gcc/config/riscv/riscv.h @@ -126,10 +126,11 @@ along with GCC; see the file COPYING3. If not see /* There is no point aligning anything to a rounder boundary than this. */ #define BIGGEST_ALIGNMENT 128 -/* The user-level ISA permits misaligned accesses, but they may execute - extremely slowly and non-atomically. Some privileged architectures - do not permit them at all. It is best to enforce strict alignment. */ -#define STRICT_ALIGNMENT 1 +/* The user-level ISA permits unaligned accesses, but they are not required + of the privileged architecture. */ +#define STRICT_ALIGNMENT TARGET_STRICT_ALIGN + +#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) riscv_slow_unaligned_access /* Define this if you wish to imitate the way many other C compilers handle alignment of bitfields and the structures that contain @@ -864,6 +865,7 @@ while (0) #ifndef USED_FOR_TARGET extern const enum reg_class riscv_regno_to_class[]; extern bool riscv_hard_regno_mode_ok[][FIRST_PSEUDO_REGISTER]; +extern bool riscv_slow_unaligned_access; #endif #define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \ diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md index 4cbb2431335..5f216d3255b 100644 --- a/gcc/config/riscv/riscv.md +++ b/gcc/config/riscv/riscv.md @@ -259,6 +259,9 @@ ;; Iterator for QImode extension patterns. (define_mode_iterator SUPERQI [HI SI (DI "TARGET_64BIT")]) +;; Iterator for extending loads. +(define_mode_iterator ZERO_EXTEND_LOAD [QI HI (SI "TARGET_64BIT")]) + ;; Iterator for hardware integer modes narrower than XLEN. 
(define_mode_iterator SUBX [QI HI (SI "TARGET_64BIT")]) @@ -1426,7 +1429,13 @@ (match_operand 1 "pmode_register_operand")] "" { +#ifdef ICACHE_FLUSH_FUNC + emit_library_call (gen_rtx_SYMBOL_REF (Pmode, ICACHE_FLUSH_FUNC), + LCT_NORMAL, VOIDmode, 3, operands[0], Pmode, + operands[1], Pmode, const0_rtx, Pmode); +#else emit_insn (gen_fence_i ()); +#endif DONE; }) diff --git a/gcc/config/riscv/riscv.opt b/gcc/config/riscv/riscv.opt index 0466bb29d14..cfd0335d082 100644 --- a/gcc/config/riscv/riscv.opt +++ b/gcc/config/riscv/riscv.opt @@ -84,6 +84,10 @@ mcmodel= Target Report RejectNegative Joined Enum(code_model) Var(riscv_cmodel) Init(TARGET_DEFAULT_CMODEL) Specify the code model. +mstrict-align +Target Report Mask(STRICT_ALIGN) Save +Do not generate unaligned memory accesses. + Enum Name(code_model) Type(enum riscv_code_model) Known code models (for use with the -mcmodel= option): |