path: root/gcc/f/gbe/2.7.2.2.diff
Diffstat (limited to 'gcc/f/gbe/2.7.2.2.diff')
-rw-r--r--  gcc/f/gbe/2.7.2.2.diff  11296
1 files changed, 11296 insertions, 0 deletions
diff --git a/gcc/f/gbe/2.7.2.2.diff b/gcc/f/gbe/2.7.2.2.diff
new file mode 100644
index 00000000000..e99ba671741
--- /dev/null
+++ b/gcc/f/gbe/2.7.2.2.diff
@@ -0,0 +1,11296 @@
+IMPORTANT: After applying this patch, you must rebuild the
+Info documentation derived from the Texinfo files in the
+gcc distribution, as this patch does not include patches
+to any derived files (due to differences in the way gcc
+version 2.7.2.2 is obtained by users). Use the following
+command sequence after applying this patch:
+
+ cd gcc-2.7.2.2; make -f Makefile.in gcc.info
+
+If that fails due to `makeinfo' not being installed, obtain
+texinfo-3.11.tar.gz from a GNU distribution site, unpack,
+build, and install it, and try the above command sequence
+again.
+
+
+diff -rcp2N gcc-2.7.2.2/ChangeLog g77-new/ChangeLog
+*** gcc-2.7.2.2/ChangeLog Thu Feb 20 19:24:10 1997
+--- g77-new/ChangeLog Mon Aug 11 06:48:02 1997
+***************
+*** 1,2 ****
+--- 1,244 ----
++ Sun Aug 10 18:14:24 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ Integrate C front end part of patch for better alias
++ handling from John Carr <jfc@mit.edu>:
++ * c-decl.c (grokdeclarator): Check for RID_RESTRICT
++ flag; diagnose certain misuses; set DECL_RESTRICT as
++ appropriate.
++ * c-lex.c (init_lex): Set up RID_RESTRICT pointer.
++ Unset `restrict' as reserved word.
++ * c-lex.h: Replace RID_NOALIAS with RID_RESTRICT.
++ * c-parse.gperf: Add `restrict' and `__restrict'
++ keywords.
++ * tree.h: Add DECL_RESTRICT flag.
++
++ Sun Aug 10 14:50:30 1997 Jim Wilson <wilson@cygnus.com>
++
++ * sdbout.c (plain_type_1, case ARRAY_TYPE): Verify that TYPE_DOMAIN
++ has integer TYPE_{MAX,MIN}_VALUE before using them.
++
++ Mon Jul 28 15:35:38 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * combine.c (num_sign_bit_copies): Speed up the 961126-1.c
++ case of repeated (neg (neg (neg ...))) so c-torture runs
++ in less time.
++
++ * reload.c (find_reloads_toplev, find_reloads_address):
++ These now return whether replacement by a constant, so
++ caller can know to do other replacements. Currently if
++ caller doesn't want that info and such replacement would
++ happen, we crash so as to investigate the problem and
++ learn more about it. All callers updated.
++ (find_reloads): If pseudo replaced by constant, always
++ update duplicates of it.
++
++ Mon Jul 21 00:00:24 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * fold-const.c (size_binop): Make sure overflows
++ are flagged properly, so as to avoid silently generating
++ bad code for, e.g., a too-large array.
++
++ Sun Jul 13 22:23:14 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * stmt.c (expand_expr_stmt): Must generate code for
++ statements within an expression (gcc's `({ ... )}')
++ even if -fsyntax-only.
++
++ Mon Jun 30 17:23:07 1997 Michael Meissner <meissner@cygnus.com>
++
++ * gcc.c (process_command): If -save-temps and -pipe were specified
++ together, don't do -pipe.
++
++ Thu Jun 26 05:40:46 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * stor-layout.c (get_best_mode): Handle negative bitpos
++ correctly, so caller doesn't get into infinite recursion
++ trying to cope with a spurious VOIDmode.
++
++ Tue Jun 24 19:46:31 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * varasm.c (assemble_variable): If low part of size
++ doesn't fit in an int, variable is too large.
++
++ Sat Jun 21 12:09:00 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * toplev.c (rest_of_compilation): Also temporarily set
++ flag_unroll_all_loops to 0 during first of two calls
++ to loop_optimize, and clean up code a bit to make it
++ easier to read.
++
++ * expr.c (safe_from_p_1, safe_from_p): Fix these to use
++ TREE_SET_CODE instead of TREE_CODE.
++
++ Thu Jun 19 19:30:47 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * config/alpha/alpha.c: Don't include <stamp.h> on
++ GNU Linux machines.
++
++ * config/alpha/elf.c: New file for ELF systems.
++
++ * config/alpha/xm-alpha.h: Don't declare alloca()
++ if it's already a macro (probably defined in stdlib.h).
++
++ * config/alpha/xm-linux.h (HAVE_STRERROR): #define
++ this, according to what various people suggest.
++
++ * config.guess, configure: Make some (hopefully safe)
++ changes, based mostly on gcc-2.8.0-in-development,
++ in the hopes that these make some systems configure
++ "out of the box" more easily, especially Alpha systems.
++
++ Mon Jun 9 04:26:53 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * expr.c (safe_from_p): Don't examine a given SAVE_EXPR
++ node more than once, to avoid combinatorial explosion
++ in complex expressions. Fortran case that triggered
++ this had a complicated *and* complex expression with
++ 293 unique nodes, resulting in 28 minutes of compile
++ time mostly spent in a single top-level safe_from_p()
++ call due to all the redundant SAVE_EXPR traversals.
++ This change reduced the time to around 2 seconds.
++ (safe_from_p_1): New helper function that does almost
++ exactly what the old safe_from_p() did.
++
++ Sun May 18 21:18:48 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * fold-const.c (fold): Clarify why TRUNC_DIV_EXPR
++ and FLOOR_DIV_EXPR aren't rewritten to EXACT_DIV_EXPR,
++ clean up related code.
++
++ Sat May 3 13:53:00 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * config.sub: Change all `i[345]' to `i[3456]' to
++ support Pentium Pro (this change was already made
++ in configure for gcc-2.7.2.2).
++
++ From Toon Moene <toon@moene.indiv.nluug.nl>:
++ * toplev.c (rest_of_compilation): Unroll loops
++ only the final time through loop optimization.
++
++ Sun Apr 20 10:45:35 1997 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
++
++ * final.c (profile_function): Only call ASM_OUTPUT_REG_{PUSH,POP}
++ if defined.
++
++ Wed Apr 16 22:26:16 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * alias.c, cse.c, loop.c, rtl.c, rtl.h, sched.c:
++ Make changes submitted by <jfc@mit.edu>.
++
++ Sun Apr 13 19:32:53 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * fold-const.c (fold): If extra warnings enabled,
++ warn about integer division by zero.
++
++ Sun Apr 13 08:15:31 1997 Bernd Schmidt <crux@Pool.Informatik.RWTH-Aachen.DE>
++
++ * final.c (profile_function): Save the static chain register
++ around the call to the profiler function.
++
++ Sat Apr 12 14:56:42 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * unroll.c (find_splittable_givs): Permit more cases
++ of mult_val/add_val to agree by using rtx_equal_p
++ to compare them instead of requiring them to be
++ integers with the same value. Also don't bother
++ checking if ADDRESS_COST not defined (they will be
++ equal in that case).
++
++ Fri Apr 11 03:30:04 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * unroll.c (find_splittable_givs): Must create a new
++ register if the mult_val and add_val fields don't
++ agree.
++
++ Fri Apr 4 23:00:55 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * fold-const.c (fold): Don't call multiple_of_p if
++ arg1 is constant zero, to avoid crashing; simplify
++ code accordingly.
++
++ Wed Feb 26 13:09:33 1997 Michael Meissner <meissner@cygnus.com>
++
++ * reload.c (debug_reload): Fix format string to print
++ reload_nocombine[r].
++
++ Sun Feb 23 15:26:53 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * fold-const.c (multiple_of_p): Clean up and improve.
++ (fold): Clean up invocation of multiple_of_p.
++
++ Sat Feb 8 04:53:27 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ From <jfc@jfc.tiac.net> Fri, 07 Feb 1997 22:02:21 -0500:
++ * alias.c (init_alias_analysis): Reduce amount of time
++ needed to simplify the reg_base_value array in the
++ typical case (especially involving function inlining).
++
++ Fri Jan 10 17:22:17 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ Minor improvements/fixes to better alias handling:
++ * Makefile.in (alias.o): Fix typo in rule (was RLT_H).
++ * cse.c, sched.c: Fix up some indenting.
++ * toplev.c: Add -fargument-alias flag, so Fortran users
++ can turn C-style aliasing on once g77 defaults to
++ -fargument-noalias-global.
++
++ Integrate patch for better alias handling from
++ John Carr <jfc@mit.edu>:
++ * Makefile.in (OBJS, alias.o): New module and rule.
++ * alias.c: New source module.
++ * calls.c (expand_call): Recognize alias status of calls
++ to malloc().
++ * combine.c (distribute_notes): New REG_NOALIAS note.
++ * rtl.h (REG_NOALIAS): Ditto.
++ Many other changes for new alias.c module.
++ * cse.c: Many changes, and much code moved into alias.c.
++ * flags.h (flag_alias_check, flag_argument_noalias):
++ New flags.
++ * toplev.c: New flags and related options.
++ * local-alloc.c (validate_equiv_mem_from_store):
++ Caller of true_dependence changed.
++ * loop.c (NUM_STORES): Increase to 50 from 20.
++ (prescan_loop): "const" functions don't alter unknown addresses.
++ (invariant_p): Caller of true_dependence changed.
++ (record_giv): Zero new unrolled and shared flags.
++ (emit_iv_add_mult): Record base value for register.
++ * sched.c: Many changes, mostly moving code to alias.c.
++ (sched_note_set): SCHED_SORT macro def form, but not function,
++ inexplicably changed.
++ * unroll.c: Record base values for registers, etc.
++
++ Fri Jan 3 04:01:00 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * loop.c (check_final_value): Handle insns with no luid's
++ appropriately, instead of crashing on INSN_LUID macro
++ invocations.
++
++ Mon Dec 23 00:49:19 1996 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * config/alpha/alpha.md: Fix pattern that matches if_then_else
++ involving DF target, DF comparison, SF source.
++
++ Fri Dec 20 15:42:52 1996 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * fold-const.c (multiple_of_p): New function.
++ (fold): Use new function to turn *_DIV_EXPR into EXACT_DIV_EXPR.
++
++ Tue Oct 22 18:32:20 1996 Jim Wilson <wilson@cygnus.com>
++
++ * unroll.c (unroll_loop): Always reject loops with unbalanced blocks.
++
++ Tue Sep 24 19:37:00 1996 Jim Wilson <wilson@cygnus.com>
++
++ * reload.c (push_secondary_reload): Do strip paradoxical SUBREG
++ even if reload_class is CLASS_CANNOT_CHANGE_SIZE. Change reload_mode
++ to mode in SECONDARY_MEMORY_NEEDED and get_secondary_mem calls.
++
++ Mon Aug 5 16:53:36 1996 Doug Evans <dje@fallis.cygnus.com>
++
++ * stor-layout.c (layout_record): Correct overflow test for 0 sized
++ fields.
++
+ Sat Jun 29 12:33:39 1996 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+
+*************** Tue Jun 11 20:18:03 1996 Per Bothner <b
+*** 8,11 ****
+--- 250,259 ----
+ * alpha.h (FIXPROTO_INIT): Define new macro.
+
++ Sat May 18 20:17:27 1996 Jim Wilson <wilson@cygnus.com>
++
++ * unroll.c (copy_loop_body): When update split DEST_ADDR giv,
++ check to make sure it was split.
++ (find_splittable_givs): Fix reversed test of verify_addresses result.
++
+ Fri May 10 18:35:00 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+*************** Mon Feb 19 07:35:07 1996 Torbjorn Granl
+*** 66,69 ****
+--- 314,322 ----
+ * rs6000.md (not:SI with assign and compare): Fix typo.
+
++ Tue Feb 13 17:43:46 1996 Jim Wilson <wilson@cygnus.com>
++
++ * integrate.c (save_constants_in_decl_trees): New function.
++ (save_for_inline_copying, save_for_inline_nocopy): Call it.
++
+ Wed Jan 24 18:00:12 1996 Brendan Kehoe <brendan@lisa.cygnus.com>
+
+*************** Tue Jan 16 06:01:28 1996 Thomas Graiche
+*** 81,88 ****
+--- 334,357 ----
+ * i386/freebsd.h (ASM_WEAKEN_LABEL): Deleted; not supported.
+
++ Mon Jan 15 07:22:59 1996 Michel Delval (mfd@ccv.fr)
++
++ * reload.c (find_equiv_reg): Apply single_set, not PATTERN, to WHERE.
++
+ Sun Jan 7 17:11:11 1996 David Edelsohn <edelsohn@mhpcc.edu>
+
+ * collect2.c (scan_libraries): Correct Import File ID interpretation.
+
++ Mon Jan 1 09:05:07 1996 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
++
++ * local-alloc.c (reg_equiv_replacement): New variable.
++ (memref_referenced_p, case REG): Check for reg_equiv_replacement.
++ (update_equiv_regs): reg_equiv_replacement now file-scope.
++
++ Fri Dec 22 17:29:42 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
++
++ * reload.c (find_valid_class): New function.
++ (push_reload): Use it in cases where a SUBREG and its contents
++ both need to be reloaded.
++
+ Thu Dec 28 22:24:53 1995 Michael Meissner <meissner@tiktok.cygnus.com>
+
+*************** Mon Dec 18 18:40:34 1995 Jim Wilson <w
+*** 99,102 ****
+--- 368,376 ----
+ above.
+
++ Sun Dec 17 06:37:00 1995 Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
++
++ * reload.c (push_secondary_reload): Don't strip paradoxical SUBREG
++ if reload_class is CLASS_CANNOT_CHANGE_SIZE.
++
+ Sat Dec 16 07:03:33 1995 Philippe De Muyter (phdm@info.ucl.ac.be)
+
+*************** Sat Dec 9 18:05:03 1995 Jim Wilson <w
+*** 113,116 ****
+--- 387,395 ----
+ * expr.c (expand_expr, case INDIRECT_REF): Correct typo in May 8
+ change.
++
++ Fri Dec 8 19:17:30 1995 Mike Meissner <meissner@beauty.cygnus.com>
++
++ * rs6000/rs6000.c (input_operand): Allow any integer constant, not
++ just integers that fit in 1 instruction.
+
+ Sun Nov 26 14:47:42 1995 Richard Kenner <kenner@mole.gnu.ai.mit.edu>
+diff -rcp2N gcc-2.7.2.2/Makefile.in g77-new/Makefile.in
+*** gcc-2.7.2.2/Makefile.in Sun Nov 26 14:44:25 1995
+--- g77-new/Makefile.in Sun Aug 10 18:46:06 1997
+*************** OBJS = toplev.o version.o tree.o print-t
+*** 519,523 ****
+ integrate.o jump.o cse.o loop.o unroll.o flow.o stupid.o combine.o \
+ regclass.o local-alloc.o global.o reload.o reload1.o caller-save.o \
+! insn-peep.o reorg.o sched.o final.o recog.o reg-stack.o \
+ insn-opinit.o insn-recog.o insn-extract.o insn-output.o insn-emit.o \
+ insn-attrtab.o $(out_object_file) getpwd.o convert.o $(EXTRA_OBJS)
+--- 519,523 ----
+ integrate.o jump.o cse.o loop.o unroll.o flow.o stupid.o combine.o \
+ regclass.o local-alloc.o global.o reload.o reload1.o caller-save.o \
+! insn-peep.o reorg.o alias.o sched.o final.o recog.o reg-stack.o \
+ insn-opinit.o insn-recog.o insn-extract.o insn-output.o insn-emit.o \
+ insn-attrtab.o $(out_object_file) getpwd.o convert.o $(EXTRA_OBJS)
+*************** LIB2FUNCS = _muldi3 _divdi3 _moddi3 _udi
+*** 570,574 ****
+ _fixxfdi _fixunsxfdi _floatdixf _fixunsxfsi \
+ _fixtfdi _fixunstfdi _floatditf \
+! __gcc_bcmp _varargs _eprintf _op_new _op_vnew _new_handler _op_delete \
+ _op_vdel _bb _shtab _clear_cache _trampoline __main _exit _ctors _eh \
+ _pure
+--- 570,575 ----
+ _fixxfdi _fixunsxfdi _floatdixf _fixunsxfsi \
+ _fixtfdi _fixunstfdi _floatditf \
+! __gcc_bcmp _varargs __dummy _eprintf \
+! _op_new _op_vnew _new_handler _op_delete \
+ _op_vdel _bb _shtab _clear_cache _trampoline __main _exit _ctors _eh \
+ _pure
+*************** expr.o : expr.c $(CONFIG_H) $(RTL_H) $(T
+*** 1179,1183 ****
+ insn-flags.h insn-codes.h expr.h insn-config.h recog.h output.h \
+ typeclass.h bytecode.h bc-opcode.h bc-typecd.h bc-typecd.def bc-optab.h \
+! bc-emit.h modemap.def
+ calls.o : calls.c $(CONFIG_H) $(RTL_H) $(TREE_H) flags.h expr.h insn-codes.h \
+ insn-flags.h
+--- 1180,1184 ----
+ insn-flags.h insn-codes.h expr.h insn-config.h recog.h output.h \
+ typeclass.h bytecode.h bc-opcode.h bc-typecd.h bc-typecd.def bc-optab.h \
+! bc-emit.h modemap.def hard-reg-set.h
+ calls.o : calls.c $(CONFIG_H) $(RTL_H) $(TREE_H) flags.h expr.h insn-codes.h \
+ insn-flags.h
+*************** reorg.o : reorg.c $(CONFIG_H) $(RTL_H) c
+*** 1238,1241 ****
+--- 1239,1243 ----
+ basic-block.h regs.h insn-config.h insn-attr.h insn-flags.h recog.h \
+ flags.h output.h
++ alias.o : $(CONFIG_H) $(RTL_H) flags.h hard-reg-set.h regs.h
+ sched.o : sched.c $(CONFIG_H) $(RTL_H) basic-block.h regs.h hard-reg-set.h \
+ flags.h insn-config.h insn-attr.h
+diff -rcp2N gcc-2.7.2.2/alias.c g77-new/alias.c
+*** gcc-2.7.2.2/alias.c Wed Dec 31 19:00:00 1969
+--- g77-new/alias.c Thu Jul 10 20:08:43 1997
+***************
+*** 0 ****
+--- 1,996 ----
++ /* Alias analysis for GNU C, by John Carr (jfc@mit.edu).
++ Derived in part from sched.c */
++ #include "config.h"
++ #include "rtl.h"
++ #include "expr.h"
++ #include "regs.h"
++ #include "hard-reg-set.h"
++ #include "flags.h"
++
++ static rtx canon_rtx PROTO((rtx));
++ static int rtx_equal_for_memref_p PROTO((rtx, rtx));
++ static rtx find_symbolic_term PROTO((rtx));
++ static int memrefs_conflict_p PROTO((int, rtx, int, rtx,
++ HOST_WIDE_INT));
++
++ /* Set up all info needed to perform alias analysis on memory references. */
++
++ #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
++
++ /* reg_base_value[N] gives an address to which register N is related.
++ If all sets after the first add or subtract to the current value
++ or otherwise modify it so it does not point to a different top level
++ object, reg_base_value[N] is equal to the address part of the source
++ of the first set. The value will be a SYMBOL_REF, a LABEL_REF, or
++ (address (reg)) to indicate that the address is derived from an
++ argument or fixed register. */
++ rtx *reg_base_value;
++ unsigned int reg_base_value_size; /* size of reg_base_value array */
++ #define REG_BASE_VALUE(X) \
++ (REGNO (X) < reg_base_value_size ? reg_base_value[REGNO (X)] : 0)
++
++ /* Vector indexed by N giving the initial (unchanging) value known
++ for pseudo-register N. */
++ rtx *reg_known_value;
++
++ /* Indicates number of valid entries in reg_known_value. */
++ static int reg_known_value_size;
++
++ /* Vector recording for each reg_known_value whether it is due to a
++ REG_EQUIV note. Future passes (viz., reload) may replace the
++ pseudo with the equivalent expression and so we account for the
++ dependences that would be introduced if that happens. */
++ /* ??? This is a problem only on the Convex. The REG_EQUIV notes created in
++ assign_parms mention the arg pointer, and there are explicit insns in the
++ RTL that modify the arg pointer. Thus we must ensure that such insns don't
++ get scheduled across each other because that would invalidate the REG_EQUIV
++ notes. One could argue that the REG_EQUIV notes are wrong, but solving
++ the problem in the scheduler will likely give better code, so we do it
++ here. */
++ char *reg_known_equiv_p;
++
++ /* Inside SRC, the source of a SET, find a base address. */
++
++ /* When copying arguments into pseudo-registers, record the (ADDRESS)
++ expression for the argument directly so that even if the argument
++ register is changed later (e.g. for a function call) the original
++ value is noted. */
++ static int copying_arguments;
++
++ static rtx
++ find_base_value (src)
++ register rtx src;
++ {
++ switch (GET_CODE (src))
++ {
++ case SYMBOL_REF:
++ case LABEL_REF:
++ return src;
++
++ case REG:
++ if (copying_arguments && REGNO (src) < FIRST_PSEUDO_REGISTER)
++ return reg_base_value[REGNO (src)];
++ return src;
++
++ case MEM:
++ /* Check for an argument passed in memory. Only record in the
++ copying-arguments block; it is too hard to track changes
++ otherwise. */
++ if (copying_arguments
++ && (XEXP (src, 0) == arg_pointer_rtx
++ || (GET_CODE (XEXP (src, 0)) == PLUS
++ && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
++ return gen_rtx (ADDRESS, VOIDmode, src);
++ return 0;
++
++ case CONST:
++ src = XEXP (src, 0);
++ if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
++ break;
++ /* fall through */
++ case PLUS:
++ case MINUS:
++ /* Guess which operand to set the register equivalent to. */
++ /* If the first operand is a symbol or the second operand is
++ an integer, the first operand is the base address. */
++ if (GET_CODE (XEXP (src, 0)) == SYMBOL_REF
++ || GET_CODE (XEXP (src, 0)) == LABEL_REF
++ || GET_CODE (XEXP (src, 1)) == CONST_INT)
++ return XEXP (src, 0);
++ /* If an operand is a register marked as a pointer, it is the base. */
++ if (GET_CODE (XEXP (src, 0)) == REG
++ && REGNO_POINTER_FLAG (REGNO (XEXP (src, 0))))
++ src = XEXP (src, 0);
++ else if (GET_CODE (XEXP (src, 1)) == REG
++ && REGNO_POINTER_FLAG (REGNO (XEXP (src, 1))))
++ src = XEXP (src, 1);
++ else
++ return 0;
++ if (copying_arguments && REGNO (src) < FIRST_PSEUDO_REGISTER)
++ return reg_base_value[REGNO (src)];
++ return src;
++
++ case AND:
++ /* If the second operand is constant set the base
++ address to the first operand. */
++ if (GET_CODE (XEXP (src, 1)) == CONST_INT
++ && GET_CODE (XEXP (src, 0)) == REG)
++ {
++ src = XEXP (src, 0);
++ if (copying_arguments && REGNO (src) < FIRST_PSEUDO_REGISTER)
++ return reg_base_value[REGNO (src)];
++ return src;
++ }
++ return 0;
++
++ case HIGH:
++ return XEXP (src, 0);
++ }
++
++ return 0;
++ }
++
++ /* Called from init_alias_analysis indirectly through note_stores. */
++
++ /* while scanning insns to find base values, reg_seen[N] is nonzero if
++ register N has been set in this function. */
++ static char *reg_seen;
++
++ static
++ void record_set (dest, set)
++ rtx dest, set;
++ {
++ register int regno;
++ rtx src;
++
++ if (GET_CODE (dest) != REG)
++ return;
++
++ regno = REGNO (dest);
++
++ if (set)
++ {
++ /* A CLOBBER wipes out any old value but does not prevent a previously
++ unset register from acquiring a base address (i.e. reg_seen is not
++ set). */
++ if (GET_CODE (set) == CLOBBER)
++ {
++ reg_base_value[regno] = 0;
++ return;
++ }
++ src = SET_SRC (set);
++ }
++ else
++ {
++ static int unique_id;
++ if (reg_seen[regno])
++ {
++ reg_base_value[regno] = 0;
++ return;
++ }
++ reg_seen[regno] = 1;
++ reg_base_value[regno] = gen_rtx (ADDRESS, Pmode,
++ GEN_INT (unique_id++));
++ return;
++ }
++
++ /* This is not the first set. If the new value is not related to the
++ old value, forget the base value. Note that the following code is
++ not detected:
++ extern int x, y; int *p = &x; p += (&y-&x);
++ ANSI C does not allow computing the difference of addresses
++ of distinct top level objects. */
++ if (reg_base_value[regno])
++ switch (GET_CODE (src))
++ {
++ case PLUS:
++ case MINUS:
++ if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
++ reg_base_value[regno] = 0;
++ break;
++ case AND:
++ if (XEXP (src, 0) != dest || GET_CODE (XEXP (src, 1)) != CONST_INT)
++ reg_base_value[regno] = 0;
++ break;
++ case LO_SUM:
++ if (XEXP (src, 0) != dest)
++ reg_base_value[regno] = 0;
++ break;
++ default:
++ reg_base_value[regno] = 0;
++ break;
++ }
++ /* If this is the first set of a register, record the value. */
++ else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
++ && ! reg_seen[regno] && reg_base_value[regno] == 0)
++ reg_base_value[regno] = find_base_value (src);
++
++ reg_seen[regno] = 1;
++ }
++
++ /* Called from loop optimization when a new pseudo-register is created. */
++ void
++ record_base_value (regno, val)
++ int regno;
++ rtx val;
++ {
++ if (!flag_alias_check || regno >= reg_base_value_size)
++ return;
++ if (GET_CODE (val) == REG)
++ {
++ if (REGNO (val) < reg_base_value_size)
++ reg_base_value[regno] = reg_base_value[REGNO (val)];
++ return;
++ }
++ reg_base_value[regno] = find_base_value (val);
++ }
++
++ static rtx
++ canon_rtx (x)
++ rtx x;
++ {
++ /* Recursively look for equivalences. */
++ if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER
++ && REGNO (x) < reg_known_value_size)
++ return reg_known_value[REGNO (x)] == x
++ ? x : canon_rtx (reg_known_value[REGNO (x)]);
++ else if (GET_CODE (x) == PLUS)
++ {
++ rtx x0 = canon_rtx (XEXP (x, 0));
++ rtx x1 = canon_rtx (XEXP (x, 1));
++
++ if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
++ {
++ /* We can tolerate LO_SUMs being offset here; these
++ rtl are used for nothing other than comparisons. */
++ if (GET_CODE (x0) == CONST_INT)
++ return plus_constant_for_output (x1, INTVAL (x0));
++ else if (GET_CODE (x1) == CONST_INT)
++ return plus_constant_for_output (x0, INTVAL (x1));
++ return gen_rtx (PLUS, GET_MODE (x), x0, x1);
++ }
++ }
++ /* This gives us much better alias analysis when called from
++ the loop optimizer. Note we want to leave the original
++ MEM alone, but need to return the canonicalized MEM with
++ all the flags with their original values. */
++ else if (GET_CODE (x) == MEM)
++ {
++ rtx addr = canon_rtx (XEXP (x, 0));
++ if (addr != XEXP (x, 0))
++ {
++ rtx new = gen_rtx (MEM, GET_MODE (x), addr);
++ MEM_VOLATILE_P (new) = MEM_VOLATILE_P (x);
++ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
++ MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (x);
++ x = new;
++ }
++ }
++ return x;
++ }
++
++ /* Return 1 if X and Y are identical-looking rtx's.
++
++ We use the data in reg_known_value above to see if two registers with
++ different numbers are, in fact, equivalent. */
++
++ static int
++ rtx_equal_for_memref_p (x, y)
++ rtx x, y;
++ {
++ register int i;
++ register int j;
++ register enum rtx_code code;
++ register char *fmt;
++
++ if (x == 0 && y == 0)
++ return 1;
++ if (x == 0 || y == 0)
++ return 0;
++ x = canon_rtx (x);
++ y = canon_rtx (y);
++
++ if (x == y)
++ return 1;
++
++ code = GET_CODE (x);
++ /* Rtx's of different codes cannot be equal. */
++ if (code != GET_CODE (y))
++ return 0;
++
++ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
++ (REG:SI x) and (REG:HI x) are NOT equivalent. */
++
++ if (GET_MODE (x) != GET_MODE (y))
++ return 0;
++
++ /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively. */
++
++ if (code == REG)
++ return REGNO (x) == REGNO (y);
++ if (code == LABEL_REF)
++ return XEXP (x, 0) == XEXP (y, 0);
++ if (code == SYMBOL_REF)
++ return XSTR (x, 0) == XSTR (y, 0);
++
++ /* For commutative operations, the RTX match if the operand match in any
++ order. Also handle the simple binary and unary cases without a loop. */
++ if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
++ return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
++ && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
++ || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
++ && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
++ else if (GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == '2')
++ return (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
++ && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)));
++ else if (GET_RTX_CLASS (code) == '1')
++ return rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0));
++
++ /* Compare the elements. If any pair of corresponding elements
++ fail to match, return 0 for the whole things. */
++
++ fmt = GET_RTX_FORMAT (code);
++ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
++ {
++ switch (fmt[i])
++ {
++ case 'w':
++ if (XWINT (x, i) != XWINT (y, i))
++ return 0;
++ break;
++
++ case 'n':
++ case 'i':
++ if (XINT (x, i) != XINT (y, i))
++ return 0;
++ break;
++
++ case 'V':
++ case 'E':
++ /* Two vectors must have the same length. */
++ if (XVECLEN (x, i) != XVECLEN (y, i))
++ return 0;
++
++ /* And the corresponding elements must match. */
++ for (j = 0; j < XVECLEN (x, i); j++)
++ if (rtx_equal_for_memref_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0)
++ return 0;
++ break;
++
++ case 'e':
++ if (rtx_equal_for_memref_p (XEXP (x, i), XEXP (y, i)) == 0)
++ return 0;
++ break;
++
++ case 'S':
++ case 's':
++ if (strcmp (XSTR (x, i), XSTR (y, i)))
++ return 0;
++ break;
++
++ case 'u':
++ /* These are just backpointers, so they don't matter. */
++ break;
++
++ case '0':
++ break;
++
++ /* It is believed that rtx's at this level will never
++ contain anything but integers and other rtx's,
++ except for within LABEL_REFs and SYMBOL_REFs. */
++ default:
++ abort ();
++ }
++ }
++ return 1;
++ }
++
++ /* Given an rtx X, find a SYMBOL_REF or LABEL_REF within
++ X and return it, or return 0 if none found. */
++
++ static rtx
++ find_symbolic_term (x)
++ rtx x;
++ {
++ register int i;
++ register enum rtx_code code;
++ register char *fmt;
++
++ code = GET_CODE (x);
++ if (code == SYMBOL_REF || code == LABEL_REF)
++ return x;
++ if (GET_RTX_CLASS (code) == 'o')
++ return 0;
++
++ fmt = GET_RTX_FORMAT (code);
++ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
++ {
++ rtx t;
++
++ if (fmt[i] == 'e')
++ {
++ t = find_symbolic_term (XEXP (x, i));
++ if (t != 0)
++ return t;
++ }
++ else if (fmt[i] == 'E')
++ break;
++ }
++ return 0;
++ }
++
++ static rtx
++ find_base_term (x)
++ register rtx x;
++ {
++ switch (GET_CODE (x))
++ {
++ case REG:
++ return REG_BASE_VALUE (x);
++
++ case HIGH:
++ return find_base_term (XEXP (x, 0));
++
++ case CONST:
++ x = XEXP (x, 0);
++ if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
++ return 0;
++ /* fall through */
++ case LO_SUM:
++ case PLUS:
++ case MINUS:
++ {
++ rtx tmp = find_base_term (XEXP (x, 0));
++ if (tmp)
++ return tmp;
++ return find_base_term (XEXP (x, 1));
++ }
++
++ case AND:
++ if (GET_CODE (XEXP (x, 0)) == REG && GET_CODE (XEXP (x, 1)) == CONST_INT)
++ return REG_BASE_VALUE (XEXP (x, 0));
++ return 0;
++
++ case SYMBOL_REF:
++ case LABEL_REF:
++ return x;
++
++ default:
++ return 0;
++ }
++ }
++
++ /* Return 0 if the addresses X and Y are known to point to different
++ objects, 1 if they might be pointers to the same object. */
++
++ static int
++ base_alias_check (x, y)
++ rtx x, y;
++ {
++ rtx x_base = find_base_term (x);
++ rtx y_base = find_base_term (y);
++
++ /* If either base address is unknown or the base addresses are equal,
++ nothing is known about aliasing. */
++
++ if (x_base == 0 || y_base == 0 || rtx_equal_p (x_base, y_base))
++ return 1;
++
++ /* The base addresses of the read and write are different
++ expressions. If they are both symbols there is no
++ conflict. */
++ if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
++ return 0;
++
++ /* If one address is a stack reference there can be no alias:
++ stack references using different base registers do not alias,
++ a stack reference can not alias a parameter, and a stack reference
++ can not alias a global. */
++ if ((GET_CODE (x_base) == ADDRESS && GET_MODE (x_base) == Pmode)
++ || (GET_CODE (y_base) == ADDRESS && GET_MODE (y_base) == Pmode))
++ return 0;
++
++ if (! flag_argument_noalias)
++ return 1;
++
++ if (flag_argument_noalias > 1)
++ return 0;
++
++ /* Weak noalias assertion (arguments are distinct, but may match globals). */
++ return ! (GET_MODE (x_base) == VOIDmode && GET_MODE (y_base) == VOIDmode);
++ }
++
++ /* Return nonzero if X and Y (memory addresses) could reference the
++ same location in memory. C is an offset accumulator. When
++ C is nonzero, we are testing aliases between X and Y + C.
++ XSIZE is the size in bytes of the X reference,
++ similarly YSIZE is the size in bytes for Y.
++
++ If XSIZE or YSIZE is zero, we do not know the amount of memory being
++ referenced (the reference was BLKmode), so make the most pessimistic
++ assumptions.
++
++ We recognize the following cases of non-conflicting memory:
++
++ (1) addresses involving the frame pointer cannot conflict
++ with addresses involving static variables.
++ (2) static variables with different addresses cannot conflict.
++
++ Nice to notice that varying addresses cannot conflict with fp if no
++ local variables had their addresses taken, but that's too hard now. */
++
++
++ static int
++ memrefs_conflict_p (xsize, x, ysize, y, c)
++ register rtx x, y;
++ int xsize, ysize;
++ HOST_WIDE_INT c;
++ {
++ if (GET_CODE (x) == HIGH)
++ x = XEXP (x, 0);
++ else if (GET_CODE (x) == LO_SUM)
++ x = XEXP (x, 1);
++ else
++ x = canon_rtx (x);
++ if (GET_CODE (y) == HIGH)
++ y = XEXP (y, 0);
++ else if (GET_CODE (y) == LO_SUM)
++ y = XEXP (y, 1);
++ else
++ y = canon_rtx (y);
++
++ if (rtx_equal_for_memref_p (x, y))
++ {
++ if (xsize == 0 || ysize == 0)
++ return 1;
++ if (c >= 0 && xsize > c)
++ return 1;
++ if (c < 0 && ysize+c > 0)
++ return 1;
++ return 0;
++ }
++
++ if (y == frame_pointer_rtx || y == hard_frame_pointer_rtx
++ || y == stack_pointer_rtx)
++ {
++ rtx t = y;
++ int tsize = ysize;
++ y = x; ysize = xsize;
++ x = t; xsize = tsize;
++ }
++
++ if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
++ || x == stack_pointer_rtx)
++ {
++ rtx y1;
++
++ if (CONSTANT_P (y))
++ return 0;
++
++ if (GET_CODE (y) == PLUS
++ && canon_rtx (XEXP (y, 0)) == x
++ && (y1 = canon_rtx (XEXP (y, 1)))
++ && GET_CODE (y1) == CONST_INT)
++ {
++ c += INTVAL (y1);
++ return (xsize == 0 || ysize == 0
++ || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
++ }
++
++ if (GET_CODE (y) == PLUS
++ && (y1 = canon_rtx (XEXP (y, 0)))
++ && CONSTANT_P (y1))
++ return 0;
++
++ return 1;
++ }
++
++ if (GET_CODE (x) == PLUS)
++ {
++ /* The fact that X is canonicalized means that this
++ PLUS rtx is canonicalized. */
++ rtx x0 = XEXP (x, 0);
++ rtx x1 = XEXP (x, 1);
++
++ if (GET_CODE (y) == PLUS)
++ {
++ /* The fact that Y is canonicalized means that this
++ PLUS rtx is canonicalized. */
++ rtx y0 = XEXP (y, 0);
++ rtx y1 = XEXP (y, 1);
++
++ if (rtx_equal_for_memref_p (x1, y1))
++ return memrefs_conflict_p (xsize, x0, ysize, y0, c);
++ if (rtx_equal_for_memref_p (x0, y0))
++ return memrefs_conflict_p (xsize, x1, ysize, y1, c);
++ if (GET_CODE (x1) == CONST_INT)
++ if (GET_CODE (y1) == CONST_INT)
++ return memrefs_conflict_p (xsize, x0, ysize, y0,
++ c - INTVAL (x1) + INTVAL (y1));
++ else
++ return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
++ else if (GET_CODE (y1) == CONST_INT)
++ return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
++
++ /* Handle case where we cannot understand iteration operators,
++ but we notice that the base addresses are distinct objects. */
++ /* ??? Is this still necessary? */
++ x = find_symbolic_term (x);
++ if (x == 0)
++ return 1;
++ y = find_symbolic_term (y);
++ if (y == 0)
++ return 1;
++ return rtx_equal_for_memref_p (x, y);
++ }
++ else if (GET_CODE (x1) == CONST_INT)
++ return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
++ }
++ else if (GET_CODE (y) == PLUS)
++ {
++ /* The fact that Y is canonicalized means that this
++ PLUS rtx is canonicalized. */
++ rtx y0 = XEXP (y, 0);
++ rtx y1 = XEXP (y, 1);
++
++ if (GET_CODE (y1) == CONST_INT)
++ return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
++ else
++ return 1;
++ }
++
++ if (GET_CODE (x) == GET_CODE (y))
++ switch (GET_CODE (x))
++ {
++ case MULT:
++ {
++ /* Handle cases where we expect the second operands to be the
++ same, and check only whether the first operand would conflict
++ or not. */
++ rtx x0, y0;
++ rtx x1 = canon_rtx (XEXP (x, 1));
++ rtx y1 = canon_rtx (XEXP (y, 1));
++ if (! rtx_equal_for_memref_p (x1, y1))
++ return 1;
++ x0 = canon_rtx (XEXP (x, 0));
++ y0 = canon_rtx (XEXP (y, 0));
++ if (rtx_equal_for_memref_p (x0, y0))
++ return (xsize == 0 || ysize == 0
++ || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
++
++ /* Can't properly adjust our sizes. */
++ if (GET_CODE (x1) != CONST_INT)
++ return 1;
++ xsize /= INTVAL (x1);
++ ysize /= INTVAL (x1);
++ c /= INTVAL (x1);
++ return memrefs_conflict_p (xsize, x0, ysize, y0, c);
++ }
++ }
++
++ /* Treat an access through an AND (e.g. a subword access on an Alpha)
++ as an access with indeterminate size. */
++ if (GET_CODE (x) == AND && GET_CODE (XEXP (x, 1)) == CONST_INT)
++ return memrefs_conflict_p (0, XEXP (x, 0), ysize, y, c);
++ if (GET_CODE (y) == AND && GET_CODE (XEXP (y, 1)) == CONST_INT)
++ return memrefs_conflict_p (xsize, x, 0, XEXP (y, 0), c);
++
++ if (CONSTANT_P (x))
++ {
++ if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
++ {
++ c += (INTVAL (y) - INTVAL (x));
++ return (xsize == 0 || ysize == 0
++ || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
++ }
++
++ if (GET_CODE (x) == CONST)
++ {
++ if (GET_CODE (y) == CONST)
++ return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
++ ysize, canon_rtx (XEXP (y, 0)), c);
++ else
++ return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
++ ysize, y, c);
++ }
++ if (GET_CODE (y) == CONST)
++ return memrefs_conflict_p (xsize, x, ysize,
++ canon_rtx (XEXP (y, 0)), c);
++
++ if (CONSTANT_P (y))
++ return (rtx_equal_for_memref_p (x, y)
++ && (xsize == 0 || ysize == 0
++ || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)));
++
++ return 1;
++ }
++ return 1;
++ }
++
++ /* Functions to compute memory dependencies.
++
++ Since we process the insns in execution order, we can build tables
++ to keep track of what registers are fixed (and not aliased), what registers
++ are varying in known ways, and what registers are varying in unknown
++ ways.
++
++ If both memory references are volatile, then there must always be a
++ dependence between the two references, since their order can not be
++ changed. A volatile and non-volatile reference can be interchanged
++ though.
++
++ A MEM_IN_STRUCT reference at a non-QImode varying address can never
++ conflict with a non-MEM_IN_STRUCT reference at a fixed address. We must
++ allow QImode aliasing because the ANSI C standard allows character
++ pointers to alias anything. We are assuming that characters are
++ always QImode here. */
++
++ /* Read dependence: X is read after read in MEM takes place. There can
++ only be a dependence here if both reads are volatile. */
++
++ int
++ read_dependence (mem, x)
++ rtx mem;
++ rtx x;
++ {
++ return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
++ }
++
++ /* True dependence: X is read after store in MEM takes place. */
++
++ int
++ true_dependence (mem, mem_mode, x, varies)
++ rtx mem;
++ enum machine_mode mem_mode;
++ rtx x;
++ int (*varies)();
++ {
++ rtx x_addr, mem_addr;
++
++ if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
++ return 1;
++
++ x_addr = XEXP (x, 0);
++ mem_addr = XEXP (mem, 0);
++
++ if (flag_alias_check && ! base_alias_check (x_addr, mem_addr))
++ return 0;
++
++ /* If X is an unchanging read, then it can't possibly conflict with any
++ non-unchanging store. It may conflict with an unchanging write though,
++ because there may be a single store to this address to initialize it.
++ Just fall through to the code below to resolve the case where we have
++ both an unchanging read and an unchanging write. This won't handle all
++ cases optimally, but the possible performance loss should be
++ negligible. */
++ if (RTX_UNCHANGING_P (x) && ! RTX_UNCHANGING_P (mem))
++ return 0;
++
++ x_addr = canon_rtx (x_addr);
++ mem_addr = canon_rtx (mem_addr);
++ if (mem_mode == VOIDmode)
++ mem_mode = GET_MODE (mem);
++
++ if (! memrefs_conflict_p (mem_mode, mem_addr, SIZE_FOR_MODE (x), x_addr, 0))
++ return 0;
++
++ /* If both references are struct references, or both are not, nothing
++ is known about aliasing.
++
++ If either reference is QImode or BLKmode, ANSI C permits aliasing.
++
++ If both addresses are constant, or both are not, nothing is known
++ about aliasing. */
++ if (MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (mem)
++ || mem_mode == QImode || mem_mode == BLKmode
++ || GET_MODE (x) == QImode || GET_MODE (mem) == BLKmode
++ || varies (x_addr) == varies (mem_addr))
++ return 1;
++
++ /* One memory reference is to a constant address, one is not.
++ One is to a structure, the other is not.
++
++ If either memory reference is a variable structure the other is a
++ fixed scalar and there is no aliasing. */
++ if ((MEM_IN_STRUCT_P (mem) && varies (mem_addr))
++ || (MEM_IN_STRUCT_P (x) && varies (x)))
++ return 0;
++
++ return 1;
++ }
++
++ /* Anti dependence: X is written after read in MEM takes place. */
++
++ int
++ anti_dependence (mem, x)
++ rtx mem;
++ rtx x;
++ {
++ if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
++ return 1;
++
++ if (flag_alias_check && ! base_alias_check (XEXP (x, 0), XEXP (mem, 0)))
++ return 0;
++
++ /* If MEM is an unchanging read, then it can't possibly conflict with
++ the store to X, because there is at most one store to MEM, and it must
++ have occurred somewhere before MEM. */
++ x = canon_rtx (x);
++ mem = canon_rtx (mem);
++ if (RTX_UNCHANGING_P (mem))
++ return 0;
++
++ return (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
++ SIZE_FOR_MODE (x), XEXP (x, 0), 0)
++ && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
++ && GET_MODE (mem) != QImode
++ && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
++ && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
++ && GET_MODE (x) != QImode
++ && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem)));
++ }
++
++ /* Output dependence: X is written after store in MEM takes place. */
++
++ int
++ output_dependence (mem, x)
++ register rtx mem;
++ register rtx x;
++ {
++ if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
++ return 1;
++
++ if (flag_alias_check && !base_alias_check (XEXP (x, 0), XEXP (mem, 0)))
++ return 0;
++
++ x = canon_rtx (x);
++ mem = canon_rtx (mem);
++ return (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
++ SIZE_FOR_MODE (x), XEXP (x, 0), 0)
++ && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
++ && GET_MODE (mem) != QImode
++ && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
++ && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
++ && GET_MODE (x) != QImode
++ && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem)));
++ }
++
++ void
++ init_alias_analysis ()
++ {
++ int maxreg = max_reg_num ();
++ int changed;
++ register int i;
++ register rtx insn;
++ rtx note;
++ rtx set;
++
++ reg_known_value_size = maxreg;
++
++ reg_known_value
++ = (rtx *) oballoc ((maxreg - FIRST_PSEUDO_REGISTER) * sizeof (rtx))
++ - FIRST_PSEUDO_REGISTER;
++ reg_known_equiv_p =
++ oballoc (maxreg - FIRST_PSEUDO_REGISTER) - FIRST_PSEUDO_REGISTER;
++ bzero ((char *) (reg_known_value + FIRST_PSEUDO_REGISTER),
++ (maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx));
++ bzero (reg_known_equiv_p + FIRST_PSEUDO_REGISTER,
++ (maxreg - FIRST_PSEUDO_REGISTER) * sizeof (char));
++
++ if (flag_alias_check)
++ {
++ /* Overallocate reg_base_value to allow some growth during loop
++ optimization. Loop unrolling can create a large number of
++ registers. */
++ reg_base_value_size = maxreg * 2;
++ reg_base_value = (rtx *)oballoc (reg_base_value_size * sizeof (rtx));
++ reg_seen = (char *)alloca (reg_base_value_size);
++ bzero (reg_base_value, reg_base_value_size * sizeof (rtx));
++ bzero (reg_seen, reg_base_value_size);
++
++ /* Mark all hard registers which may contain an address.
++ The stack, frame and argument pointers may contain an address.
++ An argument register which can hold a Pmode value may contain
++ an address even if it is not in BASE_REGS.
++
++ The address expression is VOIDmode for an argument and
++ Pmode for other registers. */
++ #ifndef OUTGOING_REGNO
++ #define OUTGOING_REGNO(N) N
++ #endif
++ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
++ /* Check whether this register can hold an incoming pointer
++ argument. FUNCTION_ARG_REGNO_P tests outgoing register
++ numbers, so translate if necessary due to register windows. */
++ if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i)) && HARD_REGNO_MODE_OK (i, Pmode))
++ reg_base_value[i] = gen_rtx (ADDRESS, VOIDmode,
++ gen_rtx (REG, Pmode, i));
++
++ reg_base_value[STACK_POINTER_REGNUM]
++ = gen_rtx (ADDRESS, Pmode, stack_pointer_rtx);
++ reg_base_value[ARG_POINTER_REGNUM]
++ = gen_rtx (ADDRESS, Pmode, arg_pointer_rtx);
++ reg_base_value[FRAME_POINTER_REGNUM]
++ = gen_rtx (ADDRESS, Pmode, frame_pointer_rtx);
++ reg_base_value[HARD_FRAME_POINTER_REGNUM]
++ = gen_rtx (ADDRESS, Pmode, hard_frame_pointer_rtx);
++ }
++
++ copying_arguments = 1;
++ /* Fill in the entries with known constant values. */
++ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
++ {
++ if (flag_alias_check && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
++ {
++ /* If this insn has a noalias note, process it, Otherwise,
++ scan for sets. A simple set will have no side effects
++ which could change the base value of any other register. */
++ rtx noalias_note;
++ if (GET_CODE (PATTERN (insn)) == SET
++ && (noalias_note = find_reg_note (insn, REG_NOALIAS, NULL_RTX)))
++ record_set (SET_DEST (PATTERN (insn)), 0);
++ else
++ note_stores (PATTERN (insn), record_set);
++ }
++ else if (GET_CODE (insn) == NOTE
++ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_BEG)
++ copying_arguments = 0;
++
++ if ((set = single_set (insn)) != 0
++ && GET_CODE (SET_DEST (set)) == REG
++ && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
++ && (((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
++ && reg_n_sets[REGNO (SET_DEST (set))] == 1)
++ || (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != 0)
++ && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
++ {
++ int regno = REGNO (SET_DEST (set));
++ reg_known_value[regno] = XEXP (note, 0);
++ reg_known_equiv_p[regno] = REG_NOTE_KIND (note) == REG_EQUIV;
++ }
++ }
++
++ /* Fill in the remaining entries. */
++ for (i = FIRST_PSEUDO_REGISTER; i < maxreg; i++)
++ if (reg_known_value[i] == 0)
++ reg_known_value[i] = regno_reg_rtx[i];
++
++ if (! flag_alias_check)
++ return;
++
++ /* Simplify the reg_base_value array so that no register refers to
++ another register, except to special registers indirectly through
++ ADDRESS expressions.
++
++ In theory this loop can take as long as O(registers^2), but unless
++ there are very long dependency chains it will run in close to linear
++ time. */
++ do
++ {
++ changed = 0;
++ for (i = FIRST_PSEUDO_REGISTER; i < reg_base_value_size; i++)
++ {
++ rtx base = reg_base_value[i];
++ if (base && GET_CODE (base) == REG)
++ {
++ int base_regno = REGNO (base);
++ if (base_regno == i) /* register set from itself */
++ reg_base_value[i] = 0;
++ else
++ reg_base_value[i] = reg_base_value[base_regno];
++ changed = 1;
++ }
++ }
++ }
++ while (changed);
++
++ reg_seen = 0;
++ }
++
++ void
++ end_alias_analysis ()
++ {
++ reg_known_value = 0;
++ reg_base_value = 0;
++ reg_base_value_size = 0;
++ }
+diff -rcp2N gcc-2.7.2.2/c-decl.c g77-new/c-decl.c
+*** gcc-2.7.2.2/c-decl.c Fri Oct 27 05:44:43 1995
+--- g77-new/c-decl.c Sun Aug 10 18:46:24 1997
+*************** init_decl_processing ()
+*** 3207,3210 ****
+--- 3207,3223 ----
+ builtin_function ("__builtin_cosl", ldouble_ftype_ldouble,
+ BUILT_IN_COS, "cosl");
++ builtin_function ("__builtin_setjmp",
++ build_function_type (integer_type_node,
++ tree_cons (NULL_TREE,
++ ptr_type_node, endlink)),
++ BUILT_IN_SETJMP, NULL_PTR);
++ builtin_function ("__builtin_longjmp",
++ build_function_type
++ (void_type_node,
++ tree_cons (NULL, ptr_type_node,
++ tree_cons (NULL_TREE,
++ integer_type_node,
++ endlink))),
++ BUILT_IN_LONGJMP, NULL_PTR);
+
+ /* In an ANSI C program, it is okay to supply built-in meanings
+*************** grokdeclarator (declarator, declspecs, d
+*** 4049,4052 ****
+--- 4062,4066 ----
+ int volatilep;
+ int inlinep;
++ int restrictp;
+ int explicit_int = 0;
+ int explicit_char = 0;
+*************** grokdeclarator (declarator, declspecs, d
+*** 4342,4349 ****
+--- 4356,4366 ----
+ volatilep = !! (specbits & 1 << (int) RID_VOLATILE) + TYPE_VOLATILE (type);
+ inlinep = !! (specbits & (1 << (int) RID_INLINE));
++ restrictp = !! (specbits & (1 << (int) RID_RESTRICT));
+ if (constp > 1)
+ pedwarn ("duplicate `const'");
+ if (volatilep > 1)
+ pedwarn ("duplicate `volatile'");
++ if (restrictp)
++ error ("`restrict' used in non-parameter or non-pointer type declaration");
+ if (! flag_gen_aux_info && (TYPE_READONLY (type) || TYPE_VOLATILE (type)))
+ type = TYPE_MAIN_VARIANT (type);
+*************** grokdeclarator (declarator, declspecs, d
+*** 4693,4696 ****
+--- 4710,4715 ----
+ else if (TREE_VALUE (typemodlist) == ridpointers[(int) RID_VOLATILE])
+ volatilep++;
++ else if (TREE_VALUE (typemodlist) == ridpointers[(int) RID_RESTRICT])
++ restrictp++;
+ else if (!erred)
+ {
+*************** grokdeclarator (declarator, declspecs, d
+*** 4703,4706 ****
+--- 4722,4727 ----
+ if (volatilep > 1)
+ pedwarn ("duplicate `volatile'");
++ if (restrictp > 1)
++ pedwarn ("duplicate `restrict'");
+ }
+
+*************** grokdeclarator (declarator, declspecs, d
+*** 4844,4847 ****
+--- 4865,4875 ----
+ }
+
++ if (restrictp)
++ {
++ if (TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
++ error ("`restrict' applied to non-pointer");
++ DECL_RESTRICT (decl) = 1;
++ }
++
+ DECL_ARG_TYPE_AS_WRITTEN (decl) = type_as_written;
+ }
+*************** start_struct (code, name)
+*** 5365,5368 ****
+--- 5393,5397 ----
+ pushtag (name, ref);
+ C_TYPE_BEING_DEFINED (ref) = 1;
++ TYPE_PACKED (ref) = flag_pack_struct;
+ return ref;
+ }
+*************** start_enum (name)
+*** 5806,5809 ****
+--- 5835,5841 ----
+ enum_overflow = 0;
+
++ if (flag_short_enums)
++ TYPE_PACKED (enumtype) = 1;
++
+ return enumtype;
+ }
+*************** finish_enum (enumtype, values, attribute
+*** 5862,5867 ****
+ precision = MAX (lowprec, highprec);
+
+! if (flag_short_enums || TYPE_PACKED (enumtype)
+! || precision > TYPE_PRECISION (integer_type_node))
+ /* Use the width of the narrowest normal C type which is wide enough. */
+ TYPE_PRECISION (enumtype) = TYPE_PRECISION (type_for_size (precision, 1));
+--- 5894,5898 ----
+ precision = MAX (lowprec, highprec);
+
+! if (TYPE_PACKED (enumtype) || precision > TYPE_PRECISION (integer_type_node))
+ /* Use the width of the narrowest normal C type which is wide enough. */
+ TYPE_PRECISION (enumtype) = TYPE_PRECISION (type_for_size (precision, 1));
+diff -rcp2N gcc-2.7.2.2/c-gperf.h g77-new/c-gperf.h
+*** gcc-2.7.2.2/c-gperf.h Fri Mar 4 14:15:53 1994
+--- g77-new/c-gperf.h Mon Aug 11 02:58:47 1997
+***************
+*** 1,15 ****
+ /* C code produced by gperf version 2.5 (GNU C++ version) */
+! /* Command-line: gperf -p -j1 -i 1 -g -o -t -G -N is_reserved_word -k1,3,$ c-parse.gperf */
+ struct resword { char *name; short token; enum rid rid; };
+
+! #define TOTAL_KEYWORDS 79
+ #define MIN_WORD_LENGTH 2
+ #define MAX_WORD_LENGTH 20
+! #define MIN_HASH_VALUE 10
+! #define MAX_HASH_VALUE 144
+! /* maximum key range = 135, duplicates = 0 */
+
+ #ifdef __GNUC__
+! __inline
+ #endif
+ static unsigned int
+--- 1,16 ----
+ /* C code produced by gperf version 2.5 (GNU C++ version) */
+! /* Command-line: gperf -p -j1 -i 1 -g -o -t -G -N is_reserved_word -k1,3,$ ../g77-new/c-parse.gperf */
+! /* Command-line: gperf -p -j1 -i 1 -g -o -t -N is_reserved_word -k1,3,$ c-parse.gperf */
+ struct resword { char *name; short token; enum rid rid; };
+
+! #define TOTAL_KEYWORDS 81
+ #define MIN_WORD_LENGTH 2
+ #define MAX_WORD_LENGTH 20
+! #define MIN_HASH_VALUE 11
+! #define MAX_HASH_VALUE 157
+! /* maximum key range = 147, duplicates = 0 */
+
+ #ifdef __GNUC__
+! inline
+ #endif
+ static unsigned int
+*************** hash (str, len)
+*** 20,36 ****
+ static unsigned char asso_values[] =
+ {
+! 145, 145, 145, 145, 145, 145, 145, 145, 145, 145,
+! 145, 145, 145, 145, 145, 145, 145, 145, 145, 145,
+! 145, 145, 145, 145, 145, 145, 145, 145, 145, 145,
+! 145, 145, 145, 145, 145, 145, 145, 145, 145, 145,
+! 145, 145, 145, 145, 145, 145, 145, 145, 145, 145,
+! 145, 145, 145, 145, 145, 145, 145, 145, 145, 145,
+! 145, 145, 145, 145, 25, 145, 145, 145, 145, 145,
+! 145, 145, 145, 145, 145, 145, 145, 145, 145, 145,
+! 145, 145, 145, 145, 145, 145, 145, 145, 145, 145,
+! 145, 145, 145, 145, 145, 1, 145, 46, 8, 15,
+! 61, 6, 36, 48, 3, 5, 145, 18, 63, 25,
+! 29, 76, 1, 145, 13, 2, 1, 51, 37, 9,
+! 9, 1, 3, 145, 145, 145, 145, 145,
+ };
+ register int hval = len;
+--- 21,37 ----
+ static unsigned char asso_values[] =
+ {
+! 158, 158, 158, 158, 158, 158, 158, 158, 158, 158,
+! 158, 158, 158, 158, 158, 158, 158, 158, 158, 158,
+! 158, 158, 158, 158, 158, 158, 158, 158, 158, 158,
+! 158, 158, 158, 158, 158, 158, 158, 158, 158, 158,
+! 158, 158, 158, 158, 158, 158, 158, 158, 158, 158,
+! 158, 158, 158, 158, 158, 158, 158, 158, 158, 158,
+! 158, 158, 158, 158, 2, 158, 158, 158, 158, 158,
+! 158, 158, 158, 158, 158, 158, 158, 158, 158, 158,
+! 158, 158, 158, 158, 158, 158, 158, 158, 158, 158,
+! 158, 158, 158, 158, 158, 1, 158, 18, 1, 58,
+! 56, 6, 44, 64, 13, 45, 158, 4, 26, 68,
+! 2, 74, 1, 158, 2, 13, 1, 33, 48, 5,
+! 5, 3, 12, 158, 158, 158, 158, 158,
+ };
+ register int hval = len;
+*************** hash (str, len)
+*** 44,47 ****
+--- 45,49 ----
+ case 1:
+ hval += asso_values[str[0]];
++ break;
+ }
+ return hval + asso_values[str[len - 1]];
+*************** hash (str, len)
+*** 50,166 ****
+ static struct resword wordlist[] =
+ {
+! {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",},
+! {"",},
+! {"int", TYPESPEC, RID_INT},
+! {"",}, {"",},
+! {"__typeof__", TYPEOF, NORID},
+! {"__signed__", TYPESPEC, RID_SIGNED},
+! {"__imag__", IMAGPART, NORID},
+! {"switch", SWITCH, NORID},
+! {"__inline__", SCSPEC, RID_INLINE},
+! {"else", ELSE, NORID},
+! {"__iterator__", SCSPEC, RID_ITERATOR},
+! {"__inline", SCSPEC, RID_INLINE},
+! {"__extension__", EXTENSION, NORID},
+! {"struct", STRUCT, NORID},
+! {"__real__", REALPART, NORID},
+! {"__const", TYPE_QUAL, RID_CONST},
+! {"while", WHILE, NORID},
+! {"__const__", TYPE_QUAL, RID_CONST},
+! {"case", CASE, NORID},
+! {"__complex__", TYPESPEC, RID_COMPLEX},
+! {"__iterator", SCSPEC, RID_ITERATOR},
+! {"bycopy", TYPE_QUAL, RID_BYCOPY},
+! {"",}, {"",}, {"",},
+! {"__complex", TYPESPEC, RID_COMPLEX},
+! {"",},
+! {"in", TYPE_QUAL, RID_IN},
+! {"break", BREAK, NORID},
+! {"@defs", DEFS, NORID},
+! {"",}, {"",}, {"",},
+! {"extern", SCSPEC, RID_EXTERN},
+! {"if", IF, NORID},
+! {"typeof", TYPEOF, NORID},
+! {"typedef", SCSPEC, RID_TYPEDEF},
+! {"__typeof", TYPEOF, NORID},
+! {"sizeof", SIZEOF, NORID},
+! {"",},
+! {"return", RETURN, NORID},
+! {"const", TYPE_QUAL, RID_CONST},
+! {"__volatile__", TYPE_QUAL, RID_VOLATILE},
+! {"@private", PRIVATE, NORID},
+! {"@selector", SELECTOR, NORID},
+! {"__volatile", TYPE_QUAL, RID_VOLATILE},
+! {"__asm__", ASM_KEYWORD, NORID},
+! {"",}, {"",},
+! {"continue", CONTINUE, NORID},
+! {"__alignof__", ALIGNOF, NORID},
+! {"__imag", IMAGPART, NORID},
+! {"__attribute__", ATTRIBUTE, NORID},
+! {"",}, {"",},
+! {"__attribute", ATTRIBUTE, NORID},
+! {"for", FOR, NORID},
+! {"",},
+! {"@encode", ENCODE, NORID},
+! {"id", OBJECTNAME, RID_ID},
+! {"static", SCSPEC, RID_STATIC},
+! {"@interface", INTERFACE, NORID},
+! {"",},
+! {"__signed", TYPESPEC, RID_SIGNED},
+! {"",},
+! {"__label__", LABEL, NORID},
+! {"",}, {"",},
+! {"__asm", ASM_KEYWORD, NORID},
+! {"char", TYPESPEC, RID_CHAR},
+! {"",},
+! {"inline", SCSPEC, RID_INLINE},
+! {"out", TYPE_QUAL, RID_OUT},
+! {"register", SCSPEC, RID_REGISTER},
+! {"__real", REALPART, NORID},
+! {"short", TYPESPEC, RID_SHORT},
+! {"",},
+! {"enum", ENUM, NORID},
+! {"inout", TYPE_QUAL, RID_INOUT},
+! {"",},
+! {"oneway", TYPE_QUAL, RID_ONEWAY},
+! {"union", UNION, NORID},
+! {"",},
+! {"__alignof", ALIGNOF, NORID},
+! {"",},
+! {"@implementation", IMPLEMENTATION, NORID},
+! {"",},
+! {"@class", CLASS, NORID},
+! {"",},
+! {"@public", PUBLIC, NORID},
+! {"asm", ASM_KEYWORD, NORID},
+! {"",}, {"",}, {"",}, {"",}, {"",},
+! {"default", DEFAULT, NORID},
+! {"",},
+! {"void", TYPESPEC, RID_VOID},
+! {"",},
+! {"@protected", PROTECTED, NORID},
+! {"@protocol", PROTOCOL, NORID},
+! {"",}, {"",}, {"",},
+! {"volatile", TYPE_QUAL, RID_VOLATILE},
+! {"",}, {"",},
+! {"signed", TYPESPEC, RID_SIGNED},
+! {"float", TYPESPEC, RID_FLOAT},
+! {"@end", END, NORID},
+! {"",}, {"",},
+! {"unsigned", TYPESPEC, RID_UNSIGNED},
+! {"@compatibility_alias", ALIAS, NORID},
+! {"double", TYPESPEC, RID_DOUBLE},
+! {"",}, {"",},
+! {"auto", SCSPEC, RID_AUTO},
+! {"",},
+! {"goto", GOTO, NORID},
+! {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",},
+! {"do", DO, NORID},
+! {"",}, {"",}, {"",}, {"",},
+! {"long", TYPESPEC, RID_LONG},
+ };
+
+ #ifdef __GNUC__
+! __inline
+ #endif
+ struct resword *
+--- 52,167 ----
+ static struct resword wordlist[] =
+ {
+! {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",},
+! {"",}, {"",},
+! {"return", RETURN, NORID},
+! {"__real__", REALPART, NORID},
+! {"__typeof__", TYPEOF, NORID},
+! {"__restrict", TYPE_QUAL, RID_RESTRICT},
+! {"extern", SCSPEC, RID_EXTERN},
+! {"break", BREAK, NORID},
+! {"@encode", ENCODE, NORID},
+! {"@private", PRIVATE, NORID},
+! {"@selector", SELECTOR, NORID},
+! {"@interface", INTERFACE, NORID},
+! {"__extension__", EXTENSION, NORID},
+! {"struct", STRUCT, NORID},
+! {"",},
+! {"restrict", TYPE_QUAL, RID_RESTRICT},
+! {"__signed__", TYPESPEC, RID_SIGNED},
+! {"@defs", DEFS, NORID},
+! {"__asm__", ASM_KEYWORD, NORID},
+! {"",},
+! {"else", ELSE, NORID},
+! {"",},
+! {"__alignof__", ALIGNOF, NORID},
+! {"",},
+! {"__attribute__", ATTRIBUTE, NORID},
+! {"",},
+! {"__real", REALPART, NORID},
+! {"__attribute", ATTRIBUTE, NORID},
+! {"__label__", LABEL, NORID},
+! {"",},
+! {"@protocol", PROTOCOL, NORID},
+! {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",},
+! {"@class", CLASS, NORID},
+! {"",},
+! {"in", TYPE_QUAL, RID_IN},
+! {"int", TYPESPEC, RID_INT},
+! {"for", FOR, NORID},
+! {"typeof", TYPEOF, NORID},
+! {"typedef", SCSPEC, RID_TYPEDEF},
+! {"__typeof", TYPEOF, NORID},
+! {"__imag__", IMAGPART, NORID},
+! {"",},
+! {"__inline__", SCSPEC, RID_INLINE},
+! {"__iterator", SCSPEC, RID_ITERATOR},
+! {"__iterator__", SCSPEC, RID_ITERATOR},
+! {"__inline", SCSPEC, RID_INLINE},
+! {"while", WHILE, NORID},
+! {"__volatile__", TYPE_QUAL, RID_VOLATILE},
+! {"",},
+! {"@end", END, NORID},
+! {"__volatile", TYPE_QUAL, RID_VOLATILE},
+! {"const", TYPE_QUAL, RID_CONST},
+! {"__const", TYPE_QUAL, RID_CONST},
+! {"bycopy", TYPE_QUAL, RID_BYCOPY},
+! {"__const__", TYPE_QUAL, RID_CONST},
+! {"@protected", PROTECTED, NORID},
+! {"__complex__", TYPESPEC, RID_COMPLEX},
+! {"__alignof", ALIGNOF, NORID},
+! {"__complex", TYPESPEC, RID_COMPLEX},
+! {"continue", CONTINUE, NORID},
+! {"sizeof", SIZEOF, NORID},
+! {"register", SCSPEC, RID_REGISTER},
+! {"switch", SWITCH, NORID},
+! {"__signed", TYPESPEC, RID_SIGNED},
+! {"out", TYPE_QUAL, RID_OUT},
+! {"",},
+! {"case", CASE, NORID},
+! {"char", TYPESPEC, RID_CHAR},
+! {"inline", SCSPEC, RID_INLINE},
+! {"",},
+! {"union", UNION, NORID},
+! {"",},
+! {"@implementation", IMPLEMENTATION, NORID},
+! {"volatile", TYPE_QUAL, RID_VOLATILE},
+! {"oneway", TYPE_QUAL, RID_ONEWAY},
+! {"",},
+! {"if", IF, NORID},
+! {"__asm", ASM_KEYWORD, NORID},
+! {"short", TYPESPEC, RID_SHORT},
+! {"",},
+! {"static", SCSPEC, RID_STATIC},
+! {"long", TYPESPEC, RID_LONG},
+! {"auto", SCSPEC, RID_AUTO},
+! {"",}, {"",},
+! {"@public", PUBLIC, NORID},
+! {"double", TYPESPEC, RID_DOUBLE},
+! {"",},
+! {"id", OBJECTNAME, RID_ID},
+! {"",}, {"",}, {"",}, {"",},
+! {"default", DEFAULT, NORID},
+! {"@compatibility_alias", ALIAS, NORID},
+! {"unsigned", TYPESPEC, RID_UNSIGNED},
+! {"enum", ENUM, NORID},
+! {"",}, {"",}, {"",}, {"",},
+! {"__imag", IMAGPART, NORID},
+! {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",},
+! {"float", TYPESPEC, RID_FLOAT},
+! {"inout", TYPE_QUAL, RID_INOUT},
+! {"",}, {"",}, {"",}, {"",}, {"",}, {"",},
+! {"do", DO, NORID},
+! {"",}, {"",}, {"",}, {"",}, {"",}, {"",},
+! {"signed", TYPESPEC, RID_SIGNED},
+! {"",}, {"",}, {"",},
+! {"goto", GOTO, NORID},
+! {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",},
+! {"void", TYPESPEC, RID_VOID},
+! {"",}, {"",}, {"",},
+! {"asm", ASM_KEYWORD, NORID},
+ };
+
+ #ifdef __GNUC__
+! inline
+ #endif
+ struct resword *
+diff -rcp2N gcc-2.7.2.2/c-lex.c g77-new/c-lex.c
+*** gcc-2.7.2.2/c-lex.c Thu Jun 15 07:11:39 1995
+--- g77-new/c-lex.c Sun Aug 10 18:46:49 1997
+*************** init_lex ()
+*** 173,176 ****
+--- 173,177 ----
+ ridpointers[(int) RID_CONST] = get_identifier ("const");
+ ridpointers[(int) RID_VOLATILE] = get_identifier ("volatile");
++ ridpointers[(int) RID_RESTRICT] = get_identifier ("restrict");
+ ridpointers[(int) RID_AUTO] = get_identifier ("auto");
+ ridpointers[(int) RID_STATIC] = get_identifier ("static");
+*************** init_lex ()
+*** 206,209 ****
+--- 207,211 ----
+ UNSET_RESERVED_WORD ("iterator");
+ UNSET_RESERVED_WORD ("complex");
++ UNSET_RESERVED_WORD ("restrict");
+ }
+ if (flag_no_asm)
+*************** init_lex ()
+*** 214,217 ****
+--- 216,220 ----
+ UNSET_RESERVED_WORD ("iterator");
+ UNSET_RESERVED_WORD ("complex");
++ UNSET_RESERVED_WORD ("restrict");
+ }
+ }
+*************** yylex ()
+*** 1433,1437 ****
+ /* Create a node with determined type and value. */
+ if (imag)
+! yylval.ttype = build_complex (convert (type, integer_zero_node),
+ build_real (type, value));
+ else
+--- 1436,1441 ----
+ /* Create a node with determined type and value. */
+ if (imag)
+! yylval.ttype = build_complex (NULL_TREE,
+! convert (type, integer_zero_node),
+ build_real (type, value));
+ else
+*************** yylex ()
+*** 1624,1629 ****
+ <= TYPE_PRECISION (integer_type_node))
+ yylval.ttype
+! = build_complex (integer_zero_node,
+! convert (integer_type_node, yylval.ttype));
+ else
+ error ("complex integer constant is too wide for `complex int'");
+--- 1628,1634 ----
+ <= TYPE_PRECISION (integer_type_node))
+ yylval.ttype
+! = build_complex (NULL_TREE, integer_zero_node,
+! convert (integer_type_node,
+! yylval.ttype));
+ else
+ error ("complex integer constant is too wide for `complex int'");
+diff -rcp2N gcc-2.7.2.2/c-lex.h g77-new/c-lex.h
+*** gcc-2.7.2.2/c-lex.h Thu Jun 15 07:12:22 1995
+--- g77-new/c-lex.h Sun Aug 10 18:10:55 1997
+*************** enum rid
+*** 43,47 ****
+ RID_VOLATILE,
+ RID_INLINE,
+! RID_NOALIAS,
+ RID_ITERATOR,
+ RID_COMPLEX,
+--- 43,47 ----
+ RID_VOLATILE,
+ RID_INLINE,
+! RID_RESTRICT,
+ RID_ITERATOR,
+ RID_COMPLEX,
+diff -rcp2N gcc-2.7.2.2/c-parse.gperf g77-new/c-parse.gperf
+*** gcc-2.7.2.2/c-parse.gperf Fri Apr 9 19:00:44 1993
+--- g77-new/c-parse.gperf Sun Aug 10 18:10:55 1997
+*************** __label__, LABEL, NORID
+*** 36,39 ****
+--- 36,40 ----
+ __real, REALPART, NORID
+ __real__, REALPART, NORID
++ __restrict, TYPE_QUAL, RID_RESTRICT
+ __signed, TYPESPEC, RID_SIGNED
+ __signed__, TYPESPEC, RID_SIGNED
+*************** oneway, TYPE_QUAL, RID_ONEWAY
+*** 69,72 ****
+--- 70,74 ----
+ out, TYPE_QUAL, RID_OUT
+ register, SCSPEC, RID_REGISTER
++ restrict, TYPE_QUAL, RID_RESTRICT
+ return, RETURN, NORID
+ short, TYPESPEC, RID_SHORT
+diff -rcp2N gcc-2.7.2.2/c-typeck.c g77-new/c-typeck.c
+*** gcc-2.7.2.2/c-typeck.c Thu Feb 20 19:24:11 1997
+--- g77-new/c-typeck.c Sun Aug 10 18:46:29 1997
+*************** pointer_int_sum (resultcode, ptrop, into
+*** 2681,2686 ****
+ so the multiply won't overflow spuriously. */
+
+! if (TYPE_PRECISION (TREE_TYPE (intop)) != POINTER_SIZE)
+! intop = convert (type_for_size (POINTER_SIZE, 0), intop);
+
+ /* Replace the integer argument with a suitable product by the object size.
+--- 2681,2688 ----
+ so the multiply won't overflow spuriously. */
+
+! if (TYPE_PRECISION (TREE_TYPE (intop)) != TYPE_PRECISION (sizetype)
+! || TREE_UNSIGNED (TREE_TYPE (intop)) != TREE_UNSIGNED (sizetype))
+! intop = convert (type_for_size (TYPE_PRECISION (sizetype),
+! TREE_UNSIGNED (sizetype)), intop);
+
+ /* Replace the integer argument with a suitable product by the object size.
+diff -rcp2N gcc-2.7.2.2/calls.c g77-new/calls.c
+*** gcc-2.7.2.2/calls.c Thu Oct 26 21:53:43 1995
+--- g77-new/calls.c Sun Aug 10 18:46:16 1997
+*************** expand_call (exp, target, ignore)
+*** 564,567 ****
+--- 564,569 ----
+ /* Nonzero if it is plausible that this is a call to alloca. */
+ int may_be_alloca;
++ /* Nonzero if this is a call to malloc or a related function. */
++ int is_malloc;
+ /* Nonzero if this is a call to setjmp or a related function. */
+ int returns_twice;
+*************** expand_call (exp, target, ignore)
+*** 741,745 ****
+ if (stack_arg_under_construction || i >= 0)
+ {
+! rtx insn = NEXT_INSN (before_call), seq;
+
+ /* Look for a call in the inline function code.
+--- 743,749 ----
+ if (stack_arg_under_construction || i >= 0)
+ {
+! rtx first_insn
+! = before_call ? NEXT_INSN (before_call) : get_insns ();
+! rtx insn, seq;
+
+ /* Look for a call in the inline function code.
+*************** expand_call (exp, target, ignore)
+*** 749,753 ****
+
+ if (OUTGOING_ARGS_SIZE (DECL_SAVED_INSNS (fndecl)) == 0)
+! for (; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == CALL_INSN)
+ break;
+--- 753,757 ----
+
+ if (OUTGOING_ARGS_SIZE (DECL_SAVED_INSNS (fndecl)) == 0)
+! for (insn = first_insn; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == CALL_INSN)
+ break;
+*************** expand_call (exp, target, ignore)
+*** 781,785 ****
+ seq = get_insns ();
+ end_sequence ();
+! emit_insns_before (seq, NEXT_INSN (before_call));
+ emit_stack_restore (SAVE_BLOCK, old_stack_level, NULL_RTX);
+ }
+--- 785,789 ----
+ seq = get_insns ();
+ end_sequence ();
+! emit_insns_before (seq, first_insn);
+ emit_stack_restore (SAVE_BLOCK, old_stack_level, NULL_RTX);
+ }
+*************** expand_call (exp, target, ignore)
+*** 852,855 ****
+--- 856,860 ----
+ returns_twice = 0;
+ is_longjmp = 0;
++ is_malloc = 0;
+
+ if (name != 0 && IDENTIFIER_LENGTH (DECL_NAME (fndecl)) <= 15)
+*************** expand_call (exp, target, ignore)
+*** 891,894 ****
+--- 896,903 ----
+ && ! strcmp (tname, "longjmp"))
+ is_longjmp = 1;
++ /* Only recognize malloc when alias analysis is enabled. */
++ else if (tname[0] == 'm' && flag_alias_check
++ && ! strcmp(tname, "malloc"))
++ is_malloc = 1;
+ }
+
+*************** expand_call (exp, target, ignore)
+*** 1087,1090 ****
+--- 1096,1100 ----
+
+ store_expr (args[i].tree_value, copy, 0);
++ is_const = 0;
+
+ args[i].tree_value = build1 (ADDR_EXPR,
+*************** expand_call (exp, target, ignore)
+*** 1363,1367 ****
+ /* Now we are about to start emitting insns that can be deleted
+ if a libcall is deleted. */
+! if (is_const)
+ start_sequence ();
+
+--- 1373,1377 ----
+ /* Now we are about to start emitting insns that can be deleted
+ if a libcall is deleted. */
+! if (is_const || is_malloc)
+ start_sequence ();
+
+*************** expand_call (exp, target, ignore)
+*** 1951,1954 ****
+--- 1961,1978 ----
+ end_sequence ();
+ emit_insns (insns);
++ }
++ else if (is_malloc)
++ {
++ rtx temp = gen_reg_rtx (GET_MODE (valreg));
++ rtx last, insns;
++
++ emit_move_insn (temp, valreg);
++ last = get_last_insn ();
++ REG_NOTES (last) =
++ gen_rtx (EXPR_LIST, REG_NOALIAS, temp, REG_NOTES (last));
++ insns = get_insns ();
++ end_sequence ();
++ emit_insns (insns);
++ valreg = temp;
+ }
+
+diff -rcp2N gcc-2.7.2.2/cccp.c g77-new/cccp.c
+*** gcc-2.7.2.2/cccp.c Thu Oct 26 18:07:26 1995
+--- g77-new/cccp.c Sun Aug 10 18:45:53 1997
+*************** initialize_builtins (inp, outp)
+*** 9626,9629 ****
+--- 9626,9630 ----
+ so that it is present only when truly compiling with GNU C. */
+ /* install ((U_CHAR *) "__GNUC__", -1, T_CONST, "2", -1); */
++ install ((U_CHAR *) "__HAVE_BUILTIN_SETJMP__", -1, T_CONST, "1", -1);
+
+ if (debug_output)
+diff -rcp2N gcc-2.7.2.2/combine.c g77-new/combine.c
+*** gcc-2.7.2.2/combine.c Sun Nov 26 14:32:07 1995
+--- g77-new/combine.c Mon Jul 28 21:44:17 1997
+*************** num_sign_bit_copies (x, mode)
+*** 7326,7329 ****
+--- 7326,7335 ----
+
+ case NEG:
++ while (GET_MODE (XEXP (x, 0)) == GET_MODE (x)
++ && GET_CODE (XEXP (x, 0)) == NEG
++ && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
++ && GET_CODE (XEXP (XEXP (x, 0), 0)) == NEG)
++ x = XEXP (XEXP (x, 0), 0); /* Speed up 961126-1.c */
++
+ /* In general, this subtracts one sign bit copy. But if the value
+ is known to be positive, the number of sign bit copies is the
+*************** distribute_notes (notes, from_insn, i3,
+*** 10648,10651 ****
+--- 10654,10658 ----
+ case REG_EQUIV:
+ case REG_NONNEG:
++ case REG_NOALIAS:
+ /* These notes say something about results of an insn. We can
+ only support them if they used to be on I3 in which case they
+diff -rcp2N gcc-2.7.2.2/config/alpha/alpha.c g77-new/config/alpha/alpha.c
+*** gcc-2.7.2.2/config/alpha/alpha.c Thu Feb 20 19:24:11 1997
+--- g77-new/config/alpha/alpha.c Thu Jul 10 20:08:47 1997
+*************** direct_return ()
+*** 1239,1243 ****
+ cross-compiler. Otherwise, use the versions in /usr/include/stamp.h. */
+
+! #if !defined(CROSS_COMPILE) && !defined(_WIN32)
+ #include <stamp.h>
+ #endif
+--- 1239,1243 ----
+ cross-compiler. Otherwise, use the versions in /usr/include/stamp.h. */
+
+! #if !defined(CROSS_COMPILE) && !defined(_WIN32) && !defined(__linux__)
+ #include <stamp.h>
+ #endif
+*************** output_prolog (file, size)
+*** 1370,1373 ****
+--- 1370,1378 ----
+
+ alpha_function_needs_gp = 0;
++ #ifdef __linux__
++ if(profile_flag) {
++ alpha_function_needs_gp = 1;
++ }
++ #endif
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if ((GET_CODE (insn) == CALL_INSN)
+diff -rcp2N gcc-2.7.2.2/config/alpha/alpha.h g77-new/config/alpha/alpha.h
+*** gcc-2.7.2.2/config/alpha/alpha.h Thu Feb 20 19:24:12 1997
+--- g77-new/config/alpha/alpha.h Sun Aug 10 19:21:39 1997
+*************** extern int target_flags;
+*** 112,116 ****
+--- 112,118 ----
+ {"", TARGET_DEFAULT | TARGET_CPU_DEFAULT} }
+
++ #ifndef TARGET_DEFAULT
+ #define TARGET_DEFAULT 3
++ #endif
+
+ #ifndef TARGET_CPU_DEFAULT
+*************** extern int target_flags;
+*** 252,255 ****
+--- 254,260 ----
+ /* No data type wants to be aligned rounder than this. */
+ #define BIGGEST_ALIGNMENT 64
++
++ /* For atomic access to objects, must have at least 32-bit alignment. */
++ #define MINIMUM_ATOMIC_ALIGNMENT 32
+
+ /* Make strings word-aligned so strcpy from constants will be faster. */
+diff -rcp2N gcc-2.7.2.2/config/alpha/alpha.md g77-new/config/alpha/alpha.md
+*** gcc-2.7.2.2/config/alpha/alpha.md Fri Oct 27 06:49:59 1995
+--- g77-new/config/alpha/alpha.md Thu Jul 10 20:08:48 1997
+***************
+*** 1746,1752 ****
+ (if_then_else:DF
+ (match_operator 3 "signed_comparison_operator"
+! [(match_operand:DF 1 "reg_or_fp0_operand" "fG,fG")
+ (match_operand:DF 2 "fp0_operand" "G,G")])
+! (float_extend:DF (match_operand:SF 4 "reg_or_fp0_operand" "fG,0"))
+ (match_operand:DF 5 "reg_or_fp0_operand" "0,fG")))]
+ "TARGET_FP"
+--- 1746,1752 ----
+ (if_then_else:DF
+ (match_operator 3 "signed_comparison_operator"
+! [(match_operand:DF 4 "reg_or_fp0_operand" "fG,fG")
+ (match_operand:DF 2 "fp0_operand" "G,G")])
+! (float_extend:DF (match_operand:SF 1 "reg_or_fp0_operand" "fG,0"))
+ (match_operand:DF 5 "reg_or_fp0_operand" "0,fG")))]
+ "TARGET_FP"
+diff -rcp2N gcc-2.7.2.2/config/alpha/elf.h g77-new/config/alpha/elf.h
+*** gcc-2.7.2.2/config/alpha/elf.h Wed Dec 31 19:00:00 1969
+--- g77-new/config/alpha/elf.h Thu Jul 10 20:08:49 1997
+***************
+*** 0 ****
+--- 1,522 ----
++ /* Definitions of target machine for GNU compiler, for DEC Alpha w/ELF.
++ Copyright (C) 1996 Free Software Foundation, Inc.
++ Contributed by Richard Henderson (rth@tamu.edu).
++
++ This file is part of GNU CC.
++
++ GNU CC is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2, or (at your option)
++ any later version.
++
++ GNU CC is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with GNU CC; see the file COPYING. If not, write to
++ the Free Software Foundation, 59 Temple Place - Suite 330,
++ Boston, MA 02111-1307, USA. */
++
++ /* This is used on Alpha platforms that use the ELF format.
++ Currently only Linux uses this. */
++
++ #include "alpha/linux.h"
++
++ #undef TARGET_VERSION
++ #define TARGET_VERSION fprintf (stderr, " (Alpha Linux/ELF)");
++
++ #undef OBJECT_FORMAT_COFF
++ #undef EXTENDED_COFF
++ #define OBJECT_FORMAT_ELF
++
++ #define SDB_DEBUGGING_INFO
++
++ #undef ASM_FINAL_SPEC
++
++ #undef CPP_PREDEFINES
++ #define CPP_PREDEFINES "\
++ -D__alpha -D__alpha__ -D__linux__ -D__linux -D_LONGLONG -Dlinux -Dunix \
++ -Asystem(linux) -Acpu(alpha) -Amachine(alpha) -D__ELF__"
++
++ #undef LINK_SPEC
++ #define LINK_SPEC "-m elf64alpha -G 8 %{O*:-O3} %{!O*:-O1} \
++ %{shared:-shared} \
++ %{!shared: \
++ %{!static: \
++ %{rdynamic:-export-dynamic} \
++ %{!dynamic-linker:-dynamic-linker /lib/ld.so.1}} \
++ %{static:-static}}"
++
++ /* Output at beginning of assembler file. */
++
++ #undef ASM_FILE_START
++ #define ASM_FILE_START(FILE) \
++ { \
++ alpha_write_verstamp (FILE); \
++ output_file_directive (FILE, main_input_filename); \
++ fprintf (FILE, "\t.version\t\"01.01\"\n"); \
++ fprintf (FILE, "\t.set noat\n"); \
++ }
++
++ #define ASM_OUTPUT_SOURCE_LINE(STREAM, LINE) \
++ alpha_output_lineno (STREAM, LINE)
++ extern void alpha_output_lineno ();
++
++ extern void output_file_directive ();
++
++ /* Attach a special .ident directive to the end of the file to identify
++ the version of GCC which compiled this code. The format of the
++ .ident string is patterned after the ones produced by native svr4
++ C compilers. */
++
++ #define IDENT_ASM_OP ".ident"
++
++ #ifdef IDENTIFY_WITH_IDENT
++ #define ASM_IDENTIFY_GCC(FILE) /* nothing */
++ #define ASM_IDENTIFY_LANGUAGE(FILE) \
++ fprintf(FILE, "\t%s \"GCC (%s) %s\"\n", IDENT_ASM_OP, \
++ lang_identify(), version_string)
++ #else
++ #define ASM_FILE_END(FILE) \
++ do { \
++ fprintf ((FILE), "\t%s\t\"GCC: (GNU) %s\"\n", \
++ IDENT_ASM_OP, version_string); \
++ } while (0)
++ #endif
++
++ /* Allow #sccs in preprocessor. */
++
++ #define SCCS_DIRECTIVE
++
++ /* Output #ident as a .ident. */
++
++ #define ASM_OUTPUT_IDENT(FILE, NAME) \
++ fprintf (FILE, "\t%s\t\"%s\"\n", IDENT_ASM_OP, NAME);
++
++ /* This is how to allocate empty space in some section. The .zero
++ pseudo-op is used for this on most svr4 assemblers. */
++
++ #define SKIP_ASM_OP ".zero"
++
++ #undef ASM_OUTPUT_SKIP
++ #define ASM_OUTPUT_SKIP(FILE,SIZE) \
++ fprintf (FILE, "\t%s\t%u\n", SKIP_ASM_OP, (SIZE))
++
++ /* Output the label which precedes a jumptable. Note that for all svr4
++ systems where we actually generate jumptables (which is to say every
++ svr4 target except i386, where we use casesi instead) we put the jump-
++ tables into the .rodata section and since other stuff could have been
++ put into the .rodata section prior to any given jumptable, we have to
++ make sure that the location counter for the .rodata section gets pro-
++ perly re-aligned prior to the actual beginning of the jump table. */
++
++ #define ALIGN_ASM_OP ".align"
++
++ #ifndef ASM_OUTPUT_BEFORE_CASE_LABEL
++ #define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE,PREFIX,NUM,TABLE) \
++ ASM_OUTPUT_ALIGN ((FILE), 2);
++ #endif
++
++ #undef ASM_OUTPUT_CASE_LABEL
++ #define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,JUMPTABLE) \
++ do { \
++ ASM_OUTPUT_BEFORE_CASE_LABEL (FILE, PREFIX, NUM, JUMPTABLE) \
++ ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); \
++ } while (0)
++
++ /* The standard SVR4 assembler seems to require that certain builtin
++ library routines (e.g. .udiv) be explicitly declared as .globl
++ in each assembly file where they are referenced. */
++
++ #define ASM_OUTPUT_EXTERNAL_LIBCALL(FILE, FUN) \
++ ASM_GLOBALIZE_LABEL (FILE, XSTR (FUN, 0))
++
++ /* This says how to output assembler code to declare an
++ uninitialized external linkage data object. Under SVR4,
++ the linker seems to want the alignment of data objects
++ to depend on their types. We do exactly that here. */
++
++ #define COMMON_ASM_OP ".comm"
++
++ #undef ASM_OUTPUT_ALIGNED_COMMON
++ #define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
++ do { \
++ fprintf ((FILE), "\t%s\t", COMMON_ASM_OP); \
++ assemble_name ((FILE), (NAME)); \
++ fprintf ((FILE), ",%u,%u\n", (SIZE), (ALIGN) / BITS_PER_UNIT); \
++ } while (0)
++
++ /* This says how to output assembler code to declare an
++ uninitialized internal linkage data object. Under SVR4,
++ the linker seems to want the alignment of data objects
++ to depend on their types. We do exactly that here. */
++
++ #define LOCAL_ASM_OP ".local"
++
++ #undef ASM_OUTPUT_ALIGNED_LOCAL
++ #define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
++ do { \
++ fprintf ((FILE), "\t%s\t", LOCAL_ASM_OP); \
++ assemble_name ((FILE), (NAME)); \
++ fprintf ((FILE), "\n"); \
++ ASM_OUTPUT_ALIGNED_COMMON (FILE, NAME, SIZE, ALIGN); \
++ } while (0)
++
++ /* This is the pseudo-op used to generate a 64-bit word of data with a
++ specific value in some section. */
++
++ #define INT_ASM_OP ".quad"
++
++ /* This is the pseudo-op used to generate a contiguous sequence of byte
++ values from a double-quoted string WITHOUT HAVING A TERMINATING NUL
++ AUTOMATICALLY APPENDED. This is the same for most svr4 assemblers. */
++
++ #undef ASCII_DATA_ASM_OP
++ #define ASCII_DATA_ASM_OP ".ascii"
++
++ /* Support const sections and the ctors and dtors sections for g++.
++ Note that there appears to be two different ways to support const
++ sections at the moment. You can either #define the symbol
++ READONLY_DATA_SECTION (giving it some code which switches to the
++ readonly data section) or else you can #define the symbols
++ EXTRA_SECTIONS, EXTRA_SECTION_FUNCTIONS, SELECT_SECTION, and
++ SELECT_RTX_SECTION. We do both here just to be on the safe side. */
++
++ #define USE_CONST_SECTION 1
++
++ #define CONST_SECTION_ASM_OP ".section\t.rodata"
++
++ /* Define the pseudo-ops used to switch to the .ctors and .dtors sections.
++
++ Note that we want to give these sections the SHF_WRITE attribute
++ because these sections will actually contain data (i.e. tables of
++ addresses of functions in the current root executable or shared library
++ file) and, in the case of a shared library, the relocatable addresses
++ will have to be properly resolved/relocated (and then written into) by
++ the dynamic linker when it actually attaches the given shared library
++ to the executing process. (Note that on SVR4, you may wish to use the
++ `-z text' option to the ELF linker, when building a shared library, as
++ an additional check that you are doing everything right. But if you do
++ use the `-z text' option when building a shared library, you will get
++ errors unless the .ctors and .dtors sections are marked as writable
++ via the SHF_WRITE attribute.) */
++
++ #define CTORS_SECTION_ASM_OP ".section\t.ctors,\"aw\""
++ #define DTORS_SECTION_ASM_OP ".section\t.dtors,\"aw\""
++
++ /* On svr4, we *do* have support for the .init and .fini sections, and we
++ can put stuff in there to be executed before and after `main'. We let
++ crtstuff.c and other files know this by defining the following symbols.
++ The definitions say how to change sections to the .init and .fini
++ sections. This is the same for all known svr4 assemblers. */
++
++ #define INIT_SECTION_ASM_OP ".section\t.init"
++ #define FINI_SECTION_ASM_OP ".section\t.fini"
++
++ /* Support non-common, uninitialized data in the .bss section. */
++
++ #define BSS_SECTION_ASM_OP ".section\t.bss"
++
++ /* A default list of other sections which we might be "in" at any given
++ time. For targets that use additional sections (e.g. .tdesc) you
++ should override this definition in the target-specific file which
++ includes this file. */
++
++ #undef EXTRA_SECTIONS
++ #define EXTRA_SECTIONS in_const, in_ctors, in_dtors, in_bss
++
++ /* A default list of extra section function definitions. For targets
++ that use additional sections (e.g. .tdesc) you should override this
++ definition in the target-specific file which includes this file. */
++
++ #undef EXTRA_SECTION_FUNCTIONS
++ #define EXTRA_SECTION_FUNCTIONS \
++ CONST_SECTION_FUNCTION \
++ CTORS_SECTION_FUNCTION \
++ DTORS_SECTION_FUNCTION \
++ BSS_SECTION_FUNCTION
++
++ #undef READONLY_DATA_SECTION
++ #define READONLY_DATA_SECTION() const_section ()
++
++ extern void text_section ();
++
++ #define CONST_SECTION_FUNCTION \
++ void \
++ const_section () \
++ { \
++ if (!USE_CONST_SECTION) \
++ text_section(); \
++ else if (in_section != in_const) \
++ { \
++ fprintf (asm_out_file, "%s\n", CONST_SECTION_ASM_OP); \
++ in_section = in_const; \
++ } \
++ }
++
++ #define CTORS_SECTION_FUNCTION \
++ void \
++ ctors_section () \
++ { \
++ if (in_section != in_ctors) \
++ { \
++ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
++ in_section = in_ctors; \
++ } \
++ }
++
++ #define DTORS_SECTION_FUNCTION \
++ void \
++ dtors_section () \
++ { \
++ if (in_section != in_dtors) \
++ { \
++ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
++ in_section = in_dtors; \
++ } \
++ }
++
++ #define BSS_SECTION_FUNCTION \
++ void \
++ bss_section () \
++ { \
++ if (in_section != in_bss) \
++ { \
++ fprintf (asm_out_file, "%s\n", BSS_SECTION_ASM_OP); \
++ in_section = in_bss; \
++ } \
++ }
++
++
++ /* Switch into a generic section.
++ This is currently only used to support section attributes.
++
++ We make the section read-only and executable for a function decl,
++ read-only for a const data decl, and writable for a non-const data decl. */
++ #define ASM_OUTPUT_SECTION_NAME(FILE, DECL, NAME) \
++ fprintf (FILE, ".section\t%s,\"%s\",@progbits\n", NAME, \
++ (DECL) && TREE_CODE (DECL) == FUNCTION_DECL ? "ax" : \
++ (DECL) && TREE_READONLY (DECL) ? "a" : "aw")
++
++
++ /* A C statement (sans semicolon) to output an element in the table of
++ global constructors. */
++ #define ASM_OUTPUT_CONSTRUCTOR(FILE,NAME) \
++ do { \
++ ctors_section (); \
++ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
++ assemble_name (FILE, NAME); \
++ fprintf (FILE, "\n"); \
++ } while (0)
++
++ /* A C statement (sans semicolon) to output an element in the table of
++ global destructors. */
++ #define ASM_OUTPUT_DESTRUCTOR(FILE,NAME) \
++ do { \
++ dtors_section (); \
++ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
++ assemble_name (FILE, NAME); \
++ fprintf (FILE, "\n"); \
++ } while (0)
++
++ /* A C statement or statements to switch to the appropriate
++ section for output of DECL. DECL is either a `VAR_DECL' node
++ or a constant of some sort. RELOC indicates whether forming
++ the initial value of DECL requires link-time relocations. */
++
++ #define SELECT_SECTION(DECL,RELOC) \
++ { \
++ if (TREE_CODE (DECL) == STRING_CST) \
++ { \
++ if (! flag_writable_strings) \
++ const_section (); \
++ else \
++ data_section (); \
++ } \
++ else if (TREE_CODE (DECL) == VAR_DECL) \
++ { \
++ if ((flag_pic && RELOC) \
++ || !TREE_READONLY (DECL) || TREE_SIDE_EFFECTS (DECL) \
++ || !DECL_INITIAL (DECL) \
++ || (DECL_INITIAL (DECL) != error_mark_node \
++ && !TREE_CONSTANT (DECL_INITIAL (DECL)))) \
++ { \
++ if (DECL_COMMON (DECL) \
++ && !DECL_INITIAL (DECL)) \
++ /* || DECL_INITIAL (DECL) == error_mark_node)) */ \
++ bss_section(); \
++ else \
++ data_section (); \
++ } \
++ else \
++ const_section (); \
++ } \
++ else \
++ const_section (); \
++ }
++
++ /* A C statement or statements to switch to the appropriate
++ section for output of RTX in mode MODE. RTX is some kind
++ of constant in RTL. The argument MODE is redundant except
++ in the case of a `const_int' rtx. Currently, these always
++ go into the const section. */
++
++ #undef SELECT_RTX_SECTION
++ #define SELECT_RTX_SECTION(MODE,RTX) const_section()
++
++ /* Define the strings used for the special svr4 .type and .size directives.
++ These strings generally do not vary from one system running svr4 to
++ another, but if a given system (e.g. m88k running svr) needs to use
++ different pseudo-op names for these, they may be overridden in the
++ file which includes this one. */
++
++ #define TYPE_ASM_OP ".type"
++ #define SIZE_ASM_OP ".size"
++
++ /* This is how we tell the assembler that a symbol is weak. */
++
++ #define ASM_WEAKEN_LABEL(FILE,NAME) \
++ do { fputs ("\t.weak\t", FILE); assemble_name (FILE, NAME); \
++ fputc ('\n', FILE); } while (0)
++
++ /* This is how we tell the assembler that two symbols have the same value. */
++
++ #define ASM_OUTPUT_DEF(FILE,NAME1,NAME2) \
++ do { assemble_name(FILE, NAME1); \
++ fputs(" = ", FILE); \
++ assemble_name(FILE, NAME2); \
++ fputc('\n', FILE); } while (0)
++
++ /* The following macro defines the format used to output the second
++ operand of the .type assembler directive. Different svr4 assemblers
++ expect various different forms for this operand. The one given here
++ is just a default. You may need to override it in your machine-
++ specific tm.h file (depending upon the particulars of your assembler). */
++
++ #define TYPE_OPERAND_FMT "@%s"
++
++ /* Write the extra assembler code needed to declare a function's result.
++ Most svr4 assemblers don't require any special declaration of the
++ result value, but there are exceptions. */
++
++ #ifndef ASM_DECLARE_RESULT
++ #define ASM_DECLARE_RESULT(FILE, RESULT)
++ #endif
++
++ /* These macros generate the special .type and .size directives which
++ are used to set the corresponding fields of the linker symbol table
++ entries in an ELF object file under SVR4. These macros also output
++ the starting labels for the relevant functions/objects. */
++
++ /* Write the extra assembler code needed to declare an object properly. */
++
++ #define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
++ do { \
++ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
++ assemble_name (FILE, NAME); \
++ putc (',', FILE); \
++ fprintf (FILE, TYPE_OPERAND_FMT, "object"); \
++ putc ('\n', FILE); \
++ size_directive_output = 0; \
++ if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \
++ { \
++ size_directive_output = 1; \
++ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
++ assemble_name (FILE, NAME); \
++ fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL))); \
++ } \
++ ASM_OUTPUT_LABEL(FILE, NAME); \
++ } while (0)
++
++ /* Output the size directive for a decl in rest_of_decl_compilation
++ in the case where we did not do so before the initializer.
++ Once we find the error_mark_node, we know that the value of
++ size_directive_output was set
++ by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
++
++ #define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
++ do { \
++ char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
++ if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
++ && ! AT_END && TOP_LEVEL \
++ && DECL_INITIAL (DECL) == error_mark_node \
++ && !size_directive_output) \
++ { \
++ size_directive_output = 1; \
++ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
++ assemble_name (FILE, name); \
++ fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL))); \
++ } \
++ } while (0)
++
++ /* A table of bytes codes used by the ASM_OUTPUT_ASCII and
++ ASM_OUTPUT_LIMITED_STRING macros. Each byte in the table
++ corresponds to a particular byte value [0..255]. For any
++ given byte value, if the value in the corresponding table
++ position is zero, the given character can be output directly.
++ If the table value is 1, the byte must be output as a \ooo
++ octal escape. If the tables value is anything else, then the
++ byte value should be output as a \ followed by the value
++ in the table. Note that we can use standard UN*X escape
++ sequences for many control characters, but we don't use
++ \a to represent BEL because some svr4 assemblers (e.g. on
++ the i386) don't know about that. Also, we don't use \v
++ since some versions of gas, such as 2.2 did not accept it. */
++
++ #define ESCAPES \
++ "\1\1\1\1\1\1\1\1btn\1fr\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
++ \0\0\"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
++ \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\\\0\0\0\
++ \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1\
++ \1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
++ \1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
++ \1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
++ \1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1"
++
++ /* Some svr4 assemblers have a limit on the number of characters which
++ can appear in the operand of a .string directive. If your assembler
++ has such a limitation, you should define STRING_LIMIT to reflect that
++ limit. Note that at least some svr4 assemblers have a limit on the
++ actual number of bytes in the double-quoted string, and that they
++ count each character in an escape sequence as one byte. Thus, an
++ escape sequence like \377 would count as four bytes.
++
++ If your target assembler doesn't support the .string directive, you
++ should define this to zero.
++ */
++
++ #define STRING_LIMIT ((unsigned) 256)
++
++ #define STRING_ASM_OP ".string"
++
++ /*
++ * We always use gas here, so we don't worry about ECOFF assembler problems.
++ */
++ #undef TARGET_GAS
++ #define TARGET_GAS (1)
++
++ #undef PREFERRED_DEBUGGING_TYPE
++ #define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
++
++ /* Provide a STARTFILE_SPEC appropriate for Linux. Here we add
++ the Linux magical crtbegin.o file (see crtstuff.c) which
++ provides part of the support for getting C++ file-scope static
++ object constructed before entering `main'. */
++
++ #undef STARTFILE_SPEC
++ #define STARTFILE_SPEC \
++ "%{!shared: \
++ %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} %{!p:crt1.o%s}}}\
++ crti.o%s crtbegin.o%s"
++
++ /* Provide a ENDFILE_SPEC appropriate for Linux. Here we tack on
++ the Linux magical crtend.o file (see crtstuff.c) which
++ provides part of the support for getting C++ file-scope static
++ object constructed before entering `main', followed by a normal
++ Linux "finalizer" file, `crtn.o'. */
++
++ #undef ENDFILE_SPEC
++ #define ENDFILE_SPEC \
++ "crtend.o%s crtn.o%s"
+diff -rcp2N gcc-2.7.2.2/config/alpha/linux.h g77-new/config/alpha/linux.h
+*** gcc-2.7.2.2/config/alpha/linux.h Wed Dec 31 19:00:00 1969
+--- g77-new/config/alpha/linux.h Thu Jul 10 20:08:49 1997
+***************
+*** 0 ****
+--- 1,72 ----
++ /* Definitions of target machine for GNU compiler, for Alpha Linux,
++ using ECOFF.
++ Copyright (C) 1995 Free Software Foundation, Inc.
++ Contributed by Bob Manson.
++ Derived from work contributed by Cygnus Support,
++ (c) 1993 Free Software Foundation.
++
++ This file is part of GNU CC.
++
++ GNU CC is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2, or (at your option)
++ any later version.
++
++ GNU CC is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with GNU CC; see the file COPYING. If not, write to
++ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
++
++ #define TARGET_DEFAULT (3 | MASK_GAS)
++
++ #include "alpha/alpha.h"
++
++ #undef TARGET_VERSION
++ #define TARGET_VERSION fprintf (stderr, " (Linux/Alpha)");
++
++ #undef CPP_PREDEFINES
++ #define CPP_PREDEFINES "\
++ -D__alpha -D__alpha__ -D__linux__ -D__linux -D_LONGLONG -Dlinux -Dunix \
++ -Asystem(linux) -Acpu(alpha) -Amachine(alpha)"
++
++ /* We don't actually need any of these; the MD_ vars are ignored
++ anyway for cross-compilers, and the other specs won't get picked up
++ 'coz the user is supposed to do ld -r (hmm, perhaps that should be
++ the default). In any case, setting them thus will catch some
++ common user errors. */
++
++ #undef MD_EXEC_PREFIX
++ #undef MD_STARTFILE_PREFIX
++
++ #undef LIB_SPEC
++ #define LIB_SPEC "%{pg:-lgmon} %{pg:-lc_p} %{!pg:-lc}"
++
++ #undef LINK_SPEC
++ #define LINK_SPEC \
++ "-G 8 %{O*:-O3} %{!O*:-O1}"
++
++ #undef ASM_SPEC
++ #define ASM_SPEC "-nocpp"
++
++ /* Can't do stabs */
++ #undef SDB_DEBUGGING_INFO
++
++ /* Prefer dbx. */
++ #undef PREFERRED_DEBUGGING_TYPE
++ #define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
++
++ #undef FUNCTION_PROFILER
++
++ #define FUNCTION_PROFILER(FILE, LABELNO) \
++ do { \
++ fputs ("\tlda $27,_mcount\n", (FILE)); \
++ fputs ("\tjsr $26,($27),_mcount\n", (FILE)); \
++ fputs ("\tldgp $29,0($26)\n", (FILE)); \
++ } while (0);
++
++ /* Generate calls to memcpy, etc., not bcopy, etc. */
++ #define TARGET_MEM_FUNCTIONS
+diff -rcp2N gcc-2.7.2.2/config/alpha/t-linux g77-new/config/alpha/t-linux
+*** gcc-2.7.2.2/config/alpha/t-linux Wed Dec 31 19:00:00 1969
+--- g77-new/config/alpha/t-linux Thu Jul 10 20:08:49 1997
+***************
+*** 0 ****
+--- 1,3 ----
++ # Our header files are supposed to be correct, nein?
++ FIXINCLUDES =
++ STMP_FIXPROTO =
+diff -rcp2N gcc-2.7.2.2/config/alpha/x-linux g77-new/config/alpha/x-linux
+*** gcc-2.7.2.2/config/alpha/x-linux Wed Dec 31 19:00:00 1969
+--- g77-new/config/alpha/x-linux Thu Jul 10 20:08:49 1997
+***************
+*** 0 ****
+--- 1 ----
++ CLIB=-lbfd -liberty
+diff -rcp2N gcc-2.7.2.2/config/alpha/xm-alpha.h g77-new/config/alpha/xm-alpha.h
+*** gcc-2.7.2.2/config/alpha/xm-alpha.h Thu Aug 31 17:52:27 1995
+--- g77-new/config/alpha/xm-alpha.h Thu Jul 10 20:08:49 1997
+*************** Boston, MA 02111-1307, USA. */
+*** 46,51 ****
+--- 46,53 ----
+ #include <alloca.h>
+ #else
++ #ifndef alloca
+ extern void *alloca ();
+ #endif
++ #endif
+
+ /* The host compiler has problems with enum bitfields since it makes
+*************** extern void *malloc (), *realloc (), *ca
+*** 68,72 ****
+--- 70,76 ----
+ /* OSF/1 has vprintf. */
+
++ #ifndef linux /* 1996/02/22 mauro@craftwork.com -- unreliable with Linux */
+ #define HAVE_VPRINTF
++ #endif
+
+ /* OSF/1 has putenv. */
+diff -rcp2N gcc-2.7.2.2/config/alpha/xm-linux.h g77-new/config/alpha/xm-linux.h
+*** gcc-2.7.2.2/config/alpha/xm-linux.h Wed Dec 31 19:00:00 1969
+--- g77-new/config/alpha/xm-linux.h Thu Jul 10 20:08:49 1997
+***************
+*** 0 ****
+--- 1,10 ----
++ #ifndef _XM_LINUX_H
++ #define _XM_LINUX_H
++
++ #include "xm-alpha.h"
++
++ #define HAVE_STRERROR
++
++ #define DONT_DECLARE_SYS_SIGLIST
++ #define USE_BFD
++ #endif
+diff -rcp2N gcc-2.7.2.2/config/i386/i386.c g77-new/config/i386/i386.c
+*** gcc-2.7.2.2/config/i386/i386.c Sun Oct 22 07:13:21 1995
+--- g77-new/config/i386/i386.c Sun Aug 10 18:46:09 1997
+*************** standard_80387_constant_p (x)
+*** 1290,1294 ****
+ set_float_handler (handler);
+ REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+! is0 = REAL_VALUES_EQUAL (d, dconst0);
+ is1 = REAL_VALUES_EQUAL (d, dconst1);
+ set_float_handler (NULL_PTR);
+--- 1290,1294 ----
+ set_float_handler (handler);
+ REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+! is0 = REAL_VALUES_EQUAL (d, dconst0) && !REAL_VALUE_MINUS_ZERO (d);
+ is1 = REAL_VALUES_EQUAL (d, dconst1);
+ set_float_handler (NULL_PTR);
+diff -rcp2N gcc-2.7.2.2/config/mips/mips.c g77-new/config/mips/mips.c
+*** gcc-2.7.2.2/config/mips/mips.c Thu Feb 20 19:24:13 1997
+--- g77-new/config/mips/mips.c Sun Aug 10 18:45:43 1997
+*************** expand_block_move (operands)
+*** 2360,2365 ****
+
+ else if (constp && bytes <= 2*MAX_MOVE_BYTES)
+! emit_insn (gen_movstrsi_internal (gen_rtx (MEM, BLKmode, dest_reg),
+! gen_rtx (MEM, BLKmode, src_reg),
+ bytes_rtx, align_rtx));
+
+--- 2360,2367 ----
+
+ else if (constp && bytes <= 2*MAX_MOVE_BYTES)
+! emit_insn (gen_movstrsi_internal (change_address (operands[0],
+! BLKmode, dest_reg),
+! change_address (orig_src, BLKmode,
+! src_reg),
+ bytes_rtx, align_rtx));
+
+diff -rcp2N gcc-2.7.2.2/config/mips/mips.h g77-new/config/mips/mips.h
+*** gcc-2.7.2.2/config/mips/mips.h Thu Nov 9 11:23:09 1995
+--- g77-new/config/mips/mips.h Sun Aug 10 18:46:44 1997
+*************** typedef struct mips_args {
+*** 2160,2170 ****
+ } \
+ \
+! /* Flush the instruction cache. */ \
+! /* ??? Are the modes right? Maybe they should depend on -mint64/-mlong64? */\
+ /* ??? Should check the return value for errors. */ \
+! emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "cacheflush"), \
+ 0, VOIDmode, 3, addr, Pmode, \
+ GEN_INT (TRAMPOLINE_SIZE), SImode, \
+! GEN_INT (1), SImode); \
+ }
+
+--- 2160,2170 ----
+ } \
+ \
+! /* Flush both caches. We need to flush the data cache in case \
+! the system has a write-back cache. */ \
+ /* ??? Should check the return value for errors. */ \
+! emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "_flush_cache"), \
+ 0, VOIDmode, 3, addr, Pmode, \
+ GEN_INT (TRAMPOLINE_SIZE), SImode, \
+! GEN_INT (3), TYPE_MODE (integer_type_node)); \
+ }
+
+*************** typedef struct mips_args {
+*** 2388,2392 ****
+ ((GET_CODE (X) != CONST_DOUBLE \
+ || mips_const_double_ok (X, GET_MODE (X))) \
+! && ! (GET_CODE (X) == CONST && ABI_64BIT))
+
+ /* A C compound statement that attempts to replace X with a valid
+--- 2388,2393 ----
+ ((GET_CODE (X) != CONST_DOUBLE \
+ || mips_const_double_ok (X, GET_MODE (X))) \
+! && ! (GET_CODE (X) == CONST \
+! && (ABI_64BIT || GET_CODE (XEXP (X, 0)) == MINUS)))
+
+ /* A C compound statement that attempts to replace X with a valid
+diff -rcp2N gcc-2.7.2.2/config/mips/sni-gas.h g77-new/config/mips/sni-gas.h
+*** gcc-2.7.2.2/config/mips/sni-gas.h Wed Dec 31 19:00:00 1969
+--- g77-new/config/mips/sni-gas.h Sun Aug 10 18:46:33 1997
+***************
+*** 0 ****
+--- 1,43 ----
++ #include "mips/sni-svr4.h"
++
++ /* Enable debugging. */
++ #define DBX_DEBUGGING_INFO
++ #define SDB_DEBUGGING_INFO
++ #define MIPS_DEBUGGING_INFO
++
++ #define DWARF_DEBUGGING_INFO
++ #undef PREFERRED_DEBUGGING_TYPE
++ #define PREFERRED_DEBUGGING_TYPE DWARF_DEBUG
++
++ /* We need to use .esize and .etype instead of .size and .type to
++ avoid conflicting with ELF directives. These are only recognized
++ by gas, anyhow, not the native assembler. */
++ #undef PUT_SDB_SIZE
++ #define PUT_SDB_SIZE(a) \
++ do { \
++ extern FILE *asm_out_text_file; \
++ fprintf (asm_out_text_file, "\t.esize\t%d;", (a)); \
++ } while (0)
++
++ #undef PUT_SDB_TYPE
++ #define PUT_SDB_TYPE(a) \
++ do { \
++ extern FILE *asm_out_text_file; \
++ fprintf (asm_out_text_file, "\t.etype\t0x%x;", (a)); \
++ } while (0)
++
++
++ /* This is how to equate one symbol to another symbol. The syntax used is
++ `SYM1=SYM2'. Note that this is different from the way equates are done
++ with most svr4 assemblers, where the syntax is `.set SYM1,SYM2'. */
++
++ #define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
++ do { fprintf ((FILE), "\t"); \
++ assemble_name (FILE, LABEL1); \
++ fprintf (FILE, " = "); \
++ assemble_name (FILE, LABEL2); \
++ fprintf (FILE, "\n"); \
++ } while (0)
++
++
++
+diff -rcp2N gcc-2.7.2.2/config/mips/sni-svr4.h g77-new/config/mips/sni-svr4.h
+*** gcc-2.7.2.2/config/mips/sni-svr4.h Wed Dec 31 19:00:00 1969
+--- g77-new/config/mips/sni-svr4.h Sun Aug 10 18:46:33 1997
+***************
+*** 0 ****
+--- 1,103 ----
++ /* Definitions of target machine for GNU compiler. SNI SINIX version.
++ Copyright (C) 1996 Free Software Foundation, Inc.
++ Contributed by Marco Walther (Marco.Walther@mch.sni.de).
++
++ This file is part of GNU CC.
++
++ GNU CC is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2, or (at your option)
++ any later version.
++
++ GNU CC is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with GNU CC; see the file COPYING. If not, write to
++ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
++
++ #define MIPS_SVR4
++
++ #define CPP_PREDEFINES "\
++ -Dmips -Dunix -Dhost_mips -DMIPSEB -DR3000 -DSYSTYPE_SVR4 \
++ -D_mips -D_unix -D_host_mips -D_MIPSEB -D_R3000 -D_SYSTYPE_SVR4 \
++ -Asystem(unix) -Asystem(svr4) -Acpu(mips) -Amachine(mips)"
++
++ #define CPP_SPEC "\
++ %{.cc: -D__LANGUAGE_C_PLUS_PLUS -D_LANGUAGE_C_PLUS_PLUS} \
++ %{.cxx: -D__LANGUAGE_C_PLUS_PLUS -D_LANGUAGE_C_PLUS_PLUS} \
++ %{.C: -D__LANGUAGE_C_PLUS_PLUS -D_LANGUAGE_C_PLUS_PLUS} \
++ %{.m: -D__LANGUAGE_OBJECTIVE_C -D_LANGUAGE_OBJECTIVE_C} \
++ %{.S: -D__LANGUAGE_ASSEMBLY -D_LANGUAGE_ASSEMBLY %{!ansi:-DLANGUAGE_ASSEMBLY}} \
++ %{.s: -D__LANGUAGE_ASSEMBLY -D_LANGUAGE_ASSEMBLY %{!ansi:-DLANGUAGE_ASSEMBLY}} \
++ %{!.S:%{!.s: -D__LANGUAGE_C -D_LANGUAGE_C %{!ansi:-DLANGUAGE_C}}} \
++ -D__SIZE_TYPE__=unsigned\\ int -D__PTRDIFF_TYPE__=int"
++
++ #define LINK_SPEC "\
++ %{G*} \
++ %{!mgas: \
++ %{dy} %{dn}}"
++
++ #define LIB_SPEC "\
++ %{p:-lprof1} \
++ %{!p:%{pg:-lprof1} \
++ %{!pg:-L/usr/ccs/lib/ -lc /usr/ccs/lib/crtn.o%s}}"
++
++ #define STARTFILE_SPEC "\
++ %{pg:gcrt0.o%s} \
++ %{!pg:%{p:mcrt0.o%s} \
++ %{!p:/usr/ccs/lib/crt1.o /usr/ccs/lib/crti.o /usr/ccs/lib/values-Xt.o%s}}"
++
++ /* Mips System V.4 doesn't have a getpagesize() function needed by the
++ trampoline code, so use the POSIX sysconf function to get it.
++ This is only done when compiling the trampoline code. */
++
++ #ifdef L_trampoline
++ #include <unistd.h>
++
++ #define getpagesize() sysconf(_SC_PAGE_SIZE)
++ #endif /* L_trampoline */
++
++ /* Use atexit for static constructors/destructors, instead of defining
++ our own exit function. */
++ #define HAVE_ATEXIT
++
++ /* Generate calls to memcpy, etc., not bcopy, etc. */
++ #define TARGET_MEM_FUNCTIONS
++
++ #define OBJECT_FORMAT_ELF
++
++ #define TARGET_DEFAULT MASK_ABICALLS
++ #define ABICALLS_ASM_OP ".option pic2"
++
++ #define MACHINE_TYPE "SNI running SINIX 5.42"
++
++ #define MIPS_DEFAULT_GVALUE 0
++
++ #define NM_FLAGS "-p"
++
++ /* We have a problem if no .text section is created in an assembler file.
++    These pseudo-labels then land in some other section, e.g. .reginfo,
++    which makes ld very unhappy. */
++
++ #define ASM_IDENTIFY_GCC(mw_stream) \
++ fprintf(mw_stream, "\t.ident \"gcc2_compiled.\"\n");
++
++ #define ASM_IDENTIFY_LANGUAGE(STREAM)
++
++ #define ASM_LONG ".word\t"
++ #define ASM_GLOBAL ".rdata\n\t\t.globl\t"
++
++ #include "mips/mips.h"
++
++ /* We do not want to run mips-tfile! */
++ #undef ASM_FINAL_SPEC
++
++ #undef OBJECT_FORMAT_COFF
++
++ /* We don't support debugging info for now. */
++ #undef DBX_DEBUGGING_INFO
++ #undef SDB_DEBUGGING_INFO
++ #undef MIPS_DEBUGGING_INFO
+diff -rcp2N gcc-2.7.2.2/config/mips/x-sni-svr4 g77-new/config/mips/x-sni-svr4
+*** gcc-2.7.2.2/config/mips/x-sni-svr4 Wed Dec 31 19:00:00 1969
+--- g77-new/config/mips/x-sni-svr4 Sun Aug 10 18:46:33 1997
+***************
+*** 0 ****
+--- 1,18 ----
++ # Define CC and OLDCC as the same, so that the tests:
++ # if [ x"$(OLDCC)" = x"$(CC)" ] ...
++ #
++ # will succeed (if OLDCC != CC, it is assumed that GCC is
++ # being used in secondary stage builds).
++ # -Olimit is so the user can use -O2. Down with fixed
++ # size tables!
++
++ CC = $(OLDCC)
++ OPT =
++ OLDCC = cc -Olimit 3000 $(OPT)
++
++ X_CFLAGS = -DNO_SYS_SIGLIST
++
++ # Show we need to use the C version of ALLOCA
++ # The SVR3 configurations have it, but the SVR4 configurations don't.
++ # For now, just try using it for all SVR* configurations.
++ ALLOCA = alloca.o
+diff -rcp2N gcc-2.7.2.2/config/msdos/configur.bat g77-new/config/msdos/configur.bat
+*** gcc-2.7.2.2/config/msdos/configur.bat Mon Aug 28 05:55:47 1995
+--- g77-new/config/msdos/configur.bat Sun Aug 10 19:08:05 1997
+*************** sed -f config/msdos/top.sed Makefile.in
+*** 18,21 ****
+--- 18,27 ----
+ set LANG=
+
++ if not exist ada\make-lang.in goto no_ada
++ sed -f config/msdos/top.sed ada\make-lang.in >> Makefile
++ sed -f config/msdos/top.sed ada\makefile.in > ada\Makefile
++ set LANG=%LANG% ada.&
++ :no_ada
++
+ if not exist cp\make-lang.in goto no_cp
+ sed -f config/msdos/top.sed cp\make-lang.in >> Makefile
+diff -rcp2N gcc-2.7.2.2/config/pa/pa.c g77-new/config/pa/pa.c
+*** gcc-2.7.2.2/config/pa/pa.c Sun Oct 22 07:45:20 1995
+--- g77-new/config/pa/pa.c Sun Aug 10 18:45:44 1997
+*************** output_move_double (operands)
+*** 1344,1369 ****
+ do them in the other order.
+
+! RMS says "This happens only for registers;
+! such overlap can't happen in memory unless the user explicitly
+! sets it up, and that is an undefined circumstance."
+!
+! but it happens on the HP-PA when loading parameter registers,
+! so I am going to define that circumstance, and make it work
+! as expected. */
+
+! if (optype0 == REGOP && (optype1 == MEMOP || optype1 == OFFSOP)
+! && reg_overlap_mentioned_p (operands[0], XEXP (operands[1], 0)))
+ {
+- /* XXX THIS PROBABLY DOESN'T WORK. */
+ /* Do the late half first. */
+ if (addreg1)
+ output_asm_insn ("ldo 4(%0),%0", &addreg1);
+ output_asm_insn (singlemove_string (latehalf), latehalf);
+ if (addreg1)
+ output_asm_insn ("ldo -4(%0),%0", &addreg1);
+- /* Then clobber. */
+ return singlemove_string (operands);
+ }
+
+ if (optype0 == REGOP && optype1 == REGOP
+ && REGNO (operands[0]) == REGNO (operands[1]) + 1)
+--- 1344,1377 ----
+ do them in the other order.
+
+! This can happen in two cases:
+
+! mem -> register where the first half of the destination register
+! is the same register used in the memory's address. Reload
+! can create such insns.
+!
+! mem in this case will be either register indirect or register
+! indirect plus a valid offset.
+!
+! register -> register move where REGNO(dst) == REGNO(src + 1)
+! someone (Tim/Tege?) claimed this can happen for parameter loads.
+!
+! Handle mem -> register case first. */
+! if (optype0 == REGOP
+! && (optype1 == MEMOP || optype1 == OFFSOP)
+! && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
+! operands[1], 0))
+ {
+ /* Do the late half first. */
+ if (addreg1)
+ output_asm_insn ("ldo 4(%0),%0", &addreg1);
+ output_asm_insn (singlemove_string (latehalf), latehalf);
++
++ /* Then clobber. */
+ if (addreg1)
+ output_asm_insn ("ldo -4(%0),%0", &addreg1);
+ return singlemove_string (operands);
+ }
+
++ /* Now handle register -> register case. */
+ if (optype0 == REGOP && optype1 == REGOP
+ && REGNO (operands[0]) == REGNO (operands[1]) + 1)
+diff -rcp2N gcc-2.7.2.2/config/pa/pa.md g77-new/config/pa/pa.md
+*** gcc-2.7.2.2/config/pa/pa.md Mon Aug 14 09:00:49 1995
+--- g77-new/config/pa/pa.md Sun Aug 10 18:45:45 1997
+***************
+*** 1828,1832 ****
+ (define_insn ""
+ [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand"
+! "=f,*r,Q,?o,?Q,f,*&r,*&r")
+ (match_operand:DF 1 "reg_or_0_or_nonsymb_mem_operand"
+ "fG,*rG,f,*r,*r,Q,o,Q"))]
+--- 1828,1832 ----
+ (define_insn ""
+ [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand"
+! "=f,*r,Q,?o,?Q,f,*r,*r")
+ (match_operand:DF 1 "reg_or_0_or_nonsymb_mem_operand"
+ "fG,*rG,f,*r,*r,Q,o,Q"))]
+***************
+*** 1846,1850 ****
+ (define_insn ""
+ [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand"
+! "=r,?o,?Q,&r,&r")
+ (match_operand:DF 1 "reg_or_0_or_nonsymb_mem_operand"
+ "rG,r,r,o,Q"))]
+--- 1846,1850 ----
+ (define_insn ""
+ [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand"
+! "=r,?o,?Q,r,r")
+ (match_operand:DF 1 "reg_or_0_or_nonsymb_mem_operand"
+ "rG,r,r,o,Q"))]
+***************
+*** 2019,2023 ****
+ (define_insn ""
+ [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand"
+! "=r,o,Q,&r,&r,&r,f,f,*T")
+ (match_operand:DI 1 "general_operand"
+ "rM,r,r,o,Q,i,fM,*T,f"))]
+--- 2019,2023 ----
+ (define_insn ""
+ [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand"
+! "=r,o,Q,r,r,r,f,f,*T")
+ (match_operand:DI 1 "general_operand"
+ "rM,r,r,o,Q,i,fM,*T,f"))]
+***************
+*** 2037,2041 ****
+ (define_insn ""
+ [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand"
+! "=r,o,Q,&r,&r,&r")
+ (match_operand:DI 1 "general_operand"
+ "rM,r,r,o,Q,i"))]
+--- 2037,2041 ----
+ (define_insn ""
+ [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand"
+! "=r,o,Q,r,r,r")
+ (match_operand:DI 1 "general_operand"
+ "rM,r,r,o,Q,i"))]
+diff -rcp2N gcc-2.7.2.2/config/rs6000/rs6000.c g77-new/config/rs6000/rs6000.c
+*** gcc-2.7.2.2/config/rs6000/rs6000.c Thu Feb 20 19:24:14 1997
+--- g77-new/config/rs6000/rs6000.c Sun Aug 10 04:44:05 1997
+*************** input_operand (op, mode)
+*** 724,730 ****
+ return 1;
+
+! /* For HImode and QImode, any constant is valid. */
+! if ((mode == HImode || mode == QImode)
+! && GET_CODE (op) == CONST_INT)
+ return 1;
+
+--- 724,729 ----
+ return 1;
+
+! /* For integer modes, any constant is ok. */
+! if (GET_CODE (op) == CONST_INT)
+ return 1;
+
+diff -rcp2N gcc-2.7.2.2/config/sparc/sol2.h g77-new/config/sparc/sol2.h
+*** gcc-2.7.2.2/config/sparc/sol2.h Sat Aug 19 17:36:45 1995
+--- g77-new/config/sparc/sol2.h Sun Aug 10 18:45:53 1997
+*************** do { \
+*** 166,168 ****
+ /* Define for support of TFmode long double and REAL_ARITHMETIC.
+ Sparc ABI says that long double is 4 words. */
+! #define LONG_DOUBLE_TYPE_SIZE 128
+--- 166,168 ----
+ /* Define for support of TFmode long double and REAL_ARITHMETIC.
+ Sparc ABI says that long double is 4 words. */
+! #define LONG_DOUBLE_TYPE_SIZE 64
+diff -rcp2N gcc-2.7.2.2/config/sparc/sparc.c g77-new/config/sparc/sparc.c
+*** gcc-2.7.2.2/config/sparc/sparc.c Tue Sep 12 18:32:24 1995
+--- g77-new/config/sparc/sparc.c Sun Aug 10 18:46:03 1997
+*************** Boston, MA 02111-1307, USA. */
+*** 40,46 ****
+ /* 1 if the caller has placed an "unimp" insn immediately after the call.
+ This is used in v8 code when calling a function that returns a structure.
+! v9 doesn't have this. */
+
+! #define SKIP_CALLERS_UNIMP_P (!TARGET_V9 && current_function_returns_struct)
+
+ /* Global variables for machine-dependent things. */
+--- 40,51 ----
+ /* 1 if the caller has placed an "unimp" insn immediately after the call.
+ This is used in v8 code when calling a function that returns a structure.
+! v9 doesn't have this. Be careful to have this test be the same as that
+! used on the call. */
+
+! #define SKIP_CALLERS_UNIMP_P \
+! (!TARGET_V9 && current_function_returns_struct \
+! && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))) \
+! && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl))) \
+! == INTEGER_CST))
+
+ /* Global variables for machine-dependent things. */
+diff -rcp2N gcc-2.7.2.2/config/sparc/sparc.h g77-new/config/sparc/sparc.h
+*** gcc-2.7.2.2/config/sparc/sparc.h Thu Feb 20 19:24:15 1997
+--- g77-new/config/sparc/sparc.h Sun Aug 10 18:46:13 1997
+*************** extern int leaf_function;
+*** 1526,1533 ****
+
+ /* Output assembler code to FILE to increment profiler label # LABELNO
+! for profiling a function entry. */
+
+ #define FUNCTION_PROFILER(FILE, LABELNO) \
+ do { \
+ fputs ("\tsethi %hi(", (FILE)); \
+ ASM_OUTPUT_INTERNAL_LABELREF (FILE, "LP", LABELNO); \
+--- 1526,1540 ----
+
+ /* Output assembler code to FILE to increment profiler label # LABELNO
+! for profiling a function entry.
+!
+! 32 bit sparc uses %g2 as the STATIC_CHAIN_REGNUM which gets clobbered
+! during profiling so we need to save/restore it around the call to mcount.
+! We're guaranteed that a save has just been done, and we use the space
+! allocated for intreg/fpreg value passing. */
+
+ #define FUNCTION_PROFILER(FILE, LABELNO) \
+ do { \
++ if (! TARGET_V9) \
++ fputs ("\tst %g2,[%fp-4]\n", FILE); \
+ fputs ("\tsethi %hi(", (FILE)); \
+ ASM_OUTPUT_INTERNAL_LABELREF (FILE, "LP", LABELNO); \
+*************** extern int leaf_function;
+*** 1539,1542 ****
+--- 1546,1551 ----
+ ASM_OUTPUT_INTERNAL_LABELREF (FILE, "LP", LABELNO); \
+ fputs ("),%o0,%o0\n", (FILE)); \
++ if (! TARGET_V9) \
++ fputs ("\tld [%fp-4],%g2\n", FILE); \
+ } while (0)
+
+diff -rcp2N gcc-2.7.2.2/config/sparc/sparc.md g77-new/config/sparc/sparc.md
+*** gcc-2.7.2.2/config/sparc/sparc.md Tue Sep 12 18:57:35 1995
+--- g77-new/config/sparc/sparc.md Sun Aug 10 18:46:27 1997
+***************
+*** 4799,4803 ****
+ abort ();
+
+! if (GET_CODE (XEXP (operands[0], 0)) == LABEL_REF)
+ {
+ /* This is really a PIC sequence. We want to represent
+--- 4799,4803 ----
+ abort ();
+
+! if (GET_CODE (XEXP (operands[0], 0)) == LABEL_REF)
+ {
+ /* This is really a PIC sequence. We want to represent
+***************
+*** 4809,4824 ****
+
+ if (! TARGET_V9 && INTVAL (operands[3]) != 0)
+! emit_jump_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (3,
+! gen_rtx (SET, VOIDmode, pc_rtx,
+! XEXP (operands[0], 0)),
+! operands[3],
+! gen_rtx (CLOBBER, VOIDmode,
+! gen_rtx (REG, Pmode, 15)))));
+ else
+! emit_jump_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
+! gen_rtx (SET, VOIDmode, pc_rtx,
+! XEXP (operands[0], 0)),
+! gen_rtx (CLOBBER, VOIDmode,
+! gen_rtx (REG, Pmode, 15)))));
+ goto finish_call;
+ }
+--- 4809,4828 ----
+
+ if (! TARGET_V9 && INTVAL (operands[3]) != 0)
+! emit_jump_insn
+! (gen_rtx (PARALLEL, VOIDmode,
+! gen_rtvec (3,
+! gen_rtx (SET, VOIDmode, pc_rtx,
+! XEXP (operands[0], 0)),
+! GEN_INT (INTVAL (operands[3]) & 0xfff),
+! gen_rtx (CLOBBER, VOIDmode,
+! gen_rtx (REG, Pmode, 15)))));
+ else
+! emit_jump_insn
+! (gen_rtx (PARALLEL, VOIDmode,
+! gen_rtvec (2,
+! gen_rtx (SET, VOIDmode, pc_rtx,
+! XEXP (operands[0], 0)),
+! gen_rtx (CLOBBER, VOIDmode,
+! gen_rtx (REG, Pmode, 15)))));
+ goto finish_call;
+ }
+***************
+*** 4839,4852 ****
+
+ if (! TARGET_V9 && INTVAL (operands[3]) != 0)
+! emit_call_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (3,
+! gen_rtx (CALL, VOIDmode, fn_rtx, nregs_rtx),
+! operands[3],
+! gen_rtx (CLOBBER, VOIDmode,
+! gen_rtx (REG, Pmode, 15)))));
+ else
+! emit_call_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
+! gen_rtx (CALL, VOIDmode, fn_rtx, nregs_rtx),
+! gen_rtx (CLOBBER, VOIDmode,
+! gen_rtx (REG, Pmode, 15)))));
+
+ finish_call:
+--- 4843,4858 ----
+
+ if (! TARGET_V9 && INTVAL (operands[3]) != 0)
+! emit_call_insn
+! (gen_rtx (PARALLEL, VOIDmode,
+! gen_rtvec (3, gen_rtx (CALL, VOIDmode, fn_rtx, nregs_rtx),
+! GEN_INT (INTVAL (operands[3]) & 0xfff),
+! gen_rtx (CLOBBER, VOIDmode,
+! gen_rtx (REG, Pmode, 15)))));
+ else
+! emit_call_insn
+! (gen_rtx (PARALLEL, VOIDmode,
+! gen_rtvec (2, gen_rtx (CALL, VOIDmode, fn_rtx, nregs_rtx),
+! gen_rtx (CLOBBER, VOIDmode,
+! gen_rtx (REG, Pmode, 15)))));
+
+ finish_call:
+***************
+*** 4911,4915 ****
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 1 for most machines.
+! "! TARGET_V9 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) > 0"
+ "call %a0,%1\;nop\;unimp %2"
+ [(set_attr "type" "call_no_delay_slot")])
+--- 4917,4921 ----
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 1 for most machines.
+! "! TARGET_V9 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0"
+ "call %a0,%1\;nop\;unimp %2"
+ [(set_attr "type" "call_no_delay_slot")])
+***************
+*** 4923,4927 ****
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 1 for most machines.
+! "! TARGET_V9 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) > 0"
+ "call %a0,%1\;nop\;unimp %2"
+ [(set_attr "type" "call_no_delay_slot")])
+--- 4929,4933 ----
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 1 for most machines.
+! "! TARGET_V9 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0"
+ "call %a0,%1\;nop\;unimp %2"
+ [(set_attr "type" "call_no_delay_slot")])
+***************
+*** 5178,5184 ****
+ emit_insn (gen_rtx (USE, VOIDmode, stack_pointer_rtx));
+ emit_insn (gen_rtx (USE, VOIDmode, static_chain_rtx));
+- emit_insn (gen_rtx (USE, VOIDmode, gen_rtx (REG, Pmode, 8)));
+ /* Return, restoring reg window and jumping to goto handler. */
+ emit_insn (gen_goto_handler_and_restore ());
+ DONE;
+ }")
+--- 5184,5190 ----
+ emit_insn (gen_rtx (USE, VOIDmode, stack_pointer_rtx));
+ emit_insn (gen_rtx (USE, VOIDmode, static_chain_rtx));
+ /* Return, restoring reg window and jumping to goto handler. */
+ emit_insn (gen_goto_handler_and_restore ());
++ emit_barrier ();
+ DONE;
+ }")
+***************
+*** 5192,5200 ****
+
+ (define_insn "goto_handler_and_restore"
+! [(unspec_volatile [(const_int 0)] 2)]
+ ""
+ "jmp %%o0+0\;restore"
+ [(set_attr "type" "misc")
+ (set_attr "length" "2")])
+
+ ;; Special pattern for the FLUSH instruction.
+--- 5198,5237 ----
+
+ (define_insn "goto_handler_and_restore"
+! [(unspec_volatile [(const_int 0)] 2)
+! (use (reg:SI 8))]
+ ""
+ "jmp %%o0+0\;restore"
+ [(set_attr "type" "misc")
+ (set_attr "length" "2")])
++
++ ;; Pattern for use after a setjmp to store FP and the return register
++ ;; into the stack area.
++
++ (define_expand "setjmp"
++ [(const_int 0)]
++ ""
++ "
++ {
++ if (TARGET_V9)
++ emit_insn (gen_setjmp_64 ());
++ else
++ emit_insn (gen_setjmp_32 ());
++
++ DONE;
++ }")
++
++ (define_expand "setjmp_32"
++ [(set (mem:SI (plus:SI (reg:SI 14) (const_int 56))) (match_dup 0))
++ (set (mem:SI (plus:SI (reg:SI 14) (const_int 60))) (reg:SI 31))]
++ ""
++ "
++ { operands[0] = frame_pointer_rtx; }")
++
++ (define_expand "setjmp_64"
++ [(set (mem:DI (plus:DI (reg:DI 14) (const_int 112))) (match_dup 0))
++ (set (mem:DI (plus:DI (reg:DI 14) (const_int 120))) (reg:DI 31))]
++ ""
++ "
++ { operands[0] = frame_pointer_rtx; }")
+
+ ;; Special pattern for the FLUSH instruction.
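The new setjmp expanders above simply store the frame pointer and the return register into fixed slots of the register save area, choosing 32-bit or 64-bit offsets depending on TARGET_V9 (register 14 is %sp and register 31 is %i7 in the patterns). A standalone sketch of the offset selection, with the constants copied from the patterns above and every other name purely illustrative (none of this is compiler code):

struct setjmp_slots
{
  long fp_offset;   /* where the frame pointer is stored, relative to %sp */
  long ret_offset;  /* where the return register (%i7) is stored */
  int slot_size;    /* 4 bytes on 32-bit SPARC, 8 bytes on V9 */
};

static struct setjmp_slots
pick_setjmp_slots (int target_v9)
{
  struct setjmp_slots s;
  if (target_v9)
    {
      s.fp_offset = 112;
      s.ret_offset = 120;
      s.slot_size = 8;
    }
  else
    {
      s.fp_offset = 56;
      s.ret_offset = 60;
      s.slot_size = 4;
    }
  return s;
}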
+diff -rcp2N gcc-2.7.2.2/config/x-linux g77-new/config/x-linux
+*** gcc-2.7.2.2/config/x-linux Tue Mar 28 07:43:37 1995
+--- g77-new/config/x-linux Thu Jul 10 20:08:49 1997
+*************** BOOT_CFLAGS = -O $(CFLAGS) -Iinclude
+*** 13,14 ****
+--- 13,17 ----
+ # Don't run fixproto
+ STMP_FIXPROTO =
++
++ # Don't install "assert.h" in gcc. We use the one in glibc.
++ INSTALL_ASSERT_H =
+diff -rcp2N gcc-2.7.2.2/config/x-linux-aout g77-new/config/x-linux-aout
+*** gcc-2.7.2.2/config/x-linux-aout Wed Dec 31 19:00:00 1969
+--- g77-new/config/x-linux-aout Thu Jul 10 20:08:49 1997
+***************
+*** 0 ****
+--- 1,14 ----
++ # It is defined in config/xm-linux.h.
++ # X_CFLAGS = -DPOSIX
++
++ # The following is needed when compiling stages 2 and 3 because gcc's
++ # limits.h must be picked up before /usr/include/limits.h. This is because
++ # each does an #include_next of the other if the other hasn't been included.
++ # /usr/include/limits.h loses if it gets found first because /usr/include is
++ # at the end of the search order. When a new version of gcc is released,
++ # gcc's limits.h hasn't been installed yet and hence isn't found.
++
++ BOOT_CFLAGS = -O $(CFLAGS) -Iinclude
++
++ # Don't run fixproto
++ STMP_FIXPROTO =
+diff -rcp2N gcc-2.7.2.2/config.guess g77-new/config.guess
+*** gcc-2.7.2.2/config.guess Thu Feb 20 19:24:32 1997
+--- g77-new/config.guess Thu Jul 10 20:08:50 1997
+*************** trap 'rm -f dummy.c dummy.o dummy; exit
+*** 52,63 ****
+
+ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+- alpha:OSF1:V*:*)
+- # After 1.2, OSF1 uses "V1.3" for uname -r.
+- echo alpha-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^V//'`
+- exit 0 ;;
+ alpha:OSF1:*:*)
+ # 1.2 uses "1.2" for uname -r.
+! echo alpha-dec-osf${UNAME_RELEASE}
+! exit 0 ;;
+ 21064:Windows_NT:50:3)
+ echo alpha-dec-winnt3.5
+--- 52,62 ----
+
+ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+ alpha:OSF1:*:*)
++ # A Vn.n version is a released version.
++ # A Tn.n version is a released field test version.
++ # A Xn.n version is an unreleased experimental baselevel.
+ # 1.2 uses "1.2" for uname -r.
+! echo alpha-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[VTX]//'`
+! exit 0 ;;
+ 21064:Windows_NT:50:3)
+ echo alpha-dec-winnt3.5
+*************** case "${UNAME_MACHINE}:${UNAME_SYSTEM}:$
+*** 154,161 ****
+ echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+ exit 0 ;;
+! ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit 0 ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+! i[34]86:AIX:*:*)
+ echo i386-ibm-aix
+ exit 0 ;;
+--- 153,160 ----
+ echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+ exit 0 ;;
+! ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit 0 ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+! i?86:AIX:*:*)
+ echo i386-ibm-aix
+ exit 0 ;;
+*************** EOF
+*** 220,224 ****
+ 9000/31? ) HP_ARCH=m68000 ;;
+ 9000/[34]?? ) HP_ARCH=m68k ;;
+! 9000/7?? | 9000/8?[79] ) HP_ARCH=hppa1.1 ;;
+ 9000/8?? ) HP_ARCH=hppa1.0 ;;
+ esac
+--- 219,223 ----
+ 9000/31? ) HP_ARCH=m68000 ;;
+ 9000/[34]?? ) HP_ARCH=m68k ;;
+! 9000/7?? | 9000/8?[1679] ) HP_ARCH=hppa1.1 ;;
+ 9000/8?? ) HP_ARCH=hppa1.0 ;;
+ esac
+*************** EOF
+*** 304,308 ****
+ echo m68k-hp-netbsd${UNAME_RELEASE}
+ exit 0 ;;
+! i[34]86:BSD/386:*:* | *:BSD/OS:*:*)
+ echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+ exit 0 ;;
+--- 303,307 ----
+ echo m68k-hp-netbsd${UNAME_RELEASE}
+ exit 0 ;;
+! i?86:BSD/386:*:* | *:BSD/OS:*:*)
+ echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+ exit 0 ;;
+*************** EOF
+*** 314,318 ****
+ exit 0 ;;
+ *:GNU:*:*)
+! echo `echo ${UNAME_MACHINE}|sed -e 's,/.*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ exit 0 ;;
+ *:Linux:*:*)
+--- 313,317 ----
+ exit 0 ;;
+ *:GNU:*:*)
+! echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ exit 0 ;;
+ *:Linux:*:*)
+*************** EOF
+*** 320,330 ****
+ # first see if it will tell us.
+ ld_help_string=`ld --help 2>&1`
+! # if echo $ld_help_string | grep >/dev/null 2>&1 "supported emulations: elf_i[345]86"; then
+ # echo "${UNAME_MACHINE}-unknown-linux" ; exit 0
+! if echo $ld_help_string | grep >/dev/null 2>&1 "supported emulations: i[345]86linux"; then
+ echo "${UNAME_MACHINE}-unknown-linuxaout" ; exit 0
+! elif echo $ld_help_string | grep >/dev/null 2>&1 "supported emulations: i[345]86coff"; then
+ echo "${UNAME_MACHINE}-unknown-linuxcoff" ; exit 0
+ elif test "${UNAME_MACHINE}" = "alpha" ; then
+ echo alpha-unknown-linux ; exit 0
+ else
+--- 319,333 ----
+ # first see if it will tell us.
+ ld_help_string=`ld --help 2>&1`
+! # if echo $ld_help_string | grep >/dev/null 2>&1 "supported emulations: elf_i?86"; then
+ # echo "${UNAME_MACHINE}-unknown-linux" ; exit 0
+! if echo $ld_help_string | grep >/dev/null 2>&1 "supported emulations: i?86linux"; then
+ echo "${UNAME_MACHINE}-unknown-linuxaout" ; exit 0
+! elif echo $ld_help_string | grep >/dev/null 2>&1 "supported emulations: i?86coff"; then
+ echo "${UNAME_MACHINE}-unknown-linuxcoff" ; exit 0
+ elif test "${UNAME_MACHINE}" = "alpha" ; then
++ as_version_string=`as --version 2>&1`
++ if echo $as_version_string | grep >/dev/null 2>&1 " version 2.6 "; then
++ echo alpha-unknown-linuxoldas ; exit 0
++ fi
+ echo alpha-unknown-linux ; exit 0
+ else
+*************** EOF
+*** 363,370 ****
+ # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. earlier versions
+ # are messed up and put the nodename in both sysname and nodename.
+! i[34]86:DYNIX/ptx:4*:*)
+ echo i386-sequent-sysv4
+ exit 0 ;;
+! i[34]86:*:4.*:* | i[34]86:SYSTEM_V:4.*:*)
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo ${UNAME_MACHINE}-univel-sysv${UNAME_RELEASE}
+--- 366,373 ----
+ # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. earlier versions
+ # are messed up and put the nodename in both sysname and nodename.
+! i?86:DYNIX/ptx:4*:*)
+ echo i386-sequent-sysv4
+ exit 0 ;;
+! i?86:*:4.*:* | i?86:SYSTEM_V:4.*:*)
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo ${UNAME_MACHINE}-univel-sysv${UNAME_RELEASE}
+*************** EOF
+*** 373,377 ****
+ fi
+ exit 0 ;;
+! i[34]86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+--- 376,380 ----
+ fi
+ exit 0 ;;
+! i?86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+*************** EOF
+*** 380,383 ****
+--- 383,388 ----
+ UNAME_REL=`(/bin/uname -X|egrep Release|sed -e 's/.*= //')`
+ (/bin/uname -X|egrep i80486 >/dev/null) && UNAME_MACHINE=i486
++ (/bin/uname -X|egrep '^Machine.*Pentium' >/dev/null) \
++ && UNAME_MACHINE=i586
+ echo ${UNAME_MACHINE}-unknown-sco$UNAME_REL
+ else
+*************** EOF
+*** 402,406 ****
+ echo m68010-convergent-sysv
+ exit 0 ;;
+! M680[234]0:*:R3V[567]*:*)
+ test -r /sysV68 && echo 'm68k-motorola-sysv' && exit 0 ;;
+ 3[34]??:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0)
+--- 407,411 ----
+ echo m68010-convergent-sysv
+ exit 0 ;;
+! M68*:*:R3V[567]*:*)
+ test -r /sysV68 && echo 'm68k-motorola-sysv' && exit 0 ;;
+ 3[34]??:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0)
+*************** EOF
+*** 410,414 ****
+ uname -p 2>/dev/null | grep 86 >/dev/null \
+ && echo i486-ncr-sysv4 && exit 0 ;;
+! m680[234]0:LynxOS:2.[23]*:*)
+ echo m68k-lynx-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+--- 415,419 ----
+ uname -p 2>/dev/null | grep 86 >/dev/null \
+ && echo i486-ncr-sysv4 && exit 0 ;;
+! m68*:LynxOS:2.*:*)
+ echo m68k-lynx-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+*************** EOF
+*** 416,426 ****
+ echo m68k-atari-sysv4
+ exit 0 ;;
+! i[34]86:LynxOS:2.[23]*:*)
+ echo i386-lynx-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+! TSUNAMI:LynxOS:2.[23]*:*)
+ echo sparc-lynx-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+! rs6000:LynxOS:2.[23]*:*)
+ echo rs6000-lynx-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+--- 421,431 ----
+ echo m68k-atari-sysv4
+ exit 0 ;;
+! i?86:LynxOS:2.*:*)
+ echo i386-lynx-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+! TSUNAMI:LynxOS:2.*:*)
+ echo sparc-lynx-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+! rs6000:LynxOS:2.*:* | PowerPC:LynxOS:2.*:*)
+ echo rs6000-lynx-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+*************** main ()
+*** 479,483 ****
+ int version;
+ version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
+! printf ("%s-next-nextstep%s\n", __ARCHITECTURE__, version==2 ? "2" : "3");
+ exit (0);
+ #endif
+--- 484,488 ----
+ int version;
+ version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
+! printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
+ exit (0);
+ #endif
+diff -rcp2N gcc-2.7.2.2/config.sub g77-new/config.sub
+*** gcc-2.7.2.2/config.sub Thu Jun 15 17:01:49 1995
+--- g77-new/config.sub Thu Jul 10 20:08:50 1997
+*************** case $basic_machine in
+*** 130,134 ****
+ # Recognize the basic CPU types without company name.
+ # Some are omitted here because they have special meanings below.
+! tahoe | i[345]86 | i860 | m68k | m68000 | m88k | ns32k | arm \
+ | arme[lb] | pyramid \
+ | tron | a29k | 580 | i960 | h8300 | hppa1.0 | hppa1.1 \
+--- 130,134 ----
+ # Recognize the basic CPU types without company name.
+ # Some are omitted here because they have special meanings below.
+! tahoe | i[3456]86 | i860 | m68k | m68000 | m88k | ns32k | arm \
+ | arme[lb] | pyramid \
+ | tron | a29k | 580 | i960 | h8300 | hppa1.0 | hppa1.1 \
+*************** case $basic_machine in
+*** 145,149 ****
+ ;;
+ # Recognize the basic CPU types with company name.
+! vax-* | tahoe-* | i[345]86-* | i860-* | m68k-* | m68000-* | m88k-* \
+ | sparc-* | ns32k-* | fx80-* | arm-* | c[123]* \
+ | mips-* | pyramid-* | tron-* | a29k-* | romp-* | rs6000-* | power-* \
+--- 145,149 ----
+ ;;
+ # Recognize the basic CPU types with company name.
+! vax-* | tahoe-* | i[3456]86-* | i860-* | m68k-* | m68000-* | m88k-* \
+ | sparc-* | ns32k-* | fx80-* | arm-* | c[123]* \
+ | mips-* | pyramid-* | tron-* | a29k-* | romp-* | rs6000-* | power-* \
+*************** case $basic_machine in
+*** 309,325 ****
+ ;;
+ # I'm not sure what "Sysv32" means. Should this be sysv3.2?
+! i[345]86v32)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-unknown/'`
+ os=-sysv32
+ ;;
+! i[345]86v4*)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-unknown/'`
+ os=-sysv4
+ ;;
+! i[345]86v)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-unknown/'`
+ os=-sysv
+ ;;
+! i[345]86sol2)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-unknown/'`
+ os=-solaris2
+--- 309,325 ----
+ ;;
+ # I'm not sure what "Sysv32" means. Should this be sysv3.2?
+! i[3456]86v32)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-unknown/'`
+ os=-sysv32
+ ;;
+! i[3456]86v4*)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-unknown/'`
+ os=-sysv4
+ ;;
+! i[3456]86v)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-unknown/'`
+ os=-sysv
+ ;;
+! i[3456]86sol2)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-unknown/'`
+ os=-solaris2
+diff -rcp2N gcc-2.7.2.2/configure g77-new/configure
+*** gcc-2.7.2.2/configure Thu Feb 20 19:24:33 1997
+--- g77-new/configure Sun Aug 10 18:46:31 1997
+*************** exec_prefix='$(prefix)'
+*** 82,85 ****
+--- 82,86 ----
+ # The default g++ include directory is $(libdir)/g++-include.
+ gxx_include_dir='$(libdir)/g++-include'
++ #gxx_include_dir='$(exec_prefix)/include/g++'
+
+ # Default --program-transform-name to nothing.
+*************** for machine in $canon_build $canon_host
+*** 548,551 ****
+--- 549,578 ----
+ use_collect2=yes
+ ;;
++ alpha-*-linux*oldas*)
++ tm_file=alpha/linux.h
++ tmake_file=alpha/t-linux
++ xmake_file=alpha/x-linux
++ fixincludes=Makefile.in
++ xm_file=alpha/xm-linux.h
++ gas=yes gnu_ld=yes
++ ;;
++ alpha-*-linux*ecoff*)
++ tm_file=alpha/linux.h
++ tmake_file=alpha/t-linux
++ xmake_file=alpha/x-linux
++ fixincludes=Makefile.in
++ xm_file=alpha/xm-linux.h
++ extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
++ gas=yes gnu_ld=yes
++ ;;
++ alpha-*-linux*)
++ tm_file=alpha/elf.h
++ tmake_file=alpha/t-linux
++ xmake_file=alpha/x-linux
++ fixincludes=Makefile.in
++ xm_file=alpha/xm-linux.h
++ extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
++ gas=yes gnu_ld=yes
++ ;;
+ alpha-dec-osf[23456789]*)
+ tm_file=alpha/osf2.h
+*************** for machine in $canon_build $canon_host
+*** 985,989 ****
+ cpu_type=i386 # with a.out format using pre BFD linkers
+ xm_file=i386/xm-linux.h
+! xmake_file=x-linux
+ tm_file=i386/linux-oldld.h
+ fixincludes=Makefile.in # The headers are ok already.
+--- 1012,1016 ----
+ cpu_type=i386 # with a.out format using pre BFD linkers
+ xm_file=i386/xm-linux.h
+! xmake_file=x-linux-aout
+ tm_file=i386/linux-oldld.h
+ fixincludes=Makefile.in # The headers are ok already.
+*************** for machine in $canon_build $canon_host
+*** 994,998 ****
+ cpu_type=i386 # with a.out format
+ xm_file=i386/xm-linux.h
+! xmake_file=x-linux
+ tm_file=i386/linux-aout.h
+ fixincludes=Makefile.in # The headers are ok already.
+--- 1021,1025 ----
+ cpu_type=i386 # with a.out format
+ xm_file=i386/xm-linux.h
+! xmake_file=x-linux-aout
+ tm_file=i386/linux-aout.h
+ fixincludes=Makefile.in # The headers are ok already.
+*************** for machine in $canon_build $canon_host
+*** 1003,1007 ****
+ cpu_type=i386 # with ELF format, using GNU libc v1.
+ xm_file=i386/xm-linux.h
+! xmake_file=x-linux
+ tmake_file=t-linux-libc1
+ tm_file=i386/linux.h
+--- 1030,1034 ----
+ cpu_type=i386 # with ELF format, using GNU libc v1.
+ xm_file=i386/xm-linux.h
+! xmake_file=x-linux-aout
+ tmake_file=t-linux-libc1
+ tm_file=i386/linux.h
+*************** for machine in $canon_build $canon_host
+*** 1651,1654 ****
+--- 1678,1702 ----
+ use_collect2=yes
+ ;;
++ mips-sni-sysv4)
++ if [ x$gas = xyes ]
++ then
++ if [ x$stabs = xyes ]
++ then
++ tm_file=mips/iris5gdb.h
++ else
++ tm_file=mips/sni-gas.h
++ fi
++ else
++ tm_file=mips/sni-svr4.h
++ fi
++ xm_file=mips/xm-sysv.h
++ xmake_file=mips/x-sni-svr4
++ tmake_file=mips/t-mips-gas
++ if [ x$gnu_ld != xyes ]
++ then
++ use_collect2=yes
++ fi
++ broken_install=yes
++ ;;
+ mips-sgi-irix5*) # SGI System V.4., IRIX 5
+ if [ x$gas = xyes ]
+*************** MAYBE_TARGET_DEFAULT = -DTARGET_CPU_DEFA
+*** 2980,2984 ****
+ rm Makefile.sed
+ echo 's| ||' > Makefile.sed
+! echo "s|^target=.*$|target=${target}|" >> Makefile.sed
+ echo "s|^xmake_file=.*$|xmake_file=${dep_host_xmake_file}|" >> Makefile.sed
+ echo "s|^tmake_file=.*$|tmake_file=${dep_tmake_file}|" >> Makefile.sed
+--- 3028,3032 ----
+ rm Makefile.sed
+ echo 's| ||' > Makefile.sed
+! echo "s|^target=.*$|target=${canon_target}|" >> Makefile.sed
+ echo "s|^xmake_file=.*$|xmake_file=${dep_host_xmake_file}|" >> Makefile.sed
+ echo "s|^tmake_file=.*$|tmake_file=${dep_tmake_file}|" >> Makefile.sed
+diff -rcp2N gcc-2.7.2.2/cse.c g77-new/cse.c
+*** gcc-2.7.2.2/cse.c Sun Nov 26 14:47:05 1995
+--- g77-new/cse.c Sun Aug 10 18:46:37 1997
+*************** static struct table_elt *last_jump_equiv
+*** 520,544 ****
+ static int constant_pool_entries_cost;
+
+- /* Bits describing what kind of values in memory must be invalidated
+- for a particular instruction. If all three bits are zero,
+- no memory refs need to be invalidated. Each bit is more powerful
+- than the preceding ones, and if a bit is set then the preceding
+- bits are also set.
+-
+- Here is how the bits are set:
+- Pushing onto the stack invalidates only the stack pointer,
+- writing at a fixed address invalidates only variable addresses,
+- writing in a structure element at variable address
+- invalidates all but scalar variables,
+- and writing in anything else at variable address invalidates everything. */
+-
+- struct write_data
+- {
+- int sp : 1; /* Invalidate stack pointer. */
+- int var : 1; /* Invalidate variable addresses. */
+- int nonscalar : 1; /* Invalidate all but scalar variables. */
+- int all : 1; /* Invalidate all memory refs. */
+- };
+-
+ /* Define maximum length of a branch path. */
+
+--- 520,523 ----
+*************** static void merge_equiv_classes PROTO((s
+*** 626,632 ****
+ struct table_elt *));
+ static void invalidate PROTO((rtx, enum machine_mode));
+ static void remove_invalid_refs PROTO((int));
+ static void rehash_using_reg PROTO((rtx));
+! static void invalidate_memory PROTO((struct write_data *));
+ static void invalidate_for_call PROTO((void));
+ static rtx use_related_value PROTO((rtx, struct table_elt *));
+--- 605,612 ----
+ struct table_elt *));
+ static void invalidate PROTO((rtx, enum machine_mode));
++ static int cse_rtx_varies_p PROTO((rtx));
+ static void remove_invalid_refs PROTO((int));
+ static void rehash_using_reg PROTO((rtx));
+! static void invalidate_memory PROTO((void));
+ static void invalidate_for_call PROTO((void));
+ static rtx use_related_value PROTO((rtx, struct table_elt *));
+*************** static void set_nonvarying_address_compo
+*** 638,644 ****
+ HOST_WIDE_INT *));
+ static int refers_to_p PROTO((rtx, rtx));
+- static int refers_to_mem_p PROTO((rtx, rtx, HOST_WIDE_INT,
+- HOST_WIDE_INT));
+- static int cse_rtx_addr_varies_p PROTO((rtx));
+ static rtx canon_reg PROTO((rtx, rtx));
+ static void find_best_addr PROTO((rtx, rtx *));
+--- 618,621 ----
+*************** static void record_jump_cond PROTO((enum
+*** 656,661 ****
+ rtx, rtx, int));
+ static void cse_insn PROTO((rtx, int));
+! static void note_mem_written PROTO((rtx, struct write_data *));
+! static void invalidate_from_clobbers PROTO((struct write_data *, rtx));
+ static rtx cse_process_notes PROTO((rtx, rtx));
+ static void cse_around_loop PROTO((rtx));
+--- 633,638 ----
+ rtx, rtx, int));
+ static void cse_insn PROTO((rtx, int));
+! static int note_mem_written PROTO((rtx));
+! static void invalidate_from_clobbers PROTO((rtx));
+ static rtx cse_process_notes PROTO((rtx, rtx));
+ static void cse_around_loop PROTO((rtx));
+*************** invalidate (x, full_mode)
+*** 1512,1517 ****
+ register int i;
+ register struct table_elt *p;
+- rtx base;
+- HOST_WIDE_INT start, end;
+
+ /* If X is a register, dependencies on its contents
+--- 1489,1492 ----
+*************** invalidate (x, full_mode)
+*** 1605,1611 ****
+ full_mode = GET_MODE (x);
+
+- set_nonvarying_address_components (XEXP (x, 0), GET_MODE_SIZE (full_mode),
+- &base, &start, &end);
+-
+ for (i = 0; i < NBUCKETS; i++)
+ {
+--- 1580,1583 ----
+*************** invalidate (x, full_mode)
+*** 1614,1618 ****
+ {
+ next = p->next_same_hash;
+! if (refers_to_mem_p (p->exp, base, start, end))
+ remove_from_table (p, i);
+ }
+--- 1586,1594 ----
+ {
+ next = p->next_same_hash;
+! /* Invalidate ASM_OPERANDS which reference memory (this is easier
+! than checking all the aliases). */
+! if (p->in_memory
+! && (GET_CODE (p->exp) != MEM
+! || true_dependence (x, full_mode, p->exp, cse_rtx_varies_p)))
+ remove_from_table (p, i);
+ }
+*************** rehash_using_reg (x)
+*** 1695,1722 ****
+ }
+
+- /* Remove from the hash table all expressions that reference memory,
+- or some of them as specified by *WRITES. */
+-
+- static void
+- invalidate_memory (writes)
+- struct write_data *writes;
+- {
+- register int i;
+- register struct table_elt *p, *next;
+- int all = writes->all;
+- int nonscalar = writes->nonscalar;
+-
+- for (i = 0; i < NBUCKETS; i++)
+- for (p = table[i]; p; p = next)
+- {
+- next = p->next_same_hash;
+- if (p->in_memory
+- && (all
+- || (nonscalar && p->in_struct)
+- || cse_rtx_addr_varies_p (p->exp)))
+- remove_from_table (p, i);
+- }
+- }
+-
+ /* Remove from the hash table any expression that is a call-clobbered
+ register. Also update their TICK values. */
+--- 1671,1674 ----
+*************** invalidate_for_call ()
+*** 1756,1759 ****
+--- 1708,1717 ----
+ next = p->next_same_hash;
+
++ if (p->in_memory)
++ {
++ remove_from_table (p, hash);
++ continue;
++ }
++
+ if (GET_CODE (p->exp) != REG
+ || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
+*************** canon_hash (x, mode)
+*** 1946,1950 ****
+ return 0;
+ }
+! if (! RTX_UNCHANGING_P (x))
+ {
+ hash_arg_in_memory = 1;
+--- 1904,1908 ----
+ return 0;
+ }
+! if (! RTX_UNCHANGING_P (x) || FIXED_BASE_PLUS_P (XEXP (x, 0)))
+ {
+ hash_arg_in_memory = 1;
+*************** set_nonvarying_address_components (addr,
+*** 2395,2477 ****
+ }
+
+! /* Return 1 iff any subexpression of X refers to memory
+! at an address of BASE plus some offset
+! such that any of the bytes' offsets fall between START (inclusive)
+! and END (exclusive).
+!
+! The value is undefined if X is a varying address (as determined by
+! cse_rtx_addr_varies_p). This function is not used in such cases.
+!
+! When used in the cse pass, `qty_const' is nonzero, and it is used
+! to treat an address that is a register with a known constant value
+! as if it were that constant value.
+! In the loop pass, `qty_const' is zero, so this is not done. */
+!
+! static int
+! refers_to_mem_p (x, base, start, end)
+! rtx x, base;
+! HOST_WIDE_INT start, end;
+! {
+! register HOST_WIDE_INT i;
+! register enum rtx_code code;
+! register char *fmt;
+!
+! repeat:
+! if (x == 0)
+! return 0;
+!
+! code = GET_CODE (x);
+! if (code == MEM)
+! {
+! register rtx addr = XEXP (x, 0); /* Get the address. */
+! rtx mybase;
+! HOST_WIDE_INT mystart, myend;
+!
+! set_nonvarying_address_components (addr, GET_MODE_SIZE (GET_MODE (x)),
+! &mybase, &mystart, &myend);
+!
+!
+! /* refers_to_mem_p is never called with varying addresses.
+! If the base addresses are not equal, there is no chance
+! of the memory addresses conflicting. */
+! if (! rtx_equal_p (mybase, base))
+! return 0;
+!
+! return myend > start && mystart < end;
+! }
+!
+! /* X does not match, so try its subexpressions. */
+!
+! fmt = GET_RTX_FORMAT (code);
+! for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+! if (fmt[i] == 'e')
+! {
+! if (i == 0)
+! {
+! x = XEXP (x, 0);
+! goto repeat;
+! }
+! else
+! if (refers_to_mem_p (XEXP (x, i), base, start, end))
+! return 1;
+! }
+! else if (fmt[i] == 'E')
+! {
+! int j;
+! for (j = 0; j < XVECLEN (x, i); j++)
+! if (refers_to_mem_p (XVECEXP (x, i, j), base, start, end))
+! return 1;
+! }
+!
+! return 0;
+! }
+!
+! /* Nonzero if X refers to memory at a varying address;
+ except that a register which has at the moment a known constant value
+ isn't considered variable. */
+
+ static int
+! cse_rtx_addr_varies_p (x)
+! rtx x;
+ {
+ /* We need not check for X and the equivalence class being of the same
+--- 2353,2363 ----
+ }
+
+! /* Nonzero if X, a memory address, refers to a varying address;
+ except that a register which has at the moment a known constant value
+ isn't considered variable. */
+
+ static int
+! cse_rtx_varies_p (x)
+! register rtx x;
+ {
+ /* We need not check for X and the equivalence class being of the same
+*************** cse_rtx_addr_varies_p (x)
+*** 2479,2497 ****
+ doesn't vary in any mode. */
+
+! if (GET_CODE (x) == MEM
+! && GET_CODE (XEXP (x, 0)) == REG
+! && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
+! && GET_MODE (XEXP (x, 0)) == qty_mode[reg_qty[REGNO (XEXP (x, 0))]]
+! && qty_const[reg_qty[REGNO (XEXP (x, 0))]] != 0)
+ return 0;
+
+! if (GET_CODE (x) == MEM
+! && GET_CODE (XEXP (x, 0)) == PLUS
+! && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+! && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+! && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 0)))
+! && (GET_MODE (XEXP (XEXP (x, 0), 0))
+! == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
+! && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
+ return 0;
+
+--- 2365,2381 ----
+ doesn't vary in any mode. */
+
+! if (GET_CODE (x) == REG
+! && REGNO_QTY_VALID_P (REGNO (x))
+! && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]]
+! && qty_const[reg_qty[REGNO (x)]] != 0)
+ return 0;
+
+! if (GET_CODE (x) == PLUS
+! && GET_CODE (XEXP (x, 1)) == CONST_INT
+! && GET_CODE (XEXP (x, 0)) == REG
+! && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
+! && (GET_MODE (XEXP (x, 0))
+! == qty_mode[reg_qty[REGNO (XEXP (x, 0))]])
+! && qty_const[reg_qty[REGNO (XEXP (x, 0))]])
+ return 0;
+
+*************** cse_rtx_addr_varies_p (x)
+*** 2501,2519 ****
+ load fp minus a constant into a register, then a MEM which is the
+ sum of the two `constant' registers. */
+! if (GET_CODE (x) == MEM
+! && GET_CODE (XEXP (x, 0)) == PLUS
+! && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+! && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG
+! && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 0)))
+! && (GET_MODE (XEXP (XEXP (x, 0), 0))
+! == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
+! && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]]
+! && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 1)))
+! && (GET_MODE (XEXP (XEXP (x, 0), 1))
+! == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 1))]])
+! && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 1))]])
+ return 0;
+
+! return rtx_addr_varies_p (x);
+ }
+
+--- 2385,2402 ----
+ load fp minus a constant into a register, then a MEM which is the
+ sum of the two `constant' registers. */
+! if (GET_CODE (x) == PLUS
+! && GET_CODE (XEXP (x, 0)) == REG
+! && GET_CODE (XEXP (x, 1)) == REG
+! && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
+! && (GET_MODE (XEXP (x, 0))
+! == qty_mode[reg_qty[REGNO (XEXP (x, 0))]])
+! && qty_const[reg_qty[REGNO (XEXP (x, 0))]]
+! && REGNO_QTY_VALID_P (REGNO (XEXP (x, 1)))
+! && (GET_MODE (XEXP (x, 1))
+! == qty_mode[reg_qty[REGNO (XEXP (x, 1))]])
+! && qty_const[reg_qty[REGNO (XEXP (x, 1))]])
+ return 0;
+
+! return rtx_varies_p (x);
+ }
+
+*************** cse_insn (insn, in_libcall_block)
+*** 6105,6110 ****
+ rtx this_insn_cc0 = 0;
+ enum machine_mode this_insn_cc0_mode;
+- struct write_data writes_memory;
+- static struct write_data init = {0, 0, 0, 0};
+
+ rtx src_eqv = 0;
+--- 5988,5991 ----
+*************** cse_insn (insn, in_libcall_block)
+*** 6118,6122 ****
+
+ this_insn = insn;
+- writes_memory = init;
+
+ /* Find all the SETs and CLOBBERs in this instruction.
+--- 5999,6002 ----
+*************** cse_insn (insn, in_libcall_block)
+*** 6220,6232 ****
+ else if (GET_CODE (y) == CLOBBER)
+ {
+! /* If we clobber memory, take note of that,
+! and canon the address.
+ This does nothing when a register is clobbered
+ because we have already invalidated the reg. */
+ if (GET_CODE (XEXP (y, 0)) == MEM)
+! {
+! canon_reg (XEXP (y, 0), NULL_RTX);
+! note_mem_written (XEXP (y, 0), &writes_memory);
+! }
+ }
+ else if (GET_CODE (y) == USE
+--- 6100,6108 ----
+ else if (GET_CODE (y) == CLOBBER)
+ {
+! /* If we clobber memory, canon the address.
+ This does nothing when a register is clobbered
+ because we have already invalidated the reg. */
+ if (GET_CODE (XEXP (y, 0)) == MEM)
+! canon_reg (XEXP (y, 0), NULL_RTX);
+ }
+ else if (GET_CODE (y) == USE
+*************** cse_insn (insn, in_libcall_block)
+*** 6247,6254 ****
+ {
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+! {
+! canon_reg (XEXP (x, 0), NULL_RTX);
+! note_mem_written (XEXP (x, 0), &writes_memory);
+! }
+ }
+
+--- 6123,6127 ----
+ {
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+! canon_reg (XEXP (x, 0), NULL_RTX);
+ }
+
+*************** cse_insn (insn, in_libcall_block)
+*** 6674,6678 ****
+ }
+ #endif /* LOAD_EXTEND_OP */
+!
+ if (src == src_folded)
+ src_folded = 0;
+--- 6547,6551 ----
+ }
+ #endif /* LOAD_EXTEND_OP */
+!
+ if (src == src_folded)
+ src_folded = 0;
+*************** cse_insn (insn, in_libcall_block)
+*** 6860,6864 ****
+ || (GET_CODE (src_folded) != MEM
+ && ! src_folded_force_flag))
+! && GET_MODE_CLASS (mode) != MODE_CC)
+ {
+ src_folded_force_flag = 1;
+--- 6733,6738 ----
+ || (GET_CODE (src_folded) != MEM
+ && ! src_folded_force_flag))
+! && GET_MODE_CLASS (mode) != MODE_CC
+! && mode != VOIDmode)
+ {
+ src_folded_force_flag = 1;
+*************** cse_insn (insn, in_libcall_block)
+*** 6983,6993 ****
+ if (GET_CODE (dest) == MEM)
+ {
+ dest = fold_rtx (dest, insn);
+-
+- /* Decide whether we invalidate everything in memory,
+- or just things at non-fixed places.
+- Writing a large aggregate must invalidate everything
+- because we don't know how long it is. */
+- note_mem_written (dest, &writes_memory);
+ }
+
+--- 6857,6869 ----
+ if (GET_CODE (dest) == MEM)
+ {
++ #ifdef PUSH_ROUNDING
++ /* Stack pushes invalidate the stack pointer. */
++ rtx addr = XEXP (dest, 0);
++ if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
++ || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
++ && XEXP (addr, 0) == stack_pointer_rtx)
++ invalidate (stack_pointer_rtx, Pmode);
++ #endif
+ dest = fold_rtx (dest, insn);
+ }
+
+*************** cse_insn (insn, in_libcall_block)
+*** 7234,7238 ****
+ sets[i].src_elt = src_eqv_elt;
+
+! invalidate_from_clobbers (&writes_memory, x);
+
+ /* Some registers are invalidated by subroutine calls. Memory is
+--- 7110,7114 ----
+ sets[i].src_elt = src_eqv_elt;
+
+! invalidate_from_clobbers (x);
+
+ /* Some registers are invalidated by subroutine calls. Memory is
+*************** cse_insn (insn, in_libcall_block)
+*** 7241,7248 ****
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+- static struct write_data everything = {0, 1, 1, 1};
+-
+ if (! CONST_CALL_P (insn))
+! invalidate_memory (&everything);
+ invalidate_for_call ();
+ }
+--- 7117,7122 ----
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ if (! CONST_CALL_P (insn))
+! invalidate_memory ();
+ invalidate_for_call ();
+ }
+*************** cse_insn (insn, in_libcall_block)
+*** 7265,7270 ****
+ we have just done an invalidate_memory that covers even those. */
+ if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
+! || (GET_CODE (dest) == MEM && ! writes_memory.all
+! && ! cse_rtx_addr_varies_p (dest)))
+ invalidate (dest, VOIDmode);
+ else if (GET_CODE (dest) == STRICT_LOW_PART
+--- 7139,7143 ----
+ we have just done an invalidate_memory that covers even those. */
+ if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
+! || GET_CODE (dest) == MEM)
+ invalidate (dest, VOIDmode);
+ else if (GET_CODE (dest) == STRICT_LOW_PART
+*************** cse_insn (insn, in_libcall_block)
+*** 7359,7363 ****
+ sets[i].dest_hash, GET_MODE (dest));
+ elt->in_memory = (GET_CODE (sets[i].inner_dest) == MEM
+! && ! RTX_UNCHANGING_P (sets[i].inner_dest));
+
+ if (elt->in_memory)
+--- 7232,7238 ----
+ sets[i].dest_hash, GET_MODE (dest));
+ elt->in_memory = (GET_CODE (sets[i].inner_dest) == MEM
+! && (! RTX_UNCHANGING_P (sets[i].inner_dest)
+! || FIXED_BASE_PLUS_P (XEXP (sets[i].inner_dest,
+! 0))));
+
+ if (elt->in_memory)
+*************** cse_insn (insn, in_libcall_block)
+*** 7532,7580 ****
+ }
+
+- /* Store 1 in *WRITES_PTR for those categories of memory ref
+- that must be invalidated when the expression WRITTEN is stored in.
+- If WRITTEN is null, say everything must be invalidated. */
+-
+ static void
+! note_mem_written (written, writes_ptr)
+! rtx written;
+! struct write_data *writes_ptr;
+! {
+! static struct write_data everything = {0, 1, 1, 1};
+!
+! if (written == 0)
+! *writes_ptr = everything;
+! else if (GET_CODE (written) == MEM)
+! {
+! /* Pushing or popping the stack invalidates just the stack pointer. */
+! rtx addr = XEXP (written, 0);
+! if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
+! || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
+! && GET_CODE (XEXP (addr, 0)) == REG
+! && REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM)
+! {
+! writes_ptr->sp = 1;
+! return;
+! }
+! else if (GET_MODE (written) == BLKmode)
+! *writes_ptr = everything;
+! /* (mem (scratch)) means clobber everything. */
+! else if (GET_CODE (addr) == SCRATCH)
+! *writes_ptr = everything;
+! else if (cse_rtx_addr_varies_p (written))
+! {
+! /* A varying address that is a sum indicates an array element,
+! and that's just as good as a structure element
+! in implying that we need not invalidate scalar variables.
+! However, we must allow QImode aliasing of scalars, because the
+! ANSI C standard allows character pointers to alias anything. */
+! if (! ((MEM_IN_STRUCT_P (written)
+! || GET_CODE (XEXP (written, 0)) == PLUS)
+! && GET_MODE (written) != QImode))
+! writes_ptr->all = 1;
+! writes_ptr->nonscalar = 1;
+! }
+! writes_ptr->var = 1;
+ }
+ }
+
+--- 7407,7450 ----
+ }
+
+ static void
+! invalidate_memory ()
+! {
+! register int i;
+! register struct table_elt *p, *next;
+!
+! for (i = 0; i < NBUCKETS; i++)
+! for (p = table[i]; p; p = next)
+! {
+! next = p->next_same_hash;
+! if (p->in_memory)
+! remove_from_table (p, i);
+! }
+! }
+!
+! static int
+! note_mem_written (mem)
+! register rtx mem;
+! {
+! if (mem == 0 || GET_CODE(mem) != MEM )
+! return 0;
+! else
+! {
+! register rtx addr = XEXP (mem, 0);
+! /* Pushing or popping the stack invalidates just the stack pointer. */
+! if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
+! || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
+! && GET_CODE (XEXP (addr, 0)) == REG
+! && REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM)
+! {
+! if (reg_tick[STACK_POINTER_REGNUM] >= 0)
+! reg_tick[STACK_POINTER_REGNUM]++;
+!
+! /* This should be *very* rare. */
+! if (TEST_HARD_REG_BIT (hard_regs_in_table, STACK_POINTER_REGNUM))
+! invalidate (stack_pointer_rtx, VOIDmode);
+! return 1;
+ }
++ return 0;
++ }
+ }
+
+*************** note_mem_written (written, writes_ptr)
+*** 7584,7612 ****
+ alias with something that is SET or CLOBBERed.
+
+- W points to the writes_memory for this insn, a struct write_data
+- saying which kinds of memory references must be invalidated.
+ X is the pattern of the insn. */
+
+ static void
+! invalidate_from_clobbers (w, x)
+! struct write_data *w;
+ rtx x;
+ {
+- /* If W->var is not set, W specifies no action.
+- If W->all is set, this step gets all memory refs
+- so they can be ignored in the rest of this function. */
+- if (w->var)
+- invalidate_memory (w);
+-
+- if (w->sp)
+- {
+- if (reg_tick[STACK_POINTER_REGNUM] >= 0)
+- reg_tick[STACK_POINTER_REGNUM]++;
+-
+- /* This should be *very* rare. */
+- if (TEST_HARD_REG_BIT (hard_regs_in_table, STACK_POINTER_REGNUM))
+- invalidate (stack_pointer_rtx, VOIDmode);
+- }
+-
+ if (GET_CODE (x) == CLOBBER)
+ {
+--- 7454,7463 ----
+ alias with something that is SET or CLOBBERed.
+
+ X is the pattern of the insn. */
+
+ static void
+! invalidate_from_clobbers (x)
+ rtx x;
+ {
+ if (GET_CODE (x) == CLOBBER)
+ {
+*************** invalidate_from_clobbers (w, x)
+*** 7615,7619 ****
+ {
+ if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
+! || (GET_CODE (ref) == MEM && ! w->all))
+ invalidate (ref, VOIDmode);
+ else if (GET_CODE (ref) == STRICT_LOW_PART
+--- 7466,7470 ----
+ {
+ if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
+! || GET_CODE (ref) == MEM)
+ invalidate (ref, VOIDmode);
+ else if (GET_CODE (ref) == STRICT_LOW_PART
+*************** invalidate_from_clobbers (w, x)
+*** 7631,7643 ****
+ {
+ rtx ref = XEXP (y, 0);
+! if (ref)
+! {
+! if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
+! || (GET_CODE (ref) == MEM && !w->all))
+! invalidate (ref, VOIDmode);
+! else if (GET_CODE (ref) == STRICT_LOW_PART
+! || GET_CODE (ref) == ZERO_EXTRACT)
+! invalidate (XEXP (ref, 0), GET_MODE (ref));
+! }
+ }
+ }
+--- 7482,7491 ----
+ {
+ rtx ref = XEXP (y, 0);
+! if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
+! || GET_CODE (ref) == MEM)
+! invalidate (ref, VOIDmode);
+! else if (GET_CODE (ref) == STRICT_LOW_PART
+! || GET_CODE (ref) == ZERO_EXTRACT)
+! invalidate (XEXP (ref, 0), GET_MODE (ref));
+ }
+ }
+*************** cse_around_loop (loop_start)
+*** 7800,7807 ****
+ }
+
+- /* Variable used for communications between the next two routines. */
+-
+- static struct write_data skipped_writes_memory;
+-
+ /* Process one SET of an insn that was skipped. We ignore CLOBBERs
+ since they are done elsewhere. This function is called via note_stores. */
+--- 7648,7651 ----
+*************** invalidate_skipped_set (dest, set)
+*** 7812,7815 ****
+--- 7656,7675 ----
+ rtx dest;
+ {
++ enum rtx_code code = GET_CODE (dest);
++
++ if (code == MEM
++ && ! note_mem_written (dest) /* If this is not a stack push ... */
++ /* There are times when an address can appear varying and be a PLUS
++ during this scan when it would be a fixed address were we to know
++ the proper equivalences. So invalidate all memory if there is
++ a BLKmode or nonscalar memory reference or a reference to a
++ variable address. */
++ && (MEM_IN_STRUCT_P (dest) || GET_MODE (dest) == BLKmode
++ || cse_rtx_varies_p (XEXP (dest, 0))))
++ {
++ invalidate_memory ();
++ return;
++ }
++
+ if (GET_CODE (set) == CLOBBER
+ #ifdef HAVE_cc0
+*************** invalidate_skipped_set (dest, set)
+*** 7819,7837 ****
+ return;
+
+! if (GET_CODE (dest) == MEM)
+! note_mem_written (dest, &skipped_writes_memory);
+!
+! /* There are times when an address can appear varying and be a PLUS
+! during this scan when it would be a fixed address were we to know
+! the proper equivalences. So promote "nonscalar" to be "all". */
+! if (skipped_writes_memory.nonscalar)
+! skipped_writes_memory.all = 1;
+!
+! if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
+! || (! skipped_writes_memory.all && ! cse_rtx_addr_varies_p (dest)))
+! invalidate (dest, VOIDmode);
+! else if (GET_CODE (dest) == STRICT_LOW_PART
+! || GET_CODE (dest) == ZERO_EXTRACT)
+ invalidate (XEXP (dest, 0), GET_MODE (dest));
+ }
+
+--- 7679,7686 ----
+ return;
+
+! if (code == STRICT_LOW_PART || code == ZERO_EXTRACT)
+ invalidate (XEXP (dest, 0), GET_MODE (dest));
++ else if (code == REG || code == SUBREG || code == MEM)
++ invalidate (dest, VOIDmode);
+ }
+
+*************** invalidate_skipped_block (start)
+*** 7845,7850 ****
+ {
+ rtx insn;
+- static struct write_data init = {0, 0, 0, 0};
+- static struct write_data everything = {0, 1, 1, 1};
+
+ for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
+--- 7694,7697 ----
+*************** invalidate_skipped_block (start)
+*** 7854,7867 ****
+ continue;
+
+- skipped_writes_memory = init;
+-
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ invalidate_for_call ();
+- skipped_writes_memory = everything;
+ }
+
+ note_stores (PATTERN (insn), invalidate_skipped_set);
+- invalidate_from_clobbers (&skipped_writes_memory, PATTERN (insn));
+ }
+ }
+--- 7701,7712 ----
+ continue;
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
++ if (! CONST_CALL_P (insn))
++ invalidate_memory ();
+ invalidate_for_call ();
+ }
+
+ note_stores (PATTERN (insn), invalidate_skipped_set);
+ }
+ }
+*************** cse_set_around_loop (x, insn, loop_start
+*** 7913,7920 ****
+ {
+ struct table_elt *src_elt;
+- static struct write_data init = {0, 0, 0, 0};
+- struct write_data writes_memory;
+-
+- writes_memory = init;
+
+ /* If this is a SET, see if we can replace SET_SRC, but ignore SETs that
+--- 7758,7761 ----
+*************** cse_set_around_loop (x, insn, loop_start
+*** 7976,7991 ****
+
+ /* Now invalidate anything modified by X. */
+! note_mem_written (SET_DEST (x), &writes_memory);
+!
+! if (writes_memory.var)
+! invalidate_memory (&writes_memory);
+!
+! /* See comment on similar code in cse_insn for explanation of these tests. */
+ if (GET_CODE (SET_DEST (x)) == REG || GET_CODE (SET_DEST (x)) == SUBREG
+! || (GET_CODE (SET_DEST (x)) == MEM && ! writes_memory.all
+! && ! cse_rtx_addr_varies_p (SET_DEST (x))))
+ invalidate (SET_DEST (x), VOIDmode);
+ else if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
+! || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
+ invalidate (XEXP (SET_DEST (x), 0), GET_MODE (SET_DEST (x)));
+ }
+--- 7817,7828 ----
+
+ /* Now invalidate anything modified by X. */
+! note_mem_written (SET_DEST (x));
+!
+! /* See comment on similar code in cse_insn for explanation of these tests. */
+ if (GET_CODE (SET_DEST (x)) == REG || GET_CODE (SET_DEST (x)) == SUBREG
+! || GET_CODE (SET_DEST (x)) == MEM)
+ invalidate (SET_DEST (x), VOIDmode);
+ else if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
+! || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
+ invalidate (XEXP (SET_DEST (x), 0), GET_MODE (SET_DEST (x)));
+ }
+*************** cse_main (f, nregs, after_loop, file)
+*** 8234,8237 ****
+--- 8071,8075 ----
+
+ init_recog ();
++ init_alias_analysis ();
+
+ max_reg = nregs;
+*************** cse_basic_block (from, to, next_branch,
+*** 8405,8408 ****
+--- 8243,8247 ----
+ int to_usage = 0;
+ int in_libcall_block = 0;
++ int num_insns = 0;
+
+ /* Each of these arrays is undefined before max_reg, so only allocate
+*************** cse_basic_block (from, to, next_branch,
+*** 8437,8440 ****
+--- 8276,8299 ----
+ {
+ register enum rtx_code code;
++ int i;
++ struct table_elt *p, *next;
++
++ /* If we have processed 1,000 insns, flush the hash table to avoid
++ extreme quadratic behavior. */
++ if (num_insns++ > 1000)
++ {
++ for (i = 0; i < NBUCKETS; i++)
++ for (p = table[i]; p; p = next)
++ {
++ next = p->next_same_hash;
++
++ if (GET_CODE (p->exp) == REG)
++ invalidate (p->exp, p->mode);
++ else
++ remove_from_table (p, i);
++ }
++
++ num_insns = 0;
++ }
+
+ /* See if this is a branch that is part of the path. If so, and it is
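The cse.c changes above drop the old per-insn write_data classification: memory invalidation now either removes every hash-table entry marked in_memory (invalidate_memory) or relies on true_dependence to decide whether a particular MEM can alias the store, and the same idea drives the new 1,000-insn flush in cse_basic_block that empties the table periodically to avoid quadratic behavior. A minimal sketch of the "flush everything that is in memory" walk, using simplified stand-ins for the real table and element types (nothing here is the actual cse.c code):

#define NBUCKETS 32

struct elt
{
  struct elt *next_same_hash;
  int in_memory;               /* nonzero if the expression references memory */
};

static struct elt *table[NBUCKETS];

static void
flush_memory_entries (void)
{
  int i;
  struct elt **prev, *p, *next;

  for (i = 0; i < NBUCKETS; i++)
    for (prev = &table[i], p = table[i]; p; p = next)
      {
        next = p->next_same_hash;
        if (p->in_memory)
          *prev = next;          /* unlink; the real code calls remove_from_table */
        else
          prev = &p->next_same_hash;
      }
}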
+diff -rcp2N gcc-2.7.2.2/dwarfout.c g77-new/dwarfout.c
+*** gcc-2.7.2.2/dwarfout.c Thu Oct 26 21:40:07 1995
+--- g77-new/dwarfout.c Sun Aug 10 18:47:19 1997
+*************** output_bound_representation (bound, dim_
+*** 1629,1705 ****
+ {
+
+! case ERROR_MARK:
+! return;
+
+ /* All fixed-bounds are represented by INTEGER_CST nodes. */
+
+! case INTEGER_CST:
+! ASM_OUTPUT_DWARF_DATA4 (asm_out_file,
+! (unsigned) TREE_INT_CST_LOW (bound));
+! break;
+!
+! /* Dynamic bounds may be represented by NOP_EXPR nodes containing
+! SAVE_EXPR nodes. */
+!
+! case NOP_EXPR:
+! bound = TREE_OPERAND (bound, 0);
+! /* ... fall thru... */
+!
+! case SAVE_EXPR:
+! {
+! char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+! char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+!
+! sprintf (begin_label, BOUND_BEGIN_LABEL_FMT,
+! current_dienum, dim_num, u_or_l);
+
+! sprintf (end_label, BOUND_END_LABEL_FMT,
+! current_dienum, dim_num, u_or_l);
+
+! ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, end_label, begin_label);
+! ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+
+! /* If we are working on a bound for a dynamic dimension in C,
+! the dynamic dimension in question had better have a static
+! (zero) lower bound and a dynamic *upper* bound. */
+
+! if (u_or_l != 'u')
+! abort ();
+
+! /* If optimization is turned on, the SAVE_EXPRs that describe
+! how to access the upper bound values are essentially bogus.
+! They only describe (at best) how to get at these values at
+! the points in the generated code right after they have just
+! been computed. Worse yet, in the typical case, the upper
+! bound values will not even *be* computed in the optimized
+! code, so these SAVE_EXPRs are entirely bogus.
+!
+! In order to compensate for this fact, we check here to see
+! if optimization is enabled, and if so, we effectively create
+! an empty location description for the (unknown and unknowable)
+! upper bound.
+!
+! This should not cause too much trouble for existing (stupid?)
+! debuggers because they have to deal with empty upper bounds
+! location descriptions anyway in order to be able to deal with
+! incomplete array types.
+!
+! Of course an intelligent debugger (GDB?) should be able to
+! comprehend that a missing upper bound specification in a
+! array type used for a storage class `auto' local array variable
+! indicates that the upper bound is both unknown (at compile-
+! time) and unknowable (at run-time) due to optimization.
+! */
+!
+! if (! optimize)
+! output_loc_descriptor
+! (eliminate_regs (SAVE_EXPR_RTL (bound), 0, NULL_RTX));
+
+! ASM_OUTPUT_LABEL (asm_out_file, end_label);
+! }
+! break;
+
+- default:
+- abort ();
+ }
+ }
+--- 1629,1699 ----
+ {
+
+! case ERROR_MARK:
+! return;
+
+ /* All fixed-bounds are represented by INTEGER_CST nodes. */
+
+! case INTEGER_CST:
+! ASM_OUTPUT_DWARF_DATA4 (asm_out_file,
+! (unsigned) TREE_INT_CST_LOW (bound));
+! break;
+
+! default:
+
+! /* Dynamic bounds may be represented by NOP_EXPR nodes containing
+! SAVE_EXPR nodes, in which case we can do something, or as
+! an expression, which we cannot represent. */
+! {
+! char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+! char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+! sprintf (begin_label, BOUND_BEGIN_LABEL_FMT,
+! current_dienum, dim_num, u_or_l);
+
+! sprintf (end_label, BOUND_END_LABEL_FMT,
+! current_dienum, dim_num, u_or_l);
+
+! ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, end_label, begin_label);
+! ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+!
+! /* If optimization is turned on, the SAVE_EXPRs that describe
+! how to access the upper bound values are essentially bogus.
+! They only describe (at best) how to get at these values at
+! the points in the generated code right after they have just
+! been computed. Worse yet, in the typical case, the upper
+! bound values will not even *be* computed in the optimized
+! code, so these SAVE_EXPRs are entirely bogus.
+!
+! In order to compensate for this fact, we check here to see
+! if optimization is enabled, and if so, we effectively create
+! an empty location description for the (unknown and unknowable)
+! upper bound.
+!
+! This should not cause too much trouble for existing (stupid?)
+! debuggers because they have to deal with empty upper bounds
+! location descriptions anyway in order to be able to deal with
+! incomplete array types.
+!
+! Of course an intelligent debugger (GDB?) should be able to
+! comprehend that a missing upper bound specification in a
+! array type used for a storage class `auto' local array variable
+! indicates that the upper bound is both unknown (at compile-
+! time) and unknowable (at run-time) due to optimization. */
+!
+! if (! optimize)
+! {
+! while (TREE_CODE (bound) == NOP_EXPR
+! || TREE_CODE (bound) == CONVERT_EXPR)
+! bound = TREE_OPERAND (bound, 0);
+!
+! if (TREE_CODE (bound) == SAVE_EXPR)
+! output_loc_descriptor
+! (eliminate_regs (SAVE_EXPR_RTL (bound), 0, NULL_RTX));
+! }
+
+! ASM_OUTPUT_LABEL (asm_out_file, end_label);
+! }
+! break;
+
+ }
+ }
+*************** type_attribute (type, decl_const, decl_v
+*** 2857,2861 ****
+ register int root_type_modified;
+
+! if (TREE_CODE (type) == ERROR_MARK)
+ return;
+
+--- 2851,2855 ----
+ register int root_type_modified;
+
+! if (code == ERROR_MARK)
+ return;
+
+*************** type_attribute (type, decl_const, decl_v
+*** 2864,2869 ****
+ type `void', so this only applies to function return types. */
+
+! if (TREE_CODE (type) == VOID_TYPE)
+ return;
+
+ root_type_modified = (code == POINTER_TYPE || code == REFERENCE_TYPE
+--- 2858,2869 ----
+ type `void', so this only applies to function return types. */
+
+! if (code == VOID_TYPE)
+ return;
++
++ /* If this is a subtype, find the underlying type. Eventually,
++ this should write out the appropriate subtype info. */
++ while ((code == INTEGER_TYPE || code == REAL_TYPE)
++ && TREE_TYPE (type) != 0)
++ type = TREE_TYPE (type), code = TREE_CODE (type);
+
+ root_type_modified = (code == POINTER_TYPE || code == REFERENCE_TYPE
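The dwarfout.c hunks above generalize the handling of array bounds: instead of accepting only a NOP_EXPR directly wrapping a SAVE_EXPR, the default case now peels any chain of NOP_EXPR/CONVERT_EXPR nodes and emits a location descriptor only if a SAVE_EXPR remains underneath (and only when not optimizing). A small sketch of that peeling test over a simplified node type (names are illustrative, not the real tree machinery):

enum node_code { NODE_NOP, NODE_CONVERT, NODE_SAVE, NODE_OTHER };

struct node
{
  enum node_code code;
  struct node *operand;          /* inner expression for NOP/CONVERT wrappers */
};

/* Strip no-op conversion wrappers and report whether a SAVE_EXPR-like node
   is underneath; only in that case would a location descriptor be emitted. */
static int
bound_has_saved_location (struct node *bound)
{
  while (bound->code == NODE_NOP || bound->code == NODE_CONVERT)
    bound = bound->operand;
  return bound->code == NODE_SAVE;
}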
+diff -rcp2N gcc-2.7.2.2/emit-rtl.c g77-new/emit-rtl.c
+*** gcc-2.7.2.2/emit-rtl.c Thu Sep 14 16:09:30 1995
+--- g77-new/emit-rtl.c Sun Aug 10 18:47:08 1997
+*************** max_label_num ()
+*** 545,548 ****
+--- 545,565 ----
+ }
+
++ /* Identify REG (which may be a CONCAT) as a user register. */
++
++ void
++ mark_user_reg (reg)
++ rtx reg;
++ {
++ if (GET_CODE (reg) == CONCAT)
++ {
++ REG_USERVAR_P (XEXP (reg, 0)) = 1;
++ REG_USERVAR_P (XEXP (reg, 1)) = 1;
++ }
++ else if (GET_CODE (reg) == REG)
++ REG_USERVAR_P (reg) = 1;
++ else
++ abort ();
++ }
++
+ /* Return first label number used in this function (if any were used). */
+
+*************** change_address (memref, mode, addr)
+*** 1315,1318 ****
+--- 1332,1338 ----
+ addr = memory_address (mode, addr);
+
++ if (rtx_equal_p (addr, XEXP (memref, 0)) && mode == GET_MODE (memref))
++ return memref;
++
+ new = gen_rtx (MEM, mode, addr);
+ MEM_VOLATILE_P (new) = MEM_VOLATILE_P (memref);
+diff -rcp2N gcc-2.7.2.2/explow.c g77-new/explow.c
+*** gcc-2.7.2.2/explow.c Thu Jun 15 07:30:10 1995
+--- g77-new/explow.c Sun Aug 10 18:46:30 1997
+*************** convert_memory_address (to_mode, x)
+*** 305,310 ****
+--- 305,313 ----
+ rtx x;
+ {
++ enum machine_mode from_mode = to_mode == ptr_mode ? Pmode : ptr_mode;
+ rtx temp;
+
++ /* Here we handle some special cases. If none of them apply, fall through
++ to the default case. */
+ switch (GET_CODE (x))
+ {
+*************** convert_memory_address (to_mode, x)
+*** 321,339 ****
+ return temp;
+
+- case PLUS:
+- case MULT:
+- return gen_rtx (GET_CODE (x), to_mode,
+- convert_memory_address (to_mode, XEXP (x, 0)),
+- convert_memory_address (to_mode, XEXP (x, 1)));
+-
+ case CONST:
+ return gen_rtx (CONST, to_mode,
+ convert_memory_address (to_mode, XEXP (x, 0)));
+
+! default:
+! return convert_modes (to_mode,
+! to_mode == ptr_mode ? Pmode : ptr_mode,
+! x, POINTERS_EXTEND_UNSIGNED);
+ }
+ }
+ #endif
+--- 324,348 ----
+ return temp;
+
+ case CONST:
+ return gen_rtx (CONST, to_mode,
+ convert_memory_address (to_mode, XEXP (x, 0)));
+
+! case PLUS:
+! case MULT:
+!        /* For addition, if the second operand is a small constant, we can safely
+!           permute the conversion and addition operation.  We can always safely
+! permute them if we are making the address narrower. In addition,
+! always permute the operations if this is a constant. */
+! if (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (from_mode)
+! || (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT
+! && (INTVAL (XEXP (x, 1)) + 20000 < 40000
+! || CONSTANT_P (XEXP (x, 0)))))
+! return gen_rtx (GET_CODE (x), to_mode,
+! convert_memory_address (to_mode, XEXP (x, 0)),
+! convert_memory_address (to_mode, XEXP (x, 1)));
+ }
++
++ return convert_modes (to_mode, from_mode,
++ x, POINTERS_EXTEND_UNSIGNED);
+ }
+ #endif
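The rewritten convert_memory_address above only pushes the mode conversion inside a PLUS or MULT when that is known to be safe: always when the address is being narrowed, and for PLUS when the addend is a small CONST_INT (roughly within +/-20000) or the other operand is constant. A detached sketch of that test, with plain integers standing in for the rtl accessors (none of these names exist in the compiler):

static int
may_permute_conversion (long to_mode_size, long from_mode_size,
                        int is_plus_with_const_addend, long addend,
                        int other_operand_is_constant)
{
  /* Narrowing the address is always safe.  */
  if (to_mode_size < from_mode_size)
    return 1;

  /* Otherwise only a PLUS with a small constant addend (or a constant
     first operand) is permuted; this mirrors the
     "INTVAL (...) + 20000 < 40000" test in the patch.  */
  if (is_plus_with_const_addend
      && (addend + 20000 < 40000 || other_operand_is_constant))
    return 1;

  return 0;
}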
+diff -rcp2N gcc-2.7.2.2/expmed.c g77-new/expmed.c
+*** gcc-2.7.2.2/expmed.c Thu Jul 13 19:25:37 1995
+--- g77-new/expmed.c Sun Aug 10 18:46:23 1997
+*************** store_bit_field (str_rtx, bitsize, bitnu
+*** 399,402 ****
+--- 399,403 ----
+ #ifdef HAVE_insv
+ if (HAVE_insv
++ && GET_MODE (value) != BLKmode
+ && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
+ /* Ensure insv's size is wide enough for this field. */
+*************** store_split_bit_field (op0, bitsize, bit
+*** 777,781 ****
+ done in extract_bit_field, so that the two calls to
+ extract_fixed_bit_field will have comparable arguments. */
+! if (GET_CODE (value) != MEM)
+ total_bits = BITS_PER_WORD;
+ else
+--- 778,782 ----
+ done in extract_bit_field, so that the two calls to
+ extract_fixed_bit_field will have comparable arguments. */
+! if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
+ total_bits = BITS_PER_WORD;
+ else
+*************** store_split_bit_field (op0, bitsize, bit
+*** 790,797 ****
+ /* The args are chosen so that the last part includes the
+ lsb. Give extract_bit_field the value it needs (with
+! endianness compensation) to fetch the piece we want. */
+! part = extract_fixed_bit_field (word_mode, value, 0, thissize,
+! total_bits - bitsize + bitsdone,
+! NULL_RTX, 1, align);
+ }
+ else
+--- 791,807 ----
+ /* The args are chosen so that the last part includes the
+ lsb. Give extract_bit_field the value it needs (with
+! endianness compensation) to fetch the piece we want.
+!
+! ??? We have no idea what the alignment of VALUE is, so
+! we have to use a guess. */
+! part
+! = extract_fixed_bit_field
+! (word_mode, value, 0, thissize,
+! total_bits - bitsize + bitsdone, NULL_RTX, 1,
+! GET_MODE (value) == VOIDmode
+! ? UNITS_PER_WORD
+! : (GET_MODE (value) == BLKmode
+! ? 1
+! : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
+ }
+ else
+*************** store_split_bit_field (op0, bitsize, bit
+*** 803,808 ****
+ & (((HOST_WIDE_INT) 1 << thissize) - 1));
+ else
+! part = extract_fixed_bit_field (word_mode, value, 0, thissize,
+! bitsdone, NULL_RTX, 1, align);
+ }
+
+--- 813,824 ----
+ & (((HOST_WIDE_INT) 1 << thissize) - 1));
+ else
+! part
+! = extract_fixed_bit_field
+! (word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
+! GET_MODE (value) == VOIDmode
+! ? UNITS_PER_WORD
+! : (GET_MODE (value) == BLKmode
+! ? 1
+! : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
+ }
+
+*************** extract_bit_field (str_rtx, bitsize, bit
+*** 876,882 ****
+ rtx spec_target_subreg = 0;
+
+- if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
+- abort ();
+-
+ /* Discount the part of the structure before the desired byte.
+ We need to know how many bytes are safe to reference after it. */
+--- 892,895 ----
+*************** expand_divmod (rem_flag, code, mode, op0
+*** 3189,3193 ****
+ Notice that we compute also the final remainder value here,
+ and return the result right away. */
+! if (target == 0)
+ target = gen_reg_rtx (compute_mode);
+
+--- 3202,3206 ----
+ Notice that we compute also the final remainder value here,
+ and return the result right away. */
+! if (target == 0 || GET_MODE (target) != compute_mode)
+ target = gen_reg_rtx (compute_mode);
+
+*************** expand_divmod (rem_flag, code, mode, op0
+*** 3316,3320 ****
+ remainder. Notice that we compute also the final remainder
+ value here, and return the result right away. */
+! if (target == 0)
+ target = gen_reg_rtx (compute_mode);
+
+--- 3329,3333 ----
+ remainder. Notice that we compute also the final remainder
+ value here, and return the result right away. */
+! if (target == 0 || GET_MODE (target) != compute_mode)
+ target = gen_reg_rtx (compute_mode);
+
+*************** expand_divmod (rem_flag, code, mode, op0
+*** 3418,3422 ****
+ remainder. Notice that we compute also the final remainder
+ value here, and return the result right away. */
+! if (target == 0)
+ target = gen_reg_rtx (compute_mode);
+ if (rem_flag)
+--- 3431,3435 ----
+ remainder. Notice that we compute also the final remainder
+ value here, and return the result right away. */
+! if (target == 0 || GET_MODE (target) != compute_mode)
+ target = gen_reg_rtx (compute_mode);
+ if (rem_flag)
+*************** expand_divmod (rem_flag, code, mode, op0
+*** 3602,3605 ****
+--- 3615,3621 ----
+ if (quotient == 0)
+ {
++ if (target && GET_MODE (target) != compute_mode)
++ target = 0;
++
+ if (rem_flag)
+ {
+*************** expand_divmod (rem_flag, code, mode, op0
+*** 3653,3656 ****
+--- 3669,3675 ----
+ if (rem_flag)
+ {
++ if (target && GET_MODE (target) != compute_mode)
++ target = 0;
++
+ if (quotient == 0)
+ /* No divide instruction either. Use library for remainder. */
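Several of the expmed.c hunks above replace a fixed `align' argument with a guess derived from the mode of VALUE, since store_split_bit_field has no real alignment information for it: a VOIDmode constant is treated as word-aligned, a BLKmode value as byte-aligned, and anything else as aligned to its own mode. A compact sketch of that rule, using explicit parameters instead of the real target macros (names are illustrative):

enum value_mode_kind { KIND_VOID, KIND_BLK, KIND_SCALAR };

static int
guess_value_alignment (enum value_mode_kind kind,
                       int mode_alignment_bits,
                       int units_per_word, int bits_per_unit)
{
  if (kind == KIND_VOID)
    return units_per_word;                      /* constant: assume a full word */
  if (kind == KIND_BLK)
    return 1;                                   /* unknown layout: one byte */
  return mode_alignment_bits / bits_per_unit;   /* use the mode's own alignment */
}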
+diff -rcp2N gcc-2.7.2.2/expr.c g77-new/expr.c
+*** gcc-2.7.2.2/expr.c Thu Feb 20 19:24:17 1997
+--- g77-new/expr.c Sun Aug 10 18:47:21 1997
+*************** Boston, MA 02111-1307, USA. */
+*** 27,30 ****
+--- 27,31 ----
+ #include "flags.h"
+ #include "regs.h"
++ #include "hard-reg-set.h"
+ #include "function.h"
+ #include "insn-flags.h"
+*************** extern int stack_depth;
+*** 139,143 ****
+ extern int max_stack_depth;
+ extern struct obstack permanent_obstack;
+!
+
+ static rtx enqueue_insn PROTO((rtx, rtx));
+--- 140,144 ----
+ extern int max_stack_depth;
+ extern struct obstack permanent_obstack;
+! extern rtx arg_pointer_save_area;
+
+ static rtx enqueue_insn PROTO((rtx, rtx));
+*************** expand_assignment (to, from, want_value,
+*** 2498,2503 ****
+
+ push_temp_slots ();
+! tem = get_inner_reference (to, &bitsize, &bitpos, &offset,
+! &mode1, &unsignedp, &volatilep);
+
+ /* If we are going to use store_bit_field and extract_bit_field,
+--- 2499,2504 ----
+
+ push_temp_slots ();
+! tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1,
+! &unsignedp, &volatilep, &alignment);
+
+ /* If we are going to use store_bit_field and extract_bit_field,
+*************** expand_assignment (to, from, want_value,
+*** 2507,2511 ****
+ tem = stabilize_reference (tem);
+
+- alignment = TYPE_ALIGN (TREE_TYPE (tem)) / BITS_PER_UNIT;
+ to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, 0);
+ if (offset != 0)
+--- 2508,2511 ----
+*************** expand_assignment (to, from, want_value,
+*** 2518,2529 ****
+ gen_rtx (PLUS, ptr_mode, XEXP (to_rtx, 0),
+ force_reg (ptr_mode, offset_rtx)));
+- /* If we have a variable offset, the known alignment
+- is only that of the innermost structure containing the field.
+- (Actually, we could sometimes do better by using the
+- align of an element of the innermost array, but no need.) */
+- if (TREE_CODE (to) == COMPONENT_REF
+- || TREE_CODE (to) == BIT_FIELD_REF)
+- alignment
+- = TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (to, 0))) / BITS_PER_UNIT;
+ }
+ if (volatilep)
+--- 2518,2521 ----
+*************** store_expr (exp, target, want_value)
+*** 2775,2780 ****
+ which will often result in some optimizations. Do the conversion
+ in two steps: first change the signedness, if needed, then
+! the extend. */
+! if (! want_value)
+ {
+ if (TREE_UNSIGNED (TREE_TYPE (exp))
+--- 2767,2775 ----
+ which will often result in some optimizations. Do the conversion
+ in two steps: first change the signedness, if needed, then
+! the extend. But don't do this if the type of EXP is a subtype
+! of something else since then the conversion might involve
+! more than just converting modes. */
+! if (! want_value && INTEGRAL_TYPE_P (TREE_TYPE (exp))
+! && TREE_TYPE (TREE_TYPE (exp)) == 0)
+ {
+ if (TREE_UNSIGNED (TREE_TYPE (exp))
+*************** store_constructor (exp, target)
+*** 3071,3074 ****
+--- 3066,3077 ----
+ }
+
++ if (TREE_READONLY (field))
++ {
++ if (GET_CODE (to_rtx) == MEM)
++ to_rtx = change_address (to_rtx, GET_MODE (to_rtx),
++ XEXP (to_rtx, 0));
++ RTX_UNCHANGING_P (to_rtx) = 1;
++ }
++
+ store_field (to_rtx, bitsize, bitpos, mode, TREE_VALUE (elt),
+ /* The alignment of TARGET is
+*************** store_field (target, bitsize, bitpos, mo
+*** 3414,3417 ****
+--- 3417,3432 ----
+ rtx temp = expand_expr (exp, NULL_RTX, VOIDmode, 0);
+
++ /* If BITSIZE is narrower than the size of the type of EXP
++ we will be narrowing TEMP. Normally, what's wanted are the
++ low-order bits. However, if EXP's type is a record and this is
++          a big-endian machine, we want the upper BITSIZE bits.  */
++ if (BYTES_BIG_ENDIAN && GET_MODE_CLASS (GET_MODE (temp)) == MODE_INT
++ && bitsize < GET_MODE_BITSIZE (GET_MODE (temp))
++ && TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE)
++ temp = expand_shift (RSHIFT_EXPR, GET_MODE (temp), temp,
++ size_int (GET_MODE_BITSIZE (GET_MODE (temp))
++ - bitsize),
++ temp, 1);
++
+ /* Unless MODE is VOIDmode or BLKmode, convert TEMP to
+ MODE. */
+*************** store_field (target, bitsize, bitpos, mo
+*** 3420,3423 ****
+--- 3435,3459 ----
+ temp = convert_modes (mode, TYPE_MODE (TREE_TYPE (exp)), temp, 1);
+
++ /* If the modes of TARGET and TEMP are both BLKmode, both
++ must be in memory and BITPOS must be aligned on a byte
++ boundary. If so, we simply do a block copy. */
++ if (GET_MODE (target) == BLKmode && GET_MODE (temp) == BLKmode)
++ {
++ if (GET_CODE (target) != MEM || GET_CODE (temp) != MEM
++ || bitpos % BITS_PER_UNIT != 0)
++ abort ();
++
++ target = change_address (target, VOIDmode,
++ plus_constant (XEXP (target, 0),
++ bitpos / BITS_PER_UNIT));
++
++ emit_block_move (target, temp,
++ GEN_INT ((bitsize + BITS_PER_UNIT - 1)
++ / BITS_PER_UNIT),
++ 1);
++
++ return value_mode == VOIDmode ? const0_rtx : target;
++ }
++
+ /* Store the value in the bitfield. */
+ store_bit_field (target, bitsize, bitpos, mode, temp, align, total_size);
+*************** get_inner_unaligned_p (exp)
+*** 3515,3518 ****
+--- 3551,3557 ----
+ This offset is in addition to the bit position.
+ If the position is not variable, we store 0 in *POFFSET.
++ We set *PALIGNMENT to the alignment in bytes of the address that will be
++ computed. This is the alignment of the thing we return if *POFFSET
+    is zero, but can be less strictly aligned if *POFFSET is nonzero.
+
+ If any of the extraction expressions is volatile,
+*************** get_inner_unaligned_p (exp)
+*** 3525,3533 ****
+ If the field describes a variable-sized object, *PMODE is set to
+ VOIDmode and *PBITSIZE is set to -1. An access cannot be made in
+! this case, but the address of the object can be found. */
+
+ tree
+ get_inner_reference (exp, pbitsize, pbitpos, poffset, pmode,
+! punsignedp, pvolatilep)
+ tree exp;
+ int *pbitsize;
+--- 3564,3572 ----
+ If the field describes a variable-sized object, *PMODE is set to
+ VOIDmode and *PBITSIZE is set to -1. An access cannot be made in
+! this case, but the address of the object can be found. */
+
+ tree
+ get_inner_reference (exp, pbitsize, pbitpos, poffset, pmode,
+! punsignedp, pvolatilep, palignment)
+ tree exp;
+ int *pbitsize;
+*************** get_inner_reference (exp, pbitsize, pbit
+*** 3537,3540 ****
+--- 3576,3580 ----
+ int *punsignedp;
+ int *pvolatilep;
++ int *palignment;
+ {
+ tree orig_exp = exp;
+*************** get_inner_reference (exp, pbitsize, pbit
+*** 3542,3545 ****
+--- 3582,3586 ----
+ enum machine_mode mode = VOIDmode;
+ tree offset = integer_zero_node;
++ int alignment = BIGGEST_ALIGNMENT;
+
+ if (TREE_CODE (exp) == COMPONENT_REF)
+*************** get_inner_reference (exp, pbitsize, pbit
+*** 3599,3607 ****
+
+ *pbitpos += TREE_INT_CST_LOW (constant);
+!
+! if (var)
+! offset = size_binop (PLUS_EXPR, offset,
+! size_binop (EXACT_DIV_EXPR, var,
+! size_int (BITS_PER_UNIT)));
+ }
+
+--- 3640,3646 ----
+
+ *pbitpos += TREE_INT_CST_LOW (constant);
+! offset = size_binop (PLUS_EXPR, offset,
+! size_binop (EXACT_DIV_EXPR, var,
+! size_int (BITS_PER_UNIT)));
+ }
+
+*************** get_inner_reference (exp, pbitsize, pbit
+*** 3629,3633 ****
+
+ index = fold (build (MULT_EXPR, index_type, index,
+! TYPE_SIZE (TREE_TYPE (exp))));
+
+ if (TREE_CODE (index) == INTEGER_CST
+--- 3668,3673 ----
+
+ index = fold (build (MULT_EXPR, index_type, index,
+! convert (index_type,
+! TYPE_SIZE (TREE_TYPE (exp)))));
+
+ if (TREE_CODE (index) == INTEGER_CST
+*************** get_inner_reference (exp, pbitsize, pbit
+*** 3652,3666 ****
+ if (TREE_THIS_VOLATILE (exp))
+ *pvolatilep = 1;
+ exp = TREE_OPERAND (exp, 0);
+ }
+
+! /* If this was a bit-field, see if there is a mode that allows direct
+! access in case EXP is in memory. */
+! if (mode == VOIDmode && *pbitsize != 0 && *pbitpos % *pbitsize == 0)
+! {
+! mode = mode_for_size (*pbitsize, MODE_INT, 0);
+! if (mode == BLKmode)
+! mode = VOIDmode;
+! }
+
+ if (integer_zerop (offset))
+--- 3692,3708 ----
+ if (TREE_THIS_VOLATILE (exp))
+ *pvolatilep = 1;
++
++ /* If the offset is non-constant already, then we can't assume any
++ alignment more than the alignment here. */
++ if (! integer_zerop (offset))
++ alignment = MIN (alignment, TYPE_ALIGN (TREE_TYPE (exp)));
++
+ exp = TREE_OPERAND (exp, 0);
+ }
+
+! if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd')
+! alignment = MIN (alignment, DECL_ALIGN (exp));
+! else if (TREE_TYPE (exp) != 0)
+! alignment = MIN (alignment, TYPE_ALIGN (TREE_TYPE (exp)));
+
+ if (integer_zerop (offset))
+*************** get_inner_reference (exp, pbitsize, pbit
+*** 3672,3675 ****
+--- 3714,3718 ----
+ *pmode = mode;
+ *poffset = offset;
++ *palignment = alignment / BITS_PER_UNIT;
+ return exp;
+ }
+*************** init_noncopied_parts (lhs, list)
+*** 3812,3820 ****
+ }
+
+! /* Subroutine of expand_expr: return nonzero iff there is no way that
+ EXP can reference X, which is being modified. */
+
+ static int
+! safe_from_p (x, exp)
+ rtx x;
+ tree exp;
+--- 3855,3867 ----
+ }
+
+! static int safe_from_p_count;
+! static int safe_from_p_size;
+! static tree *safe_from_p_rewritten;
+!
+! /* Subroutine of safe_from_p: return nonzero iff there is no way that
+ EXP can reference X, which is being modified. */
+
+ static int
+! safe_from_p_1 (x, exp)
+ rtx x;
+ tree exp;
+*************** safe_from_p (x, exp)
+*** 3822,3825 ****
+--- 3869,3873 ----
+ rtx exp_rtl = 0;
+ int i, nops;
++ int is_save_expr = 0;
+
+ if (x == 0
+*************** safe_from_p (x, exp)
+*** 3860,3878 ****
+
+ case 'x':
+! if (TREE_CODE (exp) == TREE_LIST)
+! return ((TREE_VALUE (exp) == 0
+! || safe_from_p (x, TREE_VALUE (exp)))
+! && (TREE_CHAIN (exp) == 0
+! || safe_from_p (x, TREE_CHAIN (exp))));
+! else
+! return 0;
+
+ case '1':
+! return safe_from_p (x, TREE_OPERAND (exp, 0));
+
+ case '2':
+ case '<':
+! return (safe_from_p (x, TREE_OPERAND (exp, 0))
+! && safe_from_p (x, TREE_OPERAND (exp, 1)));
+
+ case 'e':
+--- 3908,3933 ----
+
+ case 'x':
+! switch (TREE_CODE (exp))
+! {
+! case TREE_LIST:
+! return ((TREE_VALUE (exp) == 0
+! || safe_from_p_1 (x, TREE_VALUE (exp)))
+! && (TREE_CHAIN (exp) == 0
+! || safe_from_p_1 (x, TREE_CHAIN (exp))));
+!
+! case ERROR_MARK:
+! return 1;
+!
+! default:
+! return 0;
+! }
+
+ case '1':
+! return safe_from_p_1 (x, TREE_OPERAND (exp, 0));
+
+ case '2':
+ case '<':
+! return (safe_from_p_1 (x, TREE_OPERAND (exp, 0))
+! && safe_from_p_1 (x, TREE_OPERAND (exp, 1)));
+
+ case 'e':
+*************** safe_from_p (x, exp)
+*** 3887,3891 ****
+ case ADDR_EXPR:
+ return (staticp (TREE_OPERAND (exp, 0))
+! || safe_from_p (x, TREE_OPERAND (exp, 0)));
+
+ case INDIRECT_REF:
+--- 3942,3946 ----
+ case ADDR_EXPR:
+ return (staticp (TREE_OPERAND (exp, 0))
+! || safe_from_p_1 (x, TREE_OPERAND (exp, 0)));
+
+ case INDIRECT_REF:
+*************** safe_from_p (x, exp)
+*** 3922,3928 ****
+
+ case CLEANUP_POINT_EXPR:
+! return safe_from_p (x, TREE_OPERAND (exp, 0));
+
+ case SAVE_EXPR:
+ exp_rtl = SAVE_EXPR_RTL (exp);
+ break;
+--- 3977,3984 ----
+
+ case CLEANUP_POINT_EXPR:
+! return safe_from_p_1 (x, TREE_OPERAND (exp, 0));
+
+ case SAVE_EXPR:
++ is_save_expr = 1;
+ exp_rtl = SAVE_EXPR_RTL (exp);
+ break;
+*************** safe_from_p (x, exp)
+*** 3931,3935 ****
+ /* The only operand we look at is operand 1. The rest aren't
+ part of the expression. */
+! return safe_from_p (x, TREE_OPERAND (exp, 1));
+
+ case METHOD_CALL_EXPR:
+--- 3987,3991 ----
+ /* The only operand we look at is operand 1. The rest aren't
+ part of the expression. */
+! return safe_from_p_1 (x, TREE_OPERAND (exp, 1));
+
+ case METHOD_CALL_EXPR:
+*************** safe_from_p (x, exp)
+*** 3945,3949 ****
+ for (i = 0; i < nops; i++)
+ if (TREE_OPERAND (exp, i) != 0
+! && ! safe_from_p (x, TREE_OPERAND (exp, i)))
+ return 0;
+ }
+--- 4001,4005 ----
+ for (i = 0; i < nops; i++)
+ if (TREE_OPERAND (exp, i) != 0
+! && ! safe_from_p_1 (x, TREE_OPERAND (exp, i)))
+ return 0;
+ }
+*************** safe_from_p (x, exp)
+*** 3969,3975 ****
+--- 4025,4087 ----
+
+ /* If we reach here, it is safe. */
++ if (is_save_expr)
++ {
++ /* This SAVE_EXPR might appear many times in the top-level
++ safe_from_p() expression, and if it has a complex
++ subexpression, examining it multiple times could result
++ in a combinatorial explosion. E.g. on an Alpha Cabriolet
++ running at least 200MHz, a Fortran test case compiled with
++ optimization took about 28 minutes to compile -- even though
++ it was only a few lines long, and the complicated line causing
++ so much time to be spent in the earlier version of safe_from_p()
++ had only 293 or so unique nodes.
++
++ So, turn this SAVE_EXPR into an ERROR_MARK for now, but remember
++ where it is so we can turn it back in the top-level safe_from_p()
++ when we're done. */
++
++ if (safe_from_p_count > safe_from_p_size)
++ return 0; /* For now, don't bother re-sizing the array. */
++ safe_from_p_rewritten[safe_from_p_count++] = exp;
++ TREE_SET_CODE (exp, ERROR_MARK);
++ }
++
+ return 1;
+ }
+
++ /* Subroutine of expand_expr: return nonzero iff there is no way that
++ EXP can reference X, which is being modified. */
++
++ static int
++ safe_from_p (x, exp)
++ rtx x;
++ tree exp;
++ {
++ int rtn;
++ int i;
++ tree trees[128];
++
++ safe_from_p_count = 0;
++ safe_from_p_size = sizeof (trees) / sizeof (trees[0]);
++ safe_from_p_rewritten = &trees[0];
++
++ rtn = safe_from_p_1 (x, exp);
++
++ #if 0
++ if (safe_from_p_count != 0)
++ fprintf (stderr, "%s:%d: safe_from_p_count = %d\n",
++ input_filename, lineno, safe_from_p_count);
++ #endif
++
++ for (i = 0; i < safe_from_p_count; ++i)
++ {
++ if (TREE_CODE (trees [i]) != ERROR_MARK)
++ abort ();
++ TREE_SET_CODE (trees[i], SAVE_EXPR);
++ }
++
++ return rtn;
++ }
++
+ /* Subroutine of expand_expr: return nonzero iff EXP is an
+ expression whose type is statically determinable. */
+*************** expand_expr (exp, target, tmode, modifie
+*** 4534,4537 ****
+--- 4646,4658 ----
+ }
+ }
++
++ if (TREE_READONLY (exp))
++ {
++ if (GET_CODE (target) == MEM)
++ target = change_address (target, GET_MODE (target),
++ XEXP (target, 0));
++ RTX_UNCHANGING_P (target) = 1;
++ }
++
+ store_constructor (exp, target);
+ return target;
+*************** expand_expr (exp, target, tmode, modifie
+*** 4543,4567 ****
+ tree exp2;
+
+! /* A SAVE_EXPR as the address in an INDIRECT_EXPR is generated
+! for *PTR += ANYTHING where PTR is put inside the SAVE_EXPR.
+! This code has the same general effect as simply doing
+! expand_expr on the save expr, except that the expression PTR
+! is computed for use as a memory address. This means different
+! code, suitable for indexing, may be generated. */
+! if (TREE_CODE (exp1) == SAVE_EXPR
+! && SAVE_EXPR_RTL (exp1) == 0
+! && TYPE_MODE (TREE_TYPE (exp1)) == ptr_mode)
+! {
+! temp = expand_expr (TREE_OPERAND (exp1, 0), NULL_RTX,
+! VOIDmode, EXPAND_SUM);
+! op0 = memory_address (mode, temp);
+! op0 = copy_all_regs (op0);
+! SAVE_EXPR_RTL (exp1) = op0;
+! }
+! else
+! {
+! op0 = expand_expr (exp1, NULL_RTX, VOIDmode, EXPAND_SUM);
+! op0 = memory_address (mode, op0);
+! }
+
+ temp = gen_rtx (MEM, mode, op0);
+--- 4664,4669 ----
+ tree exp2;
+
+! op0 = expand_expr (exp1, NULL_RTX, VOIDmode, EXPAND_SUM);
+! op0 = memory_address (mode, op0);
+
+ temp = gen_rtx (MEM, mode, op0);
+*************** expand_expr (exp, target, tmode, modifie
+*** 4770,4776 ****
+ tree offset;
+ int volatilep = 0;
+- tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
+- &mode1, &unsignedp, &volatilep);
+ int alignment;
+
+ /* If we got back the original object, something is wrong. Perhaps
+--- 4872,4879 ----
+ tree offset;
+ int volatilep = 0;
+ int alignment;
++ tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
++ &mode1, &unsignedp, &volatilep,
++ &alignment);
+
+ /* If we got back the original object, something is wrong. Perhaps
+*************** expand_expr (exp, target, tmode, modifie
+*** 4793,4797 ****
+ != INTEGER_CST)
+ ? target : NULL_RTX),
+! VOIDmode, EXPAND_SUM);
+
+ /* If this is a constant, put it into a register if it is a
+--- 4896,4901 ----
+ != INTEGER_CST)
+ ? target : NULL_RTX),
+! VOIDmode,
+! modifier == EXPAND_INITIALIZER ? modifier : 0);
+
+ /* If this is a constant, put it into a register if it is a
+*************** expand_expr (exp, target, tmode, modifie
+*** 4806,4810 ****
+ }
+
+- alignment = TYPE_ALIGN (TREE_TYPE (tem)) / BITS_PER_UNIT;
+ if (offset != 0)
+ {
+--- 4910,4913 ----
+*************** expand_expr (exp, target, tmode, modifie
+*** 4816,4827 ****
+ gen_rtx (PLUS, ptr_mode, XEXP (op0, 0),
+ force_reg (ptr_mode, offset_rtx)));
+- /* If we have a variable offset, the known alignment
+- is only that of the innermost structure containing the field.
+- (Actually, we could sometimes do better by using the
+- size of an element of the innermost array, but no need.) */
+- if (TREE_CODE (exp) == COMPONENT_REF
+- || TREE_CODE (exp) == BIT_FIELD_REF)
+- alignment = (TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0)))
+- / BITS_PER_UNIT);
+ }
+
+--- 4919,4922 ----
+*************** expand_expr (exp, target, tmode, modifie
+*** 4844,4848 ****
+ && modifier != EXPAND_SUM
+ && modifier != EXPAND_INITIALIZER
+! && ((mode1 != BLKmode && ! direct_load[(int) mode1])
+ /* If the field isn't aligned enough to fetch as a memref,
+ fetch it as a bit field. */
+--- 4939,4945 ----
+ && modifier != EXPAND_SUM
+ && modifier != EXPAND_INITIALIZER
+! && ((mode1 != BLKmode && ! direct_load[(int) mode1]
+! && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
+! && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
+ /* If the field isn't aligned enough to fetch as a memref,
+ fetch it as a bit field. */
+*************** expand_expr (exp, target, tmode, modifie
+*** 4857,4861 ****
+
+ if (ext_mode == BLKmode)
+! abort ();
+
+ op0 = extract_bit_field (validize_mem (op0), bitsize, bitpos,
+--- 4954,4982 ----
+
+ if (ext_mode == BLKmode)
+! {
+! /* In this case, BITPOS must start at a byte boundary and
+! TARGET, if specified, must be a MEM. */
+! if (GET_CODE (op0) != MEM
+! || (target != 0 && GET_CODE (target) != MEM)
+! || bitpos % BITS_PER_UNIT != 0)
+! abort ();
+!
+! op0 = change_address (op0, VOIDmode,
+! plus_constant (XEXP (op0, 0),
+! bitpos / BITS_PER_UNIT));
+! if (target == 0)
+! {
+! target
+! = assign_stack_temp (mode, int_size_in_bytes (type), 0);
+! MEM_IN_STRUCT_P (target) = AGGREGATE_TYPE_P (type);
+! }
+!
+! emit_block_move (target, op0,
+! GEN_INT ((bitsize + BITS_PER_UNIT - 1)
+! / BITS_PER_UNIT),
+! 1);
+!
+! return target;
+! }
+
+ op0 = extract_bit_field (validize_mem (op0), bitsize, bitpos,
+*************** expand_expr (exp, target, tmode, modifie
+*** 4863,4866 ****
+--- 4984,4999 ----
+ alignment,
+ int_size_in_bytes (TREE_TYPE (tem)));
++
++ /* If the result is a record type and BITSIZE is narrower than
++ the mode of OP0, an integral mode, and this is a big endian
++ machine, we must put the field into the high-order bits. */
++ if (TREE_CODE (type) == RECORD_TYPE && BYTES_BIG_ENDIAN
++ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
++ && bitsize < GET_MODE_BITSIZE (GET_MODE (op0)))
++ op0 = expand_shift (LSHIFT_EXPR, GET_MODE (op0), op0,
++ size_int (GET_MODE_BITSIZE (GET_MODE (op0))
++ - bitsize),
++ op0, 1);
++
+ if (mode == BLKmode)
+ {
+*************** expand_expr (exp, target, tmode, modifie
+*** 4877,4880 ****
+--- 5010,5018 ----
+ }
+
++ /* If the result is BLKmode, use that to access the object
++ now as well. */
++ if (mode == BLKmode)
++ mode1 = BLKmode;
++
+ /* Get a reference to just this component. */
+ if (modifier == EXPAND_CONST_ADDRESS
+*************** expand_expr (exp, target, tmode, modifie
+*** 4888,4895 ****
+ MEM_IN_STRUCT_P (op0) = 1;
+ MEM_VOLATILE_P (op0) |= volatilep;
+! if (mode == mode1 || mode1 == BLKmode || mode1 == tmode)
+ return op0;
+! if (target == 0)
+ target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
+ convert_move (target, op0, unsignedp);
+ return target;
+--- 5026,5036 ----
+ MEM_IN_STRUCT_P (op0) = 1;
+ MEM_VOLATILE_P (op0) |= volatilep;
+! if (mode == mode1 || mode1 == BLKmode || mode1 == tmode
+! || modifier == EXPAND_CONST_ADDRESS
+! || modifier == EXPAND_INITIALIZER)
+ return op0;
+! else if (target == 0)
+ target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
++
+ convert_move (target, op0, unsignedp);
+ return target;
+*************** expand_builtin (exp, target, subtarget,
+*** 7986,7989 ****
+--- 8127,8365 ----
+ #endif
+
++ /* __builtin_setjmp is passed a pointer to an array of five words
++ (not all will be used on all machines). It operates similarly to
++ the C library function of the same name, but is more efficient.
++ Much of the code below (and for longjmp) is copied from the handling
++ of non-local gotos.
++
++     NOTE:  This is intended for use by GNAT and will only work when
++     used the way GNAT uses it.  This code will likely NOT survive to
++ the GCC 2.8.0 release. */
++ case BUILT_IN_SETJMP:
++ if (arglist == 0
++ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE)
++ break;
++
++ {
++ rtx buf_addr = expand_expr (TREE_VALUE (arglist), subtarget,
++ VOIDmode, 0);
++ rtx lab1 = gen_label_rtx (), lab2 = gen_label_rtx ();
++ enum machine_mode sa_mode = Pmode;
++ rtx stack_save;
++ int old_inhibit_defer_pop = inhibit_defer_pop;
++ int return_pops = RETURN_POPS_ARGS (get_identifier ("__dummy"),
++ get_identifier ("__dummy"), 0);
++ rtx next_arg_reg;
++ CUMULATIVE_ARGS args_so_far;
++ int current_call_is_indirect = 1;
++ int i;
++
++ #ifdef POINTERS_EXTEND_UNSIGNED
++ buf_addr = convert_memory_address (Pmode, buf_addr);
++ #endif
++
++ buf_addr = force_reg (Pmode, buf_addr);
++
++ if (target == 0 || GET_CODE (target) != REG
++ || REGNO (target) < FIRST_PSEUDO_REGISTER)
++ target = gen_reg_rtx (value_mode);
++
++ emit_queue ();
++
++ CONST_CALL_P (emit_note (NULL_PTR, NOTE_INSN_SETJMP)) = 1;
++ current_function_calls_setjmp = 1;
++
++ /* We store the frame pointer and the address of lab1 in the buffer
++ and use the rest of it for the stack save area, which is
++ machine-dependent. */
++ emit_move_insn (gen_rtx (MEM, Pmode, buf_addr),
++ virtual_stack_vars_rtx);
++ emit_move_insn
++ (validize_mem (gen_rtx (MEM, Pmode,
++ plus_constant (buf_addr,
++ GET_MODE_SIZE (Pmode)))),
++ gen_rtx (LABEL_REF, Pmode, lab1));
++
++ #ifdef HAVE_save_stack_nonlocal
++ if (HAVE_save_stack_nonlocal)
++ sa_mode = insn_operand_mode[(int) CODE_FOR_save_stack_nonlocal][0];
++ #endif
++
++ current_function_has_nonlocal_goto = 1;
++
++ stack_save = gen_rtx (MEM, sa_mode,
++ plus_constant (buf_addr,
++ 2 * GET_MODE_SIZE (Pmode)));
++ emit_stack_save (SAVE_NONLOCAL, &stack_save, NULL_RTX);
++
++ #ifdef HAVE_setjmp
++ if (HAVE_setjmp)
++ emit_insn (gen_setjmp ());
++ #endif
++
++ /* Set TARGET to zero and branch around the other case. */
++ emit_move_insn (target, const0_rtx);
++ emit_jump_insn (gen_jump (lab2));
++ emit_barrier ();
++ emit_label (lab1);
++
++ /* Note that setjmp clobbers FP when we get here, so we have to
++ make sure it's marked as used by this function. */
++ emit_insn (gen_rtx (USE, VOIDmode, hard_frame_pointer_rtx));
++
++ /* Mark the static chain as clobbered here so life information
++ doesn't get messed up for it. */
++ emit_insn (gen_rtx (CLOBBER, VOIDmode, static_chain_rtx));
++
++ /* Now put in the code to restore the frame pointer, and argument
++ pointer, if needed. The code below is from expand_end_bindings
++ in stmt.c; see detailed documentation there. */
++ #ifdef HAVE_nonlocal_goto
++ if (! HAVE_nonlocal_goto)
++ #endif
++ emit_move_insn (virtual_stack_vars_rtx, hard_frame_pointer_rtx);
++
++ #if ARG_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
++ if (fixed_regs[ARG_POINTER_REGNUM])
++ {
++ #ifdef ELIMINABLE_REGS
++ static struct elims {int from, to;} elim_regs[] = ELIMINABLE_REGS;
++
++ for (i = 0; i < sizeof elim_regs / sizeof elim_regs[0]; i++)
++ if (elim_regs[i].from == ARG_POINTER_REGNUM
++ && elim_regs[i].to == HARD_FRAME_POINTER_REGNUM)
++ break;
++
++ if (i == sizeof elim_regs / sizeof elim_regs [0])
++ #endif
++ {
++ /* Now restore our arg pointer from the address at which it
++ was saved in our stack frame.
++ 	       If there hasn't been space allocated for it yet, make
++ some now. */
++ if (arg_pointer_save_area == 0)
++ arg_pointer_save_area
++ = assign_stack_local (Pmode, GET_MODE_SIZE (Pmode), 0);
++ emit_move_insn (virtual_incoming_args_rtx,
++ copy_to_reg (arg_pointer_save_area));
++ }
++ }
++ #endif
++
++ #ifdef HAVE_nonlocal_goto_receiver
++ if (HAVE_nonlocal_goto_receiver)
++ emit_insn (gen_nonlocal_goto_receiver ());
++ #endif
++ /* The static chain pointer contains the address of dummy function.
++ We need to call it here to handle some PIC cases of restoring
++ a global pointer. Then return 1. */
++ op0 = copy_to_mode_reg (Pmode, static_chain_rtx);
++
++ /* We can't actually call emit_library_call here, so do everything
++ it does, which isn't much for a libfunc with no args. */
++ op0 = memory_address (FUNCTION_MODE, op0);
++
++ INIT_CUMULATIVE_ARGS (args_so_far, NULL_TREE,
++ gen_rtx (SYMBOL_REF, Pmode, "__dummy"));
++ next_arg_reg = FUNCTION_ARG (args_so_far, VOIDmode, void_type_node, 1);
++
++ #ifndef ACCUMULATE_OUTGOING_ARGS
++ #ifdef HAVE_call_pop
++ if (HAVE_call_pop)
++ emit_call_insn (gen_call_pop (gen_rtx (MEM, FUNCTION_MODE, op0),
++ const0_rtx, next_arg_reg,
++ GEN_INT (return_pops)));
++ else
++ #endif
++ #endif
++
++ #ifdef HAVE_call
++ if (HAVE_call)
++ emit_call_insn (gen_call (gen_rtx (MEM, FUNCTION_MODE, op0),
++ const0_rtx, next_arg_reg, const0_rtx));
++ else
++ #endif
++ abort ();
++
++ emit_move_insn (target, const1_rtx);
++ emit_label (lab2);
++ return target;
++ }
++
++ /* __builtin_longjmp is passed a pointer to an array of five words
++ and a value, which is a dummy. It's similar to the C library longjmp
++ function but works with __builtin_setjmp above. */
++ case BUILT_IN_LONGJMP:
++ if (arglist == 0 || TREE_CHAIN (arglist) == 0
++ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE)
++ break;
++
++ {
++ tree dummy_id = get_identifier ("__dummy");
++ tree dummy_type = build_function_type (void_type_node, NULL_TREE);
++ tree dummy_decl = build_decl (FUNCTION_DECL, dummy_id, dummy_type);
++ #ifdef POINTERS_EXTEND_UNSIGNED
++ rtx buf_addr
++ = force_reg (Pmode,
++ convert_memory_address
++ (Pmode,
++ expand_expr (TREE_VALUE (arglist),
++ NULL_RTX, VOIDmode, 0)));
++ #else
++ rtx buf_addr
++ = force_reg (Pmode, expand_expr (TREE_VALUE (arglist),
++ NULL_RTX,
++ VOIDmode, 0));
++ #endif
++ rtx fp = gen_rtx (MEM, Pmode, buf_addr);
++ rtx lab = gen_rtx (MEM, Pmode,
++ plus_constant (buf_addr, GET_MODE_SIZE (Pmode)));
++ enum machine_mode sa_mode
++ #ifdef HAVE_save_stack_nonlocal
++ = (HAVE_save_stack_nonlocal
++ ? insn_operand_mode[(int) CODE_FOR_save_stack_nonlocal][0]
++ : Pmode);
++ #else
++ = Pmode;
++ #endif
++ rtx stack = gen_rtx (MEM, sa_mode,
++ plus_constant (buf_addr,
++ 2 * GET_MODE_SIZE (Pmode)));
++
++ DECL_EXTERNAL (dummy_decl) = 1;
++ TREE_PUBLIC (dummy_decl) = 1;
++ make_decl_rtl (dummy_decl, NULL_PTR, 1);
++
++ /* Expand the second expression just for side-effects. */
++ expand_expr (TREE_VALUE (TREE_CHAIN (arglist)),
++ const0_rtx, VOIDmode, 0);
++
++ assemble_external (dummy_decl);
++
++ /* Pick up FP, label, and SP from the block and jump. This code is
++ from expand_goto in stmt.c; see there for detailed comments. */
++ #if HAVE_nonlocal_goto
++ if (HAVE_nonlocal_goto)
++ emit_insn (gen_nonlocal_goto (fp, lab, stack,
++ XEXP (DECL_RTL (dummy_decl), 0)));
++ else
++ #endif
++ {
++ lab = copy_to_reg (lab);
++ emit_move_insn (hard_frame_pointer_rtx, fp);
++ emit_stack_restore (SAVE_NONLOCAL, stack, NULL_RTX);
++
++ /* Put in the static chain register the address of the dummy
++ function. */
++ emit_move_insn (static_chain_rtx, XEXP (DECL_RTL (dummy_decl), 0));
++ emit_insn (gen_rtx (USE, VOIDmode, hard_frame_pointer_rtx));
++ emit_insn (gen_rtx (USE, VOIDmode, stack_pointer_rtx));
++ emit_insn (gen_rtx (USE, VOIDmode, static_chain_rtx));
++ emit_indirect_jump (lab);
++ }
++
++ return const0_rtx;
++ }
++
+ default: /* just do library call, if unknown builtin */
+ error ("built-in function `%s' not currently supported",
+*************** preexpand_calls (exp)
+*** 8688,8701 ****
+ case CALL_EXPR:
+ /* Do nothing if already expanded. */
+! if (CALL_EXPR_RTL (exp) != 0)
+ return;
+
+! /* Do nothing to built-in functions. */
+! if (TREE_CODE (TREE_OPERAND (exp, 0)) != ADDR_EXPR
+! || TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)) != FUNCTION_DECL
+! || ! DECL_BUILT_IN (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
+! /* Do nothing if the call returns a variable-sized object. */
+! || TREE_CODE (TYPE_SIZE (TREE_TYPE(exp))) != INTEGER_CST)
+! CALL_EXPR_RTL (exp) = expand_call (exp, NULL_RTX, 0);
+ return;
+
+--- 9064,9078 ----
+ case CALL_EXPR:
+ /* Do nothing if already expanded. */
+! if (CALL_EXPR_RTL (exp) != 0
+! /* Do nothing if the call returns a variable-sized object. */
+! || TREE_CODE (TYPE_SIZE (TREE_TYPE(exp))) != INTEGER_CST
+! /* Do nothing to built-in functions. */
+! || (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
+! && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
+! == FUNCTION_DECL)
+! && DECL_BUILT_IN (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))))
+ return;
+
+! CALL_EXPR_RTL (exp) = expand_call (exp, NULL_RTX, 0);
+ return;
+
+*************** do_jump (exp, if_false_label, if_true_la
+*** 9087,9090 ****
+--- 9464,9468 ----
+ push_temp_slots ();
+ expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, 0);
++ preserve_temp_slots (NULL_RTX);
+ free_temp_slots ();
+ pop_temp_slots ();
+*************** do_jump (exp, if_false_label, if_true_la
+*** 9103,9111 ****
+ tree offset;
+ int volatilep = 0;
+
+ /* Get description of this reference. We don't actually care
+ about the underlying object here. */
+ get_inner_reference (exp, &bitsize, &bitpos, &offset,
+! &mode, &unsignedp, &volatilep);
+
+ type = type_for_size (bitsize, unsignedp);
+--- 9481,9491 ----
+ tree offset;
+ int volatilep = 0;
++ int alignment;
+
+ /* Get description of this reference. We don't actually care
+ about the underlying object here. */
+ get_inner_reference (exp, &bitsize, &bitpos, &offset,
+! &mode, &unsignedp, &volatilep,
+! &alignment);
+
+ type = type_for_size (bitsize, unsignedp);
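
The expr.c hunks above add expansion for BUILT_IN_SETJMP and BUILT_IN_LONGJMP.
The sketch below is illustrative only and is not part of the patch: it shows
the calling convention described in the comments (a five-word buffer, and a
dummy value argument to the longjmp builtin), using made-up function names.
As the comments warn, the interface is intended for GNAT's style of use and
may not survive past this snapshot.

  /* Sketch only: buffer of five words, as described in the patch.  */
  void *env[5];

  void bail_out (void)
  {
    __builtin_longjmp (env, 1);   /* the value argument is a dummy */
  }

  int guarded (void)
  {
    if (__builtin_setjmp (env))
      return -1;                  /* reached via __builtin_longjmp */
    bail_out ();
    return 0;                     /* not reached */
  }
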
+diff -rcp2N gcc-2.7.2.2/final.c g77-new/final.c
+*** gcc-2.7.2.2/final.c Sun Nov 26 13:50:00 1995
+--- g77-new/final.c Thu Jul 10 20:11:16 1997
+*************** profile_function (file)
+*** 983,991 ****
+ text_section ();
+
+! #ifdef STRUCT_VALUE_INCOMING_REGNUM
+ if (sval)
+ ASM_OUTPUT_REG_PUSH (file, STRUCT_VALUE_INCOMING_REGNUM);
+ #else
+! #ifdef STRUCT_VALUE_REGNUM
+ if (sval)
+ ASM_OUTPUT_REG_PUSH (file, STRUCT_VALUE_REGNUM);
+--- 983,991 ----
+ text_section ();
+
+! #if defined(STRUCT_VALUE_INCOMING_REGNUM) && defined(ASM_OUTPUT_REG_PUSH)
+ if (sval)
+ ASM_OUTPUT_REG_PUSH (file, STRUCT_VALUE_INCOMING_REGNUM);
+ #else
+! #if defined(STRUCT_VALUE_REGNUM) && defined(ASM_OUTPUT_REG_PUSH)
+ if (sval)
+ ASM_OUTPUT_REG_PUSH (file, STRUCT_VALUE_REGNUM);
+*************** profile_function (file)
+*** 993,1027 ****
+ #endif
+
+! #if 0
+! #ifdef STATIC_CHAIN_INCOMING_REGNUM
+ if (cxt)
+ ASM_OUTPUT_REG_PUSH (file, STATIC_CHAIN_INCOMING_REGNUM);
+ #else
+! #ifdef STATIC_CHAIN_REGNUM
+ if (cxt)
+ ASM_OUTPUT_REG_PUSH (file, STATIC_CHAIN_REGNUM);
+ #endif
+ #endif
+- #endif /* 0 */
+
+ FUNCTION_PROFILER (file, profile_label_no);
+
+! #if 0
+! #ifdef STATIC_CHAIN_INCOMING_REGNUM
+ if (cxt)
+ ASM_OUTPUT_REG_POP (file, STATIC_CHAIN_INCOMING_REGNUM);
+ #else
+! #ifdef STATIC_CHAIN_REGNUM
+ if (cxt)
+ ASM_OUTPUT_REG_POP (file, STATIC_CHAIN_REGNUM);
+ #endif
+ #endif
+- #endif /* 0 */
+
+! #ifdef STRUCT_VALUE_INCOMING_REGNUM
+ if (sval)
+ ASM_OUTPUT_REG_POP (file, STRUCT_VALUE_INCOMING_REGNUM);
+ #else
+! #ifdef STRUCT_VALUE_REGNUM
+ if (sval)
+ ASM_OUTPUT_REG_POP (file, STRUCT_VALUE_REGNUM);
+--- 993,1023 ----
+ #endif
+
+! #if defined(STATIC_CHAIN_INCOMING_REGNUM) && defined(ASM_OUTPUT_REG_PUSH)
+ if (cxt)
+ ASM_OUTPUT_REG_PUSH (file, STATIC_CHAIN_INCOMING_REGNUM);
+ #else
+! #if defined(STATIC_CHAIN_REGNUM) && defined(ASM_OUTPUT_REG_PUSH)
+ if (cxt)
+ ASM_OUTPUT_REG_PUSH (file, STATIC_CHAIN_REGNUM);
+ #endif
+ #endif
+
+ FUNCTION_PROFILER (file, profile_label_no);
+
+! #if defined(STATIC_CHAIN_INCOMING_REGNUM) && defined(ASM_OUTPUT_REG_PUSH)
+ if (cxt)
+ ASM_OUTPUT_REG_POP (file, STATIC_CHAIN_INCOMING_REGNUM);
+ #else
+! #if defined(STATIC_CHAIN_REGNUM) && defined(ASM_OUTPUT_REG_PUSH)
+ if (cxt)
+ ASM_OUTPUT_REG_POP (file, STATIC_CHAIN_REGNUM);
+ #endif
+ #endif
+
+! #if defined(STRUCT_VALUE_INCOMING_REGNUM) && defined(ASM_OUTPUT_REG_PUSH)
+ if (sval)
+ ASM_OUTPUT_REG_POP (file, STRUCT_VALUE_INCOMING_REGNUM);
+ #else
+! #if defined(STRUCT_VALUE_REGNUM) && defined(ASM_OUTPUT_REG_PUSH)
+ if (sval)
+ ASM_OUTPUT_REG_POP (file, STRUCT_VALUE_REGNUM);
+diff -rcp2N gcc-2.7.2.2/flags.h g77-new/flags.h
+*** gcc-2.7.2.2/flags.h Thu Jun 15 07:34:11 1995
+--- g77-new/flags.h Thu Jul 10 20:08:56 1997
+*************** extern int flag_unroll_loops;
+*** 204,207 ****
+--- 204,221 ----
+ extern int flag_unroll_all_loops;
+
++ /* Nonzero forces all invariant computations in loops to be moved
++ outside the loop. */
++
++ extern int flag_move_all_movables;
++
++ /* Nonzero forces all general induction variables in loops to be
++ strength reduced. */
++
++ extern int flag_reduce_all_givs;
++
++ /* Nonzero gets another run of loop_optimize performed. */
++
++ extern int flag_rerun_loop_opt;
++
+ /* Nonzero for -fcse-follow-jumps:
+ have cse follow jumps to do a more extensive job. */
+*************** extern int flag_gnu_linker;
+*** 339,342 ****
+--- 353,369 ----
+ /* Tag all structures with __attribute__(packed) */
+ extern int flag_pack_struct;
++
++ /* 1 if alias checking is enabled: symbols do not alias each other
++ and parameters do not alias the current stack frame. */
++ extern int flag_alias_check;
++
++ /* This flag is only tested if alias checking is enabled.
++ 0 if pointer arguments may alias each other. True in C.
++ 1 if pointer arguments may not alias each other but may alias
++ global variables.
++ 2 if pointer arguments may not alias each other and may not
++ alias global variables. True in Fortran.
++ The value is ignored if flag_alias_check is 0. */
++ extern int flag_argument_noalias;
+
+ /* Other basic status info about current function. */
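
To make the three flag_argument_noalias settings documented above concrete,
here is a small example (illustrative only, not part of the patch; the names
g, scale, p and q are made up):

  int g;

  void scale (int *p, int *q)
  {
    *p = g + 1;
    *q = g + 2;
    /* Setting 0 (C): *p, *q and g may all overlap, so g must be
       reloaded and *p may have been changed by the store to *q.
       Setting 1: *p and *q are distinct, but either may still be g.
       Setting 2 (Fortran dummy arguments): *p, *q and g are all
       distinct, so the reload of g and of *p can be removed.  */
  }
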
+diff -rcp2N gcc-2.7.2.2/flow.c g77-new/flow.c
+*** gcc-2.7.2.2/flow.c Mon Aug 28 06:23:34 1995
+--- g77-new/flow.c Sun Aug 10 18:46:11 1997
+*************** static HARD_REG_SET elim_reg_set;
+*** 288,292 ****
+ /* Forward declarations */
+ static void find_basic_blocks PROTO((rtx, rtx));
+! static int uses_reg_or_mem PROTO((rtx));
+ static void mark_label_ref PROTO((rtx, rtx, int));
+ static void life_analysis PROTO((rtx, int));
+--- 288,292 ----
+ /* Forward declarations */
+ static void find_basic_blocks PROTO((rtx, rtx));
+! static int jmp_uses_reg_or_mem PROTO((rtx));
+ static void mark_label_ref PROTO((rtx, rtx, int));
+ static void life_analysis PROTO((rtx, int));
+*************** find_basic_blocks (f, nonlocal_label_lis
+*** 554,563 ****
+ if (GET_CODE (XVECEXP (pat, 0, i)) == SET
+ && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
+! && uses_reg_or_mem (SET_SRC (XVECEXP (pat, 0, i))))
+ computed_jump = 1;
+ }
+ else if (GET_CODE (pat) == SET
+ && SET_DEST (pat) == pc_rtx
+! && uses_reg_or_mem (SET_SRC (pat)))
+ computed_jump = 1;
+
+--- 554,563 ----
+ if (GET_CODE (XVECEXP (pat, 0, i)) == SET
+ && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
+! && jmp_uses_reg_or_mem (SET_SRC (XVECEXP (pat, 0, i))))
+ computed_jump = 1;
+ }
+ else if (GET_CODE (pat) == SET
+ && SET_DEST (pat) == pc_rtx
+! && jmp_uses_reg_or_mem (SET_SRC (pat)))
+ computed_jump = 1;
+
+*************** find_basic_blocks (f, nonlocal_label_lis
+*** 760,767 ****
+ /* Subroutines of find_basic_blocks. */
+
+! /* Return 1 if X contain a REG or MEM that is not in the constant pool. */
+
+ static int
+! uses_reg_or_mem (x)
+ rtx x;
+ {
+--- 760,768 ----
+ /* Subroutines of find_basic_blocks. */
+
+! Return 1 if X, the SET_SRC of a SET of (pc), contains a REG or MEM that is
+! not in the constant pool and not in the condition of an IF_THEN_ELSE. */
+
+ static int
+! jmp_uses_reg_or_mem (x)
+ rtx x;
+ {
+*************** uses_reg_or_mem (x)
+*** 770,778 ****
+ char *fmt;
+
+! if (code == REG
+! || (code == MEM
+! && ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+! && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))))
+! return 1;
+
+ fmt = GET_RTX_FORMAT (code);
+--- 771,796 ----
+ char *fmt;
+
+! switch (code)
+! {
+! case CONST:
+! case LABEL_REF:
+! case PC:
+! return 0;
+!
+! case REG:
+! return 1;
+!
+! case MEM:
+! return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+! && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
+!
+! case IF_THEN_ELSE:
+! return (jmp_uses_reg_or_mem (XEXP (x, 1))
+! || jmp_uses_reg_or_mem (XEXP (x, 2)));
+!
+! case PLUS: case MINUS: case MULT:
+! return (jmp_uses_reg_or_mem (XEXP (x, 0))
+! || jmp_uses_reg_or_mem (XEXP (x, 1)));
+! }
+
+ fmt = GET_RTX_FORMAT (code);
+*************** uses_reg_or_mem (x)
+*** 780,789 ****
+ {
+ if (fmt[i] == 'e'
+! && uses_reg_or_mem (XEXP (x, i)))
+ return 1;
+
+ if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+! if (uses_reg_or_mem (XVECEXP (x, i, j)))
+ return 1;
+ }
+--- 798,807 ----
+ {
+ if (fmt[i] == 'e'
+! && jmp_uses_reg_or_mem (XEXP (x, i)))
+ return 1;
+
+ if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+! if (jmp_uses_reg_or_mem (XVECEXP (x, i, j)))
+ return 1;
+ }
+*************** propagate_block (old, first, last, final
+*** 1605,1614 ****
+
+ /* Each call clobbers all call-clobbered regs that are not
+! global. Note that the function-value reg is a
+ call-clobbered reg, and mark_set_regs has already had
+ a chance to handle it. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+! if (call_used_regs[i] && ! global_regs[i])
+ dead[i / REGSET_ELT_BITS]
+ |= ((REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS));
+--- 1623,1633 ----
+
+ /* Each call clobbers all call-clobbered regs that are not
+! global or fixed. Note that the function-value reg is a
+ call-clobbered reg, and mark_set_regs has already had
+ a chance to handle it. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+! if (call_used_regs[i] && ! global_regs[i]
+! && ! fixed_regs[i])
+ dead[i / REGSET_ELT_BITS]
+ |= ((REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS));
+diff -rcp2N gcc-2.7.2.2/fold-const.c g77-new/fold-const.c
+*** gcc-2.7.2.2/fold-const.c Fri Sep 15 18:26:12 1995
+--- g77-new/fold-const.c Sun Aug 10 18:47:18 1997
+*************** static tree unextend PROTO((tree, int, i
+*** 80,83 ****
+--- 80,84 ----
+ static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
+ static tree strip_compound_expr PROTO((tree, tree));
++ static int multiple_of_p PROTO((tree, tree, tree));
+
+ #ifndef BRANCH_COST
+*************** const_binop (code, arg1, arg2, notrunc)
+*** 1077,1080 ****
+--- 1078,1083 ----
+ if (int2h == 0 && int2l > 0
+ && TREE_TYPE (arg1) == sizetype
++ && ! TREE_CONSTANT_OVERFLOW (arg1)
++ && ! TREE_CONSTANT_OVERFLOW (arg2)
+ && int1h == 0 && int1l >= 0)
+ {
+*************** const_binop (code, arg1, arg2, notrunc)
+*** 1230,1233 ****
+--- 1233,1237 ----
+ if (TREE_CODE (arg1) == COMPLEX_CST)
+ {
++ register tree type = TREE_TYPE (arg1);
+ register tree r1 = TREE_REALPART (arg1);
+ register tree i1 = TREE_IMAGPART (arg1);
+*************** const_binop (code, arg1, arg2, notrunc)
+*** 1239,1253 ****
+ {
+ case PLUS_EXPR:
+! t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
+ const_binop (PLUS_EXPR, i1, i2, notrunc));
+ break;
+
+ case MINUS_EXPR:
+! t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
+ const_binop (MINUS_EXPR, i1, i2, notrunc));
+ break;
+
+ case MULT_EXPR:
+! t = build_complex (const_binop (MINUS_EXPR,
+ const_binop (MULT_EXPR,
+ r1, r2, notrunc),
+--- 1243,1260 ----
+ {
+ case PLUS_EXPR:
+! t = build_complex (type,
+! const_binop (PLUS_EXPR, r1, r2, notrunc),
+ const_binop (PLUS_EXPR, i1, i2, notrunc));
+ break;
+
+ case MINUS_EXPR:
+! t = build_complex (type,
+! const_binop (MINUS_EXPR, r1, r2, notrunc),
+ const_binop (MINUS_EXPR, i1, i2, notrunc));
+ break;
+
+ case MULT_EXPR:
+! t = build_complex (type,
+! const_binop (MINUS_EXPR,
+ const_binop (MULT_EXPR,
+ r1, r2, notrunc),
+*************** const_binop (code, arg1, arg2, notrunc)
+*** 1271,1293 ****
+ notrunc);
+
+! t = build_complex
+! (const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
+! ? TRUNC_DIV_EXPR : RDIV_EXPR,
+! const_binop (PLUS_EXPR,
+! const_binop (MULT_EXPR, r1, r2,
+! notrunc),
+! const_binop (MULT_EXPR, i1, i2,
+! notrunc),
+! notrunc),
+! magsquared, notrunc),
+! const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
+! ? TRUNC_DIV_EXPR : RDIV_EXPR,
+! const_binop (MINUS_EXPR,
+! const_binop (MULT_EXPR, i1, r2,
+! notrunc),
+! const_binop (MULT_EXPR, r1, i2,
+! notrunc),
+! notrunc),
+! magsquared, notrunc));
+ }
+ break;
+--- 1278,1302 ----
+ notrunc);
+
+! t = build_complex (type,
+! const_binop
+! (INTEGRAL_TYPE_P (TREE_TYPE (r1))
+! ? TRUNC_DIV_EXPR : RDIV_EXPR,
+! const_binop (PLUS_EXPR,
+! const_binop (MULT_EXPR, r1, r2,
+! notrunc),
+! const_binop (MULT_EXPR, i1, i2,
+! notrunc),
+! notrunc),
+! magsquared, notrunc),
+! const_binop
+! (INTEGRAL_TYPE_P (TREE_TYPE (r1))
+! ? TRUNC_DIV_EXPR : RDIV_EXPR,
+! const_binop (MINUS_EXPR,
+! const_binop (MULT_EXPR, i1, r2,
+! notrunc),
+! const_binop (MULT_EXPR, r1, i2,
+! notrunc),
+! notrunc),
+! magsquared, notrunc));
+ }
+ break;
+*************** const_binop (code, arg1, arg2, notrunc)
+*** 1296,1300 ****
+ abort ();
+ }
+- TREE_TYPE (t) = TREE_TYPE (arg1);
+ return t;
+ }
+--- 1305,1308 ----
+*************** size_binop (code, arg0, arg1)
+*** 1346,1363 ****
+ {
+ /* And some specific cases even faster than that. */
+! if (code == PLUS_EXPR
+! && TREE_INT_CST_LOW (arg0) == 0
+! && TREE_INT_CST_HIGH (arg0) == 0)
+ return arg1;
+! if (code == MINUS_EXPR
+! && TREE_INT_CST_LOW (arg1) == 0
+! && TREE_INT_CST_HIGH (arg1) == 0)
+ return arg0;
+! if (code == MULT_EXPR
+! && TREE_INT_CST_LOW (arg0) == 1
+! && TREE_INT_CST_HIGH (arg0) == 0)
+ return arg1;
+ /* Handle general case of two integer constants. */
+! return const_binop (code, arg0, arg1, 0);
+ }
+
+--- 1354,1367 ----
+ {
+ /* And some specific cases even faster than that. */
+! if (code == PLUS_EXPR && integer_zerop (arg0))
+ return arg1;
+! else if ((code == MINUS_EXPR || code == PLUS_EXPR)
+! && integer_zerop (arg1))
+ return arg0;
+! else if (code == MULT_EXPR && integer_onep (arg0))
+ return arg1;
++
+ /* Handle general case of two integer constants. */
+! return const_binop (code, arg0, arg1, 1);
+ }
+
+*************** fold_convert (t, arg1)
+*** 1482,1486 ****
+ {
+ if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
+! return arg1;
+ else if (setjmp (float_error))
+ {
+--- 1486,1494 ----
+ {
+ if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
+! {
+! t = arg1;
+! TREE_TYPE (arg1) = type;
+! return t;
+! }
+ else if (setjmp (float_error))
+ {
+*************** operand_equal_p (arg0, arg1, only_const)
+*** 1644,1687 ****
+ STRIP_NOPS (arg1);
+
+! /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
+! We don't care about side effects in that case because the SAVE_EXPR
+! takes care of that for us. */
+! if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
+! return ! only_const;
+!
+! if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
+ return 0;
+
+! if (TREE_CODE (arg0) == TREE_CODE (arg1)
+! && TREE_CODE (arg0) == ADDR_EXPR
+! && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
+! return 1;
+!
+! if (TREE_CODE (arg0) == TREE_CODE (arg1)
+! && TREE_CODE (arg0) == INTEGER_CST
+! && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
+! && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
+ return 1;
+
+! /* Detect when real constants are equal. */
+! if (TREE_CODE (arg0) == TREE_CODE (arg1)
+! && TREE_CODE (arg0) == REAL_CST)
+! return !bcmp ((char *) &TREE_REAL_CST (arg0),
+! (char *) &TREE_REAL_CST (arg1),
+! sizeof (REAL_VALUE_TYPE));
+
+ if (only_const)
+ return 0;
+
+- if (arg0 == arg1)
+- return 1;
+-
+- if (TREE_CODE (arg0) != TREE_CODE (arg1))
+- return 0;
+- /* This is needed for conversions and for COMPONENT_REF.
+- Might as well play it safe and always test this. */
+- if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
+- return 0;
+-
+ switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
+ {
+--- 1652,1705 ----
+ STRIP_NOPS (arg1);
+
+! if (TREE_CODE (arg0) != TREE_CODE (arg1)
+! /* This is needed for conversions and for COMPONENT_REF.
+! Might as well play it safe and always test this. */
+! || TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
+ return 0;
+
+! /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
+! We don't care about side effects in that case because the SAVE_EXPR
+! takes care of that for us. In all other cases, two expressions are
+! equal if they have no side effects. If we have two identical
+! expressions with side effects that should be treated the same due
+! to the only side effects being identical SAVE_EXPR's, that will
+! be detected in the recursive calls below. */
+! if (arg0 == arg1 && ! only_const
+! && (TREE_CODE (arg0) == SAVE_EXPR
+! || (! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1))))
+ return 1;
+
+! /* Next handle constant cases, those for which we can return 1 even
+! if ONLY_CONST is set. */
+! if (TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1))
+! switch (TREE_CODE (arg0))
+! {
+! case INTEGER_CST:
+! return (TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
+! && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1));
+!
+! case REAL_CST:
+! return REAL_VALUES_EQUAL (TREE_REAL_CST (arg0), TREE_REAL_CST (arg1));
+!
+! case COMPLEX_CST:
+! return (operand_equal_p (TREE_REALPART (arg0), TREE_REALPART (arg1),
+! only_const)
+! && operand_equal_p (TREE_IMAGPART (arg0), TREE_IMAGPART (arg1),
+! only_const));
+!
+! case STRING_CST:
+! return (TREE_STRING_LENGTH (arg0) == TREE_STRING_LENGTH (arg1)
+! && ! strncmp (TREE_STRING_POINTER (arg0),
+! TREE_STRING_POINTER (arg1),
+! TREE_STRING_LENGTH (arg0)));
+!
+! case ADDR_EXPR:
+! return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0),
+! 0);
+! }
+
+ if (only_const)
+ return 0;
+
+ switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
+ {
+*************** operand_equal_p (arg0, arg1, only_const)
+*** 1698,1705 ****
+ case '<':
+ case '2':
+! return (operand_equal_p (TREE_OPERAND (arg0, 0),
+! TREE_OPERAND (arg1, 0), 0)
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+! TREE_OPERAND (arg1, 1), 0));
+
+ case 'r':
+--- 1716,1735 ----
+ case '<':
+ case '2':
+! if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0)
+! && operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1),
+! 0))
+! return 1;
+!
+! /* For commutative ops, allow the other order. */
+! return ((TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MULT_EXPR
+! || TREE_CODE (arg0) == MIN_EXPR || TREE_CODE (arg0) == MAX_EXPR
+! || TREE_CODE (arg0) == BIT_IOR_EXPR
+! || TREE_CODE (arg0) == BIT_XOR_EXPR
+! || TREE_CODE (arg0) == BIT_AND_EXPR
+! || TREE_CODE (arg0) == NE_EXPR || TREE_CODE (arg0) == EQ_EXPR)
+! && operand_equal_p (TREE_OPERAND (arg0, 0),
+! TREE_OPERAND (arg1, 1), 0)
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+! TREE_OPERAND (arg1, 0), 0));
+
+ case 'r':
+*************** optimize_bit_field_compare (code, compar
+*** 2212,2215 ****
+--- 2242,2246 ----
+ int lunsignedp, runsignedp;
+ int lvolatilep = 0, rvolatilep = 0;
++ int alignment;
+ tree linner, rinner;
+ tree mask;
+*************** optimize_bit_field_compare (code, compar
+*** 2220,2224 ****
+ extraction at all and so can do nothing. */
+ linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
+! &lunsignedp, &lvolatilep);
+ if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
+ || offset != 0)
+--- 2251,2255 ----
+ extraction at all and so can do nothing. */
+ linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
+! &lunsignedp, &lvolatilep, &alignment);
+ if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
+ || offset != 0)
+*************** optimize_bit_field_compare (code, compar
+*** 2229,2234 ****
+ /* If this is not a constant, we can only do something if bit positions,
+ sizes, and signedness are the same. */
+! rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
+! &rmode, &runsignedp, &rvolatilep);
+
+ if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
+--- 2260,2265 ----
+ /* If this is not a constant, we can only do something if bit positions,
+ sizes, and signedness are the same. */
+! rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset, &rmode,
+! &runsignedp, &rvolatilep, &alignment);
+
+ if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
+*************** decode_field_reference (exp, pbitsize, p
+*** 2403,2406 ****
+--- 2434,2438 ----
+ tree unsigned_type;
+ int precision;
++ int alignment;
+
+ /* All the optimizations using this function assume integer fields.
+*************** decode_field_reference (exp, pbitsize, p
+*** 2423,2427 ****
+
+ inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
+! punsignedp, pvolatilep);
+ if ((inner == exp && and_mask == 0)
+ || *pbitsize < 0 || offset != 0)
+--- 2455,2459 ----
+
+ inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
+! punsignedp, pvolatilep, &alignment);
+ if ((inner == exp && and_mask == 0)
+ || *pbitsize < 0 || offset != 0)
+*************** strip_compound_expr (t, s)
+*** 3065,3068 ****
+--- 3097,3200 ----
+ }
+
++ /* Determine if first argument is a multiple of second argument.
++    Return 0 if it is not, or cannot easily be determined to be so.
++
++ An example of the sort of thing we care about (at this point --
++ this routine could surely be made more general, and expanded
++ to do what the *_DIV_EXPR's fold() cases do now) is discovering
++ that
++
++ SAVE_EXPR (I) * SAVE_EXPR (J * 8)
++
++ is a multiple of
++
++ SAVE_EXPR (J * 8)
++
++ when we know that the two `SAVE_EXPR (J * 8)' nodes are the
++ same node (which means they will have the same value at run
++ time, even though we don't know when they'll be assigned).
++
++ This code also handles discovering that
++
++ SAVE_EXPR (I) * SAVE_EXPR (J * 8)
++
++ is a multiple of
++
++ 8
++
++ (of course) so we don't have to worry about dealing with a
++ possible remainder.
++
++ Note that we _look_ inside a SAVE_EXPR only to determine
++ how it was calculated; it is not safe for fold() to do much
++ of anything else with the internals of a SAVE_EXPR, since
++ fold() cannot know when it will be evaluated at run time.
++ For example, the latter example above _cannot_ be implemented
++ as
++
++ SAVE_EXPR (I) * J
++
++ or any variant thereof, since the value of J at evaluation time
++ of the original SAVE_EXPR is not necessarily the same at the time
++ the new expression is evaluated. The only optimization of this
++ sort that would be valid is changing
++
++ SAVE_EXPR (I) * SAVE_EXPR (SAVE_EXPR (J) * 8)
++ divided by
++ 8
++
++ to
++
++ SAVE_EXPR (I) * SAVE_EXPR (J)
++
++ (where the same SAVE_EXPR (J) is used in the original and the
++ transformed version). */
++
++ static int
++ multiple_of_p (type, top, bottom)
++ tree type;
++ tree top;
++ tree bottom;
++ {
++ if (operand_equal_p (top, bottom, 0))
++ return 1;
++
++ if (TREE_CODE (type) != INTEGER_TYPE)
++ return 0;
++
++ switch (TREE_CODE (top))
++ {
++ case MULT_EXPR:
++ return (multiple_of_p (type, TREE_OPERAND (top, 0), bottom)
++ || multiple_of_p (type, TREE_OPERAND (top, 1), bottom));
++
++ case PLUS_EXPR:
++ case MINUS_EXPR:
++ return (multiple_of_p (type, TREE_OPERAND (top, 0), bottom)
++ && multiple_of_p (type, TREE_OPERAND (top, 1), bottom));
++
++ case NOP_EXPR:
++ /* Punt if conversion from non-integral or wider integral type. */
++ if ((TREE_CODE (TREE_TYPE (TREE_OPERAND (top, 0))) != INTEGER_TYPE)
++ || (TYPE_PRECISION (type)
++ < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (top, 0)))))
++ return 0;
++ /* Fall through. */
++ case SAVE_EXPR:
++ return multiple_of_p (type, TREE_OPERAND (top, 0), bottom);
++
++ case INTEGER_CST:
++ if ((TREE_CODE (bottom) != INTEGER_CST)
++ || (tree_int_cst_sgn (top) < 0)
++ || (tree_int_cst_sgn (bottom) < 0))
++ return 0;
++ return integer_zerop (const_binop (TRUNC_MOD_EXPR,
++ top, bottom, 0));
++
++ default:
++ return 0;
++ }
++ }
++
+ /* Perform constant folding and related simplification of EXPR.
+ The related simplifications include x*1 => x, x*0 => 0, etc.,
+*************** fold (expr)
+*** 3611,3615 ****
+ TREE_OPERAND (arg0, 1))));
+ else if (TREE_CODE (arg0) == COMPLEX_CST)
+! return build_complex (TREE_OPERAND (arg0, 0),
+ fold (build1 (NEGATE_EXPR,
+ TREE_TYPE (TREE_TYPE (arg0)),
+--- 3743,3747 ----
+ TREE_OPERAND (arg0, 1))));
+ else if (TREE_CODE (arg0) == COMPLEX_CST)
+! return build_complex (type, TREE_OPERAND (arg0, 0),
+ fold (build1 (NEGATE_EXPR,
+ TREE_TYPE (TREE_TYPE (arg0)),
+*************** fold (expr)
+*** 4014,4018 ****
+ return non_lvalue (convert (type, arg0));
+ if (integer_zerop (arg1))
+! return t;
+
+ /* If we have ((a / C1) / C2) where both division are the same type, try
+--- 4146,4166 ----
+ return non_lvalue (convert (type, arg0));
+ if (integer_zerop (arg1))
+! {
+! if (extra_warnings)
+! warning ("integer division by zero");
+! return t;
+! }
+!
+! /* If arg0 is a multiple of arg1, then rewrite to the fastest div
+! operation, EXACT_DIV_EXPR. Otherwise, handle folding of
+! general divide. Note that only CEIL_DIV_EXPR is rewritten now,
+! only because the others seem to be faster in some cases, e.g. the
+! nonoptimized TRUNC_DIV_EXPR or FLOOR_DIV_EXPR on DEC Alpha. This
+! is probably just due to more work being done on it in expmed.c than
+! on EXACT_DIV_EXPR, and could presumably be fixed, since
+! EXACT_DIV_EXPR should _never_ be slower than *_DIV_EXPR. */
+! if ((code == CEIL_DIV_EXPR)
+! && multiple_of_p (type, arg0, arg1))
+! return fold (build (EXACT_DIV_EXPR, type, arg0, arg1));
+
+ /* If we have ((a / C1) / C2) where both division are the same type, try
+*************** fold (expr)
+*** 4049,4053 ****
+ tree xarg0 = arg0;
+
+! if (TREE_CODE (xarg0) == SAVE_EXPR)
+ have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
+
+--- 4197,4201 ----
+ tree xarg0 = arg0;
+
+! if (TREE_CODE (xarg0) == SAVE_EXPR && SAVE_EXPR_RTL (xarg0) == 0)
+ have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
+
+*************** fold (expr)
+*** 4067,4071 ****
+ }
+
+! if (TREE_CODE (xarg0) == SAVE_EXPR)
+ have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
+
+--- 4215,4219 ----
+ }
+
+! if (TREE_CODE (xarg0) == SAVE_EXPR && SAVE_EXPR_RTL (xarg0) == 0)
+ have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
+
+*************** fold (expr)
+*** 5050,5054 ****
+ case COMPLEX_EXPR:
+ if (wins)
+! return build_complex (arg0, arg1);
+ return t;
+
+--- 5198,5202 ----
+ case COMPLEX_EXPR:
+ if (wins)
+! return build_complex (type, arg0, arg1);
+ return t;
+
+diff -rcp2N gcc-2.7.2.2/function.c g77-new/function.c
+*** gcc-2.7.2.2/function.c Sun Nov 26 14:50:26 1995
+--- g77-new/function.c Sun Aug 10 18:47:24 1997
+*************** free_temps_for_rtl_expr (t)
+*** 1184,1187 ****
+--- 1184,1202 ----
+ }
+
++ /* Mark all temporaries ever allocated in this function as not suitable
++ for reuse until the current level is exited. */
++
++ void
++ mark_all_temps_used ()
++ {
++ struct temp_slot *p;
++
++ for (p = temp_slots; p; p = p->next)
++ {
++ p->in_use = 1;
++ p->level = MIN (p->level, temp_slot_level);
++ }
++ }
++
+ /* Push deeper into the nesting level for stack temporaries. */
+
+*************** pop_temp_slots ()
+*** 1208,1211 ****
+--- 1223,1237 ----
+ temp_slot_level--;
+ }
++
++ /* Initialize temporary slots. */
++
++ void
++ init_temp_slots ()
++ {
++ /* We have not allocated any temporaries yet. */
++ temp_slots = 0;
++ temp_slot_level = 0;
++ target_temp_slot_level = 0;
++ }
+
+ /* Retroactively move an auto variable from a register to a stack slot.
+*************** instantiate_virtual_regs_1 (loc, object,
+*** 2838,2842 ****
+ case MEM:
+ /* Most cases of MEM that convert to valid addresses have already been
+! handled by our scan of regno_reg_rtx. The only special handling we
+ need here is to make a copy of the rtx to ensure it isn't being
+ shared if we have to change it to a pseudo.
+--- 2864,2868 ----
+ case MEM:
+ /* Most cases of MEM that convert to valid addresses have already been
+! handled by our scan of decls. The only special handling we
+ need here is to make a copy of the rtx to ensure it isn't being
+ shared if we have to change it to a pseudo.
+*************** instantiate_virtual_regs_1 (loc, object,
+*** 2896,2900 ****
+ has less restrictions on an address that some other insn.
+ In that case, we will modify the shared address. This case
+! doesn't seem very likely, though. */
+
+ if (instantiate_virtual_regs_1 (&XEXP (x, 0),
+--- 2922,2928 ----
+ has less restrictions on an address that some other insn.
+ In that case, we will modify the shared address. This case
+! doesn't seem very likely, though. One case where this could
+! happen is in the case of a USE or CLOBBER reference, but we
+! take care of that below. */
+
+ if (instantiate_virtual_regs_1 (&XEXP (x, 0),
+*************** instantiate_virtual_regs_1 (loc, object,
+*** 2909,2914 ****
+
+ /* Fall through to generic unary operation case. */
+- case USE:
+- case CLOBBER:
+ case SUBREG:
+ case STRICT_LOW_PART:
+--- 2937,2940 ----
+*************** instantiate_virtual_regs_1 (loc, object,
+*** 2927,2930 ****
+--- 2953,2973 ----
+ goto restart;
+
++ case USE:
++ case CLOBBER:
++ /* If the operand is a MEM, see if the change is a valid MEM. If not,
++ go ahead and make the invalid one, but do it to a copy. For a REG,
++ just make the recursive call, since there's no chance of a problem. */
++
++ if ((GET_CODE (XEXP (x, 0)) == MEM
++ && instantiate_virtual_regs_1 (&XEXP (XEXP (x, 0), 0), XEXP (x, 0),
++ 0))
++ || (GET_CODE (XEXP (x, 0)) == REG
++ && instantiate_virtual_regs_1 (&XEXP (x, 0), 0, 0)))
++ return 1;
++
++ XEXP (x, 0) = copy_rtx (XEXP (x, 0));
++ loc = &XEXP (x, 0);
++ goto restart;
++
+ case REG:
+ /* Try to replace with a PLUS. If that doesn't work, compute the sum
+*************** assign_parms (fndecl, second_time)
+*** 3404,3409 ****
+
+ /* If this is a memory ref that contains aggregate components,
+! mark it as such for cse and loop optimize. */
+ MEM_IN_STRUCT_P (stack_parm) = aggregate;
+ }
+
+--- 3447,3454 ----
+
+ /* If this is a memory ref that contains aggregate components,
+! mark it as such for cse and loop optimize. Likewise if it
+! is readonly. */
+ MEM_IN_STRUCT_P (stack_parm) = aggregate;
++ RTX_UNCHANGING_P (stack_parm) = TREE_READONLY (parm);
+ }
+
+*************** assign_parms (fndecl, second_time)
+*** 3627,3631 ****
+
+ parmreg = gen_reg_rtx (promoted_nominal_mode);
+! REG_USERVAR_P (parmreg) = 1;
+
+ /* If this was an item that we received a pointer to, set DECL_RTL
+--- 3672,3676 ----
+
+ parmreg = gen_reg_rtx (promoted_nominal_mode);
+! mark_user_reg (parmreg);
+
+ /* If this was an item that we received a pointer to, set DECL_RTL
+*************** assign_parms (fndecl, second_time)
+*** 3695,3699 ****
+ Pmode above. We must use the actual mode of the parm. */
+ parmreg = gen_reg_rtx (TYPE_MODE (TREE_TYPE (parm)));
+! REG_USERVAR_P (parmreg) = 1;
+ emit_move_insn (parmreg, DECL_RTL (parm));
+ DECL_RTL (parm) = parmreg;
+--- 3740,3744 ----
+ Pmode above. We must use the actual mode of the parm. */
+ parmreg = gen_reg_rtx (TYPE_MODE (TREE_TYPE (parm)));
+! mark_user_reg (parmreg);
+ emit_move_insn (parmreg, DECL_RTL (parm));
+ DECL_RTL (parm) = parmreg;
+*************** init_function_start (subr, filename, lin
+*** 4814,4821 ****
+ rtl_expr_chain = 0;
+
+! /* We have not allocated any temporaries yet. */
+! temp_slots = 0;
+! temp_slot_level = 0;
+! target_temp_slot_level = 0;
+
+ /* Within function body, compute a type's size as soon it is laid out. */
+--- 4859,4864 ----
+ rtl_expr_chain = 0;
+
+! /* Set up to allocate temporaries. */
+! init_temp_slots ();
+
+ /* Within function body, compute a type's size as soon it is laid out. */
+diff -rcp2N gcc-2.7.2.2/gcc.c g77-new/gcc.c
+*** gcc-2.7.2.2/gcc.c Tue Sep 12 17:15:11 1995
+--- g77-new/gcc.c Sun Aug 10 18:47:14 1997
+*************** static int is_directory PROTO((char *,
+*** 296,300 ****
+ static void validate_switches PROTO((char *));
+ static void validate_all_switches PROTO((void));
+! static void give_switch PROTO((int, int));
+ static int used_arg PROTO((char *, int));
+ static int default_arg PROTO((char *, int));
+--- 296,300 ----
+ static void validate_switches PROTO((char *));
+ static void validate_all_switches PROTO((void));
+! static void give_switch PROTO((int, int, int));
+ static int used_arg PROTO((char *, int));
+ static int default_arg PROTO((char *, int));
+*************** or with constant text in a single argume
+*** 405,408 ****
+--- 405,409 ----
+ name starts with `o'. %{o*} would substitute this text,
+ including the space; thus, two arguments would be generated.
++ %{^S*} likewise, but don't put a blank between a switch and any args.
+ %{S*:X} substitutes X if one or more switches whose names start with -S are
+ specified to CC. Note that the tail part of the -S option
+*************** process_command (argc, argv)
+*** 2828,2831 ****
+--- 2829,2835 ----
+ infiles[n_infiles++].name = argv[i];
+ }
++ /* -save-temps overrides -pipe, so that temp files are produced */
++ else if (save_temps_flag && strcmp (argv[i], "-pipe") == 0)
++ ;
+ else if (argv[i][0] == '-' && argv[i][1] != 0)
+ {
+*************** handle_braces (p)
+*** 3832,3835 ****
+--- 3836,3844 ----
+ int negate = 0;
+ int suffix = 0;
++ int include_blanks = 1;
++
++ if (*p == '^')
++ /* A '^' after the open-brace means to not give blanks before args. */
++ include_blanks = 0, ++p;
+
+ if (*p == '|')
+*************** handle_braces (p)
+*** 3897,3901 ****
+ if (!strncmp (switches[i].part1, filter, p - filter)
+ && check_live_switch (i, p - filter))
+! give_switch (i, 0);
+ }
+ else
+--- 3906,3910 ----
+ if (!strncmp (switches[i].part1, filter, p - filter)
+ && check_live_switch (i, p - filter))
+! give_switch (i, 0, include_blanks);
+ }
+ else
+*************** handle_braces (p)
+*** 3936,3940 ****
+ do_spec_1 (string, 0, &switches[i].part1[hard_match_len]);
+ /* Pass any arguments this switch has. */
+! give_switch (i, 1);
+ }
+
+--- 3945,3949 ----
+ do_spec_1 (string, 0, &switches[i].part1[hard_match_len]);
+ /* Pass any arguments this switch has. */
+! give_switch (i, 1, 1);
+ }
+
+*************** handle_braces (p)
+*** 3980,3984 ****
+ if (*p == '}')
+ {
+! give_switch (i, 0);
+ }
+ else
+--- 3989,3993 ----
+ if (*p == '}')
+ {
+! give_switch (i, 0, include_blanks);
+ }
+ else
+*************** check_live_switch (switchnum, prefix_len
+*** 4081,4090 ****
+ This cannot fail since it never finishes a command line.
+
+! If OMIT_FIRST_WORD is nonzero, then we omit .part1 of the argument. */
+
+ static void
+! give_switch (switchnum, omit_first_word)
+ int switchnum;
+ int omit_first_word;
+ {
+ if (!omit_first_word)
+--- 4090,4103 ----
+ This cannot fail since it never finishes a command line.
+
+! If OMIT_FIRST_WORD is nonzero, then we omit .part1 of the argument.
+!
+! If INCLUDE_BLANKS is nonzero, then we include blanks before each argument
+! of the switch. */
+
+ static void
+! give_switch (switchnum, omit_first_word, include_blanks)
+ int switchnum;
+ int omit_first_word;
++ int include_blanks;
+ {
+ if (!omit_first_word)
+*************** give_switch (switchnum, omit_first_word)
+*** 4093,4097 ****
+ do_spec_1 (switches[switchnum].part1, 1, NULL_PTR);
+ }
+! do_spec_1 (" ", 0, NULL_PTR);
+ if (switches[switchnum].args != 0)
+ {
+--- 4106,4110 ----
+ do_spec_1 (switches[switchnum].part1, 1, NULL_PTR);
+ }
+!
+ if (switches[switchnum].args != 0)
+ {
+*************** give_switch (switchnum, omit_first_word)
+*** 4099,4106 ****
+ for (p = switches[switchnum].args; *p; p++)
+ {
+ do_spec_1 (*p, 1, NULL_PTR);
+- do_spec_1 (" ", 0, NULL_PTR);
+ }
+ }
+ switches[switchnum].valid = 1;
+ }
+--- 4112,4122 ----
+ for (p = switches[switchnum].args; *p; p++)
+ {
++ if (include_blanks)
++ do_spec_1 (" ", 0, NULL_PTR);
+ do_spec_1 (*p, 1, NULL_PTR);
+ }
+ }
++
++ do_spec_1 (" ", 0, NULL_PTR);
+ switches[switchnum].valid = 1;
+ }
+diff -rcp2N gcc-2.7.2.2/gcc.texi g77-new/gcc.texi
+*** gcc-2.7.2.2/gcc.texi Thu Feb 20 19:24:19 1997
+--- g77-new/gcc.texi Thu Jul 10 20:08:58 1997
+*************** original English.
+*** 149,152 ****
+--- 149,153 ----
+ @sp 3
+ @center Last updated 29 June 1996
++ @center (Revised for GNU Fortran 1997-01-10)
+ @sp 1
+ @c The version number appears twice more in this file.
+diff -rcp2N gcc-2.7.2.2/glimits.h g77-new/glimits.h
+*** gcc-2.7.2.2/glimits.h Wed Sep 29 17:30:54 1993
+--- g77-new/glimits.h Thu Jul 10 20:08:58 1997
+***************
+*** 64,68 ****
+ (Same as `int'). */
+ #ifndef __LONG_MAX__
+! #define __LONG_MAX__ 2147483647L
+ #endif
+ #undef LONG_MIN
+--- 64,72 ----
+ (Same as `int'). */
+ #ifndef __LONG_MAX__
+! # ifndef __alpha__
+! # define __LONG_MAX__ 2147483647L
+! # else
+! # define __LONG_MAX__ 9223372036854775807LL
+! # endif /* __alpha__ */
+ #endif
+ #undef LONG_MIN
+diff -rcp2N gcc-2.7.2.2/integrate.c g77-new/integrate.c
+*** gcc-2.7.2.2/integrate.c Fri Oct 20 18:48:13 1995
+--- g77-new/integrate.c Sun Aug 10 18:46:31 1997
+*************** static rtx copy_for_inline PROTO((rtx));
+*** 67,70 ****
+--- 67,71 ----
+ static void integrate_parm_decls PROTO((tree, struct inline_remap *, rtvec));
+ static void integrate_decl_tree PROTO((tree, int, struct inline_remap *));
++ static void save_constants_in_decl_trees PROTO ((tree));
+ static void subst_constants PROTO((rtx *, rtx, struct inline_remap *));
+ static void restore_constants PROTO((rtx *));
+*************** save_for_inline_copying (fndecl)
+*** 435,438 ****
+--- 436,443 ----
+ }
+
++ /* Also scan all decls, and replace any constant pool references with the
++ actual constant. */
++ save_constants_in_decl_trees (DECL_INITIAL (fndecl));
++
+ /* Clear out the constant pool so that we can recreate it with the
+ copied constants below. */
+*************** save_for_inline_nocopy (fndecl)
+*** 781,784 ****
+--- 786,793 ----
+ }
+
++ /* Also scan all decls, and replace any constant pool references with the
++ actual constant. */
++ save_constants_in_decl_trees (DECL_INITIAL (fndecl));
++
+ /* We have now allocated all that needs to be allocated permanently
+ on the rtx obstack. Set our high-water mark, so that we
+*************** expand_inline_function (fndecl, parms, t
+*** 1571,1575 ****
+ if (GET_CODE (XEXP (loc, 0)) == REG)
+ {
+! temp = force_reg (Pmode, structure_value_addr);
+ map->reg_map[REGNO (XEXP (loc, 0))] = temp;
+ if ((CONSTANT_P (structure_value_addr)
+--- 1580,1585 ----
+ if (GET_CODE (XEXP (loc, 0)) == REG)
+ {
+! temp = force_reg (Pmode,
+! force_operand (structure_value_addr, NULL_RTX));
+ map->reg_map[REGNO (XEXP (loc, 0))] = temp;
+ if ((CONSTANT_P (structure_value_addr)
+*************** integrate_decl_tree (let, level, map)
+*** 2029,2032 ****
+--- 2039,2059 ----
+ }
+ }
++ }
++
++ /* Given a BLOCK node LET, search for all DECL_RTL fields, and pass them
++ through save_constants. */
++
++ static void
++ save_constants_in_decl_trees (let)
++ tree let;
++ {
++ tree t;
++
++ for (t = BLOCK_VARS (let); t; t = TREE_CHAIN (t))
++ if (DECL_RTL (t) != 0)
++ save_constants (&DECL_RTL (t));
++
++ for (t = BLOCK_SUBBLOCKS (let); t; t = TREE_CHAIN (t))
++ save_constants_in_decl_trees (t);
+ }
+
+diff -rcp2N gcc-2.7.2.2/invoke.texi g77-new/invoke.texi
+*** gcc-2.7.2.2/invoke.texi Tue Oct 3 11:40:43 1995
+--- g77-new/invoke.texi Thu Jul 10 20:09:00 1997
+***************
+*** 1,3 ****
+! @c Copyright (C) 1988, 89, 92, 93, 94, 1995 Free Software Foundation, Inc.
+ @c This is part of the GCC manual.
+ @c For copying conditions, see the file gcc.texi.
+--- 1,3 ----
+! @c Copyright (C) 1988, 89, 92-95, 1997 Free Software Foundation, Inc.
+ @c This is part of the GCC manual.
+ @c For copying conditions, see the file gcc.texi.
+*************** in the following sections.
+*** 149,152 ****
+--- 149,153 ----
+ -fschedule-insns2 -fstrength-reduce -fthread-jumps
+ -funroll-all-loops -funroll-loops
++ -fmove-all-movables -freduce-all-givs -frerun-loop-opt
+ -O -O0 -O1 -O2 -O3
+ @end smallexample
+*************** in addition to the above:
+*** 331,334 ****
+--- 332,337 ----
+ -fshort-double -fvolatile -fvolatile-global
+ -fverbose-asm -fpack-struct +e0 +e1
++ -fargument-alias -fargument-noalias
++ -fargument-noalias-global
+ @end smallexample
+ @end table
+*************** Print extra warning messages for these e
+*** 1253,1256 ****
+--- 1256,1304 ----
+
+ @itemize @bullet
++ @cindex division by zero
++ @cindex zero, division by
++ @item
++ An integer division by zero is detected.
++
++ Some cases of division by zero might occur as the result
++ of using so-called ``safe'' macros.
++ For example:
++
++ @smallexample
++ #define BUCKETS(b) (((b) != NULL) ? (b)->buckets : 0)
++ @dots{}
++ i = j / BUCKETS(b);
++ @end smallexample
++
++ Although analysis of the context of the above code could
++ prove that @samp{b} is never null when it is executed,
++ the division-by-zero warning is still useful, because
++ @code{gcc} generates code to do the division by zero at
++ run time so as to generate a run-time fault,
++ and tidy programmers will want to find ways to prevent
++ this needless code from being generated.
++
++ Note that @code{gcc} transforms expressions so as to find
++ opportunities for performing expensive operations
++ (such as division) at compile time instead of generating
++ code to perform them at run time.
++ For example, @code{gcc} transforms:
++
++ @smallexample
++ 2 / (i == 0)
++ @end smallexample
++
++ into:
++
++ @smallexample
++ (i == 0) ? (2 / 1) : (2 / 0)
++ @end smallexample
++
++ As a result, the division-by-zero warning might occur
++ in contexts where the divisor seems to be a non-constant.
++ It is useful in this case as well, because programmers might want
++ to clean up the code so the compiled code does not include
++ dead code to divide by zero.
++
+ @cindex @code{longjmp} warnings
+ @item
+*************** and usually makes programs run more slow
+*** 1941,1944 ****
+--- 1989,2037 ----
+ implies @samp{-fstrength-reduce} as well as @samp{-frerun-cse-after-loop}.
+
++ @item -fmove-all-movables
++ Forces all invariant computations in loops to be moved
++ outside the loop.
++ This option is provided primarily to improve performance
++ for some Fortran code, though it might improve code written
++ in other languages.
++
++ @emph{Note:} When compiling programs written in Fortran,
++ this option is enabled by default.
++
++ Analysis of Fortran code optimization and the resulting
++ optimizations triggered by this option, and the
++ @samp{-freduce-all-givs} and @samp{-frerun-loop-opt}
++ options as well, were
++ contributed by Toon Moene (@code{toon@@moene.indiv.nluug.nl}).
++
++ These three options are intended to be removed someday, once
++ they have helped determine the efficacy of various
++ approaches to improving the performance of Fortran code.
++
++ Please let us (@code{fortran@@gnu.ai.mit.edu})
++ know how use of these options affects
++ the performance of your production code.
++ We're very interested in code that runs @emph{slower}
++ when these options are @emph{enabled}.
++
++ @item -freduce-all-givs
++ Forces all general-induction variables in loops to be
++ strength-reduced.
++ This option is provided primarily to improve performance
++ for some Fortran code, though it might improve code written
++ in other languages.
++
++ @emph{Note:} When compiling programs written in Fortran,
++ this option is enabled by default.
++
++ @item -frerun-loop-opt
++ Runs loop optimizations a second time.
++ This option is provided primarily to improve performance
++ for some Fortran code, though it might improve code written
++ in other languages.
++
++ @emph{Note:} When compiling programs written in Fortran,
++ this option is enabled by default.
++
+ @item -fno-peephole
+ Disable any machine-specific peephole optimizations.
+*************** compilation).
+*** 4229,4232 ****
+--- 4322,4397 ----
+ With @samp{+e1}, G++ actually generates the code implementing virtual
+ functions defined in the code, and makes them publicly visible.
++
++ @cindex aliasing of parameters
++ @cindex parameters, aliased
++ @item -fargument-alias
++ @itemx -fargument-noalias
++ @itemx -fargument-noalias-global
++ Specify the possible relationships among parameters and between
++ parameters and global data.
++
++ @samp{-fargument-alias} specifies that arguments (parameters) may
++ alias each other and may alias global storage.
++ @samp{-fargument-noalias} specifies that arguments do not alias
++ each other, but may alias global storage.
++ @samp{-fargument-noalias-global} specifies that arguments do not
++ alias each other and do not alias global storage.
++
++ For code written in C, C++, and Objective-C, @samp{-fargument-alias}
++ is the default.
++ For code written in Fortran, @samp{-fargument-noalias-global} is
++ the default, though this is pertinent only on systems where
++ @code{g77} is installed.
++ (See the documentation for other compilers for information on the
++ defaults for their respective languages.)
++
++ Normally, @code{gcc} assumes that a write through a pointer
++ passed as a parameter to the current function might modify a
++ value pointed to by another pointer passed as a parameter, or
++ in global storage.
++
++ For example, consider this code:
++
++ @example
++ void x(int *i, int *j)
++ @{
++ extern int k;
++
++ ++*i;
++ ++*j;
++ ++k;
++ @}
++ @end example
++
++ When compiling the above function, @code{gcc} assumes that @samp{i} might
++ be a pointer to the same variable as @samp{j}, and that either @samp{i},
++ @samp{j}, or both might be a pointer to @samp{k}.
++
++ Therefore, @code{gcc} does not assume it can generate code to read
++ @samp{*i}, @samp{*j}, and @samp{k} into separate registers, increment
++ each register, then write the incremented values back out.
++
++ Instead, @code{gcc} must generate code that reads @samp{*i},
++ increments it, and writes it back before reading @samp{*j},
++ in case @samp{i} and @samp{j} are aliased, and, similarly,
++ that writes @samp{*j} before reading @samp{k}.
++ The result is code that, on many systems, takes longer to execute,
++ due to the way many processors schedule instruction execution.
++
++ Compiling the above code with the @samp{-fargument-noalias} option
++ allows @code{gcc} to assume that @samp{i} and @samp{j} do not alias
++ each other, but either might alias @samp{k}.
++
++ Compiling the above code with the @samp{-fargument-noalias-global}
++ option allows @code{gcc} to assume that no combination of @samp{i},
++ @samp{j}, and @samp{k} are aliases for each other.
++
++ @emph{Note:} Use the @samp{-fargument-noalias} and
++ @samp{-fargument-noalias-global} options with care.
++ While they can result in faster executables, they can
++ also result in executables with subtle bugs, bugs that
++ show up only when compiled for specific target systems,
++ or bugs that show up only when compiled by specific versions
++ of @code{g77}.
+ @end table
+
+diff -rcp2N gcc-2.7.2.2/libgcc2.c g77-new/libgcc2.c
+*** gcc-2.7.2.2/libgcc2.c Sun Nov 26 14:39:21 1995
+--- g77-new/libgcc2.c Sun Aug 10 18:46:07 1997
+*************** __gcc_bcmp (s1, s2, size)
+*** 1193,1196 ****
+--- 1193,1201 ----
+ #endif
+
++ #ifdef L__dummy
++ void
++ __dummy () {}
++ #endif
++
+ #ifdef L_varargs
+ #ifdef __i860__
+diff -rcp2N gcc-2.7.2.2/local-alloc.c g77-new/local-alloc.c
+*** gcc-2.7.2.2/local-alloc.c Mon Aug 21 13:15:44 1995
+--- g77-new/local-alloc.c Sun Aug 10 18:46:10 1997
+*************** static int this_insn_number;
+*** 243,246 ****
+--- 243,250 ----
+ static rtx this_insn;
+
++ /* Used to communicate changes made by update_equiv_regs to
++ memref_referenced_p. */
++ static rtx *reg_equiv_replacement;
++
+ static void alloc_qty PROTO((int, enum machine_mode, int, int));
+ static void alloc_qty_for_scratch PROTO((rtx, int, rtx, int, int));
+*************** validate_equiv_mem_from_store (dest, set
+*** 545,549 ****
+ && reg_overlap_mentioned_p (dest, equiv_mem))
+ || (GET_CODE (dest) == MEM
+! && true_dependence (dest, equiv_mem)))
+ equiv_mem_modified = 1;
+ }
+--- 549,553 ----
+ && reg_overlap_mentioned_p (dest, equiv_mem))
+ || (GET_CODE (dest) == MEM
+! && true_dependence (dest, VOIDmode, equiv_mem, rtx_varies_p)))
+ equiv_mem_modified = 1;
+ }
+*************** memref_referenced_p (memref, x)
+*** 617,621 ****
+ switch (code)
+ {
+- case REG:
+ case CONST_INT:
+ case CONST:
+--- 621,624 ----
+*************** memref_referenced_p (memref, x)
+*** 629,634 ****
+ return 0;
+
+ case MEM:
+! if (true_dependence (memref, x))
+ return 1;
+ break;
+--- 632,642 ----
+ return 0;
+
++ case REG:
++ return (reg_equiv_replacement[REGNO (x)] == 0
++ || memref_referenced_p (memref,
++ reg_equiv_replacement[REGNO (x)]));
++
+ case MEM:
+! if (true_dependence (memref, VOIDmode, x, rtx_varies_p))
+ return 1;
+ break;
+*************** optimize_reg_copy_1 (insn, dest, src)
+*** 818,827 ****
+ if (sregno >= FIRST_PSEUDO_REGISTER)
+ {
+! reg_live_length[sregno] -= length;
+! /* reg_live_length is only an approximation after combine
+! if sched is not run, so make sure that we still have
+! a reasonable value. */
+! if (reg_live_length[sregno] < 2)
+! reg_live_length[sregno] = 2;
+ reg_n_calls_crossed[sregno] -= n_calls;
+ }
+--- 826,839 ----
+ if (sregno >= FIRST_PSEUDO_REGISTER)
+ {
+! if (reg_live_length[sregno] >= 0)
+! {
+! reg_live_length[sregno] -= length;
+! /* reg_live_length is only an approximation after
+! combine if sched is not run, so make sure that we
+! still have a reasonable value. */
+! if (reg_live_length[sregno] < 2)
+! reg_live_length[sregno] = 2;
+! }
+!
+ reg_n_calls_crossed[sregno] -= n_calls;
+ }
+*************** optimize_reg_copy_1 (insn, dest, src)
+*** 829,833 ****
+ if (dregno >= FIRST_PSEUDO_REGISTER)
+ {
+! reg_live_length[dregno] += d_length;
+ reg_n_calls_crossed[dregno] += d_n_calls;
+ }
+--- 841,847 ----
+ if (dregno >= FIRST_PSEUDO_REGISTER)
+ {
+! if (reg_live_length[dregno] >= 0)
+! reg_live_length[dregno] += d_length;
+!
+ reg_n_calls_crossed[dregno] += d_n_calls;
+ }
+*************** update_equiv_regs ()
+*** 948,953 ****
+ {
+ rtx *reg_equiv_init_insn = (rtx *) alloca (max_regno * sizeof (rtx *));
+- rtx *reg_equiv_replacement = (rtx *) alloca (max_regno * sizeof (rtx *));
+ rtx insn;
+
+ bzero ((char *) reg_equiv_init_insn, max_regno * sizeof (rtx *));
+--- 962,968 ----
+ {
+ rtx *reg_equiv_init_insn = (rtx *) alloca (max_regno * sizeof (rtx *));
+ rtx insn;
++
++ reg_equiv_replacement = (rtx *) alloca (max_regno * sizeof (rtx *));
+
+ bzero ((char *) reg_equiv_init_insn, max_regno * sizeof (rtx *));
+diff -rcp2N gcc-2.7.2.2/loop.c g77-new/loop.c
+*** gcc-2.7.2.2/loop.c Thu Feb 20 19:24:20 1997
+--- g77-new/loop.c Sun Aug 10 18:46:43 1997
+*************** int *loop_number_exit_count;
+*** 111,116 ****
+ unsigned HOST_WIDE_INT loop_n_iterations;
+
+! /* Nonzero if there is a subroutine call in the current loop.
+! (unknown_address_altered is also nonzero in this case.) */
+
+ static int loop_has_call;
+--- 111,115 ----
+ unsigned HOST_WIDE_INT loop_n_iterations;
+
+! /* Nonzero if there is a subroutine call in the current loop. */
+
+ static int loop_has_call;
+*************** static char *moved_once;
+*** 160,164 ****
+ here, we just turn on unknown_address_altered. */
+
+! #define NUM_STORES 20
+ static rtx loop_store_mems[NUM_STORES];
+
+--- 159,163 ----
+ here, we just turn on unknown_address_altered. */
+
+! #define NUM_STORES 30
+ static rtx loop_store_mems[NUM_STORES];
+
+*************** scan_loop (loop_start, end, nregs)
+*** 669,673 ****
+ {
+ temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
+! if (temp && CONSTANT_P (XEXP (temp, 0)))
+ src = XEXP (temp, 0), move_insn = 1;
+ if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
+--- 668,673 ----
+ {
+ temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
+! if (temp && CONSTANT_P (XEXP (temp, 0))
+! && LEGITIMATE_CONSTANT_P (XEXP (temp, 0)))
+ src = XEXP (temp, 0), move_insn = 1;
+ if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
+*************** move_movables (movables, threshold, insn
+*** 1629,1632 ****
+--- 1629,1633 ----
+
+ if (already_moved[regno]
++ || flag_move_all_movables
+ || (threshold * savings * m->lifetime) >= insn_count
+ || (m->forces && m->forces->done
+*************** prescan_loop (start, end)
+*** 2199,2203 ****
+ else if (GET_CODE (insn) == CALL_INSN)
+ {
+! unknown_address_altered = 1;
+ loop_has_call = 1;
+ }
+--- 2200,2205 ----
+ else if (GET_CODE (insn) == CALL_INSN)
+ {
+! if (! CONST_CALL_P (insn))
+! unknown_address_altered = 1;
+ loop_has_call = 1;
+ }
+*************** invariant_p (x)
+*** 2777,2781 ****
+ /* See if there is any dependence between a store and this load. */
+ for (i = loop_store_mems_idx - 1; i >= 0; i--)
+! if (true_dependence (loop_store_mems[i], x))
+ return 0;
+
+--- 2779,2783 ----
+ /* See if there is any dependence between a store and this load. */
+ for (i = loop_store_mems_idx - 1; i >= 0; i--)
+! if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p))
+ return 0;
+
+*************** strength_reduce (scan_start, end, loop_t
+*** 3821,3826 ****
+ exit. */
+
+! if (v->lifetime * threshold * benefit < insn_count
+! && ! bl->reversed)
+ {
+ if (loop_dump_stream)
+--- 3823,3828 ----
+ exit. */
+
+! if ( ! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count
+! && ! bl->reversed )
+ {
+ if (loop_dump_stream)
+*************** record_giv (v, insn, src_reg, dest_reg,
+*** 4375,4378 ****
+--- 4377,4382 ----
+ v->final_value = 0;
+ v->same_insn = 0;
++ v->unrolled = 0;
++ v->shared = 0;
+
+ /* The v->always_computable field is used in update_giv_derive, to
+*************** check_final_value (v, loop_start, loop_e
+*** 4652,4657 ****
+ if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
+ && LABEL_NAME (JUMP_LABEL (p))
+! && ((INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
+! && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
+ || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
+ && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
+--- 4656,4664 ----
+ if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
+ && LABEL_NAME (JUMP_LABEL (p))
+! && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
+! || (INSN_UID (v->insn) >= max_uid_for_loop)
+! || (INSN_UID (last_giv_use) >= max_uid_for_loop)
+! || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
+! && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
+ || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
+ && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
+*************** emit_iv_add_mult (b, m, a, reg, insert_b
+*** 5560,5563 ****
+--- 5567,5572 ----
+
+ emit_insn_before (seq, insert_before);
++
++ record_base_value (REGNO (reg), b);
+ }
+
+diff -rcp2N gcc-2.7.2.2/loop.h g77-new/loop.h
+*** gcc-2.7.2.2/loop.h Fri Jul 14 08:23:28 1995
+--- g77-new/loop.h Thu Jul 10 20:09:03 1997
+*************** struct induction
+*** 89,92 ****
+--- 89,95 ----
+ we won't use it to eliminate a biv, it
+ would probably lose. */
++ unsigned unrolled : 1; /* 1 if new register has been allocated in
++ unrolled loop. */
++ unsigned shared : 1;
+ int lifetime; /* Length of life of this giv */
+ int times_used; /* # times this giv is used. */
+diff -rcp2N gcc-2.7.2.2/real.c g77-new/real.c
+*** gcc-2.7.2.2/real.c Tue Aug 15 17:57:18 1995
+--- g77-new/real.c Thu Jul 10 20:09:04 1997
+*************** make_nan (nan, sign, mode)
+*** 5625,5633 ****
+ }
+
+! /* Convert an SFmode target `float' value to a REAL_VALUE_TYPE.
+! This is the inverse of the function `etarsingle' invoked by
+ REAL_VALUE_TO_TARGET_SINGLE. */
+
+ REAL_VALUE_TYPE
+ ereal_from_float (f)
+ HOST_WIDE_INT f;
+--- 5625,5699 ----
+ }
+
+! /* This is the inverse of the function `etarsingle' invoked by
+ REAL_VALUE_TO_TARGET_SINGLE. */
+
+ REAL_VALUE_TYPE
++ ereal_unto_float (f)
++ long f;
++ {
++ REAL_VALUE_TYPE r;
++ unsigned EMUSHORT s[2];
++ unsigned EMUSHORT e[NE];
++
++ /* Convert 32 bit integer to array of 16 bit pieces in target machine order.
++ This is the inverse operation to what the function `endian' does. */
++ if (REAL_WORDS_BIG_ENDIAN)
++ {
++ s[0] = (unsigned EMUSHORT) (f >> 16);
++ s[1] = (unsigned EMUSHORT) f;
++ }
++ else
++ {
++ s[0] = (unsigned EMUSHORT) f;
++ s[1] = (unsigned EMUSHORT) (f >> 16);
++ }
++ /* Convert and promote the target float to E-type. */
++ e24toe (s, e);
++ /* Output E-type to REAL_VALUE_TYPE. */
++ PUT_REAL (e, &r);
++ return r;
++ }
++
++
++ /* This is the inverse of the function `etardouble' invoked by
++ REAL_VALUE_TO_TARGET_DOUBLE. */
++
++ REAL_VALUE_TYPE
++ ereal_unto_double (d)
++ long d[];
++ {
++ REAL_VALUE_TYPE r;
++ unsigned EMUSHORT s[4];
++ unsigned EMUSHORT e[NE];
++
++ /* Convert array of HOST_WIDE_INT to equivalent array of 16-bit pieces. */
++ if (REAL_WORDS_BIG_ENDIAN)
++ {
++ s[0] = (unsigned EMUSHORT) (d[0] >> 16);
++ s[1] = (unsigned EMUSHORT) d[0];
++ s[2] = (unsigned EMUSHORT) (d[1] >> 16);
++ s[3] = (unsigned EMUSHORT) d[1];
++ }
++ else
++ {
++ /* Target float words are little-endian. */
++ s[0] = (unsigned EMUSHORT) d[0];
++ s[1] = (unsigned EMUSHORT) (d[0] >> 16);
++ s[2] = (unsigned EMUSHORT) d[1];
++ s[3] = (unsigned EMUSHORT) (d[1] >> 16);
++ }
++ /* Convert target double to E-type. */
++ e53toe (s, e);
++ /* Output E-type to REAL_VALUE_TYPE. */
++ PUT_REAL (e, &r);
++ return r;
++ }
++
++
++ /* Convert an SFmode target `float' value to a REAL_VALUE_TYPE.
++ This is somewhat like ereal_unto_float, but the input types
++ for these are different. */
++
++ REAL_VALUE_TYPE
+ ereal_from_float (f)
+ HOST_WIDE_INT f;
+*************** ereal_from_float (f)
+*** 5658,5663 ****
+
+ /* Convert a DFmode target `double' value to a REAL_VALUE_TYPE.
+! This is the inverse of the function `etardouble' invoked by
+! REAL_VALUE_TO_TARGET_DOUBLE.
+
+ The DFmode is stored as an array of HOST_WIDE_INT in the target's
+--- 5724,5729 ----
+
+ /* Convert a DFmode target `double' value to a REAL_VALUE_TYPE.
+! This is somewhat like ereal_unto_double, but the input types
+! for these are different.
+
+ The DFmode is stored as an array of HOST_WIDE_INT in the target's
+diff -rcp2N gcc-2.7.2.2/real.h g77-new/real.h
+*** gcc-2.7.2.2/real.h Thu Jun 15 07:57:56 1995
+--- g77-new/real.h Thu Jul 10 20:09:05 1997
+*************** extern void ereal_to_decimal PROTO((REAL
+*** 152,155 ****
+--- 152,157 ----
+ extern int ereal_cmp PROTO((REAL_VALUE_TYPE, REAL_VALUE_TYPE));
+ extern int ereal_isneg PROTO((REAL_VALUE_TYPE));
++ extern REAL_VALUE_TYPE ereal_unto_float PROTO((long));
++ extern REAL_VALUE_TYPE ereal_unto_double PROTO((long *));
+ extern REAL_VALUE_TYPE ereal_from_float PROTO((HOST_WIDE_INT));
+ extern REAL_VALUE_TYPE ereal_from_double PROTO((HOST_WIDE_INT *));
+*************** extern REAL_VALUE_TYPE real_value_trunca
+*** 197,200 ****
+--- 199,208 ----
+ /* IN is a REAL_VALUE_TYPE. OUT is a long. */
+ #define REAL_VALUE_TO_TARGET_SINGLE(IN, OUT) ((OUT) = etarsingle ((IN)))
++
++ /* Inverse of REAL_VALUE_TO_TARGET_DOUBLE. */
++ #define REAL_VALUE_UNTO_TARGET_DOUBLE(d) (ereal_unto_double (d))
++
++ /* Inverse of REAL_VALUE_TO_TARGET_SINGLE. */
++ #define REAL_VALUE_UNTO_TARGET_SINGLE(f) (ereal_unto_float (f))
+
+ /* d is an array of HOST_WIDE_INT that holds a double precision
+diff -rcp2N gcc-2.7.2.2/recog.c g77-new/recog.c
+*** gcc-2.7.2.2/recog.c Sat Jul 1 06:52:35 1995
+--- g77-new/recog.c Sun Aug 10 18:46:55 1997
+*************** register_operand (op, mode)
+*** 872,876 ****
+ REGNO (SUBREG_REG (op)))
+ && (GET_MODE_SIZE (mode)
+! != GET_MODE_SIZE (GET_MODE (SUBREG_REG (op)))))
+ return 0;
+ #endif
+--- 872,878 ----
+ REGNO (SUBREG_REG (op)))
+ && (GET_MODE_SIZE (mode)
+! != GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
+! && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op))) != MODE_COMPLEX_INT
+! && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op))) != MODE_COMPLEX_FLOAT)
+ return 0;
+ #endif
+diff -rcp2N gcc-2.7.2.2/reload.c g77-new/reload.c
+*** gcc-2.7.2.2/reload.c Sat Nov 11 08:23:54 1995
+--- g77-new/reload.c Sun Aug 10 04:58:03 1997
+***************
+*** 1,4 ****
+ /* Search an insn for pseudo regs that must be in hard regs and are not.
+! Copyright (C) 1987, 88, 89, 92, 93, 94, 1995 Free Software Foundation, Inc.
+
+ This file is part of GNU CC.
+--- 1,4 ----
+ /* Search an insn for pseudo regs that must be in hard regs and are not.
+! Copyright (C) 1987, 88, 89, 92-5, 1996 Free Software Foundation, Inc.
+
+ This file is part of GNU CC.
+*************** static int push_secondary_reload PROTO((
+*** 292,295 ****
+--- 292,296 ----
+ enum machine_mode, enum reload_type,
+ enum insn_code *));
++ static enum reg_class find_valid_class PROTO((enum machine_mode, int));
+ static int push_reload PROTO((rtx, rtx, rtx *, rtx *, enum reg_class,
+ enum machine_mode, enum machine_mode,
+*************** static struct decomposition decompose PR
+*** 305,312 ****
+ static int immune_p PROTO((rtx, rtx, struct decomposition));
+ static int alternative_allows_memconst PROTO((char *, int));
+! static rtx find_reloads_toplev PROTO((rtx, int, enum reload_type, int, int));
+ static rtx make_memloc PROTO((rtx, int));
+ static int find_reloads_address PROTO((enum machine_mode, rtx *, rtx, rtx *,
+! int, enum reload_type, int));
+ static rtx subst_reg_equivs PROTO((rtx));
+ static rtx subst_indexed_address PROTO((rtx));
+--- 306,313 ----
+ static int immune_p PROTO((rtx, rtx, struct decomposition));
+ static int alternative_allows_memconst PROTO((char *, int));
+! static rtx find_reloads_toplev PROTO((rtx, int, enum reload_type, int, int, short *));
+ static rtx make_memloc PROTO((rtx, int));
+ static int find_reloads_address PROTO((enum machine_mode, rtx *, rtx, rtx *,
+! int, enum reload_type, int, short *));
+ static rtx subst_reg_equivs PROTO((rtx));
+ static rtx subst_indexed_address PROTO((rtx));
+*************** push_secondary_reload (in_p, x, opnum, o
+*** 590,599 ****
+
+ if (in_p && icode == CODE_FOR_nothing
+! && SECONDARY_MEMORY_NEEDED (class, reload_class, reload_mode))
+! get_secondary_mem (x, reload_mode, opnum, type);
+
+ if (! in_p && icode == CODE_FOR_nothing
+! && SECONDARY_MEMORY_NEEDED (reload_class, class, reload_mode))
+! get_secondary_mem (x, reload_mode, opnum, type);
+ #endif
+ }
+--- 591,600 ----
+
+ if (in_p && icode == CODE_FOR_nothing
+! && SECONDARY_MEMORY_NEEDED (class, reload_class, mode))
+! get_secondary_mem (x, mode, opnum, type);
+
+ if (! in_p && icode == CODE_FOR_nothing
+! && SECONDARY_MEMORY_NEEDED (reload_class, class, mode))
+! get_secondary_mem (x, mode, opnum, type);
+ #endif
+ }
+*************** get_secondary_mem (x, mode, opnum, type)
+*** 673,677 ****
+
+ find_reloads_address (mode, NULL_PTR, XEXP (loc, 0), &XEXP (loc, 0),
+! opnum, type, 0);
+ }
+
+--- 674,678 ----
+
+ find_reloads_address (mode, NULL_PTR, XEXP (loc, 0), &XEXP (loc, 0),
+! opnum, type, 0, NULL);
+ }
+
+*************** clear_secondary_mem ()
+*** 689,692 ****
+--- 690,725 ----
+ #endif /* SECONDARY_MEMORY_NEEDED */
+
++ /* Find the largest class for which every register number plus N is valid in
++ M1 (if in range). Abort if no such class exists. */
++
++ static enum reg_class
++ find_valid_class (m1, n)
++ enum machine_mode m1;
++ int n;
++ {
++ int class;
++ int regno;
++ enum reg_class best_class;
++ int best_size = 0;
++
++ for (class = 1; class < N_REG_CLASSES; class++)
++ {
++ int bad = 0;
++ for (regno = 0; regno < FIRST_PSEUDO_REGISTER && ! bad; regno++)
++ if (TEST_HARD_REG_BIT (reg_class_contents[class], regno)
++ && TEST_HARD_REG_BIT (reg_class_contents[class], regno + n)
++ && ! HARD_REGNO_MODE_OK (regno + n, m1))
++ bad = 1;
++
++ if (! bad && reg_class_size[class] > best_size)
++ best_class = class, best_size = reg_class_size[class];
++ }
++
++ if (best_size == 0)
++ abort ();
++
++ return best_class;
++ }
++
+ /* Record one reload that needs to be performed.
+ IN is an rtx saying where the data are to be found before this instruction.
+*************** push_reload (in, out, inloc, outloc, cla
+*** 894,898 ****
+ && GET_CODE (SUBREG_REG (in)) == REG
+ && REGNO (SUBREG_REG (in)) < FIRST_PSEUDO_REGISTER
+! && (! HARD_REGNO_MODE_OK (REGNO (SUBREG_REG (in)), inmode)
+ || (GET_MODE_SIZE (inmode) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))
+--- 927,932 ----
+ && GET_CODE (SUBREG_REG (in)) == REG
+ && REGNO (SUBREG_REG (in)) < FIRST_PSEUDO_REGISTER
+! && (! HARD_REGNO_MODE_OK (REGNO (SUBREG_REG (in)) + SUBREG_WORD (in),
+! inmode)
+ || (GET_MODE_SIZE (inmode) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))
+*************** push_reload (in, out, inloc, outloc, cla
+*** 909,913 ****
+ output before the outer reload. */
+ push_reload (SUBREG_REG (in), NULL_RTX, &SUBREG_REG (in), NULL_PTR,
+! GENERAL_REGS, VOIDmode, VOIDmode, 0, 0, opnum, type);
+ dont_remove_subreg = 1;
+ }
+--- 943,948 ----
+ output before the outer reload. */
+ push_reload (SUBREG_REG (in), NULL_RTX, &SUBREG_REG (in), NULL_PTR,
+! find_valid_class (inmode, SUBREG_WORD (in)),
+! VOIDmode, VOIDmode, 0, 0, opnum, type);
+ dont_remove_subreg = 1;
+ }
+*************** push_reload (in, out, inloc, outloc, cla
+*** 982,986 ****
+ && GET_CODE (SUBREG_REG (out)) == REG
+ && REGNO (SUBREG_REG (out)) < FIRST_PSEUDO_REGISTER
+! && (! HARD_REGNO_MODE_OK (REGNO (SUBREG_REG (out)), outmode)
+ || (GET_MODE_SIZE (outmode) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (out)))
+--- 1017,1022 ----
+ && GET_CODE (SUBREG_REG (out)) == REG
+ && REGNO (SUBREG_REG (out)) < FIRST_PSEUDO_REGISTER
+! && (! HARD_REGNO_MODE_OK (REGNO (SUBREG_REG (out)) + SUBREG_WORD (out),
+! outmode)
+ || (GET_MODE_SIZE (outmode) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (out)))
+*************** push_reload (in, out, inloc, outloc, cla
+*** 998,1002 ****
+ dont_remove_subreg = 1;
+ push_reload (SUBREG_REG (out), SUBREG_REG (out), &SUBREG_REG (out),
+! &SUBREG_REG (out), ALL_REGS, VOIDmode, VOIDmode, 0, 0,
+ opnum, RELOAD_OTHER);
+ }
+--- 1034,1040 ----
+ dont_remove_subreg = 1;
+ push_reload (SUBREG_REG (out), SUBREG_REG (out), &SUBREG_REG (out),
+! &SUBREG_REG (out),
+! find_valid_class (outmode, SUBREG_WORD (out)),
+! VOIDmode, VOIDmode, 0, 0,
+ opnum, RELOAD_OTHER);
+ }
+*************** find_reloads (insn, replace, ind_levels,
+*** 2241,2244 ****
+--- 2279,2283 ----
+ int goal_earlyclobber, this_earlyclobber;
+ enum machine_mode operand_mode[MAX_RECOG_OPERANDS];
++ short force_update[MAX_RECOG_OPERANDS];
+
+ this_insn = insn;
+*************** find_reloads (insn, replace, ind_levels,
+*** 2272,2275 ****
+--- 2311,2316 ----
+ #endif
+
++ bzero ((char *) force_update, sizeof force_update);
++
+ /* Find what kind of insn this is. NOPERANDS gets number of operands.
+ Make OPERANDS point to a vector of operand values.
+*************** find_reloads (insn, replace, ind_levels,
+*** 2469,2473 ****
+ find_reloads_address (VOIDmode, NULL_PTR,
+ recog_operand[i], recog_operand_loc[i],
+! i, operand_type[i], ind_levels);
+ substed_operand[i] = recog_operand[i] = *recog_operand_loc[i];
+ }
+--- 2510,2515 ----
+ find_reloads_address (VOIDmode, NULL_PTR,
+ recog_operand[i], recog_operand_loc[i],
+! i, operand_type[i], ind_levels,
+! &force_update[i]);
+ substed_operand[i] = recog_operand[i] = *recog_operand_loc[i];
+ }
+*************** find_reloads (insn, replace, ind_levels,
+*** 2478,2482 ****
+ XEXP (recog_operand[i], 0),
+ &XEXP (recog_operand[i], 0),
+! i, address_type[i], ind_levels))
+ address_reloaded[i] = 1;
+ substed_operand[i] = recog_operand[i] = *recog_operand_loc[i];
+--- 2520,2525 ----
+ XEXP (recog_operand[i], 0),
+ &XEXP (recog_operand[i], 0),
+! i, address_type[i], ind_levels,
+! &force_update[i]))
+ address_reloaded[i] = 1;
+ substed_operand[i] = recog_operand[i] = *recog_operand_loc[i];
+*************** find_reloads (insn, replace, ind_levels,
+*** 2487,2491 ****
+ ind_levels,
+ set != 0
+! && &SET_DEST (set) == recog_operand_loc[i]);
+ else if (code == PLUS)
+ /* We can get a PLUS as an "operand" as a result of
+--- 2530,2535 ----
+ ind_levels,
+ set != 0
+! && &SET_DEST (set) == recog_operand_loc[i],
+! &force_update[i]);
+ else if (code == PLUS)
+ /* We can get a PLUS as an "operand" as a result of
+*************** find_reloads (insn, replace, ind_levels,
+*** 2493,2497 ****
+ substed_operand[i] = recog_operand[i] = *recog_operand_loc[i]
+ = find_reloads_toplev (recog_operand[i], i, address_type[i],
+! ind_levels, 0);
+ else if (code == REG)
+ {
+--- 2537,2541 ----
+ substed_operand[i] = recog_operand[i] = *recog_operand_loc[i]
+ = find_reloads_toplev (recog_operand[i], i, address_type[i],
+! ind_levels, 0, &force_update[i]);
+ else if (code == REG)
+ {
+*************** find_reloads (insn, replace, ind_levels,
+*** 2505,2510 ****
+ if (reg_equiv_constant[regno] != 0
+ && (set == 0 || &SET_DEST (set) != recog_operand_loc[i]))
+! substed_operand[i] = recog_operand[i]
+! = reg_equiv_constant[regno];
+ #if 0 /* This might screw code in reload1.c to delete prior output-reload
+ that feeds this insn. */
+--- 2549,2557 ----
+ if (reg_equiv_constant[regno] != 0
+ && (set == 0 || &SET_DEST (set) != recog_operand_loc[i]))
+! {
+! substed_operand[i] = recog_operand[i]
+! = reg_equiv_constant[regno];
+! force_update[i] = 1;
+! }
+ #if 0 /* This might screw code in reload1.c to delete prior output-reload
+ that feeds this insn. */
+*************** find_reloads (insn, replace, ind_levels,
+*** 2545,2549 ****
+ XEXP (recog_operand[i], 0),
+ &XEXP (recog_operand[i], 0),
+! i, address_type[i], ind_levels);
+ substed_operand[i] = recog_operand[i] = *recog_operand_loc[i];
+ }
+--- 2592,2597 ----
+ XEXP (recog_operand[i], 0),
+ &XEXP (recog_operand[i], 0),
+! i, address_type[i], ind_levels,
+! &force_update[i]);
+ substed_operand[i] = recog_operand[i] = *recog_operand_loc[i];
+ }
+*************** find_reloads (insn, replace, ind_levels,
+*** 3415,3419 ****
+ = find_reloads_toplev (force_const_mem (operand_mode[i],
+ recog_operand[i]),
+! i, address_type[i], ind_levels, 0);
+ if (alternative_allows_memconst (constraints1[i],
+ goal_alternative_number))
+--- 3463,3467 ----
+ = find_reloads_toplev (force_const_mem (operand_mode[i],
+ recog_operand[i]),
+! i, address_type[i], ind_levels, 0, NULL);
+ if (alternative_allows_memconst (constraints1[i],
+ goal_alternative_number))
+*************** find_reloads (insn, replace, ind_levels,
+*** 3595,3609 ****
+ Don't do this if we aren't making replacements because we might be
+ propagating things allocated by frame pointer elimination into places
+! it doesn't expect. */
+
+! if (insn_code_number >= 0 && replace)
+! for (i = insn_n_dups[insn_code_number] - 1; i >= 0; i--)
+! {
+! int opno = recog_dup_num[i];
+! *recog_dup_loc[i] = *recog_operand_loc[opno];
+! if (operand_reloadnum[opno] >= 0)
+! push_replacement (recog_dup_loc[i], operand_reloadnum[opno],
+! insn_operand_mode[insn_code_number][opno]);
+! }
+
+ #if 0
+--- 3643,3664 ----
+ Don't do this if we aren't making replacements because we might be
+ propagating things allocated by frame pointer elimination into places
+! it doesn't expect. However, always do it for replaces of pseudos
+! by constants. */
+
+! for (i = insn_n_dups[insn_code_number] - 1; i >= 0; i--)
+! {
+! int opno = recog_dup_num[i];
+!
+! if (! (insn_code_number >= 0 && replace))
+! {
+! if (! force_update[opno])
+! continue;
+! }
+!
+! *recog_dup_loc[i] = *recog_operand_loc[opno];
+! if (operand_reloadnum[opno] >= 0)
+! push_replacement (recog_dup_loc[i], operand_reloadnum[opno],
+! insn_operand_mode[insn_code_number][opno]);
+! }
+
+ #if 0
+*************** find_reloads (insn, replace, ind_levels,
+*** 3829,3832 ****
+--- 3884,3888 ----
+ register RTX_CODE code = GET_CODE (recog_operand[i]);
+ int is_set_dest = GET_CODE (body) == SET && (i == 0);
++ short ign;
+
+ if (insn_code_number >= 0)
+*************** find_reloads (insn, replace, ind_levels,
+*** 3834,3838 ****
+ find_reloads_address (VOIDmode, NULL_PTR,
+ recog_operand[i], recog_operand_loc[i],
+! i, RELOAD_FOR_INPUT, ind_levels);
+
+ /* In these cases, we can't tell if the operand is an input
+--- 3890,3894 ----
+ find_reloads_address (VOIDmode, NULL_PTR,
+ recog_operand[i], recog_operand_loc[i],
+! i, RELOAD_FOR_INPUT, ind_levels, &ign);
+
+ /* In these cases, we can't tell if the operand is an input
+*************** find_reloads (insn, replace, ind_levels,
+*** 3845,3853 ****
+ XEXP (recog_operand[i], 0),
+ &XEXP (recog_operand[i], 0),
+! i, RELOAD_OTHER, ind_levels);
+ if (code == SUBREG)
+ recog_operand[i] = *recog_operand_loc[i]
+ = find_reloads_toplev (recog_operand[i], i, RELOAD_OTHER,
+! ind_levels, is_set_dest);
+ if (code == REG)
+ {
+--- 3901,3909 ----
+ XEXP (recog_operand[i], 0),
+ &XEXP (recog_operand[i], 0),
+! i, RELOAD_OTHER, ind_levels, &ign);
+ if (code == SUBREG)
+ recog_operand[i] = *recog_operand_loc[i]
+ = find_reloads_toplev (recog_operand[i], i, RELOAD_OTHER,
+! ind_levels, is_set_dest, &ign);
+ if (code == REG)
+ {
+*************** alternative_allows_memconst (constraint,
+*** 3908,3915 ****
+
+ IS_SET_DEST is true if X is the destination of a SET, which is not
+! appropriate to be replaced by a constant. */
+
+ static rtx
+! find_reloads_toplev (x, opnum, type, ind_levels, is_set_dest)
+ rtx x;
+ int opnum;
+--- 3964,3974 ----
+
+ IS_SET_DEST is true if X is the destination of a SET, which is not
+! appropriate to be replaced by a constant.
+!
+! FORCE_UPDATE, if non-NULL, is the address of a SHORT that is set to
+! 1 if X is replaced with something based on reg_equiv_constant. */
+
+ static rtx
+! find_reloads_toplev (x, opnum, type, ind_levels, is_set_dest, force_update)
+ rtx x;
+ int opnum;
+*************** find_reloads_toplev (x, opnum, type, ind
+*** 3917,3920 ****
+--- 3976,3980 ----
+ int ind_levels;
+ int is_set_dest;
++ short *force_update;
+ {
+ register RTX_CODE code = GET_CODE (x);
+*************** find_reloads_toplev (x, opnum, type, ind
+*** 3928,3932 ****
+ register int regno = REGNO (x);
+ if (reg_equiv_constant[regno] != 0 && !is_set_dest)
+! x = reg_equiv_constant[regno];
+ #if 0
+ /* This creates (subreg (mem...)) which would cause an unnecessary
+--- 3988,3998 ----
+ register int regno = REGNO (x);
+ if (reg_equiv_constant[regno] != 0 && !is_set_dest)
+! {
+! x = reg_equiv_constant[regno];
+! if (force_update)
+! *force_update = 1;
+! else
+! abort (); /* Learn why this happens. */
+! }
+ #if 0
+ /* This creates (subreg (mem...)) which would cause an unnecessary
+*************** find_reloads_toplev (x, opnum, type, ind
+*** 3951,3955 ****
+ find_reloads_address (GET_MODE (x), NULL_PTR,
+ XEXP (x, 0),
+! &XEXP (x, 0), opnum, type, ind_levels);
+ }
+ return x;
+--- 4017,4022 ----
+ find_reloads_address (GET_MODE (x), NULL_PTR,
+ XEXP (x, 0),
+! &XEXP (x, 0), opnum, type, ind_levels,
+! force_update);
+ }
+ return x;
+*************** find_reloads_toplev (x, opnum, type, ind
+*** 3959,3963 ****
+ rtx tem = x;
+ find_reloads_address (GET_MODE (x), &tem, XEXP (x, 0), &XEXP (x, 0),
+! opnum, type, ind_levels);
+ return tem;
+ }
+--- 4026,4030 ----
+ rtx tem = x;
+ find_reloads_address (GET_MODE (x), &tem, XEXP (x, 0), &XEXP (x, 0),
+! opnum, type, ind_levels, force_update);
+ return tem;
+ }
+*************** find_reloads_toplev (x, opnum, type, ind
+*** 3982,3986 ****
+ && (tem = gen_lowpart_common (GET_MODE (x),
+ reg_equiv_constant[regno])) != 0)
+! return tem;
+
+ if (GET_MODE_BITSIZE (GET_MODE (x)) == BITS_PER_WORD
+--- 4049,4059 ----
+ && (tem = gen_lowpart_common (GET_MODE (x),
+ reg_equiv_constant[regno])) != 0)
+! {
+! if (force_update)
+! *force_update = 1;
+! else
+! abort (); /* Learn why this happens. */
+! return tem;
+! }
+
+ if (GET_MODE_BITSIZE (GET_MODE (x)) == BITS_PER_WORD
+*************** find_reloads_toplev (x, opnum, type, ind
+*** 3990,3994 ****
+ SUBREG_WORD (x), 0,
+ GET_MODE (SUBREG_REG (x)))) != 0)
+! return tem;
+
+ if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0
+--- 4063,4073 ----
+ SUBREG_WORD (x), 0,
+ GET_MODE (SUBREG_REG (x)))) != 0)
+! {
+! if (force_update)
+! *force_update = 1;
+! else
+! abort (); /* Learn why this happens. */
+! return tem;
+! }
+
+ if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0
+*************** find_reloads_toplev (x, opnum, type, ind
+*** 4040,4044 ****
+ find_reloads_address (GET_MODE (x), NULL_PTR,
+ XEXP (x, 0),
+! &XEXP (x, 0), opnum, type, ind_levels);
+ }
+
+--- 4119,4124 ----
+ find_reloads_address (GET_MODE (x), NULL_PTR,
+ XEXP (x, 0),
+! &XEXP (x, 0), opnum, type, ind_levels,
+! force_update);
+ }
+
+*************** find_reloads_toplev (x, opnum, type, ind
+*** 4049,4053 ****
+ if (fmt[i] == 'e')
+ XEXP (x, i) = find_reloads_toplev (XEXP (x, i), opnum, type,
+! ind_levels, is_set_dest);
+ }
+ return x;
+--- 4129,4133 ----
+ if (fmt[i] == 'e')
+ XEXP (x, i) = find_reloads_toplev (XEXP (x, i), opnum, type,
+! ind_levels, is_set_dest, NULL);
+ }
+ return x;
+*************** make_memloc (ad, regno)
+*** 4110,4114 ****
+
+ static int
+! find_reloads_address (mode, memrefloc, ad, loc, opnum, type, ind_levels)
+ enum machine_mode mode;
+ rtx *memrefloc;
+--- 4190,4195 ----
+
+ static int
+! find_reloads_address (mode, memrefloc, ad, loc, opnum, type, ind_levels,
+! force_update)
+ enum machine_mode mode;
+ rtx *memrefloc;
+*************** find_reloads_address (mode, memrefloc, a
+*** 4118,4121 ****
+--- 4199,4203 ----
+ enum reload_type type;
+ int ind_levels;
++ short *force_update;
+ {
+ register int regno;
+*************** find_reloads_address (mode, memrefloc, a
+*** 4134,4137 ****
+--- 4216,4223 ----
+ {
+ *loc = ad = reg_equiv_constant[regno];
++ if (force_update)
++ *force_update = 1;
++ else
++ abort (); /* Learn why this happens. */
+ return 1;
+ }
+*************** find_reloads_address (mode, memrefloc, a
+*** 4141,4145 ****
+ tem = make_memloc (ad, regno);
+ find_reloads_address (GET_MODE (tem), NULL_PTR, XEXP (tem, 0),
+! &XEXP (tem, 0), opnum, type, ind_levels);
+ push_reload (tem, NULL_RTX, loc, NULL_PTR, BASE_REG_CLASS,
+ GET_MODE (ad), VOIDmode, 0, 0,
+--- 4227,4231 ----
+ tem = make_memloc (ad, regno);
+ find_reloads_address (GET_MODE (tem), NULL_PTR, XEXP (tem, 0),
+! &XEXP (tem, 0), opnum, type, ind_levels, NULL);
+ push_reload (tem, NULL_RTX, loc, NULL_PTR, BASE_REG_CLASS,
+ GET_MODE (ad), VOIDmode, 0, 0,
+*************** find_reloads_address (mode, memrefloc, a
+*** 4214,4218 ****
+ tem = ad;
+ find_reloads_address (GET_MODE (ad), &tem, XEXP (ad, 0), &XEXP (ad, 0),
+! opnum, type, ind_levels == 0 ? 0 : ind_levels - 1);
+
+ /* If tem was changed, then we must create a new memory reference to
+--- 4300,4305 ----
+ tem = ad;
+ find_reloads_address (GET_MODE (ad), &tem, XEXP (ad, 0), &XEXP (ad, 0),
+! opnum, type, ind_levels == 0 ? 0 : ind_levels - 1,
+! NULL);
+
+ /* If tem was changed, then we must create a new memory reference to
+*************** find_reloads_address_1 (x, context, loc,
+*** 4722,4726 ****
+ /* First reload the memory location's address. */
+ find_reloads_address (GET_MODE (tem), 0, XEXP (tem, 0),
+! &XEXP (tem, 0), opnum, type, ind_levels);
+ /* Put this inside a new increment-expression. */
+ x = gen_rtx (GET_CODE (x), GET_MODE (x), tem);
+--- 4809,4814 ----
+ /* First reload the memory location's address. */
+ find_reloads_address (GET_MODE (tem), 0, XEXP (tem, 0),
+! &XEXP (tem, 0), opnum, type, ind_levels,
+! NULL);
+ /* Put this inside a new increment-expression. */
+ x = gen_rtx (GET_CODE (x), GET_MODE (x), tem);
+*************** find_reloads_address_1 (x, context, loc,
+*** 4788,4792 ****
+ find_reloads_address (GET_MODE (x), &XEXP (x, 0),
+ XEXP (XEXP (x, 0), 0), &XEXP (XEXP (x, 0), 0),
+! opnum, type, ind_levels);
+
+ reloadnum = push_reload (x, NULL_RTX, loc, NULL_PTR,
+--- 4876,4880 ----
+ find_reloads_address (GET_MODE (x), &XEXP (x, 0),
+ XEXP (XEXP (x, 0), 0), &XEXP (XEXP (x, 0), 0),
+! opnum, type, ind_levels, NULL);
+
+ reloadnum = push_reload (x, NULL_RTX, loc, NULL_PTR,
+*************** find_reloads_address_1 (x, context, loc,
+*** 4818,4822 ****
+
+ find_reloads_address (GET_MODE (x), loc, XEXP (x, 0), &XEXP (x, 0),
+! opnum, type, ind_levels);
+ push_reload (*loc, NULL_RTX, loc, NULL_PTR,
+ context ? INDEX_REG_CLASS : BASE_REG_CLASS,
+--- 4906,4910 ----
+
+ find_reloads_address (GET_MODE (x), loc, XEXP (x, 0), &XEXP (x, 0),
+! opnum, type, ind_levels, NULL);
+ push_reload (*loc, NULL_RTX, loc, NULL_PTR,
+ context ? INDEX_REG_CLASS : BASE_REG_CLASS,
+*************** find_reloads_address_1 (x, context, loc,
+*** 4852,4856 ****
+ x = make_memloc (x, regno);
+ find_reloads_address (GET_MODE (x), 0, XEXP (x, 0), &XEXP (x, 0),
+! opnum, type, ind_levels);
+ }
+
+--- 4940,4944 ----
+ x = make_memloc (x, regno);
+ find_reloads_address (GET_MODE (x), 0, XEXP (x, 0), &XEXP (x, 0),
+! opnum, type, ind_levels, NULL);
+ }
+
+*************** find_reloads_address_part (x, loc, class
+*** 4965,4969 ****
+ rtx tem = x = force_const_mem (mode, x);
+ find_reloads_address (mode, &tem, XEXP (tem, 0), &XEXP (tem, 0),
+! opnum, type, ind_levels);
+ }
+
+--- 5053,5057 ----
+ rtx tem = x = force_const_mem (mode, x);
+ find_reloads_address (mode, &tem, XEXP (tem, 0), &XEXP (tem, 0),
+! opnum, type, ind_levels, NULL);
+ }
+
+*************** find_reloads_address_part (x, loc, class
+*** 4977,4981 ****
+ x = gen_rtx (PLUS, GET_MODE (x), XEXP (x, 0), tem);
+ find_reloads_address (mode, &tem, XEXP (tem, 0), &XEXP (tem, 0),
+! opnum, type, ind_levels);
+ }
+
+--- 5065,5069 ----
+ x = gen_rtx (PLUS, GET_MODE (x), XEXP (x, 0), tem);
+ find_reloads_address (mode, &tem, XEXP (tem, 0), &XEXP (tem, 0),
+! opnum, type, ind_levels, NULL);
+ }
+
+*************** find_equiv_reg (goal, insn, class, other
+*** 5518,5522 ****
+ and is also a register that appears in the address of GOAL. */
+
+! if (goal_mem && value == SET_DEST (PATTERN (where))
+ && refers_to_regno_for_reload_p (valueno,
+ (valueno
+--- 5606,5610 ----
+ and is also a register that appears in the address of GOAL. */
+
+! if (goal_mem && value == SET_DEST (single_set (where))
+ && refers_to_regno_for_reload_p (valueno,
+ (valueno
+*************** debug_reload()
+*** 5900,5904 ****
+
+ if (reload_nocombine[r])
+! fprintf (stderr, ", can combine", reload_nocombine[r]);
+
+ if (reload_secondary_p[r])
+--- 5988,5992 ----
+
+ if (reload_nocombine[r])
+! fprintf (stderr, ", can't combine %d", reload_nocombine[r]);
+
+ if (reload_secondary_p[r])
+diff -rcp2N gcc-2.7.2.2/reload1.c g77-new/reload1.c
+*** gcc-2.7.2.2/reload1.c Sun Nov 5 11:22:22 1995
+--- g77-new/reload1.c Sun Aug 10 18:47:00 1997
+*************** reload (first, global, dumpfile)
+*** 542,546 ****
+ Also find all paradoxical subregs and find largest such for each pseudo.
+ On machines with small register classes, record hard registers that
+! are used for user variables. These can never be used for spills. */
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+--- 542,548 ----
+ Also find all paradoxical subregs and find largest such for each pseudo.
+ On machines with small register classes, record hard registers that
+! are used for user variables. These can never be used for spills.
+! Also look for a "constant" NOTE_INSN_SETJMP. This means that all
+! caller-saved registers must be marked live. */
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+*************** reload (first, global, dumpfile)
+*** 548,551 ****
+--- 550,559 ----
+ rtx set = single_set (insn);
+
++ if (GET_CODE (insn) == NOTE && CONST_CALL_P (insn)
++ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
++ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
++ if (! call_used_regs[i])
++ regs_ever_live[i] = 1;
++
+ if (set != 0 && GET_CODE (SET_DEST (set)) == REG)
+ {
+*************** reload (first, global, dumpfile)
+*** 564,568 ****
+ if (GET_CODE (x) == MEM)
+ reg_equiv_memory_loc[i] = x;
+! else if (CONSTANT_P (x))
+ {
+ if (LEGITIMATE_CONSTANT_P (x))
+--- 572,578 ----
+ if (GET_CODE (x) == MEM)
+ reg_equiv_memory_loc[i] = x;
+! else if (CONSTANT_P (x)
+! && ! (GET_CODE (x) == CONST
+! && GET_CODE (XEXP (x, 0)) == MINUS))
+ {
+ if (LEGITIMATE_CONSTANT_P (x))
+*************** eliminate_regs (x, mem_mode, insn)
+*** 2886,2890 ****
+
+ /* Fall through to generic unary operation case. */
+- case USE:
+ case STRICT_LOW_PART:
+ case NEG: case NOT:
+--- 2896,2899 ----
+*************** eliminate_regs (x, mem_mode, insn)
+*** 2975,2978 ****
+--- 2984,3000 ----
+ return x;
+
++ case USE:
++       /* If using a register that is the source of an elimination we still
++ think can be performed, note it cannot be performed since we don't
++ know how this register is used. */
++ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
++ if (ep->from_rtx == XEXP (x, 0))
++ ep->can_eliminate = 0;
++
++ new = eliminate_regs (XEXP (x, 0), mem_mode, insn);
++ if (new != XEXP (x, 0))
++ return gen_rtx (code, GET_MODE (x), new);
++ return x;
++
+ case CLOBBER:
+ /* If clobbering a register that is the replacement register for an
+*************** gen_reload (out, in, opnum, type)
+*** 6736,6741 ****
+--- 6758,6765 ----
+ if (GET_CODE (in) == PLUS
+ && (GET_CODE (XEXP (in, 0)) == REG
++ || GET_CODE (XEXP (in, 0)) == SUBREG
+ || GET_CODE (XEXP (in, 0)) == MEM)
+ && (GET_CODE (XEXP (in, 1)) == REG
++ || GET_CODE (XEXP (in, 1)) == SUBREG
+ || CONSTANT_P (XEXP (in, 1))
+ || GET_CODE (XEXP (in, 1)) == MEM))
+*************** gen_reload (out, in, opnum, type)
+*** 6798,6807 ****
+ we emit below. */
+
+! if (CONSTANT_P (op1) || GET_CODE (op1) == MEM
+ || (GET_CODE (op1) == REG
+ && REGNO (op1) >= FIRST_PSEUDO_REGISTER))
+ tem = op0, op0 = op1, op1 = tem;
+
+! emit_insn (gen_move_insn (out, op0));
+
+ /* If OP0 and OP1 are the same, we can use OUT for OP1.
+--- 6822,6831 ----
+ we emit below. */
+
+! if (CONSTANT_P (op1) || GET_CODE (op1) == MEM || GET_CODE (op1) == SUBREG
+ || (GET_CODE (op1) == REG
+ && REGNO (op1) >= FIRST_PSEUDO_REGISTER))
+ tem = op0, op0 = op1, op1 = tem;
+
+! gen_reload (out, op0, opnum, type);
+
+ /* If OP0 and OP1 are the same, we can use OUT for OP1.
+*************** gen_reload (out, in, opnum, type)
+*** 6831,6835 ****
+ delete_insns_since (last);
+
+! emit_insn (gen_move_insn (out, op1));
+ emit_insn (gen_add2_insn (out, op0));
+ }
+--- 6855,6859 ----
+ delete_insns_since (last);
+
+! gen_reload (out, op1, opnum, type);
+ emit_insn (gen_add2_insn (out, op0));
+ }
+*************** gen_reload (out, in, opnum, type)
+*** 6852,6857 ****
+ in = gen_rtx (REG, GET_MODE (loc), REGNO (in));
+
+! emit_insn (gen_move_insn (loc, in));
+! emit_insn (gen_move_insn (out, loc));
+ }
+ #endif
+--- 6876,6881 ----
+ in = gen_rtx (REG, GET_MODE (loc), REGNO (in));
+
+! gen_reload (loc, in, opnum, type);
+! gen_reload (out, loc, opnum, type);
+ }
+ #endif
+diff -rcp2N gcc-2.7.2.2/rtl.c g77-new/rtl.c
+*** gcc-2.7.2.2/rtl.c Thu Jun 15 08:02:59 1995
+--- g77-new/rtl.c Thu Jul 10 20:09:06 1997
+*************** char *reg_note_name[] = { "", "REG_DEAD"
+*** 179,183 ****
+ "REG_NONNEG", "REG_NO_CONFLICT", "REG_UNUSED",
+ "REG_CC_SETTER", "REG_CC_USER", "REG_LABEL",
+! "REG_DEP_ANTI", "REG_DEP_OUTPUT" };
+
+ /* Allocate an rtx vector of N elements.
+--- 179,183 ----
+ "REG_NONNEG", "REG_NO_CONFLICT", "REG_UNUSED",
+ "REG_CC_SETTER", "REG_CC_USER", "REG_LABEL",
+! "REG_DEP_ANTI", "REG_DEP_OUTPUT", "REG_NOALIAS" };
+
+ /* Allocate an rtx vector of N elements.
+diff -rcp2N gcc-2.7.2.2/rtl.h g77-new/rtl.h
+*** gcc-2.7.2.2/rtl.h Thu Jun 15 08:03:16 1995
+--- g77-new/rtl.h Thu Jul 10 20:09:07 1997
+*************** enum reg_note { REG_DEAD = 1, REG_INC =
+*** 349,353 ****
+ REG_NONNEG = 8, REG_NO_CONFLICT = 9, REG_UNUSED = 10,
+ REG_CC_SETTER = 11, REG_CC_USER = 12, REG_LABEL = 13,
+! REG_DEP_ANTI = 14, REG_DEP_OUTPUT = 15 };
+
+ /* Define macros to extract and insert the reg-note kind in an EXPR_LIST. */
+--- 349,353 ----
+ REG_NONNEG = 8, REG_NO_CONFLICT = 9, REG_UNUSED = 10,
+ REG_CC_SETTER = 11, REG_CC_USER = 12, REG_LABEL = 13,
+! REG_DEP_ANTI = 14, REG_DEP_OUTPUT = 15, REG_NOALIAS = 16 };
+
+ /* Define macros to extract and insert the reg-note kind in an EXPR_LIST. */
+*************** extern char *reg_note_name[];
+*** 432,436 ****
+ #define NOTE_INSN_FUNCTION_BEG -13
+
+-
+ #if 0 /* These are not used, and I don't know what they were for. --rms. */
+ #define NOTE_DECL_NAME(INSN) ((INSN)->fld[3].rtstr)
+--- 432,435 ----
+*************** extern char *note_insn_name[];
+*** 576,579 ****
+--- 575,579 ----
+ /* For a TRAP_IF rtx, TRAP_CONDITION is an expression. */
+ #define TRAP_CONDITION(RTX) ((RTX)->fld[0].rtx)
++ #define TRAP_CODE(RTX) ((RTX)->fld[1].rtint)
+
+ /* 1 in a SYMBOL_REF if it addresses this function's constants pool. */
+*************** extern rtx eliminate_constant_term PROTO
+*** 817,820 ****
+--- 817,830 ----
+ extern rtx expand_complex_abs PROTO((enum machine_mode, rtx, rtx, int));
+ extern enum machine_mode choose_hard_reg_mode PROTO((int, int));
++ extern int rtx_varies_p PROTO((rtx));
++ extern int may_trap_p PROTO((rtx));
++ extern int side_effects_p PROTO((rtx));
++ extern int volatile_refs_p PROTO((rtx));
++ extern int volatile_insn_p PROTO((rtx));
++ extern void remove_note PROTO((rtx, rtx));
++ extern void note_stores PROTO((rtx, void (*)()));
++ extern int refers_to_regno_p PROTO((int, int, rtx, rtx *));
++ extern int reg_overlap_mentioned_p PROTO((rtx, rtx));
++
+
+ /* Maximum number of parallel sets and clobbers in any insn in this fn.
+*************** extern rtx *regno_reg_rtx;
+*** 967,968 ****
+--- 977,987 ----
+
+ extern int rtx_to_tree_code PROTO((enum rtx_code));
++
++ extern int true_dependence PROTO((rtx, enum machine_mode, rtx, int (*)()));
++ extern int read_dependence PROTO((rtx, rtx));
++ extern int anti_dependence PROTO((rtx, rtx));
++ extern int output_dependence PROTO((rtx, rtx));
++ extern void init_alias_analysis PROTO((void));
++ extern void end_alias_analysis PROTO((void));
++ extern void mark_user_reg PROTO((rtx));
++ extern void mark_reg_pointer PROTO((rtx));
+diff -rcp2N gcc-2.7.2.2/sched.c g77-new/sched.c
+*** gcc-2.7.2.2/sched.c Thu Jun 15 08:06:39 1995
+--- g77-new/sched.c Sun Aug 10 18:46:13 1997
+*************** Boston, MA 02111-1307, USA. */
+*** 126,129 ****
+--- 126,132 ----
+ #include "insn-attr.h"
+
++ extern char *reg_known_equiv_p;
++ extern rtx *reg_known_value;
++
+ #ifdef INSN_SCHEDULING
+ /* Arrays set up by scheduling for the same respective purposes as
+*************** static int *sched_reg_live_length;
+*** 143,146 ****
+--- 146,150 ----
+ by splitting insns. */
+ static rtx *reg_last_uses;
++ static int reg_last_uses_size;
+ static rtx *reg_last_sets;
+ static regset reg_pending_sets;
+*************** struct sometimes
+*** 294,302 ****
+
+ /* Forward declarations. */
+- static rtx canon_rtx PROTO((rtx));
+- static int rtx_equal_for_memref_p PROTO((rtx, rtx));
+- static rtx find_symbolic_term PROTO((rtx));
+- static int memrefs_conflict_p PROTO((int, rtx, int, rtx,
+- HOST_WIDE_INT));
+ static void add_dependence PROTO((rtx, rtx, enum reg_note));
+ static void remove_dependence PROTO((rtx, rtx));
+--- 298,301 ----
+*************** static int priority PROTO((rtx));
+*** 314,318 ****
+ static void free_pending_lists PROTO((void));
+ static void add_insn_mem_dependence PROTO((rtx *, rtx *, rtx, rtx));
+! static void flush_pending_lists PROTO((rtx));
+ static void sched_analyze_1 PROTO((rtx, rtx));
+ static void sched_analyze_2 PROTO((rtx, rtx));
+--- 313,317 ----
+ static void free_pending_lists PROTO((void));
+ static void add_insn_mem_dependence PROTO((rtx *, rtx *, rtx, rtx));
+! static void flush_pending_lists PROTO((rtx, int));
+ static void sched_analyze_1 PROTO((rtx, rtx));
+ static void sched_analyze_2 PROTO((rtx, rtx));
+*************** void schedule_insns PROTO((FILE *));
+*** 346,885 ****
+ #endif /* INSN_SCHEDULING */
+
+- #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
+-
+- /* Vector indexed by N giving the initial (unchanging) value known
+- for pseudo-register N. */
+- static rtx *reg_known_value;
+-
+- /* Vector recording for each reg_known_value whether it is due to a
+- REG_EQUIV note. Future passes (viz., reload) may replace the
+- pseudo with the equivalent expression and so we account for the
+- dependences that would be introduced if that happens. */
+- /* ??? This is a problem only on the Convex. The REG_EQUIV notes created in
+- assign_parms mention the arg pointer, and there are explicit insns in the
+- RTL that modify the arg pointer. Thus we must ensure that such insns don't
+- get scheduled across each other because that would invalidate the REG_EQUIV
+- notes. One could argue that the REG_EQUIV notes are wrong, but solving
+- the problem in the scheduler will likely give better code, so we do it
+- here. */
+- static char *reg_known_equiv_p;
+-
+- /* Indicates number of valid entries in reg_known_value. */
+- static int reg_known_value_size;
+-
+- static rtx
+- canon_rtx (x)
+- rtx x;
+- {
+- if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER
+- && REGNO (x) <= reg_known_value_size)
+- return reg_known_value[REGNO (x)];
+- else if (GET_CODE (x) == PLUS)
+- {
+- rtx x0 = canon_rtx (XEXP (x, 0));
+- rtx x1 = canon_rtx (XEXP (x, 1));
+-
+- if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
+- {
+- /* We can tolerate LO_SUMs being offset here; these
+- rtl are used for nothing other than comparisons. */
+- if (GET_CODE (x0) == CONST_INT)
+- return plus_constant_for_output (x1, INTVAL (x0));
+- else if (GET_CODE (x1) == CONST_INT)
+- return plus_constant_for_output (x0, INTVAL (x1));
+- return gen_rtx (PLUS, GET_MODE (x), x0, x1);
+- }
+- }
+- return x;
+- }
+-
+- /* Set up all info needed to perform alias analysis on memory references. */
+-
+- void
+- init_alias_analysis ()
+- {
+- int maxreg = max_reg_num ();
+- rtx insn;
+- rtx note;
+- rtx set;
+-
+- reg_known_value_size = maxreg;
+-
+- reg_known_value
+- = (rtx *) oballoc ((maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx))
+- - FIRST_PSEUDO_REGISTER;
+- bzero ((char *) (reg_known_value + FIRST_PSEUDO_REGISTER),
+- (maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx));
+-
+- reg_known_equiv_p
+- = (char *) oballoc ((maxreg -FIRST_PSEUDO_REGISTER) * sizeof (char))
+- - FIRST_PSEUDO_REGISTER;
+- bzero (reg_known_equiv_p + FIRST_PSEUDO_REGISTER,
+- (maxreg - FIRST_PSEUDO_REGISTER) * sizeof (char));
+-
+- /* Fill in the entries with known constant values. */
+- for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+- if ((set = single_set (insn)) != 0
+- && GET_CODE (SET_DEST (set)) == REG
+- && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
+- && (((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
+- && reg_n_sets[REGNO (SET_DEST (set))] == 1)
+- || (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != 0)
+- && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
+- {
+- int regno = REGNO (SET_DEST (set));
+- reg_known_value[regno] = XEXP (note, 0);
+- reg_known_equiv_p[regno] = REG_NOTE_KIND (note) == REG_EQUIV;
+- }
+-
+- /* Fill in the remaining entries. */
+- while (--maxreg >= FIRST_PSEUDO_REGISTER)
+- if (reg_known_value[maxreg] == 0)
+- reg_known_value[maxreg] = regno_reg_rtx[maxreg];
+- }
+-
+- /* Return 1 if X and Y are identical-looking rtx's.
+-
+- We use the data in reg_known_value above to see if two registers with
+- different numbers are, in fact, equivalent. */
+-
+- static int
+- rtx_equal_for_memref_p (x, y)
+- rtx x, y;
+- {
+- register int i;
+- register int j;
+- register enum rtx_code code;
+- register char *fmt;
+-
+- if (x == 0 && y == 0)
+- return 1;
+- if (x == 0 || y == 0)
+- return 0;
+- x = canon_rtx (x);
+- y = canon_rtx (y);
+-
+- if (x == y)
+- return 1;
+-
+- code = GET_CODE (x);
+- /* Rtx's of different codes cannot be equal. */
+- if (code != GET_CODE (y))
+- return 0;
+-
+- /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
+- (REG:SI x) and (REG:HI x) are NOT equivalent. */
+-
+- if (GET_MODE (x) != GET_MODE (y))
+- return 0;
+-
+- /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively. */
+-
+- if (code == REG)
+- return REGNO (x) == REGNO (y);
+- if (code == LABEL_REF)
+- return XEXP (x, 0) == XEXP (y, 0);
+- if (code == SYMBOL_REF)
+- return XSTR (x, 0) == XSTR (y, 0);
+-
+- /* For commutative operations, the RTX match if the operand match in any
+- order. Also handle the simple binary and unary cases without a loop. */
+- if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
+- return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
+- && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
+- || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
+- && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
+- else if (GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == '2')
+- return (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
+- && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)));
+- else if (GET_RTX_CLASS (code) == '1')
+- return rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0));
+-
+- /* Compare the elements. If any pair of corresponding elements
+- fail to match, return 0 for the whole things. */
+-
+- fmt = GET_RTX_FORMAT (code);
+- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+- {
+- switch (fmt[i])
+- {
+- case 'w':
+- if (XWINT (x, i) != XWINT (y, i))
+- return 0;
+- break;
+-
+- case 'n':
+- case 'i':
+- if (XINT (x, i) != XINT (y, i))
+- return 0;
+- break;
+-
+- case 'V':
+- case 'E':
+- /* Two vectors must have the same length. */
+- if (XVECLEN (x, i) != XVECLEN (y, i))
+- return 0;
+-
+- /* And the corresponding elements must match. */
+- for (j = 0; j < XVECLEN (x, i); j++)
+- if (rtx_equal_for_memref_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0)
+- return 0;
+- break;
+-
+- case 'e':
+- if (rtx_equal_for_memref_p (XEXP (x, i), XEXP (y, i)) == 0)
+- return 0;
+- break;
+-
+- case 'S':
+- case 's':
+- if (strcmp (XSTR (x, i), XSTR (y, i)))
+- return 0;
+- break;
+-
+- case 'u':
+- /* These are just backpointers, so they don't matter. */
+- break;
+-
+- case '0':
+- break;
+-
+- /* It is believed that rtx's at this level will never
+- contain anything but integers and other rtx's,
+- except for within LABEL_REFs and SYMBOL_REFs. */
+- default:
+- abort ();
+- }
+- }
+- return 1;
+- }
+-
+- /* Given an rtx X, find a SYMBOL_REF or LABEL_REF within
+- X and return it, or return 0 if none found. */
+-
+- static rtx
+- find_symbolic_term (x)
+- rtx x;
+- {
+- register int i;
+- register enum rtx_code code;
+- register char *fmt;
+-
+- code = GET_CODE (x);
+- if (code == SYMBOL_REF || code == LABEL_REF)
+- return x;
+- if (GET_RTX_CLASS (code) == 'o')
+- return 0;
+-
+- fmt = GET_RTX_FORMAT (code);
+- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+- {
+- rtx t;
+-
+- if (fmt[i] == 'e')
+- {
+- t = find_symbolic_term (XEXP (x, i));
+- if (t != 0)
+- return t;
+- }
+- else if (fmt[i] == 'E')
+- break;
+- }
+- return 0;
+- }
+-
+- /* Return nonzero if X and Y (memory addresses) could reference the
+- same location in memory. C is an offset accumulator. When
+- C is nonzero, we are testing aliases between X and Y + C.
+- XSIZE is the size in bytes of the X reference,
+- similarly YSIZE is the size in bytes for Y.
+-
+- If XSIZE or YSIZE is zero, we do not know the amount of memory being
+- referenced (the reference was BLKmode), so make the most pessimistic
+- assumptions.
+-
+- We recognize the following cases of non-conflicting memory:
+-
+- (1) addresses involving the frame pointer cannot conflict
+- with addresses involving static variables.
+- (2) static variables with different addresses cannot conflict.
+-
+- Nice to notice that varying addresses cannot conflict with fp if no
+- local variables had their addresses taken, but that's too hard now. */
+-
+- /* ??? In Fortran, references to a array parameter can never conflict with
+- another array parameter. */
+-
+- static int
+- memrefs_conflict_p (xsize, x, ysize, y, c)
+- rtx x, y;
+- int xsize, ysize;
+- HOST_WIDE_INT c;
+- {
+- if (GET_CODE (x) == HIGH)
+- x = XEXP (x, 0);
+- else if (GET_CODE (x) == LO_SUM)
+- x = XEXP (x, 1);
+- else
+- x = canon_rtx (x);
+- if (GET_CODE (y) == HIGH)
+- y = XEXP (y, 0);
+- else if (GET_CODE (y) == LO_SUM)
+- y = XEXP (y, 1);
+- else
+- y = canon_rtx (y);
+-
+- if (rtx_equal_for_memref_p (x, y))
+- return (xsize == 0 || ysize == 0 ||
+- (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
+-
+- if (y == frame_pointer_rtx || y == hard_frame_pointer_rtx
+- || y == stack_pointer_rtx)
+- {
+- rtx t = y;
+- int tsize = ysize;
+- y = x; ysize = xsize;
+- x = t; xsize = tsize;
+- }
+-
+- if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
+- || x == stack_pointer_rtx)
+- {
+- rtx y1;
+-
+- if (CONSTANT_P (y))
+- return 0;
+-
+- if (GET_CODE (y) == PLUS
+- && canon_rtx (XEXP (y, 0)) == x
+- && (y1 = canon_rtx (XEXP (y, 1)))
+- && GET_CODE (y1) == CONST_INT)
+- {
+- c += INTVAL (y1);
+- return (xsize == 0 || ysize == 0
+- || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
+- }
+-
+- if (GET_CODE (y) == PLUS
+- && (y1 = canon_rtx (XEXP (y, 0)))
+- && CONSTANT_P (y1))
+- return 0;
+-
+- return 1;
+- }
+-
+- if (GET_CODE (x) == PLUS)
+- {
+- /* The fact that X is canonicalized means that this
+- PLUS rtx is canonicalized. */
+- rtx x0 = XEXP (x, 0);
+- rtx x1 = XEXP (x, 1);
+-
+- if (GET_CODE (y) == PLUS)
+- {
+- /* The fact that Y is canonicalized means that this
+- PLUS rtx is canonicalized. */
+- rtx y0 = XEXP (y, 0);
+- rtx y1 = XEXP (y, 1);
+-
+- if (rtx_equal_for_memref_p (x1, y1))
+- return memrefs_conflict_p (xsize, x0, ysize, y0, c);
+- if (rtx_equal_for_memref_p (x0, y0))
+- return memrefs_conflict_p (xsize, x1, ysize, y1, c);
+- if (GET_CODE (x1) == CONST_INT)
+- if (GET_CODE (y1) == CONST_INT)
+- return memrefs_conflict_p (xsize, x0, ysize, y0,
+- c - INTVAL (x1) + INTVAL (y1));
+- else
+- return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
+- else if (GET_CODE (y1) == CONST_INT)
+- return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
+-
+- /* Handle case where we cannot understand iteration operators,
+- but we notice that the base addresses are distinct objects. */
+- x = find_symbolic_term (x);
+- if (x == 0)
+- return 1;
+- y = find_symbolic_term (y);
+- if (y == 0)
+- return 1;
+- return rtx_equal_for_memref_p (x, y);
+- }
+- else if (GET_CODE (x1) == CONST_INT)
+- return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
+- }
+- else if (GET_CODE (y) == PLUS)
+- {
+- /* The fact that Y is canonicalized means that this
+- PLUS rtx is canonicalized. */
+- rtx y0 = XEXP (y, 0);
+- rtx y1 = XEXP (y, 1);
+-
+- if (GET_CODE (y1) == CONST_INT)
+- return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
+- else
+- return 1;
+- }
+-
+- if (GET_CODE (x) == GET_CODE (y))
+- switch (GET_CODE (x))
+- {
+- case MULT:
+- {
+- /* Handle cases where we expect the second operands to be the
+- same, and check only whether the first operand would conflict
+- or not. */
+- rtx x0, y0;
+- rtx x1 = canon_rtx (XEXP (x, 1));
+- rtx y1 = canon_rtx (XEXP (y, 1));
+- if (! rtx_equal_for_memref_p (x1, y1))
+- return 1;
+- x0 = canon_rtx (XEXP (x, 0));
+- y0 = canon_rtx (XEXP (y, 0));
+- if (rtx_equal_for_memref_p (x0, y0))
+- return (xsize == 0 || ysize == 0
+- || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
+-
+- /* Can't properly adjust our sizes. */
+- if (GET_CODE (x1) != CONST_INT)
+- return 1;
+- xsize /= INTVAL (x1);
+- ysize /= INTVAL (x1);
+- c /= INTVAL (x1);
+- return memrefs_conflict_p (xsize, x0, ysize, y0, c);
+- }
+- }
+-
+- if (CONSTANT_P (x))
+- {
+- if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
+- {
+- c += (INTVAL (y) - INTVAL (x));
+- return (xsize == 0 || ysize == 0
+- || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
+- }
+-
+- if (GET_CODE (x) == CONST)
+- {
+- if (GET_CODE (y) == CONST)
+- return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
+- ysize, canon_rtx (XEXP (y, 0)), c);
+- else
+- return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
+- ysize, y, c);
+- }
+- if (GET_CODE (y) == CONST)
+- return memrefs_conflict_p (xsize, x, ysize,
+- canon_rtx (XEXP (y, 0)), c);
+-
+- if (CONSTANT_P (y))
+- return (rtx_equal_for_memref_p (x, y)
+- && (xsize == 0 || ysize == 0
+- || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)));
+-
+- return 1;
+- }
+- return 1;
+- }
+-
+- /* Functions to compute memory dependencies.
+-
+- Since we process the insns in execution order, we can build tables
+- to keep track of what registers are fixed (and not aliased), what registers
+- are varying in known ways, and what registers are varying in unknown
+- ways.
+-
+- If both memory references are volatile, then there must always be a
+- dependence between the two references, since their order can not be
+- changed. A volatile and non-volatile reference can be interchanged
+- though.
+-
+- A MEM_IN_STRUCT reference at a non-QImode varying address can never
+- conflict with a non-MEM_IN_STRUCT reference at a fixed address. We must
+- allow QImode aliasing because the ANSI C standard allows character
+- pointers to alias anything. We are assuming that characters are
+- always QImode here. */
+-
+- /* Read dependence: X is read after read in MEM takes place. There can
+- only be a dependence here if both reads are volatile. */
+-
+- int
+- read_dependence (mem, x)
+- rtx mem;
+- rtx x;
+- {
+- return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
+- }
+-
+- /* True dependence: X is read after store in MEM takes place. */
+-
+- int
+- true_dependence (mem, x)
+- rtx mem;
+- rtx x;
+- {
+- /* If X is an unchanging read, then it can't possibly conflict with any
+- non-unchanging store. It may conflict with an unchanging write though,
+- because there may be a single store to this address to initialize it.
+- Just fall through to the code below to resolve the case where we have
+- both an unchanging read and an unchanging write. This won't handle all
+- cases optimally, but the possible performance loss should be
+- negligible. */
+- if (RTX_UNCHANGING_P (x) && ! RTX_UNCHANGING_P (mem))
+- return 0;
+-
+- return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
+- || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
+- SIZE_FOR_MODE (x), XEXP (x, 0), 0)
+- && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
+- && GET_MODE (mem) != QImode
+- && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
+- && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
+- && GET_MODE (x) != QImode
+- && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
+- }
+-
+- /* Anti dependence: X is written after read in MEM takes place. */
+-
+- int
+- anti_dependence (mem, x)
+- rtx mem;
+- rtx x;
+- {
+- /* If MEM is an unchanging read, then it can't possibly conflict with
+- the store to X, because there is at most one store to MEM, and it must
+- have occurred somewhere before MEM. */
+- if (RTX_UNCHANGING_P (mem))
+- return 0;
+-
+- return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
+- || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
+- SIZE_FOR_MODE (x), XEXP (x, 0), 0)
+- && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
+- && GET_MODE (mem) != QImode
+- && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
+- && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
+- && GET_MODE (x) != QImode
+- && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
+- }
+-
+- /* Output dependence: X is written after store in MEM takes place. */
+-
+- int
+- output_dependence (mem, x)
+- rtx mem;
+- rtx x;
+- {
+- return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
+- || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
+- SIZE_FOR_MODE (x), XEXP (x, 0), 0)
+- && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
+- && GET_MODE (mem) != QImode
+- && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
+- && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
+- && GET_MODE (x) != QImode
+- && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
+- }
+-
+ /* Helper functions for instruction scheduling. */
+
+--- 345,348 ----
+*************** add_insn_mem_dependence (insn_list, mem_
+*** 1609,1621 ****
+
+ /* Make a dependency between every memory reference on the pending lists
+! and INSN, thus flushing the pending lists. */
+
+ static void
+! flush_pending_lists (insn)
+ rtx insn;
+ {
+ rtx link;
+
+! while (pending_read_insns)
+ {
+ add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);
+--- 1072,1086 ----
+
+ /* Make a dependency between every memory reference on the pending lists
+! and INSN, thus flushing the pending lists. If ONLY_WRITE, don't flush
+! the read list. */
+
+ static void
+! flush_pending_lists (insn, only_write)
+ rtx insn;
++ int only_write;
+ {
+ rtx link;
+
+! while (pending_read_insns && ! only_write)
+ {
+ add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);
+*************** sched_analyze_1 (x, insn)
+*** 1746,1750 ****
+ this flush occurs 8 times for sparc, and 10 times for m88k using
+ the number 32. */
+! flush_pending_lists (insn);
+ }
+ else
+--- 1211,1215 ----
+ this flush occurs 8 times for sparc, and 10 times for m88k using
+ the number 32. */
+! flush_pending_lists (insn, 0);
+ }
+ else
+*************** sched_analyze_2 (x, insn)
+*** 1922,1926 ****
+ /* If a dependency already exists, don't create a new one. */
+ if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
+! if (true_dependence (XEXP (pending_mem, 0), x))
+ add_dependence (insn, XEXP (pending, 0), 0);
+
+--- 1387,1392 ----
+ /* If a dependency already exists, don't create a new one. */
+ if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
+! if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
+! x, rtx_varies_p))
+ add_dependence (insn, XEXP (pending, 0), 0);
+
+*************** sched_analyze_2 (x, insn)
+*** 1968,1972 ****
+ reg_pending_sets_all = 1;
+
+! flush_pending_lists (insn);
+ }
+
+--- 1434,1438 ----
+ reg_pending_sets_all = 1;
+
+! flush_pending_lists (insn, 0);
+ }
+
+*************** sched_analyze_insn (x, insn, loop_notes)
+*** 2021,2025 ****
+ register RTX_CODE code = GET_CODE (x);
+ rtx link;
+! int maxreg = max_reg_num ();
+ int i;
+
+--- 1487,1491 ----
+ register RTX_CODE code = GET_CODE (x);
+ rtx link;
+! int maxreg = reg_last_uses_size;
+ int i;
+
+*************** sched_analyze_insn (x, insn, loop_notes)
+*** 2058,2062 ****
+ if (loop_notes)
+ {
+! int max_reg = max_reg_num ();
+ rtx link;
+
+--- 1524,1528 ----
+ if (loop_notes)
+ {
+! int max_reg = reg_last_uses_size;
+ rtx link;
+
+*************** sched_analyze_insn (x, insn, loop_notes)
+*** 2072,2076 ****
+ reg_pending_sets_all = 1;
+
+! flush_pending_lists (insn);
+
+ link = loop_notes;
+--- 1538,1542 ----
+ reg_pending_sets_all = 1;
+
+! flush_pending_lists (insn, 0);
+
+ link = loop_notes;
+*************** sched_analyze (head, tail)
+*** 2202,2207 ****
+ && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
+ {
+! int max_reg = max_reg_num ();
+! for (i = 0; i < max_reg; i++)
+ {
+ for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
+--- 1668,1672 ----
+ && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
+ {
+! for (i = 0; i < reg_last_uses_size; i++)
+ {
+ for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
+*************** sched_analyze (head, tail)
+*** 2247,2259 ****
+ loop_notes = 0;
+
+! /* We don't need to flush memory for a function call which does
+! not involve memory. */
+! if (! CONST_CALL_P (insn))
+! {
+! /* In the absence of interprocedural alias analysis,
+! we must flush all pending reads and writes, and
+! start new dependencies starting from here. */
+! flush_pending_lists (insn);
+! }
+
+ /* Depend this function call (actually, the user of this
+--- 1712,1720 ----
+ loop_notes = 0;
+
+! /* In the absence of interprocedural alias analysis, we must flush
+! all pending reads and writes, and start new dependencies starting
+! from here. But only flush writes for constant calls (which may
+! be passed a pointer to something we haven't written yet). */
+! flush_pending_lists (insn, CONST_CALL_P (insn));
+
+ /* Depend this function call (actually, the user of this
+*************** sched_analyze (head, tail)
+*** 2264,2270 ****
+ else if (GET_CODE (insn) == NOTE
+ && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
+! || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END))
+! loop_notes = gen_rtx (EXPR_LIST, REG_DEAD,
+! GEN_INT (NOTE_LINE_NUMBER (insn)), loop_notes);
+
+ if (insn == tail)
+--- 1725,1736 ----
+ else if (GET_CODE (insn) == NOTE
+ && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
+! || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
+! || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
+! && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
+! {
+! loop_notes = gen_rtx (EXPR_LIST, REG_DEAD,
+! GEN_INT (NOTE_LINE_NUMBER (insn)), loop_notes);
+! CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
+! }
+
+ if (insn == tail)
+*************** sched_note_set (b, x, death)
+*** 2372,2380 ****
+
+ #define SCHED_SORT(READY, NEW_READY, OLD_READY) \
+! do { if ((NEW_READY) - (OLD_READY) == 1) \
+! swap_sort (READY, NEW_READY); \
+! else if ((NEW_READY) - (OLD_READY) > 1) \
+! qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); } \
+! while (0)
+
+ /* Returns a positive value if y is preferred; returns a negative value if
+--- 1838,1845 ----
+
+ #define SCHED_SORT(READY, NEW_READY, OLD_READY) \
+! if ((NEW_READY) - (OLD_READY) == 1) \
+! swap_sort (READY, NEW_READY); \
+! else if ((NEW_READY) - (OLD_READY) > 1) \
+! qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); else \
+
+ /* Returns a positive value if y is preferred; returns a negative value if
+*************** reemit_notes (insn, last)
+*** 3128,3132 ****
+ {
+ if (INTVAL (XEXP (note, 0)) == NOTE_INSN_SETJMP)
+! emit_note_after (INTVAL (XEXP (note, 0)), insn);
+ else
+ last = emit_note_before (INTVAL (XEXP (note, 0)), last);
+--- 2593,2598 ----
+ {
+ if (INTVAL (XEXP (note, 0)) == NOTE_INSN_SETJMP)
+! CONST_CALL_P (emit_note_after (INTVAL (XEXP (note, 0)), insn))
+! = CONST_CALL_P (note);
+ else
+ last = emit_note_before (INTVAL (XEXP (note, 0)), last);
+*************** schedule_block (b, file)
+*** 3174,3178 ****
+ b, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
+
+! i = max_reg_num ();
+ reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
+ bzero ((char *) reg_last_uses, i * sizeof (rtx));
+--- 2640,2644 ----
+ b, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
+
+! reg_last_uses_size = i = max_reg_num ();
+ reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
+ bzero ((char *) reg_last_uses, i * sizeof (rtx));
+*************** schedule_block (b, file)
+*** 3800,3804 ****
+ made live again later. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+! if (call_used_regs[i] || global_regs[i])
+ {
+ register int offset = i / REGSET_ELT_BITS;
+--- 3266,3271 ----
+ made live again later. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+! if ((call_used_regs[i] && ! fixed_regs[i])
+! || global_regs[i])
+ {
+ register int offset = i / REGSET_ELT_BITS;
+*************** schedule_insns (dump_file)
+*** 4717,4721 ****
+ bcopy ((char *) reg_n_deaths, (char *) sched_reg_n_deaths,
+ max_regno * sizeof (short));
+- init_alias_analysis ();
+ }
+ else
+--- 4184,4187 ----
+*************** schedule_insns (dump_file)
+*** 4726,4732 ****
+ bb_dead_regs = 0;
+ bb_live_regs = 0;
+- if (! flag_schedule_insns)
+- init_alias_analysis ();
+ }
+
+ if (write_symbols != NO_DEBUG)
+--- 4192,4213 ----
+ bb_dead_regs = 0;
+ bb_live_regs = 0;
+ }
++ init_alias_analysis ();
++ #if 0
++ if (dump_file)
++ {
++ extern rtx *reg_base_value;
++ extern int reg_base_value_size;
++ int i;
++ for (i = 0; i < reg_base_value_size; i++)
++ if (reg_base_value[i])
++ {
++ fprintf (dump_file, ";; reg_base_value[%d] = ", i);
++ print_rtl (dump_file, reg_base_value[i]);
++ fputc ('\n', dump_file);
++ }
++ }
++ #endif
++
+
+ if (write_symbols != NO_DEBUG)
+diff -rcp2N gcc-2.7.2.2/sdbout.c g77-new/sdbout.c
+*** gcc-2.7.2.2/sdbout.c Thu Jun 15 08:07:11 1995
+--- g77-new/sdbout.c Mon Aug 11 01:42:22 1997
+*************** plain_type_1 (type, level)
+*** 539,543 ****
+ sdb_dims[sdb_n_dims++]
+ = (TYPE_DOMAIN (type)
+! ? TREE_INT_CST_LOW (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) + 1
+ : 0);
+ return PUSH_DERIVED_LEVEL (DT_ARY, m);
+--- 539,546 ----
+ sdb_dims[sdb_n_dims++]
+ = (TYPE_DOMAIN (type)
+! && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) == INTEGER_CST
+! && TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) == INTEGER_CST
+! ? (TREE_INT_CST_LOW (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))
+! - TREE_INT_CST_LOW (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) + 1)
+ : 0);
+ return PUSH_DERIVED_LEVEL (DT_ARY, m);
+diff -rcp2N gcc-2.7.2.2/stmt.c g77-new/stmt.c
+*** gcc-2.7.2.2/stmt.c Tue Sep 12 19:01:54 1995
+--- g77-new/stmt.c Sun Aug 10 18:46:56 1997
+*************** fixup_gotos (thisblock, stack_level, cle
+*** 1244,1249 ****
+ poplevel (1, 0, 0);
+ end_sequence ();
+! f->before_jump
+! = emit_insns_after (cleanup_insns, f->before_jump);
+
+ f->cleanup_list_list = TREE_CHAIN (lists);
+--- 1244,1250 ----
+ poplevel (1, 0, 0);
+ end_sequence ();
+! if (cleanup_insns != 0)
+! f->before_jump
+! = emit_insns_after (cleanup_insns, f->before_jump);
+
+ f->cleanup_list_list = TREE_CHAIN (lists);
+*************** expand_expr_stmt (exp)
+*** 1721,1725 ****
+
+ last_expr_type = TREE_TYPE (exp);
+! if (! flag_syntax_only)
+ last_expr_value = expand_expr (exp,
+ (expr_stmts_for_value
+--- 1722,1726 ----
+
+ last_expr_type = TREE_TYPE (exp);
+! if (! flag_syntax_only || expr_stmts_for_value)
+ last_expr_value = expand_expr (exp,
+ (expr_stmts_for_value
+*************** expand_end_bindings (vars, mark_ends, do
+*** 3160,3163 ****
+--- 3161,3169 ----
+ #endif
+
++ #ifdef HAVE_nonlocal_goto_receiver
++ if (HAVE_nonlocal_goto_receiver)
++ emit_insn (gen_nonlocal_goto_receiver ());
++ #endif
++
+ /* The handler expects the desired label address in the static chain
+ register. It tests the address and does an appropriate jump
+*************** expand_decl (decl)
+*** 3369,3393 ****
+ = promote_mode (type, DECL_MODE (decl), &unsignedp, 0);
+
+! if (TREE_CODE (type) == COMPLEX_TYPE)
+! {
+! rtx realpart, imagpart;
+! enum machine_mode partmode = TYPE_MODE (TREE_TYPE (type));
+
+! /* For a complex type variable, make a CONCAT of two pseudos
+! so that the real and imaginary parts
+! can be allocated separately. */
+! realpart = gen_reg_rtx (partmode);
+! REG_USERVAR_P (realpart) = 1;
+! imagpart = gen_reg_rtx (partmode);
+! REG_USERVAR_P (imagpart) = 1;
+! DECL_RTL (decl) = gen_rtx (CONCAT, reg_mode, realpart, imagpart);
+! }
+! else
+! {
+! DECL_RTL (decl) = gen_reg_rtx (reg_mode);
+! if (TREE_CODE (type) == POINTER_TYPE)
+! mark_reg_pointer (DECL_RTL (decl));
+! REG_USERVAR_P (DECL_RTL (decl)) = 1;
+! }
+ }
+ else if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST)
+--- 3375,3383 ----
+ = promote_mode (type, DECL_MODE (decl), &unsignedp, 0);
+
+! DECL_RTL (decl) = gen_reg_rtx (reg_mode);
+! mark_user_reg (DECL_RTL (decl));
+
+! if (TREE_CODE (type) == POINTER_TYPE)
+! mark_reg_pointer (DECL_RTL (decl));
+ }
+ else if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST)
+*************** expand_decl (decl)
+*** 3462,3468 ****
+ free_temp_slots ();
+
+! /* Allocate space on the stack for the variable. */
+ address = allocate_dynamic_stack_space (size, NULL_RTX,
+! DECL_ALIGN (decl));
+
+ /* Reference the variable indirect through that rtx. */
+--- 3452,3461 ----
+ free_temp_slots ();
+
+! /* Allocate space on the stack for the variable. Note that
+! DECL_ALIGN says how the variable is to be aligned and we
+! cannot use it to conclude anything about the alignment of
+! the size. */
+ address = allocate_dynamic_stack_space (size, NULL_RTX,
+! TYPE_ALIGN (TREE_TYPE (decl)));
+
+ /* Reference the variable indirect through that rtx. */
+diff -rcp2N gcc-2.7.2.2/stor-layout.c g77-new/stor-layout.c
+*** gcc-2.7.2.2/stor-layout.c Thu Feb 20 19:24:20 1997
+--- g77-new/stor-layout.c Mon Aug 11 06:47:50 1997
+*************** layout_decl (decl, known_align)
+*** 255,259 ****
+ if (maximum_field_alignment != 0)
+ DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), maximum_field_alignment);
+! else if (flag_pack_struct)
+ DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);
+ }
+--- 255,259 ----
+ if (maximum_field_alignment != 0)
+ DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), maximum_field_alignment);
+! else if (DECL_PACKED (decl))
+ DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);
+ }
+*************** layout_decl (decl, known_align)
+*** 261,265 ****
+ if (DECL_BIT_FIELD (decl)
+ && TYPE_SIZE (type) != 0
+! && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
+ {
+ register enum machine_mode xmode
+--- 261,266 ----
+ if (DECL_BIT_FIELD (decl)
+ && TYPE_SIZE (type) != 0
+! && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+! && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
+ {
+ register enum machine_mode xmode
+*************** layout_decl (decl, known_align)
+*** 278,281 ****
+--- 279,291 ----
+ }
+
++ /* Turn off DECL_BIT_FIELD if we won't need it set. */
++ if (DECL_BIT_FIELD (decl) && TYPE_MODE (type) == BLKmode
++ && known_align % TYPE_ALIGN (type) == 0
++ && DECL_SIZE (decl) != 0
++ && (TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST
++ || (TREE_INT_CST_LOW (DECL_SIZE (decl)) % BITS_PER_UNIT) == 0)
++ && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
++ DECL_BIT_FIELD (decl) = 0;
++
+ /* Evaluate nonconstant size only once, either now or as soon as safe. */
+ if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
+*************** layout_record (rec)
+*** 380,384 ****
+ if (maximum_field_alignment != 0)
+ type_align = MIN (type_align, maximum_field_alignment);
+! else if (flag_pack_struct)
+ type_align = MIN (type_align, BITS_PER_UNIT);
+
+--- 390,394 ----
+ if (maximum_field_alignment != 0)
+ type_align = MIN (type_align, maximum_field_alignment);
+! else if (TYPE_PACKED (rec))
+ type_align = MIN (type_align, BITS_PER_UNIT);
+
+*************** layout_record (rec)
+*** 422,428 ****
+ && DECL_BIT_FIELD_TYPE (field)
+ && !DECL_PACKED (field)
+- /* If #pragma pack is in effect, turn off this feature. */
+ && maximum_field_alignment == 0
+- && !flag_pack_struct
+ && !integer_zerop (DECL_SIZE (field)))
+ {
+--- 432,436 ----
+*************** layout_record (rec)
+*** 459,463 ****
+ if (maximum_field_alignment != 0)
+ type_align = MIN (type_align, maximum_field_alignment);
+! else if (flag_pack_struct)
+ type_align = MIN (type_align, BITS_PER_UNIT);
+
+--- 467,471 ----
+ if (maximum_field_alignment != 0)
+ type_align = MIN (type_align, maximum_field_alignment);
+! else if (TYPE_PACKED (rec))
+ type_align = MIN (type_align, BITS_PER_UNIT);
+
+*************** layout_record (rec)
+*** 500,505 ****
+ /* Do nothing. */;
+ else if (TREE_CODE (dsize) == INTEGER_CST
+ && TREE_INT_CST_HIGH (dsize) == 0
+! && TREE_INT_CST_LOW (dsize) + const_size > const_size)
+ /* Use const_size if there's no overflow. */
+ const_size += TREE_INT_CST_LOW (dsize);
+--- 508,514 ----
+ /* Do nothing. */;
+ else if (TREE_CODE (dsize) == INTEGER_CST
++ && ! TREE_CONSTANT_OVERFLOW (dsize)
+ && TREE_INT_CST_HIGH (dsize) == 0
+! && TREE_INT_CST_LOW (dsize) + const_size >= const_size)
+ /* Use const_size if there's no overflow. */
+ const_size += TREE_INT_CST_LOW (dsize);
+*************** get_best_mode (bitsize, bitpos, align, l
+*** 1172,1175 ****
+--- 1181,1192 ----
+ enum machine_mode mode;
+ int unit;
++
++ if (bitpos < 0)
++ {
++ /* For correct calculations and convenience, bias negative bitpos
++ to become a non-negative value in [1, bitsize], such that
++ the relative bit offset to a multiple of bitsize is preserved. */
++ bitpos = bitsize - ((-bitpos) % bitsize);
++ }
+
+ /* Find the narrowest integer mode that contains the bit field. */
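The get_best_mode change above biases a negative bitpos into [1, bitsize] while preserving its offset modulo bitsize. A minimal standalone C sketch of that arithmetic, illustrative only and not part of the patch (the helper name is made up):

    #include <stdio.h>

    /* Same bias as the patched get_best_mode: map a negative BITPOS to a
       value in [1, BITSIZE] that is congruent to it modulo BITSIZE.  */
    static int
    bias_bitpos (int bitpos, int bitsize)
    {
      if (bitpos < 0)
        bitpos = bitsize - ((-bitpos) % bitsize);
      return bitpos;
    }

    int
    main (void)
    {
      printf ("%d\n", bias_bitpos (-3, 8));   /* 5, and -3 == 5 (mod 8) */
      printf ("%d\n", bias_bitpos (-8, 8));   /* 8, and -8 == 8 (mod 8) */
      return 0;
    }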
+diff -rcp2N gcc-2.7.2.2/stupid.c g77-new/stupid.c
+*** gcc-2.7.2.2/stupid.c Sun Oct 29 07:45:22 1995
+--- g77-new/stupid.c Sun Aug 10 18:46:01 1997
+*************** static int *uid_suid;
+*** 66,69 ****
+--- 66,74 ----
+ static int last_call_suid;
+
++ /* Record the suid of the last NOTE_INSN_SETJMP
++ so we can tell whether a pseudo reg crosses any setjmp. */
++
++ static int last_setjmp_suid;
++
+ /* Element N is suid of insn where life span of pseudo reg N ends.
+ Element is 0 if register N has not been seen yet on backward scan. */
+*************** static char *regs_live;
+*** 89,92 ****
+--- 94,101 ----
+ static char *regs_change_size;
+
++ /* Indexed by reg number, nonzero if reg crosses a setjmp. */
++
++ static char *regs_crosses_setjmp;
++
+ /* Indexed by insn's suid, the set of hard regs live after that insn. */
+
+*************** stupid_life_analysis (f, nregs, file)
+*** 149,152 ****
+--- 158,162 ----
+
+ last_call_suid = i + 1;
++ last_setjmp_suid = i + 1;
+ max_suid = i + 1;
+
+*************** stupid_life_analysis (f, nregs, file)
+*** 167,170 ****
+--- 177,183 ----
+ bzero ((char *) regs_change_size, nregs * sizeof (char));
+
++ regs_crosses_setjmp = (char *) alloca (nregs * sizeof (char));
++ bzero ((char *) regs_crosses_setjmp, nregs * sizeof (char));
++
+ reg_renumber = (short *) oballoc (nregs * sizeof (short));
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+*************** stupid_life_analysis (f, nregs, file)
+*** 216,219 ****
+--- 229,236 ----
+ stupid_mark_refs (PATTERN (insn), insn);
+
++ if (GET_CODE (insn) == NOTE
++ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
++ last_setjmp_suid = INSN_SUID (insn);
++
+ /* Mark all call-clobbered regs as live after each call insn
+ so that a pseudo whose life span includes this insn
+*************** stupid_life_analysis (f, nregs, file)
+*** 254,259 ****
+ register int r = reg_order[i];
+
+! /* Some regnos disappear from the rtl. Ignore them to avoid crash. */
+! if (regno_reg_rtx[r] == 0)
+ continue;
+
+--- 271,277 ----
+ register int r = reg_order[i];
+
+! /* Some regnos disappear from the rtl. Ignore them to avoid crash.
+! Also don't allocate registers that cross a setjmp. */
+! if (regno_reg_rtx[r] == 0 || regs_crosses_setjmp[r])
+ continue;
+
+*************** stupid_reg_compare (r1p, r2p)
+*** 309,314 ****
+ that can hold a value of machine-mode MODE
+ (but actually we test only the first of the block for holding MODE)
+! currently free from after insn whose suid is BIRTH
+! through the insn whose suid is DEATH,
+ and return the number of the first of them.
+ Return -1 if such a block cannot be found.
+--- 327,332 ----
+ that can hold a value of machine-mode MODE
+ (but actually we test only the first of the block for holding MODE)
+! currently free from after insn whose suid is BORN_INSN
+! through the insn whose suid is DEAD_INSN,
+ and return the number of the first of them.
+ Return -1 if such a block cannot be found.
+*************** stupid_find_reg (call_preserved, class,
+*** 338,341 ****
+--- 356,366 ----
+ #endif
+
++ /* If this register's life is more than 5,000 insns, we probably
++ can't allocate it, so don't waste the time trying. This avoids
++ quadratic behavior on programs that have regularly-occurring
++ SAVE_EXPRs. */
++ if (dead_insn > born_insn + 5000)
++ return -1;
++
+ COPY_HARD_REG_SET (used,
+ call_preserved ? call_used_reg_set : fixed_reg_set);
+*************** stupid_mark_refs (x, insn)
+*** 488,491 ****
+--- 513,519 ----
+ if (last_call_suid < reg_where_dead[regno])
+ reg_n_calls_crossed[regno] += 1;
++
++ if (last_setjmp_suid < reg_where_dead[regno])
++ regs_crosses_setjmp[regno] = 1;
+ }
+ }
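The stupid.c changes above keep any pseudo whose life crosses a NOTE_INSN_SETJMP out of hard registers. A small self-contained example of the situation being guarded against, illustrative only and not taken from the patch:

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf env;

    static void
    bounce (void)
    {
      longjmp (env, 1);
    }

    int
    main (void)
    {
      int n = 1;                    /* live across the setjmp below */
      if (setjmp (env) == 0)
        {
          n = 2;                    /* changed between setjmp and longjmp */
          bounce ();
        }
      /* If N had been allocated to a call-clobbered register, its value
         here would be indeterminate after the longjmp; keeping such
         pseudos in memory (or declaring them volatile) avoids that.  */
      printf ("%d\n", n);
      return 0;
    }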
+diff -rcp2N gcc-2.7.2.2/toplev.c g77-new/toplev.c
+*** gcc-2.7.2.2/toplev.c Fri Oct 20 17:56:35 1995
+--- g77-new/toplev.c Sun Aug 10 18:43:36 1997
+*************** int flag_unroll_loops;
+*** 388,391 ****
+--- 388,405 ----
+ int flag_unroll_all_loops;
+
++ /* Nonzero forces all invariant computations in loops to be moved
++ outside the loop. */
++
++ int flag_move_all_movables = 0;
++
++ /* Nonzero forces all general induction variables in loops to be
++ strength reduced. */
++
++ int flag_reduce_all_givs = 0;
++
++ /* Nonzero gets another run of loop_optimize performed. */
++
++ int flag_rerun_loop_opt = 0;
++
+ /* Nonzero for -fwritable-strings:
+ store string constants in data segment and don't uniquize them. */
+*************** int flag_gnu_linker = 1;
+*** 522,525 ****
+--- 536,550 ----
+ int flag_pack_struct = 0;
+
++ /* 1 if alias checking is on (by default, when -O). */
++ int flag_alias_check = 0;
++
++ /* 0 if pointer arguments may alias each other. True in C.
++ 1 if pointer arguments may not alias each other but may alias
++ global variables.
++ 2 if pointer arguments may not alias each other and may not
++ alias global variables. True in Fortran.
++ This defaults to 0 for C. */
++ int flag_argument_noalias = 0;
++
+ /* Table of language-independent -f options.
+ STRING is the option name. VARIABLE is the address of the variable.
+*************** struct { char *string; int *variable; in
+*** 542,545 ****
+--- 567,573 ----
+ {"unroll-loops", &flag_unroll_loops, 1},
+ {"unroll-all-loops", &flag_unroll_all_loops, 1},
++ {"move-all-movables", &flag_move_all_movables, 1},
++ {"reduce-all-givs", &flag_reduce_all_givs, 1},
++ {"rerun-loop-opt", &flag_rerun_loop_opt, 1},
+ {"writable-strings", &flag_writable_strings, 1},
+ {"peephole", &flag_no_peephole, 0},
+*************** struct { char *string; int *variable; in
+*** 568,572 ****
+ {"gnu-linker", &flag_gnu_linker, 1},
+ {"pack-struct", &flag_pack_struct, 1},
+! {"bytecode", &output_bytecode, 1}
+ };
+
+--- 596,604 ----
+ {"gnu-linker", &flag_gnu_linker, 1},
+ {"pack-struct", &flag_pack_struct, 1},
+! {"bytecode", &output_bytecode, 1},
+! {"alias-check", &flag_alias_check, 1},
+! {"argument-alias", &flag_argument_noalias, 0},
+! {"argument-noalias", &flag_argument_noalias, 1},
+! {"argument-noalias-global", &flag_argument_noalias, 2}
+ };
+
+*************** rest_of_compilation (decl)
+*** 2715,2725 ****
+ finish_compilation will call rest_of_compilation again
+ for those functions that need to be output. Also defer those
+! functions that we are supposed to defer. */
+!
+! if (DECL_DEFER_OUTPUT (decl)
+! || ((specd || DECL_INLINE (decl))
+! && ((! TREE_PUBLIC (decl) && ! TREE_ADDRESSABLE (decl)
+! && ! flag_keep_inline_functions)
+! || DECL_EXTERNAL (decl))))
+ {
+ DECL_DEFER_OUTPUT (decl) = 1;
+--- 2747,2760 ----
+ finish_compilation will call rest_of_compilation again
+ for those functions that need to be output. Also defer those
+! functions that we are supposed to defer. We cannot defer
+! functions containing nested functions since the nested function
+! data is in our non-saved obstack. */
+!
+! if (! current_function_contains_functions
+! && (DECL_DEFER_OUTPUT (decl)
+! || ((specd || DECL_INLINE (decl))
+! && ((! TREE_PUBLIC (decl) && ! TREE_ADDRESSABLE (decl)
+! && ! flag_keep_inline_functions)
+! || DECL_EXTERNAL (decl)))))
+ {
+ DECL_DEFER_OUTPUT (decl) = 1;
+*************** rest_of_compilation (decl)
+*** 2893,2897 ****
+--- 2928,2951 ----
+ TIMEVAR (loop_time,
+ {
++ int save_unroll_flag;
++ int save_unroll_all_flag;
++
++ if (flag_rerun_loop_opt)
++ {
++ save_unroll_flag = flag_unroll_loops;
++ save_unroll_all_flag = flag_unroll_all_loops;
++ flag_unroll_loops = 0;
++ flag_unroll_all_loops = 0;
++ }
++
+ loop_optimize (insns, loop_dump_file);
++
++ if (flag_rerun_loop_opt)
++ {
++ flag_unroll_loops = save_unroll_flag;
++ flag_unroll_all_loops = save_unroll_all_flag;
++
++ loop_optimize (insns, loop_dump_file);
++ }
+ });
+ }
+*************** rest_of_compilation (decl)
+*** 3280,3283 ****
+--- 3334,3341 ----
+ resume_temporary_allocation ();
+
++ /* Show no temporary slots allocated. */
++
++ init_temp_slots ();
++
+ /* The parsing time is all the time spent in yyparse
+ *except* what is spent in this function. */
+*************** main (argc, argv, envp)
+*** 3383,3386 ****
+--- 3441,3445 ----
+ flag_omit_frame_pointer = 1;
+ #endif
++ flag_alias_check = 1;
+ }
+
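The new -fargument-alias/-fargument-noalias/-fargument-noalias-global options above select how much pointer arguments may alias. A tiny example of code whose optimization depends on that assumption, illustrative only and not from the patch:

    /* Under -fargument-noalias (or -fargument-noalias-global), the
       compiler may assume A and B never point at the same object, so
       the doubled a[0] can stay in a register across the store to b[0].
       Under the C default (-fargument-alias) it must allow a == b.  */
    void
    scale_pair (double *a, double *b)
    {
      a[0] = a[0] * 2.0;
      b[0] = b[0] + a[0];
    }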
+diff -rcp2N gcc-2.7.2.2/tree.c g77-new/tree.c
+*** gcc-2.7.2.2/tree.c Sun Oct 1 21:26:56 1995
+--- g77-new/tree.c Sun Aug 10 18:47:23 1997
+*************** build_string (len, str)
+*** 1428,1436 ****
+ /* Return a newly constructed COMPLEX_CST node whose value is
+ specified by the real and imaginary parts REAL and IMAG.
+! Both REAL and IMAG should be constant nodes.
+! The TREE_TYPE is not initialized. */
+
+ tree
+! build_complex (real, imag)
+ tree real, imag;
+ {
+--- 1428,1437 ----
+ /* Return a newly constructed COMPLEX_CST node whose value is
+ specified by the real and imaginary parts REAL and IMAG.
+! Both REAL and IMAG should be constant nodes. TYPE, if specified,
+! will be the type of the COMPLEX_CST; otherwise a new type will be made. */
+
+ tree
+! build_complex (type, real, imag)
+! tree type;
+ tree real, imag;
+ {
+*************** build_complex (real, imag)
+*** 1439,1443 ****
+ TREE_REALPART (t) = real;
+ TREE_IMAGPART (t) = imag;
+! TREE_TYPE (t) = build_complex_type (TREE_TYPE (real));
+ TREE_OVERFLOW (t) = TREE_OVERFLOW (real) | TREE_OVERFLOW (imag);
+ TREE_CONSTANT_OVERFLOW (t)
+--- 1440,1444 ----
+ TREE_REALPART (t) = real;
+ TREE_IMAGPART (t) = imag;
+! TREE_TYPE (t) = type ? type : build_complex_type (TREE_TYPE (real));
+ TREE_OVERFLOW (t) = TREE_OVERFLOW (real) | TREE_OVERFLOW (imag);
+ TREE_CONSTANT_OVERFLOW (t)
+*************** integer_zerop (expr)
+*** 1484,1487 ****
+--- 1485,1489 ----
+
+ return ((TREE_CODE (expr) == INTEGER_CST
++ && ! TREE_CONSTANT_OVERFLOW (expr)
+ && TREE_INT_CST_LOW (expr) == 0
+ && TREE_INT_CST_HIGH (expr) == 0)
+*************** integer_onep (expr)
+*** 1501,1504 ****
+--- 1503,1507 ----
+
+ return ((TREE_CODE (expr) == INTEGER_CST
++ && ! TREE_CONSTANT_OVERFLOW (expr)
+ && TREE_INT_CST_LOW (expr) == 1
+ && TREE_INT_CST_HIGH (expr) == 0)
+*************** integer_all_onesp (expr)
+*** 1525,1529 ****
+ return 1;
+
+! else if (TREE_CODE (expr) != INTEGER_CST)
+ return 0;
+
+--- 1528,1533 ----
+ return 1;
+
+! else if (TREE_CODE (expr) != INTEGER_CST
+! || TREE_CONSTANT_OVERFLOW (expr))
+ return 0;
+
+*************** integer_pow2p (expr)
+*** 1574,1578 ****
+ return 1;
+
+! if (TREE_CODE (expr) != INTEGER_CST)
+ return 0;
+
+--- 1578,1582 ----
+ return 1;
+
+! if (TREE_CODE (expr) != INTEGER_CST || TREE_CONSTANT_OVERFLOW (expr))
+ return 0;
+
+*************** real_zerop (expr)
+*** 1596,1599 ****
+--- 1600,1604 ----
+
+ return ((TREE_CODE (expr) == REAL_CST
++ && ! TREE_CONSTANT_OVERFLOW (expr)
+ && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst0))
+ || (TREE_CODE (expr) == COMPLEX_CST
+*************** real_onep (expr)
+*** 1611,1614 ****
+--- 1616,1620 ----
+
+ return ((TREE_CODE (expr) == REAL_CST
++ && ! TREE_CONSTANT_OVERFLOW (expr)
+ && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst1))
+ || (TREE_CODE (expr) == COMPLEX_CST
+*************** real_twop (expr)
+*** 1626,1629 ****
+--- 1632,1636 ----
+
+ return ((TREE_CODE (expr) == REAL_CST
++ && ! TREE_CONSTANT_OVERFLOW (expr)
+ && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst2))
+ || (TREE_CODE (expr) == COMPLEX_CST
+*************** staticp (arg)
+*** 2055,2061 ****
+ return 1;
+
+ case COMPONENT_REF:
+ case BIT_FIELD_REF:
+! return staticp (TREE_OPERAND (arg, 0));
+
+ #if 0
+--- 2062,2073 ----
+ return 1;
+
++ /* If we are referencing a bitfield, we can't evaluate an
++ ADDR_EXPR at compile time and so it isn't a constant. */
+ case COMPONENT_REF:
++ return (! DECL_BIT_FIELD (TREE_OPERAND (arg, 1))
++ && staticp (TREE_OPERAND (arg, 0)));
++
+ case BIT_FIELD_REF:
+! return 0;
+
+ #if 0
+*************** contains_placeholder_p (exp)
+*** 2157,2160 ****
+--- 2169,2174 ----
+ if (code == WITH_RECORD_EXPR)
+ return 0;
++ else if (code == PLACEHOLDER_EXPR)
++ return 1;
+
+ switch (TREE_CODE_CLASS (code))
+*************** substitute_in_expr (exp, f, r)
+*** 2204,2207 ****
+--- 2218,2222 ----
+ {
+ enum tree_code code = TREE_CODE (exp);
++ tree op0, op1, op2;
+ tree new = 0;
+ tree inner;
+*************** substitute_in_expr (exp, f, r)
+*** 2225,2231 ****
+ {
+ case 1:
+! new = fold (build1 (code, TREE_TYPE (exp),
+! substitute_in_expr (TREE_OPERAND (exp, 0),
+! f, r)));
+ break;
+
+--- 2240,2248 ----
+ {
+ case 1:
+! op0 = substitute_in_expr (TREE_OPERAND (exp, 0), f, r);
+! if (op0 == TREE_OPERAND (exp, 0))
+! return exp;
+!
+! new = fold (build1 (code, TREE_TYPE (exp), op0));
+ break;
+
+*************** substitute_in_expr (exp, f, r)
+*** 2238,2245 ****
+ abort ();
+
+! new = fold (build (code, TREE_TYPE (exp),
+! substitute_in_expr (TREE_OPERAND (exp, 0), f, r),
+! substitute_in_expr (TREE_OPERAND (exp, 1),
+! f, r)));
+ break;
+
+--- 2255,2264 ----
+ abort ();
+
+! op0 = substitute_in_expr (TREE_OPERAND (exp, 0), f, r);
+! op1 = substitute_in_expr (TREE_OPERAND (exp, 1), f, r);
+! if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1))
+! return exp;
+!
+! new = fold (build (code, TREE_TYPE (exp), op0, op1));
+ break;
+
+*************** substitute_in_expr (exp, f, r)
+*** 2253,2261 ****
+ abort ();
+
+! new = fold (build (code, TREE_TYPE (exp),
+! substitute_in_expr (TREE_OPERAND (exp, 0), f, r),
+! substitute_in_expr (TREE_OPERAND (exp, 1), f, r),
+! substitute_in_expr (TREE_OPERAND (exp, 2),
+! f, r)));
+ }
+
+--- 2272,2283 ----
+ abort ();
+
+! op0 = substitute_in_expr (TREE_OPERAND (exp, 0), f, r);
+! op1 = substitute_in_expr (TREE_OPERAND (exp, 1), f, r);
+! op2 = substitute_in_expr (TREE_OPERAND (exp, 2), f, r);
+! if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1)
+! && op2 == TREE_OPERAND (exp, 2))
+! return exp;
+!
+! new = fold (build (code, TREE_TYPE (exp), op0, op1, op2));
+ }
+
+*************** substitute_in_expr (exp, f, r)
+*** 2276,2302 ****
+ return r;
+
+! new = fold (build (code, TREE_TYPE (exp),
+! substitute_in_expr (TREE_OPERAND (exp, 0), f, r),
+ TREE_OPERAND (exp, 1)));
+ break;
+
+ case BIT_FIELD_REF:
+! new = fold (build (code, TREE_TYPE (exp),
+! substitute_in_expr (TREE_OPERAND (exp, 0), f, r),
+! substitute_in_expr (TREE_OPERAND (exp, 1), f, r),
+! substitute_in_expr (TREE_OPERAND (exp, 2), f, r)));
+ break;
+
+ case INDIRECT_REF:
+ case BUFFER_REF:
+! new = fold (build1 (code, TREE_TYPE (exp),
+! substitute_in_expr (TREE_OPERAND (exp, 0),
+! f, r)));
+ break;
+
+ case OFFSET_REF:
+! new = fold (build (code, TREE_TYPE (exp),
+! substitute_in_expr (TREE_OPERAND (exp, 0), f, r),
+! substitute_in_expr (TREE_OPERAND (exp, 1), f, r)));
+ break;
+ }
+--- 2298,2342 ----
+ return r;
+
+! /* If this expression hasn't been completed yet, leave it
+! alone. */
+! if (TREE_CODE (inner) == PLACEHOLDER_EXPR
+! && TREE_TYPE (inner) == 0)
+! return exp;
+!
+! op0 = substitute_in_expr (TREE_OPERAND (exp, 0), f, r);
+! if (op0 == TREE_OPERAND (exp, 0))
+! return exp;
+!
+! new = fold (build (code, TREE_TYPE (exp), op0,
+ TREE_OPERAND (exp, 1)));
+ break;
+
+ case BIT_FIELD_REF:
+! op0 = substitute_in_expr (TREE_OPERAND (exp, 0), f, r);
+! op1 = substitute_in_expr (TREE_OPERAND (exp, 1), f, r);
+! op2 = substitute_in_expr (TREE_OPERAND (exp, 2), f, r);
+! if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1)
+! && op2 == TREE_OPERAND (exp, 2))
+! return exp;
+!
+! new = fold (build (code, TREE_TYPE (exp), op0, op1, op2));
+ break;
+
+ case INDIRECT_REF:
+ case BUFFER_REF:
+! op0 = substitute_in_expr (TREE_OPERAND (exp, 0), f, r);
+! if (op0 == TREE_OPERAND (exp, 0))
+! return exp;
+!
+! new = fold (build1 (code, TREE_TYPE (exp), op0));
+ break;
+
+ case OFFSET_REF:
+! op0 = substitute_in_expr (TREE_OPERAND (exp, 0), f, r);
+! op1 = substitute_in_expr (TREE_OPERAND (exp, 1), f, r);
+! if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1))
+! return exp;
+!
+! new = fold (build (code, TREE_TYPE (exp), op0, op1));
+ break;
+ }
+*************** substitute_in_expr (exp, f, r)
+*** 2311,2454 ****
+ }
+
+- /* Given a type T, a FIELD_DECL F, and a replacement value R,
+- return a new type with all size expressions that contain F
+- updated by replacing F with R. */
+-
+- tree
+- substitute_in_type (t, f, r)
+- tree t, f, r;
+- {
+- switch (TREE_CODE (t))
+- {
+- case POINTER_TYPE:
+- case VOID_TYPE:
+- return t;
+- case INTEGER_TYPE:
+- case ENUMERAL_TYPE:
+- case BOOLEAN_TYPE:
+- case CHAR_TYPE:
+- if ((TREE_CODE (TYPE_MIN_VALUE (t)) != INTEGER_CST
+- && contains_placeholder_p (TYPE_MIN_VALUE (t)))
+- || (TREE_CODE (TYPE_MAX_VALUE (t)) != INTEGER_CST
+- && contains_placeholder_p (TYPE_MAX_VALUE (t))))
+- return build_range_type (t,
+- substitute_in_expr (TYPE_MIN_VALUE (t), f, r),
+- substitute_in_expr (TYPE_MAX_VALUE (t), f, r));
+- return t;
+-
+- case REAL_TYPE:
+- if ((TYPE_MIN_VALUE (t) != 0
+- && TREE_CODE (TYPE_MIN_VALUE (t)) != REAL_CST
+- && contains_placeholder_p (TYPE_MIN_VALUE (t)))
+- || (TYPE_MAX_VALUE (t) != 0
+- && TREE_CODE (TYPE_MAX_VALUE (t)) != REAL_CST
+- && contains_placeholder_p (TYPE_MAX_VALUE (t))))
+- {
+- t = build_type_copy (t);
+-
+- if (TYPE_MIN_VALUE (t))
+- TYPE_MIN_VALUE (t) = substitute_in_expr (TYPE_MIN_VALUE (t), f, r);
+- if (TYPE_MAX_VALUE (t))
+- TYPE_MAX_VALUE (t) = substitute_in_expr (TYPE_MAX_VALUE (t), f, r);
+- }
+- return t;
+-
+- case COMPLEX_TYPE:
+- return build_complex_type (substitute_in_type (TREE_TYPE (t), f, r));
+-
+- case OFFSET_TYPE:
+- case METHOD_TYPE:
+- case REFERENCE_TYPE:
+- case FILE_TYPE:
+- case SET_TYPE:
+- case FUNCTION_TYPE:
+- case LANG_TYPE:
+- /* Don't know how to do these yet. */
+- abort ();
+-
+- case ARRAY_TYPE:
+- t = build_array_type (substitute_in_type (TREE_TYPE (t), f, r),
+- substitute_in_type (TYPE_DOMAIN (t), f, r));
+- TYPE_SIZE (t) = 0;
+- layout_type (t);
+- return t;
+-
+- case RECORD_TYPE:
+- case UNION_TYPE:
+- case QUAL_UNION_TYPE:
+- {
+- tree new = copy_node (t);
+- tree field;
+- tree last_field = 0;
+-
+- /* Start out with no fields, make new fields, and chain them
+- in. */
+-
+- TYPE_FIELDS (new) = 0;
+- TYPE_SIZE (new) = 0;
+-
+- for (field = TYPE_FIELDS (t); field;
+- field = TREE_CHAIN (field))
+- {
+- tree new_field = copy_node (field);
+-
+- TREE_TYPE (new_field)
+- = substitute_in_type (TREE_TYPE (new_field), f, r);
+-
+- /* If this is an anonymous field and the type of this field is
+- a UNION_TYPE or RECORD_TYPE with no elements, ignore it. If
+- the type just has one element, treat that as the field.
+- But don't do this if we are processing a QUAL_UNION_TYPE. */
+- if (TREE_CODE (t) != QUAL_UNION_TYPE && DECL_NAME (new_field) == 0
+- && (TREE_CODE (TREE_TYPE (new_field)) == UNION_TYPE
+- || TREE_CODE (TREE_TYPE (new_field)) == RECORD_TYPE))
+- {
+- if (TYPE_FIELDS (TREE_TYPE (new_field)) == 0)
+- continue;
+-
+- if (TREE_CHAIN (TYPE_FIELDS (TREE_TYPE (new_field))) == 0)
+- new_field = TYPE_FIELDS (TREE_TYPE (new_field));
+- }
+-
+- DECL_CONTEXT (new_field) = new;
+- DECL_SIZE (new_field) = 0;
+-
+- if (TREE_CODE (t) == QUAL_UNION_TYPE)
+- {
+- /* Do the substitution inside the qualifier and if we find
+- that this field will not be present, omit it. */
+- DECL_QUALIFIER (new_field)
+- = substitute_in_expr (DECL_QUALIFIER (field), f, r);
+- if (integer_zerop (DECL_QUALIFIER (new_field)))
+- continue;
+- }
+-
+- if (last_field == 0)
+- TYPE_FIELDS (new) = new_field;
+- else
+- TREE_CHAIN (last_field) = new_field;
+-
+- last_field = new_field;
+-
+- /* If this is a qualified type and this field will always be
+- present, we are done. */
+- if (TREE_CODE (t) == QUAL_UNION_TYPE
+- && integer_onep (DECL_QUALIFIER (new_field)))
+- break;
+- }
+-
+- /* If this used to be a qualified union type, but we now know what
+- field will be present, make this a normal union. */
+- if (TREE_CODE (new) == QUAL_UNION_TYPE
+- && (TYPE_FIELDS (new) == 0
+- || integer_onep (DECL_QUALIFIER (TYPE_FIELDS (new)))))
+- TREE_SET_CODE (new, UNION_TYPE);
+-
+- layout_type (new);
+- return new;
+- }
+- }
+- }
+-
+ /* Stabilize a reference so that we can use it any number of times
+ without causing its operands to be evaluated more than once.
+--- 2351,2354 ----
+*************** build_type_variant (type, constp, volati
+*** 3141,3145 ****
+ preserve the TYPE_NAME, since there is code that depends on this. */
+
+! for (t = TYPE_MAIN_VARIANT(type); t; t = TYPE_NEXT_VARIANT (t))
+ if (constp == TYPE_READONLY (t) && volatilep == TYPE_VOLATILE (t)
+ && TYPE_NAME (t) == TYPE_NAME (type))
+--- 3041,3045 ----
+ preserve the TYPE_NAME, since there is code that depends on this. */
+
+! for (t = TYPE_MAIN_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
+ if (constp == TYPE_READONLY (t) && volatilep == TYPE_VOLATILE (t)
+ && TYPE_NAME (t) == TYPE_NAME (type))
+*************** get_unwidened (op, for_type)
+*** 4051,4055 ****
+ if (TREE_CODE (op) == COMPONENT_REF
+ /* Since type_for_size always gives an integer type. */
+! && TREE_CODE (type) != REAL_TYPE)
+ {
+ unsigned innerprec = TREE_INT_CST_LOW (DECL_SIZE (TREE_OPERAND (op, 1)));
+--- 3951,3957 ----
+ if (TREE_CODE (op) == COMPONENT_REF
+ /* Since type_for_size always gives an integer type. */
+! && TREE_CODE (type) != REAL_TYPE
+! /* Don't crash if field not laid out yet. */
+! && DECL_SIZE (TREE_OPERAND (op, 1)) != 0)
+ {
+ unsigned innerprec = TREE_INT_CST_LOW (DECL_SIZE (TREE_OPERAND (op, 1)));
+diff -rcp2N gcc-2.7.2.2/tree.h g77-new/tree.h
+*** gcc-2.7.2.2/tree.h Mon Sep 25 17:49:40 1995
+--- g77-new/tree.h Sun Aug 10 18:47:08 1997
+*************** enum built_in_function
+*** 98,101 ****
+--- 98,103 ----
+ BUILT_IN_APPLY,
+ BUILT_IN_RETURN,
++ BUILT_IN_SETJMP,
++ BUILT_IN_LONGJMP,
+
+ /* C++ extensions */
+*************** struct tree_int_cst
+*** 408,411 ****
+--- 410,415 ----
+ {
+ char common[sizeof (struct tree_common)];
++ struct rtx_def *rtl; /* acts as link to register transfer language
++ (rtl) info */
+ HOST_WIDE_INT int_cst_low;
+ HOST_WIDE_INT int_cst_high;
+*************** struct tree_type
+*** 957,960 ****
+--- 961,967 ----
+ #define DECL_STATIC_DESTRUCTOR(NODE) ((NODE)->decl.static_dtor_flag)
+
++ /* In a PARM_DECL, nonzero if this is a restricted pointer. */
++ #define DECL_RESTRICT(NODE) (NODE)->decl.static_ctor_flag
++
+ /* Used to indicate that this DECL represents a compiler-generated entity. */
+ #define DECL_ARTIFICIAL(NODE) ((NODE)->decl.artificial_flag)
+*************** extern tree build_int_2_wide PROTO((HOS
+*** 1176,1180 ****
+ extern tree build_real PROTO((tree, REAL_VALUE_TYPE));
+ extern tree build_real_from_int_cst PROTO((tree, tree));
+! extern tree build_complex PROTO((tree, tree));
+ extern tree build_string PROTO((int, char *));
+ extern tree build1 PROTO((enum tree_code, tree, tree));
+--- 1183,1187 ----
+ extern tree build_real PROTO((tree, REAL_VALUE_TYPE));
+ extern tree build_real_from_int_cst PROTO((tree, tree));
+! extern tree build_complex PROTO((tree, tree, tree));
+ extern tree build_string PROTO((int, char *));
+ extern tree build1 PROTO((enum tree_code, tree, tree));
+*************** extern int contains_placeholder_p PROTO(
+*** 1378,1387 ****
+ extern tree substitute_in_expr PROTO((tree, tree, tree));
+
+- /* Given a type T, a FIELD_DECL F, and a replacement value R,
+- return a new type with all size expressions that contain F
+- updated by replacing the reference to F with R. */
+-
+- extern tree substitute_in_type PROTO((tree, tree, tree));
+-
+ /* variable_size (EXP) is like save_expr (EXP) except that it
+ is for the special case of something that is part of a
+--- 1385,1388 ----
+*************** extern tree maybe_build_cleanup PROTO((
+*** 1456,1460 ****
+ and find the ultimate containing object, which is returned. */
+
+! extern tree get_inner_reference PROTO((tree, int *, int *, tree *, enum machine_mode *, int *, int *));
+
+ /* Return the FUNCTION_DECL which provides this _DECL with its context,
+--- 1457,1463 ----
+ and find the ultimate containing object, which is returned. */
+
+! extern tree get_inner_reference PROTO((tree, int *, int *, tree *,
+! enum machine_mode *, int *,
+! int *, int *));
+
+ /* Return the FUNCTION_DECL which provides this _DECL with its context,
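+
+The tree.h change above reserves DECL_RESTRICT for PARM_DECLs that are
+restricted pointers.  As a rough illustration only (assuming the
+accompanying C front-end changes accept the `__restrict' qualifier on
+parameters; the function name and body are hypothetical), this is the
+kind of parameter declaration the flag is meant to mark:
+
+    void
+    copy_ints (int *__restrict dst, int *__restrict src, int n)
+    {
+      int i;
+
+      /* With both parameters restricted, dst and src are asserted
+         not to alias, so accesses through them may be reordered.  */
+      for (i = 0; i < n; i++)
+        dst[i] = src[i];
+    }
+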
+diff -rcp2N gcc-2.7.2.2/unroll.c g77-new/unroll.c
+*** gcc-2.7.2.2/unroll.c Sat Aug 19 17:33:26 1995
+--- g77-new/unroll.c Thu Jul 10 20:09:10 1997
+*************** unroll_loop (loop_end, insn_count, loop_
+*** 268,273 ****
+ structure of the function. This can happen as a result of the
+ "if (foo) bar; else break;" optimization in jump.c. */
+
+! if (write_symbols != NO_DEBUG)
+ {
+ int block_begins = 0;
+--- 268,277 ----
+ structure of the function. This can happen as a result of the
+ "if (foo) bar; else break;" optimization in jump.c. */
++ /* ??? Gcc has a general policy that -g is never supposed to change the code
++ that the compiler emits, so we must disable this optimization always,
++ even if debug info is not being output. This is rare, so this should
++ not be a significant performance problem. */
+
+! if (1 /* write_symbols != NO_DEBUG */)
+ {
+ int block_begins = 0;
+*************** unroll_loop (loop_end, insn_count, loop_
+*** 633,636 ****
+--- 637,657 ----
+ }
+
++ if (unroll_type == UNROLL_NAIVE
++ && GET_CODE (last_loop_insn) == JUMP_INSN
++ && start_label != JUMP_LABEL (last_loop_insn))
++ {
++ /* ??? The loop ends with a conditional branch that does not branch back
++ to the loop start label. In this case, we must emit an unconditional
++ branch to the loop exit after emitting the final branch.
++ copy_loop_body does not have support for this currently, so we
++ give up. It doesn't seem worthwhile to unroll anyway since
++ unrolling would increase the number of branch instructions
++ executed. */
++ if (loop_dump_stream)
++ fprintf (loop_dump_stream,
++ "Unrolling failure: final conditional branch not to loop start\n");
++ return;
++ }
++
+ /* Allocate a translation table for the labels and insn numbers.
+ They will be filled in as we copy the insns in the loop. */
+*************** unroll_loop (loop_end, insn_count, loop_
+*** 995,999 ****
+ for (j = FIRST_PSEUDO_REGISTER; j < max_reg_before_loop; j++)
+ if (local_regno[j])
+! map->reg_map[j] = gen_reg_rtx (GET_MODE (regno_reg_rtx[j]));
+
+ /* The last copy needs the compare/branch insns at the end,
+--- 1016,1024 ----
+ for (j = FIRST_PSEUDO_REGISTER; j < max_reg_before_loop; j++)
+ if (local_regno[j])
+! {
+! map->reg_map[j] = gen_reg_rtx (GET_MODE (regno_reg_rtx[j]));
+! record_base_value (REGNO (map->reg_map[j]),
+! regno_reg_rtx[j]);
+! }
+
+ /* The last copy needs the compare/branch insns at the end,
+*************** unroll_loop (loop_end, insn_count, loop_
+*** 1136,1140 ****
+ for (j = FIRST_PSEUDO_REGISTER; j < max_reg_before_loop; j++)
+ if (local_regno[j])
+! map->reg_map[j] = gen_reg_rtx (GET_MODE (regno_reg_rtx[j]));
+
+ /* If loop starts with a branch to the test, then fix it so that
+--- 1161,1169 ----
+ for (j = FIRST_PSEUDO_REGISTER; j < max_reg_before_loop; j++)
+ if (local_regno[j])
+! {
+! map->reg_map[j] = gen_reg_rtx (GET_MODE (regno_reg_rtx[j]));
+! record_base_value (REGNO (map->reg_map[j]),
+! regno_reg_rtx[j]);
+! }
+
+ /* If loop starts with a branch to the test, then fix it so that
+*************** copy_loop_body (copy_start, copy_end, ma
+*** 1605,1608 ****
+--- 1634,1641 ----
+ int this_giv_inc = INTVAL (giv_inc);
+
++ /* If this DEST_ADDR giv was not split, then ignore it. */
++ if (*tv->location != tv->dest_reg)
++ continue;
++
+ /* Scale this_giv_inc if the multiplicative factors of
+ the two givs are different. */
+*************** copy_loop_body (copy_start, copy_end, ma
+*** 1631,1635 ****
+ incrementing the shared pseudo reg more than
+ once. */
+! if (! tv->same_insn)
+ {
+ /* tv->dest_reg may actually be a (PLUS (REG)
+--- 1664,1668 ----
+ incrementing the shared pseudo reg more than
+ once. */
+! if (! tv->same_insn && ! tv->shared)
+ {
+ /* tv->dest_reg may actually be a (PLUS (REG)
+*************** copy_loop_body (copy_start, copy_end, ma
+*** 1757,1760 ****
+--- 1790,1794 ----
+ giv_dest_reg = tem;
+ map->reg_map[regno] = tem;
++ record_base_value (REGNO (tem), giv_src_reg);
+ }
+ else
+*************** iteration_info (iteration_var, initial_v
+*** 2220,2231 ****
+ return;
+ }
+! /* Reject iteration variables larger than the host long size, since they
+ could result in a number of iterations greater than the range of our
+! `unsigned long' variable loop_n_iterations. */
+! else if (GET_MODE_BITSIZE (GET_MODE (iteration_var)) > HOST_BITS_PER_LONG)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+! "Loop unrolling: Iteration var rejected because mode larger than host long.\n");
+ return;
+ }
+--- 2254,2266 ----
+ return;
+ }
+! /* Reject iteration variables larger than the host wide int size, since they
+ could result in a number of iterations greater than the range of our
+! `unsigned HOST_WIDE_INT' variable loop_n_iterations. */
+! else if ((GET_MODE_BITSIZE (GET_MODE (iteration_var))
+! > HOST_BITS_PER_WIDE_INT))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+! "Loop unrolling: Iteration var rejected because mode too large.\n");
+ return;
+ }
+*************** find_splittable_regs (unroll_type, loop_
+*** 2443,2447 ****
+ {
+ rtx tem = gen_reg_rtx (bl->biv->mode);
+!
+ emit_insn_before (gen_move_insn (tem, bl->biv->src_reg),
+ loop_start);
+--- 2478,2483 ----
+ {
+ rtx tem = gen_reg_rtx (bl->biv->mode);
+!
+! record_base_value (REGNO (tem), bl->biv->add_val);
+ emit_insn_before (gen_move_insn (tem, bl->biv->src_reg),
+ loop_start);
+*************** find_splittable_regs (unroll_type, loop_
+*** 2500,2503 ****
+--- 2536,2541 ----
+ exits. */
+ rtx tem = gen_reg_rtx (bl->biv->mode);
++ record_base_value (REGNO (tem), bl->biv->add_val);
++
+ emit_insn_before (gen_move_insn (tem, bl->biv->src_reg),
+ loop_start);
+*************** find_splittable_givs (bl, unroll_type, l
+*** 2675,2678 ****
+--- 2713,2717 ----
+ rtx tem = gen_reg_rtx (bl->biv->mode);
+
++ record_base_value (REGNO (tem), bl->biv->add_val);
+ emit_insn_before (gen_move_insn (tem, bl->biv->src_reg),
+ loop_start);
+*************** find_splittable_givs (bl, unroll_type, l
+*** 2716,2719 ****
+--- 2755,2759 ----
+ {
+ rtx tem = gen_reg_rtx (v->mode);
++ record_base_value (REGNO (tem), v->add_val);
+ emit_iv_add_mult (bl->initial_value, v->mult_val,
+ v->add_val, tem, loop_start);
+*************** find_splittable_givs (bl, unroll_type, l
+*** 2734,2747 ****
+ register for the split addr giv, just to be safe. */
+
+! /* ??? If there are multiple address givs which have been
+! combined with the same dest_reg giv, then we may only need
+! one new register for them. Pulling out constants below will
+! catch some of the common cases of this. Currently, I leave
+! the work of simplifying multiple address givs to the
+! following cse pass. */
+!
+! /* As a special case, if we have multiple identical address givs
+! within a single instruction, then we do use a single pseudo
+! reg for both. This is necessary in case one is a match_dup
+ of the other. */
+
+--- 2774,2780 ----
+ register for the split addr giv, just to be safe. */
+
+! /* If we have multiple identical address givs within a
+! single instruction, then use a single pseudo reg for
+! both. This is necessary in case one is a match_dup
+ of the other. */
+
+*************** find_splittable_givs (bl, unroll_type, l
+*** 2756,2759 ****
+--- 2789,2812 ----
+ INSN_UID (v->insn));
+ }
++ /* If multiple address GIVs have been combined with the
++ same dest_reg GIV, do not create a new register for
++ each. */
++ else if (unroll_type != UNROLL_COMPLETELY
++ && v->giv_type == DEST_ADDR
++ && v->same && v->same->giv_type == DEST_ADDR
++ && v->same->unrolled
++ #ifdef ADDRESS_COST
++ /* combine_givs_p may return true when ADDRESS_COST is
++ defined even if the multiply and add values are
++ not equal. To share a register here, the values
++ must be equal, as well as related. */
++ && rtx_equal_p (v->mult_val, v->same->mult_val)
++ && rtx_equal_p (v->add_val, v->same->add_val)
++ #endif
++ )
++ {
++ v->dest_reg = v->same->dest_reg;
++ v->shared = 1;
++ }
+ else if (unroll_type != UNROLL_COMPLETELY)
+ {
+*************** find_splittable_givs (bl, unroll_type, l
+*** 2761,2765 ****
+ register to hold the split value of the DEST_ADDR giv.
+ Emit insn to initialize its value before loop start. */
+! tem = gen_reg_rtx (v->mode);
+
+ /* If the address giv has a constant in its new_reg value,
+--- 2814,2821 ----
+ register to hold the split value of the DEST_ADDR giv.
+ Emit insn to initialize its value before loop start. */
+!
+! rtx tem = gen_reg_rtx (v->mode);
+! record_base_value (REGNO (tem), v->add_val);
+! v->unrolled = 1;
+
+ /* If the address giv has a constant in its new_reg value,
+*************** find_splittable_givs (bl, unroll_type, l
+*** 2772,2781 ****
+ v->dest_reg
+ = plus_constant (tem, INTVAL (XEXP (v->new_reg,1)));
+!
+ /* Only succeed if this will give valid addresses.
+ Try to validate both the first and the last
+ address resulting from loop unrolling, if
+ one fails, then can't do const elim here. */
+! if (! verify_addresses (v, giv_inc, unroll_number))
+ {
+ /* Save the negative of the eliminated const, so
+--- 2828,2837 ----
+ v->dest_reg
+ = plus_constant (tem, INTVAL (XEXP (v->new_reg,1)));
+!
+ /* Only succeed if this will give valid addresses.
+ Try to validate both the first and the last
+ address resulting from loop unrolling, if
+ one fails, then can't do const elim here. */
+! if (verify_addresses (v, giv_inc, unroll_number))
+ {
+ /* Save the negative of the eliminated const, so
+*************** final_biv_value (bl, loop_start, loop_en
+*** 3061,3064 ****
+--- 3117,3121 ----
+
+ tem = gen_reg_rtx (bl->biv->mode);
++ record_base_value (REGNO (tem), bl->biv->add_val);
+ /* Make sure loop_end is not the last insn. */
+ if (NEXT_INSN (loop_end) == 0)
+*************** final_giv_value (v, loop_start, loop_end
+*** 3154,3157 ****
+--- 3211,3215 ----
+ /* Put the final biv value in tem. */
+ tem = gen_reg_rtx (bl->biv->mode);
++ record_base_value (REGNO (tem), bl->biv->add_val);
+ emit_iv_add_mult (increment, GEN_INT (loop_n_iterations),
+ bl->initial_value, tem, insert_before);
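+
+A recurring idiom in the unroll.c hunks above is to follow each
+gen_reg_rtx call that creates a pseudo for a split biv or giv value
+with record_base_value, so the alias code knows what address the new
+pseudo is based on.  As a loose illustration (the function below is
+hypothetical, and the description of the internals is only a sketch),
+a loop such as the following gives rise to a basic induction variable
+for `i' and an address giv for `a[i]'; when the loop is unrolled, each
+copy receives its own pseudo for the split giv, and the new
+record_base_value calls preserve the fact that those pseudos are all
+derived from `a':
+
+    void
+    scale (double *a, int n)
+    {
+      int i;
+
+      /* a[i] is addressed through an induction variable; unrolling
+         splits that address computation across the unrolled copies.  */
+      for (i = 0; i < n; i++)
+        a[i] = a[i] * 2.0;
+    }
+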
+diff -rcp2N gcc-2.7.2.2/varasm.c g77-new/varasm.c
+*** gcc-2.7.2.2/varasm.c Thu Aug 31 19:02:53 1995
+--- g77-new/varasm.c Sun Aug 10 22:26:32 1997
+*************** assemble_variable (decl, top_level, at_e
+*** 1067,1070 ****
+--- 1067,1072 ----
+ if (! dont_output_data)
+ {
++ int size;
++
+ if (TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
+ goto finish;
+*************** assemble_variable (decl, top_level, at_e
+*** 1072,1078 ****
+ /* This is better than explicit arithmetic, since it avoids overflow. */
+ size_tree = size_binop (CEIL_DIV_EXPR,
+! DECL_SIZE (decl), size_int (BITS_PER_UNIT));
+
+! if (TREE_INT_CST_HIGH (size_tree) != 0)
+ {
+ error_with_decl (decl, "size of variable `%s' is too large");
+--- 1074,1082 ----
+ /* This is better than explicit arithmetic, since it avoids overflow. */
+ size_tree = size_binop (CEIL_DIV_EXPR,
+! DECL_SIZE (decl), size_int (BITS_PER_UNIT));
+
+! size = TREE_INT_CST_LOW (size_tree);
+! if (TREE_INT_CST_HIGH (size_tree) != 0
+! || size != TREE_INT_CST_LOW (size_tree))
+ {
+ error_with_decl (decl, "size of variable `%s' is too large");
+*************** decode_addr_const (exp, value)
+*** 2132,2135 ****
+--- 2136,2140 ----
+ case COMPLEX_CST:
+ case CONSTRUCTOR:
++ case INTEGER_CST:
+ x = TREE_CST_RTL (target);
+ break;
+*************** const_hash (exp)
+*** 2247,2251 ****
+ return const_hash (TREE_OPERAND (exp, 0)) * 9
+ + const_hash (TREE_OPERAND (exp, 1));
+! else if (code == NOP_EXPR || code == CONVERT_EXPR)
+ return const_hash (TREE_OPERAND (exp, 0)) * 7 + 2;
+
+--- 2252,2256 ----
+ return const_hash (TREE_OPERAND (exp, 0)) * 9
+ + const_hash (TREE_OPERAND (exp, 1));
+! else if (code == NOP_EXPR || code == CONVERT_EXPR || code == NON_LVALUE_EXPR)
+ return const_hash (TREE_OPERAND (exp, 0)) * 7 + 2;
+
+*************** compare_constant_1 (exp, p)
+*** 2401,2405 ****
+ return p;
+ }
+! else if (code == NOP_EXPR || code == CONVERT_EXPR)
+ {
+ p = compare_constant_1 (TREE_OPERAND (exp, 0), p);
+--- 2406,2410 ----
+ return p;
+ }
+! else if (code == NOP_EXPR || code == CONVERT_EXPR || code == NON_LVALUE_EXPR)
+ {
+ p = compare_constant_1 (TREE_OPERAND (exp, 0), p);
+*************** copy_constant (exp)
+*** 2633,2637 ****
+
+ case COMPLEX_CST:
+! return build_complex (copy_constant (TREE_REALPART (exp)),
+ copy_constant (TREE_IMAGPART (exp)));
+
+--- 2638,2643 ----
+
+ case COMPLEX_CST:
+! return build_complex (TREE_TYPE (exp),
+! copy_constant (TREE_REALPART (exp)),
+ copy_constant (TREE_IMAGPART (exp)));
+
+*************** copy_constant (exp)
+*** 2644,2647 ****
+--- 2650,2654 ----
+ case NOP_EXPR:
+ case CONVERT_EXPR:
++ case NON_LVALUE_EXPR:
+ return build1 (TREE_CODE (exp), TREE_TYPE (exp),
+ copy_constant (TREE_OPERAND (exp, 0)));
+*************** output_constant_def (exp)
+*** 2690,2696 ****
+ register rtx def;
+
+- if (TREE_CODE (exp) == INTEGER_CST)
+- abort (); /* No TREE_CST_RTL slot in these. */
+-
+ if (TREE_CST_RTL (exp))
+ return TREE_CST_RTL (exp);
+--- 2697,2700 ----
+*************** bc_assemble_integer (exp, size)
+*** 3620,3624 ****
+ exp = fold (exp);
+
+! while (TREE_CODE (exp) == NOP_EXPR || TREE_CODE (exp) == CONVERT_EXPR)
+ exp = TREE_OPERAND (exp, 0);
+ if (TREE_CODE (exp) == INTEGER_CST)
+--- 3624,3629 ----
+ exp = fold (exp);
+
+! while (TREE_CODE (exp) == NOP_EXPR || TREE_CODE (exp) == CONVERT_EXPR
+! || TREE_CODE (exp) == NON_LVALUE_EXPR)
+ exp = TREE_OPERAND (exp, 0);
+ if (TREE_CODE (exp) == INTEGER_CST)
+*************** bc_assemble_integer (exp, size)
+*** 3631,3639 ****
+ const_part = TREE_OPERAND (exp, 0);
+ while (TREE_CODE (const_part) == NOP_EXPR
+! || TREE_CODE (const_part) == CONVERT_EXPR)
+ const_part = TREE_OPERAND (const_part, 0);
+ addr_part = TREE_OPERAND (exp, 1);
+ while (TREE_CODE (addr_part) == NOP_EXPR
+! || TREE_CODE (addr_part) == CONVERT_EXPR)
+ addr_part = TREE_OPERAND (addr_part, 0);
+ if (TREE_CODE (const_part) != INTEGER_CST)
+--- 3636,3646 ----
+ const_part = TREE_OPERAND (exp, 0);
+ while (TREE_CODE (const_part) == NOP_EXPR
+! || TREE_CODE (const_part) == CONVERT_EXPR
+! || TREE_CODE (const_part) == NON_LVALUE_EXPR)
+ const_part = TREE_OPERAND (const_part, 0);
+ addr_part = TREE_OPERAND (exp, 1);
+ while (TREE_CODE (addr_part) == NOP_EXPR
+! || TREE_CODE (addr_part) == CONVERT_EXPR
+! || TREE_CODE (addr_part) == NON_LVALUE_EXPR)
+ addr_part = TREE_OPERAND (addr_part, 0);
+ if (TREE_CODE (const_part) != INTEGER_CST)
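+
+The assemble_variable hunk above tightens the size check: a variable
+is now rejected not only when the high part of its size is nonzero but
+also when the low part does not fit in a host `int'.  As a hedged
+illustration (the array bound is arbitrary, and whether this
+particular declaration is caught here or rejected earlier during
+layout depends on the host and target word sizes), a declaration along
+these lines is the kind of over-large object the strengthened test
+guards against:
+
+    /* Roughly 2^33 bytes, which does not fit in a 32-bit `int'.  */
+    static char big_table[2][0x40000000][4];
+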
+diff -rcp2N gcc-2.7.2.2/version.c g77-new/version.c
+*** gcc-2.7.2.2/version.c Thu Feb 20 19:24:33 1997
+--- g77-new/version.c Sun Aug 10 19:28:55 1997
+***************
+*** 1 ****
+! char *version_string = "2.7.2.2";
+--- 1 ----
+! char *version_string = "2.7.2.2.f.3b";