+2006-05-28 Kazu Hirata <kazu@codesourcery.com>
+
+ * cfgcleanup.c, cfgexpand.c, cgraphunit.c, config/arm/arm.c,
+ config/fr30/fr30.md, config/i386/i386-interix.h,
+ config/i386/i386.c, config/i386/i386.md, config/sh/superh.h,
+ config/sh/superh64.h, config/v850/v850.c, df-core.c,
+ df-problems.c, df.h, except.c, final.c, haifa-sched.c,
+ lambda-code.c, libgcc2.h, omp-low.c, optabs.c, predict.c,
+ reload.c, tree-flow.h, tree-outof-ssa.c, tree-ssa-dce.c,
+ tree-ssa-pre.c, tree-vect-transform.c: Fix comment typos.
+ Follow spelling conventions.
+ * doc/invoke.texi, doc/rtl.texi, doc/tm.texi: Fix typos.
+ Follow spelling conventions.
+
2006-05-27 Richard Guenther <rguenther@suse.de>
PR middle-end/27773
redirect_to->count += src1->count;
redirect_to->frequency += src1->frequency;
- /* We may have some registers visible trought the block. */
+ /* We may have some registers visible through the block. */
redirect_to->flags |= BB_DIRTY;
/* Recompute the frequencies and counts of outgoing edges. */
expand_used_vars_for_block (t, false);
/* Since we do not track exact variable lifetimes (which is not even
- possible for varibles whose address escapes), we mirror the block
+ possible for variables whose address escapes), we mirror the block
tree in the interference graph. Here we cause all variables at this
level, and all sublevels, to conflict. Do make certain that a
variable conflicts with itself. */
PR24561), but don't do so for always_inline functions, functions
declared inline and nested functions. These was optimized out
in the original implementation and it is unclear whether we want
- to change the behaviour here. */
+ to change the behavior here. */
if (((TREE_PUBLIC (decl)
|| (!optimize && !node->local.disregard_inline_limits
&& !DECL_DECLARED_INLINE_P (decl)
/* Encode the current state of the #pragma [no_]long_calls. */
typedef enum
{
- OFF, /* No #pramgma [no_]long_calls is in effect. */
+ OFF, /* No #pragma [no_]long_calls is in effect. */
LONG, /* #pragma long_calls is in effect. */
SHORT /* #pragma no_long_calls is in effect. */
} arm_pragma_enum;
;; Define an attribute to be used by the delay slot code.
-;; An instruction by default is considered to be 'delyabable'
+;; An instruction by default is considered to be 'delayable'
;; that is, it can be placed into a delay slot, but it is not
;; itself a delayed branch type instruction. An instruction
;; whose type is 'delayed' is one which has a delay slot, and
#undef CPP_SPEC
/* Write out the correct language type definition for the header files.
Unless we have assembler language, write out the symbols for C.
- mieee is an Alpha specific variant. Cross polination a bad idea.
+ mieee is an Alpha specific variant. Cross pollination is a bad idea.
*/
#define CPP_SPEC "-remap %{posix:-D_POSIX_SOURCE} \
-isystem %$INTERIX_ROOT/usr/include"
COSTS_N_INSNS (1), /* cost of an add instruction */
/* On all chips taken into consideration lea is 2 cycles and more. With
this cost however our current implementation of synth_mult results in
- use of unnecesary temporary registers causing regression on several
+ use of unnecessary temporary registers causing regression on several
SPECfp benchmarks. */
COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
COSTS_N_INSNS (1), /* variable shift costs */
enum machine_mode mode =
GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
- /* Do not handle DImode compares that go trought special path. Also we can't
+ /* Do not handle DImode compares that go through a special path. Also we can't
deal with FP compares yet. This is possible to add. */
if (mode == (TARGET_64BIT ? TImode : DImode))
return false;
#else
/* It is tempting to use ASM_OUTPUT_ALIGN here, but we don't want to do that.
The align insn is used to avoid 3 jump instructions in the row to improve
- branch prediction and the benefits hardly outweight the cost of extra 8
+ branch prediction and the benefits hardly outweigh the cost of extra 8
nops on the average inserted by full alignment pseudo operation. */
#endif
return "";
defaults and provide options --defsym _start and --defsym _stack
which are required by the SuperH configuration of GNU ld.
- This file is intended to overide sh.h */
+ This file is intended to override sh.h. */
#ifndef _SUPERH_H
/* This header file is used when the vendor name is set to 'superh'.
It configures the compiler for SH5 only and switches the default
endianess to little.
- This file is intended to overide sh.h, superh.h and sh64.h (which
+ This file is intended to override sh.h, superh.h and sh64.h (which
should have been included in that order) */
int mask;
/* If we are not using the EP register on a per-function basis
- then do not allow this optimisation at all. This is to
+ then do not allow this optimization at all. This is to
prevent the use of the SLD/SST instructions which cannot be
guaranteed to work properly due to a hardware bug. */
if (!TARGET_EP)
As for the bit vector problems, there is no interface to give a set of
blocks over with to resolve the iteration. In general, restarting a
dataflow iteration is difficult and expensive. Again, the best way to
-keep the dataflow infomation up to data (if this is really what is
+keep the dataflow information up to date (if this is really what is
needed) it to formulate a problem specific solution.
There are fine grained calls for creating and deleting references from
sparse_invalidated_by call both play this game. */
/* Private data used to compute the solution for this problem. These
- data structures are not accessable outside of this module. */
+ data structures are not accessible outside of this module. */
struct df_ru_problem_data
{
here for the defs. */
/* Private data used to compute the solution for this problem. These
- data structures are not accessable outside of this module. */
+ data structures are not accessible outside of this module. */
struct df_rd_problem_data
{
/* If the number of defs for regnum N is less than
----------------------------------------------------------------------------*/
/* Private data used to compute the solution for this problem. These
- data structures are not accessable outside of this module. */
+ data structures are not accessible outside of this module. */
struct df_urec_problem_data
{
bool earlyclobbers_found; /* True if any instruction contains an
df_ri_dump, /* Debugging. */
/* Technically this is only dependent on the live registers problem
- but it will produce infomation if built one of uninitialized
+ but it will produce information if built one of uninitialized
register problems (UR, UREC) is also run. */
df_lr_add_problem, /* Dependent problem. */
0 /* Changeable flags. */
/* The pool to allocate the block_info from. */
alloc_pool block_pool;
- /* Problem specific control infomation. */
+ /* Problem specific control information. */
/* Scanning flags. */
#define DF_HARD_REGS 1 /* Mark hard registers. */
sparse_kill, each register gets a slot and a 1 in this bitvector
means that all of the uses of that register are killed. This is
a very useful efficiency hack in that it keeps from having push
- around big groups of 1s. This is implemened by the
+ around big groups of 1s. This is implemented by the
bitmap_clear_range call. */
bitmap kill;
on load and link times of a DSO as it massively reduces the size of the
dynamic export table when the library makes heavy use of templates.
-The behaviour of this switch is not quite the same as marking the
+The behavior of this switch is not quite the same as marking the
methods as hidden directly. Normally if there is a class with default
visibility which has a hidden method, the effect of this is that the
method must be defined in only one shared object. This switch does
been applied. For instance @code{\u207F}, ``SUPERSCRIPT LATIN SMALL
LETTER N'', will display just like a regular @code{n} which has been
placed in a superscript. ISO 10646 defines the @dfn{NFKC}
-normalisation scheme to convert all these into a standard form as
+normalization scheme to convert all these into a standard form as
well, and GCC will warn if your code is not in NFKC if you use
@option{-Wnormalized=nfkc}. This warning is comparable to warning
about every identifier that contains the letter O because it might be
@itemx (ss_neg:@var{m} @var{x})
These two expressions represent the negation (subtraction from zero) of
the value represented by @var{x}, carried out in mode @var{m}. They
-differ in the behaviour on overflow of integer modes. In the case of
+differ in the behavior on overflow of integer modes. In the case of
@code{neg}, the negation of the operand may be a number not representable
in mode @var{m}, in which case it is truncated to @var{m}. @code{ss_neg}
ensures that an out-of-bounds result saturates to the maximum or minimum
@itemx (ss_ashift:@var{m} @var{x} @var{c})
These two expressions represent the result of arithmetically shifting @var{x}
left by @var{c} places. They differ in their behavior on overflow of integer
-modes. An @code{ashift} operation is a plain shift with no special behaviour
+modes. An @code{ashift} operation is a plain shift with no special behavior
in case of a change in the sign bit; @code{ss_ashift} saturates to the minimum
or maximum representable value if any of the bits shifted out differs from the
final sign bit.
@defmac FRAME_POINTER_CFA_OFFSET (@var{fundecl})
If defined, a C expression whose value is an integer giving the offset
in bytes from the frame pointer to the canonical frame address (cfa).
-The final value should conincide with that calculated by
+The final value should coincide with that calculated by
@code{INCOMING_FRAME_SP_OFFSET}.
Normally the CFA is calculated as an offset from the argument pointer,
/* Output a reference from an exception table to the type_info object TYPE.
- TT_FORMAT and TT_FORMAT_SIZE descibe the DWARF encoding method used for
+ TT_FORMAT and TT_FORMAT_SIZE describe the DWARF encoding method used for
the value. */
static void
}
/* Obtain the current length of an insn. If branch shortening has been done,
- get its actual length. Otherwise, use FALLBACK_FN to calcualte the
+ get its actual length. Otherwise, use FALLBACK_FN to calculate the
length. */
static inline int
get_attr_length_1 (rtx insn ATTRIBUTE_UNUSED,
spec_info->weakness_cutoff =
(PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
else
- /* So we won't read anything accidently. */
+ /* So we won't read anything accidentally. */
spec_info = 0;
#ifdef ENABLE_CHECKING
check_sched_flags ();
#endif
}
else
- /* So we won't read anything accidently. */
+ /* So we won't read anything accidentally. */
spec_info = 0;
/* Initialize issue_rate. */
4. Multiply the composed transformation matrix times the matrix form of the
loop.
5. Transform the newly created matrix (from step 4) back into a loop nest
- using fourier motzkin elimination to figure out the bounds. */
+ using Fourier-Motzkin elimination to figure out the bounds. */
static lambda_loopnest
lambda_compute_auxillary_space (lambda_loopnest nest,
lambda_matrix_add_mc (B, 1, B1, -1, B1, size, invariants);
/* Now compute the auxiliary space bounds by first inverting U, multiplying
- it by A1, then performing fourier motzkin. */
+ it by A1, then performing Fourier-Motzkin. */
invertedtrans = lambda_matrix_new (depth, depth);
/* Defined for L_clz. Exported here because some targets may want to use
it for their own versions of the __clz builtins. It contains the bit
position of the first set bit for the numbers 0 - 255. This avoids the
- need for a seperate table for the __ctz builtins. */
+ need for a separate table for the __ctz builtins. */
extern const UQItype __clz_tab[256];
#include "longlong.h"
if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
return true;
- /* We can only use copy-in/copy-out semantics for shared varibles
+ /* We can only use copy-in/copy-out semantics for shared variables
when we know the value is not accessible from an outer scope. */
if (shared_p)
{
This is not needed. Consider, for instance conversion from SFmode
into DImode.
- The hot path trought the code is dealing with inputs smaller than 2^63
+ The hot path through the code is dealing with inputs smaller than 2^63
and doing just the conversion, so there is no bits to lose.
In the other path we know the value is positive in the range 2^63..2^64-1
{
/* Predict early returns to be probable, as we've already taken
care for error returns and other cases are often used for
- fast paths trought function. */
+ fast paths through function. */
if (e->dest == EXIT_BLOCK_PTR
&& TREE_CODE (last_stmt (bb)) == RETURN_EXPR
&& !single_pred_p (bb))
GET_MODE (orig_op1))));
}
/* Plus in the index register may be created only as a result of
- register remateralization for expression like &localvar*4. Reload it.
+ register rematerialization for expression like &localvar*4. Reload it.
It may be possible to combine the displacement on the outer level,
but it is probably not worthwhile to do so. */
if (context == 1)
/* Use this iterator in combination with FOR_EACH_IMM_USE_STMT to
- get access to each occurence of ssavar on the stmt returned by
+ get access to each occurrence of ssavar on the stmt returned by
that iterator.. for instance:
FOR_EACH_IMM_USE_STMT (stmt, iter, var)
leader_match = leader;
/* The tree_* cfg manipulation routines use the PENDING_EDGE field
- for various PHI manipulations, so it gets cleared whhen calls are
+ for various PHI manipulations, so it gets cleared when calls are
made to make_forwarder_block(). So make sure the edge is clear,
and use the saved stmt list. */
PENDING_STMT (leader) = NULL;
nothing to the program, then we not only remove it, but we also change
the flow graph so that the current block will simply fall-thru to its
immediate post-dominator. The blocks we are circumventing will be
- removed by cleaup_tree_cfg if this change in the flow graph makes them
+ removed by cleanup_tree_cfg if this change in the flow graph makes them
unreachable. */
if (is_ctrl_stmt (t))
{
bitmap rvuse_gen;
bitmap rvuse_kill;
- /* For actually occuring loads, as long as they occur before all the
+ /* For actually occurring loads, as long as they occur before all the
other stores in the block, we know they are antic at the top of
the block, regardless of RVUSE_KILL. */
value_set_t antic_safe_loads;
bsi_insert_before (&cond_exp_bsi, cond_expr_stmt_list, BSI_SAME_STMT);
}
- /* CHECKME: we wouldn't need this if we calles update_ssa once
+ /* CHECKME: we wouldn't need this if we called update_ssa once
for all loops. */
bitmap_zero (vect_vnames_to_rename);