+2005-12-16 Kazu Hirata <kazu@codesourcery.com>
+
+ * basic-block.h, config/i386/winnt.c, config/pa/pa.c,
+ config/s390/s390.c, dfp.c, expr.c, fold-const.c, params.def,
+ reload.c, struct-equiv.c, tree-ssa-ccp.c, tree-ssa-pre.c,
+ tree-ssa-reassoc.c, tree-ssa-structalias.c: Fix comment typos.
+ * doc/invoke.texi, doc/tm.texi: Fix typos.
+
2005-12-16 Ben Elliston <bje@au.ibm.com>
* real.c (decimal_quad_format): Correct values for emin, emax.
NEED_RERUN is set. This has to be tested by the caller to re-run
the comparison if the match appears otherwise sound. The state kept in
x_start, y_start, equiv_used and check_input_conflict ensures that
- we won't loop indefinetly. */
+ we won't loop indefinitely. */
bool need_rerun;
/* If there is indication of an input conflict at the end,
CHECK_INPUT_CONFLICT is set so that we'll check for input conflicts
that are being compared. A final jump insn will not be included. */
rtx x_end, y_end;
- /* If we are matching tablejumps, X_LABEL in X_BLOCK coresponds to
+ /* If we are matching tablejumps, X_LABEL in X_BLOCK corresponds to
Y_LABEL in Y_BLOCK. */
rtx x_label, y_label;
/* The DECL_DLLIMPORT_P flag was set for decls in the class definition
by targetm.cxx.adjust_class_at_definition. Check again to emit
- warnings if the class attribute has been overriden by an
+ warnings if the class attribute has been overridden by an
out-of-class definition. */
if (associated_type (decl)
&& lookup_attribute ("dllimport",
/* If we have some deferred plabels, then we need to switch into the
data or readonly data section, and align it to a 4 byte boundary
- before outputing the deferred plabels. */
+ before outputting the deferred plabels. */
if (n_deferred_plabels)
{
switch_to_section (flag_pic ? data_section : readonly_data_section);
rtx shift; /* Bit offset with regard to lsb. */
rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
rtx modemaski; /* ~modemask */
- bool aligned; /* True if memory is aliged, false else. */
+ bool aligned; /* True if memory is aligned, false else. */
};
/* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
}
/* Helper function to real.c:do_compare() to handle decimal internal
- represenation including when one of the operands is still in the
+ representation including when one of the operands is still in the
binary internal representation. */
int
large units consisting of small inlinable functions however the overall unit
growth limit is needed to avoid exponential explosion of code size. Thus for
smaller units, the size is increased to @option{--param large-unit-insns}
-before aplying @option{--param inline-unit-growth}. The default is 10000
+before applying @option{--param inline-unit-growth}. The default is 10000
@item inline-unit-growth
Specifies maximal overall growth of the compilation unit caused by inlining.
Normally the CFA is calculated as an offset from the argument pointer,
via @code{ARG_POINTER_CFA_OFFSET}, but if the argument pointer is
variable due to the ABI, this may not be possible. If this macro is
-defined, it imples that the virtual register instantiation should be
+defined, it implies that the virtual register instantiation should be
based on the frame pointer instead of the argument pointer. Only one
of @code{FRAME_POINTER_CFA_OFFSET} and @code{ARG_POINTER_CFA_OFFSET}
should be defined.
else
comparison_code = unsignedp ? LEU : LE;
- /* Canonicalize to comparsions against 0. */
+ /* Canonicalize to comparisons against 0. */
if (op1 == const1_rtx)
{
/* Converting (a >= 1 ? a : 1) into (a > 0 ? a : 1)
if (TREE_TYPE (op0) == type)
return op0;
- /* If we have (type) (a CMP b) and type is an integal type, return
+ /* If we have (type) (a CMP b) and type is an integral type, return
new expression involving the new type. */
if (COMPARISON_CLASS_P (op0) && INTEGRAL_TYPE_P (type))
return fold_build2 (TREE_CODE (op0), type, TREE_OPERAND (op0, 0),
}
/* Optimize tan(x)/sin(x) as 1.0/cos(x) if we don't care about
- NaNs or Infintes. */
+ NaNs or Infinities. */
if (((fcode0 == BUILT_IN_TAN && fcode1 == BUILT_IN_SIN)
|| (fcode0 == BUILT_IN_TANF && fcode1 == BUILT_IN_SINF)
|| (fcode0 == BUILT_IN_TANL && fcode1 == BUILT_IN_SINL)))
the other loops cold that is not usually the case. So we need to artificially
flatten the profile.
- We need to cut the maximal predicted iterations to large enought iterations
+ We need to cut the maximal predicted iterations to large enough iterations
so the loop appears important, but safely within HOT_BB_COUNT_FRACTION
range. */
gcc_assert (insn_data[(int) icode].n_operands == 3);
/* ??? We currently have no way to represent a reload that needs
- an icode to reload from an intermediate tertiaty reload register.
+ an icode to reload from an intermediate tertiary reload register.
We should probably have a new field in struct reload to tag a
chain of scratch operand reloads onto. */
gcc_assert (class == NO_REGS);
the number of inputs and miss an input conflict. Sufficient information
is gathered so that when we make another pass, we won't have to backtrack
at the same point.
- Another issue is that information in memory atttributes and/or REG_NOTES
+ Another issue is that information in memory attributes and/or REG_NOTES
might have to be merged or discarded to make a valid match. We don't want
to discard such information when we are not certain that we want to merge
the two (partial) blocks.
SECONDARY_MEMORY_NEEDED, cannot be done directly. For our purposes, we
consider them impossible to generate after reload (even though some
might be synthesized when you throw enough code at them).
- Since we don't know while procesing a cross-jump if a local register
+ Since we don't know while processing a cross-jump if a local register
that is currently live will eventually be live and thus be an input,
we keep track of potential inputs that would require an impossible move
by using a prohibitively high cost for them.
}
/* In SET, assign the bit for the register number of REG the value VALUE.
- If REG is a hard register, do so for all its consituent registers.
+ If REG is a hard register, do so for all its constituent registers.
Return the number of registers that have become included (as a positive
number) or excluded (as a negative number). */
static int
if (mode & STRUCT_EQUIV_MATCH_JUMPS)
{
- /* The caller is expected to have comapred the jumps already, but we
+ /* The caller is expected to have compared the jumps already, but we
need to match them again to get any local registers and inputs. */
gcc_assert (!info->cur.x_start == !info->cur.y_start);
if (info->cur.x_start)
/* The regular is_gimple_min_invariant does a shallow test of the object.
It assumes that full gimplification has happened, or will happen on the
object. For a value coming from DECL_INITIAL, this is not true, so we
- have to be more strict outselves. */
+ have to be more strict ourselves. */
static bool
ccp_decl_initial_min_invariant (tree t)
bitmap_copy (dest->values, orig->values);
}
-/* Perform bitmapped set rperation DEST &= ORIG. */
+/* Perform bitmapped set operation DEST &= ORIG. */
static void
bitmap_set_and (bitmap_set_t dest, bitmap_set_t orig)
In order to promote the most redundancy elimination, you want
binary expressions whose operands are the same rank (or
- preferrably, the same value) exposed to the redundancy eliminator,
+ preferably, the same value) exposed to the redundancy eliminator,
for possible elimination.
So the way to do this if we really cared, is to build the new op
gcc_assert (found);
}
- /* Assign all the passed arguments to the approriate incoming
+ /* Assign all the passed arguments to the appropriate incoming
parameters of the function. */
fi = get_varinfo (varid);
arglist = TREE_OPERAND (rhsop, 1);
arg = DECL_ARGUMENTS (decl);
- /* Set up varirables for each argument. */
+ /* Set up variables for each argument. */
for (i = 1; i < vi->fullsize; i++)
{
varinfo_t argvi;