| 1 | --- a/config.sub |
| 2 | +++ b/config.sub |
| 3 | @@ -239,7 +239,7 @@ case $basic_machine in |
| 4 | | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ |
| 5 | | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ |
| 6 | | am33_2.0 \ |
| 7 | - | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr \ |
| 8 | + | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \ |
| 9 | | bfin \ |
| 10 | | c4x | clipper \ |
| 11 | | d10v | d30v | dlx | dsp16xx \ |
| 12 | @@ -316,7 +316,7 @@ case $basic_machine in |
| 13 | | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ |
| 14 | | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ |
| 15 | | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ |
| 16 | - | avr-* \ |
| 17 | + | avr-* | avr32-* \ |
| 18 | | bfin-* | bs2000-* \ |
| 19 | | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \ |
| 20 | | clipper-* | craynv-* | cydra-* \ |
| 21 | --- a/configure.in |
| 22 | +++ b/configure.in |
| 23 | @@ -497,6 +497,9 @@ case "${target}" in |
| 24 | arm-*-riscix*) |
| 25 | noconfigdirs="$noconfigdirs ld target-libgloss ${libgcj}" |
| 26 | ;; |
| 27 | + avr32-*-*) |
| 28 | + noconfigdirs="$noconfigdirs target-libiberty target-libmudflap target-libffi ${libgcj}" |
| 29 | + ;; |
| 30 | avr-*-*) |
| 31 | noconfigdirs="$noconfigdirs target-libiberty target-libstdc++-v3 ${libgcj}" |
| 32 | ;; |
| 33 | --- a/gcc/builtins.c |
| 34 | +++ b/gcc/builtins.c |
| 35 | @@ -9228,7 +9228,7 @@ validate_arglist (tree arglist, ...) |
| 36 | |
| 37 | do |
| 38 | { |
| 39 | - code = va_arg (ap, enum tree_code); |
| 40 | + code = va_arg (ap, int); |
| 41 | switch (code) |
| 42 | { |
| 43 | case 0: |
| 44 | --- a/gcc/calls.c |
| 45 | +++ b/gcc/calls.c |
| 46 | @@ -3434,7 +3434,7 @@ emit_library_call_value_1 (int retval, r |
| 47 | for (; count < nargs; count++) |
| 48 | { |
| 49 | rtx val = va_arg (p, rtx); |
| 50 | - enum machine_mode mode = va_arg (p, enum machine_mode); |
| 51 | + enum machine_mode mode = va_arg (p, int); |
| 52 | |
| 53 | /* We cannot convert the arg value to the mode the library wants here; |
| 54 | must do it earlier where we know the signedness of the arg. */ |
| 55 | --- /dev/null |
| 56 | +++ b/gcc/config/avr32/avr32.c |
| 57 | @@ -0,0 +1,7273 @@ |
| 58 | +/* |
| 59 | + Target hooks and helper functions for AVR32. |
| 60 | + Copyright 2003-2006 Atmel Corporation. |
| 61 | + |
| 62 | + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| 63 | + Initial porting by Anders �dland. |
| 64 | + |
| 65 | + This file is part of GCC. |
| 66 | + |
| 67 | + This program is free software; you can redistribute it and/or modify |
| 68 | + it under the terms of the GNU General Public License as published by |
| 69 | + the Free Software Foundation; either version 2 of the License, or |
| 70 | + (at your option) any later version. |
| 71 | + |
| 72 | + This program is distributed in the hope that it will be useful, |
| 73 | + but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 74 | + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 75 | + GNU General Public License for more details. |
| 76 | + |
| 77 | + You should have received a copy of the GNU General Public License |
| 78 | + along with this program; if not, write to the Free Software |
| 79 | + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ |
| 80 | + |
| 81 | +#include "config.h" |
| 82 | +#include "system.h" |
| 83 | +#include "coretypes.h" |
| 84 | +#include "tm.h" |
| 85 | +#include "rtl.h" |
| 86 | +#include "tree.h" |
| 87 | +#include "obstack.h" |
| 88 | +#include "regs.h" |
| 89 | +#include "hard-reg-set.h" |
| 90 | +#include "real.h" |
| 91 | +#include "insn-config.h" |
| 92 | +#include "conditions.h" |
| 93 | +#include "output.h" |
| 94 | +#include "insn-attr.h" |
| 95 | +#include "flags.h" |
| 96 | +#include "reload.h" |
| 97 | +#include "function.h" |
| 98 | +#include "expr.h" |
| 99 | +#include "optabs.h" |
| 100 | +#include "toplev.h" |
| 101 | +#include "recog.h" |
| 102 | +#include "ggc.h" |
| 103 | +#include "except.h" |
| 104 | +#include "c-pragma.h" |
| 105 | +#include "integrate.h" |
| 106 | +#include "tm_p.h" |
| 107 | +#include "langhooks.h" |
| 108 | + |
| 109 | +#include "target.h" |
| 110 | +#include "target-def.h" |
| 111 | + |
| 112 | +#include <ctype.h> |
| 113 | + |
| 114 | +/* Forward definitions of types. */ |
| 115 | +typedef struct minipool_node Mnode; |
| 116 | +typedef struct minipool_fixup Mfix; |
| 117 | + |
| 118 | +/* Obstack for minipool constant handling. */ |
| 119 | +static struct obstack minipool_obstack; |
| 120 | +static char *minipool_startobj; |
| 121 | +static rtx minipool_vector_label; |
| 122 | + |
| 123 | +/* True if we are currently building a constant table. */ |
| 124 | +int making_const_table; |
| 125 | + |
| 126 | +/* Some forward function declarations */ |
| 127 | +static unsigned long avr32_isr_value (tree); |
| 128 | +static unsigned long avr32_compute_func_type (void); |
| 129 | +static tree avr32_handle_isr_attribute (tree *, tree, tree, int, bool *); |
| 130 | +static tree avr32_handle_acall_attribute (tree *, tree, tree, int, bool *); |
| 131 | +static tree avr32_handle_fndecl_attribute (tree * node, tree name, tree args, |
| 132 | + int flags, bool * no_add_attrs); |
| 133 | +static void avr32_reorg (void); |
| 134 | +bool avr32_return_in_msb (tree type); |
| 135 | +bool avr32_vector_mode_supported (enum machine_mode mode); |
| 136 | +static void avr32_init_libfuncs (void); |
| 137 | +void avr32_load_pic_register (void); |
| 138 | + |
| 139 | + |
| 140 | +static void |
| 141 | +avr32_add_gc_roots (void) |
| 142 | +{ |
| 143 | + gcc_obstack_init (&minipool_obstack); |
| 144 | + minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0); |
| 145 | +} |
| 146 | + |
| 147 | + |
| 148 | +/* List of all known AVR32 parts */ |
| 149 | +static const struct part_type_s avr32_part_types[] = { |
| 150 | + /* name, part_type, architecture type, macro */ |
| 151 | + {"none", PART_TYPE_AVR32_NONE, ARCH_TYPE_AVR32_AP, "__AVR32__"}, |
| 152 | + {"ap7000", PART_TYPE_AVR32_AP7000, ARCH_TYPE_AVR32_AP, "__AVR32_AP7000__"}, |
| 153 | + {"ap7010", PART_TYPE_AVR32_AP7010, ARCH_TYPE_AVR32_AP, "__AVR32_AP7010__"}, |
| 154 | + {"ap7020", PART_TYPE_AVR32_AP7020, ARCH_TYPE_AVR32_AP, "__AVR32_AP7020__"}, |
| 155 | + {"uc3a0256", PART_TYPE_AVR32_UC3A0256, ARCH_TYPE_AVR32_UC, "__AVR32_UC3A0256__"}, |
| 156 | + {"uc3a0512", PART_TYPE_AVR32_UC3A0512, ARCH_TYPE_AVR32_UC, "__AVR32_UC3A0512__"}, |
| 157 | + {"uc3a1128", PART_TYPE_AVR32_UC3A1128, ARCH_TYPE_AVR32_UC, "__AVR32_UC3A1128__"}, |
| 158 | + {"uc3a1256", PART_TYPE_AVR32_UC3A1256, ARCH_TYPE_AVR32_UC, "__AVR32_UC3A1256__"}, |
| 159 | + {"uc3a1512", PART_TYPE_AVR32_UC3A1512, ARCH_TYPE_AVR32_UC, "__AVR32_UC3A1512__"}, |
| 160 | + {NULL, 0, 0, NULL} |
| 161 | +}; |
| 162 | + |
| 163 | +/* List of all known AVR32 architectures */ |
| 164 | +static const struct arch_type_s avr32_arch_types[] = { |
| 165 | + /* name, architecture type, microarchitecture type, feature flags, macro */ |
| 166 | + {"ap", ARCH_TYPE_AVR32_AP, UARCH_TYPE_AVR32B, FLAG_AVR32_HAS_DSP | |
| 167 | + FLAG_AVR32_HAS_SIMD | FLAG_AVR32_HAS_UNALIGNED_WORD | |
| 168 | + FLAG_AVR32_HAS_BRANCH_PRED, "__AVR32_AP__"}, |
| 169 | + {"uc", ARCH_TYPE_AVR32_UC, UARCH_TYPE_AVR32A, |
| 170 | + FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW, "__AVR32_UC__"}, |
| 171 | + {NULL, 0, 0, 0, NULL} |
| 172 | +}; |
| 173 | + |
| 174 | +/* Default arch name */ |
| 175 | +const char *avr32_arch_name = "ap"; |
| 176 | +const char *avr32_part_name = "none"; |
| 177 | + |
| 178 | +const struct part_type_s *avr32_part; |
| 179 | +const struct arch_type_s *avr32_arch; |
| 180 | + |
| 181 | + |
| 182 | +/* Override command line options */ |
| 183 | +void |
| 184 | +avr32_override_options (void) |
| 185 | +{ |
| 186 | + const struct part_type_s *part; |
| 187 | + const struct arch_type_s *arch; |
| 188 | + |
| 189 | + /* Check if part type is set. */ |
| 190 | + for (part = avr32_part_types; part->name; part++) |
| 191 | + if (strcmp (part->name, avr32_part_name) == 0) |
| 192 | + break; |
| 193 | + |
| 194 | + avr32_part = part; |
| 195 | + |
| 196 | + if (!part->name) |
| 197 | + { |
| 198 | + fprintf (stderr, "Unknown part `%s' specified\nKnown part names:\n", |
| 199 | + avr32_part_name); |
| 200 | + for (part = avr32_part_types; part->name; part++) |
| 201 | + fprintf (stderr, "\t%s\n", part->name); |
| 202 | + avr32_part = &avr32_part_types[PART_TYPE_AVR32_NONE]; |
| 203 | + } |
| 204 | + |
| 205 | + avr32_arch = &avr32_arch_types[avr32_part->arch_type]; |
| 206 | + |
| 207 | + /* If part was set to "none" then check if arch was set. */ |
| 208 | + if (strcmp (avr32_part->name, "none") == 0) |
| 209 | + { |
| 210 | + /* Check if arch type is set. */ |
| 211 | + for (arch = avr32_arch_types; arch->name; arch++) |
| 212 | + if (strcmp (arch->name, avr32_arch_name) == 0) |
| 213 | + break; |
| 214 | + |
| 215 | + avr32_arch = arch; |
| 216 | + |
| 217 | + if (!arch->name) |
| 218 | + { |
| 219 | + fprintf (stderr, "Unknown arch `%s' specified\nKnown arch names:\n", |
| 220 | + avr32_arch_name); |
| 221 | + for (arch = avr32_arch_types; arch->name; arch++) |
| 222 | + fprintf (stderr, "\t%s\n", arch->name); |
| 223 | + avr32_arch = &avr32_arch_types[ARCH_TYPE_AVR32_AP]; |
| 224 | + } |
| 225 | + } |
| 226 | + |
| 227 | + /* If optimization level is two or greater, then align start of loops to a |
| 228 | + word boundary since this will allow folding the first insn of the loop. |
| 229 | + Do this only for targets supporting branch prediction. */ |
| 230 | + if (optimize >= 2 && TARGET_BRANCH_PRED) |
| 231 | + align_loops = 2; |
| 232 | + |
| 233 | + if (AVR32_ALWAYS_PIC) |
| 234 | + flag_pic = 1; |
| 235 | + |
| 236 | + if (TARGET_NO_PIC) |
| 237 | + flag_pic = 0; |
| 238 | + |
| 239 | + avr32_add_gc_roots (); |
| 240 | +} |
| 241 | + |
| 242 | + |
| 243 | +/* |
| 244 | +If defined, a function that outputs the assembler code for entry to a |
| 245 | +function. The prologue is responsible for setting up the stack frame, |
| 246 | +initializing the frame pointer register, saving registers that must be |
| 247 | +saved, and allocating size additional bytes of storage for the |
| 248 | +local variables. size is an integer. file is a stdio |
| 249 | +stream to which the assembler code should be output. |
| 250 | + |
| 251 | +The label for the beginning of the function need not be output by this |
| 252 | +macro. That has already been done when the macro is run. |
| 253 | + |
| 254 | +To determine which registers to save, the macro can refer to the array |
| 255 | +regs_ever_live: element r is nonzero if hard register |
| 256 | +r is used anywhere within the function. This implies the function |
| 257 | +prologue should save register r, provided it is not one of the |
| 258 | +call-used registers. (TARGET_ASM_FUNCTION_EPILOGUE must likewise use |
| 259 | +regs_ever_live.) |
| 260 | + |
| 261 | +On machines that have ``register windows'', the function entry code does |
| 262 | +not save on the stack the registers that are in the windows, even if |
| 263 | +they are supposed to be preserved by function calls; instead it takes |
| 264 | +appropriate steps to ``push'' the register stack, if any non-call-used |
| 265 | +registers are used in the function. |
| 266 | + |
| 267 | +On machines where functions may or may not have frame-pointers, the |
| 268 | +function entry code must vary accordingly; it must set up the frame |
| 269 | +pointer if one is wanted, and not otherwise. To determine whether a |
| 270 | +frame pointer is wanted, the macro can refer to the variable |
| 271 | +frame_pointer_needed. The variable's value will be 1 at run |
| 272 | +time in a function that needs a frame pointer. (see Elimination). |
| 273 | + |
| 274 | +The function entry code is responsible for allocating any stack space |
| 275 | +required for the function. This stack space consists of the regions |
| 276 | +listed below. In most cases, these regions are allocated in the |
| 277 | +order listed, with the last listed region closest to the top of the |
| 278 | +stack (the lowest address if STACK_GROWS_DOWNWARD is defined, and |
| 279 | +the highest address if it is not defined). You can use a different order |
| 280 | +for a machine if doing so is more convenient or required for |
| 281 | +compatibility reasons. Except in cases where required by standard |
| 282 | +or by a debugger, there is no reason why the stack layout used by GCC |
| 283 | +need agree with that used by other compilers for a machine. |
| 284 | +*/ |
| 285 | + |
| 286 | +#undef TARGET_ASM_FUNCTION_PROLOGUE |
| 287 | +#define TARGET_ASM_FUNCTION_PROLOGUE avr32_target_asm_function_prologue |
| 288 | + |
| 289 | + |
| 290 | +#undef TARGET_DEFAULT_SHORT_ENUMS |
| 291 | +#define TARGET_DEFAULT_SHORT_ENUMS hook_bool_void_false |
| 292 | + |
| 293 | +#undef TARGET_PROMOTE_FUNCTION_ARGS |
| 294 | +#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true |
| 295 | + |
| 296 | +#undef TARGET_PROMOTE_FUNCTION_RETURN |
| 297 | +#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true |
| 298 | + |
| 299 | +#undef TARGET_PROMOTE_PROTOTYPES |
| 300 | +#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true |
| 301 | + |
| 302 | +#undef TARGET_MUST_PASS_IN_STACK |
| 303 | +#define TARGET_MUST_PASS_IN_STACK avr32_must_pass_in_stack |
| 304 | + |
| 305 | +#undef TARGET_PASS_BY_REFERENCE |
| 306 | +#define TARGET_PASS_BY_REFERENCE avr32_pass_by_reference |
| 307 | + |
| 308 | +#undef TARGET_STRICT_ARGUMENT_NAMING |
| 309 | +#define TARGET_STRICT_ARGUMENT_NAMING avr32_strict_argument_naming |
| 310 | + |
| 311 | +#undef TARGET_VECTOR_MODE_SUPPORTED_P |
| 312 | +#define TARGET_VECTOR_MODE_SUPPORTED_P avr32_vector_mode_supported |
| 313 | + |
| 314 | +#undef TARGET_RETURN_IN_MEMORY |
| 315 | +#define TARGET_RETURN_IN_MEMORY avr32_return_in_memory |
| 316 | + |
| 317 | +#undef TARGET_RETURN_IN_MSB |
| 318 | +#define TARGET_RETURN_IN_MSB avr32_return_in_msb |
| 319 | + |
| 320 | +#undef TARGET_ARG_PARTIAL_BYTES |
| 321 | +#define TARGET_ARG_PARTIAL_BYTES avr32_arg_partial_bytes |
| 322 | + |
| 323 | +#undef TARGET_STRIP_NAME_ENCODING |
| 324 | +#define TARGET_STRIP_NAME_ENCODING avr32_strip_name_encoding |
| 325 | + |
| 326 | +#define streq(string1, string2) (strcmp (string1, string2) == 0) |
| 327 | + |
| 328 | +#undef TARGET_ATTRIBUTE_TABLE |
| 329 | +#define TARGET_ATTRIBUTE_TABLE avr32_attribute_table |
| 330 | + |
| 331 | +#undef TARGET_COMP_TYPE_ATTRIBUTES |
| 332 | +#define TARGET_COMP_TYPE_ATTRIBUTES avr32_comp_type_attributes |
| 333 | + |
| 334 | + |
| 335 | +#undef TARGET_RTX_COSTS |
| 336 | +#define TARGET_RTX_COSTS avr32_rtx_costs |
| 337 | + |
| 338 | +#undef TARGET_CANNOT_FORCE_CONST_MEM |
| 339 | +#define TARGET_CANNOT_FORCE_CONST_MEM avr32_cannot_force_const_mem |
| 340 | + |
| 341 | +#undef TARGET_ASM_INTEGER |
| 342 | +#define TARGET_ASM_INTEGER avr32_assemble_integer |
| 343 | + |
| 344 | +/* |
| 345 | + * Switches to the appropriate section for output of constant pool |
| 346 | + * entry x in mode. You can assume that x is some kind of constant in |
| 347 | + * RTL. The argument mode is redundant except in the case of a |
| 348 | + * const_int rtx. Select the section by calling readonly_data_section |
| 349 | + * or one of the alternatives for other sections. align is the |
| 350 | + * constant alignment in bits. |
| 351 | + * |
| 352 | + * The default version of this function takes care of putting symbolic |
| 353 | + * constants in flag_pic mode in data_section and everything else in |
| 354 | + * readonly_data_section. |
| 355 | + */ |
| 356 | +#undef TARGET_ASM_SELECT_RTX_SECTION |
| 357 | +#define TARGET_ASM_SELECT_RTX_SECTION avr32_select_rtx_section |
| 358 | + |
| 359 | + |
| 360 | +/* |
| 361 | + * If non-null, this hook performs a target-specific pass over the |
| 362 | + * instruction stream. The compiler will run it at all optimization |
| 363 | + * levels, just before the point at which it normally does |
| 364 | + * delayed-branch scheduling. |
| 365 | + * |
| 366 | + * The exact purpose of the hook varies from target to target. Some |
| 367 | + * use it to do transformations that are necessary for correctness, |
| 368 | + * such as laying out in-function constant pools or avoiding hardware |
| 369 | + * hazards. Others use it as an opportunity to do some |
| 370 | + * machine-dependent optimizations. |
| 371 | + * |
| 372 | + * You need not implement the hook if it has nothing to do. The |
| 373 | + * default definition is null. |
| 374 | + */ |
| 375 | +#undef TARGET_MACHINE_DEPENDENT_REORG |
| 376 | +#define TARGET_MACHINE_DEPENDENT_REORG avr32_reorg |
| 377 | + |
| 378 | +/* Target hook for assembling integer objects. |
| 379 | + Need to handle integer vectors */ |
| 380 | +static bool |
| 381 | +avr32_assemble_integer (rtx x, unsigned int size, int aligned_p) |
| 382 | +{ |
| 383 | + if (avr32_vector_mode_supported (GET_MODE (x))) |
| 384 | + { |
| 385 | + int i, units; |
| 386 | + |
| 387 | + if (GET_CODE (x) != CONST_VECTOR) |
| 388 | + abort (); |
| 389 | + |
| 390 | + units = CONST_VECTOR_NUNITS (x); |
| 391 | + |
| 392 | + switch (GET_MODE (x)) |
| 393 | + { |
| 394 | + case V2HImode: |
| 395 | + size = 2; |
| 396 | + break; |
| 397 | + case V4QImode: |
| 398 | + size = 1; |
| 399 | + break; |
| 400 | + default: |
| 401 | + abort (); |
| 402 | + } |
| 403 | + |
| 404 | + for (i = 0; i < units; i++) |
| 405 | + { |
| 406 | + rtx elt; |
| 407 | + |
| 408 | + elt = CONST_VECTOR_ELT (x, i); |
| 409 | + assemble_integer (elt, size, i == 0 ? 32 : size * BITS_PER_UNIT, 1); |
| 410 | + } |
| 411 | + |
| 412 | + return true; |
| 413 | + } |
| 414 | + |
| 415 | + return default_assemble_integer (x, size, aligned_p); |
| 416 | +} |
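As an aside (a sketch, not part of the patch): for a V2HImode constant vector such as { 1, 2 }, the loop above emits the two halfword elements back to back, requesting 32-bit alignment for the first element and the natural 16-bit alignment for the rest, using the halfword directives defined further down in this file.

    /* Hypothetical assembly for a V2HImode constant { 1, 2 }, roughly:
           .align 1
           .short  1
           .short  2
       (the exact .align directives depend on how assemble_integer
       resolves the requested alignment).  */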
| 417 | + |
| 418 | +/* |
| 419 | + * This target hook describes the relative costs of RTL expressions. |
| 420 | + * |
| 421 | + * The cost may depend on the precise form of the expression, which is |
| 422 | + * available for examination in x, and the rtx code of the expression |
| 423 | + * in which it is contained, found in outer_code. code is the |
| 424 | + * expression code--redundant, since it can be obtained with GET_CODE |
| 425 | + * (x). |
| 426 | + * |
| 427 | + * In implementing this hook, you can use the construct COSTS_N_INSNS |
| 428 | + * (n) to specify a cost equal to n fast instructions. |
| 429 | + * |
| 430 | + * On entry to the hook, *total contains a default estimate for the |
| 431 | + * cost of the expression. The hook should modify this value as |
| 432 | + * necessary. Traditionally, the default costs are COSTS_N_INSNS (5) |
| 433 | + * for multiplications, COSTS_N_INSNS (7) for division and modulus |
| 434 | + * operations, and COSTS_N_INSNS (1) for all other operations. |
| 435 | + * |
| 436 | + * When optimizing for code size, i.e. when optimize_size is non-zero, |
| 437 | + * this target hook should be used to estimate the relative size cost |
| 438 | + * of an expression, again relative to COSTS_N_INSNS. |
| 439 | + * |
| 440 | + * The hook returns true when all subexpressions of x have been |
| 441 | + * processed, and false when rtx_cost should recurse. |
| 442 | + */ |
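For orientation (an aside, not part of the patch): COSTS_N_INSNS (n) is defined in GCC's rtl.h as n * 4, so the values returned by the worker routine below translate to raw rtx costs as shown in this sketch.

    /* Assuming COSTS_N_INSNS (N) == (N) * 4, as in rtl.h:
         COSTS_N_INSNS (1)  ->  4   e.g. an SImode PLUS
         COSTS_N_INSNS (3)  -> 12   e.g. a word-sized MEM
         COSTS_N_INSNS (16) -> 64   e.g. DIV when optimizing for speed  */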
| 443 | + |
| 444 | +/* Worker routine for avr32_rtx_costs. */ |
| 445 | +static inline int |
| 446 | +avr32_rtx_costs_1 (rtx x, enum rtx_code code ATTRIBUTE_UNUSED, |
| 447 | + enum rtx_code outer ATTRIBUTE_UNUSED) |
| 448 | +{ |
| 449 | + enum machine_mode mode = GET_MODE (x); |
| 450 | + |
| 451 | + switch (GET_CODE (x)) |
| 452 | + { |
| 453 | + case MEM: |
| 454 | +      /* Using pre-decrement / post-increment memory operations on the |
| 455 | +         avr32_uc architecture means that two writebacks must be performed, |
| 456 | +         and hence two cycles are needed. */ |
| 457 | + if (!optimize_size |
| 458 | + && GET_MODE_SIZE (mode) <= 2 * UNITS_PER_WORD |
| 459 | + && avr32_arch->arch_type == ARCH_TYPE_AVR32_UC |
| 460 | + && (GET_CODE (XEXP (x, 0)) == PRE_DEC |
| 461 | + || GET_CODE (XEXP (x, 0)) == POST_INC)) |
| 462 | + return COSTS_N_INSNS (4); |
| 463 | + |
| 464 | + /* Memory costs quite a lot for the first word, but subsequent words |
| 465 | + load at the equivalent of a single insn each. */ |
| 466 | + if (GET_MODE_SIZE (mode) > UNITS_PER_WORD) |
| 467 | + return COSTS_N_INSNS (2 + (GET_MODE_SIZE (mode) / UNITS_PER_WORD)); |
| 468 | + |
| 469 | + return COSTS_N_INSNS (3); |
| 470 | + case SYMBOL_REF: |
| 471 | + case CONST: |
| 472 | +    /* These are valid for the pseudo insns: lda.w and call, which operate |
| 473 | + on direct addresses. We assume that the cost of a lda.w is the same |
| 474 | + as the cost of a ld.w insn. */ |
| 475 | + return (outer == SET) ? COSTS_N_INSNS (3) : COSTS_N_INSNS (1); |
| 476 | + case DIV: |
| 477 | + case MOD: |
| 478 | + case UDIV: |
| 479 | + case UMOD: |
| 480 | + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16); |
| 481 | + |
| 482 | + case ROTATE: |
| 483 | + case ROTATERT: |
| 484 | + if (mode == TImode) |
| 485 | + return COSTS_N_INSNS (100); |
| 486 | + |
| 487 | + if (mode == DImode) |
| 488 | + return COSTS_N_INSNS (10); |
| 489 | + return COSTS_N_INSNS (4); |
| 490 | + case ASHIFT: |
| 491 | + case LSHIFTRT: |
| 492 | + case ASHIFTRT: |
| 493 | + case NOT: |
| 494 | + if (mode == TImode) |
| 495 | + return COSTS_N_INSNS (10); |
| 496 | + |
| 497 | + if (mode == DImode) |
| 498 | + return COSTS_N_INSNS (4); |
| 499 | + return COSTS_N_INSNS (1); |
| 500 | + case PLUS: |
| 501 | + case MINUS: |
| 502 | + case NEG: |
| 503 | + case COMPARE: |
| 504 | + case ABS: |
| 505 | + if (GET_MODE_CLASS (mode) == MODE_FLOAT) |
| 506 | + return COSTS_N_INSNS (100); |
| 507 | + |
| 508 | + if (mode == TImode) |
| 509 | + return COSTS_N_INSNS (50); |
| 510 | + |
| 511 | + if (mode == DImode) |
| 512 | + return COSTS_N_INSNS (2); |
| 513 | + return COSTS_N_INSNS (1); |
| 514 | + |
| 515 | + case MULT: |
| 516 | + { |
| 517 | + if (GET_MODE_CLASS (mode) == MODE_FLOAT) |
| 518 | + return COSTS_N_INSNS (300); |
| 519 | + |
| 520 | + if (mode == TImode) |
| 521 | + return COSTS_N_INSNS (16); |
| 522 | + |
| 523 | + if (mode == DImode) |
| 524 | + return COSTS_N_INSNS (4); |
| 525 | + |
| 526 | + if (mode == HImode) |
| 527 | + return COSTS_N_INSNS (2); |
| 528 | + |
| 529 | + return COSTS_N_INSNS (3); |
| 530 | + } |
| 531 | + case IF_THEN_ELSE: |
| 532 | + if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC) |
| 533 | + return COSTS_N_INSNS (4); |
| 534 | + return COSTS_N_INSNS (1); |
| 535 | + case SIGN_EXTEND: |
| 536 | + case ZERO_EXTEND: |
| 537 | +      /* Sign/zero extensions of registers are quite costly since these |
| 538 | +         instructions only take one register operand, which means that gcc |
| 539 | +         often must insert some move instructions.  */ |
| 540 | + if (mode == QImode || mode == HImode) |
| 541 | + return (COSTS_N_INSNS (GET_CODE (XEXP (x, 0)) == MEM ? 0 : 1)); |
| 542 | + return COSTS_N_INSNS (4); |
| 543 | + case UNSPEC: |
| 544 | + /* divmod operations */ |
| 545 | + if (XINT (x, 1) == UNSPEC_UDIVMODSI4_INTERNAL |
| 546 | + || XINT (x, 1) == UNSPEC_DIVMODSI4_INTERNAL) |
| 547 | + { |
| 548 | + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16); |
| 549 | + } |
| 550 | + /* Fallthrough */ |
| 551 | + default: |
| 552 | + return COSTS_N_INSNS (1); |
| 553 | + } |
| 554 | +} |
| 555 | + |
| 556 | +static bool |
| 557 | +avr32_rtx_costs (rtx x, int code, int outer_code, int *total) |
| 558 | +{ |
| 559 | + *total = avr32_rtx_costs_1 (x, code, outer_code); |
| 560 | + return true; |
| 561 | +} |
| 562 | + |
| 563 | + |
| 564 | +bool |
| 565 | +avr32_cannot_force_const_mem (rtx x ATTRIBUTE_UNUSED) |
| 566 | +{ |
| 567 | + /* Do not want symbols in the constant pool when compiling pic or if using |
| 568 | + address pseudo instructions. */ |
| 569 | + return ((flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS) |
| 570 | + && avr32_find_symbol (x) != NULL_RTX); |
| 571 | +} |
| 572 | + |
| 573 | + |
| 574 | +/* Table of machine attributes. */ |
| 575 | +const struct attribute_spec avr32_attribute_table[] = { |
| 576 | + /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */ |
| 577 | + /* Interrupt Service Routines have special prologue and epilogue |
| 578 | + requirements. */ |
| 579 | + {"isr", 0, 1, false, false, false, avr32_handle_isr_attribute}, |
| 580 | + {"interrupt", 0, 1, false, false, false, avr32_handle_isr_attribute}, |
| 581 | + {"acall", 0, 1, false, true, true, avr32_handle_acall_attribute}, |
| 582 | + {"naked", 0, 0, true, false, false, avr32_handle_fndecl_attribute}, |
| 583 | + {NULL, 0, 0, false, false, false, NULL} |
| 584 | +}; |
| 585 | + |
| 586 | + |
| 587 | +typedef struct |
| 588 | +{ |
| 589 | + const char *const arg; |
| 590 | + const unsigned long return_value; |
| 591 | +} |
| 592 | +isr_attribute_arg; |
| 593 | + |
| 594 | +static const isr_attribute_arg isr_attribute_args[] = { |
| 595 | + {"FULL", AVR32_FT_ISR_FULL}, |
| 596 | + {"full", AVR32_FT_ISR_FULL}, |
| 597 | + {"HALF", AVR32_FT_ISR_HALF}, |
| 598 | + {"half", AVR32_FT_ISR_HALF}, |
| 599 | + {"NONE", AVR32_FT_ISR_NONE}, |
| 600 | + {"none", AVR32_FT_ISR_NONE}, |
| 601 | + {"UNDEF", AVR32_FT_ISR_NONE}, |
| 602 | + {"undef", AVR32_FT_ISR_NONE}, |
| 603 | + {"SWI", AVR32_FT_ISR_NONE}, |
| 604 | + {"swi", AVR32_FT_ISR_NONE}, |
| 605 | + {NULL, AVR32_FT_ISR_NONE} |
| 606 | +}; |
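For reference, a minimal sketch (not part of the patch) of how the attributes and ISR arguments above would be spelled in user code:

    /* Illustrative declarations only:  */
    void __attribute__ ((interrupt ("FULL"))) timer_isr (void);  /* "FULL" ISR */
    void __attribute__ ((isr ("half")))       uart_isr (void);   /* "HALF" ISR */
    void __attribute__ ((naked))              _start (void);     /* no prologue/epilogue */
    /* "acall" applies to function types and marks calls made through the
       acall mechanism.  */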
| 607 | + |
| 608 | +/* Returns the (interrupt) function type of the current |
| 609 | + function, or AVR32_FT_UNKNOWN if the type cannot be determined. */ |
| 610 | + |
| 611 | +static unsigned long |
| 612 | +avr32_isr_value (tree argument) |
| 613 | +{ |
| 614 | + const isr_attribute_arg *ptr; |
| 615 | + const char *arg; |
| 616 | + |
| 617 | + /* No argument - default to ISR_NONE. */ |
| 618 | + if (argument == NULL_TREE) |
| 619 | + return AVR32_FT_ISR_NONE; |
| 620 | + |
| 621 | + /* Get the value of the argument. */ |
| 622 | + if (TREE_VALUE (argument) == NULL_TREE |
| 623 | + || TREE_CODE (TREE_VALUE (argument)) != STRING_CST) |
| 624 | + return AVR32_FT_UNKNOWN; |
| 625 | + |
| 626 | + arg = TREE_STRING_POINTER (TREE_VALUE (argument)); |
| 627 | + |
| 628 | + /* Check it against the list of known arguments. */ |
| 629 | + for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++) |
| 630 | + if (streq (arg, ptr->arg)) |
| 631 | + return ptr->return_value; |
| 632 | + |
| 633 | + /* An unrecognized interrupt type. */ |
| 634 | + return AVR32_FT_UNKNOWN; |
| 635 | +} |
| 636 | + |
| 637 | + |
| 638 | + |
| 639 | +/* |
| 640 | +These hooks specify assembly directives for creating certain kinds |
| 641 | +of integer object. The TARGET_ASM_BYTE_OP directive creates a |
| 642 | +byte-sized object, the TARGET_ASM_ALIGNED_HI_OP one creates an |
| 643 | +aligned two-byte object, and so on. Any of the hooks may be |
| 644 | +NULL, indicating that no suitable directive is available. |
| 645 | + |
| 646 | +The compiler will print these strings at the start of a new line, |
| 647 | +followed immediately by the object's initial value. In most cases, |
| 648 | +the string should contain a tab, a pseudo-op, and then another tab. |
| 649 | +*/ |
| 650 | +#undef TARGET_ASM_BYTE_OP |
| 651 | +#define TARGET_ASM_BYTE_OP "\t.byte\t" |
| 652 | +#undef TARGET_ASM_ALIGNED_HI_OP |
| 653 | +#define TARGET_ASM_ALIGNED_HI_OP "\t.align 1\n\t.short\t" |
| 654 | +#undef TARGET_ASM_ALIGNED_SI_OP |
| 655 | +#define TARGET_ASM_ALIGNED_SI_OP "\t.align 2\n\t.int\t" |
| 656 | +#undef TARGET_ASM_ALIGNED_DI_OP |
| 657 | +#define TARGET_ASM_ALIGNED_DI_OP NULL |
| 658 | +#undef TARGET_ASM_ALIGNED_TI_OP |
| 659 | +#define TARGET_ASM_ALIGNED_TI_OP NULL |
| 660 | +#undef TARGET_ASM_UNALIGNED_HI_OP |
| 661 | +#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t" |
| 662 | +#undef TARGET_ASM_UNALIGNED_SI_OP |
| 663 | +#define TARGET_ASM_UNALIGNED_SI_OP "\t.int\t" |
| 664 | +#undef TARGET_ASM_UNALIGNED_DI_OP |
| 665 | +#define TARGET_ASM_UNALIGNED_DI_OP NULL |
| 666 | +#undef TARGET_ASM_UNALIGNED_TI_OP |
| 667 | +#define TARGET_ASM_UNALIGNED_TI_OP NULL |
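A small illustration (not part of the patch) of what these directive strings produce: when the generic output machinery selects the aligned SImode operation for a 32-bit value, the string above is printed followed by the value.

    /* Illustrative output for an aligned 32-bit integer 0x12345678:
           .align 2
           .int    0x12345678
    */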
| 668 | + |
| 669 | +#undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE |
| 670 | +#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE avr32_sched_use_dfa_pipeline_interface |
| 671 | + |
| 672 | +#undef TARGET_ASM_OUTPUT_MI_THUNK |
| 673 | +#define TARGET_ASM_OUTPUT_MI_THUNK avr32_output_mi_thunk |
| 674 | + |
| 675 | + |
| 676 | +static void |
| 677 | +avr32_output_mi_thunk (FILE * file, |
| 678 | + tree thunk ATTRIBUTE_UNUSED, |
| 679 | + HOST_WIDE_INT delta, |
| 680 | + HOST_WIDE_INT vcall_offset, tree function) |
| 681 | +{ |
| 682 | + int mi_delta = delta; |
| 683 | + int this_regno = |
| 684 | + (avr32_return_in_memory (DECL_RESULT (function), TREE_TYPE (function)) ? |
| 685 | + INTERNAL_REGNUM (11) : INTERNAL_REGNUM (12)); |
| 686 | + |
| 687 | + |
| 688 | + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21") |
| 689 | + || vcall_offset) |
| 690 | + { |
| 691 | + fprintf (file, "\tpushm\tr10\n"); |
| 692 | + } |
| 693 | + |
| 694 | + |
| 695 | + if (mi_delta != 0) |
| 696 | + { |
| 697 | + if (avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")) |
| 698 | + { |
| 699 | + fprintf (file, "\tsub\t%s, -0x%x\n", reg_names[this_regno], |
| 700 | + mi_delta); |
| 701 | + } |
| 702 | + else |
| 703 | + { |
| 704 | +        /* The immediate does not fit in 21 bits, so build it in a temporary |
| 705 | +           register (r10, which was pushed above). */ |
| 706 | + fprintf (file, "\tmov\tr10, lo(%x)\n", mi_delta); |
| 707 | + fprintf (file, "\torh\tr10, hi(%x)\n", mi_delta); |
| 708 | + fprintf (file, "\tadd\t%s, r10\n", reg_names[this_regno]); |
| 709 | + } |
| 710 | + } |
| 711 | + |
| 712 | + |
| 713 | + if (vcall_offset != 0) |
| 714 | + { |
| 715 | + fprintf (file, "\tld.w\tr10, %s[0]\n", reg_names[this_regno]); |
| 716 | + fprintf (file, "\tld.w\tr10, r10[%i]\n", (int) vcall_offset); |
| 717 | + fprintf (file, "\tadd\t%s, r10\n", reg_names[this_regno]); |
| 718 | + } |
| 719 | + |
| 720 | + |
| 721 | + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21") |
| 722 | + || vcall_offset) |
| 723 | + { |
| 724 | + fprintf (file, "\tpopm\tr10\n"); |
| 725 | + } |
| 726 | + |
| 727 | + if (flag_pic) |
| 728 | + { |
| 729 | + /* Don't know how we should do this!!! For now we'll just use an |
| 730 | + extended branch instruction and hope that the function will be |
| 731 | + reached. */ |
| 732 | + fprintf (file, "\tbral\t"); |
| 733 | + assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0)); |
| 734 | + fputc ('\n', file); |
| 735 | + } |
| 736 | + else |
| 737 | + { |
| 738 | + fprintf (file, "\tlddpc\tpc, 0f\n"); |
| 739 | + fprintf (file, "\t.align 2\n"); |
| 740 | + fputs ("0:\t.long\t", file); |
| 741 | + assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0)); |
| 742 | + fputc ('\n', file); |
| 743 | + } |
| 744 | +} |
| 745 | + |
| 746 | +/* Implements target hook vector_mode_supported. */ |
| 747 | +bool |
| 748 | +avr32_vector_mode_supported (enum machine_mode mode) |
| 749 | +{ |
| 750 | + if ((mode == V2HImode) || (mode == V4QImode)) |
| 751 | + return true; |
| 752 | + |
| 753 | + return false; |
| 754 | +} |
| 755 | + |
| 756 | + |
| 757 | +#undef TARGET_INIT_LIBFUNCS |
| 758 | +#define TARGET_INIT_LIBFUNCS avr32_init_libfuncs |
| 759 | + |
| 760 | +#undef TARGET_INIT_BUILTINS |
| 761 | +#define TARGET_INIT_BUILTINS avr32_init_builtins |
| 762 | + |
| 763 | +#undef TARGET_EXPAND_BUILTIN |
| 764 | +#define TARGET_EXPAND_BUILTIN avr32_expand_builtin |
| 765 | + |
| 766 | +tree int_ftype_int, int_ftype_void, short_ftype_short, void_ftype_int_int, |
| 767 | + void_ftype_ptr_int; |
| 768 | +tree void_ftype_int, void_ftype_void, int_ftype_ptr_int; |
| 769 | +tree short_ftype_short, int_ftype_int_short, int_ftype_short_short, |
| 770 | + short_ftype_short_short; |
| 771 | +tree int_ftype_int_int, longlong_ftype_int_short, longlong_ftype_short_short; |
| 772 | +tree void_ftype_int_int_int_int_int, void_ftype_int_int_int; |
| 773 | +tree longlong_ftype_int_int, void_ftype_int_int_longlong; |
| 774 | +tree int_ftype_int_int_int, longlong_ftype_longlong_int_short; |
| 775 | +tree longlong_ftype_longlong_short_short, int_ftype_int_short_short; |
| 776 | + |
| 777 | +#define def_builtin(NAME, TYPE, CODE) \ |
| 778 | + lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \ |
| 779 | + BUILT_IN_MD, NULL, NULL_TREE) |
| 780 | + |
| 781 | +#define def_mbuiltin(MASK, NAME, TYPE, CODE) \ |
| 782 | + do \ |
| 783 | + { \ |
| 784 | + if ((MASK)) \ |
| 785 | + lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \ |
| 786 | + BUILT_IN_MD, NULL, NULL_TREE); \ |
| 787 | + } \ |
| 788 | + while (0) |
| 789 | + |
| 790 | +struct builtin_description |
| 791 | +{ |
| 792 | + const unsigned int mask; |
| 793 | + const enum insn_code icode; |
| 794 | + const char *const name; |
| 795 | + const int code; |
| 796 | + const enum rtx_code comparison; |
| 797 | + const unsigned int flag; |
| 798 | + const tree *ftype; |
| 799 | +}; |
| 800 | + |
| 801 | +static const struct builtin_description bdesc_2arg[] = { |
| 802 | +#define DSP_BUILTIN(code, builtin, ftype) \ |
| 803 | + { 1, CODE_FOR_##code, "__builtin_" #code , \ |
| 804 | + AVR32_BUILTIN_##builtin, 0, 0, ftype } |
| 805 | + |
| 806 | + DSP_BUILTIN (mulsathh_h, MULSATHH_H, &short_ftype_short_short), |
| 807 | + DSP_BUILTIN (mulsathh_w, MULSATHH_W, &int_ftype_short_short), |
| 808 | + DSP_BUILTIN (mulsatrndhh_h, MULSATRNDHH_H, &short_ftype_short_short), |
| 809 | + DSP_BUILTIN (mulsatrndwh_w, MULSATRNDWH_W, &int_ftype_int_short), |
| 810 | + DSP_BUILTIN (mulsatwh_w, MULSATWH_W, &int_ftype_int_short), |
| 811 | + DSP_BUILTIN (satadd_h, SATADD_H, &short_ftype_short_short), |
| 812 | + DSP_BUILTIN (satsub_h, SATSUB_H, &short_ftype_short_short), |
| 813 | + DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int), |
| 814 | + DSP_BUILTIN (satsub_w, SATSUB_W, &int_ftype_int_int), |
| 815 | + DSP_BUILTIN (mulwh_d, MULWH_D, &longlong_ftype_int_short), |
| 816 | + DSP_BUILTIN (mulnwh_d, MULNWH_D, &longlong_ftype_int_short) |
| 817 | +}; |
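To make the table easier to read, here is how one entry expands; this follows mechanically from the DSP_BUILTIN macro defined just above.

    /* DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int) expands to:
       { 1, CODE_FOR_satadd_w, "__builtin_satadd_w",
         AVR32_BUILTIN_SATADD_W, 0, 0, &int_ftype_int_int }  */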
| 818 | + |
| 819 | + |
| 820 | +void |
| 821 | +avr32_init_builtins (void) |
| 822 | +{ |
| 823 | + unsigned int i; |
| 824 | + const struct builtin_description *d; |
| 825 | + tree endlink = void_list_node; |
| 826 | + tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink); |
| 827 | + tree longlong_endlink = |
| 828 | + tree_cons (NULL_TREE, long_long_integer_type_node, endlink); |
| 829 | + tree short_endlink = |
| 830 | + tree_cons (NULL_TREE, short_integer_type_node, endlink); |
| 831 | + tree void_endlink = tree_cons (NULL_TREE, void_type_node, endlink); |
| 832 | + |
| 833 | + /* int func (int) */ |
| 834 | + int_ftype_int = build_function_type (integer_type_node, int_endlink); |
| 835 | + |
| 836 | + /* short func (short) */ |
| 837 | + short_ftype_short |
| 838 | + = build_function_type (short_integer_type_node, short_endlink); |
| 839 | + |
| 840 | + /* short func (short, short) */ |
| 841 | + short_ftype_short_short |
| 842 | + = build_function_type (short_integer_type_node, |
| 843 | + tree_cons (NULL_TREE, short_integer_type_node, |
| 844 | + short_endlink)); |
| 845 | + |
| 846 | + /* long long func (long long, short, short) */ |
| 847 | + longlong_ftype_longlong_short_short |
| 848 | + = build_function_type (long_long_integer_type_node, |
| 849 | + tree_cons (NULL_TREE, long_long_integer_type_node, |
| 850 | + tree_cons (NULL_TREE, |
| 851 | + short_integer_type_node, |
| 852 | + short_endlink))); |
| 853 | + |
| 854 | + /* long long func (short, short) */ |
| 855 | + longlong_ftype_short_short |
| 856 | + = build_function_type (long_long_integer_type_node, |
| 857 | + tree_cons (NULL_TREE, short_integer_type_node, |
| 858 | + short_endlink)); |
| 859 | + |
| 860 | + /* int func (int, int) */ |
| 861 | + int_ftype_int_int |
| 862 | + = build_function_type (integer_type_node, |
| 863 | + tree_cons (NULL_TREE, integer_type_node, |
| 864 | + int_endlink)); |
| 865 | + |
| 866 | + /* long long func (int, int) */ |
| 867 | + longlong_ftype_int_int |
| 868 | + = build_function_type (long_long_integer_type_node, |
| 869 | + tree_cons (NULL_TREE, integer_type_node, |
| 870 | + int_endlink)); |
| 871 | + |
| 872 | + /* long long int func (long long, int, short) */ |
| 873 | + longlong_ftype_longlong_int_short |
| 874 | + = build_function_type (long_long_integer_type_node, |
| 875 | + tree_cons (NULL_TREE, long_long_integer_type_node, |
| 876 | + tree_cons (NULL_TREE, integer_type_node, |
| 877 | + short_endlink))); |
| 878 | + |
| 879 | + /* long long int func (int, short) */ |
| 880 | + longlong_ftype_int_short |
| 881 | + = build_function_type (long_long_integer_type_node, |
| 882 | + tree_cons (NULL_TREE, integer_type_node, |
| 883 | + short_endlink)); |
| 884 | + |
| 885 | + /* int func (int, short, short) */ |
| 886 | + int_ftype_int_short_short |
| 887 | + = build_function_type (integer_type_node, |
| 888 | + tree_cons (NULL_TREE, integer_type_node, |
| 889 | + tree_cons (NULL_TREE, |
| 890 | + short_integer_type_node, |
| 891 | + short_endlink))); |
| 892 | + |
| 893 | + /* int func (short, short) */ |
| 894 | + int_ftype_short_short |
| 895 | + = build_function_type (integer_type_node, |
| 896 | + tree_cons (NULL_TREE, short_integer_type_node, |
| 897 | + short_endlink)); |
| 898 | + |
| 899 | + /* int func (int, short) */ |
| 900 | + int_ftype_int_short |
| 901 | + = build_function_type (integer_type_node, |
| 902 | + tree_cons (NULL_TREE, integer_type_node, |
| 903 | + short_endlink)); |
| 904 | + |
| 905 | + /* void func (int, int) */ |
| 906 | + void_ftype_int_int |
| 907 | + = build_function_type (void_type_node, |
| 908 | + tree_cons (NULL_TREE, integer_type_node, |
| 909 | + int_endlink)); |
| 910 | + |
| 911 | + /* void func (int, int, int) */ |
| 912 | + void_ftype_int_int_int |
| 913 | + = build_function_type (void_type_node, |
| 914 | + tree_cons (NULL_TREE, integer_type_node, |
| 915 | + tree_cons (NULL_TREE, integer_type_node, |
| 916 | + int_endlink))); |
| 917 | + |
| 918 | + /* void func (int, int, long long) */ |
| 919 | + void_ftype_int_int_longlong |
| 920 | + = build_function_type (void_type_node, |
| 921 | + tree_cons (NULL_TREE, integer_type_node, |
| 922 | + tree_cons (NULL_TREE, integer_type_node, |
| 923 | + longlong_endlink))); |
| 924 | + |
| 925 | + /* void func (int, int, int, int, int) */ |
| 926 | + void_ftype_int_int_int_int_int |
| 927 | + = build_function_type (void_type_node, |
| 928 | + tree_cons (NULL_TREE, integer_type_node, |
| 929 | + tree_cons (NULL_TREE, integer_type_node, |
| 930 | + tree_cons (NULL_TREE, |
| 931 | + integer_type_node, |
| 932 | + tree_cons |
| 933 | + (NULL_TREE, |
| 934 | + integer_type_node, |
| 935 | + int_endlink))))); |
| 936 | + |
| 937 | + /* void func (void *, int) */ |
| 938 | + void_ftype_ptr_int |
| 939 | + = build_function_type (void_type_node, |
| 940 | + tree_cons (NULL_TREE, ptr_type_node, int_endlink)); |
| 941 | + |
| 942 | + /* void func (int) */ |
| 943 | + void_ftype_int = build_function_type (void_type_node, int_endlink); |
| 944 | + |
| 945 | + /* void func (void) */ |
| 946 | + void_ftype_void = build_function_type (void_type_node, void_endlink); |
| 947 | + |
| 948 | + /* int func (void) */ |
| 949 | + int_ftype_void = build_function_type (integer_type_node, void_endlink); |
| 950 | + |
| 951 | + /* int func (void *, int) */ |
| 952 | + int_ftype_ptr_int |
| 953 | + = build_function_type (integer_type_node, |
| 954 | + tree_cons (NULL_TREE, ptr_type_node, int_endlink)); |
| 955 | + |
| 956 | + /* int func (int, int, int) */ |
| 957 | + int_ftype_int_int_int |
| 958 | + = build_function_type (integer_type_node, |
| 959 | + tree_cons (NULL_TREE, integer_type_node, |
| 960 | + tree_cons (NULL_TREE, integer_type_node, |
| 961 | + int_endlink))); |
| 962 | + |
| 963 | + /* Initialize avr32 builtins. */ |
| 964 | + def_builtin ("__builtin_mfsr", int_ftype_int, AVR32_BUILTIN_MFSR); |
| 965 | + def_builtin ("__builtin_mtsr", void_ftype_int_int, AVR32_BUILTIN_MTSR); |
| 966 | + def_builtin ("__builtin_mfdr", int_ftype_int, AVR32_BUILTIN_MFDR); |
| 967 | + def_builtin ("__builtin_mtdr", void_ftype_int_int, AVR32_BUILTIN_MTDR); |
| 968 | + def_builtin ("__builtin_cache", void_ftype_ptr_int, AVR32_BUILTIN_CACHE); |
| 969 | + def_builtin ("__builtin_sync", void_ftype_int, AVR32_BUILTIN_SYNC); |
| 970 | + def_builtin ("__builtin_tlbr", void_ftype_void, AVR32_BUILTIN_TLBR); |
| 971 | + def_builtin ("__builtin_tlbs", void_ftype_void, AVR32_BUILTIN_TLBS); |
| 972 | + def_builtin ("__builtin_tlbw", void_ftype_void, AVR32_BUILTIN_TLBW); |
| 973 | + def_builtin ("__builtin_breakpoint", void_ftype_void, |
| 974 | + AVR32_BUILTIN_BREAKPOINT); |
| 975 | + def_builtin ("__builtin_xchg", int_ftype_ptr_int, AVR32_BUILTIN_XCHG); |
| 976 | + def_builtin ("__builtin_ldxi", int_ftype_ptr_int, AVR32_BUILTIN_LDXI); |
| 977 | + def_builtin ("__builtin_bswap_16", short_ftype_short, |
| 978 | + AVR32_BUILTIN_BSWAP16); |
| 979 | + def_builtin ("__builtin_bswap_32", int_ftype_int, AVR32_BUILTIN_BSWAP32); |
| 980 | + def_builtin ("__builtin_cop", void_ftype_int_int_int_int_int, |
| 981 | + AVR32_BUILTIN_COP); |
| 982 | + def_builtin ("__builtin_mvcr_w", int_ftype_int_int, AVR32_BUILTIN_MVCR_W); |
| 983 | + def_builtin ("__builtin_mvrc_w", void_ftype_int_int_int, |
| 984 | + AVR32_BUILTIN_MVRC_W); |
| 985 | + def_builtin ("__builtin_mvcr_d", longlong_ftype_int_int, |
| 986 | + AVR32_BUILTIN_MVCR_D); |
| 987 | + def_builtin ("__builtin_mvrc_d", void_ftype_int_int_longlong, |
| 988 | + AVR32_BUILTIN_MVRC_D); |
| 989 | + def_builtin ("__builtin_sats", int_ftype_int_int_int, AVR32_BUILTIN_SATS); |
| 990 | + def_builtin ("__builtin_satu", int_ftype_int_int_int, AVR32_BUILTIN_SATU); |
| 991 | + def_builtin ("__builtin_satrnds", int_ftype_int_int_int, |
| 992 | + AVR32_BUILTIN_SATRNDS); |
| 993 | + def_builtin ("__builtin_satrndu", int_ftype_int_int_int, |
| 994 | + AVR32_BUILTIN_SATRNDU); |
| 995 | + def_builtin ("__builtin_musfr", void_ftype_int, AVR32_BUILTIN_MUSFR); |
| 996 | + def_builtin ("__builtin_mustr", int_ftype_void, AVR32_BUILTIN_MUSTR); |
| 997 | + def_builtin ("__builtin_macsathh_w", int_ftype_int_short_short, |
| 998 | + AVR32_BUILTIN_MACSATHH_W); |
| 999 | + def_builtin ("__builtin_macwh_d", longlong_ftype_longlong_int_short, |
| 1000 | + AVR32_BUILTIN_MACWH_D); |
| 1001 | + def_builtin ("__builtin_machh_d", longlong_ftype_longlong_short_short, |
| 1002 | + AVR32_BUILTIN_MACHH_D); |
| 1003 | + |
| 1004 | + /* Add all builtins that are more or less simple operations on two |
| 1005 | + operands. */ |
| 1006 | + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++) |
| 1007 | + { |
| 1008 | + /* Use one of the operands; the target can have a different mode for |
| 1009 | + mask-generating compares. */ |
| 1010 | + |
| 1011 | + if (d->name == 0) |
| 1012 | + continue; |
| 1013 | + |
| 1014 | + def_mbuiltin (d->mask, d->name, *(d->ftype), d->code); |
| 1015 | + } |
| 1016 | +} |
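A brief usage sketch (illustrative, not part of the patch) of two of the builtins registered above; the system-register number passed to __builtin_mfsr is a made-up placeholder and must be a compile-time constant.

    /* Illustrative user code:  */
    int read_sysreg_0 (void)
    {
      return __builtin_mfsr (0);        /* registered as int func (int) */
    }

    int saturating_add (int a, int b)
    {
      return __builtin_satadd_w (a, b); /* registered as int func (int, int) */
    }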
| 1017 | + |
| 1018 | + |
| 1019 | +/* Subroutine of avr32_expand_builtin to take care of binop insns. */ |
| 1020 | + |
| 1021 | +static rtx |
| 1022 | +avr32_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target) |
| 1023 | +{ |
| 1024 | + rtx pat; |
| 1025 | + tree arg0 = TREE_VALUE (arglist); |
| 1026 | + tree arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| 1027 | + rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| 1028 | + rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| 1029 | + enum machine_mode tmode = insn_data[icode].operand[0].mode; |
| 1030 | + enum machine_mode mode0 = insn_data[icode].operand[1].mode; |
| 1031 | + enum machine_mode mode1 = insn_data[icode].operand[2].mode; |
| 1032 | + |
| 1033 | + if (!target |
| 1034 | + || GET_MODE (target) != tmode |
| 1035 | + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| 1036 | + target = gen_reg_rtx (tmode); |
| 1037 | + |
| 1038 | +  /* In case the insn wants input operands in modes different from the |
| 1039 | +     result, convert the operands. */ |
| 1040 | + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0)) |
| 1041 | + { |
| 1042 | + /* If op0 is already a reg we must cast it to the correct mode. */ |
| 1043 | + if (REG_P (op0)) |
| 1044 | + op0 = convert_to_mode (mode0, op0, 1); |
| 1045 | + else |
| 1046 | + op0 = copy_to_mode_reg (mode0, op0); |
| 1047 | + } |
| 1048 | + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1)) |
| 1049 | + { |
| 1050 | + /* If op1 is already a reg we must cast it to the correct mode. */ |
| 1051 | + if (REG_P (op1)) |
| 1052 | + op1 = convert_to_mode (mode1, op1, 1); |
| 1053 | + else |
| 1054 | + op1 = copy_to_mode_reg (mode1, op1); |
| 1055 | + } |
| 1056 | + pat = GEN_FCN (icode) (target, op0, op1); |
| 1057 | + if (!pat) |
| 1058 | + return 0; |
| 1059 | + emit_insn (pat); |
| 1060 | + return target; |
| 1061 | +} |
| 1062 | + |
| 1063 | +/* Expand an expression EXP that calls a built-in function, |
| 1064 | + with result going to TARGET if that's convenient |
| 1065 | + (and in mode MODE if that's convenient). |
| 1066 | + SUBTARGET may be used as the target for computing one of EXP's operands. |
| 1067 | + IGNORE is nonzero if the value is to be ignored. */ |
| 1068 | + |
| 1069 | +rtx |
| 1070 | +avr32_expand_builtin (tree exp, |
| 1071 | + rtx target, |
| 1072 | + rtx subtarget ATTRIBUTE_UNUSED, |
| 1073 | + enum machine_mode mode ATTRIBUTE_UNUSED, |
| 1074 | + int ignore ATTRIBUTE_UNUSED) |
| 1075 | +{ |
| 1076 | + const struct builtin_description *d; |
| 1077 | + unsigned int i; |
| 1078 | + enum insn_code icode; |
| 1079 | + tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0); |
| 1080 | + tree arglist = TREE_OPERAND (exp, 1); |
| 1081 | + tree arg0, arg1, arg2; |
| 1082 | + rtx op0, op1, op2, pat; |
| 1083 | + enum machine_mode tmode, mode0, mode1; |
| 1084 | + enum machine_mode arg0_mode; |
| 1085 | + int fcode = DECL_FUNCTION_CODE (fndecl); |
| 1086 | + |
| 1087 | + switch (fcode) |
| 1088 | + { |
| 1089 | + default: |
| 1090 | + break; |
| 1091 | + |
| 1092 | + case AVR32_BUILTIN_SATS: |
| 1093 | + case AVR32_BUILTIN_SATU: |
| 1094 | + case AVR32_BUILTIN_SATRNDS: |
| 1095 | + case AVR32_BUILTIN_SATRNDU: |
| 1096 | + { |
| 1097 | + const char *fname; |
| 1098 | + switch (fcode) |
| 1099 | + { |
| 1100 | + default: |
| 1101 | + case AVR32_BUILTIN_SATS: |
| 1102 | + icode = CODE_FOR_sats; |
| 1103 | + fname = "sats"; |
| 1104 | + break; |
| 1105 | + case AVR32_BUILTIN_SATU: |
| 1106 | + icode = CODE_FOR_satu; |
| 1107 | + fname = "satu"; |
| 1108 | + break; |
| 1109 | + case AVR32_BUILTIN_SATRNDS: |
| 1110 | + icode = CODE_FOR_satrnds; |
| 1111 | + fname = "satrnds"; |
| 1112 | + break; |
| 1113 | + case AVR32_BUILTIN_SATRNDU: |
| 1114 | + icode = CODE_FOR_satrndu; |
| 1115 | + fname = "satrndu"; |
| 1116 | + break; |
| 1117 | + } |
| 1118 | + |
| 1119 | + arg0 = TREE_VALUE (arglist); |
| 1120 | + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| 1121 | + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); |
| 1122 | + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| 1123 | + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| 1124 | + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); |
| 1125 | + |
| 1126 | + tmode = insn_data[icode].operand[0].mode; |
| 1127 | + |
| 1128 | + |
| 1129 | + if (target == 0 |
| 1130 | + || GET_MODE (target) != tmode |
| 1131 | + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| 1132 | + target = gen_reg_rtx (tmode); |
| 1133 | + |
| 1134 | + |
| 1135 | + if (!(*insn_data[icode].operand[0].predicate) (op0, GET_MODE (op0))) |
| 1136 | + { |
| 1137 | + op0 = copy_to_mode_reg (insn_data[icode].operand[0].mode, op0); |
| 1138 | + } |
| 1139 | + |
| 1140 | + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode)) |
| 1141 | + { |
| 1142 | + error ("Parameter 2 to __builtin_%s should be a constant number.", |
| 1143 | + fname); |
| 1144 | + return NULL_RTX; |
| 1145 | + } |
| 1146 | + |
| 1147 | + if (!(*insn_data[icode].operand[1].predicate) (op2, SImode)) |
| 1148 | + { |
| 1149 | + error ("Parameter 3 to __builtin_%s should be a constant number.", |
| 1150 | + fname); |
| 1151 | + return NULL_RTX; |
| 1152 | + } |
| 1153 | + |
| 1154 | + emit_move_insn (target, op0); |
| 1155 | + pat = GEN_FCN (icode) (target, op1, op2); |
| 1156 | + if (!pat) |
| 1157 | + return 0; |
| 1158 | + emit_insn (pat); |
| 1159 | + |
| 1160 | + return target; |
| 1161 | + } |
| 1162 | + case AVR32_BUILTIN_MUSTR: |
| 1163 | + icode = CODE_FOR_mustr; |
| 1164 | + tmode = insn_data[icode].operand[0].mode; |
| 1165 | + |
| 1166 | + if (target == 0 |
| 1167 | + || GET_MODE (target) != tmode |
| 1168 | + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| 1169 | + target = gen_reg_rtx (tmode); |
| 1170 | + pat = GEN_FCN (icode) (target); |
| 1171 | + if (!pat) |
| 1172 | + return 0; |
| 1173 | + emit_insn (pat); |
| 1174 | + return target; |
| 1175 | + |
| 1176 | + case AVR32_BUILTIN_MFSR: |
| 1177 | + icode = CODE_FOR_mfsr; |
| 1178 | + arg0 = TREE_VALUE (arglist); |
| 1179 | + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| 1180 | + tmode = insn_data[icode].operand[0].mode; |
| 1181 | + mode0 = insn_data[icode].operand[1].mode; |
| 1182 | + |
| 1183 | + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0)) |
| 1184 | + { |
| 1185 | + error ("Parameter 1 to __builtin_mfsr must be a constant number"); |
| 1186 | + } |
| 1187 | + |
| 1188 | + if (target == 0 |
| 1189 | + || GET_MODE (target) != tmode |
| 1190 | + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| 1191 | + target = gen_reg_rtx (tmode); |
| 1192 | + pat = GEN_FCN (icode) (target, op0); |
| 1193 | + if (!pat) |
| 1194 | + return 0; |
| 1195 | + emit_insn (pat); |
| 1196 | + return target; |
| 1197 | + case AVR32_BUILTIN_MTSR: |
| 1198 | + icode = CODE_FOR_mtsr; |
| 1199 | + arg0 = TREE_VALUE (arglist); |
| 1200 | + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| 1201 | + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| 1202 | + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| 1203 | + mode0 = insn_data[icode].operand[0].mode; |
| 1204 | + mode1 = insn_data[icode].operand[1].mode; |
| 1205 | + |
| 1206 | + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0)) |
| 1207 | + { |
| 1208 | + error ("Parameter 1 to __builtin_mtsr must be a constant number"); |
| 1209 | + return gen_reg_rtx (mode0); |
| 1210 | + } |
| 1211 | + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1)) |
| 1212 | + op1 = copy_to_mode_reg (mode1, op1); |
| 1213 | + pat = GEN_FCN (icode) (op0, op1); |
| 1214 | + if (!pat) |
| 1215 | + return 0; |
| 1216 | + emit_insn (pat); |
| 1217 | + return NULL_RTX; |
| 1218 | + case AVR32_BUILTIN_MFDR: |
| 1219 | + icode = CODE_FOR_mfdr; |
| 1220 | + arg0 = TREE_VALUE (arglist); |
| 1221 | + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| 1222 | + tmode = insn_data[icode].operand[0].mode; |
| 1223 | + mode0 = insn_data[icode].operand[1].mode; |
| 1224 | + |
| 1225 | + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0)) |
| 1226 | + { |
| 1227 | + error ("Parameter 1 to __builtin_mfdr must be a constant number"); |
| 1228 | + } |
| 1229 | + |
| 1230 | + if (target == 0 |
| 1231 | + || GET_MODE (target) != tmode |
| 1232 | + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| 1233 | + target = gen_reg_rtx (tmode); |
| 1234 | + pat = GEN_FCN (icode) (target, op0); |
| 1235 | + if (!pat) |
| 1236 | + return 0; |
| 1237 | + emit_insn (pat); |
| 1238 | + return target; |
| 1239 | + case AVR32_BUILTIN_MTDR: |
| 1240 | + icode = CODE_FOR_mtdr; |
| 1241 | + arg0 = TREE_VALUE (arglist); |
| 1242 | + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| 1243 | + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| 1244 | + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| 1245 | + mode0 = insn_data[icode].operand[0].mode; |
| 1246 | + mode1 = insn_data[icode].operand[1].mode; |
| 1247 | + |
| 1248 | + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0)) |
| 1249 | + { |
| 1250 | + error ("Parameter 1 to __builtin_mtdr must be a constant number"); |
| 1251 | + return gen_reg_rtx (mode0); |
| 1252 | + } |
| 1253 | + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1)) |
| 1254 | + op1 = copy_to_mode_reg (mode1, op1); |
| 1255 | + pat = GEN_FCN (icode) (op0, op1); |
| 1256 | + if (!pat) |
| 1257 | + return 0; |
| 1258 | + emit_insn (pat); |
| 1259 | + return NULL_RTX; |
| 1260 | + case AVR32_BUILTIN_CACHE: |
| 1261 | + icode = CODE_FOR_cache; |
| 1262 | + arg0 = TREE_VALUE (arglist); |
| 1263 | + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| 1264 | + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| 1265 | + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| 1266 | + mode0 = insn_data[icode].operand[0].mode; |
| 1267 | + mode1 = insn_data[icode].operand[1].mode; |
| 1268 | + |
| 1269 | + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1)) |
| 1270 | + { |
| 1271 | + error ("Parameter 2 to __builtin_cache must be a constant number"); |
| 1272 | + return gen_reg_rtx (mode1); |
| 1273 | + } |
| 1274 | + |
| 1275 | + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0)) |
| 1276 | + op0 = copy_to_mode_reg (mode0, op0); |
| 1277 | + |
| 1278 | + pat = GEN_FCN (icode) (op0, op1); |
| 1279 | + if (!pat) |
| 1280 | + return 0; |
| 1281 | + emit_insn (pat); |
| 1282 | + return NULL_RTX; |
| 1283 | + case AVR32_BUILTIN_SYNC: |
| 1284 | + case AVR32_BUILTIN_MUSFR: |
| 1285 | + { |
| 1286 | + const char *fname; |
| 1287 | + switch (fcode) |
| 1288 | + { |
| 1289 | + default: |
| 1290 | + case AVR32_BUILTIN_SYNC: |
| 1291 | + icode = CODE_FOR_sync; |
| 1292 | + fname = "sync"; |
| 1293 | + break; |
| 1294 | + case AVR32_BUILTIN_MUSFR: |
| 1295 | + icode = CODE_FOR_musfr; |
| 1296 | + fname = "musfr"; |
| 1297 | + break; |
| 1298 | + } |
| 1299 | + |
| 1300 | + arg0 = TREE_VALUE (arglist); |
| 1301 | + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| 1302 | + mode0 = insn_data[icode].operand[0].mode; |
| 1303 | + |
| 1304 | + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0)) |
| 1305 | + { |
| 1306 | + if (icode == CODE_FOR_musfr) |
| 1307 | + op0 = copy_to_mode_reg (mode0, op0); |
| 1308 | + else |
| 1309 | + { |
| 1310 | + error ("Parameter to __builtin_%s is illegal.", fname); |
| 1311 | + return gen_reg_rtx (mode0); |
| 1312 | + } |
| 1313 | + } |
| 1314 | + pat = GEN_FCN (icode) (op0); |
| 1315 | + if (!pat) |
| 1316 | + return 0; |
| 1317 | + emit_insn (pat); |
| 1318 | + return NULL_RTX; |
| 1319 | + } |
| 1320 | + case AVR32_BUILTIN_TLBR: |
| 1321 | + icode = CODE_FOR_tlbr; |
| 1322 | + pat = GEN_FCN (icode) (NULL_RTX); |
| 1323 | + if (!pat) |
| 1324 | + return 0; |
| 1325 | + emit_insn (pat); |
| 1326 | + return NULL_RTX; |
| 1327 | + case AVR32_BUILTIN_TLBS: |
| 1328 | + icode = CODE_FOR_tlbs; |
| 1329 | + pat = GEN_FCN (icode) (NULL_RTX); |
| 1330 | + if (!pat) |
| 1331 | + return 0; |
| 1332 | + emit_insn (pat); |
| 1333 | + return NULL_RTX; |
| 1334 | + case AVR32_BUILTIN_TLBW: |
| 1335 | + icode = CODE_FOR_tlbw; |
| 1336 | + pat = GEN_FCN (icode) (NULL_RTX); |
| 1337 | + if (!pat) |
| 1338 | + return 0; |
| 1339 | + emit_insn (pat); |
| 1340 | + return NULL_RTX; |
| 1341 | + case AVR32_BUILTIN_BREAKPOINT: |
| 1342 | + icode = CODE_FOR_breakpoint; |
| 1343 | + pat = GEN_FCN (icode) (NULL_RTX); |
| 1344 | + if (!pat) |
| 1345 | + return 0; |
| 1346 | + emit_insn (pat); |
| 1347 | + return NULL_RTX; |
| 1348 | + case AVR32_BUILTIN_XCHG: |
| 1349 | + icode = CODE_FOR_xchg; |
| 1350 | + arg0 = TREE_VALUE (arglist); |
| 1351 | + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| 1352 | + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| 1353 | + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| 1354 | + tmode = insn_data[icode].operand[0].mode; |
| 1355 | + mode0 = insn_data[icode].operand[1].mode; |
| 1356 | + mode1 = insn_data[icode].operand[3].mode; |
| 1357 | + |
| 1358 | + if (!(*insn_data[icode].operand[3].predicate) (op1, mode1)) |
| 1359 | + { |
| 1360 | + op1 = copy_to_mode_reg (mode1, op1); |
| 1361 | + } |
| 1362 | + |
| 1363 | + if (!(*insn_data[icode].operand[2].predicate) (op0, mode0)) |
| 1364 | + { |
| 1365 | + op0 = copy_to_mode_reg (mode0, op0); |
| 1366 | + } |
| 1367 | + |
| 1368 | + if (target == 0 |
| 1369 | + || GET_MODE (target) != tmode |
| 1370 | + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| 1371 | + target = gen_reg_rtx (tmode); |
| 1372 | + pat = GEN_FCN (icode) (target, op0, op0, op1); |
| 1373 | + if (!pat) |
| 1374 | + return 0; |
| 1375 | + emit_insn (pat); |
| 1376 | + return target; |
| 1377 | + case AVR32_BUILTIN_LDXI: |
| 1378 | + icode = CODE_FOR_ldxi; |
| 1379 | + arg0 = TREE_VALUE (arglist); |
| 1380 | + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| 1381 | + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); |
| 1382 | + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| 1383 | + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| 1384 | + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); |
| 1385 | + tmode = insn_data[icode].operand[0].mode; |
| 1386 | + mode0 = insn_data[icode].operand[1].mode; |
| 1387 | + mode1 = insn_data[icode].operand[2].mode; |
| 1388 | + |
| 1389 | + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0)) |
| 1390 | + { |
| 1391 | + op0 = copy_to_mode_reg (mode0, op0); |
| 1392 | + } |
| 1393 | + |
| 1394 | + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1)) |
| 1395 | + { |
| 1396 | + op1 = copy_to_mode_reg (mode1, op1); |
| 1397 | + } |
| 1398 | + |
| 1399 | + if (!(*insn_data[icode].operand[3].predicate) (op2, SImode)) |
| 1400 | + { |
| 1401 | + error |
| 1402 | + ("Parameter 3 to __builtin_ldxi must be a valid extract shift operand: (0|8|16|24)"); |
| 1403 | + return gen_reg_rtx (mode0); |
| 1404 | + } |
| 1405 | + |
| 1406 | + if (target == 0 |
| 1407 | + || GET_MODE (target) != tmode |
| 1408 | + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| 1409 | + target = gen_reg_rtx (tmode); |
| 1410 | + pat = GEN_FCN (icode) (target, op0, op1, op2); |
| 1411 | + if (!pat) |
| 1412 | + return 0; |
| 1413 | + emit_insn (pat); |
| 1414 | + return target; |
| 1415 | + case AVR32_BUILTIN_BSWAP16: |
| 1416 | + { |
| 1417 | + icode = CODE_FOR_bswap_16; |
| 1418 | + arg0 = TREE_VALUE (arglist); |
| 1419 | + arg0_mode = TYPE_MODE (TREE_TYPE (arg0)); |
| 1420 | + mode0 = insn_data[icode].operand[1].mode; |
| 1421 | + if (arg0_mode != mode0) |
| 1422 | + arg0 = build1 (NOP_EXPR, |
| 1423 | + (*lang_hooks.types.type_for_mode) (mode0, 0), arg0); |
| 1424 | + |
| 1425 | + op0 = expand_expr (arg0, NULL_RTX, HImode, 0); |
| 1426 | + tmode = insn_data[icode].operand[0].mode; |
| 1427 | + |
| 1428 | + |
| 1429 | + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0)) |
| 1430 | + { |
| 1431 | + op0 = copy_to_mode_reg (mode0, op0); |
| 1432 | + } |
| 1433 | + |
| 1434 | + if (target == 0 |
| 1435 | + || GET_MODE (target) != tmode |
| 1436 | + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| 1437 | + { |
| 1438 | + target = gen_reg_rtx (tmode); |
| 1439 | + } |
| 1440 | + |
| 1441 | + |
| 1442 | + pat = GEN_FCN (icode) (target, op0); |
| 1443 | + if (!pat) |
| 1444 | + return 0; |
| 1445 | + emit_insn (pat); |
| 1446 | + |
| 1447 | + return target; |
| 1448 | + } |
| 1449 | + case AVR32_BUILTIN_BSWAP32: |
| 1450 | + { |
| 1451 | + icode = CODE_FOR_bswap_32; |
| 1452 | + arg0 = TREE_VALUE (arglist); |
| 1453 | + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| 1454 | + tmode = insn_data[icode].operand[0].mode; |
| 1455 | + mode0 = insn_data[icode].operand[1].mode; |
| 1456 | + |
| 1457 | + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0)) |
| 1458 | + { |
| 1459 | + op0 = copy_to_mode_reg (mode0, op0); |
| 1460 | + } |
| 1461 | + |
| 1462 | + if (target == 0 |
| 1463 | + || GET_MODE (target) != tmode |
| 1464 | + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| 1465 | + target = gen_reg_rtx (tmode); |
| 1466 | + |
| 1467 | + |
| 1468 | + pat = GEN_FCN (icode) (target, op0); |
| 1469 | + if (!pat) |
| 1470 | + return 0; |
| 1471 | + emit_insn (pat); |
| 1472 | + |
| 1473 | + return target; |
| 1474 | + } |
| 1475 | + case AVR32_BUILTIN_MVCR_W: |
| 1476 | + case AVR32_BUILTIN_MVCR_D: |
| 1477 | + { |
| 1478 | + arg0 = TREE_VALUE (arglist); |
| 1479 | + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| 1480 | + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| 1481 | + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| 1482 | + |
| 1483 | + if (fcode == AVR32_BUILTIN_MVCR_W) |
| 1484 | + icode = CODE_FOR_mvcrsi; |
| 1485 | + else |
| 1486 | + icode = CODE_FOR_mvcrdi; |
| 1487 | + |
| 1488 | + tmode = insn_data[icode].operand[0].mode; |
| 1489 | + |
| 1490 | + if (target == 0 |
| 1491 | + || GET_MODE (target) != tmode |
| 1492 | + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| 1493 | + target = gen_reg_rtx (tmode); |
| 1494 | + |
| 1495 | + if (!(*insn_data[icode].operand[1].predicate) (op0, SImode)) |
| 1496 | + { |
| 1497 | + error |
| 1498 | + ("Parameter 1 to __builtin_cop is not a valid coprocessor number."); |
| 1499 | + error ("Number should be between 0 and 7."); |
| 1500 | + return NULL_RTX; |
| 1501 | + } |
| 1502 | + |
| 1503 | + if (!(*insn_data[icode].operand[2].predicate) (op1, SImode)) |
| 1504 | + { |
| 1505 | + error |
| 1506 | + ("Parameter 2 to __builtin_cop is not a valid coprocessor register number."); |
| 1507 | + error ("Number should be between 0 and 15."); |
| 1508 | + return NULL_RTX; |
| 1509 | + } |
| 1510 | + |
| 1511 | + pat = GEN_FCN (icode) (target, op0, op1); |
| 1512 | + if (!pat) |
| 1513 | + return 0; |
| 1514 | + emit_insn (pat); |
| 1515 | + |
| 1516 | + return target; |
| 1517 | + } |
| 1518 | + case AVR32_BUILTIN_MACSATHH_W: |
| 1519 | + case AVR32_BUILTIN_MACWH_D: |
| 1520 | + case AVR32_BUILTIN_MACHH_D: |
| 1521 | + { |
| 1522 | + arg0 = TREE_VALUE (arglist); |
| 1523 | + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| 1524 | + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); |
| 1525 | + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| 1526 | + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| 1527 | + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); |
| 1528 | + |
| 1529 | + icode = ((fcode == AVR32_BUILTIN_MACSATHH_W) ? CODE_FOR_macsathh_w : |
| 1530 | + (fcode == AVR32_BUILTIN_MACWH_D) ? CODE_FOR_macwh_d : |
| 1531 | + CODE_FOR_machh_d); |
| 1532 | + |
| 1533 | + tmode = insn_data[icode].operand[0].mode; |
| 1534 | + mode0 = insn_data[icode].operand[1].mode; |
| 1535 | + mode1 = insn_data[icode].operand[2].mode; |
| 1536 | + |
| 1537 | + |
| 1538 | + if (!target |
| 1539 | + || GET_MODE (target) != tmode |
| 1540 | + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| 1541 | + target = gen_reg_rtx (tmode); |
| 1542 | + |
| 1543 | + if (!(*insn_data[icode].operand[0].predicate) (op0, tmode)) |
| 1544 | + { |
| 1545 | + /* If op0 is already a reg we must cast it to the correct mode. */ |
| 1546 | + if (REG_P (op0)) |
| 1547 | + op0 = convert_to_mode (tmode, op0, 1); |
| 1548 | + else |
| 1549 | + op0 = copy_to_mode_reg (tmode, op0); |
| 1550 | + } |
| 1551 | + |
| 1552 | + if (!(*insn_data[icode].operand[1].predicate) (op1, mode0)) |
| 1553 | + { |
| 1554 | + /* If op1 is already a reg we must cast it to the correct mode. */ |
| 1555 | + if (REG_P (op1)) |
| 1556 | + op1 = convert_to_mode (mode0, op1, 1); |
| 1557 | + else |
| 1558 | + op1 = copy_to_mode_reg (mode0, op1); |
| 1559 | + } |
| 1560 | + |
| 1561 | + if (!(*insn_data[icode].operand[2].predicate) (op2, mode1)) |
| 1562 | + { |
| 1563 | +	    /* If op2 is already a reg we must cast it to the correct mode. */
| 1564 | + if (REG_P (op2)) |
| 1565 | + op2 = convert_to_mode (mode1, op2, 1); |
| 1566 | + else |
| 1567 | + op2 = copy_to_mode_reg (mode1, op2); |
| 1568 | + } |
| 1569 | + |
| 1570 | + emit_move_insn (target, op0); |
| 1571 | + |
| 1572 | + pat = GEN_FCN (icode) (target, op1, op2); |
| 1573 | + if (!pat) |
| 1574 | + return 0; |
| 1575 | + emit_insn (pat); |
| 1576 | + return target; |
| 1577 | + } |
| 1578 | + case AVR32_BUILTIN_MVRC_W: |
| 1579 | + case AVR32_BUILTIN_MVRC_D: |
| 1580 | + { |
| 1581 | + arg0 = TREE_VALUE (arglist); |
| 1582 | + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| 1583 | + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); |
| 1584 | + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| 1585 | + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| 1586 | + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); |
| 1587 | + |
| 1588 | + if (fcode == AVR32_BUILTIN_MVRC_W) |
| 1589 | + icode = CODE_FOR_mvrcsi; |
| 1590 | + else |
| 1591 | + icode = CODE_FOR_mvrcdi; |
| 1592 | + |
| 1593 | + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode)) |
| 1594 | + { |
| 1595 | + error ("Parameter 1 is not a valid coprocessor number."); |
| 1596 | + error ("Number should be between 0 and 7."); |
| 1597 | + return NULL_RTX; |
| 1598 | + } |
| 1599 | + |
| 1600 | + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode)) |
| 1601 | + { |
| 1602 | + error ("Parameter 2 is not a valid coprocessor register number."); |
| 1603 | + error ("Number should be between 0 and 15."); |
| 1604 | + return NULL_RTX; |
| 1605 | + } |
| 1606 | + |
| 1607 | + if (GET_CODE (op2) == CONST_INT |
| 1608 | + || GET_CODE (op2) == CONST |
| 1609 | + || GET_CODE (op2) == SYMBOL_REF || GET_CODE (op2) == LABEL_REF) |
| 1610 | + { |
| 1611 | + op2 = force_const_mem (insn_data[icode].operand[2].mode, op2); |
| 1612 | + } |
| 1613 | + |
| 1614 | + if (!(*insn_data[icode].operand[2].predicate) (op2, GET_MODE (op2))) |
| 1615 | + op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2); |
| 1616 | + |
| 1617 | + |
| 1618 | + pat = GEN_FCN (icode) (op0, op1, op2); |
| 1619 | + if (!pat) |
| 1620 | + return 0; |
| 1621 | + emit_insn (pat); |
| 1622 | + |
| 1623 | + return NULL_RTX; |
| 1624 | + } |
| 1625 | + case AVR32_BUILTIN_COP: |
| 1626 | + { |
| 1627 | + rtx op3, op4; |
| 1628 | + tree arg3, arg4; |
| 1629 | + icode = CODE_FOR_cop; |
| 1630 | + arg0 = TREE_VALUE (arglist); |
| 1631 | + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| 1632 | + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); |
| 1633 | + arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist)))); |
| 1634 | + arg4 = |
| 1635 | + TREE_VALUE (TREE_CHAIN |
| 1636 | + (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))))); |
| 1637 | + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| 1638 | + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| 1639 | + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); |
| 1640 | + op3 = expand_expr (arg3, NULL_RTX, VOIDmode, 0); |
| 1641 | + op4 = expand_expr (arg4, NULL_RTX, VOIDmode, 0); |
| 1642 | + |
| 1643 | + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode)) |
| 1644 | + { |
| 1645 | + error |
| 1646 | + ("Parameter 1 to __builtin_cop is not a valid coprocessor number."); |
| 1647 | + error ("Number should be between 0 and 7."); |
| 1648 | + return NULL_RTX; |
| 1649 | + } |
| 1650 | + |
| 1651 | + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode)) |
| 1652 | + { |
| 1653 | + error |
| 1654 | + ("Parameter 2 to __builtin_cop is not a valid coprocessor register number."); |
| 1655 | + error ("Number should be between 0 and 15."); |
| 1656 | + return NULL_RTX; |
| 1657 | + } |
| 1658 | + |
| 1659 | + if (!(*insn_data[icode].operand[2].predicate) (op2, SImode)) |
| 1660 | + { |
| 1661 | + error |
| 1662 | + ("Parameter 3 to __builtin_cop is not a valid coprocessor register number."); |
| 1663 | + error ("Number should be between 0 and 15."); |
| 1664 | + return NULL_RTX; |
| 1665 | + } |
| 1666 | + |
| 1667 | + if (!(*insn_data[icode].operand[3].predicate) (op3, SImode)) |
| 1668 | + { |
| 1669 | + error |
| 1670 | + ("Parameter 4 to __builtin_cop is not a valid coprocessor register number."); |
| 1671 | + error ("Number should be between 0 and 15."); |
| 1672 | + return NULL_RTX; |
| 1673 | + } |
| 1674 | + |
| 1675 | + if (!(*insn_data[icode].operand[4].predicate) (op4, SImode)) |
| 1676 | + { |
| 1677 | + error |
| 1678 | + ("Parameter 5 to __builtin_cop is not a valid coprocessor operation."); |
| 1679 | + error ("Number should be between 0 and 127."); |
| 1680 | + return NULL_RTX; |
| 1681 | + } |
| 1682 | + |
| 1683 | + pat = GEN_FCN (icode) (op0, op1, op2, op3, op4); |
| 1684 | + if (!pat) |
| 1685 | + return 0; |
| 1686 | + emit_insn (pat); |
| 1687 | + |
| 1688 | + return target; |
| 1689 | + } |
| 1690 | + } |
| 1691 | + |
| 1692 | + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++) |
| 1693 | + if (d->code == fcode) |
| 1694 | + return avr32_expand_binop_builtin (d->icode, arglist, target); |
| 1695 | + |
| 1696 | + |
| 1697 | + /* @@@ Should really do something sensible here. */ |
| 1698 | + return NULL_RTX; |
| 1699 | +} |
| 1700 | + |
| 1701 | + |
| 1702 | +/* Handle an "interrupt" or "isr" attribute; |
| 1703 | + arguments as in struct attribute_spec.handler. */ |
| 1704 | + |
| 1705 | +static tree |
| 1706 | +avr32_handle_isr_attribute (tree * node, tree name, tree args, |
| 1707 | + int flags, bool * no_add_attrs) |
| 1708 | +{ |
| 1709 | + if (DECL_P (*node)) |
| 1710 | + { |
| 1711 | + if (TREE_CODE (*node) != FUNCTION_DECL) |
| 1712 | + { |
| 1713 | + warning ("`%s' attribute only applies to functions", |
| 1714 | + IDENTIFIER_POINTER (name)); |
| 1715 | + *no_add_attrs = true; |
| 1716 | + } |
| 1717 | + /* FIXME: the argument if any is checked for type attributes; should it |
| 1718 | + be checked for decl ones? */ |
| 1719 | + } |
| 1720 | + else |
| 1721 | + { |
| 1722 | + if (TREE_CODE (*node) == FUNCTION_TYPE |
| 1723 | + || TREE_CODE (*node) == METHOD_TYPE) |
| 1724 | + { |
| 1725 | + if (avr32_isr_value (args) == AVR32_FT_UNKNOWN) |
| 1726 | + { |
| 1727 | + warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); |
| 1728 | + *no_add_attrs = true; |
| 1729 | + } |
| 1730 | + } |
| 1731 | + else if (TREE_CODE (*node) == POINTER_TYPE |
| 1732 | + && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE |
| 1733 | + || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE) |
| 1734 | + && avr32_isr_value (args) != AVR32_FT_UNKNOWN) |
| 1735 | + { |
| 1736 | + *node = build_variant_type_copy (*node); |
| 1737 | + TREE_TYPE (*node) = build_type_attribute_variant |
| 1738 | + (TREE_TYPE (*node), |
| 1739 | + tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node)))); |
| 1740 | + *no_add_attrs = true; |
| 1741 | + } |
| 1742 | + else |
| 1743 | + { |
| 1744 | + /* Possibly pass this attribute on from the type to a decl. */ |
| 1745 | + if (flags & ((int) ATTR_FLAG_DECL_NEXT |
| 1746 | + | (int) ATTR_FLAG_FUNCTION_NEXT |
| 1747 | + | (int) ATTR_FLAG_ARRAY_NEXT)) |
| 1748 | + { |
| 1749 | + *no_add_attrs = true; |
| 1750 | + return tree_cons (name, args, NULL_TREE); |
| 1751 | + } |
| 1752 | + else |
| 1753 | + { |
| 1754 | + warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); |
| 1755 | + } |
| 1756 | + } |
| 1757 | + } |
| 1758 | + |
| 1759 | + return NULL_TREE; |
| 1760 | +} |
| 1761 | + |
| 1762 | +/* Handle an attribute requiring a FUNCTION_DECL; |
| 1763 | + arguments as in struct attribute_spec.handler. */ |
| 1764 | +static tree |
| 1765 | +avr32_handle_fndecl_attribute (tree * node, tree name, |
| 1766 | + tree args ATTRIBUTE_UNUSED, |
| 1767 | + int flags ATTRIBUTE_UNUSED, |
| 1768 | + bool * no_add_attrs) |
| 1769 | +{ |
| 1770 | + if (TREE_CODE (*node) != FUNCTION_DECL) |
| 1771 | + { |
| 1772 | + warning ("%qs attribute only applies to functions", |
| 1773 | + IDENTIFIER_POINTER (name)); |
| 1774 | + *no_add_attrs = true; |
| 1775 | + } |
| 1776 | + |
| 1777 | + return NULL_TREE; |
| 1778 | +} |
| 1779 | + |
| 1780 | + |
| 1781 | +/* Handle an acall attribute; |
| 1782 | + arguments as in struct attribute_spec.handler. */ |
| 1783 | + |
| 1784 | +static tree |
| 1785 | +avr32_handle_acall_attribute (tree * node, tree name, |
| 1786 | + tree args ATTRIBUTE_UNUSED, |
| 1787 | + int flags ATTRIBUTE_UNUSED, bool * no_add_attrs) |
| 1788 | +{ |
| 1789 | + if (TREE_CODE (*node) == FUNCTION_TYPE || TREE_CODE (*node) == METHOD_TYPE) |
| 1790 | + { |
| 1791 | + warning ("`%s' attribute not yet supported...", |
| 1792 | + IDENTIFIER_POINTER (name)); |
| 1793 | + *no_add_attrs = true; |
| 1794 | + return NULL_TREE; |
| 1795 | + } |
| 1796 | + |
| 1797 | + warning ("`%s' attribute only applies to functions", |
| 1798 | + IDENTIFIER_POINTER (name)); |
| 1799 | + *no_add_attrs = true; |
| 1800 | + return NULL_TREE; |
| 1801 | +} |
| 1802 | + |
| 1803 | + |
| 1804 | +/* Return 0 if the attributes for two types are incompatible, 1 if they |
| 1805 | + are compatible, and 2 if they are nearly compatible (which causes a |
| 1806 | + warning to be generated). */ |
| 1807 | + |
| 1808 | +static int |
| 1809 | +avr32_comp_type_attributes (tree type1, tree type2) |
| 1810 | +{ |
| 1811 | + int acall1, acall2, isr1, isr2, naked1, naked2; |
| 1812 | + |
| 1813 | + /* Check for mismatch of non-default calling convention. */ |
| 1814 | + if (TREE_CODE (type1) != FUNCTION_TYPE) |
| 1815 | + return 1; |
| 1816 | + |
| 1817 | + /* Check for mismatched call attributes. */ |
| 1818 | + acall1 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type1)) != NULL; |
| 1819 | + acall2 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type2)) != NULL; |
| 1820 | + naked1 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type1)) != NULL; |
| 1821 | + naked2 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type2)) != NULL; |
| 1822 | + isr1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL; |
| 1823 | + if (!isr1) |
| 1824 | + isr1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL; |
| 1825 | + |
| 1826 | + isr2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL; |
| 1827 | + if (!isr2) |
| 1828 | + isr2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL; |
| 1829 | + |
| 1830 | + if ((acall1 && isr2) |
| 1831 | + || (acall2 && isr1) || (naked1 && isr2) || (naked2 && isr1)) |
| 1832 | + return 0; |
| 1833 | + |
| 1834 | + return 1; |
| 1835 | +} |
| 1836 | + |
| 1837 | + |
| 1838 | +/* Computes the type of the current function. */ |
| 1839 | + |
| 1840 | +static unsigned long |
| 1841 | +avr32_compute_func_type (void) |
| 1842 | +{ |
| 1843 | + unsigned long type = AVR32_FT_UNKNOWN; |
| 1844 | + tree a; |
| 1845 | + tree attr; |
| 1846 | + |
| 1847 | + if (TREE_CODE (current_function_decl) != FUNCTION_DECL) |
| 1848 | + abort (); |
| 1849 | + |
| 1850 | + /* Decide if the current function is volatile. Such functions never |
| 1851 | + return, and many memory cycles can be saved by not storing register |
| 1852 | + values that will never be needed again. This optimization was added to |
| 1853 | + speed up context switching in a kernel application. */ |
| 1854 | + if (optimize > 0 |
| 1855 | + && TREE_NOTHROW (current_function_decl) |
| 1856 | + && TREE_THIS_VOLATILE (current_function_decl)) |
| 1857 | + type |= AVR32_FT_VOLATILE; |
| 1858 | + |
| 1859 | + if (cfun->static_chain_decl != NULL) |
| 1860 | + type |= AVR32_FT_NESTED; |
| 1861 | + |
| 1862 | + attr = DECL_ATTRIBUTES (current_function_decl); |
| 1863 | + |
| 1864 | + a = lookup_attribute ("isr", attr); |
| 1865 | + if (a == NULL_TREE) |
| 1866 | + a = lookup_attribute ("interrupt", attr); |
| 1867 | + |
| 1868 | + if (a == NULL_TREE) |
| 1869 | + type |= AVR32_FT_NORMAL; |
| 1870 | + else |
| 1871 | + type |= avr32_isr_value (TREE_VALUE (a)); |
| 1872 | + |
| 1873 | + |
| 1874 | + a = lookup_attribute ("acall", attr); |
| 1875 | + if (a != NULL_TREE) |
| 1876 | + type |= AVR32_FT_ACALL; |
| 1877 | + |
| 1878 | + a = lookup_attribute ("naked", attr); |
| 1879 | + if (a != NULL_TREE) |
| 1880 | + type |= AVR32_FT_NAKED; |
| 1881 | + |
| 1882 | + return type; |
| 1883 | +} |
| 1884 | + |
| 1885 | +/* Returns the type of the current function. */ |
| 1886 | + |
| 1887 | +static unsigned long |
| 1888 | +avr32_current_func_type (void) |
| 1889 | +{ |
| 1890 | + if (AVR32_FUNC_TYPE (cfun->machine->func_type) == AVR32_FT_UNKNOWN) |
| 1891 | + cfun->machine->func_type = avr32_compute_func_type (); |
| 1892 | + |
| 1893 | + return cfun->machine->func_type; |
| 1894 | +} |
| 1895 | + |
| 1896 | +/* |
| 1897 | +   This target hook should return true if values of this type must not be
| 1898 | +   passed solely in registers. The file expr.h provides a definition that is
| 1899 | +   usually appropriate; refer to expr.h for additional documentation.
| 1900 | +*/ |
| 1901 | +bool |
| 1902 | +avr32_must_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type) |
| 1903 | +{ |
| 1904 | + if (type && AGGREGATE_TYPE_P (type) |
| 1905 | +      /* If the alignment is less than the size then pass the struct on
| 1906 | +         the stack. */
| 1907 | + && ((unsigned int) TYPE_ALIGN_UNIT (type) < |
| 1908 | + (unsigned int) int_size_in_bytes (type)) |
| 1909 | + /* If we support unaligned word accesses then structs of size 4 and 8 |
| 1910 | + can have any alignment and still be passed in registers. */ |
| 1911 | + && !(TARGET_UNALIGNED_WORD |
| 1912 | + && (int_size_in_bytes (type) == 4 |
| 1913 | + || int_size_in_bytes (type) == 8)) |
| 1914 | + /* Double word structs need only a word alignment. */ |
| 1915 | + && !(int_size_in_bytes (type) == 8 && TYPE_ALIGN_UNIT (type) >= 4)) |
| 1916 | + return true; |
| 1917 | + |
| 1918 | + if (type && AGGREGATE_TYPE_P (type) |
| 1919 | +      /* Structs of size 3, 5, 6 and 7 are always passed on the stack. */
| 1920 | + && (int_size_in_bytes (type) == 3 |
| 1921 | + || int_size_in_bytes (type) == 5 |
| 1922 | + || int_size_in_bytes (type) == 6 || int_size_in_bytes (type) == 7)) |
| 1923 | + return true; |
| 1924 | + |
| 1925 | + |
| 1926 | + return (type && TREE_ADDRESSABLE (type)); |
| 1927 | +} |
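|      | +
|      | +/* For example, under the rules above an 8-byte struct of char members
|      | +   (alignment 1, size 8) must be passed on the stack unless
|      | +   TARGET_UNALIGNED_WORD is set, while an 8-byte struct of two ints
|      | +   (alignment 4) can still be passed in registers.  */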
| 1928 | + |
| 1929 | + |
| 1930 | +bool |
| 1931 | +avr32_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED) |
| 1932 | +{ |
| 1933 | + return true; |
| 1934 | +} |
| 1935 | + |
| 1936 | +/* |
| 1937 | + This target hook should return true if an argument at the position indicated |
| 1938 | + by cum should be passed by reference. This predicate is queried after target |
| 1939 | + independent reasons for being passed by reference, such as TREE_ADDRESSABLE (type). |
| 1940 | + |
| 1941 | + If the hook returns true, a copy of that argument is made in memory and a |
| 1942 | + pointer to the argument is passed instead of the argument itself. The pointer |
| 1943 | + is passed in whatever way is appropriate for passing a pointer to that type. |
| 1944 | +*/ |
| 1945 | +bool |
| 1946 | +avr32_pass_by_reference (CUMULATIVE_ARGS * cum ATTRIBUTE_UNUSED, |
| 1947 | + enum machine_mode mode ATTRIBUTE_UNUSED, |
| 1948 | + tree type, bool named ATTRIBUTE_UNUSED) |
| 1949 | +{ |
| 1950 | + return (type && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)); |
| 1951 | +} |
| 1952 | + |
| 1953 | +static int |
| 1954 | +avr32_arg_partial_bytes (CUMULATIVE_ARGS * pcum ATTRIBUTE_UNUSED, |
| 1955 | + enum machine_mode mode ATTRIBUTE_UNUSED, |
| 1956 | + tree type ATTRIBUTE_UNUSED, |
| 1957 | + bool named ATTRIBUTE_UNUSED) |
| 1958 | +{ |
| 1959 | + return 0; |
| 1960 | +} |
| 1961 | + |
| 1962 | + |
| 1963 | +struct gcc_target targetm = TARGET_INITIALIZER; |
| 1964 | + |
| 1965 | +/* |
| 1966 | +  Registers used for passing function arguments, listed in allocation order
| 1967 | +  and given as the register numbers used internally by gcc.
| 1968 | +*/ |
| 1969 | +const int avr32_function_arg_reglist[] = |
| 1970 | +{ |
| 1971 | + INTERNAL_REGNUM (12), |
| 1972 | + INTERNAL_REGNUM (11), |
| 1973 | + INTERNAL_REGNUM (10), |
| 1974 | + INTERNAL_REGNUM (9), |
| 1975 | + INTERNAL_REGNUM (8) |
| 1976 | +}; |
| 1977 | + |
| 1978 | +rtx avr32_compare_op0 = NULL_RTX; |
| 1979 | +rtx avr32_compare_op1 = NULL_RTX; |
| 1980 | +rtx avr32_compare_operator = NULL_RTX; |
| 1981 | +rtx avr32_acc_cache = NULL_RTX; |
| 1982 | + |
| 1983 | +/* |
| 1984 | + Returns nonzero if it is allowed to store a value of mode mode in hard |
| 1985 | + register number regno. |
| 1986 | +*/ |
| 1987 | +int |
| 1988 | +avr32_hard_regno_mode_ok (int regnr, enum machine_mode mode) |
| 1989 | +{ |
| 1990 | + /* We allow only float modes in the fp-registers */ |
| 1991 | + if (regnr >= FIRST_FP_REGNUM |
| 1992 | + && regnr <= LAST_FP_REGNUM && GET_MODE_CLASS (mode) != MODE_FLOAT) |
| 1993 | + { |
| 1994 | + return 0; |
| 1995 | + } |
| 1996 | + |
| 1997 | + switch (mode) |
| 1998 | + { |
| 1999 | + case DImode: /* long long */ |
| 2000 | + case DFmode: /* double */ |
| 2001 | + case SCmode: /* __complex__ float */ |
| 2002 | + case CSImode: /* __complex__ int */ |
| 2003 | + if (regnr < 4) |
| 2004 | + { /* long long int not supported in r12, sp, lr |
| 2005 | + or pc. */ |
| 2006 | + return 0; |
| 2007 | + } |
| 2008 | + else |
| 2009 | + { |
| 2010 | +	  if (regnr % 2)	/* long long int must start at an even
| 2011 | +					   register number. */
| 2012 | + return 0; |
| 2013 | + else |
| 2014 | + return 1; |
| 2015 | + } |
| 2016 | + case CDImode: /* __complex__ long long */ |
| 2017 | + case DCmode: /* __complex__ double */ |
| 2018 | + case TImode: /* 16 bytes */ |
| 2019 | + if (regnr < 7) |
| 2020 | + return 0; |
| 2021 | + else if (regnr % 2) |
| 2022 | + return 0; |
| 2023 | + else |
| 2024 | + return 1; |
| 2025 | + default: |
| 2026 | + return 1; |
| 2027 | + } |
| 2028 | +} |
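|      | +
|      | +/* For example, a DImode (long long) value is rejected in r12, sp, lr and
|      | +   pc and must start at an even register number, so it always occupies an
|      | +   aligned register pair.  */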
| 2029 | + |
| 2030 | + |
| 2031 | +int |
| 2032 | +avr32_rnd_operands (rtx add, rtx shift) |
| 2033 | +{ |
| 2034 | + if (GET_CODE (shift) == CONST_INT && |
| 2035 | + GET_CODE (add) == CONST_INT && INTVAL (shift) > 0) |
| 2036 | + { |
| 2037 | + if ((1 << (INTVAL (shift) - 1)) == INTVAL (add)) |
| 2038 | + return TRUE; |
| 2039 | + } |
| 2040 | + |
| 2041 | + return FALSE; |
| 2042 | +} |
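|      | +
|      | +/* I.e. the pair is accepted only when add == 1 << (shift - 1), for example
|      | +   add = 128 with shift = 8, which is the bias needed to round to nearest
|      | +   when shifting right by 'shift'.  */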
| 2043 | + |
| 2044 | + |
| 2045 | + |
| 2046 | +int |
| 2047 | +avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c, const char *str) |
| 2048 | +{ |
| 2049 | + switch (c) |
| 2050 | + { |
| 2051 | + case 'K': |
| 2052 | + case 'I': |
| 2053 | + { |
| 2054 | + HOST_WIDE_INT min_value = 0, max_value = 0; |
| 2055 | + char size_str[3]; |
| 2056 | + int const_size; |
| 2057 | + |
| 2058 | + size_str[0] = str[2]; |
| 2059 | + size_str[1] = str[3]; |
| 2060 | + size_str[2] = '\0'; |
| 2061 | + const_size = atoi (size_str); |
| 2062 | + |
| 2063 | + if (toupper (str[1]) == 'U') |
| 2064 | + { |
| 2065 | + min_value = 0; |
| 2066 | + max_value = (1 << const_size) - 1; |
| 2067 | + } |
| 2068 | + else if (toupper (str[1]) == 'S') |
| 2069 | + { |
| 2070 | + min_value = -(1 << (const_size - 1)); |
| 2071 | + max_value = (1 << (const_size - 1)) - 1; |
| 2072 | + } |
| 2073 | + |
| 2074 | + if (c == 'I') |
| 2075 | + { |
| 2076 | + value = -value; |
| 2077 | + } |
| 2078 | + |
| 2079 | + if (value >= min_value && value <= max_value) |
| 2080 | + { |
| 2081 | + return 1; |
| 2082 | + } |
| 2083 | + break; |
| 2084 | + } |
| 2085 | + case 'M': |
| 2086 | + return avr32_mask_upper_bits_operand (GEN_INT (value), VOIDmode); |
| 2087 | + } |
| 2088 | + |
| 2089 | + return 0; |
| 2090 | +} |
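|      | +
|      | +/* The constraint string encodes signedness and width: "Ks21", used for the
|      | +   stack adjustment in the prologue, accepts -1048576..1048575, while an
|      | +   unsigned variant such as "Ku08" would accept 0..255.  For 'I' constraints
|      | +   the value is negated before the range check.  */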
| 2091 | + |
| 2092 | + |
| 2093 | +/* Compute the mask of which floating-point registers need saving upon
| 2094 | +   entry to this function. */
| 2095 | +static unsigned long |
| 2096 | +avr32_compute_save_fp_reg_mask (void) |
| 2097 | +{ |
| 2098 | + unsigned long func_type = avr32_current_func_type (); |
| 2099 | + unsigned int save_reg_mask = 0; |
| 2100 | + unsigned int reg; |
| 2101 | + unsigned int max_reg = 7; |
| 2102 | + int save_all_call_used_regs = FALSE; |
| 2103 | + |
| 2104 | +  /* This only applies to the hardware floating-point implementation. */
| 2105 | + if (!TARGET_HARD_FLOAT) |
| 2106 | + return 0; |
| 2107 | + |
| 2108 | + if (IS_INTERRUPT (func_type)) |
| 2109 | + { |
| 2110 | + |
| 2111 | + /* Interrupt functions must not corrupt any registers, even call |
| 2112 | + clobbered ones. If this is a leaf function we can just examine the |
| 2113 | + registers used by the RTL, but otherwise we have to assume that |
| 2114 | + whatever function is called might clobber anything, and so we have |
| 2115 | + to save all the call-clobbered registers as well. */ |
| 2116 | + max_reg = 13; |
| 2117 | + save_all_call_used_regs = !current_function_is_leaf; |
| 2118 | + } |
| 2119 | + |
| 2120 | +  /* All used registers must be saved. */
| 2121 | + for (reg = 0; reg <= max_reg; reg++) |
| 2122 | + if (regs_ever_live[INTERNAL_FP_REGNUM (reg)] |
| 2123 | + || (save_all_call_used_regs |
| 2124 | + && call_used_regs[INTERNAL_FP_REGNUM (reg)])) |
| 2125 | + save_reg_mask |= (1 << reg); |
| 2126 | + |
| 2127 | + return save_reg_mask; |
| 2128 | +} |
| 2129 | + |
| 2130 | +/* Compute the mask of registers which need saving upon function entry. */
| 2131 | +static unsigned long |
| 2132 | +avr32_compute_save_reg_mask (int push) |
| 2133 | +{ |
| 2134 | + unsigned long func_type; |
| 2135 | + unsigned int save_reg_mask = 0; |
| 2136 | + unsigned int reg; |
| 2137 | + |
| 2138 | + func_type = avr32_current_func_type (); |
| 2139 | + |
| 2140 | + if (IS_INTERRUPT (func_type)) |
| 2141 | + { |
| 2142 | + unsigned int max_reg = 12; |
| 2143 | + |
| 2144 | + |
| 2145 | + /* Get the banking scheme for the interrupt */ |
| 2146 | + switch (func_type) |
| 2147 | + { |
| 2148 | + case AVR32_FT_ISR_FULL: |
| 2149 | + max_reg = 0; |
| 2150 | + break; |
| 2151 | + case AVR32_FT_ISR_HALF: |
| 2152 | + max_reg = 7; |
| 2153 | + break; |
| 2154 | + case AVR32_FT_ISR_NONE: |
| 2155 | + max_reg = 12; |
| 2156 | + break; |
| 2157 | + } |
| 2158 | + |
| 2159 | + /* Interrupt functions must not corrupt any registers, even call |
| 2160 | + clobbered ones. If this is a leaf function we can just examine the |
| 2161 | + registers used by the RTL, but otherwise we have to assume that |
| 2162 | + whatever function is called might clobber anything, and so we have |
| 2163 | + to save all the call-clobbered registers as well. */ |
| 2164 | + |
| 2165 | + /* Need not push the registers r8-r12 for AVR32A architectures, as this |
| 2166 | +         is automatically done in hardware. We also do not have any shadow
| 2167 | + registers. */ |
| 2168 | + if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) |
| 2169 | + { |
| 2170 | + max_reg = 7; |
| 2171 | + func_type = AVR32_FT_ISR_NONE; |
| 2172 | + } |
| 2173 | + |
| 2174 | +      /* All registers which are used and are not shadowed must be saved. */
| 2175 | + for (reg = 0; reg <= max_reg; reg++) |
| 2176 | + if (regs_ever_live[INTERNAL_REGNUM (reg)] |
| 2177 | + || (!current_function_is_leaf |
| 2178 | + && call_used_regs[INTERNAL_REGNUM (reg)])) |
| 2179 | + save_reg_mask |= (1 << reg); |
| 2180 | + |
| 2181 | +      /* Check LR.  This is only done for the non-shadowed
| 2182 | +         register models. */
| 2183 | +      if ((regs_ever_live[LR_REGNUM]
| 2184 | +           || !current_function_is_leaf
| 2185 | +           || frame_pointer_needed)
| 2186 | +          && (func_type == AVR32_FT_ISR_NONE))
| 2187 | +	save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
| 2188 | + |
| 2189 | + /* Make sure that the GOT register is pushed. */ |
| 2190 | + if (max_reg >= ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM) |
| 2191 | + && current_function_uses_pic_offset_table) |
| 2192 | + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)); |
| 2193 | + |
| 2194 | + } |
| 2195 | + else |
| 2196 | + { |
| 2197 | + int use_pushm = optimize_size; |
| 2198 | + |
| 2199 | + /* In the normal case we only need to save those registers which are |
| 2200 | + call saved and which are used by this function. */ |
| 2201 | + for (reg = 0; reg <= 7; reg++) |
| 2202 | + if (regs_ever_live[INTERNAL_REGNUM (reg)] |
| 2203 | + && !call_used_regs[INTERNAL_REGNUM (reg)]) |
| 2204 | + save_reg_mask |= (1 << reg); |
| 2205 | + |
| 2206 | + /* Make sure that the GOT register is pushed. */ |
| 2207 | + if (current_function_uses_pic_offset_table) |
| 2208 | + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)); |
| 2209 | + |
| 2210 | + |
| 2211 | + /* If we optimize for size and do not have anonymous arguments: use |
| 2212 | + popm/pushm always */ |
| 2213 | + if (use_pushm) |
| 2214 | + { |
| 2215 | + if ((save_reg_mask & (1 << 0)) |
| 2216 | + || (save_reg_mask & (1 << 1)) |
| 2217 | + || (save_reg_mask & (1 << 2)) || (save_reg_mask & (1 << 3))) |
| 2218 | + save_reg_mask |= 0xf; |
| 2219 | + |
| 2220 | + if ((save_reg_mask & (1 << 4)) |
| 2221 | + || (save_reg_mask & (1 << 5)) |
| 2222 | + || (save_reg_mask & (1 << 6)) || (save_reg_mask & (1 << 7))) |
| 2223 | + save_reg_mask |= 0xf0; |
| 2224 | + |
| 2225 | + if ((save_reg_mask & (1 << 8)) || (save_reg_mask & (1 << 9))) |
| 2226 | + save_reg_mask |= 0x300; |
| 2227 | + } |
| 2228 | + |
| 2229 | + |
| 2230 | + /* Check LR */ |
| 2231 | + if ((regs_ever_live[LR_REGNUM] || !current_function_is_leaf || |
| 2232 | + (optimize_size && save_reg_mask) || frame_pointer_needed)) |
| 2233 | + { |
| 2234 | + if (push) |
| 2235 | + { |
| 2236 | + /* Push/Pop LR */ |
| 2237 | + save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM)); |
| 2238 | + } |
| 2239 | + else |
| 2240 | + { |
| 2241 | + /* Pop PC */ |
| 2242 | + save_reg_mask |= (1 << ASM_REGNUM (PC_REGNUM)); |
| 2243 | + } |
| 2244 | + } |
| 2245 | + } |
| 2246 | + |
| 2247 | + return save_reg_mask; |
| 2248 | +} |
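|      | +
|      | +/* Note that when optimizing for size the mask is widened to whole register
|      | +   groups: touching any of r0-r3 forces all of r0-r3 (0xf), any of r4-r7
|      | +   forces all of r4-r7 (0xf0), and r8 or r9 forces both (0x300), so that a
|      | +   single pushm/popm can be used.  */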
| 2249 | + |
| 2250 | +/* Compute total size in bytes of all saved registers. */
| 2251 | +static int |
| 2252 | +avr32_get_reg_mask_size (int reg_mask) |
| 2253 | +{ |
| 2254 | + int reg, size; |
| 2255 | + size = 0; |
| 2256 | + |
| 2257 | + for (reg = 0; reg <= 15; reg++) |
| 2258 | + if (reg_mask & (1 << reg)) |
| 2259 | + size += 4; |
| 2260 | + |
| 2261 | + return size; |
| 2262 | +} |
| 2263 | + |
| 2264 | +/* Return one of the registers which are saved onto the stack
| 2265 | +   upon function entry. */
| 2266 | + |
| 2267 | +static int |
| 2268 | +avr32_get_saved_reg (int save_reg_mask) |
| 2269 | +{ |
| 2270 | + unsigned int reg; |
| 2271 | + |
| 2272 | + /* Find the first register which is saved in the saved_reg_mask */ |
| 2273 | + for (reg = 0; reg <= 15; reg++) |
| 2274 | + if (save_reg_mask & (1 << reg)) |
| 2275 | + return reg; |
| 2276 | + |
| 2277 | + return -1; |
| 2278 | +} |
| 2279 | + |
| 2280 | +/* Return 1 if it is possible to return using a single instruction. */ |
| 2281 | +int |
| 2282 | +avr32_use_return_insn (int iscond) |
| 2283 | +{ |
| 2284 | + unsigned int func_type = avr32_current_func_type (); |
| 2285 | + unsigned long saved_int_regs; |
| 2286 | + unsigned long saved_fp_regs; |
| 2287 | + |
| 2288 | + /* Never use a return instruction before reload has run. */ |
| 2289 | + if (!reload_completed) |
| 2290 | + return 0; |
| 2291 | + |
| 2292 | + /* Must adjust the stack for vararg functions. */ |
| 2293 | + if (current_function_args_info.uses_anonymous_args) |
| 2294 | + return 0; |
| 2295 | + |
| 2296 | +  /* If there is a stack adjustment. */
| 2297 | + if (get_frame_size ()) |
| 2298 | + return 0; |
| 2299 | + |
| 2300 | + saved_int_regs = avr32_compute_save_reg_mask (TRUE); |
| 2301 | + saved_fp_regs = avr32_compute_save_fp_reg_mask (); |
| 2302 | + |
| 2303 | +  /* Functions which have saved fp-regs on the stack cannot return in
| 2304 | +     one instruction. */
| 2305 | + if (saved_fp_regs) |
| 2306 | + return 0; |
| 2307 | + |
| 2308 | +  /* Conditional returns cannot be performed in one instruction if we need
| 2309 | + to restore registers from the stack */ |
| 2310 | + if (iscond && saved_int_regs) |
| 2311 | + return 0; |
| 2312 | + |
| 2313 | +  /* A conditional return cannot be used for interrupt handlers. */
| 2314 | + if (iscond && IS_INTERRUPT (func_type)) |
| 2315 | + return 0; |
| 2316 | + |
| 2317 | +  /* For interrupt handlers which need to pop registers. */
| 2318 | + if (saved_int_regs && IS_INTERRUPT (func_type)) |
| 2319 | + return 0; |
| 2320 | + |
| 2321 | + |
| 2322 | + /* If there are saved registers but the LR isn't saved, then we need two |
| 2323 | + instructions for the return. */ |
| 2324 | + if (saved_int_regs && !(saved_int_regs & (1 << ASM_REGNUM (LR_REGNUM)))) |
| 2325 | + return 0; |
| 2326 | + |
| 2327 | + |
| 2328 | + return 1; |
| 2329 | +} |
| 2330 | + |
| 2331 | + |
| 2332 | +/* Generate some function prologue info in the assembly file. */
| 2333 | + |
| 2334 | +void |
| 2335 | +avr32_target_asm_function_prologue (FILE * f, HOST_WIDE_INT frame_size) |
| 2336 | +{ |
| 2337 | + if (IS_NAKED (avr32_current_func_type ())) |
| 2338 | + fprintf (f, |
| 2339 | + "\t# Function is naked: Prologue and epilogue provided by programmer\n"); |
| 2340 | + |
| 2341 | + if (IS_INTERRUPT (avr32_current_func_type ())) |
| 2342 | + { |
| 2343 | + switch (avr32_current_func_type ()) |
| 2344 | + { |
| 2345 | + case AVR32_FT_ISR_FULL: |
| 2346 | + fprintf (f, |
| 2347 | + "\t# Interrupt Function: Fully shadowed register file\n"); |
| 2348 | + break; |
| 2349 | + case AVR32_FT_ISR_HALF: |
| 2350 | + fprintf (f, |
| 2351 | + "\t# Interrupt Function: Half shadowed register file\n"); |
| 2352 | + break; |
| 2353 | + default: |
| 2354 | + case AVR32_FT_ISR_NONE: |
| 2355 | + fprintf (f, "\t# Interrupt Function: No shadowed register file\n"); |
| 2356 | + break; |
| 2357 | + } |
| 2358 | + } |
| 2359 | + |
| 2360 | + |
| 2361 | + fprintf (f, "\t# args = %i, frame = %li, pretend = %i\n", |
| 2362 | + current_function_args_size, frame_size, |
| 2363 | + current_function_pretend_args_size); |
| 2364 | + |
| 2365 | + fprintf (f, "\t# frame_needed = %i, leaf_function = %i\n", |
| 2366 | + frame_pointer_needed, current_function_is_leaf); |
| 2367 | + |
| 2368 | + fprintf (f, "\t# uses_anonymous_args = %i\n", |
| 2369 | + current_function_args_info.uses_anonymous_args); |
| 2370 | +} |
| 2371 | + |
| 2372 | + |
| 2373 | +/* Generate and emit an insn that we will recognize as a pushm or stm. |
| 2374 | + Unfortunately, since this insn does not reflect very well the actual |
| 2375 | + semantics of the operation, we need to annotate the insn for the benefit |
| 2376 | + of DWARF2 frame unwind information. */ |
| 2377 | + |
| 2378 | +int avr32_convert_to_reglist16 (int reglist8_vect); |
| 2379 | + |
| 2380 | +static rtx |
| 2381 | +emit_multi_reg_push (int reglist, int usePUSHM) |
| 2382 | +{ |
| 2383 | + rtx insn; |
| 2384 | + rtx dwarf; |
| 2385 | + rtx tmp; |
| 2386 | + rtx reg; |
| 2387 | + int i; |
| 2388 | + int nr_regs; |
| 2389 | + int index = 0; |
| 2390 | + |
| 2391 | + if (usePUSHM) |
| 2392 | + { |
| 2393 | + insn = emit_insn (gen_pushm (gen_rtx_CONST_INT (SImode, reglist))); |
| 2394 | + reglist = avr32_convert_to_reglist16 (reglist); |
| 2395 | + } |
| 2396 | + else |
| 2397 | + { |
| 2398 | + insn = emit_insn (gen_stm (stack_pointer_rtx, |
| 2399 | + gen_rtx_CONST_INT (SImode, reglist), |
| 2400 | + gen_rtx_CONST_INT (SImode, 1))); |
| 2401 | + } |
| 2402 | + |
| 2403 | + nr_regs = avr32_get_reg_mask_size (reglist) / 4; |
| 2404 | + dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1)); |
| 2405 | + |
| 2406 | + for (i = 15; i >= 0; i--) |
| 2407 | + { |
| 2408 | + if (reglist & (1 << i)) |
| 2409 | + { |
| 2410 | + reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (i)); |
| 2411 | + tmp = gen_rtx_SET (VOIDmode, |
| 2412 | + gen_rtx_MEM (SImode, |
| 2413 | + plus_constant (stack_pointer_rtx, |
| 2414 | + 4 * index)), reg); |
| 2415 | + RTX_FRAME_RELATED_P (tmp) = 1; |
| 2416 | + XVECEXP (dwarf, 0, 1 + index++) = tmp; |
| 2417 | + } |
| 2418 | + } |
| 2419 | + |
| 2420 | + tmp = gen_rtx_SET (SImode, |
| 2421 | + stack_pointer_rtx, |
| 2422 | + gen_rtx_PLUS (SImode, |
| 2423 | + stack_pointer_rtx, |
| 2424 | + GEN_INT (-4 * nr_regs))); |
| 2425 | + RTX_FRAME_RELATED_P (tmp) = 1; |
| 2426 | + XVECEXP (dwarf, 0, 0) = tmp; |
| 2427 | + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf, |
| 2428 | + REG_NOTES (insn)); |
| 2429 | + return insn; |
| 2430 | +} |
| 2431 | + |
| 2432 | + |
| 2433 | +static rtx |
| 2434 | +emit_multi_fp_reg_push (int reglist) |
| 2435 | +{ |
| 2436 | + rtx insn; |
| 2437 | + rtx dwarf; |
| 2438 | + rtx tmp; |
| 2439 | + rtx reg; |
| 2440 | + int i; |
| 2441 | + int nr_regs; |
| 2442 | + int index = 0; |
| 2443 | + |
| 2444 | + insn = emit_insn (gen_stm_fp (stack_pointer_rtx, |
| 2445 | + gen_rtx_CONST_INT (SImode, reglist), |
| 2446 | + gen_rtx_CONST_INT (SImode, 1))); |
| 2447 | + |
| 2448 | + nr_regs = avr32_get_reg_mask_size (reglist) / 4; |
| 2449 | + dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1)); |
| 2450 | + |
| 2451 | + for (i = 15; i >= 0; i--) |
| 2452 | + { |
| 2453 | + if (reglist & (1 << i)) |
| 2454 | + { |
| 2455 | + reg = gen_rtx_REG (SImode, INTERNAL_FP_REGNUM (i)); |
| 2456 | + tmp = gen_rtx_SET (VOIDmode, |
| 2457 | + gen_rtx_MEM (SImode, |
| 2458 | + plus_constant (stack_pointer_rtx, |
| 2459 | + 4 * index)), reg); |
| 2460 | + RTX_FRAME_RELATED_P (tmp) = 1; |
| 2461 | + XVECEXP (dwarf, 0, 1 + index++) = tmp; |
| 2462 | + } |
| 2463 | + } |
| 2464 | + |
| 2465 | + tmp = gen_rtx_SET (SImode, |
| 2466 | + stack_pointer_rtx, |
| 2467 | + gen_rtx_PLUS (SImode, |
| 2468 | + stack_pointer_rtx, |
| 2469 | + GEN_INT (-4 * nr_regs))); |
| 2470 | + RTX_FRAME_RELATED_P (tmp) = 1; |
| 2471 | + XVECEXP (dwarf, 0, 0) = tmp; |
| 2472 | + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf, |
| 2473 | + REG_NOTES (insn)); |
| 2474 | + return insn; |
| 2475 | +} |
| 2476 | + |
| 2477 | +rtx |
| 2478 | +avr32_gen_load_multiple (rtx * regs, int count, rtx from, |
| 2479 | + int write_back, int in_struct_p, int scalar_p) |
| 2480 | +{ |
| 2481 | + |
| 2482 | + rtx result; |
| 2483 | + int i = 0, j; |
| 2484 | + |
| 2485 | + result = |
| 2486 | + gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + (write_back ? 1 : 0))); |
| 2487 | + |
| 2488 | + if (write_back) |
| 2489 | + { |
| 2490 | + XVECEXP (result, 0, 0) |
| 2491 | + = gen_rtx_SET (GET_MODE (from), from, |
| 2492 | + plus_constant (from, count * 4)); |
| 2493 | + i = 1; |
| 2494 | + count++; |
| 2495 | + } |
| 2496 | + |
| 2497 | + |
| 2498 | + for (j = 0; i < count; i++, j++) |
| 2499 | + { |
| 2500 | + rtx unspec; |
| 2501 | + rtx mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4)); |
| 2502 | + MEM_IN_STRUCT_P (mem) = in_struct_p; |
| 2503 | + MEM_SCALAR_P (mem) = scalar_p; |
| 2504 | + unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, mem), UNSPEC_LDM); |
| 2505 | + XVECEXP (result, 0, i) = gen_rtx_SET (VOIDmode, regs[j], unspec); |
| 2506 | + } |
| 2507 | + |
| 2508 | + return result; |
| 2509 | +} |
| 2510 | + |
| 2511 | + |
| 2512 | +rtx |
| 2513 | +avr32_gen_store_multiple (rtx * regs, int count, rtx to, |
| 2514 | + int in_struct_p, int scalar_p) |
| 2515 | +{ |
| 2516 | + rtx result; |
| 2517 | + int i = 0, j; |
| 2518 | + |
| 2519 | + result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count)); |
| 2520 | + |
| 2521 | + for (j = 0; i < count; i++, j++) |
| 2522 | + { |
| 2523 | + rtx mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4)); |
| 2524 | + MEM_IN_STRUCT_P (mem) = in_struct_p; |
| 2525 | + MEM_SCALAR_P (mem) = scalar_p; |
| 2526 | + XVECEXP (result, 0, i) |
| 2527 | + = gen_rtx_SET (VOIDmode, mem, |
| 2528 | + gen_rtx_UNSPEC (VOIDmode, |
| 2529 | + gen_rtvec (1, regs[j]), |
| 2530 | + UNSPEC_STORE_MULTIPLE)); |
| 2531 | + } |
| 2532 | + |
| 2533 | + return result; |
| 2534 | +} |
| 2535 | + |
| 2536 | + |
| 2537 | +/* Move a block of memory if it is word aligned or we support unaligned |
| 2538 | +   word memory accesses.  The size must be at most 64 bytes. */
| 2539 | + |
| 2540 | +int |
| 2541 | +avr32_gen_movmemsi (rtx * operands) |
| 2542 | +{ |
| 2543 | + HOST_WIDE_INT bytes_to_go; |
| 2544 | + rtx src, dst; |
| 2545 | + rtx st_src, st_dst; |
| 2546 | + int ptr_offset = 0; |
| 2547 | + int block_size; |
| 2548 | + int dst_in_struct_p, src_in_struct_p; |
| 2549 | + int dst_scalar_p, src_scalar_p; |
| 2550 | + int unaligned; |
| 2551 | + |
| 2552 | + if (GET_CODE (operands[2]) != CONST_INT |
| 2553 | + || GET_CODE (operands[3]) != CONST_INT |
| 2554 | + || INTVAL (operands[2]) > 64 |
| 2555 | + || ((INTVAL (operands[3]) & 3) && !TARGET_UNALIGNED_WORD)) |
| 2556 | + return 0; |
| 2557 | + |
| 2558 | + unaligned = (INTVAL (operands[3]) & 3) != 0; |
| 2559 | + |
| 2560 | + block_size = 4; |
| 2561 | + |
| 2562 | + st_dst = XEXP (operands[0], 0); |
| 2563 | + st_src = XEXP (operands[1], 0); |
| 2564 | + |
| 2565 | + dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]); |
| 2566 | + dst_scalar_p = MEM_SCALAR_P (operands[0]); |
| 2567 | + src_in_struct_p = MEM_IN_STRUCT_P (operands[1]); |
| 2568 | + src_scalar_p = MEM_SCALAR_P (operands[1]); |
| 2569 | + |
| 2570 | + dst = copy_to_mode_reg (SImode, st_dst); |
| 2571 | + src = copy_to_mode_reg (SImode, st_src); |
| 2572 | + |
| 2573 | + bytes_to_go = INTVAL (operands[2]); |
| 2574 | + |
| 2575 | + while (bytes_to_go) |
| 2576 | + { |
| 2577 | + enum machine_mode move_mode; |
| 2578 | +      /* There seems to be a problem with reloads for the movti pattern, so
| 2579 | +         TImode moves are disabled until that problem is resolved. */
| 2580 | + |
| 2581 | + /* if ( bytes_to_go >= GET_MODE_SIZE(TImode) ) move_mode = TImode; else |
| 2582 | + */ |
| 2583 | + if ((bytes_to_go >= GET_MODE_SIZE (DImode)) && !unaligned) |
| 2584 | + move_mode = DImode; |
| 2585 | + else if (bytes_to_go >= GET_MODE_SIZE (SImode)) |
| 2586 | + move_mode = SImode; |
| 2587 | + else |
| 2588 | + move_mode = QImode; |
| 2589 | + |
| 2590 | + { |
| 2591 | + rtx dst_mem = gen_rtx_MEM (move_mode, |
| 2592 | + gen_rtx_PLUS (SImode, dst, |
| 2593 | + GEN_INT (ptr_offset))); |
| 2594 | + rtx src_mem = gen_rtx_MEM (move_mode, |
| 2595 | + gen_rtx_PLUS (SImode, src, |
| 2596 | + GEN_INT (ptr_offset))); |
| 2597 | + ptr_offset += GET_MODE_SIZE (move_mode); |
| 2598 | + bytes_to_go -= GET_MODE_SIZE (move_mode); |
| 2599 | + |
| 2600 | + MEM_IN_STRUCT_P (dst_mem) = dst_in_struct_p; |
| 2601 | + MEM_SCALAR_P (dst_mem) = dst_scalar_p; |
| 2602 | + |
| 2603 | + MEM_IN_STRUCT_P (src_mem) = src_in_struct_p; |
| 2604 | + MEM_SCALAR_P (src_mem) = src_scalar_p; |
| 2605 | + emit_move_insn (dst_mem, src_mem); |
| 2606 | + |
| 2607 | + } |
| 2608 | + } |
| 2609 | + |
| 2610 | + return 1; |
| 2611 | +} |
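|      | +
|      | +/* For example, a 12-byte word-aligned copy is expanded into one DImode and
|      | +   one SImode move, while an unaligned copy (only accepted when
|      | +   TARGET_UNALIGNED_WORD is set) falls back to SImode and QImode moves.  */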
| 2612 | + |
| 2613 | + |
| 2614 | + |
| 2615 | +/* Expand the prologue instruction. */
| 2616 | +void |
| 2617 | +avr32_expand_prologue (void) |
| 2618 | +{ |
| 2619 | + rtx insn, dwarf; |
| 2620 | + unsigned long saved_reg_mask, saved_fp_reg_mask; |
| 2621 | + int reglist8 = 0; |
| 2622 | + |
| 2623 | +  /* Naked functions do not have a prologue. */
| 2624 | + if (IS_NAKED (avr32_current_func_type ())) |
| 2625 | + return; |
| 2626 | + |
| 2627 | + saved_reg_mask = avr32_compute_save_reg_mask (TRUE); |
| 2628 | + |
| 2629 | + if (saved_reg_mask) |
| 2630 | + { |
| 2631 | + /* Must push used registers */ |
| 2632 | + |
| 2633 | +      /* Should we use PUSHM or STM? */
| 2634 | + int usePUSHM = TRUE; |
| 2635 | + reglist8 = 0; |
| 2636 | + if (((saved_reg_mask & (1 << 0)) || |
| 2637 | + (saved_reg_mask & (1 << 1)) || |
| 2638 | + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3)))) |
| 2639 | + { |
| 2640 | +	  /* At least one of R0-R3 should be pushed. */
| 2641 | + if (((saved_reg_mask & (1 << 0)) && |
| 2642 | + (saved_reg_mask & (1 << 1)) && |
| 2643 | + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3)))) |
| 2644 | + { |
| 2645 | + /* All should be pushed */ |
| 2646 | + reglist8 |= 0x01; |
| 2647 | + } |
| 2648 | + else |
| 2649 | + { |
| 2650 | + usePUSHM = FALSE; |
| 2651 | + } |
| 2652 | + } |
| 2653 | + |
| 2654 | + if (((saved_reg_mask & (1 << 4)) || |
| 2655 | + (saved_reg_mask & (1 << 5)) || |
| 2656 | + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7)))) |
| 2657 | + { |
| 2658 | +	  /* At least one of R4-R7 should be pushed. */
| 2659 | + if (((saved_reg_mask & (1 << 4)) && |
| 2660 | + (saved_reg_mask & (1 << 5)) && |
| 2661 | + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7)))) |
| 2662 | + { |
| 2663 | + if (usePUSHM) |
| 2664 | + /* All should be pushed */ |
| 2665 | + reglist8 |= 0x02; |
| 2666 | + } |
| 2667 | + else |
| 2668 | + { |
| 2669 | + usePUSHM = FALSE; |
| 2670 | + } |
| 2671 | + } |
| 2672 | + |
| 2673 | + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9)))) |
| 2674 | + { |
| 2675 | +	  /* At least one of R8-R9 should be pushed. */
| 2676 | + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9)))) |
| 2677 | + { |
| 2678 | + if (usePUSHM) |
| 2679 | + /* All should be pushed */ |
| 2680 | + reglist8 |= 0x04; |
| 2681 | + } |
| 2682 | + else |
| 2683 | + { |
| 2684 | + usePUSHM = FALSE; |
| 2685 | + } |
| 2686 | + } |
| 2687 | + |
| 2688 | + if (saved_reg_mask & (1 << 10)) |
| 2689 | + reglist8 |= 0x08; |
| 2690 | + |
| 2691 | + if (saved_reg_mask & (1 << 11)) |
| 2692 | + reglist8 |= 0x10; |
| 2693 | + |
| 2694 | + if (saved_reg_mask & (1 << 12)) |
| 2695 | + reglist8 |= 0x20; |
| 2696 | + |
| 2697 | + if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM))) |
| 2698 | + { |
| 2699 | + /* Push LR */ |
| 2700 | + reglist8 |= 0x40; |
| 2701 | + } |
| 2702 | + |
| 2703 | + if (usePUSHM) |
| 2704 | + { |
| 2705 | + insn = emit_multi_reg_push (reglist8, TRUE); |
| 2706 | + } |
| 2707 | + else |
| 2708 | + { |
| 2709 | + insn = emit_multi_reg_push (saved_reg_mask, FALSE); |
| 2710 | + } |
| 2711 | + RTX_FRAME_RELATED_P (insn) = 1; |
| 2712 | + |
| 2713 | + /* Prevent this instruction from being scheduled after any other |
| 2714 | + instructions. */ |
| 2715 | + emit_insn (gen_blockage ()); |
| 2716 | + } |
| 2717 | + |
| 2718 | + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask (); |
| 2719 | + if (saved_fp_reg_mask) |
| 2720 | + { |
| 2721 | + insn = emit_multi_fp_reg_push (saved_fp_reg_mask); |
| 2722 | + RTX_FRAME_RELATED_P (insn) = 1; |
| 2723 | + |
| 2724 | + /* Prevent this instruction from being scheduled after any other |
| 2725 | + instructions. */ |
| 2726 | + emit_insn (gen_blockage ()); |
| 2727 | + } |
| 2728 | + |
| 2729 | + /* Set frame pointer */ |
| 2730 | + if (frame_pointer_needed) |
| 2731 | + { |
| 2732 | + insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx); |
| 2733 | + RTX_FRAME_RELATED_P (insn) = 1; |
| 2734 | + } |
| 2735 | + |
| 2736 | + if (get_frame_size () > 0) |
| 2737 | + { |
| 2738 | + if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks21")) |
| 2739 | + { |
| 2740 | + insn = emit_insn (gen_rtx_SET (SImode, |
| 2741 | + stack_pointer_rtx, |
| 2742 | + gen_rtx_PLUS (SImode, |
| 2743 | + stack_pointer_rtx, |
| 2744 | + gen_rtx_CONST_INT |
| 2745 | + (SImode, |
| 2746 | + -get_frame_size |
| 2747 | + ())))); |
| 2748 | + RTX_FRAME_RELATED_P (insn) = 1; |
| 2749 | + } |
| 2750 | + else |
| 2751 | + { |
| 2752 | +	  /* The immediate is larger than k21.  We must either use one of the
| 2753 | +	     pushed registers as temporary storage or free up a temporary
| 2754 | +	     register by pushing an extra register to the stack. */
| 2755 | + rtx temp_reg, const_pool_entry, insn; |
| 2756 | + if (saved_reg_mask) |
| 2757 | + { |
| 2758 | + temp_reg = |
| 2759 | + gen_rtx_REG (SImode, |
| 2760 | + INTERNAL_REGNUM (avr32_get_saved_reg |
| 2761 | + (saved_reg_mask))); |
| 2762 | + } |
| 2763 | + else |
| 2764 | + { |
| 2765 | + temp_reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (7)); |
| 2766 | + emit_move_insn (gen_rtx_MEM |
| 2767 | + (SImode, |
| 2768 | + gen_rtx_PRE_DEC (SImode, stack_pointer_rtx)), |
| 2769 | + temp_reg); |
| 2770 | + } |
| 2771 | + |
| 2772 | + const_pool_entry = |
| 2773 | + force_const_mem (SImode, |
| 2774 | + gen_rtx_CONST_INT (SImode, get_frame_size ())); |
| 2775 | + emit_move_insn (temp_reg, const_pool_entry); |
| 2776 | + |
| 2777 | + insn = emit_insn (gen_rtx_SET (SImode, |
| 2778 | + stack_pointer_rtx, |
| 2779 | + gen_rtx_MINUS (SImode, |
| 2780 | + stack_pointer_rtx, |
| 2781 | + temp_reg))); |
| 2782 | + |
| 2783 | + dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx, |
| 2784 | + gen_rtx_PLUS (SImode, stack_pointer_rtx, |
| 2785 | + GEN_INT (-get_frame_size ()))); |
| 2786 | + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, |
| 2787 | + dwarf, REG_NOTES (insn)); |
| 2788 | + RTX_FRAME_RELATED_P (insn) = 1; |
| 2789 | + |
| 2790 | + if (!saved_reg_mask) |
| 2791 | + { |
| 2792 | + insn = |
| 2793 | + emit_move_insn (temp_reg, |
| 2794 | + gen_rtx_MEM (SImode, |
| 2795 | + gen_rtx_POST_INC (SImode, |
| 2796 | + gen_rtx_REG |
| 2797 | + (SImode, |
| 2798 | + 13)))); |
| 2799 | + } |
| 2800 | + |
| 2801 | + /* Mark the temp register as dead */ |
| 2802 | + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, temp_reg, |
| 2803 | + REG_NOTES (insn)); |
| 2804 | + |
| 2805 | + |
| 2806 | + } |
| 2807 | + |
| 2808 | +      /* Prevent the stack adjustment from being scheduled after any
| 2809 | + instructions using the frame pointer. */ |
| 2810 | + emit_insn (gen_blockage ()); |
| 2811 | + } |
| 2812 | + |
| 2813 | + /* Load GOT */ |
| 2814 | + if (flag_pic) |
| 2815 | + { |
| 2816 | + avr32_load_pic_register (); |
| 2817 | + |
| 2818 | + /* gcc does not know that load or call instructions might use the pic |
| 2819 | + register so it might schedule these instructions before the loading |
| 2820 | + of the pic register. To avoid this emit a barrier for now. TODO! |
| 2821 | + Find out a better way to let gcc know which instructions might use |
| 2822 | + the pic register. */ |
| 2823 | + emit_insn (gen_blockage ()); |
| 2824 | + } |
| 2825 | + return; |
| 2826 | +} |
| 2827 | + |
| 2828 | +void |
| 2829 | +avr32_set_return_address (rtx source) |
| 2830 | +{ |
| 2831 | + rtx addr; |
| 2832 | + unsigned long saved_regs; |
| 2833 | + |
| 2834 | + saved_regs = avr32_compute_save_reg_mask (TRUE); |
| 2835 | + |
| 2836 | + if (!(saved_regs & (1 << ASM_REGNUM (LR_REGNUM)))) |
| 2837 | + emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source); |
| 2838 | + else |
| 2839 | + { |
| 2840 | + if (frame_pointer_needed) |
| 2841 | + addr = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM); |
| 2842 | + else |
| 2843 | + /* FIXME: Need to use scratch register if frame is large */ |
| 2844 | + addr = plus_constant (stack_pointer_rtx, get_frame_size ()); |
| 2845 | + |
| 2846 | + emit_move_insn (gen_rtx_MEM (Pmode, addr), source); |
| 2847 | + } |
| 2848 | +} |
| 2849 | + |
| 2850 | + |
| 2851 | + |
| 2852 | +/* Return the length of INSN. LENGTH is the initial length computed by |
| 2853 | + attributes in the machine-description file. */ |
| 2854 | + |
| 2855 | +int |
| 2856 | +avr32_adjust_insn_length (rtx insn ATTRIBUTE_UNUSED, |
| 2857 | + int length ATTRIBUTE_UNUSED) |
| 2858 | +{ |
| 2859 | + return length; |
| 2860 | +} |
| 2861 | + |
| 2862 | +void |
| 2863 | +avr32_output_return_instruction (int single_ret_inst ATTRIBUTE_UNUSED, |
| 2864 | + int iscond ATTRIBUTE_UNUSED, |
| 2865 | + rtx cond ATTRIBUTE_UNUSED, rtx r12_imm) |
| 2866 | +{ |
| 2867 | + |
| 2868 | + unsigned long saved_reg_mask, saved_fp_reg_mask; |
| 2869 | + int insert_ret = TRUE; |
| 2870 | + int reglist8 = 0; |
| 2871 | + int stack_adjustment = get_frame_size (); |
| 2872 | + unsigned int func_type = avr32_current_func_type (); |
| 2873 | + FILE *f = asm_out_file; |
| 2874 | + |
| 2875 | +  /* Naked functions do not have an epilogue. */
| 2876 | + if (IS_NAKED (func_type)) |
| 2877 | + return; |
| 2878 | + |
| 2879 | + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask (); |
| 2880 | + |
| 2881 | + saved_reg_mask = avr32_compute_save_reg_mask (FALSE); |
| 2882 | + |
| 2883 | + /* Reset frame pointer */ |
| 2884 | + if (stack_adjustment > 0) |
| 2885 | + { |
| 2886 | + if (avr32_const_ok_for_constraint_p (stack_adjustment, 'I', "Is21")) |
| 2887 | + { |
| 2888 | + fprintf (f, "\tsub sp, %i # Reset Frame Pointer\n", |
| 2889 | + -stack_adjustment); |
| 2890 | + } |
| 2891 | + else |
| 2892 | + { |
| 2893 | + /* TODO! Is it safe to use r8 as scratch?? */ |
| 2894 | + fprintf (f, "\tmov r8, lo(%i) # Reset Frame Pointer\n", |
| 2895 | + -stack_adjustment); |
| 2896 | + fprintf (f, "\torh r8, hi(%i) # Reset Frame Pointer\n", |
| 2897 | + -stack_adjustment); |
| 2898 | + fprintf (f, "\tadd sp,r8 # Reset Frame Pointer\n"); |
| 2899 | + } |
| 2900 | + } |
| 2901 | + |
| 2902 | + if (saved_fp_reg_mask) |
| 2903 | + { |
| 2904 | + char reglist[64]; /* 64 bytes should be enough... */ |
| 2905 | + avr32_make_fp_reglist_w (saved_fp_reg_mask, (char *) reglist); |
| 2906 | + fprintf (f, "\tldcm.w\tcp0, sp++, %s\n", reglist); |
| 2907 | + if (saved_fp_reg_mask & ~0xff) |
| 2908 | + { |
| 2909 | + saved_fp_reg_mask &= ~0xff; |
| 2910 | + avr32_make_fp_reglist_d (saved_fp_reg_mask, (char *) reglist); |
| 2911 | + fprintf (f, "\tldcm.d\tcp0, sp++, %s\n", reglist); |
| 2912 | + } |
| 2913 | + } |
| 2914 | + |
| 2915 | + if (saved_reg_mask) |
| 2916 | + { |
| 2917 | + /* Must pop used registers */ |
| 2918 | + |
| 2919 | + /* Should we use POPM or LDM? */ |
| 2920 | + int usePOPM = TRUE; |
| 2921 | + if (((saved_reg_mask & (1 << 0)) || |
| 2922 | + (saved_reg_mask & (1 << 1)) || |
| 2923 | + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3)))) |
| 2924 | + { |
| 2925 | +	  /* At least one of R0-R3 should be popped. */
| 2926 | + if (((saved_reg_mask & (1 << 0)) && |
| 2927 | + (saved_reg_mask & (1 << 1)) && |
| 2928 | + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3)))) |
| 2929 | + { |
| 2930 | + /* All should be popped */ |
| 2931 | + reglist8 |= 0x01; |
| 2932 | + } |
| 2933 | + else |
| 2934 | + { |
| 2935 | + usePOPM = FALSE; |
| 2936 | + } |
| 2937 | + } |
| 2938 | + |
| 2939 | + if (((saved_reg_mask & (1 << 4)) || |
| 2940 | + (saved_reg_mask & (1 << 5)) || |
| 2941 | + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7)))) |
| 2942 | + { |
| 2943 | +	  /* At least one of R4-R7 should be popped. */
| 2944 | + if (((saved_reg_mask & (1 << 4)) && |
| 2945 | + (saved_reg_mask & (1 << 5)) && |
| 2946 | + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7)))) |
| 2947 | + { |
| 2948 | + if (usePOPM) |
| 2949 | + /* All should be popped */ |
| 2950 | + reglist8 |= 0x02; |
| 2951 | + } |
| 2952 | + else |
| 2953 | + { |
| 2954 | + usePOPM = FALSE; |
| 2955 | + } |
| 2956 | + } |
| 2957 | + |
| 2958 | + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9)))) |
| 2959 | + { |
| 2960 | +	  /* One of R8-R9 should at least be popped */
| 2961 | + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9)))) |
| 2962 | + { |
| 2963 | + if (usePOPM) |
| 2964 | +	      /* All should be popped */
| 2965 | + reglist8 |= 0x04; |
| 2966 | + } |
| 2967 | + else |
| 2968 | + { |
| 2969 | + usePOPM = FALSE; |
| 2970 | + } |
| 2971 | + } |
| 2972 | + |
| 2973 | + if (saved_reg_mask & (1 << 10)) |
| 2974 | + reglist8 |= 0x08; |
| 2975 | + |
| 2976 | + if (saved_reg_mask & (1 << 11)) |
| 2977 | + reglist8 |= 0x10; |
| 2978 | + |
| 2979 | + if (saved_reg_mask & (1 << 12)) |
| 2980 | + reglist8 |= 0x20; |
| 2981 | + |
| 2982 | + if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM))) |
| 2983 | + /* Pop LR */ |
| 2984 | + reglist8 |= 0x40; |
| 2985 | + |
| 2986 | + if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM))) |
| 2987 | + /* Pop LR into PC. */ |
| 2988 | + reglist8 |= 0x80; |
| 2989 | + |
| 2990 | + if (usePOPM) |
| 2991 | + { |
| 2992 | + char reglist[64]; /* 64 bytes should be enough... */ |
| 2993 | + avr32_make_reglist8 (reglist8, (char *) reglist); |
| 2994 | + |
| 2995 | + if (reglist8 & 0x80) |
| 2996 | + /* This instruction is also a return */ |
| 2997 | + insert_ret = FALSE; |
| 2998 | + |
| 2999 | + if (r12_imm && !insert_ret) |
| 3000 | + fprintf (f, "\tpopm %s, r12=%li\n", reglist, INTVAL (r12_imm)); |
| 3001 | + else |
| 3002 | + fprintf (f, "\tpopm %s\n", reglist); |
| 3003 | + |
| 3004 | + } |
| 3005 | + else |
| 3006 | + { |
| 3007 | + char reglist[64]; /* 64 bytes should be enough... */ |
| 3008 | + avr32_make_reglist16 (saved_reg_mask, (char *) reglist); |
| 3009 | + if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM))) |
| 3010 | + /* This instruction is also a return */ |
| 3011 | + insert_ret = FALSE; |
| 3012 | + |
| 3013 | + if (r12_imm && !insert_ret) |
| 3014 | + fprintf (f, "\tldm sp++, %s, r12=%li\n", reglist, |
| 3015 | + INTVAL (r12_imm)); |
| 3016 | + else |
| 3017 | + fprintf (f, "\tldm sp++, %s\n", reglist); |
| 3018 | + |
| 3019 | + } |
| 3020 | + |
| 3021 | + } |
| 3022 | + |
| 3023 | + if (IS_INTERRUPT (func_type)) |
| 3024 | + { |
| 3025 | + fprintf (f, "\trete\n"); |
| 3026 | + } |
| 3027 | + else if (insert_ret) |
| 3028 | + { |
| 3029 | + if (r12_imm) |
| 3030 | + fprintf (f, "\tretal %li\n", INTVAL (r12_imm)); |
| 3031 | + else |
| 3032 | + fprintf (f, "\tretal r12\n"); |
| 3033 | + } |
| 3034 | +} |
| 3035 | + |
| 3036 | +/* Function for converting a fp-register mask to a |
| 3037 | + reglistCPD8 register list string. */ |
| 3038 | +void |
| 3039 | +avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string) |
| 3040 | +{ |
| 3041 | + int i; |
| 3042 | + |
| 3043 | + /* Make sure reglist_string is empty */ |
| 3044 | + reglist_string[0] = '\0'; |
| 3045 | + |
| 3046 | + for (i = 0; i < NUM_FP_REGS; i += 2) |
| 3047 | + { |
| 3048 | + if (reglist_mask & (1 << i)) |
| 3049 | + { |
| 3050 | + strlen (reglist_string) ? |
| 3051 | + sprintf (reglist_string, "%s, %s-%s", reglist_string, |
| 3052 | + reg_names[INTERNAL_FP_REGNUM (i)], |
| 3053 | + reg_names[INTERNAL_FP_REGNUM (i + 1)]) : |
| 3054 | + sprintf (reglist_string, "%s-%s", |
| 3055 | + reg_names[INTERNAL_FP_REGNUM (i)], |
| 3056 | + reg_names[INTERNAL_FP_REGNUM (i + 1)]); |
| 3057 | + } |
| 3058 | + } |
| 3059 | +} |
| 3060 | + |
| 3061 | +/* Function for converting a fp-register mask to a |
| 3062 | + reglistCP8 register list string. */ |
| 3063 | +void |
| 3064 | +avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string) |
| 3065 | +{ |
| 3066 | + int i; |
| 3067 | + |
| 3068 | + /* Make sure reglist_string is empty */ |
| 3069 | + reglist_string[0] = '\0'; |
| 3070 | + |
| 3071 | + for (i = 0; i < NUM_FP_REGS; ++i) |
| 3072 | + { |
| 3073 | + if (reglist_mask & (1 << i)) |
| 3074 | + { |
| 3075 | + strlen (reglist_string) ? |
| 3076 | + sprintf (reglist_string, "%s, %s", reglist_string, |
| 3077 | + reg_names[INTERNAL_FP_REGNUM (i)]) : |
| 3078 | + sprintf (reglist_string, "%s", reg_names[INTERNAL_FP_REGNUM (i)]); |
| 3079 | + } |
| 3080 | + } |
| 3081 | +} |
| 3082 | + |
| 3083 | +void |
| 3084 | +avr32_make_reglist16 (int reglist16_vect, char *reglist16_string) |
| 3085 | +{ |
| 3086 | + int i; |
| 3087 | + |
| 3088 | + /* Make sure reglist16_string is empty */ |
| 3089 | + reglist16_string[0] = '\0'; |
| 3090 | + |
| 3091 | + for (i = 0; i < 16; ++i) |
| 3092 | + { |
| 3093 | + if (reglist16_vect & (1 << i)) |
| 3094 | + { |
| 3095 | + strlen (reglist16_string) ? |
| 3096 | + sprintf (reglist16_string, "%s, %s", reglist16_string, |
| 3097 | + reg_names[INTERNAL_REGNUM (i)]) : |
| 3098 | + sprintf (reglist16_string, "%s", reg_names[INTERNAL_REGNUM (i)]); |
| 3099 | + } |
| 3100 | + } |
| 3101 | +} |
| 3102 | + |
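| | +/* Expand a POPM/PUSHM reglist8 bit vector into the corresponding
| | +   reglist16 bit vector: bit 0 maps to r0-r3, bit 1 to r4-r7, bit 2 to
| | +   r8-r9, bit 3 to r10, bit 4 to r11, bit 5 to r12, bit 6 to lr and
| | +   bit 7 to pc. */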
| 3103 | +int |
| 3104 | +avr32_convert_to_reglist16 (int reglist8_vect) |
| 3105 | +{ |
| 3106 | + int reglist16_vect = 0; |
| 3107 | + if (reglist8_vect & 0x1) |
| 3108 | + reglist16_vect |= 0xF; |
| 3109 | + if (reglist8_vect & 0x2) |
| 3110 | + reglist16_vect |= 0xF0; |
| 3111 | + if (reglist8_vect & 0x4) |
| 3112 | + reglist16_vect |= 0x300; |
| 3113 | + if (reglist8_vect & 0x8) |
| 3114 | + reglist16_vect |= 0x400; |
| 3115 | + if (reglist8_vect & 0x10) |
| 3116 | + reglist16_vect |= 0x800; |
| 3117 | + if (reglist8_vect & 0x20) |
| 3118 | + reglist16_vect |= 0x1000; |
| 3119 | + if (reglist8_vect & 0x40) |
| 3120 | + reglist16_vect |= 0x4000; |
| 3121 | + if (reglist8_vect & 0x80) |
| 3122 | + reglist16_vect |= 0x8000; |
| 3123 | + |
| 3124 | + return reglist16_vect; |
| 3125 | +} |
| 3126 | + |
| 3127 | +void |
| 3128 | +avr32_make_reglist8 (int reglist8_vect, char *reglist8_string) |
| 3129 | +{ |
| 3130 | + /* Make sure reglist8_string is empty */ |
| 3131 | + reglist8_string[0] = '\0'; |
| 3132 | + |
| 3133 | + if (reglist8_vect & 0x1) |
| 3134 | + sprintf (reglist8_string, "r0-r3"); |
| 3135 | + if (reglist8_vect & 0x2) |
| 3136 | + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r4-r7", |
| 3137 | + reglist8_string) : |
| 3138 | + sprintf (reglist8_string, "r4-r7"); |
| 3139 | + if (reglist8_vect & 0x4) |
| 3140 | + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r8-r9", |
| 3141 | + reglist8_string) : |
| 3142 | + sprintf (reglist8_string, "r8-r9"); |
| 3143 | + if (reglist8_vect & 0x8) |
| 3144 | + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r10", |
| 3145 | + reglist8_string) : |
| 3146 | + sprintf (reglist8_string, "r10"); |
| 3147 | + if (reglist8_vect & 0x10) |
| 3148 | + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r11", |
| 3149 | + reglist8_string) : |
| 3150 | + sprintf (reglist8_string, "r11"); |
| 3151 | + if (reglist8_vect & 0x20) |
| 3152 | + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r12", |
| 3153 | + reglist8_string) : |
| 3154 | + sprintf (reglist8_string, "r12"); |
| 3155 | + if (reglist8_vect & 0x40) |
| 3156 | + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, lr", |
| 3157 | + reglist8_string) : |
| 3158 | + sprintf (reglist8_string, "lr"); |
| 3159 | + if (reglist8_vect & 0x80) |
| 3160 | + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, pc", |
| 3161 | + reglist8_string) : |
| 3162 | + sprintf (reglist8_string, "pc"); |
| 3163 | +} |
| 3164 | + |
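| | +/* Return the hard register used for exception handling return data
| | +   value N: the first four values go in registers 8-11 (the r8-r11
| | +   group), anything beyond that is INVALID_REGNUM. */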
| 3165 | +int |
| 3166 | +avr32_eh_return_data_regno (int n) |
| 3167 | +{ |
| 3168 | + if (n >= 0 && n <= 3) |
| 3169 | + return 8 + n; |
| 3170 | + else |
| 3171 | + return INVALID_REGNUM; |
| 3172 | +} |
| 3173 | + |
| 3174 | +/* Compute the distance from register FROM to register TO. |
| 3175 | + These can be the arg pointer, the frame pointer or |
| 3176 | + the stack pointer. |
| 3177 | + Typical stack layout looks like this: |
| 3178 | + |
| 3179 | + old stack pointer -> | | |
| 3180 | + ---- |
| 3181 | + | | \ |
| 3182 | + | | saved arguments for |
| 3183 | + | | vararg functions |
| 3184 | + arg_pointer -> | | / |
| 3185 | + -- |
| 3186 | + | | \ |
| 3187 | + | | call saved |
| 3188 | + | | registers |
| 3189 | + | | / |
| 3190 | + frame ptr -> -- |
| 3191 | + | | \ |
| 3192 | + | | local |
| 3193 | + | | variables |
| 3194 | + stack ptr --> | | / |
| 3195 | + -- |
| 3196 | + | | \ |
| 3197 | + | | outgoing |
| 3198 | + | | arguments |
| 3199 | + | | / |
| 3200 | + -- |
| 3201 | + |
| 3202 | +  For a given function some or all of these stack components
| 3203 | + may not be needed, giving rise to the possibility of |
| 3204 | + eliminating some of the registers. |
| 3205 | + |
| 3206 | + The values returned by this function must reflect the behaviour |
| 3207 | + of avr32_expand_prologue() and avr32_compute_save_reg_mask(). |
| 3208 | + |
| 3209 | + The sign of the number returned reflects the direction of stack |
| 3210 | + growth, so the values are positive for all eliminations except |
| 3211 | + from the soft frame pointer to the hard frame pointer. */ |
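| | +/* For example: with two call-saved registers (8 bytes) and 12 bytes of
| | +   local variables, the offsets computed below are 8 + 12 = 20 for
| | +   ARG_POINTER -> STACK_POINTER, 8 for ARG_POINTER -> FRAME_POINTER and
| | +   12 for FRAME_POINTER -> STACK_POINTER (numbers are illustrative only). */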
| 3212 | + |
| 3213 | + |
| 3214 | +int |
| 3215 | +avr32_initial_elimination_offset (int from, int to) |
| 3216 | +{ |
| 3217 | + int i; |
| 3218 | + int call_saved_regs = 0; |
| 3219 | + unsigned long saved_reg_mask, saved_fp_reg_mask; |
| 3220 | + unsigned int local_vars = get_frame_size (); |
| 3221 | + |
| 3222 | + saved_reg_mask = avr32_compute_save_reg_mask (TRUE); |
| 3223 | + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask (); |
| 3224 | + |
| 3225 | + for (i = 0; i < 16; ++i) |
| 3226 | + { |
| 3227 | + if (saved_reg_mask & (1 << i)) |
| 3228 | + call_saved_regs += 4; |
| 3229 | + } |
| 3230 | + |
| 3231 | + for (i = 0; i < NUM_FP_REGS; ++i) |
| 3232 | + { |
| 3233 | + if (saved_fp_reg_mask & (1 << i)) |
| 3234 | + call_saved_regs += 4; |
| 3235 | + } |
| 3236 | + |
| 3237 | + switch (from) |
| 3238 | + { |
| 3239 | + case ARG_POINTER_REGNUM: |
| 3240 | + switch (to) |
| 3241 | + { |
| 3242 | + case STACK_POINTER_REGNUM: |
| 3243 | + return call_saved_regs + local_vars; |
| 3244 | + case FRAME_POINTER_REGNUM: |
| 3245 | + return call_saved_regs; |
| 3246 | + default: |
| 3247 | + abort (); |
| 3248 | + } |
| 3249 | + case FRAME_POINTER_REGNUM: |
| 3250 | + switch (to) |
| 3251 | + { |
| 3252 | + case STACK_POINTER_REGNUM: |
| 3253 | + return local_vars; |
| 3254 | + default: |
| 3255 | + abort (); |
| 3256 | + } |
| 3257 | + default: |
| 3258 | + abort (); |
| 3259 | + } |
| 3260 | +} |
| 3261 | + |
| 3262 | + |
| 3263 | +/* |
| 3264 | +  Returns an rtx used when passing the next argument to a function.
| 3265 | +  avr32_init_cumulative_args() and avr32_function_arg_advance() set which
| 3266 | +  register to use.
| 3267 | +*/ |
| 3268 | +rtx |
| 3269 | +avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode, |
| 3270 | + tree type, int named) |
| 3271 | +{ |
| 3272 | + int index = -1; |
| 3273 | + |
| 3274 | + HOST_WIDE_INT arg_size, arg_rsize; |
| 3275 | + if (type) |
| 3276 | + { |
| 3277 | + arg_size = int_size_in_bytes (type); |
| 3278 | + } |
| 3279 | + else |
| 3280 | + { |
| 3281 | + arg_size = GET_MODE_SIZE (mode); |
| 3282 | + } |
| 3283 | + arg_rsize = PUSH_ROUNDING (arg_size); |
| 3284 | + |
| 3285 | + /* |
| 3286 | + The last time this macro is called, it is called with mode == VOIDmode, |
| 3287 | + and its result is passed to the call or call_value pattern as operands 2 |
| 3288 | + and 3 respectively. */ |
| 3289 | + if (mode == VOIDmode) |
| 3290 | + { |
| 3291 | + return gen_rtx_CONST_INT (SImode, 22); /* ToDo: fixme. */ |
| 3292 | + } |
| 3293 | + |
| 3294 | + if ((*targetm.calls.must_pass_in_stack) (mode, type) || !named) |
| 3295 | + { |
| 3296 | + return NULL_RTX; |
| 3297 | + } |
| 3298 | + |
| 3299 | + if (arg_rsize == 8) |
| 3300 | + { |
| 3301 | + /* use r11:r10 or r9:r8. */ |
| 3302 | + if (!(GET_USED_INDEX (cum, 1) || GET_USED_INDEX (cum, 2))) |
| 3303 | + index = 1; |
| 3304 | + else if (!(GET_USED_INDEX (cum, 3) || GET_USED_INDEX (cum, 4))) |
| 3305 | + index = 3; |
| 3306 | + else |
| 3307 | + index = -1; |
| 3308 | + } |
| 3309 | + else if (arg_rsize == 4) |
| 3310 | + { /* Use first available register */ |
| 3311 | + index = 0; |
| 3312 | + while (index <= LAST_CUM_REG_INDEX && GET_USED_INDEX (cum, index)) |
| 3313 | + index++; |
| 3314 | + if (index > LAST_CUM_REG_INDEX) |
| 3315 | + index = -1; |
| 3316 | + } |
| 3317 | + |
| 3318 | + SET_REG_INDEX (cum, index); |
| 3319 | + |
| 3320 | + if (GET_REG_INDEX (cum) >= 0) |
| 3321 | + return gen_rtx_REG (mode, |
| 3322 | + avr32_function_arg_reglist[GET_REG_INDEX (cum)]); |
| 3323 | + |
| 3324 | + return NULL_RTX; |
| 3325 | +} |
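| | +
| | +/* For illustration (assuming avr32_function_arg_reglist maps indexes
| | +   0..4 to r12, r11, r10, r9 and r8): for a call f (int a, long long b,
| | +   int c), a is passed in r12, b in r11:r10 and c in r9. */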
| 3326 | + |
| 3327 | +/* |
| 3328 | + Set the register used for passing the first argument to a function. |
| 3329 | +*/ |
| 3330 | +void |
| 3331 | +avr32_init_cumulative_args (CUMULATIVE_ARGS * cum, tree fntype, |
| 3332 | + rtx libname ATTRIBUTE_UNUSED, |
| 3333 | + tree fndecl ATTRIBUTE_UNUSED) |
| 3334 | +{ |
| 3335 | + /* Set all registers as unused. */ |
| 3336 | + SET_INDEXES_UNUSED (cum); |
| 3337 | + |
| 3338 | + /* Reset uses_anonymous_args */ |
| 3339 | + cum->uses_anonymous_args = 0; |
| 3340 | + |
| 3341 | + /* Reset size of stack pushed arguments */ |
| 3342 | + cum->stack_pushed_args_size = 0; |
| 3343 | + |
| 3344 | +  /* If the function returns its value in memory, r12 is used as a
| 3345 | +     Return Value Pointer. */
| 3346 | + |
| 3347 | + if (fntype != 0 && avr32_return_in_memory (TREE_TYPE (fntype), fntype)) |
| 3348 | + { |
| 3349 | + SET_REG_INDEX (cum, 0); |
| 3350 | + SET_USED_INDEX (cum, GET_REG_INDEX (cum)); |
| 3351 | + } |
| 3352 | +} |
| 3353 | + |
| 3354 | +/* |
| 3355 | + Set register used for passing the next argument to a function. Only the |
| 3356 | + Scratch Registers are used. |
| 3357 | + |
| 3358 | + number name |
| 3359 | + 15 r15 PC |
| 3360 | + 14 r14 LR |
| 3361 | + 13 r13 _SP_________ |
| 3362 | + FIRST_CUM_REG 12 r12 _||_ |
| 3363 | + 10 r11 || |
| 3364 | + 11 r10 _||_ Scratch Registers |
| 3365 | + 8 r9 || |
| 3366 | + LAST_SCRATCH_REG 9 r8 _\/_________ |
| 3367 | + 6 r7 /\ |
| 3368 | + 7 r6 || |
| 3369 | + 4 r5 || |
| 3370 | + 5 r4 || |
| 3371 | + 2 r3 || |
| 3372 | + 3 r2 || |
| 3373 | + 0 r1 || |
| 3374 | + 1 r0 _||_________ |
| 3375 | + |
| 3376 | +*/ |
| 3377 | +void |
| 3378 | +avr32_function_arg_advance (CUMULATIVE_ARGS * cum, enum machine_mode mode, |
| 3379 | + tree type, int named ATTRIBUTE_UNUSED) |
| 3380 | +{ |
| 3381 | + HOST_WIDE_INT arg_size, arg_rsize; |
| 3382 | + |
| 3383 | + if (type) |
| 3384 | + { |
| 3385 | + arg_size = int_size_in_bytes (type); |
| 3386 | + } |
| 3387 | + else |
| 3388 | + { |
| 3389 | + arg_size = GET_MODE_SIZE (mode); |
| 3390 | + } |
| 3391 | + arg_rsize = PUSH_ROUNDING (arg_size); |
| 3392 | + |
| 3393 | +  /* If the argument has to be passed on the stack, no register is used. */
| 3394 | + if ((*targetm.calls.must_pass_in_stack) (mode, type)) |
| 3395 | + { |
| 3396 | + cum->stack_pushed_args_size += PUSH_ROUNDING (int_size_in_bytes (type)); |
| 3397 | + return; |
| 3398 | + } |
| 3399 | + |
| 3400 | + /* Mark the used registers as "used". */ |
| 3401 | + if (GET_REG_INDEX (cum) >= 0) |
| 3402 | + { |
| 3403 | + SET_USED_INDEX (cum, GET_REG_INDEX (cum)); |
| 3404 | + if (arg_rsize == 8) |
| 3405 | + { |
| 3406 | + SET_USED_INDEX (cum, (GET_REG_INDEX (cum) + 1)); |
| 3407 | + } |
| 3408 | + } |
| 3409 | + else |
| 3410 | + { |
| 3411 | + /* Had to use stack */ |
| 3412 | + cum->stack_pushed_args_size += arg_rsize; |
| 3413 | + } |
| 3414 | +} |
| 3415 | + |
| 3416 | +/*
| 3417 | +  Defines which direction to go to find the next register to use if the
| 3418 | +  argument is larger than one register, or for arguments shorter than an
| 3419 | +  int which are not promoted, such as the last part of structures whose
| 3420 | +  size is not a multiple of 4. */
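| | +/* For example, a 3-byte structure is padded upward, while an int or a
| | +   4-byte structure is padded downward. */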
| 3421 | +enum direction |
| 3422 | +avr32_function_arg_padding (enum machine_mode mode ATTRIBUTE_UNUSED, |
| 3423 | + tree type) |
| 3424 | +{ |
| 3425 | + /* Pad upward for all aggregates except byte and halfword sized aggregates |
| 3426 | + which can be passed in registers. */ |
| 3427 | + if (type |
| 3428 | + && AGGREGATE_TYPE_P (type) |
| 3429 | + && (int_size_in_bytes (type) != 1) |
| 3430 | + && !((int_size_in_bytes (type) == 2) |
| 3431 | + && TYPE_ALIGN_UNIT (type) >= 2) |
| 3432 | + && (int_size_in_bytes (type) & 0x3)) |
| 3433 | + { |
| 3434 | + return upward; |
| 3435 | + } |
| 3436 | + |
| 3437 | + return downward; |
| 3438 | +} |
| 3439 | + |
| 3440 | +/* |
| 3441 | + Return a rtx used for the return value from a function call. |
| 3442 | +*/ |
| 3443 | +rtx |
| 3444 | +avr32_function_value (tree type, tree func) |
| 3445 | +{ |
| 3446 | + if (avr32_return_in_memory (type, func)) |
| 3447 | + return NULL_RTX; |
| 3448 | + |
| 3449 | + if (int_size_in_bytes (type) <= 4) |
| 3450 | + if (avr32_return_in_msb (type)) |
| 3451 | +      /* Aggregates smaller than a word that align the data in the MSB must
| 3452 | +         use SImode for r12. */
| 3453 | + return gen_rtx_REG (SImode, RET_REGISTER); |
| 3454 | + else |
| 3455 | + return gen_rtx_REG (TYPE_MODE (type), RET_REGISTER); |
| 3456 | + else if (int_size_in_bytes (type) <= 8) |
| 3457 | + return gen_rtx_REG (TYPE_MODE (type), INTERNAL_REGNUM (11)); |
| 3458 | + |
| 3459 | + return NULL_RTX; |
| 3460 | +} |
| 3461 | + |
| 3462 | +/* |
| 3463 | + Return a rtx used for the return value from a library function call. |
| 3464 | +*/ |
| 3465 | +rtx |
| 3466 | +avr32_libcall_value (enum machine_mode mode) |
| 3467 | +{ |
| 3468 | + |
| 3469 | + if (GET_MODE_SIZE (mode) <= 4) |
| 3470 | + return gen_rtx_REG (mode, RET_REGISTER); |
| 3471 | + else if (GET_MODE_SIZE (mode) <= 8) |
| 3472 | + return gen_rtx_REG (mode, INTERNAL_REGNUM (11)); |
| 3473 | + else |
| 3474 | + return NULL_RTX; |
| 3475 | +} |
| 3476 | + |
| 3477 | +/* Return TRUE if X references a SYMBOL_REF. */ |
| 3478 | +int |
| 3479 | +symbol_mentioned_p (rtx x) |
| 3480 | +{ |
| 3481 | + const char *fmt; |
| 3482 | + int i; |
| 3483 | + |
| 3484 | + if (GET_CODE (x) == SYMBOL_REF) |
| 3485 | + return 1; |
| 3486 | + |
| 3487 | + fmt = GET_RTX_FORMAT (GET_CODE (x)); |
| 3488 | + |
| 3489 | + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) |
| 3490 | + { |
| 3491 | + if (fmt[i] == 'E') |
| 3492 | + { |
| 3493 | + int j; |
| 3494 | + |
| 3495 | + for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
| 3496 | + if (symbol_mentioned_p (XVECEXP (x, i, j))) |
| 3497 | + return 1; |
| 3498 | + } |
| 3499 | + else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i))) |
| 3500 | + return 1; |
| 3501 | + } |
| 3502 | + |
| 3503 | + return 0; |
| 3504 | +} |
| 3505 | + |
| 3506 | +/* Return TRUE if X references a LABEL_REF. */ |
| 3507 | +int |
| 3508 | +label_mentioned_p (rtx x) |
| 3509 | +{ |
| 3510 | + const char *fmt; |
| 3511 | + int i; |
| 3512 | + |
| 3513 | + if (GET_CODE (x) == LABEL_REF) |
| 3514 | + return 1; |
| 3515 | + |
| 3516 | + fmt = GET_RTX_FORMAT (GET_CODE (x)); |
| 3517 | + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) |
| 3518 | + { |
| 3519 | + if (fmt[i] == 'E') |
| 3520 | + { |
| 3521 | + int j; |
| 3522 | + |
| 3523 | + for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
| 3524 | + if (label_mentioned_p (XVECEXP (x, i, j))) |
| 3525 | + return 1; |
| 3526 | + } |
| 3527 | + else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i))) |
| 3528 | + return 1; |
| 3529 | + } |
| 3530 | + |
| 3531 | + return 0; |
| 3532 | +} |
| 3533 | + |
| 3534 | + |
| 3535 | +int |
| 3536 | +avr32_legitimate_pic_operand_p (rtx x) |
| 3537 | +{ |
| 3538 | + |
| 3539 | +  /* We can't have a CONST here; it must be broken down to a symbol. */
| 3540 | + if (GET_CODE (x) == CONST) |
| 3541 | + return FALSE; |
| 3542 | + |
| 3543 | + /* Can't access symbols or labels via the constant pool either */ |
| 3544 | + if ((GET_CODE (x) == SYMBOL_REF |
| 3545 | + && CONSTANT_POOL_ADDRESS_P (x) |
| 3546 | + && (symbol_mentioned_p (get_pool_constant (x)) |
| 3547 | + || label_mentioned_p (get_pool_constant (x))))) |
| 3548 | + return FALSE; |
| 3549 | + |
| 3550 | + return TRUE; |
| 3551 | +} |
| 3552 | + |
| 3553 | + |
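| | +/* Legitimize ORIG for use as a PIC address (also used when
| | +   TARGET_HAS_ASM_ADDR_PSEUDOS is set): symbols and labels are loaded
| | +   into REG (or a fresh pseudo when REG is zero), and CONST
| | +   plus-expressions are split into a base part and an offset. */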
| 3554 | +rtx |
| 3555 | +legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED, |
| 3556 | + rtx reg) |
| 3557 | +{ |
| 3558 | + |
| 3559 | + if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF) |
| 3560 | + { |
| 3561 | + int subregs = 0; |
| 3562 | + |
| 3563 | + if (reg == 0) |
| 3564 | + { |
| 3565 | + if (no_new_pseudos) |
| 3566 | + abort (); |
| 3567 | + else |
| 3568 | + reg = gen_reg_rtx (Pmode); |
| 3569 | + |
| 3570 | + subregs = 1; |
| 3571 | + } |
| 3572 | + |
| 3573 | + emit_move_insn (reg, orig); |
| 3574 | + |
| 3575 | + /* Only set current function as using pic offset table if flag_pic is |
| 3576 | + set. This is because this function is also used if |
| 3577 | + TARGET_HAS_ASM_ADDR_PSEUDOS is set. */ |
| 3578 | + if (flag_pic) |
| 3579 | + current_function_uses_pic_offset_table = 1; |
| 3580 | + |
| 3581 | + /* Put a REG_EQUAL note on this insn, so that it can be optimized by |
| 3582 | + loop. */ |
| 3583 | + return reg; |
| 3584 | + } |
| 3585 | + else if (GET_CODE (orig) == CONST) |
| 3586 | + { |
| 3587 | + rtx base, offset; |
| 3588 | + |
| 3589 | + if (flag_pic |
| 3590 | + && GET_CODE (XEXP (orig, 0)) == PLUS |
| 3591 | + && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx) |
| 3592 | + return orig; |
| 3593 | + |
| 3594 | + if (reg == 0) |
| 3595 | + { |
| 3596 | + if (no_new_pseudos) |
| 3597 | + abort (); |
| 3598 | + else |
| 3599 | + reg = gen_reg_rtx (Pmode); |
| 3600 | + } |
| 3601 | + |
| 3602 | + if (GET_CODE (XEXP (orig, 0)) == PLUS) |
| 3603 | + { |
| 3604 | + base = |
| 3605 | + legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg); |
| 3606 | + offset = |
| 3607 | + legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode, |
| 3608 | + base == reg ? 0 : reg); |
| 3609 | + } |
| 3610 | + else |
| 3611 | + abort (); |
| 3612 | + |
| 3613 | + if (GET_CODE (offset) == CONST_INT) |
| 3614 | + { |
| 3615 | + /* The base register doesn't really matter, we only want to test |
| 3616 | + the index for the appropriate mode. */ |
| 3617 | + if (!avr32_const_ok_for_constraint_p (INTVAL (offset), 'I', "Is21")) |
| 3618 | + { |
| 3619 | + if (!no_new_pseudos) |
| 3620 | + offset = force_reg (Pmode, offset); |
| 3621 | + else |
| 3622 | + abort (); |
| 3623 | + } |
| 3624 | + |
| 3625 | + if (GET_CODE (offset) == CONST_INT) |
| 3626 | + return plus_constant (base, INTVAL (offset)); |
| 3627 | + } |
| 3628 | + |
| 3629 | + return gen_rtx_PLUS (Pmode, base, offset); |
| 3630 | + } |
| 3631 | + |
| 3632 | + return orig; |
| 3633 | +} |
| 3634 | + |
| 3635 | +/* Generate code to load the PIC register. */ |
| 3636 | +void |
| 3637 | +avr32_load_pic_register (void) |
| 3638 | +{ |
| 3639 | + rtx l1, pic_tmp; |
| 3640 | + rtx global_offset_table; |
| 3641 | + |
| 3642 | + if ((current_function_uses_pic_offset_table == 0) || TARGET_NO_INIT_GOT) |
| 3643 | + return; |
| 3644 | + |
| 3645 | + if (!flag_pic) |
| 3646 | + abort (); |
| 3647 | + |
| 3648 | + l1 = gen_label_rtx (); |
| 3649 | + |
| 3650 | + global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_"); |
| 3651 | + pic_tmp = |
| 3652 | + gen_rtx_CONST (Pmode, |
| 3653 | + gen_rtx_MINUS (SImode, gen_rtx_LABEL_REF (Pmode, l1), |
| 3654 | + global_offset_table)); |
| 3655 | + emit_insn (gen_pic_load_addr |
| 3656 | + (pic_offset_table_rtx, force_const_mem (SImode, pic_tmp))); |
| 3657 | + emit_insn (gen_pic_compute_got_from_pc (pic_offset_table_rtx, l1)); |
| 3658 | + |
| 3659 | + /* Need to emit this whether or not we obey regdecls, since setjmp/longjmp |
| 3660 | + can cause life info to screw up. */ |
| 3661 | + emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx)); |
| 3662 | +} |
| 3663 | + |
| 3664 | + |
| 3665 | + |
| 3666 | +/* This hook should return true if values of type type are returned at the most |
| 3667 | + significant end of a register (in other words, if they are padded at the |
| 3668 | + least significant end). You can assume that type is returned in a register; |
| 3669 | + the caller is required to check this. Note that the register provided by |
| 3670 | + FUNCTION_VALUE must be able to hold the complete return value. For example, |
| 3671 | + if a 1-, 2- or 3-byte structure is returned at the most significant end of a |
| 3672 | + 4-byte register, FUNCTION_VALUE should provide an SImode rtx. */ |
| 3673 | +bool |
| 3674 | +avr32_return_in_msb (tree type ATTRIBUTE_UNUSED) |
| 3675 | +{ |
| 3676 | + /* if ( AGGREGATE_TYPE_P (type) ) if ((int_size_in_bytes(type) == 1) || |
| 3677 | + ((int_size_in_bytes(type) == 2) && TYPE_ALIGN_UNIT(type) >= 2)) return |
| 3678 | + false; else return true; */ |
| 3679 | + |
| 3680 | + return false; |
| 3681 | +} |
| 3682 | + |
| 3683 | + |
| 3684 | +/* |
| 3685 | +  Returns true if a certain function value is going to be returned in
| 3686 | +  memory and false if it is going to be returned in a register.
| 3687 | +
| 3688 | +  BLKmode and all other modes that are larger than 64 bits are returned in
| 3689 | +  memory.
| 3690 | +*/ |
| 3691 | +bool |
| 3692 | +avr32_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED) |
| 3693 | +{ |
| 3694 | + if (TYPE_MODE (type) == VOIDmode) |
| 3695 | + return false; |
| 3696 | + |
| 3697 | + if (int_size_in_bytes (type) > (2 * UNITS_PER_WORD) |
| 3698 | + || int_size_in_bytes (type) == -1) |
| 3699 | + { |
| 3700 | + return true; |
| 3701 | + } |
| 3702 | + |
| 3703 | + /* If we have an aggregate then use the same mechanism as when checking if |
| 3704 | + it should be passed on the stack. */ |
| 3705 | + if (type |
| 3706 | + && AGGREGATE_TYPE_P (type) |
| 3707 | + && (*targetm.calls.must_pass_in_stack) (TYPE_MODE (type), type)) |
| 3708 | + return true; |
| 3709 | + |
| 3710 | + return false; |
| 3711 | +} |
| 3712 | + |
| 3713 | + |
| 3714 | +/* Output the constant part of the trampoline. |
| 3715 | + lddpc r0, pc[0x8:e] ; load static chain register |
| 3716 | +   lddpc pc, pc[0x8:e] ; jump to subroutine
| 3717 | +   .long 0             ; Address of static chain,
| 3718 | +                       ; filled in by avr32_initialize_trampoline()
| 3719 | +   .long 0             ; Address of subroutine,
| 3720 | + ; filled in by avr32_initialize_trampoline() |
| 3721 | +*/ |
| 3722 | +void |
| 3723 | +avr32_trampoline_template (FILE * file) |
| 3724 | +{ |
| 3725 | + fprintf (file, "\tlddpc r0, pc[8]\n"); |
| 3726 | + fprintf (file, "\tlddpc pc, pc[8]\n"); |
| 3727 | + /* make room for the address of the static chain. */ |
| 3728 | + fprintf (file, "\t.long\t0\n"); |
| 3729 | +  /* make room for the address of the subroutine. */
| 3730 | + fprintf (file, "\t.long\t0\n"); |
| 3731 | +} |
| 3732 | + |
| 3733 | + |
| 3734 | +/* |
| 3735 | + Initialize the variable parts of a trampoline. |
| 3736 | +*/ |
| 3737 | +void |
| 3738 | +avr32_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain) |
| 3739 | +{ |
| 3740 | + /* Store the address to the static chain. */ |
| 3741 | + emit_move_insn (gen_rtx_MEM |
| 3742 | + (SImode, plus_constant (addr, TRAMPOLINE_SIZE - 4)), |
| 3743 | + static_chain); |
| 3744 | + |
| 3745 | + /* Store the address to the function. */ |
| 3746 | + emit_move_insn (gen_rtx_MEM (SImode, plus_constant (addr, TRAMPOLINE_SIZE)), |
| 3747 | + fnaddr); |
| 3748 | + |
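| | +  /* Invalidate the instruction cache so that instruction fetches see the
| | +     freshly written trampoline words. */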
| 3749 | + emit_insn (gen_cache (gen_rtx_REG (SImode, 13), |
| 3750 | + gen_rtx_CONST_INT (SImode, |
| 3751 | + AVR32_CACHE_INVALIDATE_ICACHE))); |
| 3752 | +} |
| 3753 | + |
| 3754 | +/* Return nonzero if X is valid as an addressing register. */ |
| 3755 | +int |
| 3756 | +avr32_address_register_rtx_p (rtx x, int strict_p) |
| 3757 | +{ |
| 3758 | + int regno; |
| 3759 | + |
| 3760 | + if (GET_CODE (x) != REG) |
| 3761 | + return 0; |
| 3762 | + |
| 3763 | + regno = REGNO (x); |
| 3764 | + |
| 3765 | + if (strict_p) |
| 3766 | + return REGNO_OK_FOR_BASE_P (regno); |
| 3767 | + |
| 3768 | + return (regno <= LAST_REGNUM || regno >= FIRST_PSEUDO_REGISTER); |
| 3769 | +} |
| 3770 | + |
| 3771 | +/* Return nonzero if INDEX is valid for an address index operand. */ |
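| | +/* The accepted forms are a small constant displacement, a base register,
| | +   a register scaled by a power of two up to 8, or a register shifted
| | +   left by one to three bits. */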
| 3772 | +int |
| 3773 | +avr32_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p) |
| 3774 | +{ |
| 3775 | + enum rtx_code code = GET_CODE (index); |
| 3776 | + |
| 3777 | + if (mode == TImode) |
| 3778 | + return 0; |
| 3779 | + |
| 3780 | + /* Standard coprocessor addressing modes. */ |
| 3781 | + if (code == CONST_INT) |
| 3782 | + { |
| 3783 | + if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT) |
| 3784 | +	/* Coprocessor mem insns have a smaller reach than ordinary mem insns. */
| 3785 | + return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ku14"); |
| 3786 | + else |
| 3787 | + return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ks16"); |
| 3788 | + } |
| 3789 | + |
| 3790 | + if (avr32_address_register_rtx_p (index, strict_p)) |
| 3791 | + return 1; |
| 3792 | + |
| 3793 | + if (code == MULT) |
| 3794 | + { |
| 3795 | + rtx xiop0 = XEXP (index, 0); |
| 3796 | + rtx xiop1 = XEXP (index, 1); |
| 3797 | + return ((avr32_address_register_rtx_p (xiop0, strict_p) |
| 3798 | + && power_of_two_operand (xiop1, SImode) |
| 3799 | + && (INTVAL (xiop1) <= 8)) |
| 3800 | + || (avr32_address_register_rtx_p (xiop1, strict_p) |
| 3801 | + && power_of_two_operand (xiop0, SImode) |
| 3802 | + && (INTVAL (xiop0) <= 8))); |
| 3803 | + } |
| 3804 | + else if (code == ASHIFT) |
| 3805 | + { |
| 3806 | + rtx op = XEXP (index, 1); |
| 3807 | + |
| 3808 | + return (avr32_address_register_rtx_p (XEXP (index, 0), strict_p) |
| 3809 | + && GET_CODE (op) == CONST_INT |
| 3810 | + && INTVAL (op) > 0 && INTVAL (op) <= 3); |
| 3811 | + } |
| 3812 | + |
| 3813 | + return 0; |
| 3814 | +} |
| 3815 | + |
| 3816 | +/* |
| 3817 | + Used in the GO_IF_LEGITIMATE_ADDRESS macro. Returns a nonzero value if |
| 3818 | + the RTX x is a legitimate memory address. |
| 3819 | + |
| 3820 | +  Returns zero if the address is not legitimate.
| 3822 | +*/ |
| 3823 | + |
| 3824 | +/* Forward declaration*/ |
| 3825 | +int is_minipool_label (rtx label); |
| 3826 | + |
| 3827 | +int |
| 3828 | +avr32_legitimate_address (enum machine_mode mode ATTRIBUTE_UNUSED, |
| 3829 | + rtx x, int strict) |
| 3830 | +{ |
| 3831 | + |
| 3832 | + switch (GET_CODE (x)) |
| 3833 | + { |
| 3834 | + case REG: |
| 3835 | + return avr32_address_register_rtx_p (x, strict); |
| 3836 | + case CONST: |
| 3837 | + { |
| 3838 | + rtx label = avr32_find_symbol (x); |
| 3839 | + if (label |
| 3840 | + && |
| 3841 | + ( (CONSTANT_POOL_ADDRESS_P (label) |
| 3842 | + && !(flag_pic |
| 3843 | + && (symbol_mentioned_p (get_pool_constant (label)) |
| 3844 | + || label_mentioned_p (get_pool_constant(label))))) |
| 3845 | + /* TODO! Can this ever happen??? */ |
| 3846 | + || ((GET_CODE (label) == LABEL_REF) |
| 3847 | + && GET_CODE (XEXP (label, 0)) == CODE_LABEL |
| 3848 | + && is_minipool_label (XEXP (label, 0))))) |
| 3849 | + { |
| 3850 | + return TRUE; |
| 3851 | + } |
| 3852 | + } |
| 3853 | + break; |
| 3854 | + case LABEL_REF: |
| 3855 | + if (GET_CODE (XEXP (x, 0)) == CODE_LABEL |
| 3856 | + && is_minipool_label (XEXP (x, 0))) |
| 3857 | + { |
| 3858 | + return TRUE; |
| 3859 | + } |
| 3860 | + break; |
| 3861 | + case SYMBOL_REF: |
| 3862 | + { |
| 3863 | + if (CONSTANT_POOL_ADDRESS_P (x) |
| 3864 | + && !(flag_pic |
| 3865 | + && (symbol_mentioned_p (get_pool_constant (x)) |
| 3866 | + || label_mentioned_p (get_pool_constant (x))))) |
| 3867 | + return TRUE; |
| 3868 | + /* |
| 3869 | +	   A symbol_ref is only legal if it is a function. If all of them were
| 3870 | +	   legal, a pseudo reg holding a constant would be replaced by a
| 3871 | +	   symbol_ref and produce illegal code. SYMBOL_REF_FLAG is set by
| 3872 | + ENCODE_SECTION_INFO. */ |
| 3873 | + else if (SYMBOL_REF_RCALL_FUNCTION_P (x)) |
| 3874 | + return TRUE; |
| 3875 | + break; |
| 3876 | + } |
| 3877 | + case PRE_DEC: /* (pre_dec (...)) */ |
| 3878 | + case POST_INC: /* (post_inc (...)) */ |
| 3879 | + return avr32_address_register_rtx_p (XEXP (x, 0), strict); |
| 3880 | + case PLUS: /* (plus (...) (...)) */ |
| 3881 | + { |
| 3882 | + rtx xop0 = XEXP (x, 0); |
| 3883 | + rtx xop1 = XEXP (x, 1); |
| 3884 | + |
| 3885 | + return ((avr32_address_register_rtx_p (xop0, strict) |
| 3886 | + && avr32_legitimate_index_p (mode, xop1, strict)) |
| 3887 | + || (avr32_address_register_rtx_p (xop1, strict) |
| 3888 | + && avr32_legitimate_index_p (mode, xop0, strict))); |
| 3889 | + } |
| 3890 | + default: |
| 3891 | + break; |
| 3892 | + } |
| 3893 | + |
| 3894 | + return FALSE; |
| 3895 | +} |
| 3896 | + |
| 3897 | + |
| 3898 | +int |
| 3899 | +avr32_const_double_immediate (rtx value) |
| 3900 | +{ |
| 3901 | + HOST_WIDE_INT hi, lo; |
| 3902 | + |
| 3903 | + if (GET_CODE (value) != CONST_DOUBLE) |
| 3904 | + return FALSE; |
| 3905 | + |
| 3906 | + if (GET_MODE (value) == DImode) |
| 3907 | + { |
| 3908 | + hi = CONST_DOUBLE_HIGH (value); |
| 3909 | + lo = CONST_DOUBLE_LOW (value); |
| 3910 | + } |
| 3911 | + else |
| 3912 | + { |
| 3913 | + HOST_WIDE_INT target_float[2]; |
| 3914 | + hi = lo = 0; |
| 3915 | + real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (value), |
| 3916 | + GET_MODE (value)); |
| 3917 | + lo = target_float[0]; |
| 3918 | + hi = target_float[1]; |
| 3919 | + } |
| 3920 | + if (avr32_const_ok_for_constraint_p (lo, 'K', "Ks21") |
| 3921 | + && ((GET_MODE (value) == SFmode) |
| 3922 | + || avr32_const_ok_for_constraint_p (hi, 'K', "Ks21"))) |
| 3923 | + { |
| 3924 | + return TRUE; |
| 3925 | + } |
| 3926 | + |
| 3927 | + return FALSE; |
| 3928 | +} |
| 3929 | + |
| 3930 | + |
| 3931 | +int |
| 3932 | +avr32_legitimate_constant_p (rtx x) |
| 3933 | +{ |
| 3934 | + switch (GET_CODE (x)) |
| 3935 | + { |
| 3936 | + case CONST_INT: |
| 3937 | + return avr32_const_ok_for_constraint_p (INTVAL (x), 'K', "Ks21"); |
| 3938 | + case CONST_DOUBLE: |
| 3939 | + if (GET_MODE (x) == SFmode |
| 3940 | + || GET_MODE (x) == DFmode || GET_MODE (x) == DImode) |
| 3941 | + return avr32_const_double_immediate (x); |
| 3942 | + else |
| 3943 | + return 0; |
| 3944 | + case LABEL_REF: |
| 3945 | + return flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS; |
| 3946 | + case SYMBOL_REF: |
| 3947 | + return flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS; |
| 3948 | + case CONST: |
| 3949 | + /* We must handle this one in the movsi expansion in order for gcc not |
| 3950 | + to put it in the constant pool. */ |
| 3951 | + return 0 /* flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS */ ; |
| 3952 | + case HIGH: |
| 3953 | + case CONST_VECTOR: |
| 3954 | + return 0; |
| 3955 | + default: |
| 3956 | + printf ("%s():\n", __FUNCTION__); |
| 3957 | + debug_rtx (x); |
| 3958 | + return 1; |
| 3959 | + } |
| 3960 | +} |
| 3961 | + |
| 3962 | + |
| 3963 | +/* Strip any special encoding from labels */ |
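| | +/* Encodings of the form "#...#" are skipped, and a leading '*' (marking
| | +   a verbatim name) is dropped. */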
| 3964 | +const char * |
| 3965 | +avr32_strip_name_encoding (const char *name) |
| 3966 | +{ |
| 3967 | + const char *stripped = name; |
| 3968 | + |
| 3969 | + while (1) |
| 3970 | + { |
| 3971 | + switch (stripped[0]) |
| 3972 | + { |
| 3973 | + case '#': |
| 3974 | + stripped = strchr (name + 1, '#') + 1; |
| 3975 | + break; |
| 3976 | + case '*': |
| 3977 | + stripped = &stripped[1]; |
| 3978 | + break; |
| 3979 | + default: |
| 3980 | + return stripped; |
| 3981 | + } |
| 3982 | + } |
| 3983 | +} |
| 3984 | + |
| 3985 | + |
| 3986 | + |
| 3987 | +/* Do anything needed before RTL is emitted for each function. */ |
| 3988 | +static struct machine_function * |
| 3989 | +avr32_init_machine_status (void) |
| 3990 | +{ |
| 3991 | + struct machine_function *machine; |
| 3992 | + machine = |
| 3993 | + (machine_function *) ggc_alloc_cleared (sizeof (machine_function)); |
| 3994 | + |
| 3995 | +#if AVR32_FT_UNKNOWN != 0 |
| 3996 | + machine->func_type = AVR32_FT_UNKNOWN; |
| 3997 | +#endif |
| 3998 | + |
| 3999 | + machine->minipool_label_head = 0; |
| 4000 | + machine->minipool_label_tail = 0; |
| 4001 | + return machine; |
| 4002 | +} |
| 4003 | + |
| 4004 | +void |
| 4005 | +avr32_init_expanders (void) |
| 4006 | +{ |
| 4007 | + /* Arrange to initialize and mark the machine per-function status. */ |
| 4008 | + init_machine_status = avr32_init_machine_status; |
| 4009 | +} |
| 4010 | + |
| 4011 | + |
| 4012 | +/* Return an RTX indicating where the return address to the |
| 4013 | + calling function can be found. */ |
| 4014 | + |
| 4015 | +rtx |
| 4016 | +avr32_return_addr (int count, rtx frame ATTRIBUTE_UNUSED) |
| 4017 | +{ |
| 4018 | + if (count != 0) |
| 4019 | + return NULL_RTX; |
| 4020 | + |
| 4021 | + return get_hard_reg_initial_val (Pmode, LR_REGNUM); |
| 4022 | +} |
| 4023 | + |
| 4024 | + |
| 4025 | +void |
| 4026 | +avr32_encode_section_info (tree decl, rtx rtl, int first) |
| 4027 | +{ |
| 4028 | + |
| 4029 | + if (first && DECL_P (decl)) |
| 4030 | + { |
| 4031 | +      /* Set SYMBOL_REF_FLAG for local functions */
| 4032 | + if (!TREE_PUBLIC (decl) && TREE_CODE (decl) == FUNCTION_DECL) |
| 4033 | + { |
| 4034 | + if ((*targetm.binds_local_p) (decl)) |
| 4035 | + { |
| 4036 | + SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1; |
| 4037 | + } |
| 4038 | + } |
| 4039 | + } |
| 4040 | +} |
| 4041 | + |
| 4042 | + |
| 4043 | +void |
| 4044 | +avr32_asm_output_ascii (FILE * stream, char *ptr, int len) |
| 4045 | +{ |
| 4046 | + int i, i_new = 0; |
| 4047 | + char *new_ptr = xmalloc (4 * len); |
| 4048 | + if (new_ptr == NULL) |
| 4049 | + internal_error ("Out of memory."); |
| 4050 | + |
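| | +  /* Escape characters that would break the .ascii directive: newlines
| | +     become the octal escape \012, quotes and backslashes get a leading
| | +     backslash, and embedded NUL bytes become \0. */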
| 4051 | + for (i = 0; i < len; i++) |
| 4052 | + { |
| 4053 | + if (ptr[i] == '\n') |
| 4054 | + { |
| 4055 | + new_ptr[i_new++] = '\\'; |
| 4056 | + new_ptr[i_new++] = '0'; |
| 4057 | + new_ptr[i_new++] = '1'; |
| 4058 | + new_ptr[i_new++] = '2'; |
| 4059 | + } |
| 4060 | + else if (ptr[i] == '\"') |
| 4061 | + { |
| 4062 | + new_ptr[i_new++] = '\\'; |
| 4063 | + new_ptr[i_new++] = '\"'; |
| 4064 | + } |
| 4065 | + else if (ptr[i] == '\\') |
| 4066 | + { |
| 4067 | + new_ptr[i_new++] = '\\'; |
| 4068 | + new_ptr[i_new++] = '\\'; |
| 4069 | + } |
| 4070 | + else if (ptr[i] == '\0' && i + 1 < len) |
| 4071 | + { |
| 4072 | + new_ptr[i_new++] = '\\'; |
| 4073 | + new_ptr[i_new++] = '0'; |
| 4074 | + } |
| 4075 | + else |
| 4076 | + { |
| 4077 | + new_ptr[i_new++] = ptr[i]; |
| 4078 | + } |
| 4079 | + } |
| 4080 | + |
| 4081 | + /* Terminate new_ptr. */ |
| 4082 | + new_ptr[i_new] = '\0'; |
| 4083 | + fprintf (stream, "\t.ascii\t\"%s\"\n", new_ptr); |
| 4084 | + free (new_ptr); |
| 4085 | +} |
| 4086 | + |
| 4087 | + |
| 4088 | +void |
| 4089 | +avr32_asm_output_label (FILE * stream, const char *name) |
| 4090 | +{ |
| 4091 | + name = avr32_strip_name_encoding (name); |
| 4092 | + |
| 4093 | + /* Print the label. */ |
| 4094 | + assemble_name (stream, name); |
| 4095 | + fprintf (stream, ":\n"); |
| 4096 | +} |
| 4097 | + |
| 4098 | + |
| 4099 | + |
| 4100 | +void |
| 4101 | +avr32_asm_weaken_label (FILE * stream, const char *name) |
| 4102 | +{ |
| 4103 | + fprintf (stream, "\t.weak "); |
| 4104 | + assemble_name (stream, name); |
| 4105 | + fprintf (stream, "\n"); |
| 4106 | +} |
| 4107 | + |
| 4108 | +/*
| 4109 | +  Output a label reference: strip any special name encoding and print the
| 4110 | +  name, adding the user label prefix unless it is marked verbatim with '*'.
| 4111 | +*/
| 4112 | +void |
| 4113 | +avr32_asm_output_labelref (FILE * stream, const char *name) |
| 4114 | +{ |
| 4115 | + int verbatim = FALSE; |
| 4116 | + const char *stripped = name; |
| 4117 | + int strip_finished = FALSE; |
| 4118 | + |
| 4119 | + while (!strip_finished) |
| 4120 | + { |
| 4121 | + switch (stripped[0]) |
| 4122 | + { |
| 4123 | + case '#': |
| 4124 | + stripped = strchr (name + 1, '#') + 1; |
| 4125 | + break; |
| 4126 | + case '*': |
| 4127 | + stripped = &stripped[1]; |
| 4128 | + verbatim = TRUE; |
| 4129 | + break; |
| 4130 | + default: |
| 4131 | + strip_finished = TRUE; |
| 4132 | + break; |
| 4133 | + } |
| 4134 | + } |
| 4135 | + |
| 4136 | + if (verbatim) |
| 4137 | + fputs (stripped, stream); |
| 4138 | + else |
| 4139 | + asm_fprintf (stream, "%U%s", stripped); |
| 4140 | +} |
| 4141 | + |
| 4142 | + |
| 4143 | + |
| 4144 | +/* |
| 4145 | + Check if the comparison in compare_exp is redundant |
| 4146 | +  for the condition given in next_cond, i.e. whether the
| 4147 | +  flags it needs are already set by an earlier instruction.
| 4148 | + Uses cc_prev_status to check this. |
| 4149 | + |
| 4150 | + Returns NULL_RTX if the compare is not redundant |
| 4151 | + or the new condition to use in the conditional |
| 4152 | + instruction if the compare is redundant. |
| 4153 | +*/ |
| 4154 | +static rtx |
| 4155 | +is_compare_redundant (rtx compare_exp, rtx next_cond) |
| 4156 | +{ |
| 4157 | + int z_flag_valid = FALSE; |
| 4158 | + int n_flag_valid = FALSE; |
| 4159 | + rtx new_cond; |
| 4160 | + |
| 4161 | + if (GET_CODE (compare_exp) != COMPARE) |
| 4162 | + return NULL_RTX; |
| 4163 | + |
| 4164 | + |
| 4165 | + if (GET_MODE (compare_exp) != SImode) |
| 4166 | + return NULL_RTX; |
| 4167 | + |
| 4168 | + if (rtx_equal_p (cc_prev_status.mdep.value, compare_exp)) |
| 4169 | + { |
| 4170 | + /* cc0 already contains the correct comparison -> delete cmp insn */ |
| 4171 | + return next_cond; |
| 4172 | + } |
| 4173 | + |
| 4174 | + switch (cc_prev_status.mdep.flags) |
| 4175 | + { |
| 4176 | + case CC_SET_VNCZ: |
| 4177 | + case CC_SET_NCZ: |
| 4178 | + n_flag_valid = TRUE; |
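| | +    /* Fall through: the Z flag is valid whenever the N flag is. */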
| 4179 | + case CC_SET_CZ: |
| 4180 | + case CC_SET_Z: |
| 4181 | + z_flag_valid = TRUE; |
| 4182 | + } |
| 4183 | + |
| 4184 | + if (cc_prev_status.mdep.value |
| 4185 | + && REG_P (XEXP (compare_exp, 0)) |
| 4186 | + && REGNO (XEXP (compare_exp, 0)) == REGNO (cc_prev_status.mdep.value) |
| 4187 | + && GET_CODE (XEXP (compare_exp, 1)) == CONST_INT |
| 4188 | + && next_cond != NULL_RTX) |
| 4189 | + { |
| 4190 | + if (INTVAL (XEXP (compare_exp, 1)) == 0 |
| 4191 | + && z_flag_valid |
| 4192 | + && (GET_CODE (next_cond) == EQ || GET_CODE (next_cond) == NE)) |
| 4193 | +	/* We can skip the comparison; the Z flag already reflects ops[0]. */
| 4194 | + return next_cond; |
| 4195 | + else if (n_flag_valid |
| 4196 | + && ((INTVAL (XEXP (compare_exp, 1)) == 0 |
| 4197 | + && (GET_CODE (next_cond) == GE |
| 4198 | + || GET_CODE (next_cond) == LT)) |
| 4199 | + || (INTVAL (XEXP (compare_exp, 1)) == -1 |
| 4200 | + && (GET_CODE (next_cond) == GT |
| 4201 | + || GET_CODE (next_cond) == LE)))) |
| 4202 | + { |
| 4203 | +	  /* We can skip the comparison; the N flag already reflects ops[0],
| 4204 | + which means that we can use the mi/pl conditions to check if |
| 4205 | + ops[0] is GE or LT 0. */ |
| 4206 | + if ((GET_CODE (next_cond) == GE) || (GET_CODE (next_cond) == GT)) |
| 4207 | + new_cond = |
| 4208 | + gen_rtx_UNSPEC (CCmode, gen_rtvec (2, cc0_rtx, const0_rtx), |
| 4209 | + UNSPEC_COND_PL); |
| 4210 | + else |
| 4211 | + new_cond = |
| 4212 | + gen_rtx_UNSPEC (CCmode, gen_rtvec (2, cc0_rtx, const0_rtx), |
| 4213 | + UNSPEC_COND_MI); |
| 4214 | + return new_cond; |
| 4215 | + } |
| 4216 | + } |
| 4217 | + return NULL_RTX; |
| 4218 | +} |
| 4219 | + |
| 4220 | +/* Updates cc_status. */ |
| 4221 | +void |
| 4222 | +avr32_notice_update_cc (rtx exp, rtx insn) |
| 4223 | +{ |
| 4224 | + switch (get_attr_cc (insn)) |
| 4225 | + { |
| 4226 | + case CC_CALL_SET: |
| 4227 | + CC_STATUS_INIT; |
| 4228 | + FPCC_STATUS_INIT; |
| 4229 | + /* Check if the function call returns a value in r12 */ |
| 4230 | + if (REG_P (recog_data.operand[0]) |
| 4231 | + && REGNO (recog_data.operand[0]) == RETVAL_REGNUM) |
| 4232 | + { |
| 4233 | + cc_status.flags = 0; |
| 4234 | + cc_status.mdep.value = |
| 4235 | + gen_rtx_COMPARE (SImode, recog_data.operand[0], const0_rtx); |
| 4236 | + cc_status.mdep.flags = CC_SET_VNCZ; |
| 4237 | + |
| 4238 | + } |
| 4239 | + break; |
| 4240 | + case CC_COMPARE: |
| 4241 | +    /* Check whether the compare will be optimized away; if so, nothing
| 4242 | +       needs to be done here. */
| 4243 | + if (is_compare_redundant (SET_SRC (exp), get_next_insn_cond (insn)) |
| 4244 | + == NULL_RTX) |
| 4245 | + { |
| 4246 | + |
| 4247 | + /* Reset the nonstandard flag */ |
| 4248 | + CC_STATUS_INIT; |
| 4249 | + cc_status.flags = 0; |
| 4250 | + cc_status.mdep.value = SET_SRC (exp); |
| 4251 | + cc_status.mdep.flags = CC_SET_VNCZ; |
| 4252 | + } |
| 4253 | + break; |
| 4254 | + case CC_FPCOMPARE: |
| 4255 | +    /* Check whether the floating-point compare will be optimized away; if
| 4256 | +       so, nothing needs to be done here. */
| 4257 | + if (!rtx_equal_p (cc_prev_status.mdep.fpvalue, SET_SRC (exp))) |
| 4258 | + { |
| 4259 | +	  /* Record the new floating-point comparison so that a later
| 4260 | +	     redundant compare can be detected. */
| 4261 | + cc_status.mdep.fpvalue = SET_SRC (exp); |
| 4262 | + cc_status.mdep.fpflags = CC_SET_CZ; |
| 4263 | + } |
| 4264 | + break; |
| 4265 | + case CC_FROM_FPCC: |
| 4266 | +    /* The flags are updated with the flags from the floating-point
| 4267 | +       coprocessor; set the CC_NOT_SIGNED flag since they are set so that
| 4268 | +       the unsigned condition codes can be used directly. */
| 4269 | + CC_STATUS_INIT; |
| 4270 | + cc_status.flags = CC_NOT_SIGNED; |
| 4271 | + cc_status.mdep.value = cc_status.mdep.fpvalue; |
| 4272 | + cc_status.mdep.flags = cc_status.mdep.fpflags; |
| 4273 | + break; |
| 4274 | + case CC_BLD: |
| 4275 | + /* Bit load is kind of like an inverted testsi, because the Z flag is |
| 4276 | + inverted */ |
| 4277 | + CC_STATUS_INIT; |
| 4278 | + cc_status.flags = CC_INVERTED; |
| 4279 | + cc_status.mdep.value = SET_SRC (exp); |
| 4280 | + cc_status.mdep.flags = CC_SET_Z; |
| 4281 | + break; |
| 4282 | + case CC_NONE: |
| 4283 | + /* Insn does not affect CC at all. Check if the instruction updates |
| 4284 | +       some of the registers currently reflected in cc0. */
| 4285 | + |
| 4286 | + if ((GET_CODE (exp) == SET) |
| 4287 | + && (cc_status.value1 || cc_status.value2 || cc_status.mdep.value) |
| 4288 | + && (reg_mentioned_p (SET_DEST (exp), cc_status.value1) |
| 4289 | + || reg_mentioned_p (SET_DEST (exp), cc_status.value2) |
| 4290 | + || reg_mentioned_p (SET_DEST (exp), cc_status.mdep.value))) |
| 4291 | + { |
| 4292 | + CC_STATUS_INIT; |
| 4293 | + } |
| 4294 | + |
| 4295 | + /* If this is a parallel we must step through each of the parallel |
| 4296 | + expressions */ |
| 4297 | + if (GET_CODE (exp) == PARALLEL) |
| 4298 | + { |
| 4299 | + int i; |
| 4300 | + for (i = 0; i < XVECLEN (exp, 0); ++i) |
| 4301 | + { |
| 4302 | + rtx vec_exp = XVECEXP (exp, 0, i); |
| 4303 | + if ((GET_CODE (vec_exp) == SET) |
| 4304 | + && (cc_status.value1 || cc_status.value2 |
| 4305 | + || cc_status.mdep.value) |
| 4306 | + && (reg_mentioned_p (SET_DEST (vec_exp), cc_status.value1) |
| 4307 | + || reg_mentioned_p (SET_DEST (vec_exp), |
| 4308 | + cc_status.value2) |
| 4309 | + || reg_mentioned_p (SET_DEST (vec_exp), |
| 4310 | + cc_status.mdep.value))) |
| 4311 | + { |
| 4312 | + CC_STATUS_INIT; |
| 4313 | + } |
| 4314 | + } |
| 4315 | + } |
| 4316 | + |
| 4317 | +    /* Check if we have memory operations with post_inc or pre_dec on the
| 4318 | +       register currently reflected in cc0. */
| 4319 | + if (GET_CODE (exp) == SET |
| 4320 | + && GET_CODE (SET_SRC (exp)) == MEM |
| 4321 | + && (GET_CODE (XEXP (SET_SRC (exp), 0)) == POST_INC |
| 4322 | + || GET_CODE (XEXP (SET_SRC (exp), 0)) == PRE_DEC) |
| 4323 | + && |
| 4324 | + (reg_mentioned_p |
| 4325 | + (XEXP (XEXP (SET_SRC (exp), 0), 0), cc_status.value1) |
| 4326 | + || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0), |
| 4327 | + cc_status.value2) |
| 4328 | + || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0), |
| 4329 | + cc_status.mdep.value))) |
| 4330 | + CC_STATUS_INIT; |
| 4331 | + |
| 4332 | + if (GET_CODE (exp) == SET |
| 4333 | + && GET_CODE (SET_DEST (exp)) == MEM |
| 4334 | + && (GET_CODE (XEXP (SET_DEST (exp), 0)) == POST_INC |
| 4335 | + || GET_CODE (XEXP (SET_DEST (exp), 0)) == PRE_DEC) |
| 4336 | + && |
| 4337 | + (reg_mentioned_p |
| 4338 | + (XEXP (XEXP (SET_DEST (exp), 0), 0), cc_status.value1) |
| 4339 | + || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0), |
| 4340 | + cc_status.value2) |
| 4341 | + || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0), |
| 4342 | + cc_status.mdep.value))) |
| 4343 | + CC_STATUS_INIT; |
| 4344 | + break; |
| 4345 | + |
| 4346 | + case CC_SET_VNCZ: |
| 4347 | + CC_STATUS_INIT; |
| 4348 | + cc_status.mdep.value = recog_data.operand[0]; |
| 4349 | + cc_status.mdep.flags = CC_SET_VNCZ; |
| 4350 | + break; |
| 4351 | + |
| 4352 | + case CC_SET_NCZ: |
| 4353 | + CC_STATUS_INIT; |
| 4354 | + cc_status.mdep.value = recog_data.operand[0]; |
| 4355 | + cc_status.mdep.flags = CC_SET_NCZ; |
| 4356 | + break; |
| 4357 | + |
| 4358 | + case CC_SET_CZ: |
| 4359 | + CC_STATUS_INIT; |
| 4360 | + cc_status.mdep.value = recog_data.operand[0]; |
| 4361 | + cc_status.mdep.flags = CC_SET_CZ; |
| 4362 | + break; |
| 4363 | + |
| 4364 | + case CC_SET_Z: |
| 4365 | + CC_STATUS_INIT; |
| 4366 | + cc_status.mdep.value = recog_data.operand[0]; |
| 4367 | + cc_status.mdep.flags = CC_SET_Z; |
| 4368 | + break; |
| 4369 | + |
| 4370 | + case CC_CLOBBER: |
| 4371 | + CC_STATUS_INIT; |
| 4372 | + break; |
| 4373 | + |
| 4374 | + default: |
| 4375 | + CC_STATUS_INIT; |
| 4376 | + } |
| 4377 | +} |
| 4378 | + |
| 4379 | + |
| 4380 | +/* |
| 4381 | +  Output to stdio stream STREAM the assembler syntax for the instruction
| 4382 | +  operand X, which is an RTL expression.
| 4383 | +*/ |
| 4384 | +void |
| 4385 | +avr32_print_operand (FILE * stream, rtx x, int code) |
| 4386 | +{ |
| 4387 | + int error = 0; |
| 4388 | + |
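| | +  /* For condition codes, the 'i' modifier prints the inverse condition,
| | +     e.g. "eq" becomes "ne". */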
| 4389 | + switch (GET_CODE (x)) |
| 4390 | + { |
| 4391 | + case UNSPEC: |
| 4392 | + switch (XINT (x, 1)) |
| 4393 | + { |
| 4394 | + case UNSPEC_COND_PL: |
| 4395 | + if (code == 'i') |
| 4396 | + fputs ("mi", stream); |
| 4397 | + else |
| 4398 | + fputs ("pl", stream); |
| 4399 | + break; |
| 4400 | + case UNSPEC_COND_MI: |
| 4401 | + if (code == 'i') |
| 4402 | + fputs ("pl", stream); |
| 4403 | + else |
| 4404 | + fputs ("mi", stream); |
| 4405 | + break; |
| 4406 | + default: |
| 4407 | + error = 1; |
| 4408 | + } |
| 4409 | + break; |
| 4410 | + case EQ: |
| 4411 | + if (code == 'i') |
| 4412 | + fputs ("ne", stream); |
| 4413 | + else |
| 4414 | + fputs ("eq", stream); |
| 4415 | + break; |
| 4416 | + case NE: |
| 4417 | + if (code == 'i') |
| 4418 | + fputs ("eq", stream); |
| 4419 | + else |
| 4420 | + fputs ("ne", stream); |
| 4421 | + break; |
| 4422 | + case GT: |
| 4423 | + if (code == 'i') |
| 4424 | + fputs ("le", stream); |
| 4425 | + else |
| 4426 | + fputs ("gt", stream); |
| 4427 | + break; |
| 4428 | + case GTU: |
| 4429 | + if (code == 'i') |
| 4430 | + fputs ("ls", stream); |
| 4431 | + else |
| 4432 | + fputs ("hi", stream); |
| 4433 | + break; |
| 4434 | + case LT: |
| 4435 | + if (code == 'i') |
| 4436 | + fputs ("ge", stream); |
| 4437 | + else |
| 4438 | + fputs ("lt", stream); |
| 4439 | + break; |
| 4440 | + case LTU: |
| 4441 | + if (code == 'i') |
| 4442 | + fputs ("hs", stream); |
| 4443 | + else |
| 4444 | + fputs ("lo", stream); |
| 4445 | + break; |
| 4446 | + case GE: |
| 4447 | + if (code == 'i') |
| 4448 | + fputs ("lt", stream); |
| 4449 | + else |
| 4450 | + fputs ("ge", stream); |
| 4451 | + break; |
| 4452 | + case GEU: |
| 4453 | + if (code == 'i') |
| 4454 | + fputs ("lo", stream); |
| 4455 | + else |
| 4456 | + fputs ("hs", stream); |
| 4457 | + break; |
| 4458 | + case LE: |
| 4459 | + if (code == 'i') |
| 4460 | + fputs ("gt", stream); |
| 4461 | + else |
| 4462 | + fputs ("le", stream); |
| 4463 | + break; |
| 4464 | + case LEU: |
| 4465 | + if (code == 'i') |
| 4466 | + fputs ("hi", stream); |
| 4467 | + else |
| 4468 | + fputs ("ls", stream); |
| 4469 | + break; |
| 4470 | + case CONST_INT: |
| 4471 | + { |
| 4472 | + int value = INTVAL (x); |
| 4473 | + |
| 4474 | + if (code == 'i') |
| 4475 | + { |
| 4476 | + value++; |
| 4477 | + } |
| 4478 | + |
| 4479 | + if (code == 'p') |
| 4480 | + { |
| 4481 | + /* Set to bit position of first bit set in immediate */ |
| 4482 | + int i, bitpos = 32; |
| 4483 | + for (i = 0; i < 32; i++) |
| 4484 | + if (value & (1 << i)) |
| 4485 | + { |
| 4486 | + bitpos = i; |
| 4487 | + break; |
| 4488 | + } |
| 4489 | + value = bitpos; |
| 4490 | + } |
| 4491 | + |
| 4492 | + if (code == 'r') |
| 4493 | + { |
| 4494 | + /* Reglist 8 */ |
| 4495 | + char op[50]; |
| 4496 | + op[0] = '\0'; |
| 4497 | + |
| 4498 | + if (value & 0x01) |
| 4499 | + sprintf (op, "r0-r3"); |
| 4500 | + if (value & 0x02) |
| 4501 | + strlen (op) ? sprintf (op, "%s, r4-r7", op) : sprintf (op, |
| 4502 | + "r4-r7"); |
| 4503 | + if (value & 0x04) |
| 4504 | + strlen (op) ? sprintf (op, "%s, r8-r9", op) : sprintf (op, |
| 4505 | + "r8-r9"); |
| 4506 | + if (value & 0x08) |
| 4507 | + strlen (op) ? sprintf (op, "%s, r10", op) : sprintf (op, "r10"); |
| 4508 | + if (value & 0x10) |
| 4509 | + strlen (op) ? sprintf (op, "%s, r11", op) : sprintf (op, "r11"); |
| 4510 | + if (value & 0x20) |
| 4511 | + strlen (op) ? sprintf (op, "%s, r12", op) : sprintf (op, "r12"); |
| 4512 | + if (value & 0x40) |
| 4513 | + strlen (op) ? sprintf (op, "%s, lr", op) : sprintf (op, "lr"); |
| 4514 | + if (value & 0x80) |
| 4515 | + strlen (op) ? sprintf (op, "%s, pc", op) : sprintf (op, "pc"); |
| 4516 | + |
| 4517 | + fputs (op, stream); |
| 4518 | + } |
| 4519 | + else if (code == 's') |
| 4520 | + { |
| 4521 | + /* Reglist 16 */ |
| 4522 | + char reglist16_string[100]; |
| 4523 | + int i; |
| 4524 | + reglist16_string[0] = '\0'; |
| 4525 | + |
| 4526 | + for (i = 0; i < 16; ++i) |
| 4527 | + { |
| 4528 | + if (value & (1 << i)) |
| 4529 | + { |
| 4530 | +		  strlen (reglist16_string)
| 4531 | +		    ? sprintf (reglist16_string, "%s, %s", reglist16_string,
| 4532 | +			       reg_names[INTERNAL_REGNUM (i)])
| 4533 | +		    : sprintf (reglist16_string, "%s",
| 4534 | +			       reg_names[INTERNAL_REGNUM (i)]);
| 4538 | + } |
| 4539 | + } |
| 4540 | + fputs (reglist16_string, stream); |
| 4541 | + } |
| 4542 | + else if (code == 'C') |
| 4543 | + { |
| 4544 | + /* RegListCP8 */ |
| 4545 | + char reglist_string[100]; |
| 4546 | + avr32_make_fp_reglist_w (value, (char *) reglist_string); |
| 4547 | + fputs (reglist_string, stream); |
| 4548 | + } |
| 4549 | + else if (code == 'D') |
| 4550 | + { |
| 4551 | + /* RegListCPD8 */ |
| 4552 | + char reglist_string[100]; |
| 4553 | + avr32_make_fp_reglist_d (value, (char *) reglist_string); |
| 4554 | + fputs (reglist_string, stream); |
| 4555 | + } |
| 4556 | + else if (code == 'd') |
| 4557 | + { |
| 4558 | + /* Print in decimal format */ |
| 4559 | + fprintf (stream, "%d", value); |
| 4560 | + } |
| 4561 | + else if (code == 'h') |
| 4562 | + { |
| 4563 | + /* Print halfword part of word */ |
| 4564 | + fputs (value ? "b" : "t", stream); |
| 4565 | + } |
| 4566 | + else |
| 4567 | + { |
| 4568 | + /* Normal constant */ |
| 4569 | + fprintf (stream, "%d", value); |
| 4570 | + } |
| 4571 | + break; |
| 4572 | + } |
| 4573 | + case CONST_DOUBLE: |
| 4574 | + { |
| 4575 | + HOST_WIDE_INT hi, lo; |
| 4576 | + if (GET_MODE (x) == DImode) |
| 4577 | + { |
| 4578 | + hi = CONST_DOUBLE_HIGH (x); |
| 4579 | + lo = CONST_DOUBLE_LOW (x); |
| 4580 | + } |
| 4581 | + else |
| 4582 | + { |
| 4583 | + HOST_WIDE_INT target_float[2]; |
| 4584 | + hi = lo = 0; |
| 4585 | + real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (x), |
| 4586 | + GET_MODE (x)); |
| 4587 | + /* For doubles the most significant part starts at index 0. */ |
| 4588 | + if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD) |
| 4589 | + { |
| 4590 | + hi = target_float[0]; |
| 4591 | + lo = target_float[1]; |
| 4592 | + } |
| 4593 | + else |
| 4594 | + { |
| 4595 | + lo = target_float[0]; |
| 4596 | + } |
| 4597 | + } |
| 4598 | + |
| 4599 | + if (avr32_const_ok_for_constraint_p (lo, 'K', "Ks21") |
| 4600 | + && ((GET_MODE (x) == SFmode) |
| 4601 | + || avr32_const_ok_for_constraint_p (hi, 'K', "Ks21"))) |
| 4602 | + { |
| 4603 | + if (code == 'm') |
| 4604 | + fprintf (stream, "%ld", hi); |
| 4605 | + else |
| 4606 | + fprintf (stream, "%ld", lo); |
| 4607 | + } |
| 4608 | + else |
| 4609 | + { |
| 4610 | + fprintf (stream, "value too large"); |
| 4611 | + } |
| 4612 | + break; |
| 4613 | + } |
| 4614 | + case CONST: |
| 4615 | + output_addr_const (stream, XEXP (XEXP (x, 0), 0)); |
| 4616 | + fprintf (stream, "+%ld", INTVAL (XEXP (XEXP (x, 0), 1))); |
| 4617 | + break; |
| 4618 | + case REG: |
| 4619 | + /* Swap register name if the register is DImode or DFmode. */ |
| 4620 | + if (GET_MODE (x) == DImode || GET_MODE (x) == DFmode) |
| 4621 | + { |
| 4622 | + /* Double register must have an even numbered address */ |
| 4623 | + gcc_assert (!(REGNO (x) % 2)); |
| 4624 | + if (code == 'm') |
| 4625 | + fputs (reg_names[true_regnum (x)], stream); |
| 4626 | + else |
| 4627 | + fputs (reg_names[true_regnum (x) + 1], stream); |
| 4628 | + } |
| 4629 | + else if (GET_MODE (x) == TImode) |
| 4630 | + { |
| 4631 | + switch (code) |
| 4632 | + { |
| 4633 | + case 'T': |
| 4634 | + fputs (reg_names[true_regnum (x)], stream); |
| 4635 | + break; |
| 4636 | + case 'U': |
| 4637 | + fputs (reg_names[true_regnum (x) + 1], stream); |
| 4638 | + break; |
| 4639 | + case 'L': |
| 4640 | + fputs (reg_names[true_regnum (x) + 2], stream); |
| 4641 | + break; |
| 4642 | + case 'B': |
| 4643 | + fputs (reg_names[true_regnum (x) + 3], stream); |
| 4644 | + break; |
| 4645 | + default: |
| 4646 | + fprintf (stream, "%s, %s, %s, %s", |
| 4647 | + reg_names[true_regnum (x) + 3], |
| 4648 | + reg_names[true_regnum (x) + 2], |
| 4649 | + reg_names[true_regnum (x) + 1], |
| 4650 | + reg_names[true_regnum (x)]); |
| 4651 | + break; |
| 4652 | + } |
| 4653 | + } |
| 4654 | + else |
| 4655 | + { |
| 4656 | + fputs (reg_names[true_regnum (x)], stream); |
| 4657 | + } |
| 4658 | + break; |
| 4659 | + case CODE_LABEL: |
| 4660 | + case LABEL_REF: |
| 4661 | + case SYMBOL_REF: |
| 4662 | + output_addr_const (stream, x); |
| 4663 | + break; |
| 4664 | + case MEM: |
| 4665 | + switch (GET_CODE (XEXP (x, 0))) |
| 4666 | + { |
| 4667 | + case LABEL_REF: |
| 4668 | + case SYMBOL_REF: |
| 4669 | + output_addr_const (stream, XEXP (x, 0)); |
| 4670 | + break; |
| 4671 | + case MEM: |
| 4672 | + switch (GET_CODE (XEXP (XEXP (x, 0), 0))) |
| 4673 | + { |
| 4674 | + case SYMBOL_REF: |
| 4675 | + output_addr_const (stream, XEXP (XEXP (x, 0), 0)); |
| 4676 | + break; |
| 4677 | + default: |
| 4678 | + error = 1; |
| 4679 | + break; |
| 4680 | + } |
| 4681 | + break; |
| 4682 | + case REG: |
| 4683 | + avr32_print_operand (stream, XEXP (x, 0), 0); |
| 4684 | + if (code != 'p') |
| 4685 | + fputs ("[0]", stream); |
| 4686 | + break; |
| 4687 | + case PRE_DEC: |
| 4688 | + fputs ("--", stream); |
| 4689 | + avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0); |
| 4690 | + break; |
| 4691 | + case POST_INC: |
| 4692 | + avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0); |
| 4693 | + fputs ("++", stream); |
| 4694 | + break; |
| 4695 | + case PLUS: |
| 4696 | + { |
| 4697 | + rtx op0 = XEXP (XEXP (x, 0), 0); |
| 4698 | + rtx op1 = XEXP (XEXP (x, 0), 1); |
| 4699 | + rtx base = NULL_RTX, offset = NULL_RTX; |
| 4700 | + |
| 4701 | + if (avr32_address_register_rtx_p (op0, 1)) |
| 4702 | + { |
| 4703 | + base = op0; |
| 4704 | + offset = op1; |
| 4705 | + } |
| 4706 | + else if (avr32_address_register_rtx_p (op1, 1)) |
| 4707 | + { |
| 4708 | + /* Operands are switched. */ |
| 4709 | + base = op1; |
| 4710 | + offset = op0; |
| 4711 | + } |
| 4712 | + |
| 4713 | + gcc_assert (base && offset |
| 4714 | + && avr32_address_register_rtx_p (base, 1) |
| 4715 | + && avr32_legitimate_index_p (GET_MODE (x), offset, |
| 4716 | + 1)); |
| 4717 | + |
| 4718 | + avr32_print_operand (stream, base, 0); |
| 4719 | + fputs ("[", stream); |
| 4720 | + avr32_print_operand (stream, offset, 0); |
| 4721 | + fputs ("]", stream); |
| 4722 | + break; |
| 4723 | + } |
| 4724 | + case CONST: |
| 4725 | + output_addr_const (stream, XEXP (XEXP (XEXP (x, 0), 0), 0)); |
| 4726 | + fprintf (stream, " + %ld", |
| 4727 | + INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))); |
| 4728 | + break; |
| 4729 | + default: |
| 4730 | + error = 1; |
| 4731 | + } |
| 4732 | + break; |
| 4733 | + case MULT: |
| 4734 | + { |
| 4735 | + int value = INTVAL (XEXP (x, 1)); |
| 4736 | + |
| 4737 | + /* Convert immediate in multiplication into a shift immediate */ |
| 4738 | + switch (value) |
| 4739 | + { |
| 4740 | + case 2: |
| 4741 | + value = 1; |
| 4742 | + break; |
| 4743 | + case 4: |
| 4744 | + value = 2; |
| 4745 | + break; |
| 4746 | + case 8: |
| 4747 | + value = 3; |
| 4748 | + break; |
| 4749 | + default: |
| 4750 | + value = 0; |
| 4751 | + } |
| 4752 | + fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))], |
| 4753 | + value); |
| 4754 | + break; |
| 4755 | + } |
| 4756 | + case ASHIFT: |
| 4757 | + if (GET_CODE (XEXP (x, 1)) == CONST_INT) |
| 4758 | + fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))], |
| 4759 | + (int) INTVAL (XEXP (x, 1))); |
| 4760 | + else if (REG_P (XEXP (x, 1))) |
| 4761 | + fprintf (stream, "%s << %s", reg_names[true_regnum (XEXP (x, 0))], |
| 4762 | + reg_names[true_regnum (XEXP (x, 1))]); |
| 4763 | + else |
| 4764 | + { |
| 4765 | + error = 1; |
| 4766 | + } |
| 4767 | + break; |
| 4768 | + case LSHIFTRT: |
| 4769 | + if (GET_CODE (XEXP (x, 1)) == CONST_INT) |
| 4770 | + fprintf (stream, "%s >> %i", reg_names[true_regnum (XEXP (x, 0))], |
| 4771 | + (int) INTVAL (XEXP (x, 1))); |
| 4772 | + else if (REG_P (XEXP (x, 1))) |
| 4773 | + fprintf (stream, "%s >> %s", reg_names[true_regnum (XEXP (x, 0))], |
| 4774 | + reg_names[true_regnum (XEXP (x, 1))]); |
| 4775 | + else |
| 4776 | + { |
| 4777 | + error = 1; |
| 4778 | + } |
| 4780 | + break; |
| 4781 | + case PARALLEL: |
| 4782 | + { |
| 4783 | + /* Load store multiple */ |
| 4784 | + int i; |
| 4785 | + int count = XVECLEN (x, 0); |
| 4786 | + int reglist16 = 0; |
| 4787 | + char reglist16_string[100]; |
| 4788 | + |
| 4789 | + for (i = 0; i < count; ++i) |
| 4790 | + { |
| 4791 | + rtx vec_elm = XVECEXP (x, 0, i); |
| 4792 | +	    if (GET_CODE (vec_elm) != SET)
| 4793 | + { |
| 4794 | + debug_rtx (vec_elm); |
| 4795 | + internal_error ("Unknown element in parallel expression!"); |
| 4796 | + } |
| 4797 | +	    if (GET_CODE (XEXP (vec_elm, 0)) == REG)
| 4798 | + { |
| 4799 | + /* Load multiple */ |
| 4800 | + reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 0))); |
| 4801 | + } |
| 4802 | + else |
| 4803 | + { |
| 4804 | + /* Store multiple */ |
| 4805 | + reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 1))); |
| 4806 | + } |
| 4807 | + } |
| 4808 | + |
| 4809 | + avr32_make_reglist16 (reglist16, reglist16_string); |
| 4810 | + fputs (reglist16_string, stream); |
| 4811 | + |
| 4812 | + break; |
| 4813 | + } |
| 4814 | + |
| 4815 | + default: |
| 4816 | + error = 1; |
| 4817 | + } |
| 4818 | + |
| 4819 | + if (error) |
| 4820 | + { |
| 4821 | + debug_rtx (x); |
| 4822 | + internal_error ("Illegal expression for avr32_print_operand"); |
| 4823 | + } |
| 4824 | +} |
| 4825 | + |
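|      | +/* Return the value of the REG_EQUIV note attached to INSN, or NULL_RTX
|      | +   if the insn has no such note. */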
| 4826 | +rtx |
| 4827 | +avr32_get_note_reg_equiv (rtx insn) |
| 4828 | +{ |
| 4829 | + rtx note; |
| 4830 | + |
| 4831 | + note = find_reg_note (insn, REG_EQUIV, NULL_RTX); |
| 4832 | + |
| 4833 | + if (note != NULL_RTX) |
| 4834 | + return XEXP (note, 0); |
| 4835 | + else |
| 4836 | + return NULL_RTX; |
| 4837 | +} |
| 4838 | + |
| 4839 | +/* |
| 4840 | + Outputs to stdio stream stream the assembler syntax for an instruction |
| 4841 | + operand that is a memory reference whose address is x. x is an RTL |
| 4842 | + expression. |
| 4843 | + |
| 4844 | + ToDo: fixme. |
| 4845 | +*/ |
| 4846 | +void |
| 4847 | +avr32_print_operand_address (FILE * stream, rtx x) |
| 4848 | +{ |
| 4849 | + fprintf (stream, "(%d) /* address */", REGNO (x)); |
| 4850 | +} |
| 4851 | + |
| 4852 | +/* Return true if _GLOBAL_OFFSET_TABLE_ symbol is mentioned. */ |
| 4853 | +bool |
| 4854 | +avr32_got_mentioned_p (rtx addr) |
| 4855 | +{ |
| 4856 | + if (GET_CODE (addr) == MEM) |
| 4857 | + addr = XEXP (addr, 0); |
| 4858 | + while (GET_CODE (addr) == CONST) |
| 4859 | + addr = XEXP (addr, 0); |
| 4860 | + if (GET_CODE (addr) == SYMBOL_REF) |
| 4861 | + { |
| 4862 | + return streq (XSTR (addr, 0), "_GLOBAL_OFFSET_TABLE_"); |
| 4863 | + } |
| 4864 | + if (GET_CODE (addr) == PLUS || GET_CODE (addr) == MINUS) |
| 4865 | + { |
| 4866 | + bool l1, l2; |
| 4867 | + |
| 4868 | + l1 = avr32_got_mentioned_p (XEXP (addr, 0)); |
| 4869 | + l2 = avr32_got_mentioned_p (XEXP (addr, 1)); |
| 4870 | + return l1 || l2; |
| 4871 | + } |
| 4872 | + return false; |
| 4873 | +} |
| 4874 | + |
| 4875 | + |
| 4876 | +/* Find the symbol in an address expression. */ |
| 4877 | + |
| 4878 | +rtx |
| 4879 | +avr32_find_symbol (rtx addr) |
| 4880 | +{ |
| 4881 | + if (GET_CODE (addr) == MEM) |
| 4882 | + addr = XEXP (addr, 0); |
| 4883 | + |
| 4884 | + while (GET_CODE (addr) == CONST) |
| 4885 | + addr = XEXP (addr, 0); |
| 4886 | + |
| 4887 | + if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF) |
| 4888 | + return addr; |
| 4889 | + if (GET_CODE (addr) == PLUS) |
| 4890 | + { |
| 4891 | + rtx l1, l2; |
| 4892 | + |
| 4893 | + l1 = avr32_find_symbol (XEXP (addr, 0)); |
| 4894 | + l2 = avr32_find_symbol (XEXP (addr, 1)); |
| 4895 | + if (l1 != NULL_RTX && l2 == NULL_RTX) |
| 4896 | + return l1; |
| 4897 | + else if (l1 == NULL_RTX && l2 != NULL_RTX) |
| 4898 | + return l2; |
| 4899 | + } |
| 4900 | + |
| 4901 | + return NULL_RTX; |
| 4902 | +} |
| 4903 | + |
| 4904 | + |
| 4905 | +/* Routines for manipulation of the constant pool. */ |
| 4906 | + |
| 4907 | +/* AVR32 instructions cannot load a large constant directly into a |
| 4908 | + register; they have to come from a pc relative load. The constant |
| 4909 | + must therefore be placed in the addressable range of the pc |
| 4910 | + relative load. Depending on the precise pc relative load |
| 4911 | + instruction the range is somewhere between 256 bytes and 4k. This |
| 4912 | + means that we often have to dump a constant inside a function, and |
| 4913 | + generate code to branch around it. |
| 4914 | + |
| 4915 | + It is important to minimize this, since the branches will slow |
| 4916 | + things down and make the code larger. |
| 4917 | + |
| 4918 | + Normally we can hide the table after an existing unconditional |
| 4919 | + branch so that there is no interruption of the flow, but in the |
| 4920 | + worst case the code looks like this: |
| 4921 | + |
| 4922 | + lddpc rn, L1 |
| 4923 | + ... |
| 4924 | + rjmp L2 |
| 4925 | + align |
| 4926 | + L1: .long value |
| 4927 | + L2: |
| 4928 | + ... |
| 4929 | + |
| 4930 | + lddpc rn, L3 |
| 4931 | + ... |
| 4932 | + rjmp L4 |
| 4933 | + align |
| 4934 | + L3: .long value |
| 4935 | + L4: |
| 4936 | + ... |
| 4937 | + |
| 4938 | + We fix this by performing a scan after scheduling, which notices |
| 4939 | + which instructions need to have their operands fetched from the |
| 4940 | + constant table and builds the table. |
| 4941 | + |
| 4942 | + The algorithm starts by building a table of all the constants that |
| 4943 | + need fixing up and all the natural barriers in the function (places |
| 4944 | + where a constant table can be dropped without breaking the flow). |
| 4945 | + For each fixup we note how far the pc-relative replacement will be |
| 4946 | + able to reach and the offset of the instruction into the function. |
| 4947 | + |
| 4948 | + Having built the table we then group the fixes together to form |
| 4949 | + tables that are as large as possible (subject to addressing |
| 4950 | + constraints) and emit each table of constants after the last |
| 4951 | + barrier that is within range of all the instructions in the group. |
| 4952 | + If a group does not contain a barrier, then we forcibly create one |
| 4953 | + by inserting a jump instruction into the flow. Once the table has |
| 4954 | + been inserted, the insns are then modified to reference the |
| 4955 | + relevant entry in the pool. |
| 4956 | + |
| 4957 | + Possible enhancements to the algorithm (not implemented) are: |
| 4958 | + |
| 4959 | + 1) For some processors and object formats, there may be benefit in |
| 4960 | + aligning the pools to the start of cache lines; this alignment |
| 4961 | + would need to be taken into account when calculating addressability |
| 4962 | + of a pool. */ |
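|      | +
|      | +/* As a rough, illustrative example of the constraints involved: the
|      | +   lddpc instruction used as the worst case in push_minipool_fix below
|      | +   can reach only ((1 << 7) - 1) << 2 = 508 bytes forwards and not at
|      | +   all backwards, so any constant it loads must be dumped in a pool no
|      | +   more than 508 bytes after the load, whereas an mcall fix can be
|      | +   satisfied by a pool roughly 128k bytes away in either direction.
|      | +   Fixes whose ranges overlap are grouped into a single table placed
|      | +   after the last barrier that all of them can still reach. */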
| 4963 | + |
| 4964 | +/* These typedefs are located at the start of this file, so that |
| 4965 | + they can be used in the prototypes there. This comment is to |
| 4966 | + remind readers of that fact so that the following structures |
| 4967 | + can be understood more easily. |
| 4968 | + |
| 4969 | + typedef struct minipool_node Mnode; |
| 4970 | + typedef struct minipool_fixup Mfix; */ |
| 4971 | + |
| 4972 | +struct minipool_node |
| 4973 | +{ |
| 4974 | + /* Doubly linked chain of entries. */ |
| 4975 | + Mnode *next; |
| 4976 | + Mnode *prev; |
| 4977 | + /* The maximum offset into the code that this entry can be placed. While |
| 4978 | + pushing fixes for forward references, all entries are sorted in order of |
| 4979 | + increasing max_address. */ |
| 4980 | + HOST_WIDE_INT max_address; |
| 4981 | + /* Similarly for an entry inserted for a backwards ref. */ |
| 4982 | + HOST_WIDE_INT min_address; |
| 4983 | + /* The number of fixes referencing this entry. This can become zero if we |
| 4984 | + "unpush" an entry. In this case we ignore the entry when we come to |
| 4985 | + emit the code. */ |
| 4986 | + int refcount; |
| 4987 | + /* The offset from the start of the minipool. */ |
| 4988 | + HOST_WIDE_INT offset; |
| 4989 | + /* The value in table. */ |
| 4990 | + rtx value; |
| 4991 | + /* The mode of value. */ |
| 4992 | + enum machine_mode mode; |
| 4993 | + /* The size of the value. */ |
| 4994 | + int fix_size; |
| 4995 | +}; |
| 4996 | + |
| 4997 | +struct minipool_fixup
| 4998 | +{
|      | +  /* Next fix in the chain of fixes for this function. */
| 4999 | +  Mfix *next;
|      | +  /* The insn requiring the constant (or the barrier insn, for barrier
|      | +     entries). */
| 5000 | +  rtx insn;
|      | +  /* The offset of the insn from the start of the function. */
| 5001 | +  HOST_WIDE_INT address;
|      | +  /* A pointer to the operand within the insn that needs fixing. */
| 5002 | +  rtx *loc;
|      | +  /* The mode of the constant. */
| 5003 | +  enum machine_mode mode;
|      | +  /* The room, in bytes, the constant will take in the pool. */
| 5004 | +  int fix_size;
|      | +  /* The constant itself. */
| 5005 | +  rtx value;
|      | +  /* The minipool entry assigned to this fix, once it has been placed. */
| 5006 | +  Mnode *minipool;
|      | +  /* How far forwards and backwards (in bytes) the replacement
|      | +     pc-relative load can reach. */
| 5007 | +  HOST_WIDE_INT forwards;
| 5008 | +  HOST_WIDE_INT backwards;
| 5009 | +};
| 5010 | + |
| 5011 | + |
| 5012 | +/* Fixes less than a word need padding out to a word boundary. */ |
| 5013 | +#define MINIPOOL_FIX_SIZE(mode, value) \ |
| 5014 | + (IS_FORCE_MINIPOOL(value) ? 0 : \ |
| 5015 | + (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)) |
| 5016 | + |
| 5017 | +#define IS_FORCE_MINIPOOL(x) \ |
| 5018 | + (GET_CODE(x) == UNSPEC && \ |
| 5019 | + XINT(x, 1) == UNSPEC_FORCE_MINIPOOL) |
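|      | +
|      | +/* So, for example, a QImode or HImode constant is padded out to a
|      | +   4 byte pool entry, a DImode or DFmode constant occupies 8 bytes,
|      | +   and the dummy UNSPEC_FORCE_MINIPOOL entries used only to force an
|      | +   otherwise empty pool to be emitted take no room at all. */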
| 5020 | + |
| 5021 | +static Mnode *minipool_vector_head; |
| 5022 | +static Mnode *minipool_vector_tail; |
| 5023 | + |
| 5024 | +/* The linked list of all minipool fixes required for this function. */ |
| 5025 | +Mfix *minipool_fix_head; |
| 5026 | +Mfix *minipool_fix_tail; |
| 5027 | +/* The fix entry for the current minipool, once it has been placed. */ |
| 5028 | +Mfix *minipool_barrier; |
| 5029 | + |
| 5030 | +/* Determines if INSN is the start of a jump table. Returns the end |
| 5031 | + of the TABLE or NULL_RTX. */ |
| 5032 | +static rtx |
| 5033 | +is_jump_table (rtx insn) |
| 5034 | +{ |
| 5035 | + rtx table; |
| 5036 | + |
| 5037 | + if (GET_CODE (insn) == JUMP_INSN |
| 5038 | + && JUMP_LABEL (insn) != NULL |
| 5039 | + && ((table = next_real_insn (JUMP_LABEL (insn))) |
| 5040 | + == next_real_insn (insn)) |
| 5041 | + && table != NULL |
| 5042 | + && GET_CODE (table) == JUMP_INSN |
| 5043 | + && (GET_CODE (PATTERN (table)) == ADDR_VEC |
| 5044 | + || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC)) |
| 5045 | + return table; |
| 5046 | + |
| 5047 | + return NULL_RTX; |
| 5048 | +} |
| 5049 | + |
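|      | +/* Return the number of bytes that the jump table INSN (an ADDR_VEC or
|      | +   ADDR_DIFF_VEC) occupies, or 0 if the table takes no room in the text
|      | +   section. */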
| 5050 | +static HOST_WIDE_INT |
| 5051 | +get_jump_table_size (rtx insn) |
| 5052 | +{ |
| 5053 | +  /* ADDR_VECs only take room if read-only data goes into the text section. */
| 5054 | + if (JUMP_TABLES_IN_TEXT_SECTION |
| 5055 | +#if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP) |
| 5056 | + || 1 |
| 5057 | +#endif |
| 5058 | + ) |
| 5059 | + { |
| 5060 | + rtx body = PATTERN (insn); |
| 5061 | + int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0; |
| 5062 | + |
| 5063 | + return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt); |
| 5064 | + } |
| 5065 | + |
| 5066 | + return 0; |
| 5067 | +} |
| 5068 | + |
| 5069 | +/* Move a minipool fix MP from its current location to before MAX_MP. |
| 5070 | + If MAX_MP is NULL, then MP doesn't need moving, but the addressing |
| 5071 | + constraints may need updating. */ |
| 5072 | +static Mnode * |
| 5073 | +move_minipool_fix_forward_ref (Mnode * mp, Mnode * max_mp, |
| 5074 | + HOST_WIDE_INT max_address) |
| 5075 | +{ |
| 5076 | + /* This should never be true and the code below assumes these are |
| 5077 | + different. */ |
| 5078 | + if (mp == max_mp) |
| 5079 | + abort (); |
| 5080 | + |
| 5081 | + if (max_mp == NULL) |
| 5082 | + { |
| 5083 | + if (max_address < mp->max_address) |
| 5084 | + mp->max_address = max_address; |
| 5085 | + } |
| 5086 | + else |
| 5087 | + { |
| 5088 | + if (max_address > max_mp->max_address - mp->fix_size) |
| 5089 | + mp->max_address = max_mp->max_address - mp->fix_size; |
| 5090 | + else |
| 5091 | + mp->max_address = max_address; |
| 5092 | + |
| 5093 | + /* Unlink MP from its current position. Since max_mp is non-null, |
| 5094 | + mp->prev must be non-null. */ |
| 5095 | + mp->prev->next = mp->next; |
| 5096 | + if (mp->next != NULL) |
| 5097 | + mp->next->prev = mp->prev; |
| 5098 | + else |
| 5099 | + minipool_vector_tail = mp->prev; |
| 5100 | + |
| 5101 | + /* Re-insert it before MAX_MP. */ |
| 5102 | + mp->next = max_mp; |
| 5103 | + mp->prev = max_mp->prev; |
| 5104 | + max_mp->prev = mp; |
| 5105 | + |
| 5106 | + if (mp->prev != NULL) |
| 5107 | + mp->prev->next = mp; |
| 5108 | + else |
| 5109 | + minipool_vector_head = mp; |
| 5110 | + } |
| 5111 | + |
| 5112 | + /* Save the new entry. */ |
| 5113 | + max_mp = mp; |
| 5114 | + |
| 5115 | +  /* Scan over the preceding entries and adjust their addresses
| 5116 | +     as required. */
| 5117 | + while (mp->prev != NULL |
| 5118 | + && mp->prev->max_address > mp->max_address - mp->prev->fix_size) |
| 5119 | + { |
| 5120 | + mp->prev->max_address = mp->max_address - mp->prev->fix_size; |
| 5121 | + mp = mp->prev; |
| 5122 | + } |
| 5123 | + |
| 5124 | + return max_mp; |
| 5125 | +} |
| 5126 | + |
| 5127 | +/* Add a constant to the minipool for a forward reference. Returns the |
| 5128 | + node added or NULL if the constant will not fit in this pool. */ |
| 5129 | +static Mnode * |
| 5130 | +add_minipool_forward_ref (Mfix * fix) |
| 5131 | +{ |
| 5132 | + /* If set, max_mp is the first pool_entry that has a lower constraint than |
| 5133 | + the one we are trying to add. */ |
| 5134 | + Mnode *max_mp = NULL; |
| 5135 | + HOST_WIDE_INT max_address = fix->address + fix->forwards; |
| 5136 | + Mnode *mp; |
| 5137 | + |
| 5138 | + /* If this fix's address is greater than the address of the first entry, |
| 5139 | + then we can't put the fix in this pool. We subtract the size of the |
| 5140 | + current fix to ensure that if the table is fully packed we still have |
| 5141 | +     enough room to insert this value by shuffling the other fixes forwards. */
| 5142 | + if (minipool_vector_head && |
| 5143 | + fix->address >= minipool_vector_head->max_address - fix->fix_size) |
| 5144 | + return NULL; |
| 5145 | + |
| 5146 | + /* Scan the pool to see if a constant with the same value has already been |
| 5147 | + added. While we are doing this, also note the location where we must |
| 5148 | + insert the constant if it doesn't already exist. */ |
| 5149 | + for (mp = minipool_vector_head; mp != NULL; mp = mp->next) |
| 5150 | + { |
| 5151 | + if (GET_CODE (fix->value) == GET_CODE (mp->value) |
| 5152 | + && fix->mode == mp->mode |
| 5153 | + && (GET_CODE (fix->value) != CODE_LABEL |
| 5154 | + || (CODE_LABEL_NUMBER (fix->value) |
| 5155 | + == CODE_LABEL_NUMBER (mp->value))) |
| 5156 | + && rtx_equal_p (fix->value, mp->value)) |
| 5157 | + { |
| 5158 | + /* More than one fix references this entry. */ |
| 5159 | + mp->refcount++; |
| 5160 | + return move_minipool_fix_forward_ref (mp, max_mp, max_address); |
| 5161 | + } |
| 5162 | + |
| 5163 | + /* Note the insertion point if necessary. */ |
| 5164 | + if (max_mp == NULL && mp->max_address > max_address) |
| 5165 | + max_mp = mp; |
| 5166 | + |
| 5167 | + } |
| 5168 | + |
| 5169 | + /* The value is not currently in the minipool, so we need to create a new |
| 5170 | + entry for it. If MAX_MP is NULL, the entry will be put on the end of |
| 5171 | + the list since the placement is less constrained than any existing |
| 5172 | + entry. Otherwise, we insert the new fix before MAX_MP and, if |
| 5173 | + necessary, adjust the constraints on the other entries. */ |
| 5174 | + mp = xmalloc (sizeof (*mp)); |
| 5175 | + mp->fix_size = fix->fix_size; |
| 5176 | + mp->mode = fix->mode; |
| 5177 | + mp->value = fix->value; |
| 5178 | + mp->refcount = 1; |
| 5179 | + /* Not yet required for a backwards ref. */ |
| 5180 | + mp->min_address = -65536; |
| 5181 | + |
| 5182 | + if (max_mp == NULL) |
| 5183 | + { |
| 5184 | + mp->max_address = max_address; |
| 5185 | + mp->next = NULL; |
| 5186 | + mp->prev = minipool_vector_tail; |
| 5187 | + |
| 5188 | + if (mp->prev == NULL) |
| 5189 | + { |
| 5190 | + minipool_vector_head = mp; |
| 5191 | + minipool_vector_label = gen_label_rtx (); |
| 5192 | + } |
| 5193 | + else |
| 5194 | + mp->prev->next = mp; |
| 5195 | + |
| 5196 | + minipool_vector_tail = mp; |
| 5197 | + } |
| 5198 | + else |
| 5199 | + { |
| 5200 | + if (max_address > max_mp->max_address - mp->fix_size) |
| 5201 | + mp->max_address = max_mp->max_address - mp->fix_size; |
| 5202 | + else |
| 5203 | + mp->max_address = max_address; |
| 5204 | + |
| 5205 | + mp->next = max_mp; |
| 5206 | + mp->prev = max_mp->prev; |
| 5207 | + max_mp->prev = mp; |
| 5208 | + if (mp->prev != NULL) |
| 5209 | + mp->prev->next = mp; |
| 5210 | + else |
| 5211 | + minipool_vector_head = mp; |
| 5212 | + } |
| 5213 | + |
| 5214 | + /* Save the new entry. */ |
| 5215 | + max_mp = mp; |
| 5216 | + |
| 5217 | +  /* Scan over the preceding entries and adjust their addresses
| 5218 | +     as required. */
| 5219 | + while (mp->prev != NULL |
| 5220 | + && mp->prev->max_address > mp->max_address - mp->prev->fix_size) |
| 5221 | + { |
| 5222 | + mp->prev->max_address = mp->max_address - mp->prev->fix_size; |
| 5223 | + mp = mp->prev; |
| 5224 | + } |
| 5225 | + |
| 5226 | + return max_mp; |
| 5227 | +} |
| 5228 | + |
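|      | +/* Like move_minipool_fix_forward_ref, but for a backwards reference:
|      | +   reinsert the minipool entry MP after MIN_MP, or, if MIN_MP is NULL,
|      | +   just tighten MP's minimum address constraint to MIN_ADDRESS. The
|      | +   offsets of all entries are then recomputed. */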
| 5229 | +static Mnode * |
| 5230 | +move_minipool_fix_backward_ref (Mnode * mp, Mnode * min_mp, |
| 5231 | + HOST_WIDE_INT min_address) |
| 5232 | +{ |
| 5233 | + HOST_WIDE_INT offset; |
| 5234 | + |
| 5235 | + /* This should never be true, and the code below assumes these are |
| 5236 | + different. */ |
| 5237 | + if (mp == min_mp) |
| 5238 | + abort (); |
| 5239 | + |
| 5240 | + if (min_mp == NULL) |
| 5241 | + { |
| 5242 | + if (min_address > mp->min_address) |
| 5243 | + mp->min_address = min_address; |
| 5244 | + } |
| 5245 | + else |
| 5246 | + { |
| 5247 | + /* We will adjust this below if it is too loose. */ |
| 5248 | + mp->min_address = min_address; |
| 5249 | + |
| 5250 | + /* Unlink MP from its current position. Since min_mp is non-null, |
| 5251 | + mp->next must be non-null. */ |
| 5252 | + mp->next->prev = mp->prev; |
| 5253 | + if (mp->prev != NULL) |
| 5254 | + mp->prev->next = mp->next; |
| 5255 | + else |
| 5256 | + minipool_vector_head = mp->next; |
| 5257 | + |
| 5258 | + /* Reinsert it after MIN_MP. */ |
| 5259 | + mp->prev = min_mp; |
| 5260 | + mp->next = min_mp->next; |
| 5261 | + min_mp->next = mp; |
| 5262 | + if (mp->next != NULL) |
| 5263 | + mp->next->prev = mp; |
| 5264 | + else |
| 5265 | + minipool_vector_tail = mp; |
| 5266 | + } |
| 5267 | + |
| 5268 | + min_mp = mp; |
| 5269 | + |
| 5270 | + offset = 0; |
| 5271 | + for (mp = minipool_vector_head; mp != NULL; mp = mp->next) |
| 5272 | + { |
| 5273 | + mp->offset = offset; |
| 5274 | + if (mp->refcount > 0) |
| 5275 | + offset += mp->fix_size; |
| 5276 | + |
| 5277 | + if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size) |
| 5278 | + mp->next->min_address = mp->min_address + mp->fix_size; |
| 5279 | + } |
| 5280 | + |
| 5281 | + return min_mp; |
| 5282 | +} |
| 5283 | + |
| 5284 | +/* Add a constant to the minipool for a backward reference. Returns the |
| 5285 | + node added or NULL if the constant will not fit in this pool. |
| 5286 | + |
| 5287 | + Note that the code for insertion for a backwards reference can be |
| 5288 | + somewhat confusing because the calculated offsets for each fix do |
| 5289 | + not take into account the size of the pool (which is still under |
| 5290 | +   construction). */
| 5291 | +static Mnode * |
| 5292 | +add_minipool_backward_ref (Mfix * fix) |
| 5293 | +{ |
| 5294 | + /* If set, min_mp is the last pool_entry that has a lower constraint than |
| 5295 | + the one we are trying to add. */ |
| 5296 | + Mnode *min_mp = NULL; |
| 5297 | + /* This can be negative, since it is only a constraint. */ |
| 5298 | + HOST_WIDE_INT min_address = fix->address - fix->backwards; |
| 5299 | + Mnode *mp; |
| 5300 | + |
| 5301 | + /* If we can't reach the current pool from this insn, or if we can't insert |
| 5302 | + this entry at the end of the pool without pushing other fixes out of |
| 5303 | + range, then we don't try. This ensures that we can't fail later on. */ |
| 5304 | + if (min_address >= minipool_barrier->address |
| 5305 | + || (minipool_vector_tail->min_address + fix->fix_size |
| 5306 | + >= minipool_barrier->address)) |
| 5307 | + return NULL; |
| 5308 | + |
| 5309 | + /* Scan the pool to see if a constant with the same value has already been |
| 5310 | + added. While we are doing this, also note the location where we must |
| 5311 | + insert the constant if it doesn't already exist. */ |
| 5312 | + for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev) |
| 5313 | + { |
| 5314 | + if (GET_CODE (fix->value) == GET_CODE (mp->value) |
| 5315 | + && fix->mode == mp->mode |
| 5316 | + && (GET_CODE (fix->value) != CODE_LABEL |
| 5317 | + || (CODE_LABEL_NUMBER (fix->value) |
| 5318 | + == CODE_LABEL_NUMBER (mp->value))) |
| 5319 | + && rtx_equal_p (fix->value, mp->value) |
| 5320 | + /* Check that there is enough slack to move this entry to the end |
| 5321 | + of the table (this is conservative). */ |
| 5322 | + && (mp->max_address |
| 5323 | + > (minipool_barrier->address |
| 5324 | + + minipool_vector_tail->offset |
| 5325 | + + minipool_vector_tail->fix_size))) |
| 5326 | + { |
| 5327 | + mp->refcount++; |
| 5328 | + return move_minipool_fix_backward_ref (mp, min_mp, min_address); |
| 5329 | + } |
| 5330 | + |
| 5331 | + if (min_mp != NULL) |
| 5332 | + mp->min_address += fix->fix_size; |
| 5333 | + else |
| 5334 | + { |
| 5335 | + /* Note the insertion point if necessary. */ |
| 5336 | + if (mp->min_address < min_address) |
| 5337 | + { |
| 5338 | + min_mp = mp; |
| 5339 | + } |
| 5340 | + else if (mp->max_address |
| 5341 | + < minipool_barrier->address + mp->offset + fix->fix_size) |
| 5342 | + { |
| 5343 | + /* Inserting before this entry would push the fix beyond its |
| 5344 | + maximum address (which can happen if we have re-located a |
| 5345 | + forwards fix); force the new fix to come after it. */ |
| 5346 | + min_mp = mp; |
| 5347 | + min_address = mp->min_address + fix->fix_size; |
| 5348 | + } |
| 5349 | + } |
| 5350 | + } |
| 5351 | + |
| 5352 | + /* We need to create a new entry. */ |
| 5353 | + mp = xmalloc (sizeof (*mp)); |
| 5354 | + mp->fix_size = fix->fix_size; |
| 5355 | + mp->mode = fix->mode; |
| 5356 | + mp->value = fix->value; |
| 5357 | + mp->refcount = 1; |
| 5358 | + mp->max_address = minipool_barrier->address + 65536; |
| 5359 | + |
| 5360 | + mp->min_address = min_address; |
| 5361 | + |
| 5362 | + if (min_mp == NULL) |
| 5363 | + { |
| 5364 | + mp->prev = NULL; |
| 5365 | + mp->next = minipool_vector_head; |
| 5366 | + |
| 5367 | + if (mp->next == NULL) |
| 5368 | + { |
| 5369 | + minipool_vector_tail = mp; |
| 5370 | + minipool_vector_label = gen_label_rtx (); |
| 5371 | + } |
| 5372 | + else |
| 5373 | + mp->next->prev = mp; |
| 5374 | + |
| 5375 | + minipool_vector_head = mp; |
| 5376 | + } |
| 5377 | + else |
| 5378 | + { |
| 5379 | + mp->next = min_mp->next; |
| 5380 | + mp->prev = min_mp; |
| 5381 | + min_mp->next = mp; |
| 5382 | + |
| 5383 | + if (mp->next != NULL) |
| 5384 | + mp->next->prev = mp; |
| 5385 | + else |
| 5386 | + minipool_vector_tail = mp; |
| 5387 | + } |
| 5388 | + |
| 5389 | + /* Save the new entry. */ |
| 5390 | + min_mp = mp; |
| 5391 | + |
| 5392 | + if (mp->prev) |
| 5393 | + mp = mp->prev; |
| 5394 | + else |
| 5395 | + mp->offset = 0; |
| 5396 | + |
| 5397 | + /* Scan over the following entries and adjust their offsets. */ |
| 5398 | + while (mp->next != NULL) |
| 5399 | + { |
| 5400 | + if (mp->next->min_address < mp->min_address + mp->fix_size) |
| 5401 | + mp->next->min_address = mp->min_address + mp->fix_size; |
| 5402 | + |
| 5403 | + if (mp->refcount) |
| 5404 | + mp->next->offset = mp->offset + mp->fix_size; |
| 5405 | + else |
| 5406 | + mp->next->offset = mp->offset; |
| 5407 | + |
| 5408 | + mp = mp->next; |
| 5409 | + } |
| 5410 | + |
| 5411 | + return min_mp; |
| 5412 | +} |
| 5413 | + |
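|      | +/* Assign an offset to every live entry in the minipool, relative to
|      | +   the barrier BARRIER after which the pool will be emitted. Entries
|      | +   whose refcount has dropped to zero take no room. */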
| 5414 | +static void |
| 5415 | +assign_minipool_offsets (Mfix * barrier) |
| 5416 | +{ |
| 5417 | + HOST_WIDE_INT offset = 0; |
| 5418 | + Mnode *mp; |
| 5419 | + |
| 5420 | + minipool_barrier = barrier; |
| 5421 | + |
| 5422 | + for (mp = minipool_vector_head; mp != NULL; mp = mp->next) |
| 5423 | + { |
| 5424 | + mp->offset = offset; |
| 5425 | + |
| 5426 | + if (mp->refcount > 0) |
| 5427 | + offset += mp->fix_size; |
| 5428 | + } |
| 5429 | +} |
| 5430 | + |
| 5431 | +/* Print a symbolic form of X to the debug file, F. */ |
| 5432 | +static void |
| 5433 | +avr32_print_value (FILE * f, rtx x) |
| 5434 | +{ |
| 5435 | + switch (GET_CODE (x)) |
| 5436 | + { |
| 5437 | + case CONST_INT: |
| 5438 | + fprintf (f, "0x%x", (int) INTVAL (x)); |
| 5439 | + return; |
| 5440 | + |
| 5441 | + case CONST_DOUBLE: |
| 5442 | + fprintf (f, "<0x%lx,0x%lx>", (long) XWINT (x, 2), (long) XWINT (x, 3)); |
| 5443 | + return; |
| 5444 | + |
| 5445 | + case CONST_VECTOR: |
| 5446 | + { |
| 5447 | + int i; |
| 5448 | + |
| 5449 | + fprintf (f, "<"); |
| 5450 | + for (i = 0; i < CONST_VECTOR_NUNITS (x); i++) |
| 5451 | + { |
| 5452 | + fprintf (f, "0x%x", (int) INTVAL (CONST_VECTOR_ELT (x, i))); |
| 5453 | + if (i < (CONST_VECTOR_NUNITS (x) - 1)) |
| 5454 | + fputc (',', f); |
| 5455 | + } |
| 5456 | + fprintf (f, ">"); |
| 5457 | + } |
| 5458 | + return; |
| 5459 | + |
| 5460 | + case CONST_STRING: |
| 5461 | + fprintf (f, "\"%s\"", XSTR (x, 0)); |
| 5462 | + return; |
| 5463 | + |
| 5464 | + case SYMBOL_REF: |
| 5465 | + fprintf (f, "`%s'", XSTR (x, 0)); |
| 5466 | + return; |
| 5467 | + |
| 5468 | + case LABEL_REF: |
| 5469 | + fprintf (f, "L%d", INSN_UID (XEXP (x, 0))); |
| 5470 | + return; |
| 5471 | + |
| 5472 | + case CONST: |
| 5473 | + avr32_print_value (f, XEXP (x, 0)); |
| 5474 | + return; |
| 5475 | + |
| 5476 | + case PLUS: |
| 5477 | + avr32_print_value (f, XEXP (x, 0)); |
| 5478 | + fprintf (f, "+"); |
| 5479 | + avr32_print_value (f, XEXP (x, 1)); |
| 5480 | + return; |
| 5481 | + |
| 5482 | + case PC: |
| 5483 | + fprintf (f, "pc"); |
| 5484 | + return; |
| 5485 | + |
| 5486 | + default: |
| 5487 | + fprintf (f, "????"); |
| 5488 | + return; |
| 5489 | + } |
| 5490 | +} |
| 5491 | + |
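|      | +/* Return TRUE if LABEL is one of the code labels that has been emitted
|      | +   for a minipool in the current function (see new_minipool_label). */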
| 5492 | +int |
| 5493 | +is_minipool_label (rtx label) |
| 5494 | +{ |
| 5495 | + minipool_labels *cur_mp_label = cfun->machine->minipool_label_head; |
| 5496 | + |
| 5497 | + if (GET_CODE (label) != CODE_LABEL) |
| 5498 | + return FALSE; |
| 5499 | + |
| 5500 | + while (cur_mp_label) |
| 5501 | + { |
| 5502 | + if (CODE_LABEL_NUMBER (label) |
| 5503 | + == CODE_LABEL_NUMBER (cur_mp_label->label)) |
| 5504 | + return TRUE; |
| 5505 | + cur_mp_label = cur_mp_label->next; |
| 5506 | + } |
| 5507 | + return FALSE; |
| 5508 | +} |
| 5509 | + |
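|      | +/* Record LABEL in the per-function list of minipool labels so that
|      | +   is_minipool_label can recognize it later. */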
| 5510 | +static void |
| 5511 | +new_minipool_label (rtx label) |
| 5512 | +{ |
| 5513 | + if (!cfun->machine->minipool_label_head) |
| 5514 | + { |
| 5515 | + cfun->machine->minipool_label_head = |
| 5516 | + ggc_alloc (sizeof (minipool_labels)); |
| 5517 | + cfun->machine->minipool_label_tail = cfun->machine->minipool_label_head; |
| 5518 | + cfun->machine->minipool_label_head->label = label; |
| 5519 | + cfun->machine->minipool_label_head->next = 0; |
| 5520 | + cfun->machine->minipool_label_head->prev = 0; |
| 5521 | + } |
| 5522 | + else |
| 5523 | + { |
| 5524 | + cfun->machine->minipool_label_tail->next = |
| 5525 | + ggc_alloc (sizeof (minipool_labels)); |
| 5526 | + cfun->machine->minipool_label_tail->next->label = label; |
| 5527 | + cfun->machine->minipool_label_tail->next->next = 0; |
| 5528 | + cfun->machine->minipool_label_tail->next->prev = |
| 5529 | + cfun->machine->minipool_label_tail; |
| 5530 | + cfun->machine->minipool_label_tail = |
| 5531 | + cfun->machine->minipool_label_tail->next; |
| 5532 | + } |
| 5533 | +} |
| 5534 | + |
| 5535 | +/* Output the literal table */ |
| 5536 | +static void |
| 5537 | +dump_minipool (rtx scan) |
| 5538 | +{ |
| 5539 | + Mnode *mp; |
| 5540 | + Mnode *nmp; |
| 5541 | + |
| 5542 | + if (dump_file) |
| 5543 | + fprintf (dump_file, |
| 5544 | + ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n", |
| 5545 | + INSN_UID (scan), (unsigned long) minipool_barrier->address, 4); |
| 5546 | + |
| 5547 | + scan = emit_insn_after (gen_consttable_start (), scan); |
| 5548 | + scan = emit_insn_after (gen_align_4 (), scan); |
| 5549 | + scan = emit_label_after (minipool_vector_label, scan); |
| 5550 | + new_minipool_label (minipool_vector_label); |
| 5551 | + |
| 5552 | + for (mp = minipool_vector_head; mp != NULL; mp = nmp) |
| 5553 | + { |
| 5554 | + if (mp->refcount > 0) |
| 5555 | + { |
| 5556 | + if (dump_file) |
| 5557 | + { |
| 5558 | + fprintf (dump_file, |
| 5559 | + ";; Offset %u, min %ld, max %ld ", |
| 5560 | + (unsigned) mp->offset, (unsigned long) mp->min_address, |
| 5561 | + (unsigned long) mp->max_address); |
| 5562 | + avr32_print_value (dump_file, mp->value); |
| 5563 | + fputc ('\n', dump_file); |
| 5564 | + } |
| 5565 | + |
| 5566 | + switch (mp->fix_size) |
| 5567 | + { |
| 5568 | +#ifdef HAVE_consttable_4 |
| 5569 | + case 4: |
| 5570 | + scan = emit_insn_after (gen_consttable_4 (mp->value), scan); |
| 5571 | + break; |
| 5572 | + |
| 5573 | +#endif |
| 5574 | +#ifdef HAVE_consttable_8 |
| 5575 | + case 8: |
| 5576 | + scan = emit_insn_after (gen_consttable_8 (mp->value), scan); |
| 5577 | + break; |
| 5578 | + |
| 5579 | +#endif |
| 5580 | + case 0: |
| 5581 | +	    /* This can happen for force-minipool entries which are just
| 5582 | +	       there to force the minipool to be generated. */
| 5583 | + break; |
| 5584 | + default: |
| 5585 | + abort (); |
| 5586 | + break; |
| 5587 | + } |
| 5588 | + } |
| 5589 | + |
| 5590 | + nmp = mp->next; |
| 5591 | + free (mp); |
| 5592 | + } |
| 5593 | + |
| 5594 | + minipool_vector_head = minipool_vector_tail = NULL; |
| 5595 | + scan = emit_insn_after (gen_consttable_end (), scan); |
| 5596 | + scan = emit_barrier_after (scan); |
| 5597 | +} |
| 5598 | + |
| 5599 | +/* Return the cost of forcibly inserting a barrier after INSN. */ |
| 5600 | +static int |
| 5601 | +avr32_barrier_cost (rtx insn) |
| 5602 | +{ |
| 5603 | + /* Basing the location of the pool on the loop depth is preferable, but at |
| 5604 | + the moment, the basic block information seems to be corrupt by this |
| 5605 | + stage of the compilation. */ |
| 5606 | + int base_cost = 50; |
| 5607 | + rtx next = next_nonnote_insn (insn); |
| 5608 | + |
| 5609 | + if (next != NULL && GET_CODE (next) == CODE_LABEL) |
| 5610 | + base_cost -= 20; |
| 5611 | + |
| 5612 | + switch (GET_CODE (insn)) |
| 5613 | + { |
| 5614 | + case CODE_LABEL: |
| 5615 | + /* It will always be better to place the table before the label, rather |
| 5616 | + than after it. */ |
| 5617 | + return 50; |
| 5618 | + |
| 5619 | + case INSN: |
| 5620 | + case CALL_INSN: |
| 5621 | + return base_cost; |
| 5622 | + |
| 5623 | + case JUMP_INSN: |
| 5624 | + return base_cost - 10; |
| 5625 | + |
| 5626 | + default: |
| 5627 | + return base_cost + 10; |
| 5628 | + } |
| 5629 | +} |
| 5630 | + |
| 5631 | +/* Find the best place in the insn stream in the range |
| 5632 | + (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier. |
| 5633 | + Create the barrier by inserting a jump and add a new fix entry for |
| 5634 | + it. */ |
| 5635 | +static Mfix * |
| 5636 | +create_fix_barrier (Mfix * fix, HOST_WIDE_INT max_address) |
| 5637 | +{ |
| 5638 | + HOST_WIDE_INT count = 0; |
| 5639 | + rtx barrier; |
| 5640 | + rtx from = fix->insn; |
| 5641 | + rtx selected = from; |
| 5642 | + int selected_cost; |
| 5643 | + HOST_WIDE_INT selected_address; |
| 5644 | + Mfix *new_fix; |
| 5645 | + HOST_WIDE_INT max_count = max_address - fix->address; |
| 5646 | + rtx label = gen_label_rtx (); |
| 5647 | + |
| 5648 | + selected_cost = avr32_barrier_cost (from); |
| 5649 | + selected_address = fix->address; |
| 5650 | + |
| 5651 | + while (from && count < max_count) |
| 5652 | + { |
| 5653 | + rtx tmp; |
| 5654 | + int new_cost; |
| 5655 | + |
| 5656 | + /* This code shouldn't have been called if there was a natural barrier |
| 5657 | + within range. */ |
| 5658 | + if (GET_CODE (from) == BARRIER) |
| 5659 | + abort (); |
| 5660 | + |
| 5661 | + /* Count the length of this insn. */ |
| 5662 | + count += get_attr_length (from); |
| 5663 | + |
| 5664 | + /* If there is a jump table, add its length. */ |
| 5665 | + tmp = is_jump_table (from); |
| 5666 | + if (tmp != NULL) |
| 5667 | + { |
| 5668 | + count += get_jump_table_size (tmp); |
| 5669 | + |
| 5670 | + /* Jump tables aren't in a basic block, so base the cost on the |
| 5671 | + dispatch insn. If we select this location, we will still put |
| 5672 | + the pool after the table. */ |
| 5673 | + new_cost = avr32_barrier_cost (from); |
| 5674 | + |
| 5675 | + if (count < max_count && new_cost <= selected_cost) |
| 5676 | + { |
| 5677 | + selected = tmp; |
| 5678 | + selected_cost = new_cost; |
| 5679 | + selected_address = fix->address + count; |
| 5680 | + } |
| 5681 | + |
| 5682 | + /* Continue after the dispatch table. */ |
| 5683 | + from = NEXT_INSN (tmp); |
| 5684 | + continue; |
| 5685 | + } |
| 5686 | + |
| 5687 | + new_cost = avr32_barrier_cost (from); |
| 5688 | + |
| 5689 | + if (count < max_count && new_cost <= selected_cost) |
| 5690 | + { |
| 5691 | + selected = from; |
| 5692 | + selected_cost = new_cost; |
| 5693 | + selected_address = fix->address + count; |
| 5694 | + } |
| 5695 | + |
| 5696 | + from = NEXT_INSN (from); |
| 5697 | + } |
| 5698 | + |
| 5699 | + /* Create a new JUMP_INSN that branches around a barrier. */ |
| 5700 | + from = emit_jump_insn_after (gen_jump (label), selected); |
| 5701 | + JUMP_LABEL (from) = label; |
| 5702 | + barrier = emit_barrier_after (from); |
| 5703 | + emit_label_after (label, barrier); |
| 5704 | + |
| 5705 | + /* Create a minipool barrier entry for the new barrier. */ |
| 5706 | + new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*new_fix)); |
| 5707 | + new_fix->insn = barrier; |
| 5708 | + new_fix->address = selected_address; |
| 5709 | + new_fix->next = fix->next; |
| 5710 | + fix->next = new_fix; |
| 5711 | + |
| 5712 | + return new_fix; |
| 5713 | +} |
| 5714 | + |
| 5715 | +/* Record that there is a natural barrier in the insn stream at |
| 5716 | + ADDRESS. */ |
| 5717 | +static void |
| 5718 | +push_minipool_barrier (rtx insn, HOST_WIDE_INT address) |
| 5719 | +{ |
| 5720 | + Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix)); |
| 5721 | + |
| 5722 | + fix->insn = insn; |
| 5723 | + fix->address = address; |
| 5724 | + |
| 5725 | + fix->next = NULL; |
| 5726 | + if (minipool_fix_head != NULL) |
| 5727 | + minipool_fix_tail->next = fix; |
| 5728 | + else |
| 5729 | + minipool_fix_head = fix; |
| 5730 | + |
| 5731 | + minipool_fix_tail = fix; |
| 5732 | +} |
| 5733 | + |
| 5734 | +/* Record INSN, which will need fixing up to load a value from the |
| 5735 | + minipool. ADDRESS is the offset of the insn since the start of the |
| 5736 | + function; LOC is a pointer to the part of the insn which requires |
| 5737 | + fixing; VALUE is the constant that must be loaded, which is of type |
| 5738 | + MODE. */ |
| 5739 | +static void |
| 5740 | +push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx * loc, |
| 5741 | + enum machine_mode mode, rtx value) |
| 5742 | +{ |
| 5743 | + Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix)); |
| 5744 | + rtx body = PATTERN (insn); |
| 5745 | + |
| 5746 | + fix->insn = insn; |
| 5747 | + fix->address = address; |
| 5748 | + fix->loc = loc; |
| 5749 | + fix->mode = mode; |
| 5750 | + fix->fix_size = MINIPOOL_FIX_SIZE (mode, value); |
| 5751 | + fix->value = value; |
| 5752 | + |
| 5753 | + if (GET_CODE (body) == PARALLEL) |
| 5754 | + { |
| 5755 | + /* Mcall : Ks16 << 2 */ |
| 5756 | + fix->forwards = ((1 << 15) - 1) << 2; |
| 5757 | + fix->backwards = (1 << 15) << 2; |
| 5758 | + } |
| 5759 | + else if (GET_CODE (body) == SET |
| 5760 | + && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 4) |
| 5761 | + { |
| 5762 | + /* Word Load */ |
| 5763 | + if (TARGET_HARD_FLOAT |
| 5764 | + && GET_MODE_CLASS (GET_MODE (SET_DEST (body))) == MODE_FLOAT) |
| 5765 | + { |
| 5766 | + /* Ldc0.w : Ku12 << 2 */ |
| 5767 | + fix->forwards = ((1 << 12) - 1) << 2; |
| 5768 | + fix->backwards = 0; |
| 5769 | + } |
| 5770 | + else |
| 5771 | + { |
| 5772 | + if (optimize_size) |
| 5773 | + { |
| 5774 | + /* Lddpc : Ku7 << 2 */ |
| 5775 | + fix->forwards = ((1 << 7) - 1) << 2; |
| 5776 | + fix->backwards = 0; |
| 5777 | + } |
| 5778 | + else |
| 5779 | + { |
| 5780 | + /* Ld.w : Ks16 */ |
| 5781 | + fix->forwards = ((1 << 15) - 4); |
| 5782 | + fix->backwards = (1 << 15); |
| 5783 | + } |
| 5784 | + } |
| 5785 | + } |
| 5786 | + else if (GET_CODE (body) == SET |
| 5787 | + && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 8) |
| 5788 | + { |
| 5789 | + /* Double word load */ |
| 5790 | + if (TARGET_HARD_FLOAT |
| 5791 | + && GET_MODE_CLASS (GET_MODE (SET_DEST (body))) == MODE_FLOAT) |
| 5792 | + { |
| 5793 | + /* Ldc0.d : Ku12 << 2 */ |
| 5794 | + fix->forwards = ((1 << 12) - 1) << 2; |
| 5795 | + fix->backwards = 0; |
| 5796 | + } |
| 5797 | + else |
| 5798 | + { |
| 5799 | + /* Ld.d : Ks16 */ |
| 5800 | + fix->forwards = ((1 << 15) - 4); |
| 5801 | + fix->backwards = (1 << 15); |
| 5802 | + } |
| 5803 | + } |
| 5804 | + else if (GET_CODE (body) == UNSPEC_VOLATILE |
| 5805 | + && XINT (body, 1) == VUNSPEC_MVRC) |
| 5806 | + { |
| 5807 | + /* Coprocessor load */ |
| 5808 | + /* Ldc : Ku8 << 2 */ |
| 5809 | + fix->forwards = ((1 << 8) - 1) << 2; |
| 5810 | + fix->backwards = 0; |
| 5811 | + } |
| 5812 | + else |
| 5813 | + { |
| 5814 | + /* Assume worst case which is lddpc insn. */ |
| 5815 | + fix->forwards = ((1 << 7) - 1) << 2; |
| 5816 | + fix->backwards = 0; |
| 5817 | + } |
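|      | +
|      | +  /* To summarize the ranges chosen above: mcall fixes reach roughly
|      | +     128k bytes in both directions (Ks16 << 2), ld.w and ld.d reach
|      | +     about 32k bytes in both directions (Ks16), the coprocessor loads
|      | +     ldc0.w, ldc0.d and ldc reach forwards only (Ku12 << 2 and
|      | +     Ku8 << 2), and the worst-case lddpc reaches just 508 bytes
|      | +     forwards (Ku7 << 2). */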
| 5818 | + |
| 5819 | + fix->minipool = NULL; |
| 5820 | + |
| 5821 | + /* If an insn doesn't have a range defined for it, then it isn't expecting |
| 5822 | + to be reworked by this code. Better to abort now than to generate duff |
| 5823 | + assembly code. */ |
| 5824 | + if (fix->forwards == 0 && fix->backwards == 0) |
| 5825 | + abort (); |
| 5826 | + |
| 5827 | + if (dump_file) |
| 5828 | + { |
| 5829 | + fprintf (dump_file, |
| 5830 | + ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ", |
| 5831 | + GET_MODE_NAME (mode), |
| 5832 | + INSN_UID (insn), (unsigned long) address, |
| 5833 | + -1 * (long) fix->backwards, (long) fix->forwards); |
| 5834 | + avr32_print_value (dump_file, fix->value); |
| 5835 | + fprintf (dump_file, "\n"); |
| 5836 | + } |
| 5837 | + |
| 5838 | + /* Add it to the chain of fixes. */ |
| 5839 | + fix->next = NULL; |
| 5840 | + |
| 5841 | + if (minipool_fix_head != NULL) |
| 5842 | + minipool_fix_tail->next = fix; |
| 5843 | + else |
| 5844 | + minipool_fix_head = fix; |
| 5845 | + |
| 5846 | + minipool_fix_tail = fix; |
| 5847 | +} |
| 5848 | + |
| 5849 | +/* Scan INSN and note any of its operands that need fixing. |
| 5850 | + If DO_PUSHES is false we do not actually push any of the fixups |
| 5851 | +   needed. The function returns TRUE if any fixups were needed/pushed.
| 5852 | + This is used by avr32_memory_load_p() which needs to know about loads |
| 5853 | + of constants that will be converted into minipool loads. */ |
| 5854 | +static bool |
| 5855 | +note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes) |
| 5856 | +{ |
| 5857 | + bool result = false; |
| 5858 | + int opno; |
| 5859 | + |
| 5860 | + extract_insn (insn); |
| 5861 | + |
| 5862 | + if (!constrain_operands (1)) |
| 5863 | + fatal_insn_not_found (insn); |
| 5864 | + |
| 5865 | + if (recog_data.n_alternatives == 0) |
| 5866 | + return false; |
| 5867 | + |
| 5868 | + /* Fill in recog_op_alt with information about the constraints of this |
| 5869 | + insn. */ |
| 5870 | + preprocess_constraints (); |
| 5871 | + |
| 5872 | + for (opno = 0; opno < recog_data.n_operands; opno++) |
| 5873 | + { |
| 5874 | + rtx op; |
| 5875 | + |
| 5876 | + /* Things we need to fix can only occur in inputs. */ |
| 5877 | + if (recog_data.operand_type[opno] != OP_IN) |
| 5878 | + continue; |
| 5879 | + |
| 5880 | + op = recog_data.operand[opno]; |
| 5881 | + |
| 5882 | + if (avr32_const_pool_ref_operand (op, GET_MODE (op))) |
| 5883 | + { |
| 5884 | + if (do_pushes) |
| 5885 | + { |
| 5886 | + rtx cop = avoid_constant_pool_reference (op); |
| 5887 | + |
| 5888 | + /* Casting the address of something to a mode narrower than a |
| 5889 | + word can cause avoid_constant_pool_reference() to return the |
| 5890 | +		 pool reference itself. That's no good to us here. Let's
| 5891 | + just hope that we can use the constant pool value directly. |
| 5892 | + */ |
| 5893 | + if (op == cop) |
| 5894 | + cop = get_pool_constant (XEXP (op, 0)); |
| 5895 | + |
| 5896 | + push_minipool_fix (insn, address, |
| 5897 | + recog_data.operand_loc[opno], |
| 5898 | + recog_data.operand_mode[opno], cop); |
| 5899 | + } |
| 5900 | + |
| 5901 | + result = true; |
| 5902 | + } |
| 5903 | + else if (TARGET_HAS_ASM_ADDR_PSEUDOS |
| 5904 | + && avr32_address_operand (op, GET_MODE (op))) |
| 5905 | + { |
| 5906 | + /* Handle pseudo instructions using a direct address. These pseudo |
| 5907 | + instructions might need entries in the constant pool and we must |
| 5908 | +	     therefore create a constant pool for them, in case the
| 5909 | + assembler/linker needs to insert entries. */ |
| 5910 | + if (do_pushes) |
| 5911 | + { |
| 5912 | + /* Push a dummy constant pool entry so that the .cpool |
| 5913 | +		 directive will be inserted at the appropriate place in the
| 5914 | + code even if there are no real constant pool entries. This |
| 5915 | + is used by the assembler and linker to know where to put |
| 5916 | + generated constant pool entries. */ |
| 5917 | + push_minipool_fix (insn, address, |
| 5918 | + recog_data.operand_loc[opno], |
| 5919 | + recog_data.operand_mode[opno], |
| 5920 | + gen_rtx_UNSPEC (VOIDmode, |
| 5921 | + gen_rtvec (1, const0_rtx), |
| 5922 | + UNSPEC_FORCE_MINIPOOL)); |
| 5923 | + result = true; |
| 5924 | + } |
| 5925 | + } |
| 5926 | + } |
| 5927 | + return result; |
| 5928 | +} |
| 5929 | + |
| 5930 | + |
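|      | +/* Return TRUE if INSN is a simple cast operation, that is a single SET
|      | +   whose source is a ZERO_EXTEND or SIGN_EXTEND of a register and whose
|      | +   destination is a register, for example
|      | +   (set (reg:SI rd) (zero_extend:SI (reg:QI rs))). */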
| 5931 | +static int |
| 5932 | +avr32_insn_is_cast (rtx insn) |
| 5933 | +{ |
| 5934 | + |
| 5935 | + if (NONJUMP_INSN_P (insn) |
| 5936 | + && GET_CODE (PATTERN (insn)) == SET |
| 5937 | + && (GET_CODE (SET_SRC (PATTERN (insn))) == ZERO_EXTEND |
| 5938 | + || GET_CODE (SET_SRC (PATTERN (insn))) == SIGN_EXTEND) |
| 5939 | + && REG_P (XEXP (SET_SRC (PATTERN (insn)), 0)) |
| 5940 | + && REG_P (SET_DEST (PATTERN (insn)))) |
| 5941 | + return true; |
| 5942 | + return false; |
| 5943 | +} |
| 5944 | + |
| 5945 | +/* FIXME: The level of nesting in this function is way too deep. It needs to be |
| 5946 | + torn apart. */ |
| 5947 | +static void |
| 5948 | +avr32_reorg_optimization (void) |
| 5949 | +{ |
| 5950 | + rtx first = get_insns (); |
| 5951 | + rtx insn; |
| 5952 | + |
| 5953 | + if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0))) |
| 5954 | + { |
| 5955 | + |
| 5956 | + /* Scan through all insns looking for cast operations. */ |
| 5957 | + if (dump_file) |
| 5958 | + { |
| 5959 | + fprintf (dump_file, ";; Deleting redundant cast operations:\n"); |
| 5960 | + } |
| 5961 | + for (insn = first; insn; insn = NEXT_INSN (insn)) |
| 5962 | + { |
| 5963 | + rtx reg, src_reg, scan; |
| 5964 | + enum machine_mode mode; |
| 5965 | + int unused_cast; |
| 5966 | + rtx label_ref; |
| 5967 | + |
| 5968 | + if (avr32_insn_is_cast (insn) |
| 5969 | + && (GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == QImode |
| 5970 | + || GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == HImode)) |
| 5971 | + { |
| 5972 | + mode = GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)); |
| 5973 | + reg = SET_DEST (PATTERN (insn)); |
| 5974 | + src_reg = XEXP (SET_SRC (PATTERN (insn)), 0); |
| 5975 | + } |
| 5976 | + else |
| 5977 | + { |
| 5978 | + continue; |
| 5979 | + } |
| 5980 | + |
| 5981 | + unused_cast = false; |
| 5982 | + label_ref = NULL_RTX; |
| 5983 | + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan)) |
| 5984 | + { |
| 5985 | + /* Check if we have reached the destination of a simple |
| 5986 | + conditional jump which we have already scanned past. If so, |
| 5987 | + we can safely continue scanning. */ |
| 5988 | + if (LABEL_P (scan) && label_ref != NULL_RTX) |
| 5989 | + { |
| 5990 | + if (CODE_LABEL_NUMBER (scan) == |
| 5991 | + CODE_LABEL_NUMBER (XEXP (label_ref, 0))) |
| 5992 | + label_ref = NULL_RTX; |
| 5993 | + else |
| 5994 | + break; |
| 5995 | + } |
| 5996 | + |
| 5997 | + if (!INSN_P (scan)) |
| 5998 | + continue; |
| 5999 | + |
| 6000 | + /* For conditional jumps we can manage to keep on scanning if |
| 6001 | + we meet the destination label later on before any new jump |
| 6002 | +	         insns occur. */
| 6003 | + if (GET_CODE (scan) == JUMP_INSN) |
| 6004 | + { |
| 6005 | + if (any_condjump_p (scan) && label_ref == NULL_RTX) |
| 6006 | + label_ref = condjump_label (scan); |
| 6007 | + else |
| 6008 | + break; |
| 6009 | + } |
| 6010 | + |
| 6011 | + if (!reg_mentioned_p (reg, PATTERN (scan))) |
| 6012 | + continue; |
| 6013 | + |
| 6014 | + /* Check if casted register is used in this insn */ |
| 6015 | + if ((regno_use_in (REGNO (reg), PATTERN (scan)) != NULL_RTX) |
| 6016 | + && (GET_MODE (regno_use_in (REGNO (reg), PATTERN (scan))) == |
| 6017 | + GET_MODE (reg))) |
| 6018 | + { |
| 6019 | + /* If not used in the source to the set or in a memory |
| 6020 | +		     expression in the destination, then the register is used
| 6021 | + as a destination and is really dead. */ |
| 6022 | + if (single_set (scan) |
| 6023 | + && GET_CODE (PATTERN (scan)) == SET |
| 6024 | + && REG_P (SET_DEST (PATTERN (scan))) |
| 6025 | + && !regno_use_in (REGNO (reg), SET_SRC (PATTERN (scan))) |
| 6026 | + && label_ref == NULL_RTX) |
| 6027 | + { |
| 6028 | + unused_cast = true; |
| 6029 | + } |
| 6030 | + break; |
| 6031 | + } |
| 6032 | + |
| 6033 | + /* Check if register is dead or set in this insn */ |
| 6034 | + if (dead_or_set_p (scan, reg)) |
| 6035 | + { |
| 6036 | + unused_cast = true; |
| 6037 | + break; |
| 6038 | + } |
| 6039 | + } |
| 6040 | + |
| 6041 | + /* Check if we have unresolved conditional jumps */ |
| 6042 | + if (label_ref != NULL_RTX) |
| 6043 | + continue; |
| 6044 | + |
| 6045 | + if (unused_cast) |
| 6046 | + { |
| 6047 | + if (REGNO (reg) == REGNO (XEXP (SET_SRC (PATTERN (insn)), 0))) |
| 6048 | + { |
| 6049 | + /* One operand cast, safe to delete */ |
| 6050 | + if (dump_file) |
| 6051 | + { |
| 6052 | + fprintf (dump_file, |
| 6053 | + ";; INSN %i removed, casted register %i value not used.\n", |
| 6054 | + INSN_UID (insn), REGNO (reg)); |
| 6055 | + } |
| 6056 | + SET_INSN_DELETED (insn); |
| 6057 | + /* Force the instruction to be recognized again */ |
| 6058 | + INSN_CODE (insn) = -1; |
| 6059 | + } |
| 6060 | + else |
| 6061 | + { |
| 6062 | +		  /* Two operand cast, which really could be substituted
| 6063 | +		     with a move: if the source register is dead after the
| 6064 | +		     cast insn, the insn which sets the source register
| 6065 | +		     could instead directly set the destination register of
| 6066 | +		     the cast, as long as no insn in between uses the source
| 6067 | +		     register. */
| 6068 | + rtx link = NULL_RTX; |
| 6069 | + rtx set; |
| 6070 | + rtx src_reg = XEXP (SET_SRC (PATTERN (insn)), 0); |
| 6071 | + unused_cast = false; |
| 6072 | + |
| 6073 | + if (!find_reg_note (insn, REG_DEAD, src_reg)) |
| 6074 | + continue; |
| 6075 | + |
| 6076 | + /* Search for the insn which sets the source register */ |
| 6077 | + for (link = LOG_LINKS (insn); link; link = XEXP (link, 1)) |
| 6078 | + { |
| 6079 | + if (REG_NOTE_KIND (link) != 0) |
| 6080 | + continue; |
| 6081 | + set = single_set (XEXP (link, 0)); |
| 6082 | + if (set && rtx_equal_p (src_reg, SET_DEST (set))) |
| 6083 | + { |
| 6084 | + link = XEXP (link, 0); |
| 6085 | + break; |
| 6086 | + } |
| 6087 | + } |
| 6088 | + |
| 6089 | +		  /* Found no link, or the link is a call insn where we cannot
| 6090 | + change the destination register */ |
| 6091 | + if (link == NULL_RTX || CALL_P (link)) |
| 6092 | + continue; |
| 6093 | + |
| 6094 | +		  /* Scan through all insns between link and insn */
| 6095 | + for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan)) |
| 6096 | + { |
| 6097 | + /* Don't try to trace forward past a CODE_LABEL if we |
| 6098 | + haven't seen INSN yet. Ordinarily, we will only |
| 6099 | + find the setting insn in LOG_LINKS if it is in the |
| 6100 | + same basic block. However, cross-jumping can insert |
| 6101 | + code labels in between the load and the call, and |
| 6102 | + can result in situations where a single call insn |
| 6103 | + may have two targets depending on where we came |
| 6104 | + from. */ |
| 6105 | + |
| 6106 | + if (GET_CODE (scan) == CODE_LABEL) |
| 6107 | + break; |
| 6108 | + |
| 6109 | + if (!INSN_P (scan)) |
| 6110 | + continue; |
| 6111 | + |
| 6112 | + /* Don't try to trace forward past a JUMP. To optimize |
| 6113 | + safely, we would have to check that all the |
| 6114 | + instructions at the jump destination did not use REG. |
| 6115 | + */ |
| 6116 | + |
| 6117 | + if (GET_CODE (scan) == JUMP_INSN) |
| 6118 | + { |
| 6119 | + break; |
| 6120 | + } |
| 6121 | + |
| 6122 | + if (!reg_mentioned_p (src_reg, PATTERN (scan))) |
| 6123 | + continue; |
| 6124 | + |
| 6125 | + /* We have reached the cast insn */ |
| 6126 | + if (scan == insn) |
| 6127 | + { |
| 6128 | + /* We can remove cast and replace the destination |
| 6129 | + register of the link insn with the destination |
| 6130 | + of the cast */ |
| 6131 | + if (dump_file) |
| 6132 | + { |
| 6133 | + fprintf (dump_file, |
| 6134 | + ";; INSN %i removed, casted value unused. " |
| 6135 | + "Destination of removed cast operation: register %i, folded into INSN %i.\n", |
| 6136 | + INSN_UID (insn), REGNO (reg), |
| 6137 | + INSN_UID (link)); |
| 6138 | + } |
| 6139 | + /* Update link insn */ |
| 6140 | + SET_DEST (PATTERN (link)) = |
| 6141 | + gen_rtx_REG (mode, REGNO (reg)); |
| 6142 | + /* Force the instruction to be recognized again */ |
| 6143 | + INSN_CODE (link) = -1; |
| 6144 | + |
| 6145 | + /* Delete insn */ |
| 6146 | + SET_INSN_DELETED (insn); |
| 6147 | + /* Force the instruction to be recognized again */ |
| 6148 | + INSN_CODE (insn) = -1; |
| 6149 | + break; |
| 6150 | + } |
| 6151 | + } |
| 6152 | + } |
| 6153 | + } |
| 6154 | + } |
| 6155 | + } |
| 6156 | + |
| 6157 | + if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0))) |
| 6158 | + { |
| 6159 | + |
| 6160 | + /* Scan through all insns looking for shifted add operations */ |
| 6161 | + if (dump_file) |
| 6162 | + { |
| 6163 | + fprintf (dump_file, |
| 6164 | + ";; Deleting redundant shifted add operations:\n"); |
| 6165 | + } |
| 6166 | + for (insn = first; insn; insn = NEXT_INSN (insn)) |
| 6167 | + { |
| 6168 | + rtx reg, mem_expr, scan, op0, op1; |
| 6169 | + int add_only_used_as_pointer; |
| 6170 | + |
| 6171 | + if (INSN_P (insn) |
| 6172 | + && GET_CODE (PATTERN (insn)) == SET |
| 6173 | + && GET_CODE (SET_SRC (PATTERN (insn))) == PLUS |
| 6174 | + && (GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == MULT |
| 6175 | + || GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == ASHIFT) |
| 6176 | + && GET_CODE (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1)) == |
| 6177 | + CONST_INT && REG_P (SET_DEST (PATTERN (insn))) |
| 6178 | + && REG_P (XEXP (SET_SRC (PATTERN (insn)), 1)) |
| 6179 | + && REG_P (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0))) |
| 6180 | + { |
| 6181 | + reg = SET_DEST (PATTERN (insn)); |
| 6182 | + mem_expr = SET_SRC (PATTERN (insn)); |
| 6183 | + op0 = XEXP (XEXP (mem_expr, 0), 0); |
| 6184 | + op1 = XEXP (mem_expr, 1); |
| 6185 | + } |
| 6186 | + else |
| 6187 | + { |
| 6188 | + continue; |
| 6189 | + } |
| 6190 | + |
| 6191 | +	  /* Scan forward to check that the result of the shifted add
| 6192 | + operation is only used as an address in memory operations and |
| 6193 | + that the operands to the shifted add are not clobbered. */ |
| 6194 | + add_only_used_as_pointer = false; |
| 6195 | + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan)) |
| 6196 | + { |
| 6197 | + if (!INSN_P (scan)) |
| 6198 | + continue; |
| 6199 | + |
| 6200 | + /* Don't try to trace forward past a JUMP or CALL. To optimize |
| 6201 | + safely, we would have to check that all the instructions at |
| 6202 | + the jump destination did not use REG. */ |
| 6203 | + |
| 6204 | + if (GET_CODE (scan) == JUMP_INSN) |
| 6205 | + { |
| 6206 | + break; |
| 6207 | + } |
| 6208 | + |
| 6209 | + /* If used in a call insn then we cannot optimize it away */ |
| 6210 | + if (CALL_P (scan) && find_regno_fusage (scan, USE, REGNO (reg))) |
| 6211 | + break; |
| 6212 | + |
| 6213 | +	      /* If any of the operands of the shifted add are clobbered we
| 6214 | +	         cannot optimize the shifted add away */
| 6215 | + if ((reg_set_p (op0, scan) && (REGNO (op0) != REGNO (reg))) |
| 6216 | + || (reg_set_p (op1, scan) && (REGNO (op1) != REGNO (reg)))) |
| 6217 | + break; |
| 6218 | + |
| 6219 | + if (!reg_mentioned_p (reg, PATTERN (scan))) |
| 6220 | + continue; |
| 6221 | + |
| 6222 | +	      /* If the register is used anywhere other than as a pointer or
| 6223 | +	         as the destination register, we fail */
| 6224 | +	      if (!(single_set (scan)
| 6225 | +		    && GET_CODE (PATTERN (scan)) == SET
| 6226 | +		    && ((MEM_P (SET_DEST (PATTERN (scan)))
| 6227 | +			 && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
| 6228 | +			 && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) == REGNO (reg))
| 6229 | +			|| (MEM_P (SET_SRC (PATTERN (scan)))
| 6230 | +			    && REG_P (XEXP (SET_SRC (PATTERN (scan)), 0))
| 6231 | +			    && REGNO (XEXP (SET_SRC (PATTERN (scan)), 0)) == REGNO (reg))))
| 6232 | +		  && !(GET_CODE (PATTERN (scan)) == SET
| 6233 | +		       && REG_P (SET_DEST (PATTERN (scan)))
| 6234 | +		       && !regno_use_in (REGNO (reg),
| 6235 | +					 SET_SRC (PATTERN (scan)))))
| 6236 | +		break;
| 6243 | + |
| 6244 | + /* Check if register is dead or set in this insn */ |
| 6245 | + if (dead_or_set_p (scan, reg)) |
| 6246 | + { |
| 6247 | + add_only_used_as_pointer = true; |
| 6248 | + break; |
| 6249 | + } |
| 6250 | + } |
| 6251 | + |
| 6252 | + if (add_only_used_as_pointer) |
| 6253 | + { |
| 6254 | +	  /* Let's delete the add insn and replace all memory references
| 6255 | +	     that use the pointer with the full expression. */
| 6256 | + if (dump_file) |
| 6257 | + { |
| 6258 | + fprintf (dump_file, |
| 6259 | + ";; Deleting INSN %i since address expression can be folded into all " |
| 6260 | + "memory references using this expression\n", |
| 6261 | + INSN_UID (insn)); |
| 6262 | + } |
| 6263 | + SET_INSN_DELETED (insn); |
| 6264 | + /* Force the instruction to be recognized again */ |
| 6265 | + INSN_CODE (insn) = -1; |
| 6266 | + |
| 6267 | + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan)) |
| 6268 | + { |
| 6269 | + if (!INSN_P (scan)) |
| 6270 | + continue; |
| 6271 | + |
| 6272 | + if (!reg_mentioned_p (reg, PATTERN (scan))) |
| 6273 | + continue; |
| 6274 | + |
| 6275 | +	      /* If the register is used as a pointer in a memory reference,
| 6276 | +	         replace it with the full address expression */
| 6277 | +	      if ((single_set (scan)
| 6278 | +		   && GET_CODE (PATTERN (scan)) == SET
| 6279 | +		   && ((MEM_P (SET_DEST (PATTERN (scan)))
| 6280 | +			&& REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
| 6281 | +			&& REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) == REGNO (reg))
| 6282 | +		       || (MEM_P (SET_SRC (PATTERN (scan)))
| 6283 | +			   && REG_P (XEXP (SET_SRC (PATTERN (scan)), 0))
| 6284 | +			   && REGNO (XEXP (SET_SRC (PATTERN (scan)), 0)) == REGNO (reg)))))
| 6291 | + { |
| 6292 | + if (dump_file) |
| 6293 | + { |
| 6294 | + fprintf (dump_file, |
| 6295 | + ";; Register %i replaced by indexed address in INSN %i\n", |
| 6296 | + REGNO (reg), INSN_UID (scan)); |
| 6297 | + } |
| 6298 | + if (MEM_P (SET_DEST (PATTERN (scan)))) |
| 6299 | + XEXP (SET_DEST (PATTERN (scan)), 0) = mem_expr; |
| 6300 | + else |
| 6301 | + XEXP (SET_SRC (PATTERN (scan)), 0) = mem_expr; |
| 6302 | + } |
| 6303 | + |
| 6304 | + /* Check if register is dead or set in this insn */ |
| 6305 | + if (dead_or_set_p (scan, reg)) |
| 6306 | + { |
| 6307 | + break; |
| 6308 | + } |
| 6309 | + |
| 6310 | + } |
| 6311 | + } |
| 6312 | + } |
| 6313 | + } |
| 6314 | +} |
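|  | +
|  | +/* Illustrative sketch (not part of the original port): the pass above is
|  | +   intended to turn a sequence such as
|  | +
|  | +       r3 = r2 + (r1 << 2);      // shifted add
|  | +       ... = *r3;                // r3 only ever used as a pointer
|  | +
|  | +   into a single memory reference using the indexed address r2[r1 << 2],
|  | +   after which the shifted add itself is deleted.  The register names
|  | +   r1-r3 are invented for the example.  */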
| 6315 | + |
| 6316 | +/* Do a final pass over the function, just before delayed branch
| 6317 | +   scheduling. */
| 6320 | + |
| 6321 | +static void |
| 6322 | +avr32_reorg (void) |
| 6323 | +{ |
| 6324 | + rtx insn; |
| 6325 | + HOST_WIDE_INT address = 0; |
| 6326 | + Mfix *fix; |
| 6327 | + |
| 6328 | + minipool_fix_head = minipool_fix_tail = NULL; |
| 6329 | + |
| 6330 | + /* The first insn must always be a note, or the code below won't scan it |
| 6331 | + properly. */ |
| 6332 | + insn = get_insns (); |
| 6333 | + if (GET_CODE (insn) != NOTE) |
| 6334 | + abort (); |
| 6335 | + |
| 6336 | + /* Scan all the insns and record the operands that will need fixing. */ |
| 6337 | + for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn)) |
| 6338 | + { |
| 6339 | + if (GET_CODE (insn) == BARRIER) |
| 6340 | + push_minipool_barrier (insn, address); |
| 6341 | + else if (INSN_P (insn)) |
| 6342 | + { |
| 6343 | + rtx table; |
| 6344 | + |
| 6345 | + note_invalid_constants (insn, address, true); |
| 6346 | + address += get_attr_length (insn); |
| 6347 | + |
| 6348 | + /* If the insn is a vector jump, add the size of the table and skip |
| 6349 | + the table. */ |
| 6350 | + if ((table = is_jump_table (insn)) != NULL) |
| 6351 | + { |
| 6352 | + address += get_jump_table_size (table); |
| 6353 | + insn = table; |
| 6354 | + } |
| 6355 | + } |
| 6356 | + } |
| 6357 | + |
| 6358 | + fix = minipool_fix_head; |
| 6359 | + |
| 6360 | + /* Now scan the fixups and perform the required changes. */ |
| 6361 | + while (fix) |
| 6362 | + { |
| 6363 | + Mfix *ftmp; |
| 6364 | + Mfix *fdel; |
| 6365 | + Mfix *last_added_fix; |
| 6366 | + Mfix *last_barrier = NULL; |
| 6367 | + Mfix *this_fix; |
| 6368 | + |
| 6369 | + /* Skip any further barriers before the next fix. */ |
| 6370 | + while (fix && GET_CODE (fix->insn) == BARRIER) |
| 6371 | + fix = fix->next; |
| 6372 | + |
| 6373 | + /* No more fixes. */ |
| 6374 | + if (fix == NULL) |
| 6375 | + break; |
| 6376 | + |
| 6377 | + last_added_fix = NULL; |
| 6378 | + |
| 6379 | + for (ftmp = fix; ftmp; ftmp = ftmp->next) |
| 6380 | + { |
| 6381 | + if (GET_CODE (ftmp->insn) == BARRIER) |
| 6382 | + { |
| 6383 | + if (ftmp->address >= minipool_vector_head->max_address) |
| 6384 | + break; |
| 6385 | + |
| 6386 | + last_barrier = ftmp; |
| 6387 | + } |
| 6388 | + else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL) |
| 6389 | + break; |
| 6390 | + |
| 6391 | +	  last_added_fix = ftmp;	/* Keep track of the last fix added.  */
| 6393 | + } |
| 6394 | + |
| 6395 | + /* If we found a barrier, drop back to that; any fixes that we could |
| 6396 | + have reached but come after the barrier will now go in the next |
| 6397 | + mini-pool. */ |
| 6398 | + if (last_barrier != NULL) |
| 6399 | + { |
| 6400 | + /* Reduce the refcount for those fixes that won't go into this pool |
| 6401 | + after all. */ |
| 6402 | + for (fdel = last_barrier->next; |
| 6403 | + fdel && fdel != ftmp; fdel = fdel->next) |
| 6404 | + { |
| 6405 | + fdel->minipool->refcount--; |
| 6406 | + fdel->minipool = NULL; |
| 6407 | + } |
| 6408 | + |
| 6409 | + ftmp = last_barrier; |
| 6410 | + } |
| 6411 | + else |
| 6412 | + { |
| 6413 | +	  /* ftmp is the first fix that we can't fit into this pool and there
| 6414 | +	     are no natural barriers that we could use. Insert a new barrier
| 6415 | +	     in the code somewhere between the previous fix and this one, and
| 6416 | +	     arrange to jump around it. */
| 6417 | + HOST_WIDE_INT max_address; |
| 6418 | + |
| 6419 | + /* The last item on the list of fixes must be a barrier, so we can |
| 6420 | + never run off the end of the list of fixes without last_barrier |
| 6421 | + being set. */ |
| 6422 | + if (ftmp == NULL) |
| 6423 | + abort (); |
| 6424 | + |
| 6425 | + max_address = minipool_vector_head->max_address; |
| 6426 | + /* Check that there isn't another fix that is in range that we |
| 6427 | + couldn't fit into this pool because the pool was already too |
| 6428 | + large: we need to put the pool before such an instruction. */ |
| 6429 | + if (ftmp->address < max_address) |
| 6430 | + max_address = ftmp->address; |
| 6431 | + |
| 6432 | + last_barrier = create_fix_barrier (last_added_fix, max_address); |
| 6433 | + } |
| 6434 | + |
| 6435 | + assign_minipool_offsets (last_barrier); |
| 6436 | + |
| 6437 | + while (ftmp) |
| 6438 | + { |
| 6439 | + if (GET_CODE (ftmp->insn) != BARRIER |
| 6440 | + && ((ftmp->minipool = add_minipool_backward_ref (ftmp)) |
| 6441 | + == NULL)) |
| 6442 | + break; |
| 6443 | + |
| 6444 | + ftmp = ftmp->next; |
| 6445 | + } |
| 6446 | + |
| 6447 | + /* Scan over the fixes we have identified for this pool, fixing them up |
| 6448 | + and adding the constants to the pool itself. */ |
| 6449 | + for (this_fix = fix; this_fix && ftmp != this_fix; |
| 6450 | + this_fix = this_fix->next) |
| 6451 | + if (GET_CODE (this_fix->insn) != BARRIER |
| 6452 | + /* Do nothing for entries present just to force the insertion of |
| 6453 | + a minipool. */ |
| 6454 | + && !IS_FORCE_MINIPOOL (this_fix->value)) |
| 6455 | + { |
| 6456 | + rtx addr = plus_constant (gen_rtx_LABEL_REF (VOIDmode, |
| 6457 | + minipool_vector_label), |
| 6458 | + this_fix->minipool->offset); |
| 6459 | + *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr); |
| 6460 | + } |
| 6461 | + |
| 6462 | + dump_minipool (last_barrier->insn); |
| 6463 | + fix = ftmp; |
| 6464 | + } |
| 6465 | + |
| 6466 | + /* Free the minipool memory. */ |
| 6467 | + obstack_free (&minipool_obstack, minipool_startobj); |
| 6468 | + |
| 6469 | + avr32_reorg_optimization (); |
| 6470 | +} |
| 6471 | + |
| 6472 | + |
| 6473 | +/* Hook for doing some final scanning of instructions.  Does nothing yet. */
| 6475 | +void |
| 6476 | +avr32_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED, |
| 6477 | + rtx * opvec ATTRIBUTE_UNUSED, |
| 6478 | + int noperands ATTRIBUTE_UNUSED) |
| 6479 | +{ |
| 6480 | + return; |
| 6481 | +} |
| 6482 | + |
| 6483 | + |
| 6484 | + |
| 6485 | +int |
| 6486 | +avr32_expand_movcc (enum machine_mode mode, rtx operands[]) |
| 6487 | +{ |
| 6488 | + rtx operator; |
| 6489 | + rtx compare_op0 = avr32_compare_op0; |
| 6490 | + rtx compare_op1 = avr32_compare_op1; |
| 6491 | + |
| 6492 | + /* Only allow certain compare operations */ |
| 6493 | + if (GET_MODE (compare_op0) != DImode |
| 6494 | + && GET_MODE (compare_op0) != SImode |
| 6495 | + && GET_MODE (compare_op0) != HImode && GET_MODE (compare_op0) != QImode) |
| 6496 | + return FALSE; |
| 6497 | + |
| 6498 | + if (GET_CODE (compare_op0) == MEM) |
| 6499 | + { |
| 6500 | + if (no_new_pseudos) |
| 6501 | + return FALSE; |
| 6502 | + else |
| 6503 | + compare_op0 = force_reg (GET_MODE (compare_op0), compare_op0); |
| 6504 | + } |
| 6505 | + |
| 6506 | + if (GET_CODE (compare_op1) == MEM) |
| 6507 | + { |
| 6508 | + if (no_new_pseudos) |
| 6509 | + return FALSE; |
| 6510 | + else |
| 6511 | + compare_op1 = force_reg (GET_MODE (compare_op1), compare_op1); |
| 6512 | + } |
| 6513 | + |
| 6514 | + /* For DI, HI and QI mode force comparison operands to registers */ |
| 6515 | + if (GET_MODE (compare_op0) == DImode |
| 6516 | + || GET_MODE (compare_op0) == HImode || GET_MODE (compare_op0) == QImode) |
| 6517 | + { |
| 6518 | + if (GET_CODE (compare_op0) != REG) |
| 6519 | + { |
| 6520 | + if (no_new_pseudos) |
| 6521 | + return FALSE; |
| 6522 | + else |
| 6523 | + compare_op0 = force_reg (GET_MODE (compare_op0), compare_op0); |
| 6524 | + } |
| 6525 | + |
| 6526 | + if (GET_CODE (compare_op1) != REG) |
| 6527 | + { |
| 6528 | + if (no_new_pseudos) |
| 6529 | + return FALSE; |
| 6530 | + else |
| 6531 | + compare_op1 = force_reg (GET_MODE (compare_op0), compare_op1); |
| 6532 | + } |
| 6533 | + } |
| 6534 | + |
| 6535 | +  /* Force any immediate compare operands for SI, larger than what the
| 6536 | +     Ks21 constraint allows, to a register */
| 6537 | + if (GET_MODE (compare_op0) == SImode) |
| 6538 | + { |
| 6539 | + if ((GET_CODE (compare_op0) == CONST_INT |
| 6540 | + && !avr32_const_ok_for_constraint_p (INTVAL (compare_op0), 'K', |
| 6541 | + "Ks21"))) |
| 6542 | + { |
| 6543 | + if (no_new_pseudos) |
| 6544 | + return FALSE; |
| 6545 | + else |
| 6546 | + compare_op0 = force_reg (SImode, compare_op0); |
| 6547 | + } |
| 6548 | + |
| 6549 | + if ((GET_CODE (compare_op1) == CONST_INT |
| 6550 | + && !avr32_const_ok_for_constraint_p (INTVAL (compare_op1), 'K', |
| 6551 | + "Ks21"))) |
| 6552 | + { |
| 6553 | + if (no_new_pseudos) |
| 6554 | + return FALSE; |
| 6555 | + else |
| 6556 | + compare_op1 = force_reg (SImode, compare_op1); |
| 6557 | + } |
| 6558 | + } |
| 6559 | + |
| 6560 | + /* If we have immediates larger than can be allowed in conditional mov |
| 6561 | + instructions, force them to registers */ |
| 6562 | + if (GET_CODE (operands[2]) == CONST_INT |
| 6563 | + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08")) |
| 6564 | + { |
| 6565 | + if (no_new_pseudos) |
| 6566 | + return FALSE; |
| 6567 | + else |
| 6568 | + operands[2] = force_reg (mode, operands[2]); |
| 6569 | + } |
| 6570 | + |
| 6571 | + if (GET_CODE (operands[3]) == CONST_INT |
| 6572 | + && !avr32_const_ok_for_constraint_p (INTVAL (operands[3]), 'K', "Ks08")) |
| 6573 | + { |
| 6574 | + if (no_new_pseudos) |
| 6575 | + return FALSE; |
| 6576 | + else |
| 6577 | + operands[3] = force_reg (mode, operands[3]); |
| 6578 | + } |
| 6579 | + |
| 6580 | + /* Emit the actual instruction */ |
| 6581 | + operator = gen_rtx_EQ (VOIDmode, const0_rtx, const0_rtx); |
| 6582 | + PUT_CODE (operator, GET_CODE (operands[1])); |
| 6583 | + switch (mode) |
| 6584 | + { |
| 6585 | + case SImode: |
| 6586 | + switch (GET_MODE (compare_op0)) |
| 6587 | + { |
| 6588 | + case SImode: |
| 6589 | + emit_insn (gen_movsicc_cmpsi |
| 6590 | + (operands[0], operator, operands[2], operands[3], |
| 6591 | + compare_op0, compare_op1)); |
| 6592 | + break; |
| 6593 | + case DImode: |
| 6594 | + emit_insn (gen_movsicc_cmpdi |
| 6595 | + (operands[0], operator, operands[2], operands[3], |
| 6596 | + compare_op0, compare_op1)); |
| 6597 | + break; |
| 6598 | + case HImode: |
| 6599 | + emit_insn (gen_movsicc_cmphi |
| 6600 | + (operands[0], operator, operands[2], operands[3], |
| 6601 | + compare_op0, compare_op1)); |
| 6602 | + break; |
| 6603 | + case QImode: |
| 6604 | + emit_insn (gen_movsicc_cmpqi |
| 6605 | + (operands[0], operator, operands[2], operands[3], |
| 6606 | + compare_op0, compare_op1)); |
| 6607 | + break; |
| 6608 | + default: |
| 6609 | + return FALSE; |
| 6610 | + } |
| 6611 | + break; |
| 6612 | + case HImode: |
| 6613 | + switch (GET_MODE (compare_op0)) |
| 6614 | + { |
| 6615 | + case SImode: |
| 6616 | + emit_insn (gen_movhicc_cmpsi |
| 6617 | + (operands[0], operator, operands[2], operands[3], |
| 6618 | + compare_op0, compare_op1)); |
| 6619 | + break; |
| 6620 | + case DImode: |
| 6621 | + emit_insn (gen_movhicc_cmpdi |
| 6622 | + (operands[0], operator, operands[2], operands[3], |
| 6623 | + compare_op0, compare_op1)); |
| 6624 | + break; |
| 6625 | + case HImode: |
| 6626 | + emit_insn (gen_movhicc_cmphi |
| 6627 | + (operands[0], operator, operands[2], operands[3], |
| 6628 | + compare_op0, compare_op1)); |
| 6629 | + break; |
| 6630 | + case QImode: |
| 6631 | + emit_insn (gen_movhicc_cmpqi |
| 6632 | + (operands[0], operator, operands[2], operands[3], |
| 6633 | + compare_op0, compare_op1)); |
| 6634 | + break; |
| 6635 | + default: |
| 6636 | + return FALSE; |
| 6637 | + } |
| 6638 | + break; |
| 6639 | + case QImode: |
| 6640 | + switch (GET_MODE (compare_op0)) |
| 6641 | + { |
| 6642 | + case SImode: |
| 6643 | + emit_insn (gen_movqicc_cmpsi |
| 6644 | + (operands[0], operator, operands[2], operands[3], |
| 6645 | + compare_op0, compare_op1)); |
| 6646 | + break; |
| 6647 | + case DImode: |
| 6648 | + emit_insn (gen_movqicc_cmpdi |
| 6649 | + (operands[0], operator, operands[2], operands[3], |
| 6650 | + compare_op0, compare_op1)); |
| 6651 | + break; |
| 6652 | + case HImode: |
| 6653 | + emit_insn (gen_movqicc_cmphi |
| 6654 | + (operands[0], operator, operands[2], operands[3], |
| 6655 | + compare_op0, compare_op1)); |
| 6656 | + break; |
| 6657 | + case QImode: |
| 6658 | + emit_insn (gen_movqicc_cmpqi |
| 6659 | + (operands[0], operator, operands[2], operands[3], |
| 6660 | + compare_op0, compare_op1)); |
| 6661 | + break; |
| 6662 | + default: |
| 6663 | + return FALSE; |
| 6664 | + } |
| 6665 | + break; |
| 6666 | + default: |
| 6667 | + return FALSE; |
| 6668 | + } |
| 6669 | + |
| 6670 | + return TRUE; |
| 6671 | +} |
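|  | +
|  | +/* Note on the expanders above and below (an observation, not original
|  | +   documentation): avr32_expand_movcc and avr32_expand_addcc read the
|  | +   operands of the most recent comparison from the globals
|  | +   avr32_compare_op0/avr32_compare_op1, which are presumably stashed
|  | +   there by the compare expanders in avr32.md before the conditional
|  | +   move/add patterns are expanded.  */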
| 6672 | + |
| 6673 | + |
| 6674 | +int |
| 6675 | +avr32_expand_addcc (enum machine_mode mode, rtx operands[]) |
| 6676 | +{ |
| 6677 | + rtx operator; |
| 6678 | + rtx compare_op0 = avr32_compare_op0; |
| 6679 | + rtx compare_op1 = avr32_compare_op1; |
| 6680 | + |
| 6681 | +  /* Check if we have an add/sub with a Ks08 immediate */
| 6682 | + if (!(GET_CODE (operands[3]) == CONST_INT |
| 6683 | + && avr32_const_ok_for_constraint_p (-INTVAL (operands[3]), 'K', |
| 6684 | + "Ks08"))) |
| 6685 | + return FALSE; |
| 6686 | + else |
| 6687 | + /* Flip sign */ |
| 6688 | + operands[3] = GEN_INT (-INTVAL (operands[3])); |
| 6689 | + |
| 6690 | + /* Only allow certain compare operations */ |
| 6691 | + if (GET_MODE (compare_op0) != DImode |
| 6692 | + && GET_MODE (compare_op0) != SImode |
| 6693 | + && GET_MODE (compare_op0) != HImode && GET_MODE (compare_op0) != QImode) |
| 6694 | + return FALSE; |
| 6695 | + |
| 6696 | + if (GET_CODE (compare_op0) == MEM) |
| 6697 | + { |
| 6698 | + if (no_new_pseudos) |
| 6699 | + return FALSE; |
| 6700 | + else |
| 6701 | + compare_op0 = force_reg (GET_MODE (compare_op0), compare_op0); |
| 6702 | + } |
| 6703 | + |
| 6704 | + if (GET_CODE (compare_op1) == MEM) |
| 6705 | + { |
| 6706 | + if (no_new_pseudos) |
| 6707 | + return FALSE; |
| 6708 | + else |
| 6709 | + compare_op1 = force_reg (GET_MODE (compare_op1), compare_op1); |
| 6710 | + } |
| 6711 | + |
| 6712 | + /* For DI, HI and QI mode force comparison operands to registers */ |
| 6713 | + if (GET_MODE (compare_op0) == DImode |
| 6714 | + || GET_MODE (compare_op0) == HImode || GET_MODE (compare_op0) == QImode) |
| 6715 | + { |
| 6716 | + if (GET_CODE (compare_op0) != REG) |
| 6717 | + { |
| 6718 | + if (no_new_pseudos) |
| 6719 | + return FALSE; |
| 6720 | + else |
| 6721 | + compare_op0 = force_reg (GET_MODE (compare_op0), compare_op0); |
| 6722 | + } |
| 6723 | + |
| 6724 | + if (GET_CODE (compare_op1) != REG) |
| 6725 | + { |
| 6726 | + if (no_new_pseudos) |
| 6727 | + return FALSE; |
| 6728 | + else |
| 6729 | + compare_op1 = force_reg (GET_MODE (compare_op0), compare_op1); |
| 6730 | + } |
| 6731 | + } |
| 6732 | + |
| 6733 | +  /* Force any immediate compare operands for SI, larger than what the
| 6734 | +     Ks21 constraint allows, to a register */
| 6735 | + if (GET_MODE (compare_op0) == SImode) |
| 6736 | + { |
| 6737 | + if ((GET_CODE (compare_op0) == CONST_INT |
| 6738 | + && !avr32_const_ok_for_constraint_p (INTVAL (compare_op0), 'K', |
| 6739 | + "Ks21"))) |
| 6740 | + { |
| 6741 | + if (no_new_pseudos) |
| 6742 | + return FALSE; |
| 6743 | + else |
| 6744 | + compare_op0 = force_reg (SImode, compare_op0); |
| 6745 | + } |
| 6746 | + |
| 6747 | + if ((GET_CODE (compare_op1) == CONST_INT |
| 6748 | + && !avr32_const_ok_for_constraint_p (INTVAL (compare_op1), 'K', |
| 6749 | + "Ks21"))) |
| 6750 | + { |
| 6751 | + if (no_new_pseudos) |
| 6752 | + return FALSE; |
| 6753 | + else |
| 6754 | + compare_op1 = force_reg (SImode, compare_op1); |
| 6755 | + } |
| 6756 | + } |
| 6757 | + |
| 6758 | + /* If we have immediates larger than can be allowed in conditional mov |
| 6759 | + instructions, force them to registers */ |
| 6760 | + if (GET_CODE (operands[2]) == CONST_INT |
| 6761 | + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08")) |
| 6762 | + { |
| 6763 | + if (no_new_pseudos) |
| 6764 | + return FALSE; |
| 6765 | + else |
| 6766 | + operands[2] = force_reg (mode, operands[2]); |
| 6767 | + } |
| 6768 | + |
| 6769 | + if (GET_CODE (operands[3]) == CONST_INT |
| 6770 | + && !avr32_const_ok_for_constraint_p (INTVAL (operands[3]), 'K', "Ks08")) |
| 6771 | + { |
| 6772 | + if (no_new_pseudos) |
| 6773 | + return FALSE; |
| 6774 | + else |
| 6775 | + operands[3] = force_reg (mode, operands[3]); |
| 6776 | + } |
| 6777 | + |
| 6778 | + if (GET_CODE (operands[0]) != REG) |
| 6779 | + { |
| 6780 | + if (no_new_pseudos) |
| 6781 | + return FALSE; |
| 6782 | + else |
| 6783 | + operands[0] = force_reg (GET_MODE (operands[0]), operands[0]); |
| 6784 | + } |
| 6785 | + |
| 6786 | + if (GET_CODE (operands[2]) != REG) |
| 6787 | + { |
| 6788 | + if (no_new_pseudos) |
| 6789 | + return FALSE; |
| 6790 | + else |
| 6791 | + operands[2] = force_reg (GET_MODE (operands[2]), operands[2]); |
| 6792 | + } |
| 6793 | + |
| 6794 | + /* Check if operands[0] and operands[2] are different */ |
| 6795 | + if (REGNO (operands[0]) != REGNO (operands[2])) |
| 6796 | + { |
| 6797 | + emit_move_insn (operands[0], operands[2]); |
| 6798 | + operands[2] = operands[0]; |
| 6799 | + } |
| 6800 | + |
| 6801 | + /* Emit the actual instruction */ |
| 6802 | + operator = gen_rtx_EQ (VOIDmode, const0_rtx, const0_rtx); |
| 6803 | + PUT_CODE (operator, GET_CODE (operands[1])); |
| 6804 | + switch (mode) |
| 6805 | + { |
| 6806 | + case SImode: |
| 6807 | + switch (GET_MODE (compare_op0)) |
| 6808 | + { |
| 6809 | + case SImode: |
| 6810 | + emit_insn (gen_addsicc_cmpsi |
| 6811 | + (operands[0], operator, operands[2], operands[3], |
| 6812 | + compare_op0, compare_op1)); |
| 6813 | + break; |
| 6814 | + case DImode: |
| 6815 | + emit_insn (gen_addsicc_cmpdi |
| 6816 | + (operands[0], operator, operands[2], operands[3], |
| 6817 | + compare_op0, compare_op1)); |
| 6818 | + break; |
| 6819 | + case HImode: |
| 6820 | + emit_insn (gen_addsicc_cmphi |
| 6821 | + (operands[0], operator, operands[2], operands[3], |
| 6822 | + compare_op0, compare_op1)); |
| 6823 | + break; |
| 6824 | + case QImode: |
| 6825 | + emit_insn (gen_addsicc_cmpqi |
| 6826 | + (operands[0], operator, operands[2], operands[3], |
| 6827 | + compare_op0, compare_op1)); |
| 6828 | + break; |
| 6829 | + default: |
| 6830 | + return FALSE; |
| 6831 | + } |
| 6832 | + break; |
| 6833 | + case HImode: |
| 6834 | + switch (GET_MODE (compare_op0)) |
| 6835 | + { |
| 6836 | + case SImode: |
| 6837 | + emit_insn (gen_addhicc_cmpsi |
| 6838 | + (operands[0], operator, operands[2], operands[3], |
| 6839 | + compare_op0, compare_op1)); |
| 6840 | + break; |
| 6841 | + case DImode: |
| 6842 | + emit_insn (gen_addhicc_cmpdi |
| 6843 | + (operands[0], operator, operands[2], operands[3], |
| 6844 | + compare_op0, compare_op1)); |
| 6845 | + break; |
| 6846 | + case HImode: |
| 6847 | + emit_insn (gen_addhicc_cmphi |
| 6848 | + (operands[0], operator, operands[2], operands[3], |
| 6849 | + compare_op0, compare_op1)); |
| 6850 | + break; |
| 6851 | + case QImode: |
| 6852 | + emit_insn (gen_addhicc_cmpqi |
| 6853 | + (operands[0], operator, operands[2], operands[3], |
| 6854 | + compare_op0, compare_op1)); |
| 6855 | + break; |
| 6856 | + default: |
| 6857 | + return FALSE; |
| 6858 | + } |
| 6859 | + break; |
| 6860 | + case QImode: |
| 6861 | + switch (GET_MODE (compare_op0)) |
| 6862 | + { |
| 6863 | + case SImode: |
| 6864 | + emit_insn (gen_addqicc_cmpsi |
| 6865 | + (operands[0], operator, operands[2], operands[3], |
| 6866 | + compare_op0, compare_op1)); |
| 6867 | + break; |
| 6868 | + case DImode: |
| 6869 | + emit_insn (gen_addqicc_cmpdi |
| 6870 | + (operands[0], operator, operands[2], operands[3], |
| 6871 | + compare_op0, compare_op1)); |
| 6872 | + break; |
| 6873 | + case HImode: |
| 6874 | + emit_insn (gen_addqicc_cmphi |
| 6875 | + (operands[0], operator, operands[2], operands[3], |
| 6876 | + compare_op0, compare_op1)); |
| 6877 | + break; |
| 6878 | + case QImode: |
| 6879 | + emit_insn (gen_addqicc_cmpqi |
| 6880 | + (operands[0], operator, operands[2], operands[3], |
| 6881 | + compare_op0, compare_op1)); |
| 6882 | + break; |
| 6883 | + default: |
| 6884 | + return FALSE; |
| 6885 | + } |
| 6886 | + break; |
| 6887 | + default: |
| 6888 | + return FALSE; |
| 6889 | + } |
| 6890 | + |
| 6891 | + return TRUE; |
| 6892 | +} |
| 6893 | + |
| 6894 | +/* Function for changing the condition on the next instruction.
| 6895 | +   Should be used when emitting compare instructions where the condition
| 6896 | +   of the next instruction needs to change. */
| 6898 | +int |
| 6899 | +set_next_insn_cond (rtx cur_insn, rtx new_cond) |
| 6900 | +{ |
| 6901 | + rtx next_insn = next_nonnote_insn (cur_insn); |
| 6902 | + if ((next_insn != NULL_RTX) |
| 6903 | + && (INSN_P (next_insn)) |
| 6904 | + && (GET_CODE (PATTERN (next_insn)) == SET) |
| 6905 | + && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE)) |
| 6906 | + { |
| 6907 | + /* Branch instructions */ |
| 6908 | + XEXP (SET_SRC (PATTERN (next_insn)), 0) = new_cond; |
| 6909 | + /* Force the instruction to be recognized again */ |
| 6910 | + INSN_CODE (next_insn) = -1; |
| 6911 | + return TRUE; |
| 6912 | + } |
| 6913 | + else if ((next_insn != NULL_RTX) |
| 6914 | + && (INSN_P (next_insn)) |
| 6915 | + && (GET_CODE (PATTERN (next_insn)) == SET) |
| 6916 | + && comparison_operator (SET_SRC (PATTERN (next_insn)), |
| 6917 | + GET_MODE (SET_SRC (PATTERN (next_insn))))) |
| 6918 | + { |
| 6919 | + /* scc with no compare */ |
| 6920 | + SET_SRC (PATTERN (next_insn)) = new_cond; |
| 6921 | + /* Force the instruction to be recognized again */ |
| 6922 | + INSN_CODE (next_insn) = -1; |
| 6923 | + return TRUE; |
| 6924 | + } |
| 6925 | + |
| 6926 | + return FALSE; |
| 6927 | +} |
| 6928 | + |
| 6929 | +/* Function for obtaining the condition for the next instruction |
| 6930 | + after cur_insn. |
| 6931 | +*/ |
| 6932 | +rtx |
| 6933 | +get_next_insn_cond (rtx cur_insn) |
| 6934 | +{ |
| 6935 | + rtx next_insn = next_nonnote_insn (cur_insn); |
| 6936 | + rtx cond = NULL_RTX; |
| 6937 | + if ((next_insn != NULL_RTX) |
| 6938 | + && (INSN_P (next_insn)) |
| 6939 | + && (GET_CODE (PATTERN (next_insn)) == SET) |
| 6940 | + && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE)) |
| 6941 | + { |
| 6942 | + /* Branch instructions */ |
| 6943 | + cond = XEXP (SET_SRC (PATTERN (next_insn)), 0); |
| 6944 | + } |
| 6945 | + else if ((next_insn != NULL_RTX) |
| 6946 | + && (INSN_P (next_insn)) |
| 6947 | + && (GET_CODE (PATTERN (next_insn)) == SET) |
| 6948 | + && comparison_operator (SET_SRC (PATTERN (next_insn)), |
| 6949 | + GET_MODE (SET_SRC (PATTERN (next_insn))))) |
| 6950 | + { |
| 6951 | + /* scc with no compare */ |
| 6952 | + cond = SET_SRC (PATTERN (next_insn)); |
| 6953 | + } |
| 6954 | + |
| 6955 | + return cond; |
| 6956 | +} |
| 6957 | + |
| 6958 | +int |
| 6959 | +avr32_expand_scc (enum rtx_code cond, rtx * operands) |
| 6960 | +{ |
| 6961 | + |
| 6962 | + rtx comparation; |
| 6963 | + /* Only allow certain compare operations */ |
| 6964 | + if (GET_MODE (avr32_compare_op0) != DImode |
| 6965 | + && GET_MODE (avr32_compare_op0) != SImode |
| 6966 | + && GET_MODE (avr32_compare_op0) != HImode |
| 6967 | + && GET_MODE (avr32_compare_op0) != QImode) |
| 6968 | + return FALSE; |
| 6969 | + |
| 6970 | + /* Delete compare instruction as it is merged into this instruction */ |
| 6971 | + remove_insn (get_last_insn_anywhere ()); |
| 6972 | + |
| 6973 | + if (!REG_P (avr32_compare_op0)) |
| 6974 | + avr32_compare_op0 = |
| 6975 | + force_reg (GET_MODE (avr32_compare_op0), avr32_compare_op0); |
| 6976 | + |
| 6977 | + if (GET_MODE (avr32_compare_op0) != SImode && !REG_P (avr32_compare_op1)) |
| 6978 | + { |
| 6979 | + avr32_compare_op1 = |
| 6980 | + force_reg (GET_MODE (avr32_compare_op0), avr32_compare_op1); |
| 6981 | + } |
| 6982 | + else if (GET_MODE (avr32_compare_op0) == SImode |
| 6983 | + && !REG_P (avr32_compare_op1) |
| 6984 | + && (GET_CODE (avr32_compare_op1) != CONST_INT |
| 6985 | + || (GET_CODE (avr32_compare_op1) == CONST_INT |
| 6986 | + && |
| 6987 | + !avr32_const_ok_for_constraint_p (INTVAL |
| 6988 | + (avr32_compare_op1), 'K', |
| 6989 | + "Ks21")))) |
| 6990 | + avr32_compare_op1 = |
| 6991 | + force_reg (GET_MODE (avr32_compare_op0), avr32_compare_op1); |
| 6992 | + |
| 6993 | + |
| 6994 | + comparation = |
| 6995 | + gen_rtx_EQ (SImode, |
| 6996 | + gen_rtx_COMPARE (GET_MODE (avr32_compare_op0), |
| 6997 | + avr32_compare_op0, avr32_compare_op1), |
| 6998 | + const0_rtx); |
| 6999 | + /* Set correct condition */ |
| 7000 | + PUT_CODE (comparation, cond); |
| 7001 | + emit_insn (gen_rtx_SET (VOIDmode, operands[0], comparation)); |
| 7002 | + return TRUE; |
| 7003 | +} |
| 7004 | + |
| 7005 | +rtx |
| 7006 | +avr32_output_cmp (rtx cond, enum machine_mode mode, rtx op0, rtx op1) |
| 7007 | +{ |
| 7008 | + |
| 7009 | + rtx new_cond = NULL_RTX; |
| 7010 | + rtx ops[2]; |
| 7011 | + rtx compare_pattern; |
| 7012 | + ops[0] = op0; |
| 7013 | + ops[1] = op1; |
| 7014 | + |
| 7015 | + compare_pattern = gen_rtx_COMPARE (mode, op0, op1); |
| 7016 | + |
| 7017 | + new_cond = is_compare_redundant (compare_pattern, cond); |
| 7018 | + |
| 7019 | + if (new_cond != NULL_RTX) |
| 7020 | + return new_cond; |
| 7021 | + |
| 7022 | + /* Insert compare */ |
| 7023 | + switch (mode) |
| 7024 | + { |
| 7025 | + case QImode: |
| 7026 | + output_asm_insn ("cp.b\t%0, %1", ops); |
| 7027 | + break; |
| 7028 | + case HImode: |
| 7029 | + output_asm_insn ("cp.h\t%0, %1", ops); |
| 7030 | + break; |
| 7031 | + case SImode: |
| 7032 | + output_asm_insn ("cp.w\t%0, %1", ops); |
| 7033 | + break; |
| 7034 | + case DImode: |
| 7035 | + if (rtx_equal_p (op1, const0_rtx)) |
| 7036 | + output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0", ops); |
| 7037 | + else |
| 7038 | + output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0, %m1", ops); |
| 7039 | + break; |
| 7040 | + default: |
| 7041 | + internal_error ("Unknown comparison mode"); |
| 7042 | + break; |
| 7043 | + } |
| 7044 | + |
| 7045 | + return cond; |
| 7046 | +} |
| 7047 | + |
| 7048 | +int |
| 7049 | +avr32_load_multiple_operation (rtx op, |
| 7050 | + enum machine_mode mode ATTRIBUTE_UNUSED) |
| 7051 | +{ |
| 7052 | + int count = XVECLEN (op, 0); |
| 7053 | + unsigned int dest_regno; |
| 7054 | + rtx src_addr; |
| 7055 | + rtx elt; |
| 7056 | + int i = 1, base = 0; |
| 7057 | + |
| 7058 | + if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET) |
| 7059 | + return 0; |
| 7060 | + |
| 7061 | + /* Check to see if this might be a write-back. */ |
| 7062 | + if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS) |
| 7063 | + { |
| 7064 | + i++; |
| 7065 | + base = 1; |
| 7066 | + |
| 7067 | + /* Now check it more carefully. */ |
| 7068 | + if (GET_CODE (SET_DEST (elt)) != REG |
| 7069 | + || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG |
| 7070 | + || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT |
| 7071 | + || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4) |
| 7072 | + return 0; |
| 7073 | + } |
| 7074 | + |
| 7075 | + /* Perform a quick check so we don't blow up below. */ |
| 7076 | + if (count <= 1 |
| 7077 | + || GET_CODE (XVECEXP (op, 0, i - 1)) != SET |
| 7078 | + || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG |
| 7079 | + || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC) |
| 7080 | + return 0; |
| 7081 | + |
| 7082 | + dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1))); |
| 7083 | + src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0); |
| 7084 | + |
| 7085 | + for (; i < count; i++) |
| 7086 | + { |
| 7087 | + elt = XVECEXP (op, 0, i); |
| 7088 | + |
| 7089 | + if (GET_CODE (elt) != SET |
| 7090 | + || GET_CODE (SET_DEST (elt)) != REG |
| 7091 | + || GET_MODE (SET_DEST (elt)) != SImode |
| 7092 | + || GET_CODE (SET_SRC (elt)) != UNSPEC) |
| 7093 | + return 0; |
| 7094 | + } |
| 7095 | + |
| 7096 | + return 1; |
| 7097 | +} |
| 7098 | + |
| 7099 | +int |
| 7100 | +avr32_store_multiple_operation (rtx op, |
| 7101 | + enum machine_mode mode ATTRIBUTE_UNUSED) |
| 7102 | +{ |
| 7103 | + int count = XVECLEN (op, 0); |
| 7104 | + int src_regno; |
| 7105 | + rtx dest_addr; |
| 7106 | + rtx elt; |
| 7107 | + int i = 1; |
| 7108 | + |
| 7109 | + if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET) |
| 7110 | + return 0; |
| 7111 | + |
| 7112 | + /* Perform a quick check so we don't blow up below. */ |
| 7113 | + if (count <= i |
| 7114 | + || GET_CODE (XVECEXP (op, 0, i - 1)) != SET |
| 7115 | + || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM |
| 7116 | + || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC) |
| 7117 | + return 0; |
| 7118 | + |
| 7119 | + src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1))); |
| 7120 | + dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0); |
| 7121 | + |
| 7122 | + for (; i < count; i++) |
| 7123 | + { |
| 7124 | + elt = XVECEXP (op, 0, i); |
| 7125 | + |
| 7126 | + if (GET_CODE (elt) != SET |
| 7127 | + || GET_CODE (SET_DEST (elt)) != MEM |
| 7128 | + || GET_MODE (SET_DEST (elt)) != SImode |
| 7129 | + || GET_CODE (SET_SRC (elt)) != UNSPEC) |
| 7130 | + return 0; |
| 7131 | + } |
| 7132 | + |
| 7133 | + return 1; |
| 7134 | +} |
| 7135 | + |
| 7136 | +int |
| 7137 | +avr32_valid_macmac_bypass (rtx insn_out, rtx insn_in) |
| 7138 | +{ |
| 7139 | + /* Check if they use the same accumulator */ |
| 7140 | + if (rtx_equal_p |
| 7141 | + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in)))) |
| 7142 | + { |
| 7143 | + return TRUE; |
| 7144 | + } |
| 7145 | + |
| 7146 | + return FALSE; |
| 7147 | +} |
| 7148 | + |
| 7149 | +int |
| 7150 | +avr32_valid_mulmac_bypass (rtx insn_out, rtx insn_in) |
| 7151 | +{ |
| 7152 | + /* |
| 7153 | + Check if the mul instruction produces the accumulator for the mac |
| 7154 | + instruction. */ |
| 7155 | + if (rtx_equal_p |
| 7156 | + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in)))) |
| 7157 | + { |
| 7158 | + return TRUE; |
| 7159 | + } |
| 7160 | + return FALSE; |
| 7161 | +} |
| 7162 | + |
| 7163 | +int |
| 7164 | +avr32_store_bypass (rtx insn_out, rtx insn_in) |
| 7165 | +{ |
| 7166 | +  /* The bypass is only valid if the output result is used as a source in
| 7167 | +     the store instruction, NOT if it is used as a pointer or base. */
| 7168 | + if (rtx_equal_p |
| 7169 | + (SET_DEST (PATTERN (insn_out)), SET_SRC (PATTERN (insn_in)))) |
| 7170 | + { |
| 7171 | + return TRUE; |
| 7172 | + } |
| 7173 | + |
| 7174 | + return FALSE; |
| 7175 | +} |
| 7176 | + |
| 7177 | +int |
| 7178 | +avr32_mul_waw_bypass (rtx insn_out, rtx insn_in) |
| 7179 | +{ |
| 7180 | + /* Check if the register holding the result from the mul instruction is |
| 7181 | + used as a result register in the input instruction. */ |
| 7182 | + if (rtx_equal_p |
| 7183 | + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in)))) |
| 7184 | + { |
| 7185 | + return TRUE; |
| 7186 | + } |
| 7187 | + |
| 7188 | + return FALSE; |
| 7189 | +} |
| 7190 | + |
| 7191 | +int |
| 7192 | +avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in) |
| 7193 | +{ |
| 7194 | +  /* Check that insn_in only uses the first word loaded by the double load in insn_out. */
| 7195 | + rtx dst_reg; |
| 7196 | + rtx second_loaded_reg; |
| 7197 | + |
| 7198 | + /* If this is a double alu operation then the bypass is not valid */ |
| 7199 | + if ((get_attr_type (insn_in) == TYPE_ALU |
| 7200 | + || get_attr_type (insn_in) == TYPE_ALU2) |
| 7201 | + && (GET_MODE_SIZE (GET_MODE (SET_DEST (PATTERN (insn_out)))) > 4)) |
| 7202 | + return FALSE; |
| 7203 | + |
| 7204 | + /* Get the destination register in the load */ |
| 7205 | + if (!REG_P (SET_DEST (PATTERN (insn_out)))) |
| 7206 | + return FALSE; |
| 7207 | + |
| 7208 | + dst_reg = SET_DEST (PATTERN (insn_out)); |
| 7209 | + second_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 1); |
| 7210 | + |
| 7211 | + if (!reg_mentioned_p (second_loaded_reg, PATTERN (insn_in))) |
| 7212 | + return TRUE; |
| 7213 | + |
| 7214 | + return FALSE; |
| 7215 | +} |
| 7216 | + |
| 7217 | + |
| 7218 | +int |
| 7219 | +avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in) |
| 7220 | +{ |
| 7221 | +  /* Check that insn_in only uses the first two words loaded by the
| 7222 | +     quad load in insn_out. */
| 7223 | + rtx dst_reg; |
| 7224 | + rtx third_loaded_reg, fourth_loaded_reg; |
| 7225 | + |
| 7226 | + /* Get the destination register in the load */ |
| 7227 | + if (!REG_P (SET_DEST (PATTERN (insn_out)))) |
| 7228 | + return FALSE; |
| 7229 | + |
| 7230 | + dst_reg = SET_DEST (PATTERN (insn_out)); |
| 7231 | + third_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 2); |
| 7232 | + fourth_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 3); |
| 7233 | + |
| 7234 | + if (!reg_mentioned_p (third_loaded_reg, PATTERN (insn_in)) |
| 7235 | + && !reg_mentioned_p (fourth_loaded_reg, PATTERN (insn_in))) |
| 7236 | + { |
| 7237 | + return TRUE; |
| 7238 | + } |
| 7239 | + |
| 7240 | + return FALSE; |
| 7241 | +} |
| 7242 | + |
| 7243 | +int |
| 7244 | +avr32_sched_use_dfa_pipeline_interface (void) |
| 7245 | +{ |
| 7246 | +  /* No need to schedule on the AVR32 UC architecture. */
| 7247 | + return (avr32_arch->arch_type != ARCH_TYPE_AVR32_UC); |
| 7248 | +} |
| 7249 | + |
| 7250 | +void |
| 7251 | +avr32_select_rtx_section (enum machine_mode mode ATTRIBUTE_UNUSED, |
| 7252 | + rtx x ATTRIBUTE_UNUSED, |
| 7253 | + unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED) |
| 7254 | +{ |
| 7255 | + /* Let ASM_OUTPUT_POOL_PROLOGUE take care of this */ |
| 7256 | +} |
| 7257 | + |
| 7258 | +/* Set up library functions to comply with the AVR32 ABI */
| 7259 | + |
| 7260 | +static void |
| 7261 | +avr32_init_libfuncs (void) |
| 7262 | +{ |
| 7263 | + /* Convert gcc run-time function names to AVR32 ABI names */ |
| 7264 | + |
| 7265 | + /* Double-precision floating-point arithmetic. */ |
| 7266 | + set_optab_libfunc (add_optab, DFmode, "__avr32_f64_add"); |
| 7267 | + set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div"); |
| 7268 | + set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul"); |
| 7269 | + set_optab_libfunc (neg_optab, DFmode, NULL); |
| 7270 | + set_optab_libfunc (sub_optab, DFmode, "__avr32_f64_sub"); |
| 7271 | + |
| 7272 | + /* Double-precision comparisons. */ |
| 7273 | + set_optab_libfunc (eq_optab, DFmode, "__avr32_f64_cmp_eq"); |
| 7274 | + set_optab_libfunc (ne_optab, DFmode, NULL); |
| 7275 | + set_optab_libfunc (lt_optab, DFmode, "__avr32_f64_cmp_lt"); |
| 7276 | + set_optab_libfunc (le_optab, DFmode, NULL); |
| 7277 | + set_optab_libfunc (ge_optab, DFmode, "__avr32_f64_cmp_ge"); |
| 7278 | + set_optab_libfunc (gt_optab, DFmode, NULL); |
| 7279 | + |
| 7280 | + /* Single-precision floating-point arithmetic. */ |
| 7281 | + set_optab_libfunc (add_optab, SFmode, "__avr32_f32_add"); |
| 7282 | + set_optab_libfunc (sdiv_optab, SFmode, "__avr32_f32_div"); |
| 7283 | + set_optab_libfunc (smul_optab, SFmode, "__avr32_f32_mul"); |
| 7284 | + set_optab_libfunc (neg_optab, SFmode, NULL); |
| 7285 | + set_optab_libfunc (sub_optab, SFmode, "__avr32_f32_sub"); |
| 7286 | + |
| 7287 | + /* Single-precision comparisons. */ |
| 7288 | + set_optab_libfunc (eq_optab, SFmode, "__avr32_f32_cmp_eq"); |
| 7289 | + set_optab_libfunc (ne_optab, SFmode, NULL); |
| 7290 | + set_optab_libfunc (lt_optab, SFmode, "__avr32_f32_cmp_lt"); |
| 7291 | + set_optab_libfunc (le_optab, SFmode, NULL); |
| 7292 | + set_optab_libfunc (ge_optab, SFmode, "__avr32_f32_cmp_ge"); |
| 7293 | + set_optab_libfunc (gt_optab, SFmode, NULL); |
| 7294 | + |
| 7295 | + /* Floating-point to integer conversions. */ |
| 7296 | + set_conv_libfunc (sfix_optab, SImode, DFmode, "__avr32_f64_to_s32"); |
| 7297 | + set_conv_libfunc (ufix_optab, SImode, DFmode, "__avr32_f64_to_u32"); |
| 7298 | + set_conv_libfunc (sfix_optab, DImode, DFmode, "__avr32_f64_to_s64"); |
| 7299 | + set_conv_libfunc (ufix_optab, DImode, DFmode, "__avr32_f64_to_u64"); |
| 7300 | + set_conv_libfunc (sfix_optab, SImode, SFmode, "__avr32_f32_to_s32"); |
| 7301 | + set_conv_libfunc (ufix_optab, SImode, SFmode, "__avr32_f32_to_u32"); |
| 7302 | + set_conv_libfunc (sfix_optab, DImode, SFmode, "__avr32_f32_to_s64"); |
| 7303 | + set_conv_libfunc (ufix_optab, DImode, SFmode, "__avr32_f32_to_u64"); |
| 7304 | + |
| 7305 | + /* Conversions between floating types. */ |
| 7306 | + set_conv_libfunc (trunc_optab, SFmode, DFmode, "__avr32_f64_to_f32"); |
| 7307 | + set_conv_libfunc (sext_optab, DFmode, SFmode, "__avr32_f32_to_f64"); |
| 7308 | + |
| 7309 | + /* Integer to floating-point conversions. Table 8. */ |
| 7310 | + set_conv_libfunc (sfloat_optab, DFmode, SImode, "__avr32_s32_to_f64"); |
| 7311 | + set_conv_libfunc (sfloat_optab, DFmode, DImode, "__avr32_s64_to_f64"); |
| 7312 | + set_conv_libfunc (sfloat_optab, SFmode, SImode, "__avr32_s32_to_f32"); |
| 7313 | + set_conv_libfunc (sfloat_optab, SFmode, DImode, "__avr32_s64_to_f32"); |
| 7314 | + set_conv_libfunc (ufloat_optab, DFmode, SImode, "__avr32_u32_to_f64"); |
| 7315 | + set_conv_libfunc (ufloat_optab, SFmode, SImode, "__avr32_u32_to_f32"); |
| 7316 | + /* TODO: Add these to gcc library functions */ |
| 7317 | + |
| 7318 | + set_conv_libfunc (ufloat_optab, DFmode, DImode, NULL); |
| 7319 | + set_conv_libfunc (ufloat_optab, SFmode, DImode, NULL); |
| 7320 | + |
| 7321 | + /* Long long. Table 9. */ |
| 7322 | + set_optab_libfunc (smul_optab, DImode, "__avr32_mul64"); |
| 7323 | + set_optab_libfunc (sdiv_optab, DImode, "__avr32_sdiv64"); |
| 7324 | + set_optab_libfunc (udiv_optab, DImode, "__avr32_udiv64"); |
| 7325 | + set_optab_libfunc (smod_optab, DImode, "__avr32_smod64"); |
| 7326 | + set_optab_libfunc (umod_optab, DImode, "__avr32_umod64"); |
| 7327 | + set_optab_libfunc (ashl_optab, DImode, "__avr32_lsl64"); |
| 7328 | + set_optab_libfunc (lshr_optab, DImode, "__avr32_lsr64"); |
| 7329 | + set_optab_libfunc (ashr_optab, DImode, "__avr32_asr64"); |
| 7330 | +} |
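|  | +
|  | +/* For illustration (not part of the original port): with the mappings
|  | +   above, a plain double-precision addition such as
|  | +
|  | +       double f (double a, double b) { return a + b; }
|  | +
|  | +   should end up calling __avr32_f64_add instead of the default libgcc
|  | +   routine __adddf3, and similarly for the other remapped operations.  */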
| 7331 | --- /dev/null |
| 7332 | +++ b/gcc/config/avr32/avr32-elf.h |
| 7333 | @@ -0,0 +1,82 @@ |
| 7334 | +/* |
| 7335 | + Elf specific definitions. |
| 7336 | + Copyright 2003-2006 Atmel Corporation. |
| 7337 | + |
| 7338 | + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| 7339 | + |
| 7340 | + This file is part of GCC. |
| 7341 | + |
| 7342 | + This program is free software; you can redistribute it and/or modify |
| 7343 | + it under the terms of the GNU General Public License as published by |
| 7344 | + the Free Software Foundation; either version 2 of the License, or |
| 7345 | + (at your option) any later version. |
| 7346 | + |
| 7347 | + This program is distributed in the hope that it will be useful, |
| 7348 | + but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 7349 | + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 7350 | + GNU General Public License for more details. |
| 7351 | + |
| 7352 | + You should have received a copy of the GNU General Public License |
| 7353 | + along with this program; if not, write to the Free Software |
| 7354 | + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ |
| 7355 | + |
| 7356 | + |
| 7357 | +/***************************************************************************** |
| 7358 | + * Controlling the Compiler Driver, 'gcc'
| 7359 | + *****************************************************************************/ |
| 7360 | + |
| 7361 | +/* Run-time Target Specification. */ |
| 7362 | +#undef TARGET_VERSION |
| 7363 | +#define TARGET_VERSION fputs (" (AVR32 GNU with ELF)", stderr); |
| 7364 | + |
| 7365 | +/* |
| 7366 | +Another C string constant used much like LINK_SPEC. The |
| 7367 | +difference between the two is that STARTFILE_SPEC is used at |
| 7368 | +the very beginning of the command given to the linker. |
| 7369 | + |
| 7370 | +If this macro is not defined, a default is provided that loads the |
| 7371 | +standard C startup file from the usual place. See gcc.c. |
| 7372 | +*/ |
| 7373 | +#undef STARTFILE_SPEC |
| 7374 | +#define STARTFILE_SPEC "crt0%O%s crti%O%s crtbegin%O%s" |
| 7375 | + |
| 7376 | +#undef LINK_SPEC |
| 7377 | +#define LINK_SPEC "%{muse-oscall:--defsym __do_not_use_oscall_coproc__=0} %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}} %{mpart=*:-mavr32elf_%*} %{mcpu=*:-mavr32elf_%*}" |
| 7378 | + |
| 7379 | + |
| 7380 | +/* |
| 7381 | +Another C string constant used much like LINK_SPEC. The |
| 7382 | +difference between the two is that ENDFILE_SPEC is used at |
| 7383 | +the very end of the command given to the linker. |
| 7384 | + |
| 7385 | +Do not define this macro if it does not need to do anything. |
| 7386 | +*/ |
| 7387 | +#undef ENDFILE_SPEC |
| 7388 | +#define ENDFILE_SPEC "crtend%O%s crtn%O%s" |
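|  | +
|  | +/* Taken together with STARTFILE_SPEC above, the final link line is
|  | +   expected to be bracketed roughly as
|  | +
|  | +       crt0.o crti.o crtbegin.o <objects and libraries> crtend.o crtn.o
|  | +
|  | +   (illustrative sketch; the exact file names depend on how %O and %s
|  | +   are resolved by the driver).  */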
| 7389 | + |
| 7390 | + |
| 7391 | +/* Target CPU builtins. */ |
| 7392 | +#define TARGET_CPU_CPP_BUILTINS() \ |
| 7393 | + do \ |
| 7394 | + { \ |
| 7395 | + builtin_define ("__avr32__"); \ |
| 7396 | + builtin_define ("__AVR32__"); \ |
| 7397 | + builtin_define ("__AVR32_ELF__"); \ |
| 7398 | + builtin_define (avr32_part->macro); \ |
| 7399 | + builtin_define (avr32_arch->macro); \ |
| 7400 | + if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) \ |
| 7401 | + builtin_define ("__AVR32_AVR32A__"); \ |
| 7402 | + else \ |
| 7403 | + builtin_define ("__AVR32_AVR32B__"); \ |
| 7404 | + if (TARGET_UNALIGNED_WORD) \ |
| 7405 | + builtin_define ("__AVR32_HAS_UNALIGNED_WORD__"); \ |
| 7406 | + if (TARGET_SIMD) \ |
| 7407 | + builtin_define ("__AVR32_HAS_SIMD__"); \ |
| 7408 | + if (TARGET_DSP) \ |
| 7409 | + builtin_define ("__AVR32_HAS_DSP__"); \ |
| 7410 | + if (TARGET_RMW) \ |
| 7411 | + builtin_define ("__AVR32_HAS_RMW__"); \ |
| 7412 | + if (TARGET_BRANCH_PRED) \ |
| 7413 | + builtin_define ("__AVR32_HAS_BRANCH_PRED__"); \ |
| 7414 | + } \ |
| 7415 | + while (0) |
| 7416 | --- /dev/null |
| 7417 | +++ b/gcc/config/avr32/avr32.h |
| 7418 | @@ -0,0 +1,3322 @@ |
| 7419 | +/* |
| 7420 | + Definitions of target machine for AVR32. |
| 7421 | + Copyright 2003-2006 Atmel Corporation. |
| 7422 | + |
| 7423 | + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| 7424 | + Initial porting by Anders �dland. |
| 7425 | + |
| 7426 | + This file is part of GCC. |
| 7427 | + |
| 7428 | + This program is free software; you can redistribute it and/or modify |
| 7429 | + it under the terms of the GNU General Public License as published by |
| 7430 | + the Free Software Foundation; either version 2 of the License, or |
| 7431 | + (at your option) any later version. |
| 7432 | + |
| 7433 | + This program is distributed in the hope that it will be useful, |
| 7434 | + but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 7435 | + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 7436 | + GNU General Public License for more details. |
| 7437 | + |
| 7438 | + You should have received a copy of the GNU General Public License |
| 7439 | + along with this program; if not, write to the Free Software |
| 7440 | + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ |
| 7441 | + |
| 7442 | +#ifndef GCC_AVR32_H |
| 7443 | +#define GCC_AVR32_H |
| 7444 | + |
| 7445 | + |
| 7446 | +#ifndef OBJECT_FORMAT_ELF |
| 7447 | +#error avr32.h included before elfos.h |
| 7448 | +#endif |
| 7449 | + |
| 7450 | +#ifndef LOCAL_LABEL_PREFIX |
| 7451 | +#define LOCAL_LABEL_PREFIX "." |
| 7452 | +#endif |
| 7453 | + |
| 7454 | +#ifndef SUBTARGET_CPP_SPEC |
| 7455 | +#define SUBTARGET_CPP_SPEC "-D__ELF__" |
| 7456 | +#endif |
| 7457 | + |
| 7458 | + |
| 7459 | +extern struct rtx_def *avr32_compare_op0; |
| 7460 | +extern struct rtx_def *avr32_compare_op1; |
| 7461 | + |
| 7462 | + |
| 7463 | +extern struct rtx_def *avr32_acc_cache; |
| 7464 | + |
| 7465 | +/* cache instruction op5 codes */ |
| 7466 | +#define AVR32_CACHE_INVALIDATE_ICACHE 1 |
| 7467 | + |
| 7468 | +/* These bits describe the different types of function supported |
| 7469 | +   by the AVR32 backend. They are exclusive, i.e. a function cannot be both a
| 7470 | + normal function and an interworked function, for example. Knowing the |
| 7471 | + type of a function is important for determining its prologue and |
| 7472 | + epilogue sequences. |
| 7473 | + Note value 7 is currently unassigned. Also note that the interrupt |
| 7474 | + function types all have bit 2 set, so that they can be tested for easily. |
| 7475 | + Note that 0 is deliberately chosen for AVR32_FT_UNKNOWN so that when the |
| 7476 | + machine_function structure is initialized (to zero) func_type will |
| 7477 | + default to unknown. This will force the first use of avr32_current_func_type |
| 7478 | + to call avr32_compute_func_type. */ |
| 7479 | +#define AVR32_FT_UNKNOWN 0 /* Type has not yet been determined. |
| 7480 | + */ |
| 7481 | +#define AVR32_FT_NORMAL 1 /* Your normal, straightforward |
| 7482 | + function. */ |
| 7483 | +#define AVR32_FT_ACALL 2 /* An acall function. */ |
| 7484 | +#define AVR32_FT_EXCEPTION_HANDLER 3 /* A C++ exception handler. */ |
| 7485 | +#define AVR32_FT_ISR_FULL 4 /* A fully shadowed interrupt mode. */ |
| 7486 | +#define AVR32_FT_ISR_HALF 5 /* A half shadowed interrupt mode. */ |
| 7487 | +#define AVR32_FT_ISR_NONE 6 /* No shadow registers. */ |
| 7488 | + |
| 7489 | +#define AVR32_FT_TYPE_MASK ((1 << 3) - 1) |
| 7490 | + |
| 7491 | +/* In addition functions can have several type modifiers, |
| 7492 | + outlined by these bit masks: */ |
| 7493 | +#define AVR32_FT_INTERRUPT (1 << 2) /* Note overlap with FT_ISR |
| 7494 | + and above. */ |
| 7495 | +#define AVR32_FT_NAKED (1 << 3) /* No prologue or epilogue. */ |
| 7496 | +#define AVR32_FT_VOLATILE (1 << 4) /* Does not return. */ |
| 7497 | +#define AVR32_FT_NESTED (1 << 5) /* Embedded inside another |
| 7498 | + func. */ |
| 7499 | + |
| 7500 | +/* Some macros to test these flags. */ |
| 7501 | +#define AVR32_FUNC_TYPE(t) (t & AVR32_FT_TYPE_MASK) |
| 7502 | +#define IS_INTERRUPT(t) (t & AVR32_FT_INTERRUPT) |
| 7503 | +#define IS_VOLATILE(t) (t & AVR32_FT_VOLATILE) |
| 7504 | +#define IS_NAKED(t) (t & AVR32_FT_NAKED) |
| 7505 | +#define IS_NESTED(t) (t & AVR32_FT_NESTED) |
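|  | +
|  | +/* Worked example (illustrative): an interrupt handler using full register
|  | +   shadowing and no modifiers has func_type == AVR32_FT_ISR_FULL, so
|  | +   AVR32_FUNC_TYPE (func_type) yields AVR32_FT_ISR_FULL and
|  | +   IS_INTERRUPT (func_type) is non-zero, since all the ISR type values
|  | +   have bit 2 set.  */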
| 7506 | + |
| 7507 | + |
| 7508 | +typedef struct minipool_labels |
| 7509 | +GTY ((chain_next ("%h.next"), chain_prev ("%h.prev"))) |
| 7510 | +{ |
| 7511 | + rtx label; |
| 7512 | + struct minipool_labels *prev; |
| 7513 | + struct minipool_labels *next; |
| 7514 | +} minipool_labels; |
| 7515 | + |
| 7516 | +/* A C structure for machine-specific, per-function data. |
| 7517 | + This is added to the cfun structure. */ |
| 7518 | + |
| 7519 | +typedef struct machine_function |
| 7520 | +GTY (()) |
| 7521 | +{ |
| 7522 | + /* Records the type of the current function. */ |
| 7523 | + unsigned long func_type; |
| 7524 | +  /* List of minipool labels, used for checking if a code label is valid
| 7525 | +     in a memory expression */
| 7526 | + minipool_labels *minipool_label_head; |
| 7527 | + minipool_labels *minipool_label_tail; |
| 7528 | +} machine_function; |
| 7529 | + |
| 7530 | +/* Initialize data used by insn expanders. This is called from init_emit,
| 7531 | +   once for every function before code is generated. */
| 7532 | +#define INIT_EXPANDERS avr32_init_expanders () |
| 7533 | + |
| 7534 | +/****************************************************************************** |
| 7535 | + * SPECS |
| 7536 | + *****************************************************************************/ |
| 7537 | + |
| 7538 | +#ifndef ASM_SPEC |
| 7539 | +#define ASM_SPEC "%{fpic:--pic} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{march=*:-march=%*} %{mpart=*:-mpart=%*}" |
| 7540 | +#endif |
| 7541 | + |
| 7542 | +#ifndef MULTILIB_DEFAULTS |
| 7543 | +#define MULTILIB_DEFAULTS { "march=ap" } |
| 7544 | +#endif |
| 7545 | + |
| 7546 | +/****************************************************************************** |
| 7547 | + * Run-time Target Specification |
| 7548 | + *****************************************************************************/ |
| 7549 | +#ifndef TARGET_VERSION |
| 7550 | +#define TARGET_VERSION fprintf(stderr, " (AVR32, GNU assembler syntax)"); |
| 7551 | +#endif |
| 7552 | + |
| 7553 | +/* Part types. Keep this in sync with the order of avr32_part_types in avr32.c*/ |
| 7554 | +enum part_type |
| 7555 | +{ |
| 7556 | + PART_TYPE_AVR32_NONE, |
| 7557 | + PART_TYPE_AVR32_AP7000, |
| 7558 | + PART_TYPE_AVR32_AP7010, |
| 7559 | + PART_TYPE_AVR32_AP7020, |
| 7560 | + PART_TYPE_AVR32_UC3A0256, |
| 7561 | + PART_TYPE_AVR32_UC3A0512, |
| 7562 | + PART_TYPE_AVR32_UC3A1128, |
| 7563 | + PART_TYPE_AVR32_UC3A1256, |
| 7564 | + PART_TYPE_AVR32_UC3A1512 |
| 7565 | +}; |
| 7566 | + |
| 7567 | +/* Microarchitectures. */ |
| 7568 | +enum microarchitecture_type |
| 7569 | +{ |
| 7570 | + UARCH_TYPE_AVR32A, |
| 7571 | + UARCH_TYPE_AVR32B |
| 7572 | +}; |
| 7573 | + |
| 7574 | +/* Architecture types, which specify the pipeline.
| 7575 | +   Keep this in sync with avr32_arch_types in avr32.c */
| 7576 | +enum architecture_type |
| 7577 | +{ |
| 7578 | + ARCH_TYPE_AVR32_AP, |
| 7579 | + ARCH_TYPE_AVR32_UC |
| 7580 | +}; |
| 7581 | + |
| 7582 | +/* Flag specifying if the cpu has support for DSP instructions.*/ |
| 7583 | +#define FLAG_AVR32_HAS_DSP (1 << 0) |
| 7584 | +/* Flag specifying if the cpu has support for Read-Modify-Write |
| 7585 | + instructions.*/ |
| 7586 | +#define FLAG_AVR32_HAS_RMW (1 << 1) |
| 7587 | +/* Flag specifying if the cpu has support for SIMD instructions. */ |
| 7588 | +#define FLAG_AVR32_HAS_SIMD (1 << 2) |
| 7589 | +/* Flag specifying if the cpu has support for unaligned memory word access. */ |
| 7590 | +#define FLAG_AVR32_HAS_UNALIGNED_WORD (1 << 3) |
| 7591 | +/* Flag specifying if the cpu has support for branch prediction. */ |
| 7592 | +#define FLAG_AVR32_HAS_BRANCH_PRED (1 << 4) |
| 7593 | + |
| 7594 | +/* Structure for holding information about different avr32 CPUs/parts */ |
| 7595 | +struct part_type_s |
| 7596 | +{ |
| 7597 | + const char *const name; |
| 7598 | + enum part_type part_type; |
| 7599 | + enum architecture_type arch_type; |
| 7600 | + /* Must lie outside user's namespace. NULL == no macro. */ |
| 7601 | + const char *const macro; |
| 7602 | +}; |
| 7603 | + |
| 7604 | +/* Structure for holding information about different avr32 pipeline |
| 7605 | + architectures. */ |
| 7606 | +struct arch_type_s |
| 7607 | +{ |
| 7608 | + const char *const name; |
| 7609 | + enum architecture_type arch_type; |
| 7610 | + enum microarchitecture_type uarch_type; |
| 7611 | + const unsigned long feature_flags; |
| 7612 | + /* Must lie outside user's namespace. NULL == no macro. */ |
| 7613 | + const char *const macro; |
| 7614 | +}; |
| 7615 | + |
| 7616 | +extern const struct part_type_s *avr32_part; |
| 7617 | +extern const struct arch_type_s *avr32_arch; |
| 7618 | + |
| 7619 | +#define TARGET_SIMD (avr32_arch->feature_flags & FLAG_AVR32_HAS_SIMD) |
| 7620 | +#define TARGET_DSP (avr32_arch->feature_flags & FLAG_AVR32_HAS_DSP) |
| 7621 | +#define TARGET_RMW (avr32_arch->feature_flags & FLAG_AVR32_HAS_RMW) |
| 7622 | +#define TARGET_UNALIGNED_WORD (avr32_arch->feature_flags & FLAG_AVR32_HAS_UNALIGNED_WORD) |
| 7623 | +#define TARGET_BRANCH_PRED (avr32_arch->feature_flags & FLAG_AVR32_HAS_BRANCH_PRED) |
| 7624 | + |
| 7625 | +#define CAN_DEBUG_WITHOUT_FP |
| 7626 | + |
| 7627 | +/****************************************************************************** |
| 7628 | + * Storage Layout |
| 7629 | + *****************************************************************************/ |
| 7630 | + |
| 7631 | +/* |
| 7632 | +Define this macro to have the value 1 if the most significant bit in a |
| 7633 | +byte has the lowest number; otherwise define it to have the value zero. |
| 7634 | +This means that bit-field instructions count from the most significant |
| 7635 | +bit. If the machine has no bit-field instructions, then this must still |
| 7636 | +be defined, but it doesn't matter which value it is defined to. This |
| 7637 | +macro need not be a constant. |
| 7638 | + |
| 7639 | +This macro does not affect the way structure fields are packed into |
| 7640 | +bytes or words; that is controlled by BYTES_BIG_ENDIAN. |
| 7641 | +*/ |
| 7642 | +#define BITS_BIG_ENDIAN 0 |
| 7643 | + |
| 7644 | +/* |
| 7645 | +Define this macro to have the value 1 if the most significant byte in a |
| 7646 | +word has the lowest number. This macro need not be a constant. |
| 7647 | +*/ |
| 7648 | +/* |
| 7649 | +  Data is stored in a big-endian way.
| 7650 | +*/ |
| 7651 | +#define BYTES_BIG_ENDIAN 1 |
| 7652 | + |
| 7653 | +/* |
| 7654 | +Define this macro to have the value 1 if, in a multiword object, the |
| 7655 | +most significant word has the lowest number. This applies to both |
| 7656 | +memory locations and registers; GCC fundamentally assumes that the |
| 7657 | +order of words in memory is the same as the order in registers. This |
| 7658 | +macro need not be a constant. |
| 7659 | +*/ |
| 7660 | +/* |
| 7661 | + Data is stored in a big-endian way.
| 7662 | +*/ |
| 7663 | +#define WORDS_BIG_ENDIAN 1 |
| 7664 | + |
| 7665 | +/* |
| 7666 | +Define this macro if WORDS_BIG_ENDIAN is not constant. This must be a |
| 7667 | +constant value with the same meaning as WORDS_BIG_ENDIAN, which will be |
| 7668 | +used only when compiling libgcc2.c. Typically the value will be set |
| 7669 | +based on preprocessor defines. |
| 7670 | +*/ |
| 7671 | +#define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN |
| 7672 | + |
| 7673 | +/* |
| 7674 | +Define this macro to have the value 1 if DFmode, XFmode or |
| 7675 | +TFmode floating point numbers are stored in memory with the word |
| 7676 | +containing the sign bit at the lowest address; otherwise define it to |
| 7677 | +have the value 0. This macro need not be a constant. |
| 7678 | + |
| 7679 | +You need not define this macro if the ordering is the same as for |
| 7680 | +multi-word integers. |
| 7681 | +*/ |
| 7682 | +/* #define FLOAT_WORDS_BIG_ENDIAN 1 */ |
| 7683 | + |
| 7684 | +/* |
| 7685 | +Define this macro to be the number of bits in an addressable storage |
| 7686 | +unit (byte); normally 8. |
| 7687 | +*/ |
| 7688 | +#define BITS_PER_UNIT 8 |
| 7689 | + |
| 7690 | +/* |
| 7691 | +Number of bits in a word; normally 32. |
| 7692 | +*/ |
| 7693 | +#define BITS_PER_WORD 32 |
| 7694 | + |
| 7695 | +/* |
| 7696 | +Maximum number of bits in a word. If this is undefined, the default is |
| 7697 | +BITS_PER_WORD. Otherwise, it is the constant value that is the |
| 7698 | +largest value that BITS_PER_WORD can have at run-time. |
| 7699 | +*/ |
| 7700 | +/* MAX_BITS_PER_WORD not defined*/ |
| 7701 | + |
| 7702 | +/* |
| 7703 | +Number of storage units in a word; normally 4. |
| 7704 | +*/ |
| 7705 | +#define UNITS_PER_WORD 4 |
| 7706 | + |
| 7707 | +/* |
| 7708 | +Minimum number of units in a word. If this is undefined, the default is |
| 7709 | +UNITS_PER_WORD. Otherwise, it is the constant value that is the |
| 7710 | +smallest value that UNITS_PER_WORD can have at run-time. |
| 7711 | +*/ |
| 7712 | +/* MIN_UNITS_PER_WORD not defined */ |
| 7713 | + |
| 7714 | +/* |
| 7715 | +Width of a pointer, in bits. You must specify a value no wider than the |
| 7716 | +width of Pmode. If it is not equal to the width of Pmode, |
| 7717 | +you must define POINTERS_EXTEND_UNSIGNED. |
| 7718 | +*/ |
| 7719 | +#define POINTER_SIZE 32 |
| 7720 | + |
| 7721 | +/* |
| 7722 | +A C expression whose value is greater than zero if pointers that need to be |
| 7723 | +extended from being POINTER_SIZE bits wide to Pmode are to |
| 7724 | +be zero-extended and zero if they are to be sign-extended. If the value |
| 7725 | +is less than zero then there must be a "ptr_extend" instruction that
| 7726 | +extends a pointer from POINTER_SIZE to Pmode. |
| 7727 | + |
| 7728 | +You need not define this macro if the POINTER_SIZE is equal |
| 7729 | +to the width of Pmode. |
| 7730 | +*/ |
| 7731 | +/* #define POINTERS_EXTEND_UNSIGNED */ |
| 7732 | + |
| 7733 | +/* |
| 7734 | +A macro to update M and UNSIGNEDP when an object whose type
| 7735 | +is TYPE and which has the specified mode and signedness is to be |
| 7736 | +stored in a register. This macro is only called when TYPE is a |
| 7737 | +scalar type. |
| 7738 | + |
| 7739 | +On most RISC machines, which only have operations that operate on a full |
| 7740 | +register, define this macro to set M to word_mode if |
| 7741 | +M is an integer mode narrower than BITS_PER_WORD. In most |
| 7742 | +cases, only integer modes should be widened because wider-precision |
| 7743 | +floating-point operations are usually more expensive than their narrower |
| 7744 | +counterparts. |
| 7745 | + |
| 7746 | +For most machines, the macro definition does not change UNSIGNEDP. |
| 7747 | +However, some machines have instructions that preferentially handle
| 7748 | +either signed or unsigned quantities of certain modes. For example, on |
| 7749 | +the DEC Alpha, 32-bit loads from memory and 32-bit add instructions |
| 7750 | +sign-extend the result to 64 bits. On such machines, set |
| 7751 | +UNSIGNEDP according to which kind of extension is more efficient. |
| 7752 | + |
| 7753 | +Do not define this macro if it would never modify M. |
| 7754 | +*/ |
| 7755 | +#define PROMOTE_MODE(M, UNSIGNEDP, TYPE) \ |
| 7756 | + do \ |
| 7757 | + { \ |
| 7758 | + if (GET_MODE_CLASS (M) == MODE_INT \ |
| 7759 | + && GET_MODE_SIZE (M) < 4) \ |
| 7760 | + { \ |
| 7761 | + (M) = SImode; \ |
| 7762 | + } \ |
| 7763 | + } \ |
| 7764 | + while (0) |
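|  | +/* Illustration of the promotion above (sketch only; the function is a
|  | +   made-up example):
|  | +
|  | +     short incr (short x) { return x + 1; }
|  | +
|  | +   While the narrow value lives in a register it is widened to SImode,
|  | +   so the addition is performed as a full 32-bit operation; the value
|  | +   is only truncated again when it is stored back to an HImode or
|  | +   QImode memory location. */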
| 7765 | + |
| 7766 | +/* Define if operations between registers always perform the operation |
| 7767 | + on the full register even if a narrower mode is specified. */ |
| 7768 | +#define WORD_REGISTER_OPERATIONS |
| 7769 | + |
| 7770 | +/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD |
| 7771 | + will either zero-extend or sign-extend. The value of this macro should |
| 7772 | + be the code that says which one of the two operations is implicitly |
| 7773 | + done, UNKNOWN if not known. */ |
| 7774 | +#define LOAD_EXTEND_OP(MODE) \ |
| 7775 | + (((MODE) == QImode) ? ZERO_EXTEND \ |
| 7776 | + : ((MODE) == HImode) ? SIGN_EXTEND : UNKNOWN) |
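|  | +/* Illustration of what the definition above lets the compiler assume
|  | +   (sketch only): after
|  | +
|  | +     unsigned char *p;
|  | +     int i = *p;
|  | +
|  | +   the QImode byte load is already known to zero-extend the loaded
|  | +   value to a full word, so no separate zero-extension has to be
|  | +   emitted; a halfword (HImode) load is assumed to sign-extend. */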
| 7777 | + |
| 7778 | + |
| 7779 | +/* |
| 7780 | +Define this macro if the promotion described by PROMOTE_MODE |
| 7781 | +should only be performed for outgoing function arguments or |
| 7782 | +function return values, as specified by PROMOTE_FUNCTION_ARGS |
| 7783 | +and PROMOTE_FUNCTION_RETURN, respectively. |
| 7784 | +*/ |
| 7785 | +/* #define PROMOTE_FOR_CALL_ONLY */ |
| 7786 | + |
| 7787 | +/* |
| 7788 | +Normal alignment required for function parameters on the stack, in |
| 7789 | +bits. All stack parameters receive at least this much alignment |
| 7790 | +regardless of data type. On most machines, this is the same as the |
| 7791 | +size of an integer. |
| 7792 | +*/ |
| 7793 | +#define PARM_BOUNDARY 32 |
| 7794 | + |
| 7795 | +/* |
| 7796 | +Define this macro to the minimum alignment enforced by hardware for the |
| 7797 | +stack pointer on this machine. The definition is a C expression for the |
| 7798 | +desired alignment (measured in bits). This value is used as a default |
| 7799 | +if PREFERRED_STACK_BOUNDARY is not defined. On most machines, |
| 7800 | +this should be the same as PARM_BOUNDARY. |
| 7801 | +*/ |
| 7802 | +#define STACK_BOUNDARY 32 |
| 7803 | + |
| 7804 | +/* |
| 7805 | +Define this macro if you wish to preserve a certain alignment for the |
| 7806 | +stack pointer, greater than what the hardware enforces. The definition |
| 7807 | +is a C expression for the desired alignment (measured in bits). This |
| 7808 | +macro must evaluate to a value equal to or larger than |
| 7809 | +STACK_BOUNDARY. |
| 7810 | +*/ |
| 7811 | +#define PREFERRED_STACK_BOUNDARY (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 ) |
| 7812 | + |
| 7813 | +/* |
| 7814 | +Alignment required for a function entry point, in bits. |
| 7815 | +*/ |
| 7816 | +#define FUNCTION_BOUNDARY 16 |
| 7817 | + |
| 7818 | +/* |
| 7819 | +Biggest alignment that any data type can require on this machine, in bits. |
| 7820 | +*/ |
| 7821 | +#define BIGGEST_ALIGNMENT (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 ) |
| 7822 | + |
| 7823 | +/* |
| 7824 | +If defined, the smallest alignment, in bits, that can be given to an |
| 7825 | +object that can be referenced in one operation, without disturbing any |
| 7826 | +nearby object. Normally, this is BITS_PER_UNIT, but may be larger |
| 7827 | +on machines that don't have byte or half-word store operations. |
| 7828 | +*/ |
| 7829 | +#define MINIMUM_ATOMIC_ALIGNMENT BITS_PER_UNIT |
| 7830 | + |
| 7831 | + |
| 7832 | +/* |
| 7833 | +An integer expression for the size in bits of the largest integer machine mode that |
| 7834 | +should actually be used. All integer machine modes of this size or smaller can be |
| 7835 | +used for structures and unions with the appropriate sizes. If this macro is undefined, |
| 7836 | +GET_MODE_BITSIZE (DImode) is assumed.*/ |
| 7837 | +#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode) |
| 7838 | + |
| 7839 | + |
| 7840 | +/* |
| 7841 | +If defined, a C expression to compute the alignment given to a constant |
| 7842 | +that is being placed in memory. CONSTANT is the constant and |
| 7843 | +BASIC_ALIGN is the alignment that the object would ordinarily |
| 7844 | +have. The value of this macro is used instead of that alignment to |
| 7845 | +align the object. |
| 7846 | + |
| 7847 | +If this macro is not defined, then BASIC_ALIGN is used. |
| 7848 | + |
| 7849 | +The typical use of this macro is to increase alignment for string |
| 7850 | +constants to be word aligned so that strcpy calls that copy |
| 7851 | +constants can be done inline. |
| 7852 | +*/ |
| 7853 | +#define CONSTANT_ALIGNMENT(CONSTANT, BASIC_ALIGN) \ |
| 7854 | + ((TREE_CODE(CONSTANT) == STRING_CST) ? BITS_PER_WORD : BASIC_ALIGN) |
| 7855 | + |
| 7856 | +/* Try to align strings (char arrays) to a word boundary. */
| 7857 | +#define DATA_ALIGNMENT(TYPE, ALIGN) \ |
| 7858 | + ({(TREE_CODE (TYPE) == ARRAY_TYPE \ |
| 7859 | + && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \ |
| 7860 | + && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));}) |
| 7861 | + |
| 7862 | +/* Try to align strings (char arrays) on the stack to a word boundary. */
| 7863 | +#define LOCAL_ALIGNMENT(TYPE, ALIGN) \ |
| 7864 | + ({(TREE_CODE (TYPE) == ARRAY_TYPE \ |
| 7865 | + && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \ |
| 7866 | + && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));}) |
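|  | +/* Illustration (example declaration only): with the two macros above a
|  | +   local or file-scope object such as
|  | +
|  | +     char buf[13];
|  | +
|  | +   gets 32-bit alignment even though its type only requires byte
|  | +   alignment, so word-at-a-time copies of it stay aligned. */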
| 7867 | + |
| 7868 | +/* |
| 7869 | +Define this macro to be the value 1 if instructions will fail to work |
| 7870 | +if given data not on the nominal alignment. If instructions will merely |
| 7871 | +go slower in that case, define this macro as 0. |
| 7872 | +*/ |
| 7873 | +#define STRICT_ALIGNMENT 1 |
| 7874 | + |
| 7875 | +/* |
| 7876 | +Define this if you wish to imitate the way many other C compilers handle |
| 7877 | +alignment of bit-fields and the structures that contain them. |
| 7878 | + |
| 7879 | +The behavior is that the type written for a bit-field (int, |
| 7880 | +short, or other integer type) imposes an alignment for the |
| 7881 | +entire structure, as if the structure really did contain an ordinary |
| 7882 | +field of that type. In addition, the bit-field is placed within the |
| 7883 | +structure so that it would fit within such a field, not crossing a |
| 7884 | +boundary for it. |
| 7885 | + |
| 7886 | +Thus, on most machines, a bit-field whose type is written as int |
| 7887 | +would not cross a four-byte boundary, and would force four-byte |
| 7888 | +alignment for the whole structure. (The alignment used may not be four |
| 7889 | +bytes; it is controlled by the other alignment parameters.) |
| 7890 | + |
| 7891 | +If the macro is defined, its definition should be a C expression; |
| 7892 | +a nonzero value for the expression enables this behavior. |
| 7893 | + |
| 7894 | +Note that if this macro is not defined, or its value is zero, some |
| 7895 | +bit-fields may cross more than one alignment boundary. The compiler can |
| 7896 | +support such references if there are insv, extv, and |
| 7897 | +extzv insns that can directly reference memory. |
| 7898 | + |
| 7899 | +The other known way of making bit-fields work is to define |
| 7900 | +STRUCTURE_SIZE_BOUNDARY as large as BIGGEST_ALIGNMENT. |
| 7901 | +Then every structure can be accessed with fullwords. |
| 7902 | + |
| 7903 | +Unless the machine has bit-field instructions or you define |
| 7904 | +STRUCTURE_SIZE_BOUNDARY that way, you must define |
| 7905 | +PCC_BITFIELD_TYPE_MATTERS to have a nonzero value. |
| 7906 | + |
| 7907 | +If your aim is to make GCC use the same conventions for laying out |
| 7908 | +bit-fields as are used by another compiler, here is how to investigate |
| 7909 | +what the other compiler does. Compile and run this program: |
| 7910 | + |
| 7911 | +struct foo1 |
| 7912 | +{ |
| 7913 | + char x; |
| 7914 | + char :0; |
| 7915 | + char y; |
| 7916 | +}; |
| 7917 | + |
| 7918 | +struct foo2 |
| 7919 | +{ |
| 7920 | + char x; |
| 7921 | + int :0; |
| 7922 | + char y; |
| 7923 | +}; |
| 7924 | + |
| 7925 | +main () |
| 7926 | +{ |
| 7927 | + printf ("Size of foo1 is %d\n", |
| 7928 | + sizeof (struct foo1)); |
| 7929 | + printf ("Size of foo2 is %d\n", |
| 7930 | + sizeof (struct foo2)); |
| 7931 | + exit (0); |
| 7932 | +} |
| 7933 | + |
| 7934 | +If this prints 2 and 5, then the compiler's behavior is what you would |
| 7935 | +get from PCC_BITFIELD_TYPE_MATTERS. |
| 7936 | +*/ |
| 7937 | +#define PCC_BITFIELD_TYPE_MATTERS 1 |
| 7938 | + |
| 7939 | + |
| 7940 | +/****************************************************************************** |
| 7941 | + * Layout of Source Language Data Types |
| 7942 | + *****************************************************************************/ |
| 7943 | + |
| 7944 | +/* |
| 7945 | +A C expression for the size in bits of the type int on the |
| 7946 | +target machine. If you don't define this, the default is one word. |
| 7947 | +*/ |
| 7948 | +#define INT_TYPE_SIZE 32 |
| 7949 | + |
| 7950 | +/* |
| 7951 | +A C expression for the size in bits of the type short on the |
| 7952 | +target machine. If you don't define this, the default is half a word. (If |
| 7953 | +this would be less than one storage unit, it is rounded up to one unit.) |
| 7954 | +*/ |
| 7955 | +#define SHORT_TYPE_SIZE 16 |
| 7956 | + |
| 7957 | +/* |
| 7958 | +A C expression for the size in bits of the type long on the |
| 7959 | +target machine. If you don't define this, the default is one word. |
| 7960 | +*/ |
| 7961 | +#define LONG_TYPE_SIZE 32 |
| 7962 | + |
| 7963 | + |
| 7964 | +/* |
| 7965 | +A C expression for the size in bits of the type long long on the |
| 7966 | +target machine. If you don't define this, the default is two |
| 7967 | +words. If you want to support GNU Ada on your machine, the value of this |
| 7968 | +macro must be at least 64. |
| 7969 | +*/ |
| 7970 | +#define LONG_LONG_TYPE_SIZE 64 |
| 7971 | + |
| 7972 | +/* |
| 7973 | +A C expression for the size in bits of the type char on the |
| 7974 | +target machine. If you don't define this, the default is |
| 7975 | +BITS_PER_UNIT. |
| 7976 | +*/ |
| 7977 | +#define CHAR_TYPE_SIZE 8 |
| 7978 | + |
| 7979 | + |
| 7980 | +/* |
| 7981 | +A C expression for the size in bits of the C++ type bool and |
| 7982 | +C99 type _Bool on the target machine. If you don't define |
| 7983 | +this, and you probably shouldn't, the default is CHAR_TYPE_SIZE. |
| 7984 | +*/ |
| 7985 | +#define BOOL_TYPE_SIZE 8 |
| 7986 | + |
| 7987 | + |
| 7988 | +/* |
| 7989 | +An expression whose value is 1 or 0, according to whether the type |
| 7990 | +char should be signed or unsigned by default. The user can |
| 7991 | +always override this default with the options -fsigned-char |
| 7992 | +and -funsigned-char. |
| 7993 | +*/ |
| 7994 | +/* We are using unsigned char */ |
| 7995 | +#define DEFAULT_SIGNED_CHAR 0 |
| 7996 | + |
| 7997 | + |
| 7998 | +/* |
| 7999 | +A C expression for a string describing the name of the data type to use |
| 8000 | +for size values. The typedef name size_t is defined using the |
| 8001 | +contents of the string. |
| 8002 | + |
| 8003 | +The string can contain more than one keyword. If so, separate them with |
| 8004 | +spaces, and write first any length keyword, then unsigned if |
| 8005 | +appropriate, and finally int. The string must exactly match one |
| 8006 | +of the data type names defined in the function |
| 8007 | +init_decl_processing in the file c-decl.c. You may not |
| 8008 | +omit int or change the order - that would cause the compiler to |
| 8009 | +crash on startup. |
| 8010 | + |
| 8011 | +If you don't define this macro, the default is "long unsigned int". |
| 8012 | +*/ |
| 8013 | +#define SIZE_TYPE "long unsigned int" |
| 8014 | + |
| 8015 | +/* |
| 8016 | +A C expression for a string describing the name of the data type to use |
| 8017 | +for the result of subtracting two pointers. The typedef name |
| 8018 | +ptrdiff_t is defined using the contents of the string. See |
| 8019 | +SIZE_TYPE above for more information. |
| 8020 | + |
| 8021 | +If you don't define this macro, the default is "long int". |
| 8022 | +*/ |
| 8023 | +#define PTRDIFF_TYPE "long int" |
| 8024 | + |
| 8025 | + |
| 8026 | +/* |
| 8027 | +A C expression for the size in bits of the data type for wide |
| 8028 | +characters. This is used in cpp, which cannot make use of |
| 8029 | +WCHAR_TYPE. |
| 8030 | +*/ |
| 8031 | +#define WCHAR_TYPE_SIZE 32 |
| 8032 | + |
| 8033 | + |
| 8034 | +/* |
| 8035 | +A C expression for a string describing the name of the data type to |
| 8036 | +use for wide characters passed to printf and returned from |
| 8037 | +getwc. The typedef name wint_t is defined using the |
| 8038 | +contents of the string. See SIZE_TYPE above for more |
| 8039 | +information. |
| 8040 | + |
| 8041 | +If you don't define this macro, the default is "unsigned int". |
| 8042 | +*/ |
| 8043 | +#define WINT_TYPE "unsigned int" |
| 8044 | + |
| 8045 | +/* |
| 8046 | +A C expression for a string describing the name of the data type that |
| 8047 | +can represent any value of any standard or extended signed integer type. |
| 8048 | +The typedef name intmax_t is defined using the contents of the |
| 8049 | +string. See SIZE_TYPE above for more information. |
| 8050 | + |
| 8051 | +If you don't define this macro, the default is the first of |
| 8052 | +"int", "long int", or "long long int" that has as |
| 8053 | +much precision as long long int. |
| 8054 | +*/ |
| 8055 | +#define INTMAX_TYPE "long long int" |
| 8056 | + |
| 8057 | +/* |
| 8058 | +A C expression for a string describing the name of the data type that |
| 8059 | +can represent any value of any standard or extended unsigned integer |
| 8060 | +type. The typedef name uintmax_t is defined using the contents |
| 8061 | +of the string. See SIZE_TYPE above for more information. |
| 8062 | + |
| 8063 | +If you don't define this macro, the default is the first of |
| 8064 | +"unsigned int", "long unsigned int", or "long long unsigned int" |
| 8065 | +that has as much precision as long long unsigned int. |
| 8066 | +*/ |
| 8067 | +#define UINTMAX_TYPE "long long unsigned int" |
| 8068 | + |
| 8069 | + |
| 8070 | +/****************************************************************************** |
| 8071 | + * Register Usage |
| 8072 | + *****************************************************************************/ |
| 8073 | + |
| 8074 | +/* Convert from gcc internal register number to register number |
| 8075 | + used in assembly code */ |
| 8076 | +#define ASM_REGNUM(reg) (LAST_REGNUM - (reg)) |
| 8077 | +#define ASM_FP_REGNUM(reg) (LAST_FP_REGNUM - (reg)) |
| 8078 | + |
| 8079 | +/* Convert between register number used in assembly to gcc |
| 8080 | + internal register number */ |
| 8081 | +#define INTERNAL_REGNUM(reg) (LAST_REGNUM - (reg)) |
| 8082 | +#define INTERNAL_FP_REGNUM(reg) (LAST_FP_REGNUM - (reg)) |
| 8083 | + |
| 8084 | +/** Basic Characteristics of Registers **/ |
| 8085 | + |
| 8086 | +/* |
| 8087 | +Number of hardware registers known to the compiler. They receive |
| 8088 | +numbers 0 through FIRST_PSEUDO_REGISTER-1; thus, the first |
| 8089 | +pseudo register's number really is assigned the number |
| 8090 | +FIRST_PSEUDO_REGISTER. |
| 8091 | +*/ |
| 8092 | +#define FIRST_PSEUDO_REGISTER (LAST_FP_REGNUM + 1) |
| 8093 | + |
| 8094 | +#define FIRST_REGNUM 0 |
| 8095 | +#define LAST_REGNUM 15 |
| 8096 | +#define NUM_FP_REGS 16 |
| 8097 | +#define FIRST_FP_REGNUM 16 |
| 8098 | +#define LAST_FP_REGNUM (16+NUM_FP_REGS-1) |
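|  | +/* Worked example of the mapping above (the numbers follow directly
|  | +   from LAST_REGNUM == 15): assembly register r12 is internal register
|  | +   number 3, LR (r14) is internal number 1, SP (r13) is internal
|  | +   number 2 and r0 is internal number 15. The mapping is its own
|  | +   inverse, so INTERNAL_REGNUM (ASM_REGNUM (n)) == n. */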
| 8099 | + |
| 8100 | +/* |
| 8101 | +An initializer that says which registers are used for fixed purposes |
| 8102 | +all throughout the compiled code and are therefore not available for |
| 8103 | +general allocation. These would include the stack pointer, the frame |
| 8104 | +pointer (except on machines where that can be used as a general |
| 8105 | +register when no frame pointer is needed), the program counter on |
| 8106 | +machines where that is considered one of the addressable registers, |
| 8107 | +and any other numbered register with a standard use. |
| 8108 | + |
| 8109 | +This information is expressed as a sequence of numbers, separated by |
| 8110 | +commas and surrounded by braces. The nth number is 1 if |
| 8111 | +register n is fixed, 0 otherwise. |
| 8112 | + |
| 8113 | +The table initialized from this macro, and the table initialized by |
| 8114 | +the following one, may be overridden at run time either automatically, |
| 8115 | +by the actions of the macro CONDITIONAL_REGISTER_USAGE, or by |
| 8116 | +the user with the command options -ffixed-[reg], |
| 8117 | +-fcall-used-[reg] and -fcall-saved-[reg]. |
| 8118 | +*/ |
| 8119 | + |
| 8120 | +/* The internal gcc register numbers are reversed |
| 8121 | + compared to the real register numbers since |
| 8122 | + gcc expects data types stored over multiple |
| 8123 | + registers in the register file to be big endian |
| 8124 | + if the memory layout is big endian. But this |
| 8125 | + is not the case for avr32 so we fake a big |
| 8126 | + endian register file. */ |
| 8127 | + |
| 8128 | +#define FIXED_REGISTERS { \ |
| 8129 | + 1, /* Program Counter */ \ |
| 8130 | + 0, /* Link Register */ \ |
| 8131 | + 1, /* Stack Pointer */ \ |
| 8132 | + 0, /* r12 */ \ |
| 8133 | + 0, /* r11 */ \ |
| 8134 | + 0, /* r10 */ \ |
| 8135 | + 0, /* r9 */ \ |
| 8136 | + 0, /* r8 */ \ |
| 8137 | + 0, /* r7 */ \ |
| 8138 | + 0, /* r6 */ \ |
| 8139 | + 0, /* r5 */ \ |
| 8140 | + 0, /* r4 */ \ |
| 8141 | + 0, /* r3 */ \ |
| 8142 | + 0, /* r2 */ \ |
| 8143 | + 0, /* r1 */ \ |
| 8144 | + 0, /* r0 */ \ |
| 8145 | + 0, /* f15 */ \ |
| 8146 | + 0, /* f14 */ \ |
| 8147 | + 0, /* f13 */ \ |
| 8148 | + 0, /* f12 */ \ |
| 8149 | + 0, /* f11 */ \ |
| 8150 | + 0, /* f10 */ \ |
| 8151 | + 0, /* f9 */ \ |
| 8152 | + 0, /* f8 */ \ |
| 8153 | + 0, /* f7 */ \ |
| 8154 | + 0, /* f6 */ \ |
| 8155 | + 0, /* f5 */ \ |
| 8156 | + 0, /* f4 */ \ |
| 8157 | + 0, /* f3 */ \ |
| 8158 | + 0, /* f2*/ \ |
| 8159 | + 0, /* f1 */ \ |
| 8160 | + 0 /* f0 */ \ |
| 8161 | +} |
| 8162 | + |
| 8163 | +/* |
| 8164 | +Like FIXED_REGISTERS but has 1 for each register that is |
| 8165 | +clobbered (in general) by function calls as well as for fixed |
| 8166 | +registers. This macro therefore identifies the registers that are not |
| 8167 | +available for general allocation of values that must live across |
| 8168 | +function calls. |
| 8169 | + |
| 8170 | +If a register has 0 in CALL_USED_REGISTERS, the compiler |
| 8171 | +automatically saves it on function entry and restores it on function |
| 8172 | +exit, if the register is used within the function. |
| 8173 | +*/ |
| 8174 | +#define CALL_USED_REGISTERS { \ |
| 8175 | + 1, /* Program Counter */ \ |
| 8176 | + 0, /* Link Register */ \ |
| 8177 | + 1, /* Stack Pointer */ \ |
| 8178 | + 1, /* r12 */ \ |
| 8179 | + 1, /* r11 */ \ |
| 8180 | + 1, /* r10 */ \ |
| 8181 | + 1, /* r9 */ \ |
| 8182 | + 1, /* r8 */ \ |
| 8183 | + 0, /* r7 */ \ |
| 8184 | + 0, /* r6 */ \ |
| 8185 | + 0, /* r5 */ \ |
| 8186 | + 0, /* r4 */ \ |
| 8187 | + 0, /* r3 */ \ |
| 8188 | + 0, /* r2 */ \ |
| 8189 | + 0, /* r1 */ \ |
| 8190 | + 0, /* r0 */ \ |
| 8191 | + 1, /* f15 */ \ |
| 8192 | + 1, /* f14 */ \ |
| 8193 | + 1, /* f13 */ \ |
| 8194 | + 1, /* f12 */ \ |
| 8195 | + 1, /* f11 */ \ |
| 8196 | + 1, /* f10 */ \ |
| 8197 | + 1, /* f9 */ \ |
| 8198 | + 1, /* f8 */ \ |
| 8199 | + 0, /* f7 */ \ |
| 8200 | + 0, /* f6 */ \ |
| 8201 | + 0, /* f5 */ \ |
| 8202 | + 0, /* f4 */ \ |
| 8203 | + 0, /* f3 */ \ |
| 8204 | + 0, /* f2*/ \ |
| 8205 | + 0, /* f1*/ \ |
| 8206 | + 0, /* f0 */ \ |
| 8207 | +} |
| 8208 | + |
| 8209 | +/* Interrupt functions can only use registers that have already been |
| 8210 | + saved by the prologue, even if they would normally be |
| 8211 | + call-clobbered. */ |
| 8212 | +#define HARD_REGNO_RENAME_OK(SRC, DST) \ |
| 8213 | + (! IS_INTERRUPT (cfun->machine->func_type) || \ |
| 8214 | + regs_ever_live[DST]) |
| 8215 | + |
| 8216 | + |
| 8217 | +/* |
| 8218 | +Zero or more C statements that may conditionally modify five variables |
| 8219 | +fixed_regs, call_used_regs, global_regs, |
| 8220 | +reg_names, and reg_class_contents, to take into account |
| 8221 | +any dependence of these register sets on target flags. The first three |
| 8222 | +of these are of type char [] (interpreted as Boolean vectors). |
| 8223 | +global_regs is a const char *[], and |
| 8224 | +reg_class_contents is a HARD_REG_SET. Before the macro is |
| 8225 | +called, fixed_regs, call_used_regs, |
| 8226 | +reg_class_contents, and reg_names have been initialized |
| 8227 | +from FIXED_REGISTERS, CALL_USED_REGISTERS, |
| 8228 | +REG_CLASS_CONTENTS, and REGISTER_NAMES, respectively. |
| 8229 | +global_regs has been cleared, and any -ffixed-[reg], |
| 8230 | +-fcall-used-[reg] and -fcall-saved-[reg] |
| 8231 | +command options have been applied. |
| 8232 | + |
| 8233 | +You need not define this macro if it has no work to do. |
| 8234 | + |
| 8235 | +If the usage of an entire class of registers depends on the target |
| 8236 | +flags, you may indicate this to GCC by using this macro to modify |
| 8237 | +fixed_regs and call_used_regs to 1 for each of the |
| 8238 | +registers in the classes which should not be used by GCC. Also define |
| 8239 | +the macro REG_CLASS_FROM_LETTER to return NO_REGS if it |
| 8240 | +is called with a letter for a class that shouldn't be used. |
| 8241 | + |
| 8242 | + (However, if this class is not included in GENERAL_REGS and all |
| 8243 | +of the insn patterns whose constraints permit this class are |
| 8244 | +controlled by target switches, then GCC will automatically avoid using |
| 8245 | +these registers when the target switches are opposed to them.) |
| 8246 | +*/ |
| 8247 | +#define CONDITIONAL_REGISTER_USAGE \ |
| 8248 | + do \ |
| 8249 | + { \ |
| 8250 | + int regno; \ |
| 8251 | + \ |
| 8252 | + if (TARGET_SOFT_FLOAT) \ |
| 8253 | + { \ |
| 8254 | + for (regno = FIRST_FP_REGNUM; \ |
| 8255 | + regno <= LAST_FP_REGNUM; ++regno) \ |
| 8256 | + fixed_regs[regno] = call_used_regs[regno] = 1; \ |
| 8257 | + } \ |
| 8258 | + if (flag_pic) \ |
| 8259 | + { \ |
| 8260 | + fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \ |
| 8261 | + call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \ |
| 8262 | + } \ |
| 8263 | + } \ |
| 8264 | + while (0) |
| 8265 | + |
| 8266 | + |
| 8267 | +/* |
| 8268 | +If the program counter has a register number, define this as that |
| 8269 | +register number. Otherwise, do not define it. |
| 8270 | +*/ |
| 8271 | + |
| 8272 | +#define LAST_AVR32_REGNUM 16 |
| 8273 | + |
| 8274 | + |
| 8275 | +/** Order of Allocation of Registers **/ |
| 8276 | + |
| 8277 | +/* |
| 8278 | +If defined, an initializer for a vector of integers, containing the |
| 8279 | +numbers of hard registers in the order in which GCC should prefer |
| 8280 | +to use them (from most preferred to least). |
| 8281 | + |
| 8282 | +If this macro is not defined, registers are used lowest numbered first |
| 8283 | +(all else being equal). |
| 8284 | + |
| 8285 | +One use of this macro is on machines where the highest numbered |
| 8286 | +registers must always be saved and the save-multiple-registers |
| 8287 | +instruction supports only sequences of consecutive registers. On such |
| 8288 | +machines, define REG_ALLOC_ORDER to be an initializer that lists |
| 8289 | +the highest numbered allocable register first. |
| 8290 | +*/ |
| 8291 | +#define REG_ALLOC_ORDER \ |
| 8292 | +{ \ |
| 8293 | + INTERNAL_REGNUM(8), \ |
| 8294 | + INTERNAL_REGNUM(9), \ |
| 8295 | + INTERNAL_REGNUM(10), \ |
| 8296 | + INTERNAL_REGNUM(11), \ |
| 8297 | + INTERNAL_REGNUM(12), \ |
| 8298 | + LR_REGNUM, \ |
| 8299 | + INTERNAL_REGNUM(7), \ |
| 8300 | + INTERNAL_REGNUM(6), \ |
| 8301 | + INTERNAL_REGNUM(5), \ |
| 8302 | + INTERNAL_REGNUM(4), \ |
| 8303 | + INTERNAL_REGNUM(3), \ |
| 8304 | + INTERNAL_REGNUM(2), \ |
| 8305 | + INTERNAL_REGNUM(1), \ |
| 8306 | + INTERNAL_REGNUM(0), \ |
| 8307 | + INTERNAL_FP_REGNUM(15), \ |
| 8308 | + INTERNAL_FP_REGNUM(14), \ |
| 8309 | + INTERNAL_FP_REGNUM(13), \ |
| 8310 | + INTERNAL_FP_REGNUM(12), \ |
| 8311 | + INTERNAL_FP_REGNUM(11), \ |
| 8312 | + INTERNAL_FP_REGNUM(10), \ |
| 8313 | + INTERNAL_FP_REGNUM(9), \ |
| 8314 | + INTERNAL_FP_REGNUM(8), \ |
| 8315 | + INTERNAL_FP_REGNUM(7), \ |
| 8316 | + INTERNAL_FP_REGNUM(6), \ |
| 8317 | + INTERNAL_FP_REGNUM(5), \ |
| 8318 | + INTERNAL_FP_REGNUM(4), \ |
| 8319 | + INTERNAL_FP_REGNUM(3), \ |
| 8320 | + INTERNAL_FP_REGNUM(2), \ |
| 8321 | + INTERNAL_FP_REGNUM(1), \ |
| 8322 | + INTERNAL_FP_REGNUM(0), \ |
| 8323 | + SP_REGNUM, \ |
| 8324 | + PC_REGNUM \ |
| 8325 | +} |
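|  | +/* In assembly-level terms the order above is r8-r12, lr, r7 down to
|  | +   r0, f15 down to f0, sp, pc: the call-clobbered registers r8-r12
|  | +   are preferred for allocation and the fixed sp and pc come last. */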
| 8326 | + |
| 8327 | + |
| 8328 | +/** How Values Fit in Registers **/ |
| 8329 | + |
| 8330 | +/* |
| 8331 | +A C expression for the number of consecutive hard registers, starting |
| 8332 | +at register number REGNO, required to hold a value of mode |
| 8333 | +MODE. |
| 8334 | + |
| 8335 | +On a machine where all registers are exactly one word, a suitable |
| 8336 | +definition of this macro is |
| 8337 | + |
| 8338 | +#define HARD_REGNO_NREGS(REGNO, MODE) \ |
| 8339 | + ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \ |
| 8340 | + / UNITS_PER_WORD) |
| 8341 | +*/ |
| 8342 | +#define HARD_REGNO_NREGS(REGNO, MODE) \ |
| 8343 | + ((unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD -1 ) / UNITS_PER_WORD)) |
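|  | +/* For example, with UNITS_PER_WORD == 4 this gives 1 register for
|  | +   QImode, HImode, SImode and SFmode values and 2 consecutive
|  | +   registers for DImode and DFmode values. */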
| 8344 | + |
| 8345 | +/* |
| 8346 | +A C expression that is nonzero if it is permissible to store a value |
| 8347 | +of mode MODE in hard register number REGNO (or in several |
| 8348 | +registers starting with that one). For a machine where all registers |
| 8349 | +are equivalent, a suitable definition is |
| 8350 | + |
| 8351 | + #define HARD_REGNO_MODE_OK(REGNO, MODE) 1 |
| 8352 | + |
| 8353 | +You need not include code to check for the numbers of fixed registers, |
| 8354 | +because the allocation mechanism considers them to be always occupied. |
| 8355 | + |
| 8356 | +On some machines, double-precision values must be kept in even/odd |
| 8357 | +register pairs. You can implement that by defining this macro to reject |
| 8358 | +odd register numbers for such modes. |
| 8359 | + |
| 8360 | +The minimum requirement for a mode to be OK in a register is that the |
| 8361 | +mov[mode] instruction pattern support moves between the |
| 8362 | +register and other hard register in the same class and that moving a |
| 8363 | +value into the register and back out not alter it. |
| 8364 | + |
| 8365 | +Since the same instruction used to move word_mode will work for |
| 8366 | +all narrower integer modes, it is not necessary on any machine for |
| 8367 | +HARD_REGNO_MODE_OK to distinguish between these modes, provided |
| 8368 | +you define patterns movhi, etc., to take advantage of this. This |
| 8369 | +is useful because of the interaction between HARD_REGNO_MODE_OK |
| 8370 | +and MODES_TIEABLE_P; it is very desirable for all integer modes |
| 8371 | +to be tieable. |
| 8372 | + |
| 8373 | +Many machines have special registers for floating point arithmetic. |
| 8374 | +Often people assume that floating point machine modes are allowed only |
| 8375 | +in floating point registers. This is not true. Any registers that |
| 8376 | +can hold integers can safely hold a floating point machine |
| 8377 | +mode, whether or not floating arithmetic can be done on it in those |
| 8378 | +registers. Integer move instructions can be used to move the values. |
| 8379 | + |
| 8380 | +On some machines, though, the converse is true: fixed-point machine |
| 8381 | +modes may not go in floating registers. This is true if the floating |
| 8382 | +registers normalize any value stored in them, because storing a |
| 8383 | +non-floating value there would garble it. In this case, |
| 8384 | +HARD_REGNO_MODE_OK should reject fixed-point machine modes in |
| 8385 | +floating registers. But if the floating registers do not automatically |
| 8386 | +normalize, if you can store any bit pattern in one and retrieve it |
| 8387 | +unchanged without a trap, then any machine mode may go in a floating |
| 8388 | +register, so you can define this macro to say so. |
| 8389 | + |
| 8390 | +The primary significance of special floating registers is rather that |
| 8391 | +they are the registers acceptable in floating point arithmetic |
| 8392 | +instructions. However, this is of no concern to |
| 8393 | +HARD_REGNO_MODE_OK. You handle it by writing the proper |
| 8394 | +constraints for those instructions. |
| 8395 | + |
| 8396 | +On some machines, the floating registers are especially slow to access, |
| 8397 | +so that it is better to store a value in a stack frame than in such a |
| 8398 | +register if floating point arithmetic is not being done. As long as the |
| 8399 | +floating registers are not in class GENERAL_REGS, they will not |
| 8400 | +be used unless some pattern's constraint asks for one. |
| 8401 | +*/ |
| 8402 | +#define HARD_REGNO_MODE_OK(REGNO, MODE) avr32_hard_regno_mode_ok(REGNO, MODE) |
| 8403 | + |
| 8404 | +/* |
| 8405 | +A C expression that is nonzero if a value of mode |
| 8406 | +MODE1 is accessible in mode MODE2 without copying. |
| 8407 | + |
| 8408 | +If HARD_REGNO_MODE_OK(R, MODE1) and |
| 8409 | +HARD_REGNO_MODE_OK(R, MODE2) are always the same for |
| 8410 | +any R, then MODES_TIEABLE_P(MODE1, MODE2) |
| 8411 | +should be nonzero. If they differ for any R, you should define |
| 8412 | +this macro to return zero unless some other mechanism ensures the |
| 8413 | +accessibility of the value in a narrower mode. |
| 8414 | + |
| 8415 | +You should define this macro to return nonzero in as many cases as |
| 8416 | +possible since doing so will allow GCC to perform better register |
| 8417 | +allocation. |
| 8418 | +*/ |
| 8419 | +#define MODES_TIEABLE_P(MODE1, MODE2) \ |
| 8420 | + (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2)) |
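|  | +/* For example, QImode, HImode and SImode values are tieable with each
|  | +   other since they are all MODE_INT, while SImode and SFmode are not,
|  | +   as they belong to different mode classes. */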
| 8421 | + |
| 8422 | + |
| 8423 | + |
| 8424 | +/****************************************************************************** |
| 8425 | + * Register Classes |
| 8426 | + *****************************************************************************/ |
| 8427 | + |
| 8428 | +/* |
| 8429 | +An enumeral type that must be defined with all the register class names |
| 8430 | +as enumeral values. NO_REGS must be first. ALL_REGS |
| 8431 | +must be the last register class, followed by one more enumeral value, |
| 8432 | +LIM_REG_CLASSES, which is not a register class but rather |
| 8433 | +tells how many classes there are. |
| 8434 | + |
| 8435 | +Each register class has a number, which is the value of casting |
| 8436 | +the class name to type int. The number serves as an index |
| 8437 | +in many of the tables described below. |
| 8438 | +*/ |
| 8439 | +enum reg_class |
| 8440 | +{ |
| 8441 | + NO_REGS, |
| 8442 | + GENERAL_REGS, |
| 8443 | + FP_REGS, |
| 8444 | + ALL_REGS, |
| 8445 | + LIM_REG_CLASSES |
| 8446 | +}; |
| 8447 | + |
| 8448 | +/* |
| 8449 | +The number of distinct register classes, defined as follows: |
| 8450 | + #define N_REG_CLASSES (int) LIM_REG_CLASSES |
| 8451 | +*/ |
| 8452 | +#define N_REG_CLASSES (int)LIM_REG_CLASSES |
| 8453 | + |
| 8454 | +/* |
| 8455 | +An initializer containing the names of the register classes as C string |
| 8456 | +constants. These names are used in writing some of the debugging dumps. |
| 8457 | +*/ |
| 8458 | +#define REG_CLASS_NAMES \ |
| 8459 | +{ \ |
| 8460 | + "NO_REGS", \ |
| 8461 | + "GENERAL_REGS", \ |
| 8462 | + "FLOATING_POINT_REGS", \ |
| 8463 | + "ALL_REGS" \ |
| 8464 | +} |
| 8465 | + |
| 8466 | +/* |
| 8467 | +An initializer containing the contents of the register classes, as integers |
| 8468 | +which are bit masks. The nth integer specifies the contents of class |
| 8469 | +n. The way the integer mask is interpreted is that |
| 8470 | +register r is in the class if mask & (1 << r) is 1. |
| 8471 | + |
| 8472 | +When the machine has more than 32 registers, an integer does not suffice. |
| 8473 | +Then the integers are replaced by sub-initializers, braced groupings containing |
| 8474 | +several integers. Each sub-initializer must be suitable as an initializer |
| 8475 | +for the type HARD_REG_SET which is defined in hard-reg-set.h. |
| 8476 | +In this situation, the first integer in each sub-initializer corresponds to |
| 8477 | +registers 0 through 31, the second integer to registers 32 through 63, and |
| 8478 | +so on. |
| 8479 | +*/ |
| 8480 | +#define REG_CLASS_CONTENTS { \ |
| 8481 | + {0x00000000}, /* NO_REGS */ \ |
| 8482 | + {0x0000FFFF}, /* GENERAL_REGS */ \ |
| 8483 | + {0xFFFF0000}, /* FP_REGS */ \ |
| 8484 | + {0x7FFFFFFF}, /* ALL_REGS */ \ |
| 8485 | +} |
| 8486 | + |
| 8487 | + |
| 8488 | +/* |
| 8489 | +A C expression whose value is a register class containing hard register |
| 8490 | +REGNO. In general there is more than one such class; choose a class |
| 8491 | +which is minimal, meaning that no smaller class also contains the |
| 8492 | +register. |
| 8493 | +*/ |
| 8494 | +#define REGNO_REG_CLASS(REGNO) (((REGNO) < 16) ? GENERAL_REGS : FP_REGS)
| 8495 | + |
| 8496 | +/* |
| 8497 | +A macro whose definition is the name of the class to which a valid |
| 8498 | +base register must belong. A base register is one used in an address |
| 8499 | +which is the register value plus a displacement. |
| 8500 | +*/ |
| 8501 | +#define BASE_REG_CLASS GENERAL_REGS |
| 8502 | + |
| 8503 | +/* |
| 8504 | +This is a variation of the BASE_REG_CLASS macro which allows |
| 8505 | +the selection of a base register in a mode dependent manner. If
| 8506 | +mode is VOIDmode then it should return the same value as |
| 8507 | +BASE_REG_CLASS. |
| 8508 | +*/ |
| 8509 | +#define MODE_BASE_REG_CLASS(MODE) BASE_REG_CLASS |
| 8510 | + |
| 8511 | +/* |
| 8512 | +A macro whose definition is the name of the class to which a valid |
| 8513 | +index register must belong. An index register is one used in an |
| 8514 | +address where its value is either multiplied by a scale factor or |
| 8515 | +added to another register (as well as added to a displacement). |
| 8516 | +*/ |
| 8517 | +#define INDEX_REG_CLASS BASE_REG_CLASS |
| 8518 | + |
| 8519 | +/* |
| 8520 | +A C expression which defines the machine-dependent operand constraint |
| 8521 | +letters for register classes. If CHAR is such a letter, the |
| 8522 | +value should be the register class corresponding to it. Otherwise, |
| 8523 | +the value should be NO_REGS. The register letter r, |
| 8524 | +corresponding to class GENERAL_REGS, will not be passed |
| 8525 | +to this macro; you do not need to handle it. |
| 8526 | +*/ |
| 8527 | +#define REG_CLASS_FROM_LETTER(CHAR) ((CHAR) == 'f' ? FP_REGS : NO_REGS) |
| 8528 | + |
| 8529 | + |
| 8530 | +/* These assume that REGNO is a hard or pseudo reg number. |
| 8531 | + They give nonzero only if REGNO is a hard reg of the suitable class |
| 8532 | + or a pseudo reg currently allocated to a suitable hard reg. |
| 8533 | + Since they use reg_renumber, they are safe only once reg_renumber |
| 8534 | + has been allocated, which happens in local-alloc.c. */ |
| 8535 | +#define TEST_REGNO(R, TEST, VALUE) \ |
| 8536 | + ((R TEST VALUE) || ((unsigned) reg_renumber[R] TEST VALUE)) |
| 8537 | + |
| 8538 | +/* |
| 8539 | +A C expression which is nonzero if register number num is suitable for use as a base |
| 8540 | +register in operand addresses. It may be either a suitable hard register or a pseudo |
| 8541 | +register that has been allocated such a hard register. |
| 8542 | +*/ |
| 8543 | +#define REGNO_OK_FOR_BASE_P(NUM) TEST_REGNO(NUM, <=, LAST_REGNUM) |
| 8544 | + |
| 8545 | +/* |
| 8546 | +A C expression which is nonzero if register number NUM is |
| 8547 | +suitable for use as an index register in operand addresses. It may be |
| 8548 | +either a suitable hard register or a pseudo register that has been |
| 8549 | +allocated such a hard register. |
| 8550 | + |
| 8551 | +The difference between an index register and a base register is that |
| 8552 | +the index register may be scaled. If an address involves the sum of |
| 8553 | +two registers, neither one of them scaled, then either one may be |
| 8554 | +labeled the ``base'' and the other the ``index''; but whichever |
| 8555 | +labeling is used must fit the machine's constraints of which registers |
| 8556 | +may serve in each capacity. The compiler will try both labelings, |
| 8557 | +looking for one that is valid, and will reload one or both registers |
| 8558 | +only if neither labeling works. |
| 8559 | +*/ |
| 8560 | +#define REGNO_OK_FOR_INDEX_P(NUM) TEST_REGNO(NUM, <=, LAST_REGNUM) |
| 8561 | + |
| 8562 | +/* |
| 8563 | +A C expression that places additional restrictions on the register class |
| 8564 | +to use when it is necessary to copy value X into a register in class |
| 8565 | +CLASS. The value is a register class; perhaps CLASS, or perhaps |
| 8566 | +another, smaller class. On many machines, the following definition is |
| 8567 | +safe: #define PREFERRED_RELOAD_CLASS(X,CLASS) CLASS |
| 8568 | + |
| 8569 | +Sometimes returning a more restrictive class makes better code. For |
| 8570 | +example, on the 68000, when X is an integer constant that is in range |
| 8571 | +for a 'moveq' instruction, the value of this macro is always |
| 8572 | +DATA_REGS as long as CLASS includes the data registers. |
| 8573 | +Requiring a data register guarantees that a 'moveq' will be used. |
| 8574 | + |
| 8575 | +If X is a const_double, by returning NO_REGS |
| 8576 | +you can force X into a memory constant. This is useful on |
| 8577 | +certain machines where immediate floating values cannot be loaded into |
| 8578 | +certain kinds of registers. |
| 8579 | +*/ |
| 8580 | +#define PREFERRED_RELOAD_CLASS(X, CLASS) CLASS |
| 8581 | + |
| 8582 | + |
| 8583 | + |
| 8584 | +/* |
| 8585 | +A C expression for the maximum number of consecutive registers |
| 8586 | +of class CLASS needed to hold a value of mode MODE. |
| 8587 | + |
| 8588 | +This is closely related to the macro HARD_REGNO_NREGS. In fact, |
| 8589 | +the value of the macro CLASS_MAX_NREGS(CLASS, MODE) |
| 8590 | +should be the maximum value of HARD_REGNO_NREGS(REGNO, MODE) |
| 8591 | +for all REGNO values in the class CLASS. |
| 8592 | + |
| 8593 | +This macro helps control the handling of multiple-word values |
| 8594 | +in the reload pass. |
| 8595 | +*/ |
| 8596 | +#define CLASS_MAX_NREGS(CLASS, MODE) /* ToDo:fixme */ \ |
| 8597 | + (unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD) |
| 8598 | + |
| 8599 | + |
| 8600 | +/* |
| 8601 | + Using CONST_OK_FOR_CONSTRAINT_P instead of CONST_OK_FOR_LETTER_P
| 8602 | + in order to support constraints with more than one letter. |
| 8603 | + Only two letters are then used for constant constraints, |
| 8604 | + the letter 'K' and the letter 'I'. The constraint starting with |
| 8605 | + these letters must consist of four characters. The character following |
| 8606 | + 'K' or 'I' must be either 'u' (unsigned) or 's' (signed) to specify |
| 8607 | + if the constant is zero or sign extended. The last two characters specify |
| 8608 | + the length in bits of the constant. The base constraint letter 'I' means |
| 8609 | + that this is a negated constant, meaning that actually -VAL should be
| 8610 | + checked to lie within the valid range instead of VAL which is used when
| 8611 | + 'K' is the base constraint letter. |
| 8612 | + |
| 8613 | +*/ |
| 8614 | + |
| 8615 | +#define CONSTRAINT_LEN(C, STR) \ |
| 8616 | + ( ((C) == 'K' || (C) == 'I') ? 4 : \ |
| 8617 | + ((C) == 'R') ? 5 : \ |
| 8618 | + ((C) == 'N' || (C) == 'O' || \ |
| 8619 | + (C) == 'P' || (C) == 'L' || (C) == 'J') ? -1 : \ |
| 8620 | + DEFAULT_CONSTRAINT_LEN((C), (STR)) ) |
| 8621 | + |
| 8622 | +#define CONST_OK_FOR_CONSTRAINT_P(VALUE, C, STR) \ |
| 8623 | + avr32_const_ok_for_constraint_p(VALUE, C, STR) |
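|  | +/* Examples of the constraint naming scheme described above (the
|  | +   widths are illustrative; the ones actually used are defined by the
|  | +   machine description): "Ks16" accepts a constant that fits in a
|  | +   signed 16-bit field, "Ku05" one that fits in an unsigned 5-bit
|  | +   field, and "Is16" a constant whose negation fits in a signed
|  | +   16-bit field. */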
| 8624 | + |
| 8625 | +/* |
| 8626 | +A C expression that defines the machine-dependent operand constraint |
| 8627 | +letters that specify particular ranges of const_double values ('G' or 'H'). |
| 8628 | + |
| 8629 | +If C is one of those letters, the expression should check that |
| 8630 | +VALUE, an RTX of code const_double, is in the appropriate |
| 8631 | +range and return 1 if so, 0 otherwise. If C is not one of those |
| 8632 | +letters, the value should be 0 regardless of VALUE. |
| 8633 | + |
| 8634 | +const_double is used for all floating-point constants and for |
| 8635 | +DImode fixed-point constants. A given letter can accept either |
| 8636 | +or both kinds of values. It can use GET_MODE to distinguish |
| 8637 | +between these kinds. |
| 8638 | +*/ |
| 8639 | +#define CONST_DOUBLE_OK_FOR_LETTER_P(OP, C) \ |
| 8640 | + ((C) == 'G' ? avr32_const_double_immediate(OP) : 0) |
| 8641 | + |
| 8642 | +/* |
| 8643 | +A C expression that defines the optional machine-dependent constraint |
| 8644 | +letters that can be used to segregate specific types of operands, usually |
| 8645 | +memory references, for the target machine. Any letter that is not |
| 8646 | +elsewhere defined and not matched by REG_CLASS_FROM_LETTER |
| 8647 | +may be used. Normally this macro will not be defined. |
| 8648 | + |
| 8649 | +If it is required for a particular target machine, it should return 1 |
| 8650 | +if VALUE corresponds to the operand type represented by the |
| 8651 | +constraint letter C. If C is not defined as an extra |
| 8652 | +constraint, the value returned should be 0 regardless of VALUE. |
| 8653 | + |
| 8654 | +For example, on the ROMP, load instructions cannot have their output |
| 8655 | +in r0 if the memory reference contains a symbolic address. Constraint |
| 8656 | +letter 'Q' is defined as representing a memory address that does |
| 8657 | +not contain a symbolic address. An alternative is specified with |
| 8658 | +a 'Q' constraint on the input and 'r' on the output. The next |
| 8659 | +alternative specifies 'm' on the input and a register class that |
| 8660 | +does not include r0 on the output. |
| 8661 | +*/ |
| 8662 | +#define EXTRA_CONSTRAINT_STR(OP, C, STR) \ |
| 8663 | + ((C) == 'W' ? avr32_address_operand(OP, GET_MODE(OP)) : \ |
| 8664 | + (C) == 'R' ? (avr32_indirect_register_operand(OP, GET_MODE(OP)) || \ |
| 8665 | + (avr32_imm_disp_memory_operand(OP, GET_MODE(OP)) \ |
| 8666 | + && avr32_const_ok_for_constraint_p( \ |
| 8667 | + INTVAL(XEXP(XEXP(OP, 0), 1)), \ |
| 8668 | + (STR)[1], &(STR)[1]))) : \ |
| 8669 | + (C) == 'S' ? avr32_indexed_memory_operand(OP, GET_MODE(OP)) : \ |
| 8670 | + (C) == 'T' ? avr32_const_pool_ref_operand(OP, GET_MODE(OP)) : \ |
| 8671 | + (C) == 'U' ? SYMBOL_REF_RCALL_FUNCTION_P(OP) : \ |
| 8672 | + (C) == 'Z' ? avr32_cop_memory_operand(OP, GET_MODE(OP)) : \ |
| 8673 | + 0) |
| 8674 | + |
| 8675 | + |
| 8676 | +#define EXTRA_MEMORY_CONSTRAINT(C, STR) ( ((C) == 'R') || \ |
| 8677 | + ((C) == 'S') || \ |
| 8678 | + ((C) == 'Z') ) |
| 8679 | + |
| 8680 | + |
| 8681 | +/* Returns nonzero if op is a function SYMBOL_REF which |
| 8682 | + can be called using an rcall instruction */ |
| 8683 | +#define SYMBOL_REF_RCALL_FUNCTION_P(op) \ |
| 8684 | + ( GET_CODE(op) == SYMBOL_REF \ |
| 8685 | + && SYMBOL_REF_FUNCTION_P(op) \ |
| 8686 | + && SYMBOL_REF_LOCAL_P(op) \ |
| 8687 | + && !SYMBOL_REF_EXTERNAL_P(op) \ |
| 8688 | + && !TARGET_HAS_ASM_ADDR_PSEUDOS ) |
| 8689 | + |
| 8690 | +/****************************************************************************** |
| 8691 | + * Stack Layout and Calling Conventions |
| 8692 | + *****************************************************************************/ |
| 8693 | + |
| 8694 | +/** Basic Stack Layout **/ |
| 8695 | + |
| 8696 | +/* |
| 8697 | +Define this macro if pushing a word onto the stack moves the stack |
| 8698 | +pointer to a smaller address. |
| 8699 | + |
| 8700 | +When we say, ``define this macro if ...,'' it means that the |
| 8701 | +compiler checks this macro only with #ifdef so the precise |
| 8702 | +definition used does not matter. |
| 8703 | +*/ |
| 8704 | +/* pushm decrements SP: *(--SP) <-- Rx */
| 8705 | +#define STACK_GROWS_DOWNWARD |
| 8706 | + |
| 8707 | +/* |
| 8708 | +This macro defines the operation used when something is pushed |
| 8709 | +on the stack. In RTL, a push operation will be |
| 8710 | +(set (mem (STACK_PUSH_CODE (reg sp))) ...) |
| 8711 | + |
| 8712 | +The choices are PRE_DEC, POST_DEC, PRE_INC, |
| 8713 | +and POST_INC. Which of these is correct depends on |
| 8714 | +the stack direction and on whether the stack pointer points |
| 8715 | +to the last item on the stack or whether it points to the |
| 8716 | +space for the next item on the stack. |
| 8717 | + |
| 8718 | +The default is PRE_DEC when STACK_GROWS_DOWNWARD is |
| 8719 | +defined, which is almost always right, and PRE_INC otherwise, |
| 8720 | +which is often wrong. |
| 8721 | +*/ |
| 8722 | +/* pushm: *(--SP) <-- Rx */ |
| 8723 | +#define STACK_PUSH_CODE PRE_DEC |
| 8724 | + |
| 8725 | +/* Define this to nonzero if the nominal address of the stack frame |
| 8726 | + is at the high-address end of the local variables; |
| 8727 | + that is, each additional local variable allocated |
| 8728 | + goes at a more negative offset in the frame. */ |
| 8729 | +#define FRAME_GROWS_DOWNWARD 1 |
| 8730 | + |
| 8731 | + |
| 8732 | +/* |
| 8733 | +Offset from the frame pointer to the first local variable slot to be allocated. |
| 8734 | + |
| 8735 | +If FRAME_GROWS_DOWNWARD, find the next slot's offset by |
| 8736 | +subtracting the first slot's length from STARTING_FRAME_OFFSET. |
| 8737 | +Otherwise, it is found by adding the length of the first slot to the |
| 8738 | +value STARTING_FRAME_OFFSET. |
| 8741 | +*/ |
| 8742 | +#define STARTING_FRAME_OFFSET 0 |
| 8743 | + |
| 8744 | +/* |
| 8745 | +Offset from the stack pointer register to the first location at which |
| 8746 | +outgoing arguments are placed. If not specified, the default value of |
| 8747 | +zero is used. This is the proper value for most machines. |
| 8748 | + |
| 8749 | +If ARGS_GROW_DOWNWARD, this is the offset to the location above |
| 8750 | +the first location at which outgoing arguments are placed. |
| 8751 | +*/ |
| 8752 | +#define STACK_POINTER_OFFSET 0 |
| 8753 | + |
| 8754 | +/* |
| 8755 | +Offset from the argument pointer register to the first argument's |
| 8756 | +address. On some machines it may depend on the data type of the |
| 8757 | +function. |
| 8758 | + |
| 8759 | +If ARGS_GROW_DOWNWARD, this is the offset to the location above |
| 8760 | +the first argument's address. |
| 8761 | +*/ |
| 8762 | +#define FIRST_PARM_OFFSET(FUNDECL) 0 |
| 8763 | + |
| 8764 | + |
| 8765 | +/* |
| 8766 | +A C expression whose value is RTL representing the address in a stack |
| 8767 | +frame where the pointer to the caller's frame is stored. Assume that |
| 8768 | +FRAMEADDR is an RTL expression for the address of the stack frame |
| 8769 | +itself. |
| 8770 | + |
| 8771 | +If you don't define this macro, the default is to return the value |
| 8772 | +of FRAMEADDR - that is, the stack frame address is also the |
| 8773 | +address of the stack word that points to the previous frame. |
| 8774 | +*/ |
| 8775 | +#define DYNAMIC_CHAIN_ADDRESS(FRAMEADDR) plus_constant ((FRAMEADDR), 4) |
| 8776 | + |
| 8777 | + |
| 8778 | +/* |
| 8779 | +A C expression whose value is RTL representing the value of the return |
| 8780 | +address for the frame COUNT steps up from the current frame, after |
| 8781 | +the prologue. FRAMEADDR is the frame pointer of the COUNT |
| 8782 | +frame, or the frame pointer of the COUNT - 1 frame if |
| 8783 | +RETURN_ADDR_IN_PREVIOUS_FRAME is defined. |
| 8784 | + |
| 8785 | +The value of the expression must always be the correct address when |
| 8786 | +COUNT is zero, but may be NULL_RTX if there is no way to
| 8787 | +determine the return address of other frames. |
| 8788 | +*/ |
| 8789 | +#define RETURN_ADDR_RTX(COUNT, FRAMEADDR) avr32_return_addr(COUNT, FRAMEADDR) |
| 8790 | + |
| 8791 | + |
| 8792 | +/* |
| 8793 | +A C expression whose value is RTL representing the location of the |
| 8794 | +incoming return address at the beginning of any function, before the |
| 8795 | +prologue. This RTL is either a REG, indicating that the return |
| 8796 | +value is saved in 'REG', or a MEM representing a location in |
| 8797 | +the stack. |
| 8798 | + |
| 8799 | +You only need to define this macro if you want to support call frame |
| 8800 | +debugging information like that provided by DWARF 2. |
| 8801 | + |
| 8802 | +If this RTL is a REG, you should also define |
| 8803 | +DWARF_FRAME_RETURN_COLUMN to DWARF_FRAME_REGNUM (REGNO). |
| 8804 | +*/ |
| 8805 | +#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM) |
| 8806 | + |
| 8807 | + |
| 8808 | + |
| 8809 | +/* |
| 8810 | +A C expression whose value is an integer giving the offset, in bytes, |
| 8811 | +from the value of the stack pointer register to the top of the stack |
| 8812 | +frame at the beginning of any function, before the prologue. The top of |
| 8813 | +the frame is defined to be the value of the stack pointer in the |
| 8814 | +previous frame, just before the call instruction. |
| 8815 | + |
| 8816 | +You only need to define this macro if you want to support call frame |
| 8817 | +debugging information like that provided by DWARF 2. |
| 8818 | +*/ |
| 8819 | +#define INCOMING_FRAME_SP_OFFSET 0 |
| 8820 | + |
| 8821 | + |
| 8822 | +/** Exception Handling Support **/ |
| 8823 | + |
| 8824 | +#define DWARF2_UNWIND_INFO 1 |
| 8825 | + |
| 8826 | +/* |
| 8827 | +A C expression whose value is the Nth register number used for |
| 8828 | +data by exception handlers, or INVALID_REGNUM if fewer than |
| 8829 | +N registers are usable. |
| 8830 | + |
| 8831 | +The exception handling library routines communicate with the exception |
| 8832 | +handlers via a set of agreed upon registers. Ideally these registers |
| 8833 | +should be call-clobbered; it is possible to use call-saved registers, |
| 8834 | +but may negatively impact code size. The target must support at least |
| 8835 | +2 data registers, but should define 4 if there are enough free registers. |
| 8836 | + |
| 8837 | +You must define this macro if you want to support call frame exception |
| 8838 | +handling like that provided by DWARF 2. |
| 8839 | +*/ |
| 8840 | +/* |
| 8841 | + Use r8-r11 |
| 8842 | +*/ |
| 8843 | +#define EH_RETURN_DATA_REGNO(N) \ |
| 8844 | + ((N) < 4 ? INTERNAL_REGNUM((N) + 8U) : INVALID_REGNUM) |
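|  | +/* With INTERNAL_REGNUM as defined above this maps N = 0..3 to the
|  | +   internal register numbers 7, 6, 5 and 4, i.e. the assembly
|  | +   registers r8-r11; any larger N yields INVALID_REGNUM. */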
| 8845 | + |
| 8846 | +/* |
| 8847 | +A C expression whose value is RTL representing a location in which |
| 8848 | +to store a stack adjustment to be applied before function return. |
| 8849 | +This is used to unwind the stack to an exception handler's call frame. |
| 8850 | +It will be assigned zero on code paths that return normally. |
| 8851 | + |
| 8852 | +Typically this is a call-clobbered hard register that is otherwise |
| 8853 | +untouched by the epilogue, but could also be a stack slot. |
| 8854 | + |
| 8855 | +You must define this macro if you want to support call frame exception |
| 8856 | +handling like that provided by DWARF 2. |
| 8857 | +*/ |
| 8858 | +/* |
| 8859 | + I don't think functions that may throw exceptions can ever be leaf |
| 8860 | + functions, so we may safely use LR for this. |
| 8861 | +*/ |
| 8862 | +#define EH_RETURN_STACKADJ_REGNO LR_REGNUM |
| 8863 | +#define EH_RETURN_STACKADJ_RTX gen_rtx_REG(SImode, EH_RETURN_STACKADJ_REGNO) |
| 8864 | + |
| 8865 | +/* |
| 8866 | +A C expression whose value is RTL representing a location in which |
| 8867 | +to store the address of an exception handler to which we should |
| 8868 | +return. It will not be assigned on code paths that return normally. |
| 8869 | + |
| 8870 | +Typically this is the location in the call frame at which the normal |
| 8871 | +return address is stored. For targets that return by popping an |
| 8872 | +address off the stack, this might be a memory address just below |
| 8873 | +the target call frame rather than inside the current call |
| 8874 | +frame. EH_RETURN_STACKADJ_RTX will have already been assigned, |
| 8875 | +so it may be used to calculate the location of the target call frame. |
| 8876 | + |
| 8877 | +Some targets have more complex requirements than storing to an |
| 8878 | +address calculable during initial code generation. In that case |
| 8879 | +the eh_return instruction pattern should be used instead. |
| 8880 | + |
| 8881 | +If you want to support call frame exception handling, you must |
| 8882 | +define either this macro or the eh_return instruction pattern. |
| 8883 | +*/ |
| 8884 | +/* |
| 8885 | + We define the eh_return instruction pattern, so this isn't needed. |
| 8886 | +*/ |
| 8887 | +/* #define EH_RETURN_HANDLER_RTX gen_rtx_REG(Pmode, RET_REGISTER) */ |
| 8888 | + |
| 8889 | +/* |
| 8890 | + This macro chooses the encoding of pointers embedded in the |
| 8891 | + exception handling sections. If at all possible, this should be |
| 8892 | + defined such that the exception handling section will not require |
| 8893 | + dynamic relocations, and so may be read-only. |
| 8894 | + |
| 8895 | + code is 0 for data, 1 for code labels, 2 for function |
| 8896 | + pointers. global is true if the symbol may be affected by dynamic |
| 8897 | + relocations. The macro should return a combination of the DW_EH_PE_* |
| 8898 | + defines as found in dwarf2.h. |
| 8899 | + |
| 8900 | + If this macro is not defined, pointers will not be encoded but |
| 8901 | + represented directly. |
| 8902 | +*/ |
| 8903 | +#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \ |
| 8904 | + ((flag_pic && (GLOBAL) ? DW_EH_PE_indirect : 0) \ |
| 8905 | + | (flag_pic ? DW_EH_PE_pcrel : DW_EH_PE_absptr) \ |
| 8906 | + | DW_EH_PE_sdata4) |
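| | +/*
| | +  What the expression above selects, spelled out:
| | +    non-PIC code:        DW_EH_PE_absptr | DW_EH_PE_sdata4
| | +    PIC, local symbol:   DW_EH_PE_pcrel | DW_EH_PE_sdata4
| | +    PIC, global symbol:  DW_EH_PE_indirect | DW_EH_PE_pcrel | DW_EH_PE_sdata4
| | +*/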
| 8907 | + |
| 8908 | +/* ToDo: The rest of this subsection */ |
| 8909 | + |
| 8910 | +/** Specifying How Stack Checking is Done **/ |
| 8911 | +/* ToDo: All in this subsection */ |
| 8912 | + |
| 8913 | +/** Registers That Address the Stack Frame **/ |
| 8914 | + |
| 8915 | +/* |
| 8916 | +The register number of the stack pointer register, which must also be a |
| 8917 | +fixed register according to FIXED_REGISTERS. On most machines, |
| 8918 | +the hardware determines which register this is. |
| 8919 | +*/ |
| 8920 | +/* Using r13 as stack pointer. */ |
| 8921 | +#define STACK_POINTER_REGNUM INTERNAL_REGNUM(13) |
| 8922 | + |
| 8923 | +/* |
| 8924 | +The register number of the frame pointer register, which is used to |
| 8925 | +access automatic variables in the stack frame. On some machines, the |
| 8926 | +hardware determines which register this is. On other machines, you can |
| 8927 | +choose any register you wish for this purpose. |
| 8928 | +*/ |
| 8929 | +/* Use r7 */ |
| 8930 | +#define FRAME_POINTER_REGNUM INTERNAL_REGNUM(7) |
| 8931 | + |
| 8932 | + |
| 8933 | + |
| 8934 | +/* |
| 8935 | +The register number of the arg pointer register, which is used to access |
| 8936 | +the function's argument list. On some machines, this is the same as the |
| 8937 | +frame pointer register. On some machines, the hardware determines which |
| 8938 | +register this is. On other machines, you can choose any register you |
| 8939 | +wish for this purpose. If this is not the same register as the frame |
| 8940 | +pointer register, then you must mark it as a fixed register according to |
| 8941 | +FIXED_REGISTERS, or arrange to be able to eliminate it (see Section |
| 8942 | +10.10.5 [Elimination], page 224). |
| 8943 | +*/ |
| 8944 | +/* Using r4 */
| 8945 | +#define ARG_POINTER_REGNUM INTERNAL_REGNUM(4) |
| 8946 | + |
| 8947 | + |
| 8948 | +/* |
| 8949 | +Register numbers used for passing a function's static chain pointer. If |
| 8950 | +register windows are used, the register number as seen by the called |
| 8951 | +function is STATIC_CHAIN_INCOMING_REGNUM, while the register |
| 8952 | +number as seen by the calling function is STATIC_CHAIN_REGNUM. If |
| 8953 | +these registers are the same, STATIC_CHAIN_INCOMING_REGNUM need |
| 8954 | +not be defined. |
| 8955 | + |
| 8956 | +The static chain register need not be a fixed register. |
| 8957 | + |
| 8958 | +If the static chain is passed in memory, these macros should not be |
| 8959 | +defined; instead, the next two macros should be defined. |
| 8960 | +*/ |
| 8961 | +/* Using r0 */ |
| 8962 | +#define STATIC_CHAIN_REGNUM INTERNAL_REGNUM(0) |
| 8963 | + |
| 8964 | + |
| 8965 | +/** Eliminating Frame Pointer and Arg Pointer **/ |
| 8966 | + |
| 8967 | +/* |
| 8968 | +A C expression which is nonzero if a function must have and use a frame |
| 8969 | +pointer. This expression is evaluated in the reload pass. If its value is |
| 8970 | +nonzero the function will have a frame pointer. |
| 8971 | + |
| 8972 | +The expression can in principle examine the current function and decide |
| 8973 | +according to the facts, but on most machines the constant 0 or the |
| 8974 | +constant 1 suffices. Use 0 when the machine allows code to be generated |
| 8975 | +with no frame pointer, and doing so saves some time or space. Use 1 |
| 8976 | +when there is no possible advantage to avoiding a frame pointer. |
| 8977 | + |
| 8978 | +In certain cases, the compiler does not know how to produce valid code |
| 8979 | +without a frame pointer. The compiler recognizes those cases and |
| 8980 | +automatically gives the function a frame pointer regardless of what |
| 8981 | +FRAME_POINTER_REQUIRED says. You don't need to worry about |
| 8982 | +them. |
| 8983 | + |
| 8984 | +In a function that does not require a frame pointer, the frame pointer |
| 8985 | +register can be allocated for ordinary usage, unless you mark it as a |
| 8986 | +fixed register. See FIXED_REGISTERS for more information. |
| 8987 | +*/ |
| 8988 | +/* We need the frame pointer when compiling for profiling */ |
| 8989 | +#define FRAME_POINTER_REQUIRED (current_function_profile) |
| 8990 | + |
| 8991 | +/* |
| 8992 | +A C statement to store in the variable DEPTH_VAR the difference |
| 8993 | +between the frame pointer and the stack pointer values immediately after |
| 8994 | +the function prologue. The value would be computed from information |
| 8995 | +such as the result of get_frame_size () and the tables of |
| 8996 | +registers regs_ever_live and call_used_regs. |
| 8997 | + |
| 8998 | +If ELIMINABLE_REGS is defined, this macro will be not be used and |
| 8999 | +need not be defined. Otherwise, it must be defined even if |
| 9000 | +FRAME_POINTER_REQUIRED is defined to always be true; in that |
| 9001 | +case, you may set DEPTH_VAR to anything. |
| 9002 | +*/ |
| 9003 | +#define INITIAL_FRAME_POINTER_OFFSET(DEPTH_VAR) ((DEPTH_VAR) = get_frame_size()) |
| 9004 | + |
| 9005 | +/* |
| 9006 | +If defined, this macro specifies a table of register pairs used to |
| 9007 | +eliminate unneeded registers that point into the stack frame. If it is not |
| 9008 | +defined, the only elimination attempted by the compiler is to replace |
| 9009 | +references to the frame pointer with references to the stack pointer. |
| 9010 | + |
| 9011 | +The definition of this macro is a list of structure initializations, each |
| 9012 | +of which specifies an original and replacement register. |
| 9013 | + |
| 9014 | +On some machines, the position of the argument pointer is not known until |
| 9015 | +the compilation is completed. In such a case, a separate hard register |
| 9016 | +must be used for the argument pointer. This register can be eliminated by |
| 9017 | +replacing it with either the frame pointer or the argument pointer, |
| 9018 | +depending on whether or not the frame pointer has been eliminated. |
| 9019 | + |
| 9020 | +In this case, you might specify: |
| 9021 | + #define ELIMINABLE_REGS \ |
| 9022 | + {{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ |
| 9023 | + {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \ |
| 9024 | + {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}} |
| 9025 | + |
| 9026 | +Note that the elimination of the argument pointer with the stack pointer is |
| 9027 | +specified first since that is the preferred elimination. |
| 9028 | +*/ |
| 9029 | +#define ELIMINABLE_REGS \ |
| 9030 | +{ \ |
| 9031 | + { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \ |
| 9032 | + { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \ |
| 9033 | + { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM } \ |
| 9034 | +} |
| 9035 | + |
| 9036 | +/* |
| 9037 | +A C expression that returns nonzero if the compiler is allowed to try |
| 9038 | +to replace register number FROM with register number |
| 9039 | +TO. This macro need only be defined if ELIMINABLE_REGS |
| 9040 | +is defined, and will usually be the constant 1, since most of the cases |
| 9041 | +preventing register elimination are things that the compiler already |
| 9042 | +knows about. |
| 9043 | +*/ |
| 9044 | +#define CAN_ELIMINATE(FROM, TO) 1 |
| 9045 | + |
| 9046 | +/* |
| 9047 | +This macro is similar to INITIAL_FRAME_POINTER_OFFSET. It |
| 9048 | +specifies the initial difference between the specified pair of |
| 9049 | +registers. This macro must be defined if ELIMINABLE_REGS is |
| 9050 | +defined. |
| 9051 | +*/ |
| 9052 | +#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ |
| 9053 | + ((OFFSET) = avr32_initial_elimination_offset(FROM, TO)) |
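| | +/*
| | +  For example, when reload eliminates the frame pointer in favour of the
| | +  stack pointer (the first pair in ELIMINABLE_REGS above), fp-relative
| | +  references are rewritten as
| | +    sp + avr32_initial_elimination_offset (FRAME_POINTER_REGNUM,
| | +                                           STACK_POINTER_REGNUM)
| | +  plus the original offset; the two arg-pointer pairs are handled the
| | +  same way.
| | +*/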
| 9054 | + |
| 9055 | +/** Passing Function Arguments on the Stack **/ |
| 9056 | + |
| 9057 | + |
| 9058 | +/* |
| 9059 | +A C expression. If nonzero, push insns will be used to pass |
| 9060 | +outgoing arguments. |
| 9061 | +If the target machine does not have a push instruction, set it to zero. |
| 9062 | +That directs GCC to use an alternate strategy: to |
| 9063 | +allocate the entire argument block and then store the arguments into |
| 9064 | +it. When PUSH_ARGS is nonzero, PUSH_ROUNDING must be defined too. |
| 9065 | +*/ |
| 9066 | +#define PUSH_ARGS 1 |
| 9067 | + |
| 9068 | + |
| 9069 | +/* |
| 9070 | +A C expression that is the number of bytes actually pushed onto the |
| 9071 | +stack when an instruction attempts to push NPUSHED bytes. |
| 9072 | + |
| 9073 | +On some machines, the definition |
| 9074 | + |
| 9075 | + #define PUSH_ROUNDING(BYTES) (BYTES) |
| 9076 | + |
| 9077 | +will suffice. But on other machines, instructions that appear |
| 9078 | +to push one byte actually push two bytes in an attempt to maintain |
| 9079 | +alignment. Then the definition should be |
| 9080 | + |
| 9081 | + #define PUSH_ROUNDING(BYTES) (((BYTES) + 1) & ~1) |
| 9082 | +*/ |
| 9083 | +/* Push 4 bytes at a time. */
| 9084 | +#define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3) |
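| | +/*
| | +  With the definition above, for instance:
| | +    PUSH_ROUNDING(1) == 4,  PUSH_ROUNDING(4) == 4,  PUSH_ROUNDING(5) == 8
| | +  i.e. every push is padded up to a multiple of 4 bytes.
| | +*/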
| 9085 | + |
| 9086 | +/* |
| 9087 | +A C expression. If nonzero, the maximum amount of space required for |
| 9088 | +outgoing arguments will be computed and placed into the variable |
| 9089 | +current_function_outgoing_args_size. No space will be pushed |
| 9090 | +onto the stack for each call; instead, the function prologue should |
| 9091 | +increase the stack frame size by this amount. |
| 9092 | + |
| 9093 | +Setting both PUSH_ARGS and ACCUMULATE_OUTGOING_ARGS is not proper. |
| 9094 | +*/ |
| 9095 | +#define ACCUMULATE_OUTGOING_ARGS 0 |
| 9096 | + |
| 9097 | + |
| 9098 | + |
| 9099 | + |
| 9100 | +/* |
| 9101 | +A C expression that should indicate the number of bytes of its own |
| 9102 | +arguments that a function pops on returning, or 0 if the |
| 9103 | +function pops no arguments and the caller must therefore pop them all |
| 9104 | +after the function returns. |
| 9105 | + |
| 9106 | +FUNDECL is a C variable whose value is a tree node that describes |
| 9107 | +the function in question. Normally it is a node of type |
| 9108 | +FUNCTION_DECL that describes the declaration of the function. |
| 9109 | +From this you can obtain the DECL_ATTRIBUTES of the function. |
| 9110 | + |
| 9111 | +FUNTYPE is a C variable whose value is a tree node that |
| 9112 | +describes the function in question. Normally it is a node of type |
| 9113 | +FUNCTION_TYPE that describes the data type of the function. |
| 9114 | +From this it is possible to obtain the data types of the value and |
| 9115 | +arguments (if known). |
| 9116 | + |
| 9117 | +When a call to a library function is being considered, FUNDECL |
| 9118 | +will contain an identifier node for the library function. Thus, if |
| 9119 | +you need to distinguish among various library functions, you can do so |
| 9120 | +by their names. Note that ``library function'' in this context means |
| 9121 | +a function used to perform arithmetic, whose name is known specially |
| 9122 | +in the compiler and was not mentioned in the C code being compiled. |
| 9123 | + |
| 9124 | +STACK_SIZE is the number of bytes of arguments passed on the |
| 9125 | +stack. If a variable number of bytes is passed, it is zero, and |
| 9126 | +argument popping will always be the responsibility of the calling function. |
| 9127 | + |
| 9128 | +On the VAX, all functions always pop their arguments, so the definition |
| 9129 | +of this macro is STACK_SIZE. On the 68000, using the standard |
| 9130 | +calling convention, no functions pop their arguments, so the value of |
| 9131 | +the macro is always 0 in this case. But an alternative calling |
| 9132 | +convention is available in which functions that take a fixed number of |
| 9133 | +arguments pop them but other functions (such as printf) pop |
| 9134 | +nothing (the caller pops all). When this convention is in use, |
| 9135 | +FUNTYPE is examined to determine whether a function takes a fixed |
| 9136 | +number of arguments. |
| 9137 | +*/ |
| 9138 | +#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, STACK_SIZE) 0 |
| 9139 | + |
| 9140 | + |
| 9141 | +/* Return true if we can use a single return instruction for this function. */
| 9142 | +#define USE_RETURN_INSN(ISCOND) avr32_use_return_insn(ISCOND) |
| 9143 | + |
| 9144 | +/* |
| 9145 | +A C expression that should indicate the number of bytes a call sequence |
| 9146 | +pops off the stack. It is added to the value of RETURN_POPS_ARGS |
| 9147 | +when compiling a function call. |
| 9148 | + |
| 9149 | +CUM is the variable in which all arguments to the called function |
| 9150 | +have been accumulated. |
| 9151 | + |
| 9152 | +On certain architectures, such as the SH5, a call trampoline is used |
| 9153 | +that pops certain registers off the stack, depending on the arguments |
| 9154 | +that have been passed to the function. Since this is a property of the |
| 9155 | +call site, not of the called function, RETURN_POPS_ARGS is not |
| 9156 | +appropriate. |
| 9157 | +*/ |
| 9158 | +#define CALL_POPS_ARGS(CUM) 0 |
| 9159 | + |
| 9160 | +/** Passing Arguments in Registers **/
| 9161 | + |
| 9162 | +/* |
| 9163 | +A C expression that controls whether a function argument is passed |
| 9164 | +in a register, and which register. |
| 9165 | + |
| 9166 | +The arguments are CUM, which summarizes all the previous |
| 9167 | +arguments; MODE, the machine mode of the argument; TYPE, |
| 9168 | +the data type of the argument as a tree node or 0 if that is not known |
| 9169 | +(which happens for C support library functions); and NAMED, |
| 9170 | +which is 1 for an ordinary argument and 0 for nameless arguments that |
| 9171 | +correspond to '...' in the called function's prototype. |
| 9172 | +TYPE can be an incomplete type if a syntax error has previously |
| 9173 | +occurred. |
| 9174 | + |
| 9175 | +The value of the expression is usually either a reg RTX for the |
| 9176 | +hard register in which to pass the argument, or zero to pass the |
| 9177 | +argument on the stack. |
| 9178 | + |
| 9179 | +For machines like the VAX and 68000, where normally all arguments are |
| 9180 | +pushed, zero suffices as a definition. |
| 9181 | + |
| 9182 | +The value of the expression can also be a parallel RTX. This is |
| 9183 | +used when an argument is passed in multiple locations. The mode
| 9184 | +of the parallel should be the mode of the entire argument. The |
| 9185 | +parallel holds any number of expr_list pairs; each one |
| 9186 | +describes where part of the argument is passed. In each |
| 9187 | +expr_list the first operand must be a reg RTX for the hard |
| 9188 | +register in which to pass this part of the argument, and the mode of the |
| 9189 | +register RTX indicates how large this part of the argument is. The |
| 9190 | +second operand of the expr_list is a const_int which gives |
| 9191 | +the offset in bytes into the entire argument of where this part starts. |
| 9192 | +As a special exception the first expr_list in the parallel |
| 9193 | +RTX may have a first operand of zero. This indicates that the entire |
| 9194 | +argument is also stored on the stack. |
| 9195 | + |
| 9196 | +The last time this macro is called, it is called with MODE == VOIDmode, |
| 9197 | +and its result is passed to the call or call_value |
| 9198 | +pattern as operands 2 and 3 respectively. |
| 9199 | + |
| 9200 | +The usual way to make the ISO library 'stdarg.h' work on a machine |
| 9201 | +where some arguments are usually passed in registers, is to cause |
| 9202 | +nameless arguments to be passed on the stack instead. This is done |
| 9203 | +by making FUNCTION_ARG return 0 whenever NAMED is 0. |
| 9204 | + |
| 9205 | +You may use the macro MUST_PASS_IN_STACK (MODE, TYPE) |
| 9206 | +in the definition of this macro to determine if this argument is of a |
| 9207 | +type that must be passed in the stack. If REG_PARM_STACK_SPACE |
| 9208 | +is not defined and FUNCTION_ARG returns nonzero for such an |
| 9209 | +argument, the compiler will abort. If REG_PARM_STACK_SPACE is |
| 9210 | +defined, the argument will be computed in the stack and then loaded into |
| 9211 | +a register. */ |
| 9212 | + |
| 9213 | +#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \ |
| 9214 | + avr32_function_arg(&(CUM), MODE, TYPE, NAMED) |
| 9215 | + |
| 9216 | + |
| 9217 | + |
| 9218 | + |
| 9219 | +/* |
| 9220 | +A C type for declaring a variable that is used as the first argument of |
| 9221 | +FUNCTION_ARG and other related values. For some target machines, |
| 9222 | +the type int suffices and can hold the number of bytes of |
| 9223 | +argument so far. |
| 9224 | + |
| 9225 | +There is no need to record in CUMULATIVE_ARGS anything about the |
| 9226 | +arguments that have been passed on the stack. The compiler has other |
| 9227 | +variables to keep track of that. For target machines on which all |
| 9228 | +arguments are passed on the stack, there is no need to store anything in |
| 9229 | +CUMULATIVE_ARGS; however, the data structure must exist and |
| 9230 | +should not be empty, so use int. |
| 9231 | +*/ |
| 9232 | +typedef struct avr32_args |
| 9233 | +{ |
| 9234 | + /* Index representing the argument register the current function argument |
| 9235 | + will occupy */ |
| 9236 | + int index; |
| 9237 | + /* A mask with bits representing the argument registers: if a bit is set |
| 9238 | +     then this register is used for an argument */
| 9239 | + int used_index; |
| 9240 | + /* TRUE if this function has anonymous arguments */ |
| 9241 | + int uses_anonymous_args; |
| 9242 | + /* The size in bytes of the named arguments pushed on the stack */ |
| 9243 | + int stack_pushed_args_size; |
| 9244 | + /* Set to true if this function needs a Return Value Pointer */ |
| 9245 | + int use_rvp; |
| 9246 | + |
| 9247 | +} CUMULATIVE_ARGS; |
| 9248 | + |
| 9249 | + |
| 9250 | +#define FIRST_CUM_REG_INDEX 0 |
| 9251 | +#define LAST_CUM_REG_INDEX 4 |
| 9252 | +#define GET_REG_INDEX(CUM) ((CUM)->index) |
| 9253 | +#define SET_REG_INDEX(CUM, INDEX) ((CUM)->index = (INDEX)); |
| 9254 | +#define GET_USED_INDEX(CUM, INDEX) ((CUM)->used_index & (1 << (INDEX))) |
| 9255 | +#define SET_USED_INDEX(CUM, INDEX) \ |
| 9256 | + do \ |
| 9257 | + { \ |
| 9258 | + if (INDEX >= 0) \ |
| 9259 | + (CUM)->used_index |= (1 << (INDEX)); \ |
| 9260 | + } \ |
| 9261 | + while (0) |
| 9262 | +#define SET_INDEXES_UNUSED(CUM) ((CUM)->used_index = 0) |
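| | +/*
| | +  A small usage sketch of the accessors above ("cum" is an illustrative
| | +  local CUMULATIVE_ARGS variable, not a name used elsewhere):
| | +    SET_INDEXES_UNUSED (&cum);    cum.used_index == 0
| | +    SET_USED_INDEX (&cum, 2);     sets bit 2 of cum.used_index
| | +    GET_USED_INDEX (&cum, 2);     now nonzero
| | +    SET_REG_INDEX (&cum, 3);      cum.index == 3
| | +*/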
| 9263 | + |
| 9264 | + |
| 9265 | +/* |
| 9266 | + A C statement (sans semicolon) for initializing the variable cum for the |
| 9267 | + state at the beginning of the argument list. The variable has type |
| 9268 | + CUMULATIVE_ARGS. The value of FNTYPE is the tree node for the data type of |
| 9269 | + the function which will receive the args, or 0 if the args are to a compiler |
| 9270 | + support library function. For direct calls that are not libcalls, FNDECL |
| 9271 | +  contains the declaration node of the function. FNDECL is also set when
| 9272 | + INIT_CUMULATIVE_ARGS is used to find arguments for the function being |
| 9273 | + compiled. N_NAMED_ARGS is set to the number of named arguments, including a |
| 9274 | + structure return address if it is passed as a parameter, when making a call. |
| 9275 | + When processing incoming arguments, N_NAMED_ARGS is set to -1. |
| 9276 | + |
| 9277 | + When processing a call to a compiler support library function, LIBNAME |
| 9278 | + identifies which one. It is a symbol_ref rtx which contains the name of the |
| 9279 | + function, as a string. LIBNAME is 0 when an ordinary C function call is |
| 9280 | + being processed. Thus, each time this macro is called, either LIBNAME or |
| 9281 | + FNTYPE is nonzero, but never both of them at once. |
| 9282 | +*/ |
| 9283 | +#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \ |
| 9284 | + avr32_init_cumulative_args(&(CUM), FNTYPE, LIBNAME, FNDECL) |
| 9285 | + |
| 9286 | + |
| 9287 | +/* |
| 9288 | +A C statement (sans semicolon) to update the summarizer variable |
| 9289 | +CUM to advance past an argument in the argument list. The |
| 9290 | +values MODE, TYPE and NAMED describe that argument. |
| 9291 | +Once this is done, the variable CUM is suitable for analyzing |
| 9292 | +the following argument with FUNCTION_ARG, etc. |
| 9293 | + |
| 9294 | +This macro need not do anything if the argument in question was passed |
| 9295 | +on the stack. The compiler knows how to track the amount of stack space |
| 9296 | +used for arguments without any special help. |
| 9297 | +*/ |
| 9298 | +#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \ |
| 9299 | + avr32_function_arg_advance(&(CUM), MODE, TYPE, NAMED) |
| 9300 | + |
| 9301 | +/* |
| 9302 | +If defined, a C expression which determines whether, and in which direction, |
| 9303 | +to pad out an argument with extra space. The value should be of type |
| 9304 | +enum direction: either 'upward' to pad above the argument, |
| 9305 | +'downward' to pad below, or 'none' to inhibit padding. |
| 9306 | + |
| 9307 | +The amount of padding is always just enough to reach the next |
| 9308 | +multiple of FUNCTION_ARG_BOUNDARY; this macro does not control |
| 9309 | +it. |
| 9310 | + |
| 9311 | +This macro has a default definition which is right for most systems. |
| 9312 | +For little-endian machines, the default is to pad upward. For |
| 9313 | +big-endian machines, the default is to pad downward for an argument of |
| 9314 | +constant size shorter than an int, and upward otherwise. |
| 9315 | +*/ |
| 9316 | +#define FUNCTION_ARG_PADDING(MODE, TYPE) \ |
| 9317 | + avr32_function_arg_padding(MODE, TYPE) |
| 9318 | + |
| 9319 | +/* |
| 9320 | + Specify padding for the last element of a block move between registers |
| 9321 | + and memory. First is nonzero if this is the only element. Defining |
| 9322 | + this macro allows better control of register function parameters on |
| 9323 | + big-endian machines, without using PARALLEL rtl. In particular, |
| 9324 | + MUST_PASS_IN_STACK need not test padding and mode of types in registers, |
| 9325 | +  as there is no longer a "wrong" part of a register. For example, a three
| 9326 | + byte aggregate may be passed in the high part of a register if so required. |
| 9327 | +*/ |
| 9328 | +#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \ |
| 9329 | + avr32_function_arg_padding(MODE, TYPE) |
| 9330 | + |
| 9331 | +/* |
| 9332 | +If defined, a C expression which determines whether the default |
| 9333 | +implementation of va_arg will attempt to pad down before reading the |
| 9334 | +next argument, if that argument is smaller than its aligned space as |
| 9335 | +controlled by PARM_BOUNDARY. If this macro is not defined, all such |
| 9336 | +arguments are padded down if BYTES_BIG_ENDIAN is true. |
| 9337 | +*/ |
| 9338 | +#define PAD_VARARGS_DOWN \ |
| 9339 | + (FUNCTION_ARG_PADDING (TYPE_MODE (type), type) == downward) |
| 9340 | + |
| 9341 | + |
| 9342 | +/* |
| 9343 | +A C expression that is nonzero if REGNO is the number of a hard |
| 9344 | +register in which function arguments are sometimes passed. This does |
| 9345 | +not include implicit arguments such as the static chain and |
| 9346 | +the structure-value address. On many machines, no registers can be |
| 9347 | +used for this purpose since all function arguments are pushed on the |
| 9348 | +stack. |
| 9349 | +*/ |
| 9350 | +/* |
| 9351 | + Use r8 - r12 for function arguments. |
| 9352 | +*/ |
| 9353 | +#define FUNCTION_ARG_REGNO_P(REGNO) \ |
| 9354 | +  ((REGNO) >= 3 && (REGNO) <= 7)
| 9355 | + |
| 9356 | +/* Number of registers used for passing function arguments */ |
| 9357 | +#define NUM_ARG_REGS 5 |
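| | +/*
| | +  Note (inferred, cf. RET_REGISTER == (15 - 12) further down): the internal
| | +  register numbers appear to be 15 - n, so the range 3..7 accepted by
| | +  FUNCTION_ARG_REGNO_P corresponds to r12..r8, matching the "r8 - r12"
| | +  note above and NUM_ARG_REGS == 5.
| | +*/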
| 9358 | + |
| 9359 | +/* |
| 9360 | +If defined, the order in which arguments are loaded into their |
| 9361 | +respective argument registers is reversed so that the last |
| 9362 | +argument is loaded first. This macro only affects arguments |
| 9363 | +passed in registers. |
| 9364 | +*/ |
| 9365 | +/* #define LOAD_ARGS_REVERSED */ |
| 9366 | + |
| 9367 | +/** How Scalar Function Values Are Returned **/ |
| 9368 | + |
| 9369 | +/* AVR32 is using r12 as return register. */ |
| 9370 | +#define RET_REGISTER (15 - 12) |
| 9371 | + |
| 9372 | +/* |
| 9373 | +Define this macro if -traditional should not cause functions |
| 9374 | +declared to return float to convert the value to double. |
| 9375 | +*/ |
| 9376 | +/* #define TRADITIONAL_RETURN_FLOAT */ |
| 9377 | + |
| 9378 | +/* |
| 9379 | +A C expression to create an RTX representing the place where a |
| 9380 | +function returns a value of data type VALTYPE. VALTYPE is |
| 9381 | +a tree node representing a data type. Write TYPE_MODE(VALTYPE) |
| 9382 | +to get the machine mode used to represent that type. |
| 9383 | +On many machines, only the mode is relevant. (Actually, on most |
| 9384 | +machines, scalar values are returned in the same place regardless of |
| 9385 | +mode). |
| 9386 | + |
| 9387 | +The value of the expression is usually a reg RTX for the hard |
| 9388 | +register where the return value is stored. The value can also be a |
| 9389 | +parallel RTX, if the return value is in multiple places. See |
| 9390 | +FUNCTION_ARG for an explanation of the parallel form. |
| 9391 | + |
| 9392 | +If PROMOTE_FUNCTION_RETURN is defined, you must apply the same |
| 9393 | +promotion rules specified in PROMOTE_MODE if VALTYPE is a |
| 9394 | +scalar type. |
| 9395 | + |
| 9396 | +If the precise function being called is known, FUNC is a tree |
| 9397 | +node (FUNCTION_DECL) for it; otherwise, FUNC is a null |
| 9398 | +pointer. This makes it possible to use a different value-returning |
| 9399 | +convention for specific functions when all their calls are |
| 9400 | +known. |
| 9401 | + |
| 9402 | +FUNCTION_VALUE is not used for return values with aggregate data
| 9403 | +types, because these are returned in another way. See |
| 9404 | +STRUCT_VALUE_REGNUM and related macros, below. |
| 9405 | +*/ |
| 9406 | +#define FUNCTION_VALUE(VALTYPE, FUNC) avr32_function_value(VALTYPE, FUNC) |
| 9407 | + |
| 9408 | + |
| 9409 | +/* |
| 9410 | +A C expression to create an RTX representing the place where a library |
| 9411 | +function returns a value of mode MODE. If the precise function |
| 9412 | +being called is known, FUNC is a tree node |
| 9413 | +(FUNCTION_DECL) for it; otherwise, FUNC is a null
| 9414 | +pointer. This makes it possible to use a different value-returning |
| 9415 | +convention for specific functions when all their calls are |
| 9416 | +known. |
| 9417 | + |
| 9418 | +Note that "library function" in this context means a compiler |
| 9419 | +support routine, used to perform arithmetic, whose name is known |
| 9420 | +specially by the compiler and was not mentioned in the C code being |
| 9421 | +compiled. |
| 9422 | + |
| 9423 | +The definition of LIBCALL_VALUE need not be concerned with aggregate
| 9424 | +data types, because none of the library functions returns such types. |
| 9425 | +*/ |
| 9426 | +#define LIBCALL_VALUE(MODE) avr32_libcall_value(MODE) |
| 9427 | + |
| 9428 | +/* |
| 9429 | +A C expression that is nonzero if REGNO is the number of a hard |
| 9430 | +register in which the values of called function may come back. |
| 9431 | + |
| 9432 | +A register whose use for returning values is limited to serving as the |
| 9433 | +second of a pair (for a value of type double, say) need not be |
| 9434 | +recognized by this macro. So for most machines, this definition |
| 9435 | +suffices: |
| 9436 | + #define FUNCTION_VALUE_REGNO_P(N) ((N) == 0) |
| 9437 | + |
| 9438 | +If the machine has register windows, so that the caller and the called |
| 9439 | +function use different registers for the return value, this macro |
| 9440 | +should recognize only the caller's register numbers. |
| 9441 | +*/ |
| 9442 | +/* |
| 9443 | + When returning a value of mode DImode, r11:r10 is used, else r12 is used. |
| 9444 | +*/ |
| 9445 | +#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == RET_REGISTER \ |
| 9446 | + || (REGNO) == INTERNAL_REGNUM(11)) |
| 9447 | + |
| 9448 | + |
| 9449 | +/** How Large Values Are Returned **/ |
| 9450 | + |
| 9451 | + |
| 9452 | +/* |
| 9453 | +Define this macro to be 1 if all structure and union return values must be |
| 9454 | +in memory. Since this results in slower code, this should be defined |
| 9455 | +only if needed for compatibility with other compilers or with an ABI. |
| 9456 | +If you define this macro to be 0, then the conventions used for structure |
| 9457 | +and union return values are decided by the RETURN_IN_MEMORY macro. |
| 9458 | + |
| 9459 | +If not defined, this defaults to the value 1. |
| 9460 | +*/ |
| 9461 | +#define DEFAULT_PCC_STRUCT_RETURN 0 |
| 9462 | + |
| 9463 | + |
| 9464 | + |
| 9465 | + |
| 9466 | +/** Generating Code for Profiling **/ |
| 9467 | + |
| 9468 | +/* |
| 9469 | +A C statement or compound statement to output to FILE some |
| 9470 | +assembler code to call the profiling subroutine mcount. |
| 9471 | + |
| 9472 | +The details of how mcount expects to be called are determined by |
| 9473 | +your operating system environment, not by GCC. To figure them out, |
| 9474 | +compile a small program for profiling using the system's installed C |
| 9475 | +compiler and look at the assembler code that results. |
| 9476 | + |
| 9477 | +Older implementations of mcount expect the address of a counter |
| 9478 | +variable to be loaded into some register. The name of this variable is |
| 9479 | +'LP' followed by the number LABELNO, so you would generate |
| 9480 | +the name using 'LP%d' in a fprintf. |
| 9481 | +*/ |
| 9482 | +/* ToDo: fixme */ |
| 9483 | +#ifndef FUNCTION_PROFILER |
| 9484 | +#define FUNCTION_PROFILER(FILE, LABELNO) \ |
| 9485 | + fprintf((FILE), "/* profiler %d */", (LABELNO)) |
| 9486 | +#endif |
| 9487 | + |
| 9488 | + |
| 9489 | +/***************************************************************************** |
| 9490 | + * Trampolines for Nested Functions * |
| 9491 | + *****************************************************************************/ |
| 9492 | + |
| 9493 | +/* |
| 9494 | +A C statement to output, on the stream FILE, assembler code for a |
| 9495 | +block of data that contains the constant parts of a trampoline. This |
| 9496 | +code should not include a label - the label is taken care of |
| 9497 | +automatically. |
| 9498 | + |
| 9499 | +If you do not define this macro, it means no template is needed |
| 9500 | +for the target. Do not define this macro on systems where the block move |
| 9501 | +code to copy the trampoline into place would be larger than the code |
| 9502 | +to generate it on the spot. |
| 9503 | +*/ |
| 9504 | +/* ToDo: correct? */ |
| 9505 | +#define TRAMPOLINE_TEMPLATE(FILE) avr32_trampoline_template(FILE); |
| 9506 | + |
| 9507 | + |
| 9508 | +/* |
| 9509 | +A C expression for the size in bytes of the trampoline, as an integer. |
| 9510 | +*/ |
| 9511 | +/* ToDo: fixme */ |
| 9512 | +#define TRAMPOLINE_SIZE 0x0C |
| 9513 | + |
| 9514 | +/* |
| 9515 | +Alignment required for trampolines, in bits. |
| 9516 | + |
| 9517 | +If you don't define this macro, the value of BIGGEST_ALIGNMENT |
| 9518 | +is used for aligning trampolines. |
| 9519 | +*/ |
| 9520 | +#define TRAMPOLINE_ALIGNMENT 16 |
| 9521 | + |
| 9522 | +/* |
| 9523 | +A C statement to initialize the variable parts of a trampoline. |
| 9524 | +ADDR is an RTX for the address of the trampoline; FNADDR is |
| 9525 | +an RTX for the address of the nested function; STATIC_CHAIN is an |
| 9526 | +RTX for the static chain value that should be passed to the function |
| 9527 | +when it is called. |
| 9528 | +*/ |
| 9529 | +#define INITIALIZE_TRAMPOLINE(ADDR, FNADDR, STATIC_CHAIN) \ |
| 9530 | + avr32_initialize_trampoline(ADDR, FNADDR, STATIC_CHAIN) |
| 9531 | + |
| 9532 | + |
| 9533 | +/****************************************************************************** |
| 9534 | + * Implicit Calls to Library Routines |
| 9535 | + *****************************************************************************/ |
| 9536 | + |
| 9537 | +/* Tail calling. */ |
| 9538 | + |
| 9539 | +/* A C expression that evaluates to true if it is ok to perform a sibling |
| 9540 | + call to DECL. */ |
| 9541 | +#define FUNCTION_OK_FOR_SIBCALL(DECL) 0 |
| 9542 | + |
| 9543 | +#define OVERRIDE_OPTIONS avr32_override_options () |
| 9544 | + |
| 9545 | + |
| 9546 | + |
| 9547 | +/****************************************************************************** |
| 9548 | + * Addressing Modes |
| 9549 | + *****************************************************************************/ |
| 9550 | + |
| 9551 | +/* |
| 9552 | +A C expression that is nonzero if the machine supports pre-increment, |
| 9553 | +pre-decrement, post-increment, or post-decrement addressing respectively. |
| 9554 | +*/ |
| 9555 | +/* |
| 9556 | + AVR32 supports Rp++ and --Rp |
| 9557 | +*/ |
| 9558 | +#define HAVE_PRE_INCREMENT 0 |
| 9559 | +#define HAVE_PRE_DECREMENT 1 |
| 9560 | +#define HAVE_POST_INCREMENT 1 |
| 9561 | +#define HAVE_POST_DECREMENT 0 |
| 9562 | + |
| 9563 | +/* |
| 9564 | +A C expression that is nonzero if the machine supports pre- or |
| 9565 | +post-address side-effect generation involving constants other than |
| 9566 | +the size of the memory operand. |
| 9567 | +*/ |
| 9568 | +#define HAVE_PRE_MODIFY_DISP 0 |
| 9569 | +#define HAVE_POST_MODIFY_DISP 0 |
| 9570 | + |
| 9571 | +/* |
| 9572 | +A C expression that is nonzero if the machine supports pre- or |
| 9573 | +post-address side-effect generation involving a register displacement. |
| 9574 | +*/ |
| 9575 | +#define HAVE_PRE_MODIFY_REG 0 |
| 9576 | +#define HAVE_POST_MODIFY_REG 0 |
| 9577 | + |
| 9578 | +/* |
| 9579 | +A C expression that is 1 if the RTX X is a constant which |
| 9580 | +is a valid address. On most machines, this can be defined as |
| 9581 | +CONSTANT_P (X), but a few machines are more restrictive |
| 9582 | +in which constant addresses are supported. |
| 9583 | + |
| 9584 | +CONSTANT_P accepts integer-values expressions whose values are |
| 9585 | +not explicitly known, such as symbol_ref, label_ref, and |
| 9586 | +high expressions and const arithmetic expressions, in |
| 9587 | +addition to const_int and const_double expressions. |
| 9588 | +*/ |
| 9589 | +#define CONSTANT_ADDRESS_P(X) CONSTANT_P(X) |
| 9590 | + |
| 9591 | +/* |
| 9592 | +A number, the maximum number of registers that can appear in a valid |
| 9593 | +memory address. Note that it is up to you to specify a value equal to |
| 9594 | +the maximum number that GO_IF_LEGITIMATE_ADDRESS would ever |
| 9595 | +accept. |
| 9596 | +*/ |
| 9597 | +#define MAX_REGS_PER_ADDRESS 2 |
| 9598 | + |
| 9599 | +/* |
| 9600 | +A C compound statement with a conditional goto LABEL; |
| 9601 | +executed if X (an RTX) is a legitimate memory address on the |
| 9602 | +target machine for a memory operand of mode MODE. |
| 9603 | + |
| 9604 | +It usually pays to define several simpler macros to serve as |
| 9605 | +subroutines for this one. Otherwise it may be too complicated to |
| 9606 | +understand. |
| 9607 | + |
| 9608 | +This macro must exist in two variants: a strict variant and a |
| 9609 | +non-strict one. The strict variant is used in the reload pass. It |
| 9610 | +must be defined so that any pseudo-register that has not been |
| 9611 | +allocated a hard register is considered a memory reference. In |
| 9612 | +contexts where some kind of register is required, a pseudo-register |
| 9613 | +with no hard register must be rejected. |
| 9614 | + |
| 9615 | +The non-strict variant is used in other passes. It must be defined to |
| 9616 | +accept all pseudo-registers in every context where some kind of |
| 9617 | +register is required. |
| 9618 | + |
| 9619 | +Compiler source files that want to use the strict variant of this |
| 9620 | +macro define the macro REG_OK_STRICT. You should use an |
| 9621 | +#ifdef REG_OK_STRICT conditional to define the strict variant |
| 9622 | +in that case and the non-strict variant otherwise. |
| 9623 | + |
| 9624 | +Subroutines to check for acceptable registers for various purposes (one |
| 9625 | +for base registers, one for index registers, and so on) are typically |
| 9626 | +among the subroutines used to define GO_IF_LEGITIMATE_ADDRESS. |
| 9627 | +Then only these subroutine macros need have two variants; the higher |
| 9628 | +levels of macros may be the same whether strict or not. |
| 9629 | + |
| 9630 | +Normally, constant addresses which are the sum of a symbol_ref |
| 9631 | +and an integer are stored inside a const RTX to mark them as |
| 9632 | +constant. Therefore, there is no need to recognize such sums |
| 9633 | +specifically as legitimate addresses. Normally you would simply |
| 9634 | +recognize any const as legitimate. |
| 9635 | + |
| 9636 | +Usually PRINT_OPERAND_ADDRESS is not prepared to handle constant |
| 9637 | +sums that are not marked with const. It assumes that a naked |
| 9638 | +plus indicates indexing. If so, then you must reject such |
| 9639 | +naked constant sums as illegitimate addresses, so that none of them will |
| 9640 | +be given to PRINT_OPERAND_ADDRESS. |
| 9641 | + |
| 9642 | +On some machines, whether a symbolic address is legitimate depends on |
| 9643 | +the section that the address refers to. On these machines, define the |
| 9644 | +macro ENCODE_SECTION_INFO to store the information into the |
| 9645 | +symbol_ref, and then check for it here. When you see a |
| 9646 | +const, you will have to look inside it to find the |
| 9647 | +symbol_ref in order to determine the section. |
| 9648 | + |
| 9649 | +The best way to modify the name string is by adding text to the |
| 9650 | +beginning, with suitable punctuation to prevent any ambiguity. Allocate |
| 9651 | +the new name in saveable_obstack. You will have to modify |
| 9652 | +ASM_OUTPUT_LABELREF to remove and decode the added text and |
| 9653 | +output the name accordingly, and define STRIP_NAME_ENCODING to |
| 9654 | +access the original name string. |
| 9655 | + |
| 9656 | +You can check the information stored here into the symbol_ref in |
| 9657 | +the definitions of the macros GO_IF_LEGITIMATE_ADDRESS and |
| 9658 | +PRINT_OPERAND_ADDRESS. |
| 9659 | +*/ |
| 9660 | +#ifdef REG_OK_STRICT |
| 9661 | +# define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \ |
| 9662 | + do \ |
| 9663 | + { \ |
| 9664 | + if (avr32_legitimate_address(MODE, X, 1)) \ |
| 9665 | + goto LABEL; \ |
| 9666 | + } \ |
| 9667 | + while (0) |
| 9668 | +#else |
| 9669 | +# define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \ |
| 9670 | + do \ |
| 9671 | + { \ |
| 9672 | + if (avr32_legitimate_address(MODE, X, 0)) \ |
| 9673 | + goto LABEL; \ |
| 9674 | + } \ |
| 9675 | + while (0) |
| 9676 | +#endif |
| 9677 | + |
| 9678 | +/* |
| 9679 | +A C expression that is nonzero if X (assumed to be a reg |
| 9680 | +RTX) is valid for use as a base register. For hard registers, it |
| 9681 | +should always accept those which the hardware permits and reject the |
| 9682 | +others. Whether the macro accepts or rejects pseudo registers must be |
| 9683 | +controlled by REG_OK_STRICT as described above. This usually |
| 9684 | +requires two variant definitions, of which REG_OK_STRICT |
| 9685 | +controls the one actually used. |
| 9686 | +*/ |
| 9687 | +#ifdef REG_OK_STRICT |
| 9688 | +# define REG_OK_FOR_BASE_P(X) \ |
| 9689 | + REGNO_OK_FOR_BASE_P(REGNO(X)) |
| 9690 | +#else |
| 9691 | +# define REG_OK_FOR_BASE_P(X) \ |
| 9692 | + ((REGNO(X) <= LAST_REGNUM) || (REGNO(X) >= FIRST_PSEUDO_REGISTER)) |
| 9693 | +#endif |
| 9694 | + |
| 9695 | + |
| 9696 | +/* |
| 9697 | +A C expression that is nonzero if X (assumed to be a reg |
| 9698 | +RTX) is valid for use as an index register. |
| 9699 | + |
| 9700 | +The difference between an index register and a base register is that |
| 9701 | +the index register may be scaled. If an address involves the sum of |
| 9702 | +two registers, neither one of them scaled, then either one may be |
| 9703 | +labeled the "base" and the other the "index"; but whichever |
| 9704 | +labeling is used must fit the machine's constraints of which registers |
| 9705 | +may serve in each capacity. The compiler will try both labelings, |
| 9706 | +looking for one that is valid, and will reload one or both registers |
| 9707 | +only if neither labeling works. |
| 9708 | +*/ |
| 9709 | +#define REG_OK_FOR_INDEX_P(X) \ |
| 9710 | + REG_OK_FOR_BASE_P(X) |
| 9711 | + |
| 9712 | + |
| 9713 | +/* |
| 9714 | +A C compound statement that attempts to replace X with a valid |
| 9715 | +memory address for an operand of mode MODE. WIN will be a
| 9716 | +C statement label elsewhere in the code; the macro definition may use |
| 9717 | + |
| 9718 | + GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN); |
| 9719 | + |
| 9720 | +to avoid further processing if the address has become legitimate. |
| 9721 | + |
| 9722 | +X will always be the result of a call to break_out_memory_refs, |
| 9723 | +and OLDX will be the operand that was given to that function to produce |
| 9724 | +X. |
| 9725 | + |
| 9726 | +The code generated by this macro should not alter the substructure of |
| 9727 | +X. If it transforms X into a more legitimate form, it |
| 9728 | +should assign X (which will always be a C variable) a new value. |
| 9729 | + |
| 9730 | +It is not necessary for this macro to come up with a legitimate |
| 9731 | +address. The compiler has standard ways of doing so in all cases. In |
| 9732 | +fact, it is safe for this macro to do nothing. But often a |
| 9733 | +machine-dependent strategy can generate better code. |
| 9734 | +*/ |
| 9735 | +#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \ |
| 9736 | + do \ |
| 9737 | + { \ |
| 9738 | + if (GET_CODE(X) == PLUS \ |
| 9739 | + && GET_CODE(XEXP(X, 0)) == REG \ |
| 9740 | + && GET_CODE(XEXP(X, 1)) == CONST_INT \ |
| 9741 | + && !CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(X, 1)), \ |
| 9742 | + 'K', "Ks16")) \ |
| 9743 | + { \ |
| 9744 | + rtx index = force_reg(SImode, XEXP(X, 1)); \ |
| 9745 | + X = gen_rtx_PLUS( SImode, XEXP(X, 0), index); \ |
| 9746 | + } \ |
| 9747 | + GO_IF_LEGITIMATE_ADDRESS(MODE, X, WIN); \ |
| 9748 | + } \ |
| 9749 | + while(0) |
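| | +/*
| | +  Sketch of the transformation above: an address such as
| | +    (plus (reg Rp) (const_int 0x12345))
| | +  whose constant does not satisfy the Ks16 constraint has the constant
| | +  forced into a fresh register, giving
| | +    (plus (reg Rp) (reg tmp))
| | +  which is then re-checked with GO_IF_LEGITIMATE_ADDRESS.
| | +*/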
| 9750 | + |
| 9751 | + |
| 9752 | +/* |
| 9753 | +A C statement or compound statement with a conditional |
| 9754 | +goto LABEL; executed if memory address X (an RTX) can have |
| 9755 | +different meanings depending on the machine mode of the memory |
| 9756 | +reference it is used for or if the address is valid for some modes |
| 9757 | +but not others. |
| 9758 | + |
| 9759 | +Autoincrement and autodecrement addresses typically have mode-dependent |
| 9760 | +effects because the amount of the increment or decrement is the size |
| 9761 | +of the operand being addressed. Some machines have other mode-dependent |
| 9762 | +addresses. Many RISC machines have no mode-dependent addresses. |
| 9763 | + |
| 9764 | +You may assume that ADDR is a valid address for the machine. |
| 9765 | +*/ |
| 9766 | +#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \ |
| 9767 | + do \ |
| 9768 | + { \ |
| 9769 | + if (GET_CODE (ADDR) == POST_INC \ |
| 9770 | + || GET_CODE (ADDR) == PRE_DEC) \ |
| 9771 | + goto LABEL; \ |
| 9772 | + } \ |
| 9773 | + while (0) |
| 9774 | + |
| 9775 | +/* |
| 9776 | +A C expression that is nonzero if X is a legitimate constant for |
| 9777 | +an immediate operand on the target machine. You can assume that |
| 9778 | +X satisfies CONSTANT_P, so you need not check this. In fact, |
| 9779 | +'1' is a suitable definition for this macro on machines where |
| 9780 | +anything CONSTANT_P is valid. |
| 9781 | +*/ |
| 9782 | +#define LEGITIMATE_CONSTANT_P(X) avr32_legitimate_constant_p(X) |
| 9783 | + |
| 9784 | + |
| 9785 | +/****************************************************************************** |
| 9786 | + * Condition Code Status |
| 9787 | + *****************************************************************************/ |
| 9788 | + |
| 9789 | +#define HAVE_conditional_move 1 |
| 9790 | + |
| 9791 | +/* |
| 9792 | +C code for a data type which is used for declaring the mdep |
| 9793 | +component of cc_status. It defaults to int. |
| 9794 | + |
| 9795 | +This macro is not used on machines that do not use cc0. |
| 9796 | +*/ |
| 9797 | + |
| 9798 | +typedef struct |
| 9799 | +{ |
| 9800 | + int flags; |
| 9801 | + rtx value; |
| 9802 | + int fpflags; |
| 9803 | + rtx fpvalue; |
| 9804 | +} avr32_status_reg; |
| 9805 | + |
| 9806 | + |
| 9807 | +#define CC_STATUS_MDEP avr32_status_reg |
| 9808 | + |
| 9809 | +/* |
| 9810 | +A C expression to initialize the mdep field to "empty". |
| 9811 | +The default definition does nothing, since most machines don't use |
| 9812 | +the field anyway. If you want to use the field, you should probably |
| 9813 | +define this macro to initialize it. |
| 9814 | + |
| 9815 | +This macro is not used on machines that do not use cc0. |
| 9816 | +*/ |
| 9817 | + |
| 9818 | +#define CC_STATUS_MDEP_INIT \ |
| 9819 | + (cc_status.mdep.flags = CC_NONE , cc_status.mdep.value = 0) |
| 9820 | + |
| 9821 | +#define FPCC_STATUS_INIT \ |
| 9822 | + (cc_status.mdep.fpflags = CC_NONE , cc_status.mdep.fpvalue = 0) |
| 9823 | + |
| 9824 | +/* |
| 9825 | +A C compound statement to set the components of cc_status |
| 9826 | +appropriately for an insn INSN whose body is EXP. It is |
| 9827 | +this macro's responsibility to recognize insns that set the condition |
| 9828 | +code as a byproduct of other activity as well as those that explicitly |
| 9829 | +set (cc0). |
| 9830 | + |
| 9831 | +This macro is not used on machines that do not use cc0. |
| 9832 | + |
| 9833 | +If there are insns that do not set the condition code but do alter |
| 9834 | +other machine registers, this macro must check to see whether they |
| 9835 | +invalidate the expressions that the condition code is recorded as |
| 9836 | +reflecting. For example, on the 68000, insns that store in address |
| 9837 | +registers do not set the condition code, which means that usually |
| 9838 | +NOTICE_UPDATE_CC can leave cc_status unaltered for such |
| 9839 | +insns. But suppose that the previous insn set the condition code |
| 9840 | +based on location 'a4@@(102)' and the current insn stores a new |
| 9841 | +value in 'a4'. Although the condition code is not changed by |
| 9842 | +this, it will no longer be true that it reflects the contents of |
| 9843 | +'a4@@(102)'. Therefore, NOTICE_UPDATE_CC must alter |
| 9844 | +cc_status in this case to say that nothing is known about the |
| 9845 | +condition code value. |
| 9846 | + |
| 9847 | +The definition of NOTICE_UPDATE_CC must be prepared to deal |
| 9848 | +with the results of peephole optimization: insns whose patterns are |
| 9849 | +parallel RTXs containing various reg, mem or |
| 9850 | +constants which are just the operands. The RTL structure of these |
| 9851 | +insns is not sufficient to indicate what the insns actually do. What |
| 9852 | +NOTICE_UPDATE_CC should do when it sees one is just to run |
| 9853 | +CC_STATUS_INIT. |
| 9854 | + |
| 9855 | +A possible definition of NOTICE_UPDATE_CC is to call a function |
| 9856 | +that looks at an attribute (see Insn Attributes) named, for example, |
| 9857 | +'cc'. This avoids having detailed information about patterns in |
| 9858 | +two places, the 'md' file and in NOTICE_UPDATE_CC. |
| 9859 | +*/ |
| 9860 | + |
| 9861 | +#define NOTICE_UPDATE_CC(EXP, INSN) avr32_notice_update_cc(EXP, INSN) |
| 9862 | + |
| 9863 | + |
| 9864 | + |
| 9865 | + |
| 9866 | +/****************************************************************************** |
| 9867 | + * Describing Relative Costs of Operations |
| 9868 | + *****************************************************************************/ |
| 9869 | + |
| 9870 | + |
| 9871 | + |
| 9872 | +/* |
| 9873 | +A C expression for the cost of moving data of mode MODE from a |
| 9874 | +register in class FROM to one in class TO. The classes are |
| 9875 | +expressed using the enumeration values such as GENERAL_REGS. A |
| 9876 | +value of 2 is the default; other values are interpreted relative to |
| 9877 | +that. |
| 9878 | + |
| 9879 | +It is not required that the cost always equal 2 when FROM is the |
| 9880 | +same as TO; on some machines it is expensive to move between |
| 9881 | +registers if they are not general registers. |
| 9882 | + |
| 9883 | +If reload sees an insn consisting of a single set between two |
| 9884 | +hard registers, and if REGISTER_MOVE_COST applied to their |
| 9885 | +classes returns a value of 2, reload does not check to ensure that the |
| 9886 | +constraints of the insn are met. Setting a cost of other than 2 will |
| 9887 | +allow reload to verify that the constraints are met. You should do this |
| 9888 | +if the movm pattern's constraints do not allow such copying. |
| 9889 | +*/ |
| 9890 | +#define REGISTER_MOVE_COST(MODE, FROM, TO) \ |
| 9891 | + ((GET_MODE_SIZE(MODE) <= 4) ? 2: \ |
| 9892 | + (GET_MODE_SIZE(MODE) <= 8) ? 3: \ |
| 9893 | + 4) |
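| | +/*
| | +  So, for example:  SImode (4 bytes) -> 2,  DImode (8 bytes) -> 3,
| | +  anything larger -> 4, regardless of the classes FROM and TO.
| | +*/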
| 9894 | + |
| 9895 | +/* |
| 9896 | +A C expression for the cost of moving data of mode MODE between a |
| 9897 | +register of class CLASS and memory; IN is zero if the value |
| 9898 | +is to be written to memory, nonzero if it is to be read in. This cost |
| 9899 | +is relative to those in REGISTER_MOVE_COST. If moving between |
| 9900 | +registers and memory is more expensive than between two registers, you |
| 9901 | +should define this macro to express the relative cost. |
| 9902 | + |
| 9903 | +If you do not define this macro, GCC uses a default cost of 4 plus |
| 9904 | +the cost of copying via a secondary reload register, if one is |
| 9905 | +needed. If your machine requires a secondary reload register to copy |
| 9906 | +between memory and a register of CLASS but the reload mechanism is |
| 9907 | +more complex than copying via an intermediate, define this macro to |
| 9908 | +reflect the actual cost of the move. |
| 9909 | + |
| 9910 | +GCC defines the function memory_move_secondary_cost if |
| 9911 | +secondary reloads are needed. It computes the costs due to copying via |
| 9912 | +a secondary register. If your machine copies from memory using a |
| 9913 | +secondary register in the conventional way but the default base value of |
| 9914 | +4 is not correct for your machine, define this macro to add some other |
| 9915 | +value to the result of that function. The arguments to that function |
| 9916 | +are the same as to this macro. |
| 9917 | +*/ |
| 9918 | +/* |
| 9919 | + Memory moves are costly |
| 9920 | +*/ |
| 9921 | +#define MEMORY_MOVE_COST(MODE, CLASS, IN) 10 |
| 9922 | +/* |
| 9923 | + (((IN) ? ((GET_MODE_SIZE(MODE) < 4) ? 4 : \ |
| 9924 | + (GET_MODE_SIZE(MODE) > 8) ? 6 : \ |
| 9925 | + 3) \ |
| 9926 | + : ((GET_MODE_SIZE(MODE) > 8) ? 4 : 2))) |
| 9927 | +*/ |
| 9928 | + |
| 9929 | +/* |
| 9930 | +A C expression for the cost of a branch instruction. A value of 1 is |
| 9931 | +the default; other values are interpreted relative to that. |
| 9932 | +*/ |
| 9933 | + /* Try to use conditionals as much as possible */ |
| 9934 | +#define BRANCH_COST (TARGET_BRANCH_PRED ? 3 : 5) |
| 9935 | + |
| 9936 | +/*A C expression for the maximum number of instructions to execute via conditional |
| 9937 | + execution instructions instead of a branch. A value of BRANCH_COST+1 is the default |
| 9938 | + if the machine does not use cc0, and 1 if it does use cc0.*/ |
| 9939 | +#define MAX_CONDITIONAL_EXECUTE 3 |
| 9940 | + |
| 9941 | +/* |
| 9942 | +Define this macro as a C expression which is nonzero if accessing less |
| 9943 | +than a word of memory (i.e.: a char or a short) is no |
| 9944 | +faster than accessing a word of memory, i.e., if such an access
| 9945 | +requires more than one instruction or if there is no difference in cost
| 9946 | +between byte and (aligned) word loads. |
| 9947 | + |
| 9948 | +When this macro is not defined, the compiler will access a field by |
| 9949 | +finding the smallest containing object; when it is defined, a fullword |
| 9950 | +load will be used if alignment permits. Unless byte accesses are
| 9951 | +faster than word accesses, using word accesses is preferable since it |
| 9952 | +may eliminate subsequent memory access if subsequent accesses occur to |
| 9953 | +other fields in the same word of the structure, but to different bytes. |
| 9954 | +*/ |
| 9955 | +#define SLOW_BYTE_ACCESS 1 |
| 9956 | + |
| 9957 | + |
| 9958 | +/* |
| 9959 | +Define this macro if it is as good or better to call a constant |
| 9960 | +function address than to call an address kept in a register. |
| 9961 | +*/ |
| 9962 | +#define NO_FUNCTION_CSE |
| 9963 | + |
| 9964 | + |
| 9965 | +/****************************************************************************** |
| 9966 | + * Adjusting the Instruction Scheduler |
| 9967 | + *****************************************************************************/ |
| 9968 | + |
| 9969 | +/***************************************************************************** |
| 9970 | + * Dividing the Output into Sections (Texts, Data, ...) * |
| 9971 | + *****************************************************************************/ |
| 9972 | + |
| 9973 | +/* |
| 9974 | +A C expression whose value is a string, including spacing, containing the |
| 9975 | +assembler operation that should precede instructions and read-only data. |
| 9976 | +Normally "\t.text" is right. |
| 9977 | +*/ |
| 9978 | +#define TEXT_SECTION_ASM_OP "\t.text" |
| 9979 | +/* |
| 9980 | +A C statement that switches to the default section containing instructions. |
| 9981 | +Normally this is not needed, as simply defining TEXT_SECTION_ASM_OP |
| 9982 | +is enough. The MIPS port uses this to sort all functions after all data |
| 9983 | +declarations. |
| 9984 | +*/ |
| 9985 | +/* #define TEXT_SECTION */ |
| 9986 | + |
| 9987 | +/* |
| 9988 | +A C expression whose value is a string, including spacing, containing the |
| 9989 | +assembler operation to identify the following data as writable initialized |
| 9990 | +data. Normally "\t.data" is right. |
| 9991 | +*/ |
| 9992 | +#define DATA_SECTION_ASM_OP "\t.data" |
| 9993 | + |
| 9994 | +/* |
| 9995 | +If defined, a C expression whose value is a string, including spacing, |
| 9996 | +containing the assembler operation to identify the following data as |
| 9997 | +shared data. If not defined, DATA_SECTION_ASM_OP will be used. |
| 9998 | +*/ |
| 9999 | + |
| 10000 | +/* |
| 10001 | +A C expression whose value is a string, including spacing, containing |
| 10002 | +the assembler operation to identify the following data as read-only |
| 10003 | +initialized data. |
| 10004 | +*/ |
| 10005 | +#undef READONLY_DATA_SECTION_ASM_OP |
| 10006 | +#define READONLY_DATA_SECTION_ASM_OP \ |
| 10007 | + ((TARGET_USE_RODATA_SECTION) ? \ |
| 10008 | + "\t.section\t.rodata" : \ |
| 10009 | + TEXT_SECTION_ASM_OP ) |
| 10010 | + |
| 10011 | + |
| 10012 | +/* |
| 10013 | +If defined, a C expression whose value is a string, including spacing, |
| 10014 | +containing the assembler operation to identify the following data as |
| 10015 | +uninitialized global data. If not defined, and neither |
| 10016 | +ASM_OUTPUT_BSS nor ASM_OUTPUT_ALIGNED_BSS are defined, |
| 10017 | +uninitialized global data will be output in the data section if |
| 10018 | +-fno-common is passed, otherwise ASM_OUTPUT_COMMON will be |
| 10019 | +used. |
| 10020 | +*/ |
| 10021 | +#define BSS_SECTION_ASM_OP "\t.section\t.bss" |
| 10022 | + |
| 10023 | +/* |
| 10024 | +If defined, a C expression whose value is a string, including spacing, |
| 10025 | +containing the assembler operation to identify the following data as |
| 10026 | +uninitialized global shared data. If not defined, and |
| 10027 | +BSS_SECTION_ASM_OP is, the latter will be used. |
| 10028 | +*/ |
| 10029 | +/*#define SHARED_BSS_SECTION_ASM_OP "\trseg\tshared_bbs_section:data:noroot(0)\n"*/ |
| 10030 | +/* |
| 10031 | +If defined, a C expression whose value is a string, including spacing, |
| 10032 | +containing the assembler operation to identify the following data as |
| 10033 | +initialization code. If not defined, GCC will assume such a section does |
| 10034 | +not exist. |
| 10035 | +*/ |
| 10036 | +#undef INIT_SECTION_ASM_OP |
| 10037 | +#define INIT_SECTION_ASM_OP "\t.section\t.init" |
| 10038 | + |
| 10039 | +/* |
| 10040 | +If defined, a C expression whose value is a string, including spacing, |
| 10041 | +containing the assembler operation to identify the following data as |
| 10042 | +finalization code. If not defined, GCC will assume such a section does |
| 10043 | +not exist. |
| 10044 | +*/ |
| 10045 | +#undef FINI_SECTION_ASM_OP |
| 10046 | +#define FINI_SECTION_ASM_OP "\t.section\t.fini" |
| 10047 | + |
| 10048 | +/* |
| 10049 | +If defined, an ASM statement that switches to a different section |
| 10050 | +via SECTION_OP, calls FUNCTION, and switches back to |
| 10051 | +the text section. This is used in crtstuff.c if |
| 10052 | +INIT_SECTION_ASM_OP or FINI_SECTION_ASM_OP is defined, to make |
| 10053 | +calls to initialization and finalization functions from the init and fini |
| 10054 | +sections. By default, this macro uses a simple function call. Some |
| 10055 | +ports need hand-crafted assembly code to avoid dependencies on |
| 10056 | +registers initialized in the function prologue or to ensure that |
| 10057 | +constant pools don't end up too far away in the text section. |
| 10058 | +*/ |
| 10059 | +#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \ |
| 10060 | + asm ( SECTION_OP "\n" \ |
| 10061 | + "mcall r6[" USER_LABEL_PREFIX #FUNC "@got]\n" \ |
| 10062 | + TEXT_SECTION_ASM_OP); |
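| | +/* Note that the call is made indirectly through the GOT entry for FUNC, |
| | + using r6 as the GOT pointer (see PIC_OFFSET_TABLE_REGNUM below). */ |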
| 10063 | + |
| 10064 | + |
| 10065 | +/* |
| 10066 | +Define this macro to be an expression with a nonzero value if jump |
| 10067 | +tables (for tablejump insns) should be output in the text |
| 10068 | +section, along with the assembler instructions. Otherwise, the |
| 10069 | +readonly data section is used. |
| 10070 | + |
| 10071 | +This macro is irrelevant if there is no separate readonly data section. |
| 10072 | +*/ |
| 10073 | +#define JUMP_TABLES_IN_TEXT_SECTION 1 |
| 10074 | + |
| 10075 | + |
| 10076 | +/****************************************************************************** |
| 10077 | + * Position Independent Code (PIC) |
| 10078 | + *****************************************************************************/ |
| 10079 | + |
| 10080 | +#ifndef AVR32_ALWAYS_PIC |
| 10081 | +#define AVR32_ALWAYS_PIC 0 |
| 10082 | +#endif |
| 10083 | + |
| 10084 | +/* GOT is set to r6 */ |
| 10085 | +#define PIC_OFFSET_TABLE_REGNUM INTERNAL_REGNUM(6) |
| 10086 | + |
| 10087 | +/* |
| 10088 | +A C expression that is nonzero if X is a legitimate immediate |
| 10089 | +operand on the target machine when generating position independent code. |
| 10090 | +You can assume that X satisfies CONSTANT_P, so you need not |
| 10091 | +check this. You can also assume flag_pic is true, so you need not |
| 10092 | +check it either. You need not define this macro if all constants |
| 10093 | +(including SYMBOL_REF) can be immediate operands when generating |
| 10094 | +position independent code. |
| 10095 | +*/ |
| 10096 | +/* We can't directly access anything that contains a symbol, |
| 10097 | + nor can we indirect via the constant pool. */ |
| 10098 | +#define LEGITIMATE_PIC_OPERAND_P(X) avr32_legitimate_pic_operand_p(X) |
| 10099 | + |
| 10100 | + |
| 10101 | +/* We need to know when we are making a constant pool; this determines |
| 10102 | + whether data needs to be in the GOT or can be referenced via a GOT |
| 10103 | + offset. */ |
| 10104 | +extern int making_const_table; |
| 10105 | + |
| 10106 | +/****************************************************************************** |
| 10107 | + * Defining the Output Assembler Language |
| 10108 | + *****************************************************************************/ |
| 10109 | + |
| 10110 | + |
| 10111 | +/* |
| 10112 | +A C string constant describing how to begin a comment in the target |
| 10113 | +assembler language. The compiler assumes that the comment will end at |
| 10114 | +the end of the line. |
| 10115 | +*/ |
| 10116 | +#define ASM_COMMENT_START "# " |
| 10117 | + |
| 10118 | +/* |
| 10119 | +A C string constant for text to be output before each asm |
| 10120 | +statement or group of consecutive ones. Normally this is |
| 10121 | +"#APP", which is a comment that has no effect on most |
| 10122 | +assemblers but tells the GNU assembler that it must check the lines |
| 10123 | +that follow for all valid assembler constructs. |
| 10124 | +*/ |
| 10125 | +#undef ASM_APP_ON |
| 10126 | +#define ASM_APP_ON "#APP\n" |
| 10127 | + |
| 10128 | +/* |
| 10129 | +A C string constant for text to be output after each asm |
| 10130 | +statement or group of consecutive ones. Normally this is |
| 10131 | +"#NO_APP", which tells the GNU assembler to resume making the |
| 10132 | +time-saving assumptions that are valid for ordinary compiler output. |
| 10133 | +*/ |
| 10134 | +#undef ASM_APP_OFF |
| 10135 | +#define ASM_APP_OFF "#NO_APP\n" |
| 10136 | + |
| 10137 | + |
| 10138 | + |
| 10139 | +#define FILE_ASM_OP "\t.file\n" |
| 10140 | +#define IDENT_ASM_OP "\t.ident\t" |
| 10141 | +#define SET_ASM_OP "\t.set\t" |
| 10142 | + |
| 10143 | + |
| 10144 | +/* |
| 10145 | + * Output assembly directives to switch to section name. The section |
| 10146 | + * should have attributes as specified by flags, which is a bit mask |
| 10147 | + * of the SECTION_* flags defined in 'output.h'. If align is nonzero, |
| 10148 | + * it contains an alignment in bytes to be used for the section, |
| 10149 | + * otherwise some target default should be used. Only targets that |
| 10150 | + * must specify an alignment within the section directive need pay |
| 10151 | + * attention to align -- we will still use ASM_OUTPUT_ALIGN. |
| 10152 | + * |
| 10153 | + * NOTE: This one must not be moved to avr32.c |
| 10154 | + */ |
| 10155 | +#undef TARGET_ASM_NAMED_SECTION |
| 10156 | +#define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section |
| 10157 | + |
| 10158 | + |
| 10159 | +/* |
| 10160 | +You may define this macro as a C expression. You should define the |
| 10161 | +expression to have a nonzero value if GCC should output the constant |
| 10162 | +pool for a function before the code for the function, or a zero value if |
| 10163 | +GCC should output the constant pool after the function. If you do |
| 10164 | +not define this macro, the usual case, GCC will output the constant |
| 10165 | +pool before the function. |
| 10166 | +*/ |
| 10167 | +#define CONSTANT_POOL_BEFORE_FUNCTION 0 |
| 10168 | + |
| 10169 | + |
| 10170 | +/* |
| 10171 | +Define this macro as a C expression which is nonzero if the constant |
| 10172 | +EXP, of type tree, should be output after the code for a |
| 10173 | +function. The compiler will normally output all constants before the |
| 10174 | +function; you need not define this macro if this is OK. |
| 10175 | +*/ |
| 10176 | +#define CONSTANT_AFTER_FUNCTION_P(EXP) 1 |
| 10177 | + |
| 10178 | + |
| 10179 | +/* |
| 10180 | +Define this macro as a C expression which is nonzero if C is |
| 10181 | +used as a logical line separator by the assembler. |
| 10182 | + |
| 10183 | +If you do not define this macro, the default is that only |
| 10184 | +the character ';' is treated as a logical line separator. |
| 10185 | +*/ |
| 10186 | +#define IS_ASM_LOGICAL_LINE_SEPARATOR(C) ((C) == '\n') |
| 10187 | + |
| 10188 | + |
| 10189 | +/** Output of Uninitialized Variables **/ |
| 10190 | + |
| 10191 | +/* |
| 10192 | +A C statement (sans semicolon) to output to the stdio stream |
| 10193 | +STREAM the assembler definition of a common-label named |
| 10194 | +NAME whose size is SIZE bytes. The variable ROUNDED |
| 10195 | +is the size rounded up to whatever alignment the caller wants. |
| 10196 | + |
| 10197 | +Use the expression assemble_name(STREAM, NAME) to |
| 10198 | +output the name itself; before and after that, output the additional |
| 10199 | +assembler syntax for defining the name, and a newline. |
| 10200 | + |
| 10201 | +This macro controls how the assembler definitions of uninitialized |
| 10202 | +common global variables are output. |
| 10203 | +*/ |
| 10204 | +/* |
| 10205 | +#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \ |
| 10206 | + avr32_asm_output_common(STREAM, NAME, SIZE, ROUNDED) |
| 10207 | +*/ |
| 10208 | + |
| 10209 | +#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \ |
| 10210 | + do \ |
| 10211 | + { \ |
| 10212 | + fputs ("\t.comm ", (FILE)); \ |
| 10213 | + assemble_name ((FILE), (NAME)); \ |
| 10214 | + fprintf ((FILE), ",%d\n", (SIZE)); \ |
| 10215 | + } \ |
| 10216 | + while (0) |
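| | +/* This emits, for example, ".comm foo,16" for a 16-byte common symbol; |
| | + the ROUNDED argument is not used. */ |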
| 10217 | + |
| 10218 | +/* |
| 10219 | + * Like ASM_OUTPUT_BSS except takes the required alignment as a |
| 10220 | + * separate, explicit argument. If you define this macro, it is used |
| 10221 | + * in place of ASM_OUTPUT_BSS, and gives you more flexibility in |
| 10222 | + * handling the required alignment of the variable. The alignment is |
| 10223 | + * specified as the number of bits. |
| 10224 | + * |
| 10225 | + * Try to use function asm_output_aligned_bss defined in file varasm.c |
| 10226 | + * when defining this macro. |
| 10227 | + */ |
| 10228 | +#define ASM_OUTPUT_ALIGNED_BSS(STREAM, DECL, NAME, SIZE, ALIGNMENT) \ |
| 10229 | + asm_output_aligned_bss (STREAM, DECL, NAME, SIZE, ALIGNMENT) |
| 10230 | + |
| 10231 | +/* |
| 10232 | +A C statement (sans semicolon) to output to the stdio stream |
| 10233 | +STREAM the assembler definition of a local-common-label named |
| 10234 | +NAME whose size is SIZE bytes. The variable ROUNDED |
| 10235 | +is the size rounded up to whatever alignment the caller wants. |
| 10236 | + |
| 10237 | +Use the expression assemble_name(STREAM, NAME) to |
| 10238 | +output the name itself; before and after that, output the additional |
| 10239 | +assembler syntax for defining the name, and a newline. |
| 10240 | + |
| 10241 | +This macro controls how the assembler definitions of uninitialized |
| 10242 | +static variables are output. |
| 10243 | +*/ |
| 10244 | +#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \ |
| 10245 | + do \ |
| 10246 | + { \ |
| 10247 | + fputs ("\t.lcomm ", (FILE)); \ |
| 10248 | + assemble_name ((FILE), (NAME)); \ |
| 10249 | + fprintf ((FILE), ",%d, %d\n", (SIZE), 2); \ |
| 10250 | + } \ |
| 10251 | + while (0) |
| 10252 | + |
| 10253 | + |
| 10254 | +/* |
| 10255 | +A C statement (sans semicolon) to output to the stdio stream |
| 10256 | +STREAM the assembler definition of a label named NAME. |
| 10257 | +Use the expression assemble_name(STREAM, NAME) to |
| 10258 | +output the name itself; before and after that, output the additional |
| 10259 | +assembler syntax for defining the name, and a newline. |
| 10260 | +*/ |
| 10261 | +#define ASM_OUTPUT_LABEL(STREAM, NAME) avr32_asm_output_label(STREAM, NAME) |
| 10262 | + |
| 10263 | +/* A C string containing the appropriate assembler directive to |
| 10264 | + * specify the size of a symbol, without any arguments. On systems |
| 10265 | + * that use ELF, the default (in 'config/elfos.h') is '"\t.size\t"'; |
| 10266 | + * on other systems, the default is not to define this macro. |
| 10267 | + * |
| 10268 | + * Define this macro only if it is correct to use the default |
| 10269 | + * definitions of ASM_OUTPUT_SIZE_DIRECTIVE and |
| 10270 | + * ASM_OUTPUT_MEASURED_SIZE for your system. If you need your own |
| 10271 | + * custom definitions of those macros, or if you do not need explicit |
| 10272 | + * symbol sizes at all, do not define this macro. |
| 10273 | + */ |
| 10274 | +#define SIZE_ASM_OP "\t.size\t" |
| 10275 | + |
| 10276 | + |
| 10277 | +/* |
| 10278 | +A C statement (sans semicolon) to output to the stdio stream |
| 10279 | +STREAM some commands that will make the label NAME global; |
| 10280 | +that is, available for reference from other files. Use the expression |
| 10281 | +assemble_name(STREAM, NAME) to output the name |
| 10282 | +itself; before and after that, output the additional assembler syntax |
| 10283 | +for making that name global, and a newline. |
| 10284 | +*/ |
| 10285 | +#define GLOBAL_ASM_OP "\t.globl\t" |
| 10286 | + |
| 10287 | + |
| 10288 | + |
| 10289 | +/* |
| 10290 | +A C expression which evaluates to true if the target supports weak symbols. |
| 10291 | + |
| 10292 | +If you don't define this macro, defaults.h provides a default |
| 10293 | +definition. If either ASM_WEAKEN_LABEL or ASM_WEAKEN_DECL |
| 10294 | +is defined, the default definition is '1'; otherwise, it is |
| 10295 | +'0'. Define this macro if you want to control weak symbol support |
| 10296 | +with a compiler flag such as -melf. |
| 10297 | +*/ |
| 10298 | +#define SUPPORTS_WEAK 1 |
| 10299 | + |
| 10300 | +/* |
| 10301 | +A C statement (sans semicolon) to output to the stdio stream |
| 10302 | +STREAM a reference in assembler syntax to a label named |
| 10303 | +NAME. This should add '_' to the front of the name, if that |
| 10304 | +is customary on your operating system, as it is in most Berkeley Unix |
| 10305 | +systems. This macro is used in assemble_name. |
| 10306 | +*/ |
| 10307 | +#define ASM_OUTPUT_LABELREF(STREAM, NAME) \ |
| 10308 | + avr32_asm_output_labelref(STREAM, NAME) |
| 10309 | + |
| 10310 | + |
| 10311 | + |
| 10312 | +/* |
| 10313 | +A C expression to assign to OUTVAR (which is a variable of type |
| 10314 | +char *) a newly allocated string made from the string |
| 10315 | +NAME and the number NUMBER, with some suitable punctuation |
| 10316 | +added. Use alloca to get space for the string. |
| 10317 | + |
| 10318 | +The string will be used as an argument to ASM_OUTPUT_LABELREF to |
| 10319 | +produce an assembler label for an internal static variable whose name is |
| 10320 | +NAME. Therefore, the string must be such as to result in valid |
| 10321 | +assembler code. The argument NUMBER is different each time this |
| 10322 | +macro is executed; it prevents conflicts between similarly-named |
| 10323 | +internal static variables in different scopes. |
| 10324 | + |
| 10325 | +Ideally this string should not be a valid C identifier, to prevent any |
| 10326 | +conflict with the user's own symbols. Most assemblers allow periods |
| 10327 | +or percent signs in assembler symbols; putting at least one of these |
| 10328 | +between the name and the number will suffice. |
| 10329 | +*/ |
| 10330 | +#define ASM_FORMAT_PRIVATE_NAME(OUTVAR, NAME, NUMBER) \ |
| 10331 | + do \ |
| 10332 | + { \ |
| 10333 | + (OUTVAR) = (char *) alloca (strlen ((NAME)) + 10); \ |
| 10334 | + sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)); \ |
| 10335 | + } \ |
| 10336 | + while (0) |
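| | +/* For example, a static named "foo" with NUMBER 42 is given the assembler |
| | + name "foo.42", which is not a valid C identifier and so cannot collide |
| | + with user symbols. */ |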
| 10337 | + |
| 10338 | + |
| 10339 | +/** Macros Controlling Initialization Routines **/ |
| 10340 | + |
| 10341 | + |
| 10342 | +/* |
| 10343 | +If defined, main will not call __main as described above. |
| 10344 | +This macro should be defined for systems that control start-up code |
| 10345 | +on a symbol-by-symbol basis, such as OSF/1, and should not |
| 10346 | +be defined explicitly for systems that support INIT_SECTION_ASM_OP. |
| 10347 | +*/ |
| 10348 | +/* |
| 10349 | + __main is not defined when debugging. |
| 10350 | +*/ |
| 10351 | +#define HAS_INIT_SECTION |
| 10352 | + |
| 10353 | + |
| 10354 | +/** Output of Assembler Instructions **/ |
| 10355 | + |
| 10356 | +/* |
| 10357 | +A C initializer containing the assembler's names for the machine |
| 10358 | +registers, each one as a C string constant. This is what translates |
| 10359 | +register numbers in the compiler into assembler language. |
| 10360 | +*/ |
| 10361 | + |
| 10362 | +#define REGISTER_NAMES \ |
| 10363 | +{ \ |
| 10364 | + "pc", "lr", \ |
| 10365 | + "sp", "r12", \ |
| 10366 | + "r11", "r10", \ |
| 10367 | + "r9", "r8", \ |
| 10368 | + "r7", "r6", \ |
| 10369 | + "r5", "r4", \ |
| 10370 | + "r3", "r2", \ |
| 10371 | + "r1", "r0", \ |
| 10372 | + "f15","f14", \ |
| 10373 | + "f13","f12", \ |
| 10374 | + "f11","f10", \ |
| 10375 | + "f9", "f8", \ |
| 10376 | + "f7", "f6", \ |
| 10377 | + "f5", "f4", \ |
| 10378 | + "f3", "f2", \ |
| 10379 | + "f1", "f0" \ |
| 10380 | +} |
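| | +/* The table is indexed by the compiler-internal register numbers, which run |
| | + opposite to the hardware numbering: internal register 0 is pc (hardware |
| | + r15) and internal register 15 is r0 (cf. the INTERNAL_REGNUM and |
| | + ASM_REGNUM mappings). */ |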
| 10381 | + |
| 10382 | +/* |
| 10383 | +A C compound statement to output to stdio stream STREAM the |
| 10384 | +assembler syntax for an instruction operand X. X is an |
| 10385 | +RTL expression. |
| 10386 | + |
| 10387 | +CODE is a value that can be used to specify one of several ways |
| 10388 | +of printing the operand. It is used when identical operands must be |
| 10389 | +printed differently depending on the context. CODE comes from |
| 10390 | +the '%' specification that was used to request printing of the |
| 10391 | +operand. If the specification was just '%digit' then |
| 10392 | +CODE is 0; if the specification was '%ltr digit' |
| 10393 | +then CODE is the ASCII code for ltr. |
| 10394 | + |
| 10395 | +If X is a register, this macro should print the register's name. |
| 10396 | +The names can be found in an array reg_names whose type is |
| 10397 | +char *[]. reg_names is initialized from REGISTER_NAMES. |
| 10398 | + |
| 10399 | +When the machine description has a specification '%punct' |
| 10400 | +(a '%' followed by a punctuation character), this macro is called |
| 10401 | +with a null pointer for X and the punctuation character for |
| 10402 | +CODE. |
| 10403 | +*/ |
| 10404 | +#define PRINT_OPERAND(STREAM, X, CODE) avr32_print_operand(STREAM, X, CODE) |
| 10405 | + |
| 10406 | +/* A C statement to be executed just prior to the output of |
| 10407 | + assembler code for INSN, to modify the extracted operands so |
| 10408 | + they will be output differently. |
| 10409 | + |
| 10410 | + Here the argument OPVEC is the vector containing the operands |
| 10411 | + extracted from INSN, and NOPERANDS is the number of elements of |
| 10412 | + the vector which contain meaningful data for this insn. |
| 10413 | + The contents of this vector are what will be used to convert the insn |
| 10414 | + template into assembler code, so you can change the assembler output |
| 10415 | + by changing the contents of the vector. */ |
| 10416 | +#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \ |
| 10417 | + avr32_final_prescan_insn ((INSN), (OPVEC), (NOPERANDS)) |
| 10418 | + |
| 10419 | +/* |
| 10420 | +A C expression which evaluates to true if CODE is a valid |
| 10421 | +punctuation character for use in the PRINT_OPERAND macro. If |
| 10422 | +PRINT_OPERAND_PUNCT_VALID_P is not defined, it means that no |
| 10423 | +punctuation characters (except for the standard one, '%') are used |
| 10424 | +in this way. |
| 10425 | +*/ |
| 10426 | +/* |
| 10427 | + 'm' refers to the most significant word in a two-register mode. |
| 10428 | +*/ |
| 10429 | +#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == 'm' || (CODE) == 'e') |
| 10430 | + |
| 10431 | +/* |
| 10432 | +A C compound statement to output to stdio stream STREAM the |
| 10433 | +assembler syntax for an instruction operand that is a memory reference |
| 10434 | +whose address is X. X is an RTL expression. |
| 10435 | + |
| 10436 | +On some machines, the syntax for a symbolic address depends on the |
| 10437 | +section that the address refers to. On these machines, define the macro |
| 10438 | +ENCODE_SECTION_INFO to store the information into the |
| 10439 | +symbol_ref, and then check for it here. (see Assembler Format.) |
| 10440 | +*/ |
| 10441 | +#define PRINT_OPERAND_ADDRESS(STREAM, X) avr32_print_operand_address(STREAM, X) |
| 10442 | + |
| 10443 | + |
| 10444 | +/** Output of Dispatch Tables **/ |
| 10445 | + |
| 10446 | +/* |
| 10447 | + * A C statement to output to the stdio stream stream an assembler |
| 10448 | + * pseudo-instruction to generate a difference between two |
| 10449 | + * labels. value and rel are the numbers of two internal labels. The |
| 10450 | + * definitions of these labels are output using |
| 10451 | + * (*targetm.asm_out.internal_label), and they must be printed in the |
| 10452 | + * same way here. For example, |
| 10453 | + * |
| 10454 | + * fprintf (stream, "\t.word L%d-L%d\n", |
| 10455 | + * value, rel) |
| 10456 | + * |
| 10457 | + * You must provide this macro on machines where the addresses in a |
| 10458 | + * dispatch table are relative to the table's own address. If defined, |
| 10459 | + * GCC will also use this macro on all machines when producing |
| 10460 | + * PIC. body is the body of the ADDR_DIFF_VEC; it is provided so that |
| 10461 | + * the mode and flags can be read. |
| 10462 | + */ |
| 10463 | +#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \ |
| 10464 | + fprintf(STREAM, "\tbral\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE) |
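| | +/* Instead of emitting an address difference, the AVR32 port emits a "bral" |
| | + branch to the target label, so a dispatch table is really a table of |
| | + branch instructions; this is also why JUMP_TABLES_IN_TEXT_SECTION is set |
| | + above. */ |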
| 10465 | + |
| 10466 | +/* |
| 10467 | +This macro should be provided on machines where the addresses |
| 10468 | +in a dispatch table are absolute. |
| 10469 | + |
| 10470 | +The definition should be a C statement to output to the stdio stream |
| 10471 | +STREAM an assembler pseudo-instruction to generate a reference to |
| 10472 | +a label. VALUE is the number of an internal label whose |
| 10473 | +definition is output using ASM_OUTPUT_INTERNAL_LABEL. |
| 10474 | +For example, |
| 10475 | + |
| 10476 | +fprintf(STREAM, "\t.word L%d\n", VALUE) |
| 10477 | +*/ |
| 10478 | + |
| 10479 | +#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \ |
| 10480 | + fprintf(STREAM, "\t.long %sL%d\n", LOCAL_LABEL_PREFIX, VALUE) |
| 10481 | + |
| 10482 | +/** Assembler Commands for Exception Regions */ |
| 10483 | + |
| 10484 | +/* ToDo: All of this subsection */ |
| 10485 | + |
| 10486 | +/** Assembler Commands for Alignment */ |
| 10487 | + |
| 10488 | + |
| 10489 | +/* |
| 10490 | +A C statement to output to the stdio stream STREAM an assembler |
| 10491 | +command to advance the location counter to a multiple of 2 to the |
| 10492 | +POWER bytes. POWER will be a C expression of type int. |
| 10493 | +*/ |
| 10494 | +#define ASM_OUTPUT_ALIGN(STREAM, POWER) \ |
| 10495 | + do \ |
| 10496 | + { \ |
| 10497 | + if ((POWER) != 0) \ |
| 10498 | + fprintf(STREAM, "\t.align\t%d\n", POWER); \ |
| 10499 | + } \ |
| 10500 | + while (0) |
| 10501 | + |
| 10502 | +/* |
| 10503 | +Like ASM_OUTPUT_ALIGN, except that the "nop" instruction is used for padding, if |
| 10504 | +necessary. |
| 10505 | +*/ |
| 10506 | +#define ASM_OUTPUT_ALIGN_WITH_NOP(STREAM, POWER) \ |
| 10507 | +  fprintf(STREAM, "\t.balignw\t%d, 0xd703\n", (1 << (POWER))) |
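| | +/* The 0xd703 fill value passed to .balignw is the encoding of the "nop" |
| | + instruction mentioned above, so any padding consists of executable nops. */ |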
| 10508 | + |
| 10509 | + |
| 10510 | + |
| 10511 | +/****************************************************************************** |
| 10512 | + * Controlling Debugging Information Format |
| 10513 | + *****************************************************************************/ |
| 10514 | + |
| 10515 | +/* How to renumber registers for dbx and gdb. */ |
| 10516 | +#define DBX_REGISTER_NUMBER(REGNO) ASM_REGNUM (REGNO) |
| 10517 | + |
| 10518 | +/* The DWARF 2 CFA column which tracks the return address. */ |
| 10519 | +#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM(LR_REGNUM) |
| 10520 | + |
| 10521 | +/* |
| 10522 | +Define this macro if GCC should produce dwarf version 2 format |
| 10523 | +debugging output in response to the -g option. |
| 10524 | + |
| 10525 | +To support optional call frame debugging information, you must also |
| 10526 | +define INCOMING_RETURN_ADDR_RTX and either set |
| 10527 | +RTX_FRAME_RELATED_P on the prologue insns if you use RTL for the |
| 10528 | +prologue, or call dwarf2out_def_cfa and dwarf2out_reg_save |
| 10529 | +as appropriate from TARGET_ASM_FUNCTION_PROLOGUE if you don't. |
| 10530 | +*/ |
| 10531 | +#define DWARF2_DEBUGGING_INFO 1 |
| 10532 | + |
| 10533 | + |
| 10534 | +#define DWARF2_ASM_LINE_DEBUG_INFO 1 |
| 10535 | +#define DWARF2_FRAME_INFO 1 |
| 10536 | + |
| 10537 | + |
| 10538 | +/****************************************************************************** |
| 10539 | + * Miscellaneous Parameters |
| 10540 | + *****************************************************************************/ |
| 10541 | + |
| 10542 | +/* ToDo: a lot */ |
| 10543 | + |
| 10544 | +/* |
| 10545 | +An alias for a machine mode name. This is the machine mode that |
| 10546 | +elements of a jump-table should have. |
| 10547 | +*/ |
| 10548 | +#define CASE_VECTOR_MODE SImode |
| 10549 | + |
| 10550 | +/* |
| 10551 | +Define this macro to be a C expression to indicate when jump-tables |
| 10552 | +should contain relative addresses. If jump-tables never contain |
| 10553 | +relative addresses, then you need not define this macro. |
| 10554 | +*/ |
| 10555 | +#define CASE_VECTOR_PC_RELATIVE 0 |
| 10556 | + |
| 10557 | +/* |
| 10558 | +The maximum number of bytes that a single instruction can move quickly |
| 10559 | +between memory and registers or between two memory locations. |
| 10560 | +*/ |
| 10561 | +#define MOVE_MAX (2*UNITS_PER_WORD) |
| 10562 | + |
| 10563 | + |
| 10564 | +/* A C expression that is nonzero if on this machine the number of bits actually used |
| 10565 | + for the count of a shift operation is equal to the number of bits needed to represent |
| 10566 | + the size of the object being shifted. When this macro is nonzero, the compiler will |
| 10567 | + assume that it is safe to omit a sign-extend, zero-extend, and certain bitwise 'and' |
| 10568 | + instructions that truncate the count of a shift operation. On machines that have |
| 10569 | + instructions that act on bit-fields at variable positions, which may include 'bit test' |
| 10571 | + instructions, a nonzero SHIFT_COUNT_TRUNCATED also enables deletion of truncations |
| 10572 | + of the values that serve as arguments to bit-field instructions. |
| 10573 | + If both types of instructions truncate the count (for shifts) and position (for bit-field |
| 10574 | + operations), or if no variable-position bit-field instructions exist, you should define |
| 10575 | + this macro. |
| 10576 | + However, on some machines, such as the 80386 and the 680x0, truncation only applies |
| 10577 | + to shift operations and not the (real or pretended) bit-field operations. Define |
| 10578 | + SHIFT_COUNT_TRUNCATED to be zero on such machines. Instead, add patterns to the 'md' file |
| 10579 | + that include the implied truncation of the shift instructions. |
| 10580 | + You need not define this macro if it would always have the value of zero. */ |
| 10581 | +#define SHIFT_COUNT_TRUNCATED 1 |
| 10582 | + |
| 10583 | +/* |
| 10584 | +A C expression which is nonzero if on this machine it is safe to |
| 10585 | +convert an integer of INPREC bits to one of OUTPREC |
| 10586 | +bits (where OUTPREC is smaller than INPREC) by merely |
| 10587 | +operating on it as if it had only OUTPREC bits. |
| 10588 | + |
| 10589 | +On many machines, this expression can be 1. |
| 10590 | + |
| 10591 | +When TRULY_NOOP_TRUNCATION returns 1 for a pair of sizes for |
| 10592 | +modes for which MODES_TIEABLE_P is 0, suboptimal code can result. |
| 10593 | +If this is the case, making TRULY_NOOP_TRUNCATION return 0 in |
| 10594 | +such cases may improve things. |
| 10595 | +*/ |
| 10596 | +#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1 |
| 10597 | + |
| 10598 | +/* |
| 10599 | +An alias for the machine mode for pointers. On most machines, define |
| 10600 | +this to be the integer mode corresponding to the width of a hardware |
| 10601 | +pointer; SImode on 32-bit machines or DImode on 64-bit machines. |
| 10602 | +On some machines you must define this to be one of the partial integer |
| 10603 | +modes, such as PSImode. |
| 10604 | + |
| 10605 | +The width of Pmode must be at least as large as the value of |
| 10606 | +POINTER_SIZE. If it is not equal, you must define the macro |
| 10607 | +POINTERS_EXTEND_UNSIGNED to specify how pointers are extended |
| 10608 | +to Pmode. |
| 10609 | +*/ |
| 10610 | +#define Pmode SImode |
| 10611 | + |
| 10612 | +/* |
| 10613 | +An alias for the machine mode used for memory references to functions |
| 10614 | +being called, in call RTL expressions. On most machines this |
| 10615 | +should be QImode. |
| 10616 | +*/ |
| 10617 | +#define FUNCTION_MODE SImode |
| 10618 | + |
| 10619 | + |
| 10620 | +#define REG_S_P(x) \ |
| 10621 | + (REG_P (x) || (GET_CODE (x) == SUBREG && REG_P (XEXP (x, 0)))) |
| 10622 | + |
| 10623 | + |
| 10624 | +/* If defined, modifies the length assigned to instruction INSN as a |
| 10625 | + function of the context in which it is used. LENGTH is an lvalue |
| 10626 | + that contains the initially computed length of the insn and should |
| 10627 | + be updated with the correct length of the insn. */ |
| 10628 | +#define ADJUST_INSN_LENGTH(INSN, LENGTH) \ |
| 10629 | + ((LENGTH) = avr32_adjust_insn_length ((INSN), (LENGTH))) |
| 10630 | + |
| 10631 | + |
| 10632 | +#define CLZ_DEFINED_VALUE_AT_ZERO(mode, value) \ |
| 10633 | + (value = 32, (mode == SImode)) |
| 10634 | + |
| 10635 | +#define CTZ_DEFINED_VALUE_AT_ZERO(mode, value) \ |
| 10636 | + (value = 32, (mode == SImode)) |
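| | +/* Both count-leading-zeros and count-trailing-zeros are defined to return 32 |
| | + (the operand width) for a zero input, but only in SImode. */ |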
| 10637 | + |
| 10638 | +#define UNITS_PER_SIMD_WORD UNITS_PER_WORD |
| 10639 | + |
| 10640 | +#define STORE_FLAG_VALUE 1 |
| 10641 | + |
| 10642 | +enum avr32_builtins |
| 10643 | +{ |
| 10644 | + AVR32_BUILTIN_MTSR, |
| 10645 | + AVR32_BUILTIN_MFSR, |
| 10646 | + AVR32_BUILTIN_MTDR, |
| 10647 | + AVR32_BUILTIN_MFDR, |
| 10648 | + AVR32_BUILTIN_CACHE, |
| 10649 | + AVR32_BUILTIN_SYNC, |
| 10650 | + AVR32_BUILTIN_TLBR, |
| 10651 | + AVR32_BUILTIN_TLBS, |
| 10652 | + AVR32_BUILTIN_TLBW, |
| 10653 | + AVR32_BUILTIN_BREAKPOINT, |
| 10654 | + AVR32_BUILTIN_XCHG, |
| 10655 | + AVR32_BUILTIN_LDXI, |
| 10656 | + AVR32_BUILTIN_BSWAP16, |
| 10657 | + AVR32_BUILTIN_BSWAP32, |
| 10658 | + AVR32_BUILTIN_COP, |
| 10659 | + AVR32_BUILTIN_MVCR_W, |
| 10660 | + AVR32_BUILTIN_MVRC_W, |
| 10661 | + AVR32_BUILTIN_MVCR_D, |
| 10662 | + AVR32_BUILTIN_MVRC_D, |
| 10663 | + AVR32_BUILTIN_MULSATHH_H, |
| 10664 | + AVR32_BUILTIN_MULSATHH_W, |
| 10665 | + AVR32_BUILTIN_MULSATRNDHH_H, |
| 10666 | + AVR32_BUILTIN_MULSATRNDWH_W, |
| 10667 | + AVR32_BUILTIN_MULSATWH_W, |
| 10668 | + AVR32_BUILTIN_MACSATHH_W, |
| 10669 | + AVR32_BUILTIN_SATADD_H, |
| 10670 | + AVR32_BUILTIN_SATSUB_H, |
| 10671 | + AVR32_BUILTIN_SATADD_W, |
| 10672 | + AVR32_BUILTIN_SATSUB_W, |
| 10673 | + AVR32_BUILTIN_MULWH_D, |
| 10674 | + AVR32_BUILTIN_MULNWH_D, |
| 10675 | + AVR32_BUILTIN_MACWH_D, |
| 10676 | + AVR32_BUILTIN_MACHH_D, |
| 10677 | + AVR32_BUILTIN_MUSFR, |
| 10678 | + AVR32_BUILTIN_MUSTR, |
| 10679 | + AVR32_BUILTIN_SATS, |
| 10680 | + AVR32_BUILTIN_SATU, |
| 10681 | + AVR32_BUILTIN_SATRNDS, |
| 10682 | + AVR32_BUILTIN_SATRNDU |
| 10683 | +}; |
| 10684 | + |
| 10685 | + |
| 10686 | +#define FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE, COMPARISON) \ |
| 10687 | + ((MODE == SFmode) || (MODE == DFmode)) |
| 10688 | + |
| 10689 | +#define RENAME_LIBRARY_SET ".set" |
| 10690 | + |
| 10691 | +/* Make __avr32_<ABI_NAME> an alias for __<GCC_NAME>. */ |
| 10692 | +#define RENAME_LIBRARY(GCC_NAME, ABI_NAME) \ |
| 10693 | + __asm__ (".globl\t__avr32_" #ABI_NAME "\n" \ |
| 10694 | + ".set\t__avr32_" #ABI_NAME \ |
| 10695 | + ", __" #GCC_NAME "\n"); |
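| | +/* For example, RENAME_LIBRARY (muldi3, mul64) below makes __avr32_mul64 an |
| | + alias for __muldi3. */ |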
| 10696 | + |
| 10697 | +/* Give libgcc functions avr32 ABI name. */ |
| 10698 | +#ifdef L_muldi3 |
| 10699 | +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (muldi3, mul64) |
| 10700 | +#endif |
| 10701 | +#ifdef L_divdi3 |
| 10702 | +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (divdi3, sdiv64) |
| 10703 | +#endif |
| 10704 | +#ifdef L_udivdi3 |
| 10705 | +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (udivdi3, udiv64) |
| 10706 | +#endif |
| 10707 | +#ifdef L_moddi3 |
| 10708 | +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (moddi3, smod64) |
| 10709 | +#endif |
| 10710 | +#ifdef L_umoddi3 |
| 10711 | +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (umoddi3, umod64) |
| 10712 | +#endif |
| 10713 | +#ifdef L_ashldi3 |
| 10714 | +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashldi3, lsl64) |
| 10715 | +#endif |
| 10716 | +#ifdef L_lshrdi3 |
| 10717 | +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (lshrdi3, lsr64) |
| 10718 | +#endif |
| 10719 | +#ifdef L_ashrdi3 |
| 10720 | +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashrdi3, asr64) |
| 10721 | +#endif |
| 10722 | + |
| 10723 | +#ifdef L_fixsfdi |
| 10724 | +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixsfdi, f32_to_s64) |
| 10725 | +#endif |
| 10726 | +#ifdef L_fixunssfdi |
| 10727 | +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunssfdi, f32_to_u64) |
| 10728 | +#endif |
| 10729 | +#ifdef L_floatdidf |
| 10730 | +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdidf, s64_to_f64) |
| 10731 | +#endif |
| 10732 | +#ifdef L_floatdisf |
| 10733 | +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdisf, s64_to_f32) |
| 10734 | +#endif |
| 10735 | + |
| 10736 | +#ifdef L_addsub_sf |
| 10737 | +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (addsf3, f32_add); RENAME_LIBRARY (subsf3, f32_sub) |
| 10738 | +#endif |
| 10739 | + |
| 10740 | +#endif |
| 10741 | --- /dev/null |
| 10742 | +++ b/gcc/config/avr32/avr32.md |
| 10743 | @@ -0,0 +1,4694 @@ |
| 10744 | +;; AVR32 machine description file. |
| 10745 | +;; Copyright 2003-2006 Atmel Corporation. |
| 10746 | +;; |
| 10747 | +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| 10748 | +;; |
| 10749 | +;; This file is part of GCC. |
| 10750 | +;; |
| 10751 | +;; This program is free software; you can redistribute it and/or modify |
| 10752 | +;; it under the terms of the GNU General Public License as published by |
| 10753 | +;; the Free Software Foundation; either version 2 of the License, or |
| 10754 | +;; (at your option) any later version. |
| 10755 | +;; |
| 10756 | +;; This program is distributed in the hope that it will be useful, |
| 10757 | +;; but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 10758 | +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 10759 | +;; GNU General Public License for more details. |
| 10760 | +;; |
| 10761 | +;; You should have received a copy of the GNU General Public License |
| 10762 | +;; along with this program; if not, write to the Free Software |
| 10763 | +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
| 10764 | + |
| 10765 | +;; -*- Mode: Scheme -*- |
| 10766 | + |
| 10767 | +(define_attr "type" "alu,alu2,alu_sat,mulhh,mulwh,mulww_w,mulww_d,div,machh_w,macww_w,macww_d,branch,call,load,load_rm,store,load2,load4,store2,store4,fmul,fcmps,fcmpd,fcast,fmv,fmvcpu,fldd,fstd,flds,fsts,fstm" |
| 10768 | + (const_string "alu")) |
| 10769 | + |
| 10770 | + |
| 10771 | +(define_attr "cc" "none,set_vncz,set_ncz,set_cz,set_z,bld,compare,clobber,call_set,fpcompare,from_fpcc" |
| 10772 | + (const_string "none")) |
| 10773 | + |
| 10774 | + |
| 10775 | +(define_attr "pipeline" "ap,uc" |
| 10776 | + (const_string "ap")) |
| 10777 | + |
| 10778 | +(define_attr "length" "" |
| 10779 | + (const_int 4)) |
| 10780 | + |
| 10781 | + |
| 10782 | +;; Uses of UNSPEC in this file: |
| 10783 | +(define_constants |
| 10784 | + [(UNSPEC_PUSHM 0) |
| 10785 | + (UNSPEC_POPM 1) |
| 10786 | + (UNSPEC_UDIVMODSI4_INTERNAL 2) |
| 10787 | + (UNSPEC_DIVMODSI4_INTERNAL 3) |
| 10788 | + (UNSPEC_STM 4) |
| 10789 | + (UNSPEC_LDM 5) |
| 10790 | + (UNSPEC_MOVSICC 6) |
| 10791 | + (UNSPEC_ADDSICC 7) |
| 10792 | + (UNSPEC_COND_MI 8) |
| 10793 | + (UNSPEC_COND_PL 9) |
| 10794 | + (UNSPEC_PIC_SYM 10) |
| 10795 | + (UNSPEC_PIC_BASE 11) |
| 10796 | + (UNSPEC_STORE_MULTIPLE 12) |
| 10797 | + (UNSPEC_STMFP 13) |
| 10798 | + (UNSPEC_FPCC_TO_REG 14) |
| 10799 | + (UNSPEC_REG_TO_CC 15) |
| 10800 | + (UNSPEC_FORCE_MINIPOOL 16) |
| 10801 | + (UNSPEC_SATS 17) |
| 10802 | + (UNSPEC_SATU 18) |
| 10803 | + (UNSPEC_SATRNDS 19) |
| 10804 | + (UNSPEC_SATRNDU 20) |
| 10805 | + ]) |
| 10806 | + |
| 10807 | +(define_constants |
| 10808 | + [(VUNSPEC_EPILOGUE 0) |
| 10809 | + (VUNSPEC_CACHE 1) |
| 10810 | + (VUNSPEC_MTSR 2) |
| 10811 | + (VUNSPEC_MFSR 3) |
| 10812 | + (VUNSPEC_BLOCKAGE 4) |
| 10813 | + (VUNSPEC_SYNC 5) |
| 10814 | + (VUNSPEC_TLBR 6) |
| 10815 | + (VUNSPEC_TLBW 7) |
| 10816 | + (VUNSPEC_TLBS 8) |
| 10817 | + (VUNSPEC_BREAKPOINT 9) |
| 10818 | + (VUNSPEC_MTDR 10) |
| 10819 | + (VUNSPEC_MFDR 11) |
| 10820 | + (VUNSPEC_MVCR 12) |
| 10821 | + (VUNSPEC_MVRC 13) |
| 10822 | + (VUNSPEC_COP 14) |
| 10823 | + (VUNSPEC_ALIGN 15) |
| 10824 | + (VUNSPEC_POOL_START 16) |
| 10825 | + (VUNSPEC_POOL_END 17) |
| 10826 | + (VUNSPEC_POOL_4 18) |
| 10827 | + (VUNSPEC_POOL_8 19) |
| 10828 | + (VUNSPEC_MUSFR 20) |
| 10829 | + (VUNSPEC_MUSTR 21) |
| 10830 | + ]) |
| 10831 | + |
| 10832 | +(define_constants |
| 10833 | + [ |
| 10834 | + ;; R7 = 15-7 = 8 |
| 10835 | + (FP_REGNUM 8) |
| 10836 | + ;; Return Register = R12 = 15 - 12 = 3 |
| 10837 | + (RETVAL_REGNUM 3) |
| 10838 | + ;; SP = R13 = 15 - 13 = 2 |
| 10839 | + (SP_REGNUM 2) |
| 10840 | + ;; LR = R14 = 15 - 14 = 1 |
| 10841 | + (LR_REGNUM 1) |
| 10842 | + ;; PC = R15 = 15 - 15 = 0 |
| 10843 | + (PC_REGNUM 0) |
| 10844 | + ;; FPSR = GENERAL_REGS + 1 = 17 |
| 10845 | + (FPCC_REGNUM 17) |
| 10846 | + ]) |
| 10847 | + |
| 10848 | + |
| 10849 | + |
| 10850 | + |
| 10851 | +;;****************************************************************************** |
| 10852 | +;; Macros |
| 10853 | +;;****************************************************************************** |
| 10854 | + |
| 10855 | +;; Integer Modes for basic alu insns |
| 10856 | +(define_mode_macro INTM [SI HI QI]) |
| 10857 | +(define_mode_attr alu_cc_attr [(SI "set_vncz") (HI "clobber") (QI "clobber")]) |
| 10858 | + |
| 10859 | +;; Move word modes |
| 10860 | +(define_mode_macro MOVM [SI V2HI V4QI]) |
| 10861 | + |
| 10862 | +;; For mov/addcc insns |
| 10863 | +(define_mode_macro ADDCC [SI HI QI]) |
| 10864 | +(define_mode_macro MOVCC [SI HI QI]) |
| 10865 | +(define_mode_macro CMP [DI SI HI QI]) |
| 10866 | +(define_mode_attr cmp_constraint [(DI "r") (SI "rKs21") (HI "r") (QI "r")]) |
| 10867 | +(define_mode_attr cmp_predicate [(DI "register_operand") |
| 10868 | + (SI "register_immediate_operand") |
| 10869 | + (HI "register_operand") |
| 10870 | + (QI "register_operand")]) |
| 10871 | + |
| 10872 | +;; For all conditional insns |
| 10873 | +(define_code_macro any_cond [eq ne gt ge lt le gtu geu ltu leu]) |
| 10874 | +(define_code_attr cond [(eq "eq") (ne "ne") (gt "gt") (ge "ge") (lt "lt") (le "le") |
| 10875 | + (gtu "hi") (geu "hs") (ltu "lo") (leu "ls")]) |
| 10876 | +(define_code_attr invcond [(eq "ne") (ne "eq") (gt "le") (ge "lt") (lt "ge") (le "gt") |
| 10877 | + (gtu "ls") (geu "lo") (ltu "hs") (leu "hi")]) |
| 10878 | + |
| 10879 | +;; For logical operations |
| 10880 | +(define_code_macro logical [and ior xor]) |
| 10881 | +(define_code_attr logical_insn [(and "and") (ior "or") (xor "eor")]) |
| 10882 | + |
| 10883 | +;; Load the predicates |
| 10884 | +(include "predicates.md") |
| 10885 | + |
| 10886 | + |
| 10887 | +;;****************************************************************************** |
| 10888 | +;; Automaton pipeline description for avr32 |
| 10889 | +;;****************************************************************************** |
| 10890 | + |
| 10891 | +(define_automaton "avr32_ap") |
| 10892 | + |
| 10893 | + |
| 10894 | +(define_cpu_unit "is" "avr32_ap") |
| 10895 | +(define_cpu_unit "a1,m1,da" "avr32_ap") |
| 10896 | +(define_cpu_unit "a2,m2,d" "avr32_ap") |
| 10897 | + |
| 10898 | +;;Alu instructions |
| 10899 | +(define_insn_reservation "alu_op" 1 |
| 10900 | + (and (eq_attr "pipeline" "ap") |
| 10901 | + (eq_attr "type" "alu")) |
| 10902 | + "is,a1,a2") |
| 10903 | + |
| 10904 | +(define_insn_reservation "alu2_op" 2 |
| 10905 | + (and (eq_attr "pipeline" "ap") |
| 10906 | + (eq_attr "type" "alu2")) |
| 10907 | + "is,is+a1,a1+a2,a2") |
| 10908 | + |
| 10909 | +(define_insn_reservation "alu_sat_op" 2 |
| 10910 | + (and (eq_attr "pipeline" "ap") |
| 10911 | + (eq_attr "type" "alu_sat")) |
| 10912 | + "is,a1,a2") |
| 10913 | + |
| 10914 | + |
| 10915 | +;;Mul instructions |
| 10916 | +(define_insn_reservation "mulhh_op" 2 |
| 10917 | + (and (eq_attr "pipeline" "ap") |
| 10918 | + (eq_attr "type" "mulhh,mulwh")) |
| 10919 | + "is,m1,m2") |
| 10920 | + |
| 10921 | +(define_insn_reservation "mulww_w_op" 3 |
| 10922 | + (and (eq_attr "pipeline" "ap") |
| 10923 | + (eq_attr "type" "mulww_w")) |
| 10924 | + "is,m1,m1+m2,m2") |
| 10925 | + |
| 10926 | +(define_insn_reservation "mulww_d_op" 5 |
| 10927 | + (and (eq_attr "pipeline" "ap") |
| 10928 | + (eq_attr "type" "mulww_d")) |
| 10929 | + "is,m1,m1+m2,m1+m2,m2,m2") |
| 10930 | + |
| 10931 | +(define_insn_reservation "div_op" 33 |
| 10932 | + (and (eq_attr "pipeline" "ap") |
| 10933 | + (eq_attr "type" "div")) |
| 10934 | + "is,m1,m1*31 + m2*31,m2") |
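| | +;; The divide reservation keeps the multiply units (m1/m2) busy for 31 cycles, |
| | +;; so no other multiply or MAC instruction can issue while a division is in |
| | +;; flight; the latency of the result is 33 cycles. |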
| 10935 | + |
| 10936 | +(define_insn_reservation "machh_w_op" 3 |
| 10937 | + (and (eq_attr "pipeline" "ap") |
| 10938 | + (eq_attr "type" "machh_w")) |
| 10939 | + "is*2,m1,m2") |
| 10940 | + |
| 10941 | + |
| 10942 | +(define_insn_reservation "macww_w_op" 4 |
| 10943 | + (and (eq_attr "pipeline" "ap") |
| 10944 | + (eq_attr "type" "macww_w")) |
| 10945 | + "is*2,m1,m1,m2") |
| 10946 | + |
| 10947 | + |
| 10948 | +(define_insn_reservation "macww_d_op" 6 |
| 10949 | + (and (eq_attr "pipeline" "ap") |
| 10950 | + (eq_attr "type" "macww_d")) |
| 10951 | + "is*2,m1,m1+m2,m1+m2,m2") |
| 10952 | + |
| 10953 | +;;Bypasses for MAC instructions, needed because of the accumulator cache. |
| 10954 | +;;Keep the latency as low as possible so that the compiler places |
| 10955 | +;;mul -> mac and mac -> mac combinations which use the same |
| 10956 | +;;accumulator cache close together, so that no instruction which could |
| 10957 | +;;invalidate the accumulator cache ends up in between them. |
| 10958 | +(define_bypass 4 "machh_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass") |
| 10959 | +(define_bypass 5 "macww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass") |
| 10960 | +(define_bypass 7 "macww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass") |
| 10961 | + |
| 10962 | +(define_bypass 3 "mulhh_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass") |
| 10963 | +(define_bypass 4 "mulww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass") |
| 10964 | +(define_bypass 6 "mulww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass") |
| 10965 | + |
| 10966 | + |
| 10967 | +;;Bypasses for all mul/mac instructions followed by an instruction |
| 10968 | +;;which reads the output AND writes the result to the same register. |
| 10969 | +;;This will generate a Write After Write hazard which gives an |
| 10970 | +;;extra cycle before the result is ready. |
| 10971 | +(define_bypass 0 "machh_w_op" "machh_w_op" "avr32_valid_macmac_bypass") |
| 10972 | +(define_bypass 0 "macww_w_op" "macww_w_op" "avr32_valid_macmac_bypass") |
| 10973 | +(define_bypass 0 "macww_d_op" "macww_d_op" "avr32_valid_macmac_bypass") |
| 10974 | + |
| 10975 | +(define_bypass 0 "mulhh_op" "machh_w_op" "avr32_valid_mulmac_bypass") |
| 10976 | +(define_bypass 0 "mulww_w_op" "macww_w_op" "avr32_valid_mulmac_bypass") |
| 10977 | +(define_bypass 0 "mulww_d_op" "macww_d_op" "avr32_valid_mulmac_bypass") |
| 10978 | + |
| 10979 | +;;Branch and call instructions |
| 10980 | +;;We assume that all branches and rcalls are predicted correctly :-) |
| 10981 | +;;while calls use a lot of cycles. |
| 10982 | +(define_insn_reservation "branch_op" 0 |
| 10983 | + (and (eq_attr "pipeline" "ap") |
| 10984 | + (eq_attr "type" "branch")) |
| 10985 | + "nothing") |
| 10986 | + |
| 10987 | +(define_insn_reservation "call_op" 10 |
| 10988 | + (and (eq_attr "pipeline" "ap") |
| 10989 | + (eq_attr "type" "call")) |
| 10990 | + "nothing") |
| 10991 | + |
| 10992 | + |
| 10993 | +;;Load store instructions |
| 10994 | +(define_insn_reservation "load_op" 2 |
| 10995 | + (and (eq_attr "pipeline" "ap") |
| 10996 | + (eq_attr "type" "load")) |
| 10997 | + "is,da,d") |
| 10998 | + |
| 10999 | +(define_insn_reservation "load_rm_op" 3 |
| 11000 | + (and (eq_attr "pipeline" "ap") |
| 11001 | + (eq_attr "type" "load_rm")) |
| 11002 | + "is,da,d") |
| 11003 | + |
| 11004 | + |
| 11005 | +(define_insn_reservation "store_op" 0 |
| 11006 | + (and (eq_attr "pipeline" "ap") |
| 11007 | + (eq_attr "type" "store")) |
| 11008 | + "is,da,d") |
| 11009 | + |
| 11010 | + |
| 11011 | +(define_insn_reservation "load_double_op" 3 |
| 11012 | + (and (eq_attr "pipeline" "ap") |
| 11013 | + (eq_attr "type" "load2")) |
| 11014 | + "is,da,da+d,d") |
| 11015 | + |
| 11016 | +(define_insn_reservation "load_quad_op" 4 |
| 11017 | + (and (eq_attr "pipeline" "ap") |
| 11018 | + (eq_attr "type" "load4")) |
| 11019 | + "is,da,da+d,da+d,d") |
| 11020 | + |
| 11021 | +(define_insn_reservation "store_double_op" 0 |
| 11022 | + (and (eq_attr "pipeline" "ap") |
| 11023 | + (eq_attr "type" "store2")) |
| 11024 | + "is,da,da+d,d") |
| 11025 | + |
| 11026 | + |
| 11027 | +(define_insn_reservation "store_quad_op" 0 |
| 11028 | + (and (eq_attr "pipeline" "ap") |
| 11029 | + (eq_attr "type" "store4")) |
| 11030 | + "is,da,da+d,da+d,d") |
| 11031 | + |
| 11032 | +;;For stores the operand to write to memory is read in the d stage, so |
| 11033 | +;;the real latency between any instruction and a store is therefore |
| 11034 | +;;one less than for the instructions which read their operands in the first |
| 11035 | +;;execution stage. |
| 11036 | +(define_bypass 2 "load_double_op" "store_double_op" "avr32_store_bypass") |
| 11037 | +(define_bypass 3 "load_quad_op" "store_quad_op" "avr32_store_bypass") |
| 11038 | +(define_bypass 1 "load_op" "store_op" "avr32_store_bypass") |
| 11039 | +(define_bypass 2 "load_rm_op" "store_op" "avr32_store_bypass") |
| 11040 | +(define_bypass 1 "alu_sat_op" "store_op" "avr32_store_bypass") |
| 11041 | +(define_bypass 1 "alu2_op" "store_op" "avr32_store_bypass") |
| 11042 | +(define_bypass 1 "mulhh_op" "store_op" "avr32_store_bypass") |
| 11043 | +(define_bypass 2 "mulww_w_op" "store_op" "avr32_store_bypass") |
| 11044 | +(define_bypass 4 "mulww_d_op" "store_op" "avr32_store_bypass" ) |
| 11045 | +(define_bypass 2 "machh_w_op" "store_op" "avr32_store_bypass") |
| 11046 | +(define_bypass 3 "macww_w_op" "store_op" "avr32_store_bypass") |
| 11047 | +(define_bypass 5 "macww_d_op" "store_op" "avr32_store_bypass") |
| 11048 | + |
| 11049 | + |
| 11050 | +; Bypass for load double operation. If only the first loaded word is needed |
| 11051 | +; then the latency is 2 |
| 11052 | +(define_bypass 2 "load_double_op" |
| 11053 | + "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op, |
| 11054 | + mulww_d_op, machh_w_op, macww_w_op, macww_d_op" |
| 11055 | + "avr32_valid_load_double_bypass") |
| 11056 | + |
| 11057 | +; Bypass for load quad operation. If only the first or second loaded word is needed |
| 11058 | +; we set the latency to 2 |
| 11059 | +(define_bypass 2 "load_quad_op" |
| 11060 | + "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op, |
| 11061 | + mulww_d_op, machh_w_op, macww_w_op, macww_d_op" |
| 11062 | + "avr32_valid_load_quad_bypass") |
| 11063 | + |
| 11064 | + |
| 11065 | +;;****************************************************************************** |
| 11066 | +;; End of Automaton pipeline description for avr32 |
| 11067 | +;;****************************************************************************** |
| 11068 | + |
| 11069 | + |
| 11070 | + |
| 11071 | +;;============================================================================= |
| 11072 | +;; move |
| 11073 | +;;----------------------------------------------------------------------------- |
| 11074 | + |
| 11075 | +;;== char - 8 bits ============================================================ |
| 11076 | +(define_expand "movqi" |
| 11077 | + [(set (match_operand:QI 0 "nonimmediate_operand" "") |
| 11078 | + (match_operand:QI 1 "general_operand" ""))] |
| 11079 | + "" |
| 11080 | + { |
| 11081 | + if ( !no_new_pseudos ){ |
| 11082 | + if (GET_CODE (operands[1]) == MEM && optimize){ |
| 11083 | + rtx reg = gen_reg_rtx (SImode); |
| 11084 | + |
| 11085 | + emit_insn (gen_zero_extendqisi2 (reg, operands[1])); |
| 11086 | + operands[1] = gen_lowpart (QImode, reg); |
| 11087 | + } |
| 11088 | + |
| 11089 | + /* One of the ops has to be in a register. */ |
| 11090 | + if (GET_CODE (operands[0]) == MEM) |
| 11091 | + operands[1] = force_reg (QImode, operands[1]); |
| 11092 | + } |
| 11093 | + |
| 11094 | + }) |
| 11095 | + |
| 11096 | +(define_insn "*movqi_internal" |
| 11097 | + [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,r") |
| 11098 | + (match_operand:QI 1 "general_operand" "rKs08,m,r,i"))] |
| 11099 | + "" |
| 11100 | + "@ |
| 11101 | + mov\t%0, %1 |
| 11102 | + ld.ub\t%0, %1 |
| 11103 | + st.b\t%0, %1 |
| 11104 | + mov\t%0, %1" |
| 11105 | + [(set_attr "length" "2,4,4,4") |
| 11106 | + (set_attr "type" "alu,load_rm,store,alu")]) |
| 11107 | + |
| 11108 | + |
| 11109 | + |
| 11110 | +;;== short - 16 bits ========================================================== |
| 11111 | +(define_expand "movhi" |
| 11112 | + [(set (match_operand:HI 0 "nonimmediate_operand" "") |
| 11113 | + (match_operand:HI 1 "general_operand" ""))] |
| 11114 | + "" |
| 11115 | + { |
| 11116 | + if ( !no_new_pseudos ){ |
| 11117 | + if (GET_CODE (operands[1]) == MEM && optimize){ |
| 11118 | + rtx reg = gen_reg_rtx (SImode); |
| 11119 | + |
| 11120 | + emit_insn (gen_extendhisi2 (reg, operands[1])); |
| 11121 | + operands[1] = gen_lowpart (HImode, reg); |
| 11122 | + } |
| 11123 | + |
| 11124 | + /* One of the ops has to be in a register. */ |
| 11125 | + if (GET_CODE (operands[0]) == MEM) |
| 11126 | + operands[1] = force_reg (HImode, operands[1]); |
| 11127 | + } |
| 11128 | + |
| 11129 | + }) |
| 11130 | + |
| 11131 | +(define_insn "*movhi_internal" |
| 11132 | + [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r") |
| 11133 | + (match_operand:HI 1 "general_operand" "rKs08,m,r,i"))] |
| 11134 | + "" |
| 11135 | + "@ |
| 11136 | + mov\t%0, %1 |
| 11137 | + ld.sh\t%0, %1 |
| 11138 | + st.h\t%0, %1 |
| 11139 | + mov\t%0, %1" |
| 11140 | + [(set_attr "length" "2,4,4,4") |
| 11141 | + (set_attr "type" "alu,load_rm,store,alu")]) |
| 11142 | + |
| 11143 | + |
| 11144 | +;;== int - 32 bits ============================================================ |
| 11145 | + |
| 11146 | +(define_expand "movmisalignsi" |
| 11147 | + [(set (match_operand:SI 0 "nonimmediate_operand" "") |
| 11148 | + (match_operand:SI 1 "nonimmediate_operand" ""))] |
| 11149 | + "TARGET_UNALIGNED_WORD" |
| 11150 | + { |
| 11151 | + } |
| 11152 | +) |
| 11153 | + |
| 11154 | +(define_expand "mov<mode>" |
| 11155 | + [(set (match_operand:MOVM 0 "nonimmediate_operand" "") |
| 11156 | + (match_operand:MOVM 1 "general_operand" ""))] |
| 11157 | + "" |
| 11158 | + { |
| 11159 | + |
| 11160 | + /* One of the ops has to be in a register. */ |
| 11161 | + if (GET_CODE (operands[0]) == MEM) |
| 11162 | + operands[1] = force_reg (<MODE>mode, operands[1]); |
| 11163 | + |
| 11164 | + |
| 11165 | +    /* Check for out-of-range immediate constants, as these may |
| 11166 | +       occur during reloading; reload does not seem to check whether |
| 11167 | +       the immediate is legitimate. It is not clear whether this is |
| 11168 | +       a reload bug. */ |
| 11169 | + if ( reload_in_progress |
| 11170 | + && GET_CODE(operands[1]) == CONST_INT |
| 11171 | + && !avr32_const_ok_for_constraint_p(INTVAL(operands[1]), 'K', "Ks21") ){ |
| 11172 | + operands[1] = force_const_mem(SImode, operands[1]); |
| 11173 | + } |
| 11174 | + |
| 11175 | + if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS) |
| 11176 | + && !avr32_legitimate_pic_operand_p(operands[1]) ) |
| 11177 | + operands[1] = legitimize_pic_address (operands[1], <MODE>mode, |
| 11178 | + (no_new_pseudos ? operands[0] : 0)); |
| 11179 | + else if ( flag_pic && avr32_address_operand(operands[1], GET_MODE(operands[1])) ) |
| 11180 | + /* If we have an address operand then this function uses the pic register. */ |
| 11181 | + current_function_uses_pic_offset_table = 1; |
| 11182 | + }) |
| 11183 | + |
| 11184 | + |
| 11185 | +(define_insn "mov<mode>_internal" |
| 11186 | + [(set (match_operand:MOVM 0 "nonimmediate_operand" "=r,r,r,m,r") |
| 11187 | + (match_operand:MOVM 1 "general_operand" "rKs08,Ks21,m,r,W"))] |
| 11188 | + "" |
| 11189 | + { |
| 11190 | + switch (which_alternative) { |
| 11191 | + case 0: |
| 11192 | + case 1: return "mov\t%0, %1"; |
| 11193 | + case 2: |
| 11194 | + if ( (REG_P(XEXP(operands[1], 0)) |
| 11195 | + && REGNO(XEXP(operands[1], 0)) == SP_REGNUM) |
| 11196 | + || (GET_CODE(XEXP(operands[1], 0)) == PLUS |
| 11197 | + && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM |
| 11198 | + && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT |
| 11199 | + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0 |
| 11200 | + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) ) |
| 11201 | + return "lddsp\t%0, %1"; |
| 11202 | + else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) ) |
| 11203 | + return "lddpc\t%0, %1"; |
| 11204 | + else |
| 11205 | + return "ld.w\t%0, %1"; |
| 11206 | + case 3: |
| 11207 | + if ( (REG_P(XEXP(operands[0], 0)) |
| 11208 | + && REGNO(XEXP(operands[0], 0)) == SP_REGNUM) |
| 11209 | + || (GET_CODE(XEXP(operands[0], 0)) == PLUS |
| 11210 | + && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM |
| 11211 | + && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT |
| 11212 | + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0 |
| 11213 | + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) ) |
| 11214 | + return "stdsp\t%0, %1"; |
| 11215 | + else |
| 11216 | + return "st.w\t%0, %1"; |
| 11217 | + case 4: |
| 11218 | + if ( TARGET_HAS_ASM_ADDR_PSEUDOS ) |
| 11219 | + return "lda.w\t%0, %1"; |
| 11220 | + else |
| 11221 | + return "ld.w\t%0, r6[%1@got]"; |
| 11222 | + default: |
| 11223 | + abort(); |
| 11224 | + } |
| 11225 | + } |
| 11226 | + |
| 11227 | + [(set_attr "length" "2,4,4,4,8") |
| 11228 | + (set_attr "type" "alu,alu,load,store,load") |
| 11229 | + (set_attr "cc" "none,none,none,none,clobber")]) |
| 11230 | + |
| 11231 | + |
| 11232 | +;; These instructions are for loading constants which cannot be loaded |
| 11233 | +;; directly from the constant pool because the offset is too large. |
| 11234 | +;; high and lo_sum are used even though for our case it should really be |
| 11235 | +;; low and high sum :-) |
| 11236 | +(define_insn "mov_symbol_lo" |
| 11237 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 11238 | + (high:SI (match_operand:SI 1 "immediate_operand" "i" )))] |
| 11239 | + "" |
| 11240 | + "mov\t%0, lo(%1)" |
| 11241 | + [(set_attr "type" "alu") |
| 11242 | + (set_attr "length" "4")] |
| 11243 | +) |
| 11244 | + |
| 11245 | +(define_insn "add_symbol_hi" |
| 11246 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 11247 | + (lo_sum:SI (match_dup 0) |
| 11248 | + (match_operand:SI 1 "immediate_operand" "i" )))] |
| 11249 | + "" |
| 11250 | + "orh\t%0, hi(%1)" |
| 11251 | + [(set_attr "type" "alu") |
| 11252 | + (set_attr "length" "4")] |
| 11253 | +) |
| 11254 | + |
| 11255 | + |
| 11256 | + |
| 11257 | +;; When generating pic, we need to load the symbol offset into a register. |
| 11258 | +;; So that the optimizer does not confuse this with a normal symbol load |
| 11259 | +;; we use an unspec. The offset will be loaded from a constant pool entry, |
| 11260 | +;; since that is the only type of relocation we can use. |
| 11261 | +(define_insn "pic_load_addr" |
| 11262 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 11263 | + (unspec:SI [(match_operand:SI 1 "" "")] UNSPEC_PIC_SYM))] |
| 11264 | + "flag_pic && CONSTANT_POOL_ADDRESS_P(XEXP(operands[1], 0))" |
| 11265 | + "lddpc\t%0, %1" |
| 11266 | + [(set_attr "type" "load") |
| 11267 | + (set_attr "length" "4")] |
| 11268 | +) |
| 11269 | + |
| 11270 | +(define_insn "pic_compute_got_from_pc" |
| 11271 | + [(set (match_operand:SI 0 "register_operand" "+r") |
| 11272 | + (unspec:SI [(minus:SI (pc) |
| 11273 | + (match_dup 0))] UNSPEC_PIC_BASE)) |
| 11274 | + (use (label_ref (match_operand 1 "" "")))] |
| 11275 | + "flag_pic" |
| 11276 | + { |
| 11277 | + (*targetm.asm_out.internal_label) (asm_out_file, "L", |
| 11278 | + CODE_LABEL_NUMBER (operands[1])); |
| 11279 | + return \"rsub\t%0, pc\"; |
| 11280 | + } |
| 11281 | + [(set_attr "cc" "clobber") |
| 11282 | + (set_attr "length" "2")] |
| 11283 | +) |
| 11284 | + |
| 11285 | +;;== long long int - 64 bits ================================================== |
| 11286 | +(define_expand "movdi" |
| 11287 | + [(set (match_operand:DI 0 "nonimmediate_operand" "") |
| 11288 | + (match_operand:DI 1 "general_operand" ""))] |
| 11289 | + "" |
| 11290 | + { |
| 11291 | + |
| 11292 | + /* One of the ops has to be in a register. */ |
| 11293 | + if (GET_CODE (operands[0]) != REG) |
| 11294 | + operands[1] = force_reg (DImode, operands[1]); |
| 11295 | + |
| 11296 | + }) |
| 11297 | + |
| 11298 | + |
| 11299 | +(define_insn_and_split "*movdi_internal" |
| 11300 | + [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,r,r,m") |
| 11301 | + (match_operand:DI 1 "general_operand" "r,Ks08,Ks21,G,m,r"))] |
| 11302 | + "" |
| 11303 | + { |
| 11304 | + switch (which_alternative ){ |
| 11305 | + case 1: |
| 11306 | + case 2: |
| 11307 | + if ( INTVAL(operands[1]) < 0 ) |
| 11308 | + return "mov\t%0, %1\;mov\t%m0, -1"; |
| 11309 | + else |
| 11310 | + return "mov\t%0, %1\;mov\t%m0, 0"; |
| 11311 | + case 0: |
| 11312 | + case 3: |
| 11313 | + return "mov\t%0, %1\;mov\t%m0, %m1"; |
| 11314 | + case 4: |
| 11315 | + if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1]))) |
| 11316 | + return "ld.d\t%0, pc[%1 - .]"; |
| 11317 | + else |
| 11318 | + return "ld.d\t%0, %1"; |
| 11319 | + case 5: |
| 11320 | + return "st.d\t%0, %1"; |
| 11321 | + default: |
| 11322 | + abort(); |
| 11323 | + } |
| 11324 | + } |
| 11325 | + "reload_completed && |
| 11326 | + (REG_P(operands[0]) && |
| 11327 | + (REG_P(operands[1]) || avr32_const_double_immediate(operands[1]) || |
| 11328 | + ((GET_CODE(operands[1]) == CONST_INT) && avr32_const_ok_for_constraint_p(INTVAL(operands[1]), 'K', \"Ks21\")) ))" |
| 11329 | + [(set (match_dup 0) (match_dup 1)) |
| 11330 | + (set (match_dup 2) (match_dup 3))] |
| 11331 | + { |
| 11332 | + operands[2] = gen_highpart (SImode, operands[0]); |
| 11333 | + operands[0] = gen_lowpart (SImode, operands[0]); |
| 11334 | + if ( REG_P(operands[1]) ){ |
| 11335 | + operands[3] = gen_highpart(SImode, operands[1]); |
| 11336 | + operands[1] = gen_lowpart(SImode, operands[1]); |
| 11337 | + } else if ( GET_CODE(operands[1]) == CONST_DOUBLE ){ |
| 11338 | + operands[3] = GEN_INT(CONST_DOUBLE_LOW(operands[1])); |
| 11339 | + operands[1] = GEN_INT(CONST_DOUBLE_HIGH(operands[1])); |
| 11340 | + } else if ( GET_CODE(operands[1]) == CONST_INT ){ |
| 11341 | + operands[3] = GEN_INT((INTVAL(operands[1]) < 0) ? -1 : 0); |
| 11342 | + operands[1] = operands[1]; |
| 11343 | + } else { |
| 11344 | + internal_error("Illegal operand[1] for movdi split!"); |
| 11345 | + } |
| 11346 | + } |
| 11347 | + |
| 11348 | + [(set_attr "length" "4,6,8,8,4,4") |
| 11349 | + (set_attr "type" "alu2,alu2,alu2,alu2,load2,store2")]) |
| 11350 | + |
| 11351 | + |
| 11352 | +;;== 128 bits ================================================== |
| 11353 | +(define_expand "movti" |
| 11354 | + [(set (match_operand:TI 0 "nonimmediate_operand" "") |
| 11355 | + (match_operand:TI 1 "general_operand" ""))] |
| 11356 | + "" |
| 11357 | + { |
| 11358 | + |
| 11359 | + /* One of the ops has to be in a register. */ |
| 11360 | + if (GET_CODE (operands[0]) != REG) |
| 11361 | + operands[1] = force_reg (TImode, operands[1]); |
| 11362 | + |
| 11363 | +    /* We must fix any pre_dec loads and post_inc stores.  */
| 11364 | + if ( GET_CODE (operands[0]) == MEM |
| 11365 | + && GET_CODE (XEXP(operands[0],0)) == POST_INC ){ |
| 11366 | + emit_move_insn(gen_rtx_MEM(TImode, XEXP(XEXP(operands[0],0),0)), operands[1]); |
| 11367 | + emit_insn(gen_addsi3(XEXP(XEXP(operands[0],0),0), XEXP(XEXP(operands[0],0),0), GEN_INT(GET_MODE_SIZE(TImode)))); |
| 11368 | + DONE; |
| 11369 | + } |
| 11370 | + |
| 11371 | + if ( GET_CODE (operands[1]) == MEM |
| 11372 | + && GET_CODE (XEXP(operands[1],0)) == PRE_DEC ){ |
| 11373 | + emit_insn(gen_addsi3(XEXP(XEXP(operands[1],0),0), XEXP(XEXP(operands[1],0),0), GEN_INT(-GET_MODE_SIZE(TImode)))); |
| 11374 | + emit_move_insn(operands[0], gen_rtx_MEM(TImode, XEXP(XEXP(operands[1],0),0))); |
| 11375 | + DONE; |
| 11376 | + } |
| 11377 | + |
| 11378 | + if (GET_CODE (operands[1]) == CONST_INT){ |
| 11379 | + unsigned int sign_extend = (INTVAL(operands[1]) < 0) ? 0xFFFFFFFF : 0; |
| 11380 | + emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 12), operands[1]); |
| 11381 | + emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 8), GEN_INT(sign_extend)); |
| 11382 | + emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 4), GEN_INT(sign_extend)); |
| 11383 | + emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 0), GEN_INT(sign_extend)); |
| 11384 | + DONE; |
| 11385 | + } |
| 11386 | + |
| 11387 | + if (GET_CODE (operands[0]) == REG |
| 11388 | + && GET_CODE (operands[1]) == REG){ |
| 11389 | + emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 12), gen_rtx_SUBREG(SImode, operands[1], 12)); |
| 11390 | + emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 8), gen_rtx_SUBREG(SImode, operands[1], 8)); |
| 11391 | + emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 4), gen_rtx_SUBREG(SImode, operands[1], 4)); |
| 11392 | + emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 0), gen_rtx_SUBREG(SImode, operands[1], 0)); |
| 11393 | + DONE; |
| 11394 | + } |
| 11395 | + }) |
| 11396 | + |
| 11397 | + |
| 11398 | +(define_insn "*movti_internal" |
| 11399 | + [(set (match_operand:TI 0 "nonimmediate_operand" "=r,r, <RKu00,r") |
| 11400 | + (match_operand:TI 1 "loadti_operand" " r,RKu00>,r,m"))] |
| 11401 | + "" |
| 11402 | + "@ |
| 11403 | + mov\t%T0, %T1\;mov\t%U0, %U1\;mov\t%L0, %L1\;mov\t%B0, %B1 |
| 11404 | + ldm\t%p1, %0 |
| 11405 | + stm\t%p0, %1 |
| 11406 | + ldm\t%p1, %0" |
| 11407 | + [(set_attr "length" "8,4,4,4") |
| 11408 | + (set_attr "type" "alu,load4,store4,load4")]) |
| 11409 | + |
| 11410 | + |
| 11411 | +;;== float - 32 bits ========================================================== |
| 11412 | +(define_expand "movsf" |
| 11413 | + [(set (match_operand:SF 0 "nonimmediate_operand" "") |
| 11414 | + (match_operand:SF 1 "general_operand" ""))] |
| 11415 | + "" |
| 11416 | + { |
| 11417 | + |
| 11418 | + |
| 11419 | + /* One of the ops has to be in a register. */ |
| 11420 | + if (GET_CODE (operands[0]) != REG) |
| 11421 | + operands[1] = force_reg (SFmode, operands[1]); |
| 11422 | + |
| 11423 | + }) |
| 11424 | + |
| 11425 | +(define_insn "*movsf_internal" |
| 11426 | + [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,r,m") |
| 11427 | + (match_operand:SF 1 "general_operand" "r,G,m,r"))] |
| 11428 | + "TARGET_SOFT_FLOAT" |
| 11429 | + { |
| 11430 | + switch (which_alternative) { |
| 11431 | + case 0: |
| 11432 | + case 1: return "mov\t%0, %1"; |
| 11433 | + case 2: |
| 11434 | + if ( (REG_P(XEXP(operands[1], 0)) |
| 11435 | + && REGNO(XEXP(operands[1], 0)) == SP_REGNUM) |
| 11436 | + || (GET_CODE(XEXP(operands[1], 0)) == PLUS |
| 11437 | + && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM |
| 11438 | + && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT |
| 11439 | + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0 |
| 11440 | + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) ) |
| 11441 | + return "lddsp\t%0, %1"; |
| 11442 | + else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) ) |
| 11443 | + return "lddpc\t%0, %1"; |
| 11444 | + else |
| 11445 | + return "ld.w\t%0, %1"; |
| 11446 | + case 3: |
| 11447 | + if ( (REG_P(XEXP(operands[0], 0)) |
| 11448 | + && REGNO(XEXP(operands[0], 0)) == SP_REGNUM) |
| 11449 | + || (GET_CODE(XEXP(operands[0], 0)) == PLUS |
| 11450 | + && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM |
| 11451 | + && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT |
| 11452 | + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0 |
| 11453 | + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) ) |
| 11454 | + return "stdsp\t%0, %1"; |
| 11455 | + else |
| 11456 | + return "st.w\t%0, %1"; |
| 11457 | + default: |
| 11458 | + abort(); |
| 11459 | + } |
| 11460 | + } |
| 11461 | + |
| 11462 | + [(set_attr "length" "2,4,4,4") |
| 11463 | + (set_attr "type" "alu,alu,load,store")]) |
| 11464 | + |
| 11465 | + |
| 11466 | + |
| 11467 | +;;== double - 64 bits ========================================================= |
| 11468 | +(define_expand "movdf" |
| 11469 | + [(set (match_operand:DF 0 "nonimmediate_operand" "") |
| 11470 | + (match_operand:DF 1 "general_operand" ""))] |
| 11471 | + "" |
| 11472 | + { |
| 11473 | + /* One of the ops has to be in a register. */ |
| 11474 | + if (GET_CODE (operands[0]) != REG){ |
| 11475 | + operands[1] = force_reg (DFmode, operands[1]); |
| 11476 | + } |
| 11477 | + }) |
| 11478 | + |
| 11479 | + |
| 11480 | +(define_insn_and_split "*movdf_internal" |
| 11481 | + [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,r,m") |
| 11482 | + (match_operand:DF 1 "general_operand" "r,G,m,r"))] |
| 11483 | + "TARGET_SOFT_FLOAT" |
| 11484 | + { |
| 11485 | + switch (which_alternative ){ |
| 11486 | + case 0: |
| 11487 | + case 1: |
| 11488 | + return "mov\t%0, %1\;mov\t%m0, %m1"; |
| 11489 | + case 2: |
| 11490 | + if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1]))) |
| 11491 | + return "ld.d\t%0, pc[%1 - .]"; |
| 11492 | + else |
| 11493 | + return "ld.d\t%0, %1"; |
| 11494 | + case 3: |
| 11495 | + return "st.d\t%0, %1"; |
| 11496 | + default: |
| 11497 | + abort(); |
| 11498 | + } |
| 11499 | + } |
| 11500 | + "TARGET_SOFT_FLOAT |
| 11501 | + && reload_completed |
| 11502 | + && (REG_P(operands[0]) && REG_P(operands[1]))" |
| 11503 | + [(set (match_dup 0) (match_dup 1)) |
| 11504 | + (set (match_dup 2) (match_dup 3))] |
| 11505 | + " |
| 11506 | + { |
| 11507 | + operands[2] = gen_highpart (SImode, operands[0]); |
| 11508 | + operands[0] = gen_lowpart (SImode, operands[0]); |
| 11509 | + operands[3] = gen_highpart(SImode, operands[1]); |
| 11510 | + operands[1] = gen_lowpart(SImode, operands[1]); |
| 11511 | + } |
| 11512 | + " |
| 11513 | + |
| 11514 | + [(set_attr "length" "4,8,4,4") |
| 11515 | + (set_attr "type" "alu2,alu2,load2,store2")]) |
| 11516 | + |
| 11517 | + |
| 11518 | + |
| 11519 | + |
| 11520 | +;;============================================================================= |
| 11521 | +;; Move chunks of memory |
| 11522 | +;;============================================================================= |
| 11523 | + |
| 11524 | +(define_expand "movmemsi" |
| 11525 | + [(match_operand:BLK 0 "general_operand" "") |
| 11526 | + (match_operand:BLK 1 "general_operand" "") |
| 11527 | + (match_operand:SI 2 "const_int_operand" "") |
| 11528 | + (match_operand:SI 3 "const_int_operand" "")] |
| 11529 | + "" |
| 11530 | + " |
| 11531 | + if (avr32_gen_movmemsi (operands)) |
| 11532 | + DONE; |
| 11533 | + FAIL; |
| 11534 | + " |
| 11535 | + ) |
| 11536 | + |
| 11537 | + |
| 11538 | + |
| 11539 | + |
| 11540 | +;;============================================================================= |
| 11541 | +;; Bit field instructions |
| 11542 | +;;----------------------------------------------------------------------------- |
| 11543 | +;; Instructions to insert or extract bit-fields |
| 11544 | +;;============================================================================= |
| 11545 | + |
| 11546 | +(define_insn "insv" |
| 11547 | + [ (set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r") |
| 11548 | + (match_operand:SI 1 "immediate_operand" "Ku05") |
| 11549 | + (match_operand:SI 2 "immediate_operand" "Ku05")) |
| 11550 | + (match_operand 3 "register_operand" "r"))] |
| 11551 | + "" |
| 11552 | + "bfins\t%0, %3, %2, %1" |
| 11553 | + [(set_attr "type" "alu") |
| 11554 | + (set_attr "length" "4") |
| 11555 | + (set_attr "cc" "set_ncz")]) |
| 11556 | + |
| 11557 | + |
| 11558 | + |
| 11559 | + |
| 11560 | +(define_insn "extv" |
| 11561 | + [ (set (match_operand:SI 0 "register_operand" "=r") |
| 11562 | + (sign_extract:SI (match_operand:SI 1 "register_operand" "r") |
| 11563 | + (match_operand:SI 2 "immediate_operand" "Ku05") |
| 11564 | + (match_operand:SI 3 "immediate_operand" "Ku05")))] |
| 11565 | + "" |
| 11566 | + "bfexts\t%0, %1, %3, %2" |
| 11567 | + [(set_attr "type" "alu") |
| 11568 | + (set_attr "length" "4") |
| 11569 | + (set_attr "cc" "set_ncz")]) |
| 11570 | + |
| 11571 | + |
| 11572 | +(define_insn "extzv" |
| 11573 | + [ (set (match_operand:SI 0 "register_operand" "=r") |
| 11574 | + (zero_extract:SI (match_operand:SI 1 "register_operand" "r") |
| 11575 | + (match_operand:SI 2 "immediate_operand" "Ku05") |
| 11576 | + (match_operand:SI 3 "immediate_operand" "Ku05")))] |
| 11577 | + "" |
| 11578 | + "bfextu\t%0, %1, %3, %2" |
| 11579 | + [(set_attr "type" "alu") |
| 11580 | + (set_attr "length" "4") |
| 11581 | + (set_attr "cc" "set_ncz")]) |
| 11582 | + |
| 11583 | + |
| 11584 | + |
| 11585 | +;;============================================================================= |
| 11586 | +;; Some peepholes for avoiding unnecessary cast instructions |
| 11587 | +;; followed by bfins. |
| 11588 | +;;----------------------------------------------------------------------------- |
| 11589 | + |
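|  | +;; The zero_extend is redundant because bfins only reads the low <width>
|  | +;; bits of the source register, so when the extended value is dead after
|  | +;; the insert and the field width fits in the original QI/HI mode, the
|  | +;; unextended register can be used directly.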
| 11590 | +(define_peephole2 |
| 11591 | + [(set (match_operand:SI 0 "register_operand" "") |
| 11592 | + (zero_extend:SI (match_operand:QI 1 "register_operand" ""))) |
| 11593 | + (set (zero_extract:SI (match_operand 2 "register_operand" "") |
| 11594 | + (match_operand:SI 3 "immediate_operand" "") |
| 11595 | + (match_operand:SI 4 "immediate_operand" "")) |
| 11596 | + (match_dup 0))] |
| 11597 | + "((peep2_reg_dead_p(2, operands[0]) && |
| 11598 | + (INTVAL(operands[3]) <= 8)))" |
| 11599 | + [(set (zero_extract:SI (match_dup 2) |
| 11600 | + (match_dup 3) |
| 11601 | + (match_dup 4)) |
| 11602 | + (match_dup 1))] |
| 11603 | + ) |
| 11604 | + |
| 11605 | +(define_peephole2 |
| 11606 | + [(set (match_operand:SI 0 "register_operand" "") |
| 11607 | + (zero_extend:SI (match_operand:HI 1 "register_operand" ""))) |
| 11608 | + (set (zero_extract:SI (match_operand 2 "register_operand" "") |
| 11609 | + (match_operand:SI 3 "immediate_operand" "") |
| 11610 | + (match_operand:SI 4 "immediate_operand" "")) |
| 11611 | + (match_dup 0))] |
| 11612 | + "((peep2_reg_dead_p(2, operands[0]) && |
| 11613 | + (INTVAL(operands[3]) <= 16)))" |
| 11614 | + [(set (zero_extract:SI (match_dup 2) |
| 11615 | + (match_dup 3) |
| 11616 | + (match_dup 4)) |
| 11617 | + (match_dup 1))] |
| 11618 | + ) |
| 11619 | + |
| 11620 | +;;============================================================================= |
| 11621 | +;; push bytes |
| 11622 | +;;----------------------------------------------------------------------------- |
| 11623 | +;; Implements the push instruction |
| 11624 | +;;============================================================================= |
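|  | +;; Operand 0 is a bit mask selecting the registers to push; a mask of zero
|  | +;; emits no instruction at all.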
| 11625 | +(define_insn "pushm" |
| 11626 | + [(set (mem:BLK (pre_dec:BLK (reg:SI SP_REGNUM))) |
| 11627 | + (unspec:BLK [(match_operand 0 "const_int_operand" "")] |
| 11628 | + UNSPEC_PUSHM))] |
| 11629 | + "" |
| 11630 | + { |
| 11631 | + if (INTVAL(operands[0])) { |
| 11632 | + return "pushm\t%r0"; |
| 11633 | + } else { |
| 11634 | + return ""; |
| 11635 | + } |
| 11636 | + } |
| 11637 | + [(set_attr "type" "store") |
| 11638 | + (set_attr "length" "2") |
| 11639 | + (set_attr "cc" "none")]) |
| 11640 | + |
| 11641 | +(define_insn "stm" |
| 11642 | + [(unspec [(match_operand 0 "register_operand" "r") |
| 11643 | + (match_operand 1 "const_int_operand" "") |
| 11644 | + (match_operand 2 "const_int_operand" "")] |
| 11645 | + UNSPEC_STM)] |
| 11646 | + "" |
| 11647 | + { |
| 11648 | + if (INTVAL(operands[1])) { |
| 11649 | + if (INTVAL(operands[2]) != 0) |
| 11650 | + return "stm\t--%0, %s1"; |
| 11651 | + else |
| 11652 | + return "stm\t%0, %s1"; |
| 11653 | + } else { |
| 11654 | + return ""; |
| 11655 | + } |
| 11656 | + } |
| 11657 | + [(set_attr "type" "store") |
| 11658 | + (set_attr "length" "4") |
| 11659 | + (set_attr "cc" "none")]) |
| 11660 | + |
| 11661 | + |
| 11662 | + |
| 11663 | +(define_insn "popm" |
| 11664 | + [(unspec [(match_operand 0 "const_int_operand" "")] |
| 11665 | + UNSPEC_POPM)] |
| 11666 | + "" |
| 11667 | + { |
| 11668 | + if (INTVAL(operands[0])) { |
| 11669 | + return "popm %r0"; |
| 11670 | + } else { |
| 11671 | + return ""; |
| 11672 | + } |
| 11673 | + } |
| 11674 | + [(set_attr "type" "load") |
| 11675 | + (set_attr "length" "2")]) |
| 11676 | + |
| 11677 | + |
| 11678 | + |
| 11679 | +;;============================================================================= |
| 11680 | +;; add |
| 11681 | +;;----------------------------------------------------------------------------- |
| 11682 | +;; Adds reg2 or an immediate value to reg1 and puts the result in reg0.
| 11683 | +;;============================================================================= |
| 11684 | +(define_insn "add<mode>3" |
| 11685 | + [(set (match_operand:INTM 0 "register_operand" "=r,r,r,r,r") |
| 11686 | + (plus:INTM (match_operand:INTM 1 "register_operand" "%0,r,0,r,0") |
| 11687 | + (match_operand:INTM 2 "avr32_add_operand" "r,r,Is08,Is16,Is21")))] |
| 11688 | + "" |
| 11689 | + "@ |
| 11690 | + add %0, %2 |
| 11691 | + add %0, %1, %2 |
| 11692 | + sub %0, %n2 |
| 11693 | + sub %0, %1, %n2 |
| 11694 | + sub %0, %n2" |
| 11695 | + |
| 11696 | + [(set_attr "length" "2,4,2,4,4") |
| 11697 | + (set_attr "cc" "<INTM:alu_cc_attr>")]) |
| 11698 | + |
| 11699 | +(define_insn "*addsi3_lsl" |
| 11700 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 11701 | + (plus:SI (ashift:SI (match_operand:SI 1 "register_operand" "r") |
| 11702 | + (match_operand:SI 3 "avr32_add_shift_immediate_operand" "Ku02")) |
| 11703 | + (match_operand:SI 2 "register_operand" "r")))] |
| 11704 | + "" |
| 11705 | + "add %0, %2, %1 << %3" |
| 11706 | + [(set_attr "length" "4") |
| 11707 | + (set_attr "cc" "set_vncz")]) |
| 11708 | + |
| 11709 | + |
| 11710 | +(define_insn "*addsi3_mul" |
| 11711 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 11712 | + (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "r") |
| 11713 | + (match_operand:SI 3 "immediate_operand" "Ku04" )) |
| 11714 | + (match_operand:SI 2 "register_operand" "r")))] |
| 11715 | + "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) || |
| 11716 | + (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)" |
| 11717 | + "add %0, %2, %1 << %p3" |
| 11718 | + [(set_attr "length" "4") |
| 11719 | + (set_attr "cc" "set_vncz")]) |
| 11720 | + |
| 11721 | + |
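|  | +;; The two peepholes below fold a separate shift into the shifted-add form
|  | +;; above: when the shifted temporary is dead and the shift amount is 1-3,
|  | +;; an "lsl rT, rA, n" followed by an add of rT (in either operand order)
|  | +;; becomes a single "add rD, rB, rA << n" (register names illustrative).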
| 11722 | +(define_peephole2 |
| 11723 | + [(set (match_operand:SI 0 "register_operand" "") |
| 11724 | + (ashift:SI (match_operand:SI 1 "register_operand" "") |
| 11725 | + (match_operand:SI 2 "immediate_operand" ""))) |
| 11726 | + (set (match_operand:SI 3 "register_operand" "") |
| 11727 | + (plus:SI (match_dup 0) |
| 11728 | + (match_operand:SI 4 "register_operand" "")))] |
| 11729 | + "(peep2_reg_dead_p(2, operands[0]) && |
| 11730 | + (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))" |
| 11731 | + [(set (match_dup 3) |
| 11732 | + (plus:SI (ashift:SI (match_dup 1) |
| 11733 | + (match_dup 2)) |
| 11734 | + (match_dup 4)))] |
| 11735 | + ) |
| 11736 | + |
| 11737 | +(define_peephole2 |
| 11738 | + [(set (match_operand:SI 0 "register_operand" "") |
| 11739 | + (ashift:SI (match_operand:SI 1 "register_operand" "") |
| 11740 | + (match_operand:SI 2 "immediate_operand" ""))) |
| 11741 | + (set (match_operand:SI 3 "register_operand" "") |
| 11742 | + (plus:SI (match_operand:SI 4 "register_operand" "") |
| 11743 | + (match_dup 0)))] |
| 11744 | + "(peep2_reg_dead_p(2, operands[0]) && |
| 11745 | + (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))" |
| 11746 | + [(set (match_dup 3) |
| 11747 | + (plus:SI (ashift:SI (match_dup 1) |
| 11748 | + (match_dup 2)) |
| 11749 | + (match_dup 4)))] |
| 11750 | + ) |
| 11751 | + |
| 11752 | +(define_insn "adddi3" |
| 11753 | + [(set (match_operand:DI 0 "register_operand" "=r,r") |
| 11754 | + (plus:DI (match_operand:DI 1 "register_operand" "%r,0") |
| 11755 | + (match_operand:DI 2 "register_operand" "r,r")))] |
| 11756 | + "" |
| 11757 | + "@ |
| 11758 | + add %0, %1, %2\;adc %m0, %m1, %m2 |
| 11759 | + add %0, %2\;adc %m0, %m0, %m2" |
| 11760 | + [(set_attr "length" "8,6") |
| 11761 | + (set_attr "type" "alu2") |
| 11762 | + (set_attr "cc" "set_vncz")]) |
| 11763 | + |
| 11764 | + |
| 11765 | + |
| 11766 | +;;============================================================================= |
| 11767 | +;; subtract |
| 11768 | +;;----------------------------------------------------------------------------- |
| 11769 | +;; Subtracts reg2 or an immediate value from reg1 and puts the result in reg0.
| 11770 | +;;============================================================================= |
| 11771 | + |
| 11772 | +(define_peephole2 |
| 11773 | + [(set (match_operand:QI 0 "register_operand" "") |
| 11774 | + (minus:QI (match_operand:QI 1 "general_operand" "") |
| 11775 | + (match_operand:QI 2 "general_operand" ""))) |
| 11776 | + (set (match_operand:QI 3 "register_operand" "") |
| 11777 | + (match_dup 0))] |
| 11778 | + "peep2_reg_dead_p(2, operands[0])" |
| 11779 | + [(set (match_dup 3) |
| 11780 | + (minus:QI (match_dup 1) (match_dup 2)))] |
| 11781 | + ) |
| 11782 | + |
| 11783 | +(define_peephole |
| 11784 | + [(set (match_operand:QI 0 "register_operand" "") |
| 11785 | + (minus:QI (match_operand:QI 1 "immediate_operand" "Ks08") |
| 11786 | + (match_operand:QI 2 "register_operand" "r"))) |
| 11787 | + (set (match_operand:QI 3 "register_operand" "r") |
| 11788 | + (match_dup 0))] |
| 11789 | + "dead_or_set_p(insn, operands[0])" |
| 11790 | + "rsub %3, %2, %1" |
| 11791 | + [(set_attr "length" "4") |
| 11792 | + (set_attr "cc" "clobber")] |
| 11793 | + ) |
| 11794 | + |
| 11795 | + |
| 11796 | + |
| 11797 | +(define_insn "sub<mode>3" |
| 11798 | + [(set (match_operand:INTM 0 "general_operand" "=r,r,r,r,r,r,r") |
| 11799 | + (minus:INTM (match_operand:INTM 1 "nonmemory_operand" "0,r,0,r,0,r,Ks08") |
| 11800 | + (match_operand:INTM 2 "nonmemory_operand" "r,r,Ks08,Ks16,Ks21,0,r")))] |
| 11801 | + "" |
| 11802 | + "@ |
| 11803 | + sub %0, %2 |
| 11804 | + sub %0, %1, %2 |
| 11805 | + sub %0, %2 |
| 11806 | + sub %0, %1, %2 |
| 11807 | + sub %0, %2 |
| 11808 | + rsub %0, %1 |
| 11809 | + rsub %0, %2, %1" |
| 11810 | + [(set_attr "length" "2,4,2,4,4,2,4") |
| 11811 | + (set_attr "cc" "<INTM:alu_cc_attr>")]) |
| 11812 | + |
| 11813 | +(define_insn "*sub<mode>3_mul" |
| 11814 | + [(set (match_operand:INTM 0 "register_operand" "=r,r,r") |
| 11815 | + (minus:INTM (match_operand:INTM 1 "register_operand" "r,0,r") |
| 11816 | + (mult:INTM (match_operand:INTM 2 "register_operand" "r,r,0") |
| 11817 | + (match_operand:SI 3 "immediate_operand" "Ku04,Ku04,Ku04" ))))] |
| 11818 | + "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) || |
| 11819 | + (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)" |
| 11820 | + "@ |
| 11821 | + sub %0, %1, %2 << %p3 |
| 11822 | + sub %0, %0, %2 << %p3 |
| 11823 | + sub %0, %1, %0 << %p3" |
| 11824 | + [(set_attr "length" "4,4,4") |
| 11825 | + (set_attr "cc" "<INTM:alu_cc_attr>")]) |
| 11826 | + |
| 11827 | +(define_insn "*sub<mode>3_lsl" |
| 11828 | + [(set (match_operand:INTM 0 "register_operand" "=r") |
| 11829 | + (minus:INTM (ashift:INTM (match_operand:INTM 1 "register_operand" "r") |
| 11830 | + (match_operand:SI 3 "avr32_add_shift_immediate_operand" "Ku02")) |
| 11831 | + (match_operand:INTM 2 "register_operand" "r")))] |
| 11832 | + "" |
| 11833 | + "sub %0, %2, %1 << %3" |
| 11834 | + [(set_attr "length" "4") |
| 11835 | + (set_attr "cc" "<INTM:alu_cc_attr>")]) |
| 11836 | + |
| 11837 | + |
| 11838 | +(define_insn "subdi3" |
| 11839 | + [(set (match_operand:DI 0 "register_operand" "=r,r") |
| 11840 | + (minus:DI (match_operand:DI 1 "register_operand" "%r,0") |
| 11841 | + (match_operand:DI 2 "register_operand" "r,r")))] |
| 11842 | + "" |
| 11843 | + "@ |
| 11844 | + sub %0, %1, %2\;sbc %m0, %m1, %m2 |
| 11845 | + sub %0, %2\;sbc %m0, %m0, %m2" |
| 11846 | + [(set_attr "length" "8,6") |
| 11847 | + (set_attr "type" "alu2") |
| 11848 | + (set_attr "cc" "set_vncz")]) |
| 11849 | + |
| 11850 | + |
| 11851 | + |
| 11852 | +;;============================================================================= |
| 11853 | +;; multiply |
| 11854 | +;;----------------------------------------------------------------------------- |
| 11855 | +;; Multiply op1 and op2 and put the value in op0. |
| 11856 | +;;============================================================================= |
| 11857 | + |
| 11858 | + |
| 11859 | +(define_insn "mulqi3" |
| 11860 | + [(set (match_operand:QI 0 "register_operand" "=r,r,r") |
| 11861 | + (mult:QI (match_operand:QI 1 "register_operand" "%0,r,r") |
| 11862 | + (match_operand:QI 2 "avr32_mul_operand" "r,r,Ks08")))] |
| 11863 | + "" |
| 11864 | + { |
| 11865 | + switch (which_alternative){ |
| 11866 | + case 0: |
| 11867 | + return "mul %0, %2"; |
| 11868 | + case 1: |
| 11869 | + return "mul %0, %1, %2"; |
| 11870 | + case 2: |
| 11871 | + return "mul %0, %1, %2"; |
| 11872 | + default: |
| 11873 | + abort(); |
| 11874 | + } |
| 11875 | + } |
| 11876 | + [(set_attr "type" "mulww_w,mulww_w,mulwh") |
| 11877 | + (set_attr "length" "2,4,4") |
| 11878 | + (set_attr "cc" "none")]) |
| 11879 | + |
| 11880 | +(define_insn "mulsi3" |
| 11881 | + [(set (match_operand:SI 0 "register_operand" "=r,r,r") |
| 11882 | + (mult:SI (match_operand:SI 1 "register_operand" "%0,r,r") |
| 11883 | + (match_operand:SI 2 "avr32_mul_operand" "r,r,Ks08")))] |
| 11884 | + "" |
| 11885 | + { |
| 11886 | + switch (which_alternative){ |
| 11887 | + case 0: |
| 11888 | + return "mul %0, %2"; |
| 11889 | + case 1: |
| 11890 | + return "mul %0, %1, %2"; |
| 11891 | + case 2: |
| 11892 | + return "mul %0, %1, %2"; |
| 11893 | + default: |
| 11894 | + abort(); |
| 11895 | + } |
| 11896 | + } |
| 11897 | + [(set_attr "type" "mulww_w,mulww_w,mulwh") |
| 11898 | + (set_attr "length" "2,4,4") |
| 11899 | + (set_attr "cc" "none")]) |
| 11900 | + |
| 11901 | + |
| 11902 | +(define_insn "mulhisi3" |
| 11903 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 11904 | + (mult:SI |
| 11905 | + (sign_extend:SI (match_operand:HI 1 "register_operand" "%r")) |
| 11906 | + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))] |
| 11907 | + "TARGET_DSP" |
| 11908 | + "mulhh.w %0, %1:b, %2:b" |
| 11909 | + [(set_attr "type" "mulhh") |
| 11910 | + (set_attr "length" "4") |
| 11911 | + (set_attr "cc" "none")]) |
| 11912 | + |
| 11913 | +(define_peephole2 |
| 11914 | + [(match_scratch:DI 6 "r") |
| 11915 | + (set (match_operand:SI 0 "register_operand" "") |
| 11916 | + (mult:SI |
| 11917 | + (sign_extend:SI (match_operand:HI 1 "register_operand" "")) |
| 11918 | + (sign_extend:SI (match_operand:HI 2 "register_operand" "")))) |
| 11919 | + (set (match_operand:SI 3 "register_operand" "") |
| 11920 | + (ashiftrt:SI (match_dup 0) |
| 11921 | + (const_int 16)))] |
| 11922 | + "TARGET_DSP |
| 11923 | + && (peep2_reg_dead_p(1, operands[0]) || (REGNO(operands[0]) == REGNO(operands[3])))" |
| 11924 | + [(set (match_dup 4) (sign_extend:SI (match_dup 1))) |
| 11925 | + (set (match_dup 6) |
| 11926 | + (ashift:DI (mult:DI (sign_extend:DI (match_dup 4)) |
| 11927 | + (sign_extend:DI (match_dup 2))) |
| 11928 | + (const_int 16))) |
| 11929 | + (set (match_dup 3) (match_dup 5))] |
| 11930 | + |
| 11931 | + "{ |
| 11932 | + operands[4] = gen_rtx_REG(SImode, REGNO(operands[1])); |
| 11933 | + operands[5] = gen_highpart (SImode, operands[4]); |
| 11934 | + }" |
| 11935 | + ) |
| 11936 | + |
| 11937 | +(define_insn "mulnhisi3" |
| 11938 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 11939 | + (mult:SI |
| 11940 | + (sign_extend:SI (neg:HI (match_operand:HI 1 "register_operand" "r"))) |
| 11941 | + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))] |
| 11942 | + "TARGET_DSP" |
| 11943 | + "mulnhh.w %0, %1:b, %2:b" |
| 11944 | + [(set_attr "type" "mulhh") |
| 11945 | + (set_attr "length" "4") |
| 11946 | + (set_attr "cc" "none")]) |
| 11947 | + |
| 11948 | +(define_insn "machisi3" |
| 11949 | + [(set (match_operand:SI 0 "register_operand" "+r") |
| 11950 | + (plus:SI (mult:SI |
| 11951 | + (sign_extend:SI (match_operand:HI 1 "register_operand" "%r")) |
| 11952 | + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))) |
| 11953 | + (match_dup 0)))] |
| 11954 | + "TARGET_DSP" |
| 11955 | + "machh.w %0, %1:b, %2:b" |
| 11956 | + [(set_attr "type" "machh_w") |
| 11957 | + (set_attr "length" "4") |
| 11958 | + (set_attr "cc" "none")]) |
| 11959 | + |
| 11960 | + |
| 11961 | + |
| 11962 | +(define_insn "mulsidi3" |
| 11963 | + [(set (match_operand:DI 0 "register_operand" "=r") |
| 11964 | + (mult:DI |
| 11965 | + (sign_extend:DI (match_operand:SI 1 "register_operand" "%r")) |
| 11966 | + (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))] |
| 11967 | + "" |
| 11968 | + "muls.d %0, %1, %2" |
| 11969 | + [(set_attr "type" "mulww_d") |
| 11970 | + (set_attr "length" "4") |
| 11971 | + (set_attr "cc" "none")]) |
| 11972 | + |
| 11973 | +(define_insn "umulsidi3" |
| 11974 | + [(set (match_operand:DI 0 "register_operand" "=r") |
| 11975 | + (mult:DI |
| 11976 | + (zero_extend:DI (match_operand:SI 1 "register_operand" "%r")) |
| 11977 | + (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))] |
| 11978 | + "" |
| 11979 | + "mulu.d %0, %1, %2" |
| 11980 | + [(set_attr "type" "mulww_d") |
| 11981 | + (set_attr "length" "4") |
| 11982 | + (set_attr "cc" "none")]) |
| 11983 | + |
| 11984 | +(define_insn "*mulaccsi3" |
| 11985 | + [(set (match_operand:SI 0 "register_operand" "+r") |
| 11986 | + (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "%r") |
| 11987 | + (match_operand:SI 2 "register_operand" "r")) |
| 11988 | + (match_dup 0)))] |
| 11989 | + "" |
| 11990 | + "mac %0, %1, %2" |
| 11991 | + [(set_attr "type" "macww_w") |
| 11992 | + (set_attr "length" "4") |
| 11993 | + (set_attr "cc" "none")]) |
| 11994 | + |
| 11995 | +(define_insn "mulaccsidi3" |
| 11996 | + [(set (match_operand:DI 0 "register_operand" "+r") |
| 11997 | + (plus:DI (mult:DI |
| 11998 | + (sign_extend:DI (match_operand:SI 1 "register_operand" "%r")) |
| 11999 | + (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))) |
| 12000 | + (match_dup 0)))] |
| 12001 | + "" |
| 12002 | + "macs.d %0, %1, %2" |
| 12003 | + [(set_attr "type" "macww_d") |
| 12004 | + (set_attr "length" "4") |
| 12005 | + (set_attr "cc" "none")]) |
| 12006 | + |
| 12007 | +(define_insn "umulaccsidi3" |
| 12008 | + [(set (match_operand:DI 0 "register_operand" "+r") |
| 12009 | + (plus:DI (mult:DI |
| 12010 | + (zero_extend:DI (match_operand:SI 1 "register_operand" "%r")) |
| 12011 | + (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))) |
| 12012 | + (match_dup 0)))] |
| 12013 | + "" |
| 12014 | + "macu.d %0, %1, %2" |
| 12015 | + [(set_attr "type" "macww_d") |
| 12016 | + (set_attr "length" "4") |
| 12017 | + (set_attr "cc" "none")]) |
| 12018 | + |
| 12019 | + |
| 12020 | + |
| 12021 | +;; Try to avoid Write-After-Write hazards for mul operations
| 12022 | +;; where possible.
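|  | +;; The multiply result is redirected to the register that held its (now
|  | +;; dead) second source operand, so that the multiply and the following ALU
|  | +;; instruction no longer write the same destination register back to back.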
| 12023 | +(define_peephole2 |
| 12024 | + [(set (match_operand:SI 0 "register_operand" "") |
| 12025 | + (mult:SI |
| 12026 | + (sign_extend:SI (match_operand 1 "general_operand" "")) |
| 12027 | + (sign_extend:SI (match_operand 2 "general_operand" "")))) |
| 12028 | + (set (match_dup 0) |
| 12029 | + (match_operator:SI 3 "alu_operator" [(match_dup 0) |
| 12030 | + (match_operand 4 "general_operand" "")]))] |
| 12031 | + "peep2_reg_dead_p(1, operands[2])" |
| 12032 | + [(set (match_dup 5) |
| 12033 | + (mult:SI |
| 12034 | + (sign_extend:SI (match_dup 1)) |
| 12035 | + (sign_extend:SI (match_dup 2)))) |
| 12036 | + (set (match_dup 0) |
| 12037 | + (match_op_dup 3 [(match_dup 5) |
| 12038 | + (match_dup 4)]))] |
| 12039 | + "{operands[5] = gen_rtx_REG(SImode, REGNO(operands[2]));}" |
| 12040 | + ) |
| 12041 | + |
| 12042 | + |
| 12043 | + |
| 12044 | +;;============================================================================= |
| 12045 | +;; DSP instructions |
| 12046 | +;;============================================================================= |
| 12047 | +(define_insn "mulsathh_h" |
| 12048 | + [(set (match_operand:HI 0 "register_operand" "=r") |
| 12049 | + (ss_truncate:HI (ashiftrt:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r")) |
| 12050 | + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))) |
| 12051 | + (const_int 15))))] |
| 12052 | + "TARGET_DSP" |
| 12053 | + "mulsathh.h\t%0, %1:b, %2:b" |
| 12054 | + [(set_attr "length" "4") |
| 12055 | + (set_attr "cc" "none") |
| 12056 | + (set_attr "type" "mulhh")]) |
| 12057 | + |
| 12058 | +(define_insn "mulsatrndhh_h" |
| 12059 | + [(set (match_operand:HI 0 "register_operand" "=r") |
| 12060 | + (ss_truncate:HI (ashiftrt:SI |
| 12061 | + (plus:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r")) |
| 12062 | + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))) |
| 12063 | + (const_int 1073741824)) |
| 12064 | + (const_int 15))))] |
| 12065 | + "TARGET_DSP" |
| 12066 | + "mulsatrndhh.h\t%0, %1:b, %2:b" |
| 12067 | + [(set_attr "length" "4") |
| 12068 | + (set_attr "cc" "none") |
| 12069 | + (set_attr "type" "mulhh")]) |
| 12070 | + |
| 12071 | +(define_insn "mulsathh_w" |
| 12072 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 12073 | + (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r")) |
| 12074 | + (sign_extend:DI (match_operand:HI 2 "register_operand" "r"))) |
| 12075 | + (const_int 1))))] |
| 12076 | + "TARGET_DSP" |
| 12077 | + "mulsathh.w\t%0, %1:b, %2:b" |
| 12078 | + [(set_attr "length" "4") |
| 12079 | + (set_attr "cc" "none") |
| 12080 | + (set_attr "type" "mulhh")]) |
| 12081 | + |
| 12082 | +(define_insn "mulsatwh_w" |
| 12083 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 12084 | + (ss_truncate:SI (ashiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")) |
| 12085 | + (sign_extend:DI (match_operand:HI 2 "register_operand" "r"))) |
| 12086 | + (const_int 15))))] |
| 12087 | + "TARGET_DSP" |
| 12088 | + "mulsatwh.w\t%0, %1, %2:b" |
| 12089 | + [(set_attr "length" "4") |
| 12090 | + (set_attr "cc" "none") |
| 12091 | + (set_attr "type" "mulwh")]) |
| 12092 | + |
| 12093 | +(define_insn "mulsatrndwh_w" |
| 12094 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 12095 | + (ss_truncate:SI (ashiftrt:DI (plus:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")) |
| 12096 | + (sign_extend:DI (match_operand:HI 2 "register_operand" "r"))) |
| 12097 | + (const_int 1073741824)) |
| 12098 | + (const_int 15))))] |
| 12099 | + "TARGET_DSP" |
| 12100 | + "mulsatrndwh.w\t%0, %1, %2:b" |
| 12101 | + [(set_attr "length" "4") |
| 12102 | + (set_attr "cc" "none") |
| 12103 | + (set_attr "type" "mulwh")]) |
| 12104 | + |
| 12105 | +(define_insn "macsathh_w" |
| 12106 | + [(set (match_operand:SI 0 "register_operand" "+r") |
| 12107 | + (plus:SI (match_dup 0) |
| 12108 | + (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r")) |
| 12109 | + (sign_extend:DI (match_operand:HI 2 "register_operand" "r"))) |
| 12110 | + (const_int 1)))))] |
| 12111 | + "TARGET_DSP" |
| 12112 | + "macsathh.w\t%0, %1:b, %2:b" |
| 12113 | + [(set_attr "length" "4") |
| 12114 | + (set_attr "cc" "none") |
| 12115 | + (set_attr "type" "mulhh")]) |
| 12116 | + |
| 12117 | + |
| 12118 | +(define_insn "mulwh_d" |
| 12119 | + [(set (match_operand:DI 0 "register_operand" "=r") |
| 12120 | + (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")) |
| 12121 | + (sign_extend:DI (match_operand:HI 2 "register_operand" "r"))) |
| 12122 | + (const_int 16)))] |
| 12123 | + "TARGET_DSP" |
| 12124 | + "mulwh.d\t%0, %1, %2:b" |
| 12125 | + [(set_attr "length" "4") |
| 12126 | + (set_attr "cc" "none") |
| 12127 | + (set_attr "type" "mulwh")]) |
| 12128 | + |
| 12129 | + |
| 12130 | +(define_insn "mulnwh_d" |
| 12131 | + [(set (match_operand:DI 0 "register_operand" "=r") |
| 12132 | + (ashift:DI (mult:DI (not:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))) |
| 12133 | + (sign_extend:DI (match_operand:HI 2 "register_operand" "r"))) |
| 12134 | + (const_int 16)))] |
| 12135 | + "TARGET_DSP" |
| 12136 | + "mulnwh.d\t%0, %1, %2:b" |
| 12137 | + [(set_attr "length" "4") |
| 12138 | + (set_attr "cc" "none") |
| 12139 | + (set_attr "type" "mulwh")]) |
| 12140 | + |
| 12141 | +(define_insn "macwh_d" |
| 12142 | + [(set (match_operand:DI 0 "register_operand" "+r") |
| 12143 | + (plus:DI (match_dup 0) |
| 12144 | + (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "%r")) |
| 12145 | + (sign_extend:DI (match_operand:HI 2 "register_operand" "r"))) |
| 12146 | + (const_int 16))))] |
| 12147 | + "TARGET_DSP" |
| 12148 | + "macwh.d\t%0, %1, %2:b" |
| 12149 | + [(set_attr "length" "4") |
| 12150 | + (set_attr "cc" "none") |
| 12151 | + (set_attr "type" "mulwh")]) |
| 12152 | + |
| 12153 | +(define_insn "machh_d" |
| 12154 | + [(set (match_operand:DI 0 "register_operand" "+r") |
| 12155 | + (plus:DI (match_dup 0) |
| 12156 | + (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r")) |
| 12157 | + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))))] |
| 12158 | + "TARGET_DSP" |
| 12159 | + "machh.d\t%0, %1:b, %2:b" |
| 12160 | + [(set_attr "length" "4") |
| 12161 | + (set_attr "cc" "none") |
| 12162 | + (set_attr "type" "mulwh")]) |
| 12163 | + |
| 12164 | +(define_insn "satadd_w" |
| 12165 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 12166 | + (ss_plus:SI (match_operand:SI 1 "register_operand" "r") |
| 12167 | + (match_operand:SI 2 "register_operand" "r")))] |
| 12168 | + "TARGET_DSP" |
| 12169 | + "satadd.w\t%0, %1, %2" |
| 12170 | + [(set_attr "length" "4") |
| 12171 | + (set_attr "cc" "none") |
| 12172 | + (set_attr "type" "alu_sat")]) |
| 12173 | + |
| 12174 | +(define_insn "satsub_w" |
| 12175 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 12176 | + (ss_minus:SI (match_operand:SI 1 "register_operand" "r") |
| 12177 | + (match_operand:SI 2 "register_operand" "r")))] |
| 12178 | + "TARGET_DSP" |
| 12179 | + "satsub.w\t%0, %1, %2" |
| 12180 | + [(set_attr "length" "4") |
| 12181 | + (set_attr "cc" "none") |
| 12182 | + (set_attr "type" "alu_sat")]) |
| 12183 | + |
| 12184 | +(define_insn "satadd_h" |
| 12185 | + [(set (match_operand:HI 0 "register_operand" "=r") |
| 12186 | + (ss_plus:HI (match_operand:HI 1 "register_operand" "r") |
| 12187 | + (match_operand:HI 2 "register_operand" "r")))] |
| 12188 | + "TARGET_DSP" |
| 12189 | + "satadd.h\t%0, %1, %2" |
| 12190 | + [(set_attr "length" "4") |
| 12191 | + (set_attr "cc" "none") |
| 12192 | + (set_attr "type" "alu_sat")]) |
| 12193 | + |
| 12194 | +(define_insn "satsub_h" |
| 12195 | + [(set (match_operand:HI 0 "register_operand" "=r") |
| 12196 | + (ss_minus:HI (match_operand:HI 1 "register_operand" "r") |
| 12197 | + (match_operand:HI 2 "register_operand" "r")))] |
| 12198 | + "TARGET_DSP" |
| 12199 | + "satsub.h\t%0, %1, %2" |
| 12200 | + [(set_attr "length" "4") |
| 12201 | + (set_attr "cc" "none") |
| 12202 | + (set_attr "type" "alu_sat")]) |
| 12203 | + |
| 12204 | + |
| 12205 | +;;============================================================================= |
| 12206 | +;; smin |
| 12207 | +;;----------------------------------------------------------------------------- |
| 12208 | +;; Set reg0 to the smaller of reg1 and reg2, treating the values in the
| 12209 | +;; registers as signed.
| 12210 | +;;============================================================================= |
| 12211 | +(define_insn "sminsi3" |
| 12212 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 12213 | + (smin:SI (match_operand:SI 1 "register_operand" "r") |
| 12214 | + (match_operand:SI 2 "register_operand" "r")))] |
| 12215 | + "" |
| 12216 | + "min %0, %1, %2" |
| 12217 | + [(set_attr "length" "4") |
| 12218 | + (set_attr "cc" "none")]) |
| 12219 | + |
| 12220 | +;;============================================================================= |
| 12221 | +;; smax |
| 12222 | +;;----------------------------------------------------------------------------- |
| 12223 | +;; Set reg0 to the larger of reg1 and reg2, treating the values in the
| 12224 | +;; registers as signed.
| 12225 | +;;============================================================================= |
| 12226 | +(define_insn "smaxsi3" |
| 12227 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 12228 | + (smax:SI (match_operand:SI 1 "register_operand" "r") |
| 12229 | + (match_operand:SI 2 "register_operand" "r")))] |
| 12230 | + "" |
| 12231 | + "max %0, %1, %2" |
| 12232 | + [(set_attr "length" "4") |
| 12233 | + (set_attr "cc" "none")]) |
| 12234 | + |
| 12235 | + |
| 12236 | +;;============================================================================= |
| 12237 | +;; Logical operations |
| 12238 | +;;----------------------------------------------------------------------------- |
| 12239 | + |
| 12240 | +;; Split up simple DImode logical operations. Simply perform the logical |
| 12241 | +;; operation on the upper and lower halves of the registers. |
| 12242 | +(define_split |
| 12243 | + [(set (match_operand:DI 0 "register_operand" "") |
| 12244 | + (match_operator:DI 6 "logical_binary_operator" |
| 12245 | + [(match_operand:DI 1 "register_operand" "") |
| 12246 | + (match_operand:DI 2 "register_operand" "")]))] |
| 12247 | + "reload_completed" |
| 12248 | + [(set (match_dup 0) (match_op_dup:SI 6 [(match_dup 1) (match_dup 2)])) |
| 12249 | + (set (match_dup 3) (match_op_dup:SI 6 [(match_dup 4) (match_dup 5)]))] |
| 12250 | + " |
| 12251 | + { |
| 12252 | + operands[3] = gen_highpart (SImode, operands[0]); |
| 12253 | + operands[0] = gen_lowpart (SImode, operands[0]); |
| 12254 | + operands[4] = gen_highpart (SImode, operands[1]); |
| 12255 | + operands[1] = gen_lowpart (SImode, operands[1]); |
| 12256 | + operands[5] = gen_highpart (SImode, operands[2]); |
| 12257 | + operands[2] = gen_lowpart (SImode, operands[2]); |
| 12258 | + }" |
| 12259 | +) |
| 12260 | + |
| 12261 | +;;============================================================================= |
| 12262 | +;; Logical operations with shifted operand |
| 12263 | +;;============================================================================= |
| 12264 | +(define_insn "<code>si_lshift" |
| 12265 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 12266 | + (logical:SI (match_operator:SI 4 "logical_shift_operator" |
| 12267 | + [(match_operand:SI 2 "register_operand" "r") |
| 12268 | + (match_operand:SI 3 "immediate_operand" "Ku05")]) |
| 12269 | + (match_operand:SI 1 "register_operand" "r")))] |
| 12270 | + "" |
| 12271 | + { |
| 12272 | + if ( GET_CODE(operands[4]) == ASHIFT ) |
| 12273 | + return "<logical_insn>\t%0, %1, %2 << %3"; |
| 12274 | + else |
| 12275 | + return "<logical_insn>\t%0, %1, %2 >> %3"; |
| 12276 | + } |
| 12277 | + |
| 12278 | + [(set_attr "cc" "set_z")] |
| 12279 | +) |
| 12280 | + |
| 12281 | + |
| 12282 | +;;************************************************ |
| 12283 | +;; Peepholes for detecting logical operations
| 12284 | +;; with shifted operands |
| 12285 | +;;************************************************ |
| 12286 | + |
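|  | +;; If the shifted intermediate value is dead (or is the same register as
|  | +;; the final destination), the separate shift instruction is merged into
|  | +;; the logical instruction's shifted-operand form, e.g. a single
|  | +;; "and rD, rA, rB << n" instead of a shift followed by the logical op.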
| 12287 | +(define_peephole |
| 12288 | + [(set (match_operand:SI 3 "register_operand" "") |
| 12289 | + (match_operator:SI 5 "logical_shift_operator" |
| 12290 | + [(match_operand:SI 1 "register_operand" "") |
| 12291 | + (match_operand:SI 2 "immediate_operand" "")])) |
| 12292 | + (set (match_operand:SI 0 "register_operand" "") |
| 12293 | + (logical:SI (match_operand:SI 4 "register_operand" "") |
| 12294 | + (match_dup 3)))] |
| 12295 | + "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))" |
| 12296 | + { |
| 12297 | + if ( GET_CODE(operands[5]) == ASHIFT ) |
| 12298 | + return "<logical_insn>\t%0, %4, %1 << %2"; |
| 12299 | + else |
| 12300 | + return "<logical_insn>\t%0, %4, %1 >> %2"; |
| 12301 | + } |
| 12302 | + [(set_attr "cc" "set_z")] |
| 12303 | + ) |
| 12304 | + |
| 12305 | +(define_peephole |
| 12306 | + [(set (match_operand:SI 3 "register_operand" "") |
| 12307 | + (match_operator:SI 5 "logical_shift_operator" |
| 12308 | + [(match_operand:SI 1 "register_operand" "") |
| 12309 | + (match_operand:SI 2 "immediate_operand" "")])) |
| 12310 | + (set (match_operand:SI 0 "register_operand" "") |
| 12311 | + (logical:SI (match_dup 3) |
| 12312 | + (match_operand:SI 4 "register_operand" "")))] |
| 12313 | + "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))" |
| 12314 | + { |
| 12315 | + if ( GET_CODE(operands[5]) == ASHIFT ) |
| 12316 | + return "<logical_insn>\t%0, %4, %1 << %2"; |
| 12317 | + else |
| 12318 | + return "<logical_insn>\t%0, %4, %1 >> %2"; |
| 12319 | + } |
| 12320 | + [(set_attr "cc" "set_z")] |
| 12321 | + ) |
| 12322 | + |
| 12323 | + |
| 12324 | +(define_peephole2 |
| 12325 | + [(set (match_operand:SI 0 "register_operand" "") |
| 12326 | + (match_operator:SI 5 "logical_shift_operator" |
| 12327 | + [(match_operand:SI 1 "register_operand" "") |
| 12328 | + (match_operand:SI 2 "immediate_operand" "")])) |
| 12329 | + (set (match_operand:SI 3 "register_operand" "") |
| 12330 | + (logical:SI (match_operand:SI 4 "register_operand" "") |
| 12331 | + (match_dup 0)))] |
| 12332 | + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))" |
| 12333 | + |
| 12334 | + [(set (match_dup 3) |
| 12335 | + (logical:SI (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)]) |
| 12336 | + (match_dup 4)))] |
| 12337 | + |
| 12338 | + "" |
| 12339 | +) |
| 12340 | + |
| 12341 | +(define_peephole2 |
| 12342 | + [(set (match_operand:SI 0 "register_operand" "") |
| 12343 | + (match_operator:SI 5 "logical_shift_operator" |
| 12344 | + [(match_operand:SI 1 "register_operand" "") |
| 12345 | + (match_operand:SI 2 "immediate_operand" "")])) |
| 12346 | + (set (match_operand:SI 3 "register_operand" "") |
| 12347 | + (logical:SI (match_dup 0) |
| 12348 | + (match_operand:SI 4 "register_operand" "")))] |
| 12349 | + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))" |
| 12350 | + |
| 12351 | + [(set (match_dup 3) |
| 12352 | + (logical:SI (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)]) |
| 12353 | + (match_dup 4)))] |
| 12354 | + |
| 12355 | + "" |
| 12356 | +) |
| 12357 | + |
| 12358 | + |
| 12359 | +;;============================================================================= |
| 12360 | +;; and |
| 12361 | +;;----------------------------------------------------------------------------- |
| 12362 | +;; Store the result of a bitwise logical-and between reg0 and reg2 in reg0.
| 12363 | +;;============================================================================= |
| 12364 | + |
| 12365 | +(define_insn "andnsi" |
| 12366 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 12367 | + (and:SI (match_operand:SI 1 "register_operand" "0") |
| 12368 | + (not:SI (match_operand:SI 2 "register_operand" "r"))))] |
| 12369 | + "" |
| 12370 | + "andn %0, %2" |
| 12371 | + [(set_attr "cc" "set_z") |
| 12372 | + (set_attr "length" "2")] |
| 12373 | +) |
| 12374 | + |
| 12375 | + |
| 12376 | + |
| 12377 | + |
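|  | +;; The constant alternatives below pick the cheapest encoding: bfextu for
|  | +;; the mask constants matched by "M" (all bits below the top set bit also
|  | +;; set), cbr when exactly one bit is cleared, a single andl/andh (with COH
|  | +;; to clear the other half where needed) when one 16-bit half of the mask
|  | +;; is all zeros or all ones, and an andh/andl pair otherwise.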
| 12378 | +(define_insn "andsi3" |
| 12379 | + [(set (match_operand:SI 0 "register_operand" "=r, r, r, r") |
| 12380 | + (and:SI (match_operand:SI 1 "register_operand" "%0, r, 0, r") |
| 12381 | + (match_operand:SI 2 "nonmemory_operand" "r, M, i, r")))] |
| 12382 | + "" |
| 12383 | + { |
| 12384 | + switch (which_alternative){ |
| 12385 | + case 0: |
| 12386 | + return "and\t%0, %2"; |
| 12387 | + case 1: |
| 12388 | + { |
| 12389 | + int i, first_set = -1; |
| 12390 | + /* Search for first bit set in mask */ |
| 12391 | + for ( i = 31; i >= 0; --i ) |
| 12392 | + if ( INTVAL(operands[2]) & (1 << i) ){ |
| 12393 | + first_set = i; |
| 12394 | + break; |
| 12395 | + } |
| 12396 | + operands[2] = gen_rtx_CONST_INT(SImode, first_set + 1); |
| 12397 | + return "bfextu\t%0, %1, 0, %2"; |
| 12398 | + } |
| 12399 | + case 2: |
| 12400 | + if ( one_bit_cleared_operand(operands[2], VOIDmode) ){ |
| 12401 | + int bitpos; |
| 12402 | + for ( bitpos = 0; bitpos < 32; bitpos++ ) |
| 12403 | + if ( !(INTVAL(operands[2]) & (1 << bitpos)) ) |
| 12404 | + break; |
| 12405 | + operands[2] = gen_rtx_CONST_INT(SImode, bitpos); |
| 12406 | + return "cbr\t%0, %2"; |
| 12407 | + } else if ( (INTVAL(operands[2]) >= 0) && |
| 12408 | + (INTVAL(operands[2]) <= 65535) ) |
| 12409 | + return "andl\t%0, %2, COH"; |
| 12410 | + else if ( (INTVAL(operands[2]) < 0) && |
| 12411 | + (INTVAL(operands[2]) >= -65536 ) ) |
| 12412 | + return "andl\t%0, lo(%2)"; |
| 12413 | + else if ( ((INTVAL(operands[2]) & 0xffff) == 0xffff) ) |
| 12414 | + return "andh\t%0, hi(%2)"; |
| 12415 | + else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) ) |
| 12416 | + return "andh\t%0, hi(%2), COH"; |
| 12417 | + else |
| 12418 | + return "andh\t%0, hi(%2)\;andl\t%0, lo(%2)"; |
| 12419 | + case 3: |
| 12420 | + return "and\t%0, %1, %2"; |
| 12421 | + default: |
| 12422 | + abort(); |
| 12423 | + } |
| 12424 | + } |
| 12425 | + |
| 12426 | + [(set_attr "length" "2,4,8,4") |
| 12427 | + (set_attr "cc" "set_z")]) |
| 12428 | + |
| 12429 | + |
| 12430 | +(define_insn "anddi3" |
| 12431 | + [(set (match_operand:DI 0 "register_operand" "=&r,&r") |
| 12432 | + (and:DI (match_operand:DI 1 "register_operand" "%0,r") |
| 12433 | + (match_operand:DI 2 "register_operand" "r,r")))] |
| 12434 | + "" |
| 12435 | + "#" |
| 12436 | + [(set_attr "length" "8") |
| 12437 | + (set_attr "cc" "clobber")] |
| 12438 | +) |
| 12439 | + |
| 12440 | +;;============================================================================= |
| 12441 | +;; or |
| 12442 | +;;----------------------------------------------------------------------------- |
| 12443 | +;; Store the result of a bitwise inclusive-or between reg0 and reg2 in reg0.
| 12444 | +;;============================================================================= |
| 12445 | + |
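|  | +;; For constant operands: sbr when exactly one bit is set, a single orl or
|  | +;; orh when the other 16-bit half of the constant is zero, and an orh/orl
|  | +;; pair otherwise.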
| 12446 | +(define_insn "iorsi3" |
| 12447 | + [(set (match_operand:SI 0 "register_operand" "=r,r,r") |
| 12448 | + (ior:SI (match_operand:SI 1 "register_operand" "%0,0,r" ) |
| 12449 | + (match_operand:SI 2 "nonmemory_operand" "r ,i,r")))] |
| 12450 | + "" |
| 12451 | + { |
| 12452 | + switch (which_alternative){ |
| 12453 | + case 0: |
| 12454 | + return "or\t%0, %2"; |
| 12455 | + case 1: |
| 12456 | + if ( one_bit_set_operand(operands[2], VOIDmode) ){ |
| 12457 | + int bitpos; |
| 12458 | + for (bitpos = 0; bitpos < 32; bitpos++) |
| 12459 | + if (INTVAL(operands[2]) & (1 << bitpos)) |
| 12460 | + break; |
| 12461 | + operands[2] = gen_rtx_CONST_INT( SImode, bitpos); |
| 12462 | + return "sbr\t%0, %2"; |
| 12463 | + } else if ( (INTVAL(operands[2]) >= 0) && |
| 12464 | + (INTVAL(operands[2]) <= 65535) ) |
| 12465 | + return "orl\t%0, %2"; |
| 12466 | + else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) ) |
| 12467 | + return "orh\t%0, hi(%2)"; |
| 12468 | + else |
| 12469 | + return "orh\t%0, hi(%2)\;orl\t%0, lo(%2)"; |
| 12470 | + case 2: |
| 12471 | + return "or\t%0, %1, %2"; |
| 12472 | + default: |
| 12473 | + abort(); |
| 12474 | + } |
| 12475 | + } |
| 12476 | + [(set_attr "length" "2,8,4") |
| 12477 | + (set_attr "cc" "set_z")]) |
| 12478 | + |
| 12479 | + |
| 12480 | +;(define_insn "iorsi3" |
| 12481 | +; [(set (match_operand:SI 0 "register_operand" "=r, r, r") |
| 12482 | +; (ior:SI (match_operand:SI 1 "avr32_logical_insn_operand" "r, r, rA" ) |
| 12483 | +; (match_operand:SI 2 "register_operand" "0, i, r")))] |
| 12484 | +; "" |
| 12485 | +; { |
| 12486 | +; switch (which_alternative){ |
| 12487 | +; case 0: |
| 12488 | +; return "or %0, %2"; |
| 12489 | +; case 1: |
| 12490 | +; if ( one_bit_set_operand(operands[2], VOIDmode) ){ |
| 12491 | +; int i, bitpos; |
| 12492 | +; for ( i = 0; i < 32; i++ ) |
| 12493 | +; if ( INTVAL(operands[2]) & (1 << i) ){ |
| 12494 | +; bitpos = i; |
| 12495 | +; break; |
| 12496 | +; } |
| 12497 | +; operands[2] = gen_rtx_CONST_INT( SImode, bitpos); |
| 12498 | +; return "sbr %0, %2"; |
| 12499 | +; } else if ( (INTVAL(operands[2]) >= 0) && |
| 12500 | +; (INTVAL(operands[2]) <= 65535) ) |
| 12501 | +; return "orl %0, %2"; |
| 12502 | +; else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) ) |
| 12503 | +; return "orh %0, hi(%2)"; |
| 12504 | +; else |
| 12505 | +; return "orh %0, hi(%2)\;orl %0, lo(%2)"; |
| 12506 | +; case 2: |
| 12507 | +; return "or %0, %2, %1"; |
| 12508 | +; } |
| 12509 | +; } |
| 12510 | +; [(set_attr "length" "2,8,4") |
| 12511 | +; (set_attr "cc" "set_z")]) |
| 12512 | + |
| 12513 | +(define_insn "iordi3" |
| 12514 | + [(set (match_operand:DI 0 "register_operand" "=&r,&r") |
| 12515 | + (ior:DI (match_operand:DI 1 "register_operand" "%0,r") |
| 12516 | + (match_operand:DI 2 "register_operand" "r,r")))] |
| 12517 | + "" |
| 12518 | + "#" |
| 12519 | + [(set_attr "length" "8") |
| 12520 | + (set_attr "cc" "clobber")] |
| 12521 | +) |
| 12522 | + |
| 12523 | +;;============================================================================= |
| 12524 | +;; xor
| 12525 | +;;----------------------------------------------------------------------------- |
| 12526 | +;; Store the result of a bitwise exclusive-or between reg0 and reg2 in reg0.
| 12527 | +;;============================================================================= |
| 12528 | + |
| 12529 | +(define_insn "xorsi3" |
| 12530 | + [(set (match_operand:SI 0 "register_operand" "=r,r,r") |
| 12531 | + (xor:SI (match_operand:SI 1 "register_operand" "0,0,r") |
| 12532 | + (match_operand:SI 2 "nonmemory_operand" "r,i,r")))] |
| 12533 | + "" |
| 12534 | + { |
| 12535 | + switch (which_alternative){ |
| 12536 | + case 0: |
| 12537 | + return "eor %0, %2"; |
| 12538 | + case 1: |
| 12539 | + if ( (INTVAL(operands[2]) >= 0) && |
| 12540 | + (INTVAL(operands[2]) <= 65535) ) |
| 12541 | + return "eorl %0, %2"; |
| 12542 | + else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) ) |
| 12543 | + return "eorh %0, hi(%2)"; |
| 12544 | + else |
| 12545 | + return "eorh %0, hi(%2)\;eorl %0, lo(%2)"; |
| 12546 | + case 2: |
| 12547 | + return "eor %0, %1, %2"; |
| 12548 | + default: |
| 12549 | + abort(); |
| 12550 | + } |
| 12551 | + } |
| 12552 | + |
| 12553 | + [(set_attr "length" "2,8,4") |
| 12554 | + (set_attr "cc" "set_z")]) |
| 12555 | + |
| 12556 | +(define_insn "xordi3" |
| 12557 | + [(set (match_operand:DI 0 "register_operand" "=&r,&r") |
| 12558 | + (xor:DI (match_operand:DI 1 "register_operand" "%0,r") |
| 12559 | + (match_operand:DI 2 "register_operand" "r,r")))] |
| 12560 | + "" |
| 12561 | + "#" |
| 12562 | + [(set_attr "length" "8") |
| 12563 | + (set_attr "cc" "clobber")] |
| 12564 | +) |
| 12565 | + |
| 12566 | +;;============================================================================= |
| 12567 | +;; divmod |
| 12568 | +;;----------------------------------------------------------------------------- |
| 12569 | +;; Signed division that produces both a quotient and a remainder. |
| 12570 | +;;============================================================================= |
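|  | +;; The expander wraps the divs pattern below, which returns the quotient
|  | +;; and the remainder packed in a DImode register pair; the two SImode
|  | +;; halves are then extracted with subregs into operands 0 and 3.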
| 12571 | +(define_expand "divmodsi4" |
| 12572 | + [(parallel [ |
| 12573 | + (parallel [ |
| 12574 | + (set (match_operand:SI 0 "register_operand" "=r") |
| 12575 | + (div:SI (match_operand:SI 1 "register_operand" "r") |
| 12576 | + (match_operand:SI 2 "register_operand" "r"))) |
| 12577 | + (set (match_operand:SI 3 "register_operand" "=r") |
| 12578 | + (mod:SI (match_dup 1) |
| 12579 | + (match_dup 2)))]) |
| 12580 | + (use (match_dup 4))])] |
| 12581 | + "" |
| 12582 | + { |
| 12583 | + if (! no_new_pseudos) { |
| 12584 | + operands[4] = gen_reg_rtx (DImode); |
| 12585 | + |
| 12586 | + emit_insn(gen_divmodsi4_internal(operands[4],operands[1],operands[2])); |
| 12587 | + emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4)); |
| 12588 | + emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0)); |
| 12589 | + |
| 12590 | + DONE; |
| 12591 | + } else { |
| 12592 | + FAIL; |
| 12593 | + } |
| 12594 | + |
| 12595 | + }) |
| 12596 | + |
| 12597 | + |
| 12598 | +(define_insn "divmodsi4_internal" |
| 12599 | + [(set (match_operand:DI 0 "register_operand" "=r") |
| 12600 | + (unspec:DI [(match_operand:SI 1 "register_operand" "r") |
| 12601 | + (match_operand:SI 2 "register_operand" "r")] |
| 12602 | + UNSPEC_DIVMODSI4_INTERNAL))] |
| 12603 | + "" |
| 12604 | + "divs %0, %1, %2" |
| 12605 | + [(set_attr "type" "div") |
| 12606 | + (set_attr "cc" "none")]) |
| 12607 | + |
| 12608 | + |
| 12609 | +;;============================================================================= |
| 12610 | +;; udivmod |
| 12611 | +;;----------------------------------------------------------------------------- |
| 12612 | +;; Unsigned division that produces both a quotient and a remainder. |
| 12613 | +;;============================================================================= |
| 12614 | +(define_expand "udivmodsi4" |
| 12615 | + [(parallel [ |
| 12616 | + (parallel [ |
| 12617 | + (set (match_operand:SI 0 "register_operand" "=r") |
| 12618 | + (udiv:SI (match_operand:SI 1 "register_operand" "r") |
| 12619 | + (match_operand:SI 2 "register_operand" "r"))) |
| 12620 | + (set (match_operand:SI 3 "register_operand" "=r") |
| 12621 | + (umod:SI (match_dup 1) |
| 12622 | + (match_dup 2)))]) |
| 12623 | + (use (match_dup 4))])] |
| 12624 | + "" |
| 12625 | + { |
| 12626 | + if (! no_new_pseudos) { |
| 12627 | + operands[4] = gen_reg_rtx (DImode); |
| 12628 | + |
| 12629 | + emit_insn(gen_udivmodsi4_internal(operands[4],operands[1],operands[2])); |
| 12630 | + emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4)); |
| 12631 | + emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0)); |
| 12632 | + |
| 12633 | + DONE; |
| 12634 | + } else { |
| 12635 | + FAIL; |
| 12636 | + } |
| 12637 | + }) |
| 12638 | + |
| 12639 | +(define_insn "udivmodsi4_internal" |
| 12640 | + [(set (match_operand:DI 0 "register_operand" "=r") |
| 12641 | + (unspec:DI [(match_operand:SI 1 "register_operand" "r") |
| 12642 | + (match_operand:SI 2 "register_operand" "r")] |
| 12643 | + UNSPEC_UDIVMODSI4_INTERNAL))] |
| 12644 | + "" |
| 12645 | + "divu %0, %1, %2" |
| 12646 | + [(set_attr "type" "div") |
| 12647 | + (set_attr "cc" "none")]) |
| 12648 | + |
| 12649 | + |
| 12650 | +;;============================================================================= |
| 12651 | +;; Arithmetic-shift left |
| 12652 | +;;----------------------------------------------------------------------------- |
| 12653 | +;; Arithmetic-shift reg0 left by reg2 or an immediate value. |
| 12654 | +;;============================================================================= |
| 12655 | + |
| 12656 | +(define_insn "ashlsi3" |
| 12657 | + [(set (match_operand:SI 0 "register_operand" "=r,r,r") |
| 12658 | + (ashift:SI (match_operand:SI 1 "register_operand" "r,0,r") |
| 12659 | + (match_operand:SI 2 "nonmemory_operand" "r,Ku05,Ku05")))] |
| 12660 | + "" |
| 12661 | + "@ |
| 12662 | + lsl %0, %1, %2 |
| 12663 | + lsl %0, %2 |
| 12664 | + lsl %0, %1, %2" |
| 12665 | + [(set_attr "length" "4,2,4") |
| 12666 | + (set_attr "cc" "set_ncz")]) |
| 12667 | + |
| 12668 | +;;============================================================================= |
| 12669 | +;; Arithmetic-shift right |
| 12670 | +;;----------------------------------------------------------------------------- |
| 12671 | +;; Arithmetic-shift reg0 right by reg2 or an immediate value. |
| 12672 | +;;============================================================================= |
| 12673 | + |
| 12674 | +(define_insn "ashrsi3" |
| 12675 | + [(set (match_operand:SI 0 "register_operand" "=r,r,r") |
| 12676 | + (ashiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r") |
| 12677 | + (match_operand:SI 2 "nonmemory_operand" "r,Ku05,Ku05")))] |
| 12678 | + "" |
| 12679 | + "@ |
| 12680 | + asr %0, %1, %2 |
| 12681 | + asr %0, %2 |
| 12682 | + asr %0, %1, %2" |
| 12683 | + [(set_attr "length" "4,2,4") |
| 12684 | + (set_attr "cc" "set_ncz")]) |
| 12685 | + |
| 12686 | +;;============================================================================= |
| 12687 | +;; Logical shift right |
| 12688 | +;;----------------------------------------------------------------------------- |
| 12689 | +;; Logical shift reg0 right by reg2 or an immediate value. |
| 12690 | +;;============================================================================= |
| 12691 | + |
| 12692 | +(define_insn "lshrsi3" |
| 12693 | + [(set (match_operand:SI 0 "register_operand" "=r,r,r") |
| 12694 | + (lshiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r") |
| 12695 | + (match_operand:SI 2 "nonmemory_operand" "r,Ku05,Ku05")))] |
| 12696 | + "" |
| 12697 | + "@ |
| 12698 | + lsr %0, %1, %2 |
| 12699 | + lsr %0, %2 |
| 12700 | + lsr %0, %1, %2" |
| 12701 | + [(set_attr "length" "4,2,4") |
| 12702 | + (set_attr "cc" "set_ncz")]) |
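|  | +;; Informal examples for the three shift patterns above: with a 5-bit |
|  | +;; unsigned immediate count (Ku05) and the destination equal to the source, |
|  | +;; the compact two-byte form is used, otherwise the four-byte three-operand |
|  | +;; form: |
|  | +;;   x <<= 3             ->  lsl rX, 3 |
|  | +;;   x >>= 3 (unsigned)  ->  lsr rX, 3 |
|  | +;;   x >>= 3 (signed)    ->  asr rX, 3 |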
| 12703 | + |
| 12704 | + |
| 12705 | +;;============================================================================= |
| 12706 | +;; neg |
| 12707 | +;;----------------------------------------------------------------------------- |
| 12708 | +;; Negate operand 1 and store the result in operand 0. |
| 12709 | +;;============================================================================= |
| 12710 | +(define_insn "negsi2" |
| 12711 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 12712 | + (neg:SI (match_operand:SI 1 "register_operand" "0")))] |
| 12713 | + "" |
| 12714 | + "neg %0" |
| 12715 | + [(set_attr "length" "2") |
| 12716 | + (set_attr "cc" "set_vncz")]) |
| 12717 | + |
| 12718 | +;;============================================================================= |
| 12719 | +;; abs |
| 12720 | +;;----------------------------------------------------------------------------- |
| 12721 | +;; Store the absolute value of operand 1 into operand 0. |
| 12722 | +;;============================================================================= |
| 12723 | +(define_insn "abssi2" |
| 12724 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 12725 | + (abs:SI (match_operand:SI 1 "register_operand" "0")))] |
| 12726 | + "" |
| 12727 | + "abs %0" |
| 12728 | + [(set_attr "length" "2") |
| 12729 | + (set_attr "cc" "set_z")]) |
| 12730 | + |
| 12731 | + |
| 12732 | +;;============================================================================= |
| 12733 | +;; one_cmpl |
| 12734 | +;;----------------------------------------------------------------------------- |
| 12735 | +;; Store the bitwise-complement of operand 1 into operand 0. |
| 12736 | +;;============================================================================= |
| 12737 | + |
| 12738 | +(define_insn "one_cmplsi2" |
| 12739 | + [(set (match_operand:SI 0 "register_operand" "=r,r") |
| 12740 | + (not:SI (match_operand:SI 1 "register_operand" "r,0")))] |
| 12741 | + "" |
| 12742 | + "@ |
| 12743 | + rsub %0, %1, -1 |
| 12744 | + com %0" |
| 12745 | + [(set_attr "length" "4,2") |
| 12746 | + (set_attr "cc" "set_z")]) |
| 12747 | + |
| 12748 | + |
| 12749 | +;;============================================================================= |
| 12750 | +;; Bit load |
| 12751 | +;;----------------------------------------------------------------------------- |
| 12752 | +;; Load a bit into Z and C flags |
| 12753 | +;;============================================================================= |
| 12754 | +(define_insn "bldsi" |
| 12755 | + [(set (cc0) |
| 12756 | + (and:SI (match_operand:SI 0 "register_operand" "r") |
| 12757 | + (match_operand:SI 1 "one_bit_set_operand" "i")))] |
| 12758 | + "" |
| 12759 | + "bld\t%0, %p1" |
| 12760 | + [(set_attr "length" "4") |
| 12761 | + (set_attr "cc" "bld")] |
| 12762 | + ) |
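|  | +;; Informal example: a single-bit test such as |
|  | +;;   if (x & (1 << 7)) ... |
|  | +;; is intended to match this pattern as "bld rX, 7", loading bit 7 into the |
|  | +;; Z and C flags for a following conditional branch (%p1 presumably prints |
|  | +;; the bit position of the one-bit-set constant). |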
| 12763 | + |
| 12764 | + |
| 12765 | +;;============================================================================= |
| 12766 | +;; Compare |
| 12767 | +;;----------------------------------------------------------------------------- |
| 12768 | +;; Compare reg0 with reg1 or an immediate value. |
| 12769 | +;;============================================================================= |
| 12770 | + |
| 12771 | +(define_expand "cmpqi" |
| 12772 | + [(set (cc0) |
| 12773 | + (compare:QI |
| 12774 | + (match_operand:QI 0 "general_operand" "") |
| 12775 | + (match_operand:QI 1 "general_operand" "")))] |
| 12776 | + "" |
| 12777 | + "{ |
| 12778 | + |
| 12779 | + if ( GET_CODE(operands[0]) != REG |
| 12780 | + && GET_CODE(operands[0]) != SUBREG) |
| 12781 | + operands[0] = force_reg(QImode, operands[0]); |
| 12782 | + |
| 12783 | + |
| 12784 | + if ( GET_CODE(operands[1]) != REG |
| 12785 | + && GET_CODE(operands[1]) != SUBREG ) |
| 12786 | + operands[1] = force_reg(QImode, operands[1]); |
| 12787 | + |
| 12788 | + avr32_compare_op0 = operands[0]; |
| 12789 | + avr32_compare_op1 = operands[1]; |
| 12790 | + emit_insn(gen_cmpqi_internal(operands[0], operands[1])); |
| 12791 | + DONE; |
| 12792 | + }" |
| 12793 | +) |
| 12794 | + |
| 12795 | +(define_insn "cmpqi_internal" |
| 12796 | + [(set (cc0) |
| 12797 | + (compare:QI |
| 12798 | + (match_operand:QI 0 "register_operand" "r") |
| 12799 | + (match_operand:QI 1 "register_operand" "r")))] |
| 12800 | + "" |
| 12801 | + { |
| 12802 | + set_next_insn_cond(insn, |
| 12803 | + avr32_output_cmp(get_next_insn_cond(insn), QImode, operands[0], operands[1])); |
| 12804 | + return ""; |
| 12805 | + } |
| 12806 | + [(set_attr "length" "4") |
| 12807 | + (set_attr "cc" "compare")]) |
| 12808 | + |
| 12809 | +(define_expand "cmphi" |
| 12810 | + [(set (cc0) |
| 12811 | + (compare:HI |
| 12812 | + (match_operand:HI 0 "general_operand" "") |
| 12813 | + (match_operand:HI 1 "general_operand" "")))] |
| 12814 | + "" |
| 12815 | + "{ |
| 12816 | + if ( GET_CODE(operands[0]) != REG |
| 12817 | + && GET_CODE(operands[0]) != SUBREG ) |
| 12818 | + operands[0] = force_reg(HImode, operands[0]); |
| 12819 | + |
| 12820 | + |
| 12821 | + if ( GET_CODE(operands[1]) != REG |
| 12822 | + && GET_CODE(operands[1]) != SUBREG) |
| 12823 | + operands[1] = force_reg(HImode, operands[1]); |
| 12824 | + |
| 12825 | + avr32_compare_op0 = operands[0]; |
| 12826 | + avr32_compare_op1 = operands[1]; |
| 12827 | + emit_insn(gen_cmphi_internal(operands[0], operands[1])); |
| 12828 | + DONE; |
| 12829 | + }" |
| 12830 | +) |
| 12831 | + |
| 12832 | + |
| 12833 | +(define_insn "cmphi_internal" |
| 12834 | + [(set (cc0) |
| 12835 | + (compare:HI |
| 12836 | + (match_operand:HI 0 "register_operand" "r") |
| 12837 | + (match_operand:HI 1 "register_operand" "r")))] |
| 12838 | + "" |
| 12839 | + { |
| 12840 | + set_next_insn_cond(insn, |
| 12841 | + avr32_output_cmp(get_next_insn_cond(insn), HImode, operands[0], operands[1])); |
| 12842 | + return ""; |
| 12843 | + } |
| 12844 | + [(set_attr "length" "4") |
| 12845 | + (set_attr "cc" "compare")]) |
| 12846 | + |
| 12847 | + |
| 12848 | +(define_expand "cmpsi" |
| 12849 | + [(set (cc0) |
| 12850 | + (compare:SI |
| 12851 | + (match_operand:SI 0 "general_operand" "") |
| 12852 | + (match_operand:SI 1 "general_operand" "")))] |
| 12853 | + "" |
| 12854 | + "{ |
| 12855 | + if ( GET_CODE(operands[0]) != REG |
| 12856 | + && GET_CODE(operands[0]) != SUBREG ) |
| 12857 | + operands[0] = force_reg(SImode, operands[0]); |
| 12858 | + |
| 12859 | + if ( GET_CODE(operands[1]) != REG |
| 12860 | + && GET_CODE(operands[1]) != SUBREG |
| 12861 | + && GET_CODE(operands[1]) != CONST_INT ) |
| 12862 | + operands[1] = force_reg(SImode, operands[1]); |
| 12863 | + |
| 12864 | + avr32_compare_op0 = operands[0]; |
| 12865 | + avr32_compare_op1 = operands[1]; |
| 12866 | + |
| 12867 | + |
| 12868 | + emit_insn(gen_cmpsi_internal(operands[0], operands[1])); |
| 12869 | + DONE; |
| 12870 | + }" |
| 12871 | +) |
| 12872 | + |
| 12873 | + |
| 12874 | + |
| 12875 | + |
| 12876 | +(define_insn "cmpsi_internal" |
| 12877 | + [(set (cc0) |
| 12878 | + (compare:SI |
| 12879 | + (match_operand:SI 0 "register_operand" "r, r, r") |
| 12880 | + (match_operand:SI 1 "nonmemory_operand" "r, Ks06, Ks21")))] |
| 12881 | + "" |
| 12882 | + { |
| 12883 | + set_next_insn_cond(insn, |
| 12884 | + avr32_output_cmp(get_next_insn_cond(insn), SImode, operands[0], operands[1])); |
| 12885 | + return ""; |
| 12886 | + } |
| 12887 | + |
| 12888 | + [(set_attr "length" "2,2,4") |
| 12889 | + (set_attr "cc" "compare")]) |
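|  | +;; Note on the scheme used by the cmp*_internal patterns: the template here |
|  | +;; is empty, so avr32_output_cmp() is expected to print the compare itself, |
|  | +;; choosing a suitable form for the given operands, and to return the |
|  | +;; condition the next flag-consuming insn (found via get_next_insn_cond) |
|  | +;; should use; set_next_insn_cond stores that condition back.  Roughly: |
|  | +;;   if (a < b) ...  ->  cp.w ra, rb   followed by   brlt ... |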
| 12890 | + |
| 12891 | + |
| 12892 | +(define_expand "cmpdi" |
| 12893 | + [(set (cc0) |
| 12894 | + (compare:DI |
| 12895 | + (match_operand:DI 0 "register_operand" "") |
| 12896 | + (match_operand:DI 1 "register_operand" "")))] |
| 12897 | + "" |
| 12898 | + { |
| 12899 | + avr32_compare_op0 = operands[0]; |
| 12900 | + avr32_compare_op1 = operands[1]; |
| 12901 | + emit_insn(gen_cmpdi_internal(operands[0], operands[1])); |
| 12902 | + DONE; |
| 12903 | + } |
| 12904 | +) |
| 12905 | + |
| 12906 | +(define_insn "cmpdi_internal" |
| 12907 | + [(set (cc0) |
| 12908 | + (compare:DI |
| 12909 | + (match_operand:DI 0 "register_operand" "r") |
| 12910 | + (match_operand:DI 1 "register_operand" "r")))] |
| 12911 | + "" |
| 12912 | + { |
| 12913 | + set_next_insn_cond(insn, |
| 12914 | + avr32_output_cmp(get_next_insn_cond(insn), DImode, operands[0], operands[1])); |
| 12915 | + return ""; |
| 12916 | + } |
| 12917 | + |
| 12918 | + [(set_attr "length" "6") |
| 12919 | + (set_attr "type" "alu2") |
| 12920 | + (set_attr "cc" "compare")]) |
| 12921 | + |
| 12922 | + |
| 12923 | + |
| 12924 | +;;============================================================================= |
| 12925 | +;; Test if zero |
| 12926 | +;;----------------------------------------------------------------------------- |
| 12927 | +;; Compare reg against zero and set the condition codes. |
| 12928 | +;;============================================================================= |
| 12929 | + |
| 12930 | + |
| 12931 | +(define_expand "tstsi" |
| 12932 | + [(set (cc0) |
| 12933 | + (match_operand:SI 0 "register_operand" ""))] |
| 12934 | + "" |
| 12935 | + { |
| 12936 | + avr32_compare_op0 = operands[0]; |
| 12937 | + avr32_compare_op1 = gen_rtx_CONST_INT(SImode, 0); |
| 12938 | + emit_insn(gen_tstsi_internal(operands[0])); |
| 12939 | + DONE; |
| 12940 | + } |
| 12941 | +) |
| 12942 | + |
| 12943 | +(define_insn "tstsi_internal" |
| 12944 | + [(set (cc0) |
| 12945 | + (match_operand:SI 0 "register_operand" "r"))] |
| 12946 | + "" |
| 12947 | + { |
| 12948 | + set_next_insn_cond(insn, |
| 12949 | + avr32_output_cmp(get_next_insn_cond(insn), SImode, operands[0], const0_rtx)); |
| 12950 | + |
| 12951 | + return ""; |
| 12952 | + } |
| 12953 | + [(set_attr "length" "2") |
| 12954 | + (set_attr "cc" "compare")]) |
| 12955 | + |
| 12956 | + |
| 12957 | +(define_expand "tstdi" |
| 12958 | + [(set (cc0) |
| 12959 | + (match_operand:DI 0 "register_operand" ""))] |
| 12960 | + "" |
| 12961 | + { |
| 12962 | + avr32_compare_op0 = operands[0]; |
| 12963 | + avr32_compare_op1 = gen_rtx_CONST_INT(DImode, 0); |
| 12964 | + emit_insn(gen_tstdi_internal(operands[0])); |
| 12965 | + DONE; |
| 12966 | + } |
| 12967 | +) |
| 12968 | + |
| 12969 | +(define_insn "tstdi_internal" |
| 12970 | + [(set (cc0) |
| 12971 | + (match_operand:DI 0 "register_operand" "r"))] |
| 12972 | + "" |
| 12973 | + { |
| 12974 | + set_next_insn_cond(insn, |
| 12975 | + avr32_output_cmp(get_next_insn_cond(insn), DImode, operands[0], const0_rtx)); |
| 12976 | + return ""; |
| 12977 | + } |
| 12978 | + [(set_attr "length" "4") |
| 12979 | + (set_attr "type" "alu2") |
| 12980 | + (set_attr "cc" "compare")]) |
| 12981 | + |
| 12982 | + |
| 12983 | + |
| 12984 | +;;============================================================================= |
| 12985 | +;; Convert operands |
| 12986 | +;;----------------------------------------------------------------------------- |
| 12987 | +;; Truncate a DImode value to SImode. |
| 12988 | +;;============================================================================= |
| 12989 | +(define_insn "truncdisi2" |
| 12990 | + [(set (match_operand:SI 0 "general_operand" "") |
| 12991 | + (truncate:SI (match_operand:DI 1 "general_operand" "")))] |
| 12992 | + "" |
| 12993 | + "truncdisi2") |
| 12994 | + |
| 12995 | +;;============================================================================= |
| 12996 | +;; Extend |
| 12997 | +;;----------------------------------------------------------------------------- |
| 12998 | +;; Sign-extend QImode and HImode values to wider integer modes. |
| 12999 | +;;============================================================================= |
| 13000 | + |
| 13001 | + |
| 13002 | +(define_insn "extendhisi2" |
| 13003 | + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r") |
| 13004 | + (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))] |
| 13005 | + "" |
| 13006 | + { |
| 13007 | + switch ( which_alternative ){ |
| 13008 | + case 0: |
| 13009 | + return "casts.h\t%0"; |
| 13010 | + case 1: |
| 13011 | + return "bfexts\t%0, %1, 0, 16"; |
| 13012 | + case 2: |
| 13013 | + case 3: |
| 13014 | + return "ld.sh\t%0, %1"; |
| 13015 | + default: |
| 13016 | + abort(); |
| 13017 | + } |
| 13018 | + } |
| 13019 | + [(set_attr "length" "2,4,2,4") |
| 13020 | + (set_attr "cc" "set_ncz,set_ncz,none,none") |
| 13021 | + (set_attr "type" "alu,alu,load_rm,load_rm")]) |
| 13022 | + |
| 13023 | +(define_insn "extendqisi2" |
| 13024 | + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r") |
| 13025 | + (sign_extend:SI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))] |
| 13026 | + "" |
| 13027 | + { |
| 13028 | + switch ( which_alternative ){ |
| 13029 | + case 0: |
| 13030 | + return "casts.b\t%0"; |
| 13031 | + case 1: |
| 13032 | + return "bfexts\t%0, %1, 0, 8"; |
| 13033 | + case 2: |
| 13034 | + case 3: |
| 13035 | + return "ld.sb\t%0, %1"; |
| 13036 | + default: |
| 13037 | + abort(); |
| 13038 | + } |
| 13039 | + } |
| 13040 | + [(set_attr "length" "2,4,2,4") |
| 13041 | + (set_attr "cc" "set_ncz,set_ncz,none,none") |
| 13042 | + (set_attr "type" "alu,alu,load_rm,load_rm")]) |
| 13043 | + |
| 13044 | +(define_insn "extendqihi2" |
| 13045 | + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r") |
| 13046 | + (sign_extend:HI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))] |
| 13047 | + "" |
| 13048 | + { |
| 13049 | + switch ( which_alternative ){ |
| 13050 | + case 0: |
| 13051 | + return "casts.b\t%0"; |
| 13052 | + case 1: |
| 13053 | + return "bfexts\t%0, %1, 0, 8"; |
| 13054 | + case 2: |
| 13055 | + case 3: |
| 13056 | + return "ld.sb\t%0, %1"; |
| 13057 | + default: |
| 13058 | + abort(); |
| 13059 | + } |
| 13060 | + } |
| 13061 | + [(set_attr "length" "2,4,2,4") |
| 13062 | + (set_attr "cc" "set_ncz,set_ncz,none,none") |
| 13063 | + (set_attr "type" "alu,alu,load_rm,load_rm")]) |
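|  | +;; Informal examples for the sign-extension patterns above: |
|  | +;;   (int)(short)x, x already in the destination  ->  casts.h  rX |
|  | +;;   (int)(short)y, y in another register         ->  bfexts   rX, rY, 0, 16 |
|  | +;;   (int)*(short *)p                             ->  ld.sh    rX, <mem> |
|  | +;; The QImode variants use casts.b, "bfexts ..., 0, 8" and ld.sb in the |
|  | +;; same way. |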
| 13064 | + |
| 13065 | + |
| 13066 | +;;============================================================================= |
| 13067 | +;; Zero-extend |
| 13068 | +;;----------------------------------------------------------------------------- |
| 13069 | +;; Zero-extend QImode and HImode values to wider integer modes. |
| 13070 | +;;============================================================================= |
| 13071 | + |
| 13072 | +(define_insn "zero_extendhisi2" |
| 13073 | + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r") |
| 13074 | + (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))] |
| 13075 | + "" |
| 13076 | + { |
| 13077 | + switch ( which_alternative ){ |
| 13078 | + case 0: |
| 13079 | + return "castu.h\t%0"; |
| 13080 | + case 1: |
| 13081 | + return "bfextu\t%0, %1, 0, 16"; |
| 13082 | + case 2: |
| 13083 | + case 3: |
| 13084 | + return "ld.uh\t%0, %1"; |
| 13085 | + default: |
| 13086 | + abort(); |
| 13087 | + } |
| 13088 | + } |
| 13089 | + |
| 13090 | + [(set_attr "length" "2,4,2,4") |
| 13091 | + (set_attr "cc" "set_ncz,set_ncz,none,none") |
| 13092 | + (set_attr "type" "alu,alu,load_rm,load_rm")]) |
| 13093 | + |
| 13094 | +(define_insn "zero_extendqisi2" |
| 13095 | + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r") |
| 13096 | + (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))] |
| 13097 | + "" |
| 13098 | + { |
| 13099 | + switch ( which_alternative ){ |
| 13100 | + case 0: |
| 13101 | + return "castu.b\t%0"; |
| 13102 | + case 1: |
| 13103 | + return "bfextu\t%0, %1, 0, 8"; |
| 13104 | + case 2: |
| 13105 | + case 3: |
| 13106 | + return "ld.ub\t%0, %1"; |
| 13107 | + default: |
| 13108 | + abort(); |
| 13109 | + } |
| 13110 | + } |
| 13111 | + [(set_attr "length" "2,4,2,4") |
| 13112 | + (set_attr "cc" "set_ncz, set_ncz, none, none") |
| 13113 | + (set_attr "type" "alu, alu, load_rm, load_rm")]) |
| 13114 | + |
| 13115 | +(define_insn "zero_extendqihi2" |
| 13116 | + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r") |
| 13117 | + (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))] |
| 13118 | + "" |
| 13119 | + { |
| 13120 | + switch ( which_alternative ){ |
| 13121 | + case 0: |
| 13122 | + return "castu.b\t%0"; |
| 13123 | + case 1: |
| 13124 | + return "bfextu\t%0, %1, 0, 8"; |
| 13125 | + case 2: |
| 13126 | + case 3: |
| 13127 | + return "ld.ub\t%0, %1"; |
| 13128 | + default: |
| 13129 | + abort(); |
| 13130 | + } |
| 13131 | + } |
| 13132 | + [(set_attr "length" "2,4,2,4") |
| 13133 | + (set_attr "cc" "set_ncz, set_ncz, none, none") |
| 13134 | + (set_attr "type" "alu, alu, load_rm, load_rm")]) |
| 13135 | + |
| 13136 | + |
| 13137 | + |
| 13138 | +;;============================================================================= |
| 13139 | +;; Conditional set register |
| 13140 | +;; sr{cond4} rd |
| 13141 | +;;----------------------------------------------------------------------------- |
| 13142 | + |
| 13143 | +;; Because of the same issue as with conditional moves and adds, we must |
| 13144 | +;; not separate the compare instruction from the scc instruction, as |
| 13145 | +;; they might otherwise be scheduled badly. |
| 13146 | + |
| 13147 | +(define_expand "s<code>" |
| 13148 | + [(set (match_operand:SI 0 "register_operand" "") |
| 13149 | + (any_cond (cc0) |
| 13150 | + (const_int 0)))] |
| 13151 | + "" |
| 13152 | + { |
| 13153 | + if ( !avr32_expand_scc(<CODE>, operands) ){ |
| 13154 | + FAIL; |
| 13155 | + } |
| 13156 | + DONE; |
| 13157 | + } |
| 13158 | + ) |
| 13159 | + |
| 13160 | + |
| 13161 | +(define_insn "comparesi_and_set" |
| 13162 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 13163 | + (match_operator 1 "avr32_comparison_operator" |
| 13164 | + [ (compare (match_operand:SI 2 "register_operand" "r") |
| 13165 | + (match_operand:SI 3 "general_operand" "rKs06Ks21")) |
| 13166 | + (const_int 0)]))] |
| 13167 | + "" |
| 13168 | + { |
| 13169 | + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[2]), operands[2], operands[3]); |
| 13170 | + return "sr%1\t%0"; |
| 13171 | + } |
| 13172 | + [(set_attr "length" "6") |
| 13173 | + (set_attr "cc" "clobber")]) |
| 13174 | + |
| 13175 | +(define_insn "comparehi_and_set" |
| 13176 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 13177 | + (match_operator 1 "avr32_comparison_operator" |
| 13178 | + [ (compare (match_operand:HI 2 "register_operand" "r") |
| 13179 | + (match_operand:HI 3 "register_operand" "r")) |
| 13180 | + (const_int 0)]))] |
| 13181 | + "" |
| 13182 | + { |
| 13183 | + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[2]), operands[2], operands[3]); |
| 13184 | + return "sr%1\t%0"; |
| 13185 | + } |
| 13186 | + [(set_attr "length" "6") |
| 13187 | + (set_attr "cc" "clobber")]) |
| 13188 | + |
| 13189 | +(define_insn "compareqi_and_set" |
| 13190 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 13191 | + (match_operator 1 "avr32_comparison_operator" |
| 13192 | + [ (compare (match_operand:QI 2 "register_operand" "r") |
| 13193 | + (match_operand:QI 3 "register_operand" "r")) |
| 13194 | + (const_int 0)]))] |
| 13195 | + "" |
| 13196 | + { |
| 13197 | + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[2]), operands[2], operands[3]); |
| 13198 | + return "sr%1\t%0"; |
| 13199 | + } |
| 13200 | + [(set_attr "length" "6") |
| 13201 | + (set_attr "cc" "clobber")]) |
| 13202 | + |
| 13203 | +(define_insn "*comparedi_and_set" |
| 13204 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 13205 | + (match_operator 1 "avr32_comparison_operator" |
| 13206 | + [ (compare (match_operand:DI 2 "register_operand" "r") |
| 13207 | + (match_operand:DI 3 "register_operand" "r")) |
| 13208 | + (const_int 0)]))] |
| 13209 | + "" |
| 13210 | + { |
| 13211 | + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[2]), operands[2], operands[3]); |
| 13212 | + return "sr%1\t%0"; |
| 13213 | + } |
| 13214 | + [(set_attr "length" "6") |
| 13215 | + (set_attr "cc" "clobber")]) |
| 13216 | + |
| 13217 | +(define_insn "*tstdi_and_set" |
| 13218 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 13219 | + (match_operator 1 "avr32_comparison_operator" |
| 13220 | + [ (compare (match_operand:DI 2 "register_operand" "r") |
| 13221 | + (const_int 0)) |
| 13222 | + (const_int 0)]))] |
| 13223 | + "" |
| 13224 | + { |
| 13225 | + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[2]), operands[2], const0_rtx); |
| 13226 | + return "sr%1\t%0"; |
| 13227 | + } |
| 13228 | + [(set_attr "length" "6") |
| 13229 | + (set_attr "cc" "clobber")]) |
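|  | +;; Sketch of what the compare-and-set patterns above are meant to produce; |
|  | +;; keeping the compare and the conditional set in one insn prevents the |
|  | +;; scheduler from separating them.  For example |
|  | +;;   int less(int a, int b) { return a < b; } |
|  | +;; should become roughly |
|  | +;;   cp.w  ra, rb |
|  | +;;   srlt  rd |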
| 13230 | + |
| 13231 | + |
| 13232 | + |
| 13233 | +;;============================================================================= |
| 13234 | +;; Conditional branch |
| 13235 | +;;----------------------------------------------------------------------------- |
| 13236 | +;; Branch to label if the specified condition codes are set. |
| 13237 | +;;============================================================================= |
| 13238 | +; branch if negative |
| 13239 | +(define_insn "bmi" |
| 13240 | + [(set (pc) |
| 13241 | + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI) |
| 13242 | + (label_ref (match_operand 0 "" "")) |
| 13243 | + (pc)))] |
| 13244 | + "" |
| 13245 | + "brmi %0" |
| 13246 | + [(set_attr "type" "branch") |
| 13247 | + (set (attr "length") |
| 13248 | + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254)) |
| 13249 | + (le (minus (pc) (match_dup 0)) (const_int 256))) |
| 13250 | + (const_int 2)] ; use compact branch |
| 13251 | + (const_int 4))) ; use extended branch |
| 13252 | + (set_attr "cc" "none")]) |
| 13253 | + |
| 13254 | +(define_insn "*bmi-reverse" |
| 13255 | + [(set (pc) |
| 13256 | + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI) |
| 13257 | + (pc) |
| 13258 | + (label_ref (match_operand 0 "" ""))))] |
| 13259 | + "" |
| 13260 | + "brpl %0" |
| 13261 | + [(set_attr "type" "branch") |
| 13262 | + (set (attr "length") |
| 13263 | + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254)) |
| 13264 | + (le (minus (pc) (match_dup 0)) (const_int 256))) |
| 13265 | + (const_int 2)] ; use compact branch |
| 13266 | + (const_int 4))) ; use extended branch |
| 13267 | + (set_attr "cc" "none")]) |
| 13268 | + |
| 13269 | +; branch if positive |
| 13270 | +(define_insn "bpl" |
| 13271 | + [(set (pc) |
| 13272 | + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL) |
| 13273 | + (label_ref (match_operand 0 "" "")) |
| 13274 | + (pc)))] |
| 13275 | + "" |
| 13276 | + "brpl %0" |
| 13277 | + [(set_attr "type" "branch") |
| 13278 | + (set (attr "length") |
| 13279 | + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254)) |
| 13280 | + (le (minus (pc) (match_dup 0)) (const_int 256))) |
| 13281 | + (const_int 2)] ; use compact branch |
| 13282 | + (const_int 4))) ; use extended branch |
| 13283 | + (set_attr "cc" "none")]) |
| 13284 | + |
| 13285 | +(define_insn "*bpl-reverse" |
| 13286 | + [(set (pc) |
| 13287 | + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL) |
| 13288 | + (pc) |
| 13289 | + (label_ref (match_operand 0 "" ""))))] |
| 13290 | + "" |
| 13291 | + "brmi %0" |
| 13292 | + [(set_attr "type" "branch") |
| 13293 | + (set (attr "length") |
| 13294 | + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254)) |
| 13295 | + (le (minus (pc) (match_dup 0)) (const_int 256))) |
| 13296 | + (const_int 2)] ; use compact branch |
| 13297 | + (const_int 4))) ; use extended branch |
| 13298 | + (set_attr "cc" "none")]) |
| 13299 | + |
| 13300 | +; generic conditional branch (one pattern for each condition in any_cond) |
| 13301 | +(define_insn "b<code>" |
| 13302 | + [(set (pc) |
| 13303 | + (if_then_else (any_cond:CC (cc0) |
| 13304 | + (const_int 0)) |
| 13305 | + (label_ref (match_operand 0 "" "")) |
| 13306 | + (pc)))] |
| 13307 | + "" |
| 13308 | + "br<cond> %0 " |
| 13309 | + [(set_attr "type" "branch") |
| 13310 | + (set (attr "length") |
| 13311 | + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254)) |
| 13312 | + (le (minus (pc) (match_dup 0)) (const_int 256))) |
| 13313 | + (const_int 2)] ; use compact branch |
| 13314 | + (const_int 4))) ; use extended branch |
| 13315 | + (set_attr "cc" "none")]) |
| 13316 | + |
| 13317 | + |
| 13318 | +(define_insn "*b<code>-reverse" |
| 13319 | + [(set (pc) |
| 13320 | + (if_then_else (any_cond:CC (cc0) |
| 13321 | + (const_int 0)) |
| 13322 | + (pc) |
| 13323 | + (label_ref (match_operand 0 "" ""))))] |
| 13324 | + "" |
| 13325 | + "br<invcond> %0 " |
| 13326 | + [(set_attr "type" "branch") |
| 13327 | + (set (attr "length") |
| 13328 | + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254)) |
| 13329 | + (le (minus (pc) (match_dup 0)) (const_int 256))) |
| 13330 | + (const_int 2)] ; use compact branch |
| 13331 | + (const_int 4))) ; use extended branch |
| 13332 | + (set_attr "cc" "none")]) |
| 13333 | + |
| 13334 | + |
| 13335 | + |
| 13336 | +;;============================================================================= |
| 13337 | +;; Conditional Add/Subtract |
| 13338 | +;;----------------------------------------------------------------------------- |
| 13339 | +;; sub{cond4} Rd, imm |
| 13340 | +;;============================================================================= |
| 13341 | + |
| 13342 | + |
| 13343 | +(define_expand "add<mode>cc" |
| 13344 | + [(set (match_operand:ADDCC 0 "register_operand" "") |
| 13345 | + (if_then_else:ADDCC (match_operand 1 "avr32_comparison_operator" "") |
| 13346 | + (match_operand:ADDCC 2 "register_immediate_operand" "") |
| 13347 | + (match_operand:ADDCC 3 "register_immediate_operand" "")))] |
| 13348 | + "" |
| 13349 | + { |
| 13350 | + if ( avr32_expand_addcc(<MODE>mode, operands ) ) |
| 13351 | + DONE; |
| 13352 | + else |
| 13353 | + FAIL; |
| 13354 | + } |
| 13355 | + ) |
| 13356 | + |
| 13357 | + |
| 13358 | +(define_insn "add<ADDCC:mode>cc_cmp<CMP:mode>" |
| 13359 | + [(set (match_operand:ADDCC 0 "register_operand" "=&r") |
| 13360 | + (unspec:ADDCC [(match_operand 1 "avr32_comparison_operator" "") |
| 13361 | + (match_operand:ADDCC 2 "register_operand" "0") |
| 13362 | + (match_operand:ADDCC 3 "immediate_operand" "Ks08") |
| 13363 | + (match_operand:CMP 4 "register_operand" "r") |
| 13364 | + (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>") |
| 13365 | + ] |
| 13366 | + UNSPEC_ADDSICC ))] |
| 13367 | + "" |
| 13368 | + { |
| 13369 | + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]); |
| 13370 | + |
| 13371 | + return "sub%1\t%0, %3"; |
| 13372 | + } |
| 13373 | + [(set_attr "length" "8") |
| 13374 | + (set_attr "cc" "clobber")]) |
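|  | +;; Rough example: a conditional adjustment such as |
|  | +;;   if (a < b) x -= 1; |
|  | +;; can be routed through add<mode>cc into this pattern, giving roughly |
|  | +;;   cp.w  ra, rb |
|  | +;;   sublt rx, 1 |
|  | +;; (a conditional add of C is presumably handled by the expander as a |
|  | +;; conditional sub of -C, since only sub{cond} takes an immediate). |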
| 13375 | + |
| 13376 | + |
| 13377 | +;;============================================================================= |
| 13378 | +;; Conditional Move |
| 13379 | +;;----------------------------------------------------------------------------- |
| 13380 | +;; mov{cond4} Rd, (Rs/imm) |
| 13381 | +;;============================================================================= |
| 13382 | +(define_expand "mov<mode>cc" |
| 13383 | + [(set (match_operand:ADDCC 0 "register_operand" "") |
| 13384 | + (if_then_else:ADDCC (match_operand 1 "avr32_comparison_operator" "") |
| 13385 | + (match_operand:ADDCC 2 "register_immediate_operand" "") |
| 13386 | + (match_operand:ADDCC 3 "register_immediate_operand" "")))] |
| 13387 | + "" |
| 13388 | + { |
| 13389 | + if ( avr32_expand_movcc(<MODE>mode, operands ) ) |
| 13390 | + DONE; |
| 13391 | + else |
| 13392 | + FAIL; |
| 13393 | + } |
| 13394 | + ) |
| 13395 | + |
| 13396 | +(define_insn "mov<MOVCC:mode>cc_cmp<CMP:mode>" |
| 13397 | + [(set (match_operand:MOVCC 0 "register_operand" "=r,r,r") |
| 13398 | + (unspec:MOVCC [(match_operand 1 "avr32_comparison_operator" "") |
| 13399 | + (match_operand:MOVCC 2 "register_immediate_operand" "0,rKs08,rKs08") |
| 13400 | + (match_operand:MOVCC 3 "register_immediate_operand" "rKs08,0,rKs08") |
| 13401 | + (match_operand:CMP 4 "register_operand" "r, r, r") |
| 13402 | + (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>, <CMP:cmp_constraint>, <CMP:cmp_constraint>") |
| 13403 | + ] |
| 13404 | + UNSPEC_MOVSICC ))] |
| 13405 | + "" |
| 13406 | + { |
| 13407 | + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]); |
| 13408 | + |
| 13409 | + switch( which_alternative ){ |
| 13410 | + case 0: |
| 13411 | + return "mov%i1 %0, %3"; |
| 13412 | + case 1: |
| 13413 | + return "mov%1 %0, %2"; |
| 13414 | + case 2: |
| 13415 | + return "mov%1 %0, %2\;mov%i1 %0, %3"; |
| 13416 | + default: |
| 13417 | + abort(); |
| 13418 | + } |
| 13419 | + |
| 13420 | + |
| 13421 | + } |
| 13422 | + [(set_attr "length" "8,8,12") |
| 13423 | + (set_attr "cc" "clobber")]) |
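|  | +;; Rough example: the C ternary |
|  | +;;   x = (a < b) ? c : d; |
|  | +;; is expanded through mov<mode>cc into this pattern.  Assuming %1 prints |
|  | +;; the condition and %i1 its inverse, alternative 0 (c already in rx) gives |
|  | +;;   cp.w  ra, rb |
|  | +;;   movge rx, rd |
|  | +;; and the worst case (alternative 2) needs both a mov%1 and a mov%i1. |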
| 13424 | + |
| 13425 | + |
| 13426 | +;;============================================================================= |
| 13427 | +;; jump |
| 13428 | +;;----------------------------------------------------------------------------- |
| 13429 | +;; Jump inside a function; an unconditional branch to a label. |
| 13430 | +;;============================================================================= |
| 13431 | +(define_insn "jump" |
| 13432 | + [(set (pc) |
| 13433 | + (label_ref (match_operand 0 "" "")))] |
| 13434 | + "" |
| 13435 | + { |
| 13436 | + if (get_attr_length(insn) > 4) |
| 13437 | + return "Can't jump this far"; |
| 13438 | + return (get_attr_length(insn) == 2 ? |
| 13439 | + "rjmp %0" : "bral %0"); |
| 13440 | + } |
| 13441 | + [(set_attr "type" "branch") |
| 13442 | + (set (attr "length") |
| 13443 | + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 1022)) |
| 13444 | + (le (minus (pc) (match_dup 0)) (const_int 1024))) |
| 13445 | + (const_int 2) ; use rjmp |
| 13446 | + (le (match_dup 0) (const_int 1048575)) |
| 13447 | + (const_int 4)] ; use bral |
| 13448 | + (const_int 8))) ; do something else |
| 13449 | + (set_attr "cc" "none")]) |
| 13450 | + |
| 13451 | +;;============================================================================= |
| 13452 | +;; call |
| 13453 | +;;----------------------------------------------------------------------------- |
| 13454 | +;; Subroutine call instruction returning no value. |
| 13455 | +;;============================================================================= |
| 13456 | +(define_insn "call_internal" |
| 13457 | + [(parallel [(call (mem:SI (match_operand:SI 0 "avr32_call_operand" "r,U,T,W")) |
| 13458 | + (match_operand 1 "" "")) |
| 13459 | + (clobber (reg:SI LR_REGNUM))])] |
| 13460 | + "" |
| 13461 | + { |
| 13462 | + switch (which_alternative){ |
| 13463 | + case 0: |
| 13464 | + return "icall\t%0"; |
| 13465 | + case 1: |
| 13466 | + return "rcall\t%0"; |
| 13467 | + case 2: |
| 13468 | + return "mcall\t%0"; |
| 13469 | + case 3: |
| 13470 | + if ( TARGET_HAS_ASM_ADDR_PSEUDOS ) |
| 13471 | + return "call\t%0"; |
| 13472 | + else |
| 13473 | + return "mcall\tr6[%0@got]"; |
| 13474 | + default: |
| 13475 | + abort(); |
| 13476 | + } |
| 13477 | + } |
| 13478 | + [(set_attr "type" "call") |
| 13479 | + (set_attr "length" "2,4,4,10") |
| 13480 | + (set_attr "cc" "clobber")]) |
| 13481 | + |
| 13482 | + |
| 13483 | +(define_expand "call" |
| 13484 | + [(parallel [(call (match_operand:SI 0 "" "") |
| 13485 | + (match_operand 1 "" "")) |
| 13486 | + (clobber (reg:SI LR_REGNUM))])] |
| 13487 | + "" |
| 13488 | + { |
| 13489 | + rtx call_address; |
| 13490 | + if ( GET_CODE(operands[0]) != MEM ) |
| 13491 | + FAIL; |
| 13492 | + |
| 13493 | + call_address = XEXP(operands[0], 0); |
| 13494 | + |
| 13495 | + /* If assembler supports call pseudo insn and the call |
| 13496 | + address is a symbol then nothing special needs to be done. */ |
| 13497 | + if ( TARGET_HAS_ASM_ADDR_PSEUDOS |
| 13498 | + && (GET_CODE(call_address) == SYMBOL_REF) ){ |
| 13499 | + /* We must however mark the function as using the GOT if |
| 13500 | + flag_pic is set, since the call insn might turn into |
| 13501 | + a mcall using the GOT ptr register. */ |
| 13502 | + if ( flag_pic ){ |
| 13503 | + current_function_uses_pic_offset_table = 1; |
| 13504 | + emit_call_insn(gen_call_internal(call_address, operands[1])); |
| 13505 | + DONE; |
| 13506 | + } |
| 13507 | + } else { |
| 13508 | + if ( flag_pic && |
| 13509 | + GET_CODE(call_address) == SYMBOL_REF ){ |
| 13510 | + current_function_uses_pic_offset_table = 1; |
| 13511 | + emit_call_insn(gen_call_internal(call_address, operands[1])); |
| 13512 | + DONE; |
| 13513 | + } |
| 13514 | + |
| 13515 | + if ( !SYMBOL_REF_RCALL_FUNCTION_P(operands[0]) ){ |
| 13516 | + if ( optimize_size && |
| 13517 | + GET_CODE(call_address) == SYMBOL_REF ){ |
| 13518 | + call_address = force_const_mem(SImode, call_address); |
| 13519 | + } else { |
| 13520 | + call_address = force_reg(SImode, call_address); |
| 13521 | + } |
| 13522 | + } |
| 13523 | + } |
| 13524 | + emit_call_insn(gen_call_internal(call_address, operands[1])); |
| 13525 | + DONE; |
| 13526 | + } |
| 13527 | +) |
| 13528 | + |
| 13529 | +;;============================================================================= |
| 13530 | +;; call_value |
| 13531 | +;;----------------------------------------------------------------------------- |
| 13532 | +;; Subroutine call instruction returning a value. |
| 13533 | +;;============================================================================= |
| 13534 | +(define_expand "call_value" |
| 13535 | + [(parallel [(set (match_operand:SI 0 "" "") |
| 13536 | + (call (match_operand:SI 1 "" "") |
| 13537 | + (match_operand 2 "" ""))) |
| 13538 | + (clobber (reg:SI LR_REGNUM))])] |
| 13539 | + "" |
| 13540 | + { |
| 13541 | + rtx call_address; |
| 13542 | + if ( GET_CODE(operands[1]) != MEM ) |
| 13543 | + FAIL; |
| 13544 | + |
| 13545 | + call_address = XEXP(operands[1], 0); |
| 13546 | + |
| 13547 | + /* If assembler supports call pseudo insn and the call |
| 13548 | + address is a symbol then nothing special needs to be done. */ |
| 13549 | + if ( TARGET_HAS_ASM_ADDR_PSEUDOS |
| 13550 | + && (GET_CODE(call_address) == SYMBOL_REF) ){ |
| 13551 | + /* We must however mark the function as using the GOT if |
| 13552 | + flag_pic is set, since the call insn might turn into |
| 13553 | + a mcall using the GOT ptr register. */ |
| 13554 | + if ( flag_pic ) { |
| 13555 | + current_function_uses_pic_offset_table = 1; |
| 13556 | + emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2])); |
| 13557 | + DONE; |
| 13558 | + } |
| 13559 | + } else { |
| 13560 | + if ( flag_pic && |
| 13561 | + GET_CODE(call_address) == SYMBOL_REF ){ |
| 13562 | + current_function_uses_pic_offset_table = 1; |
| 13563 | + emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2])); |
| 13564 | + DONE; |
| 13565 | + } |
| 13566 | + |
| 13567 | + if ( !SYMBOL_REF_RCALL_FUNCTION_P(operands[1]) ){ |
| 13568 | + if ( optimize_size && |
| 13569 | + GET_CODE(call_address) == SYMBOL_REF){ |
| 13570 | + call_address = force_const_mem(SImode, call_address); |
| 13571 | + } else { |
| 13572 | + call_address = force_reg(SImode, call_address); |
| 13573 | + } |
| 13574 | + } |
| 13575 | + } |
| 13576 | + emit_call_insn(gen_call_value_internal(operands[0], call_address, |
| 13577 | + operands[2])); |
| 13578 | + DONE; |
| 13579 | + |
| 13580 | + }) |
| 13581 | + |
| 13582 | +(define_insn "call_value_internal" |
| 13583 | + [(parallel [(set (match_operand 0 "register_operand" "=r,r,r,r") |
| 13584 | + (call (mem:SI (match_operand:SI 1 "avr32_call_operand" "r,U,T,W")) |
| 13585 | + (match_operand 2 "" ""))) |
| 13586 | + (clobber (reg:SI LR_REGNUM))])] |
| 13587 | + ;; Operand 2 not used on the AVR32. |
| 13588 | + "" |
| 13589 | + { |
| 13590 | + switch (which_alternative){ |
| 13591 | + case 0: |
| 13592 | + return "icall\t%1"; |
| 13593 | + case 1: |
| 13594 | + return "rcall\t%1"; |
| 13595 | + case 2: |
| 13596 | + return "mcall\t%1"; |
| 13597 | + case 3: |
| 13598 | + if ( TARGET_HAS_ASM_ADDR_PSEUDOS ) |
| 13599 | + return "call\t%1"; |
| 13600 | + else |
| 13601 | + return "mcall\tr6[%1@got]"; |
| 13602 | + default: |
| 13603 | + abort(); |
| 13604 | + } |
| 13605 | + } |
| 13606 | + [(set_attr "type" "call") |
| 13607 | + (set_attr "length" "2,4,4,10") |
| 13608 | + (set_attr "cc" "call_set")]) |
| 13609 | + |
| 13610 | + |
| 13611 | +;;============================================================================= |
| 13612 | +;; untyped_call |
| 13613 | +;;----------------------------------------------------------------------------- |
| 13614 | +;; Subroutine call instruction returning a value of any type. |
| 13615 | +;; The code is copied from m68k.md. |
| 13616 | +;; Fixme! |
| 13617 | +;;============================================================================= |
| 13618 | +(define_expand "untyped_call" |
| 13619 | + [(parallel [(call (match_operand 0 "avr32_call_operand" "") |
| 13620 | + (const_int 0)) |
| 13621 | + (match_operand 1 "" "") |
| 13622 | + (match_operand 2 "" "")])] |
| 13623 | + "" |
| 13624 | + { |
| 13625 | + int i; |
| 13626 | + |
| 13627 | + emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx)); |
| 13628 | + |
| 13629 | + for (i = 0; i < XVECLEN (operands[2], 0); i++) { |
| 13630 | + rtx set = XVECEXP (operands[2], 0, i); |
| 13631 | + emit_move_insn (SET_DEST (set), SET_SRC (set)); |
| 13632 | + } |
| 13633 | + |
| 13634 | + /* The optimizer does not know that the call sets the function value |
| 13635 | + registers we stored in the result block. We avoid problems by |
| 13636 | + claiming that all hard registers are used and clobbered at this |
| 13637 | + point. */ |
| 13638 | + emit_insn (gen_blockage ()); |
| 13639 | + |
| 13640 | + DONE; |
| 13641 | + }) |
| 13642 | + |
| 13643 | + |
| 13644 | +;;============================================================================= |
| 13645 | +;; return |
| 13646 | +;;============================================================================= |
| 13647 | + |
| 13648 | +(define_insn "return" |
| 13649 | + [(return)] |
| 13650 | + "USE_RETURN_INSN (FALSE)" |
| 13651 | + { |
| 13652 | + avr32_output_return_instruction(TRUE, FALSE, NULL, NULL); |
| 13653 | + return ""; |
| 13654 | + } |
| 13655 | + [(set_attr "length" "4") |
| 13656 | + (set_attr "type" "call")] |
| 13657 | + ) |
| 13658 | + |
| 13659 | +(define_insn "*return_value_imm" |
| 13660 | + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i")) |
| 13661 | + (use (reg RETVAL_REGNUM)) |
| 13662 | + (return)])] |
| 13663 | + "USE_RETURN_INSN (FALSE) && |
| 13664 | + ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))" |
| 13665 | + { |
| 13666 | + avr32_output_return_instruction(TRUE, FALSE, NULL, operands[0]); |
| 13667 | + return ""; |
| 13668 | + } |
| 13669 | + [(set_attr "length" "4") |
| 13670 | + (set_attr "type" "call")] |
| 13671 | + ) |
| 13672 | + |
| 13673 | +(define_insn "*return_value_si" |
| 13674 | + [(set (reg RETVAL_REGNUM) (match_operand:SI 0 "register_operand" "r")) |
| 13675 | + (use (reg RETVAL_REGNUM)) |
| 13676 | + (return)] |
| 13677 | + "USE_RETURN_INSN (TRUE)" |
| 13678 | + "retal %0"; |
| 13679 | + [(set_attr "type" "call")] |
| 13680 | + ) |
| 13681 | + |
| 13682 | +(define_insn "*return_value_hi" |
| 13683 | + [(parallel [(set (reg RETVAL_REGNUM) (match_operand:HI 0 "register_operand" "r")) |
| 13684 | + (use (reg RETVAL_REGNUM)) |
| 13685 | + (return)])] |
| 13686 | + "USE_RETURN_INSN (TRUE)" |
| 13687 | + "retal %0" |
| 13688 | + [(set_attr "type" "call")] |
| 13689 | + ) |
| 13690 | + |
| 13691 | +(define_insn "*return_value_qi" |
| 13692 | + [(parallel [(set (reg RETVAL_REGNUM) (match_operand:QI 0 "register_operand" "r")) |
| 13693 | + (use (reg RETVAL_REGNUM)) |
| 13694 | + (return)])] |
| 13695 | + "USE_RETURN_INSN (TRUE)" |
| 13696 | + "retal %0" |
| 13697 | + [(set_attr "type" "call")] |
| 13698 | + ) |
| 13699 | + |
| 13700 | +;;============================================================================= |
| 13701 | +;; nop |
| 13702 | +;;----------------------------------------------------------------------------- |
| 13703 | +;; No-op instruction. |
| 13704 | +;;============================================================================= |
| 13705 | +(define_insn "nop" |
| 13706 | + [(const_int 0)] |
| 13707 | + "" |
| 13708 | + "nop" |
| 13709 | + [(set_attr "length" "2") |
| 13710 | + (set_attr "type" "alu") |
| 13711 | + (set_attr "cc" "none")]) |
| 13712 | + |
| 13713 | +;;============================================================================= |
| 13714 | +;; nonlocal_goto |
| 13715 | +;;----------------------------------------------------------------------------- |
| 13716 | +;; Jump from one function to a label in an outer function. |
| 13717 | +;; Must invalidate the return stack, since the function will be exited |
| 13718 | +;; without a return. |
| 13719 | +;;============================================================================= |
| 13720 | +(define_expand "nonlocal_goto" |
| 13721 | + [(use (match_operand 0 "" "")) |
| 13722 | + (use (match_operand 1 "" "")) |
| 13723 | + (use (match_operand 2 "" "")) |
| 13724 | + (use (match_operand 3 "" ""))] |
| 13725 | + "" |
| 13726 | + { |
| 13727 | + emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__nonlocal_goto"), |
| 13728 | + 0, VOIDmode, 3, |
| 13729 | + operands[0], SImode, |
| 13730 | + operands[1], Pmode, |
| 13731 | + operands[2], SImode); |
| 13732 | + |
| 13733 | + DONE; |
| 13734 | + } |
| 13735 | +) |
| 13736 | + |
| 13737 | + |
| 13738 | +(define_expand "builtin_longjmp" |
| 13739 | + [(use (match_operand 0 "" ""))] |
| 13740 | + "" |
| 13741 | + { |
| 13742 | + rtx ops[3]; |
| 13743 | + |
| 13744 | + ops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS(SImode, operands[0], gen_rtx_CONST_INT(SImode,0))); |
| 13745 | + ops[1] = gen_rtx_MEM (Pmode, gen_rtx_PLUS(SImode, operands[0], gen_rtx_CONST_INT(SImode,4))); |
| 13746 | + ops[2] = gen_rtx_MEM (Pmode, gen_rtx_PLUS(SImode, operands[0], gen_rtx_CONST_INT(SImode,8))); |
| 13747 | + |
| 13748 | + |
| 13749 | + emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__nonlocal_goto"), |
| 13750 | + 0, VOIDmode, 3, |
| 13751 | + ops[0], SImode, |
| 13752 | + ops[1], Pmode, |
| 13753 | + ops[2], SImode); |
| 13754 | + |
| 13755 | + DONE; |
| 13756 | + } |
| 13757 | + ) |
| 13758 | + |
| 13759 | + |
| 13760 | +;;============================================================================= |
| 13761 | +;; indirect_jump |
| 13762 | +;;----------------------------------------------------------------------------- |
| 13763 | +;; Jump to an address in reg or memory. |
| 13764 | +;;============================================================================= |
| 13765 | +(define_expand "indirect_jump" |
| 13766 | + [(set (pc) |
| 13767 | + (match_operand:SI 0 "general_operand" "r,m"))] |
| 13768 | + "" |
| 13769 | + { |
| 13770 | + /* One of the ops has to be in a register. */ |
| 13771 | + if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS ) |
| 13772 | + && !avr32_legitimate_pic_operand_p(operands[0]) ) |
| 13773 | + operands[0] = legitimize_pic_address (operands[0], SImode, 0); |
| 13774 | + else if ( flag_pic && avr32_address_operand(operands[0], GET_MODE(operands[0])) ) |
| 13775 | + /* If we have an address operand then this function uses the pic register. */ |
| 13776 | + current_function_uses_pic_offset_table = 1; |
| 13777 | + }) |
| 13778 | + |
| 13779 | + |
| 13780 | +(define_insn "indirect_jump_internal" |
| 13781 | + [(set (pc) |
| 13782 | + (match_operand:SI 0 "general_operand" "r,m,W"))] |
| 13783 | + "" |
| 13784 | + { |
| 13785 | + switch( which_alternative ){ |
| 13786 | + case 0: |
| 13787 | + return "mov\tpc, %0"; |
| 13788 | + case 1: |
| 13789 | + if ( avr32_const_pool_ref_operand(operands[0], GET_MODE(operands[0])) ) |
| 13790 | + return "lddpc\tpc, %0"; |
| 13791 | + else |
| 13792 | + return "ld.w\tpc, %0"; |
| 13793 | + case 2: |
| 13794 | + if ( flag_pic ) |
| 13795 | + return "ld.w\tpc, r6[%0@got]"; |
| 13796 | + else |
| 13797 | + return "lda.w\tpc, %0"; |
| 13798 | + default: |
| 13799 | + abort(); |
| 13800 | + } |
| 13801 | + } |
| 13802 | + [(set_attr "length" "2,4,8") |
| 13803 | + (set_attr "type" "call,call,call") |
| 13804 | + (set_attr "cc" "none,none,clobber")]) |
| 13805 | + |
| 13806 | + |
| 13807 | +;;============================================================================= |
| 13808 | +;; casesi |
| 13809 | +;;============================================================================= |
| 13810 | + |
| 13811 | + |
| 13812 | +(define_expand "casesi" |
| 13813 | + [(match_operand:SI 0 "register_operand" "") ; index to jump on |
| 13814 | + (match_operand:SI 1 "const_int_operand" "") ; lower bound |
| 13815 | + (match_operand:SI 2 "const_int_operand" "") ; total range |
| 13816 | + (match_operand:SI 3 "" "") ; table label |
| 13817 | + (match_operand:SI 4 "" "")] ; Out of range label |
| 13818 | + "" |
| 13819 | + " |
| 13820 | + { |
| 13821 | + rtx reg; |
| 13822 | + if (operands[1] != const0_rtx) |
| 13823 | + { |
| 13824 | + if (!avr32_const_ok_for_constraint_p(INTVAL (operands[1]), 'I', \"Is21\")){ |
| 13825 | + reg = force_reg(SImode, GEN_INT (INTVAL (operands[1]))); |
| 13826 | + emit_insn (gen_subsi3 (reg, operands[0], |
| 13827 | + reg)); |
| 13828 | + } else { |
| 13829 | + reg = gen_reg_rtx (SImode); |
| 13830 | + emit_insn (gen_addsi3 (reg, operands[0], |
| 13831 | + GEN_INT (-INTVAL (operands[1])))); |
| 13832 | + } |
| 13833 | + operands[0] = reg; |
| 13834 | + } |
| 13835 | + |
| 13836 | + if (!avr32_const_ok_for_constraint_p(INTVAL (operands[2]), 'K', \"Ks21\")) |
| 13837 | + operands[2] = force_reg (SImode, operands[2]); |
| 13838 | + |
| 13839 | + emit_jump_insn (gen_casesi_internal (operands[0], operands[2], operands[3], |
| 13840 | + operands[4], gen_reg_rtx(SImode))); |
| 13841 | + DONE; |
| 13842 | + }" |
| 13843 | +) |
| 13844 | + |
| 13845 | +;; The USE in this pattern is needed to tell flow analysis that this is |
| 13846 | +;; a CASESI insn. It has no other purpose. |
| 13847 | +(define_insn "casesi_internal" |
| 13848 | + [(parallel [(set (pc) |
| 13849 | + (if_then_else |
| 13850 | + (leu (match_operand:SI 0 "register_operand" "r") |
| 13851 | + (match_operand:SI 1 "register_immediate_operand" "rKu03")) |
| 13852 | + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4)) |
| 13853 | + (label_ref (match_operand 2 "" "")))) |
| 13854 | + (label_ref (match_operand 3 "" "")))) |
| 13855 | + (clobber (match_operand:SI 4 "register_operand" "=r")) |
| 13856 | + (use (label_ref (match_dup 2)))])] |
| 13857 | + "" |
| 13858 | + { |
| 13859 | + if (flag_pic) |
| 13860 | + return "cp.w\t%0, %1\;brhi\t%3\;sub\t%4, pc, -(%2 - .)\;add\tpc, %4, %0 << 2"; |
| 13861 | + return "cp.w\t%0, %1\;brhi\t%3\;sub\t%4, pc, -(%2 - .)\;ld.w\tpc, %4[%0 << 2]"; |
| 13862 | + } |
| 13863 | + [(set_attr "cc" "clobber") |
| 13864 | + (set_attr "length" "16")] |
| 13865 | +) |
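|  | +;; Informal sketch: a dense switch such as |
|  | +;;   switch (i) { case 0: ... case 7: ... } |
|  | +;; goes through casesi above and, in the non-PIC case, ends up as |
|  | +;;   cp.w  ri, 7                       (range check against operand 1) |
|  | +;;   brhi  .Ldefault |
|  | +;;   sub   rt, pc, -(.Ltable - .) |
|  | +;;   ld.w  pc, rt[ri << 2]             (indexed jump through the table) |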
| 13866 | + |
| 13867 | + |
| 13868 | +(define_insn "prefetch" |
| 13869 | + [(prefetch (match_operand:SI 0 "register_operand" "r") |
| 13870 | + (match_operand 1 "const_int_operand" "") |
| 13871 | + (match_operand 2 "const_int_operand" ""))] |
| 13872 | + "" |
| 13873 | + { |
| 13874 | + return "pref\t%0[0]"; |
| 13875 | + } |
| 13876 | + |
| 13877 | + [(set_attr "length" "4") |
| 13878 | + (set_attr "type" "load") |
| 13879 | + (set_attr "cc" "none")]) |
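|  | +;; Example: __builtin_prefetch (p) simply becomes "pref rP[0]"; the |
|  | +;; read/write and locality hints (operands 1 and 2) are accepted but |
|  | +;; ignored. |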
| 13880 | + |
| 13881 | + |
| 13882 | + |
| 13883 | +;;============================================================================= |
| 13884 | +;; prologue |
| 13885 | +;;----------------------------------------------------------------------------- |
| 13886 | +;; This pattern, if defined, emits RTL for entry to a function. The function |
| 13887 | +;; entry is responsible for setting up the stack frame, initializing the frame |
| 13888 | +;; pointer register, saving callee-saved registers, etc. |
| 13889 | +;;============================================================================= |
| 13890 | +(define_expand "prologue" |
| 13891 | + [(clobber (const_int 0))] |
| 13892 | + "" |
| 13893 | + " |
| 13894 | + avr32_expand_prologue(); |
| 13895 | + DONE; |
| 13896 | + " |
| 13897 | + ) |
| 13898 | + |
| 13899 | +;;============================================================================= |
| 13900 | +;; eh_return |
| 13901 | +;;----------------------------------------------------------------------------- |
| 13902 | +;; This pattern, if defined, affects the way __builtin_eh_return, and |
| 13903 | +;; thence the call frame exception handling library routines, are |
| 13904 | +;; built. It is intended to handle non-trivial actions needed along |
| 13905 | +;; the abnormal return path. |
| 13906 | +;; |
| 13907 | +;; The address of the exception handler to which the function should |
| 13908 | +;; return is passed as an operand to this pattern. It will normally need |
| 13909 | +;; to be copied by the pattern to some special register or memory |
| 13910 | +;; location. If the pattern needs to determine the location of the |
| 13911 | +;; target call frame in order to do so, it may use |
| 13912 | +;; EH_RETURN_STACKADJ_RTX, if defined; it will have already been |
| 13913 | +;; assigned. |
| 13914 | +;; |
| 13915 | +;; If this pattern is not defined, the default action will be to |
| 13916 | +;; simply copy the return address to EH_RETURN_HANDLER_RTX. Either |
| 13917 | +;; that macro or this pattern needs to be defined if call frame |
| 13918 | +;; exception handling is to be used. |
| 13919 | +(define_expand "eh_return" |
| 13920 | + [(use (match_operand 0 "general_operand" ""))] |
| 13921 | + "" |
| 13922 | + " |
| 13923 | + avr32_set_return_address (operands[0]); |
| 13924 | + DONE; |
| 13925 | + " |
| 13926 | + ) |
| 13927 | + |
| 13928 | +;;============================================================================= |
| 13929 | +;; ffssi2 |
| 13930 | +;;----------------------------------------------------------------------------- |
| 13931 | +(define_insn "ffssi2" |
| 13932 | + [ (set (match_operand:SI 0 "register_operand" "=r") |
| 13933 | + (ffs:SI (match_operand:SI 1 "register_operand" "r"))) ] |
| 13934 | + "" |
| 13935 | + "mov %0, %1 |
| 13936 | + brev %0 |
| 13937 | + clz %0, %0 |
| 13938 | + sub %0, -1 |
| 13939 | + cp %0, 33 |
| 13940 | + moveq %0, 0" |
| 13941 | + [(set_attr "length" "18") |
| 13942 | + (set_attr "cc" "clobber")] |
| 13943 | + ) |
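|  | +;; Worked example for the sequence above (ffs(x) is 1 + the index of the |
|  | +;; least significant set bit, or 0 when x == 0): for x = 0b100, brev moves |
|  | +;; bit 2 to bit 29, clz then returns 2 and "sub %0, -1" yields 3.  For |
|  | +;; x == 0, clz is assumed to return 32, so the intermediate result is 33 |
|  | +;; and the final "cp %0, 33; moveq %0, 0" folds it back to 0. |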
| 13944 | + |
| 13945 | + |
| 13946 | + |
| 13947 | +;;============================================================================= |
| 13948 | +;; swap_h |
| 13949 | +;;----------------------------------------------------------------------------- |
| 13950 | +(define_insn "*swap_h" |
| 13951 | + [ (set (match_operand:SI 0 "register_operand" "=r") |
| 13952 | + (ior:SI (ashift:SI (match_dup 0) (const_int 16)) |
| 13953 | + (lshiftrt:SI (match_dup 0) (const_int 16))))] |
| 13954 | + "" |
| 13955 | + "swap.h %0" |
| 13956 | + [(set_attr "length" "2")] |
| 13957 | + ) |
| 13958 | + |
| 13959 | +(define_insn_and_split "bswap_16" |
| 13960 | + [ (set (match_operand:HI 0 "avr32_bswap_operand" "=r,RKs13,r") |
| 13961 | + (ior:HI (and:HI (lshiftrt:HI (match_operand:HI 1 "avr32_bswap_operand" "r,r,RKs13") |
| 13962 | + (const_int 8)) |
| 13963 | + (const_int 255)) |
| 13964 | + (ashift:HI (and:HI (match_dup 1) |
| 13965 | + (const_int 255)) |
| 13966 | + (const_int 8))))] |
| 13967 | + "" |
| 13968 | + { |
| 13969 | + switch ( which_alternative ){ |
| 13970 | + case 0: |
| 13971 | + if ( REGNO(operands[0]) == REGNO(operands[1])) |
| 13972 | + return "swap.bh\t%0"; |
| 13973 | + else |
| 13974 | + return "mov\t%0, %1\;swap.bh\t%0"; |
| 13975 | + case 1: |
| 13976 | + return "stswp.h\t%0, %1"; |
| 13977 | + case 2: |
| 13978 | + return "ldswp.sh\t%0, %1"; |
| 13979 | + default: |
| 13980 | + abort(); |
| 13981 | + } |
| 13982 | + } |
| 13983 | + |
| 13984 | + "(reload_completed && |
| 13985 | + REG_P(operands[0]) && REG_P(operands[1]) |
| 13986 | + && (REGNO(operands[0]) != REGNO(operands[1])))" |
| 13987 | + [(set (match_dup 0) (match_dup 1)) |
| 13988 | + (set (match_dup 0) |
| 13989 | + (ior:HI (and:HI (lshiftrt:HI (match_dup 0) |
| 13990 | + (const_int 8)) |
| 13991 | + (const_int 255)) |
| 13992 | + (ashift:HI (and:HI (match_dup 0) |
| 13993 | + (const_int 255)) |
| 13994 | + (const_int 8))))] |
| 13995 | + "" |
| 13996 | + |
| 13997 | + [(set_attr "length" "4,4,4") |
| 13998 | + (set_attr "type" "alu,store,load_rm")] |
| 13999 | + ) |
| 14000 | + |
| 14001 | +(define_insn_and_split "bswap_32" |
| 14002 | + [ (set (match_operand:SI 0 "avr32_bswap_operand" "=r,RKs14,r") |
| 14003 | + (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_operand:SI 1 "avr32_bswap_operand" "r,r,RKs14") |
| 14004 | + (const_int 4278190080)) |
| 14005 | + (const_int 24)) |
| 14006 | + (lshiftrt:SI (and:SI (match_dup 1) |
| 14007 | + (const_int 16711680)) |
| 14008 | + (const_int 8))) |
| 14009 | + (ior:SI (ashift:SI (and:SI (match_dup 1) |
| 14010 | + (const_int 65280)) |
| 14011 | + (const_int 8)) |
| 14012 | + (ashift:SI (and:SI (match_dup 1) |
| 14013 | + (const_int 255)) |
| 14014 | + (const_int 24)))))] |
| 14015 | + "" |
| 14016 | + { |
| 14017 | + switch ( which_alternative ){ |
| 14018 | + case 0: |
| 14019 | + if ( REGNO(operands[0]) == REGNO(operands[1])) |
| 14020 | + return "swap.b\t%0"; |
| 14021 | + else |
| 14022 | + return "mov\t%0, %1\;swap.b\t%0"; |
| 14023 | + case 1: |
| 14024 | + return "stswp.w\t%0, %1"; |
| 14025 | + case 2: |
| 14026 | + return "ldswp.w\t%0, %1"; |
| 14027 | + default: |
| 14028 | + abort(); |
| 14029 | + } |
| 14030 | + } |
| 14031 | + "(reload_completed && |
| 14032 | + REG_P(operands[0]) && REG_P(operands[1]) |
| 14033 | + && (REGNO(operands[0]) != REGNO(operands[1])))" |
| 14034 | + [(set (match_dup 0) (match_dup 1)) |
| 14035 | + (set (match_dup 0) |
| 14036 | + (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_dup 0) |
| 14037 | + (const_int 4278190080)) |
| 14038 | + (const_int 24)) |
| 14039 | + (lshiftrt:SI (and:SI (match_dup 0) |
| 14040 | + (const_int 16711680)) |
| 14041 | + (const_int 8))) |
| 14042 | + (ior:SI (ashift:SI (and:SI (match_dup 0) |
| 14043 | + (const_int 65280)) |
| 14044 | + (const_int 8)) |
| 14045 | + (ashift:SI (and:SI (match_dup 0) |
| 14046 | + (const_int 255)) |
| 14047 | + (const_int 24)))))] |
| 14048 | + "" |
| 14049 | + |
| 14050 | + [(set_attr "length" "4,4,4") |
| 14051 | + (set_attr "type" "alu,store,load_rm")] |
| 14052 | + ) |
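bswap_16 and bswap_32 are not standard pattern names; their RTL spells out the shift-and-mask byte-swap idiom, presumably so that combine can collapse open-coded swaps into swap.bh/swap.b (or the load/store forms with swapped byte order). A sketch of the source-level idiom the bswap_32 template describes, not taken from the patch:

```c
#include <stdint.h>

/* The canonical open-coded byte swap; the masks match the constants in the
   bswap_32 RTL above (0xff000000, 0x00ff0000, 0x0000ff00, 0x000000ff).  */
uint32_t swap32 (uint32_t x)
{
  return ((x & 0xff000000u) >> 24)
       | ((x & 0x00ff0000u) >>  8)
       | ((x & 0x0000ff00u) <<  8)
       | ((x & 0x000000ffu) << 24);
}
```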
| 14053 | + |
| 14054 | + |
| 14055 | +;;============================================================================= |
| 14056 | +;; blockage |
| 14057 | +;;----------------------------------------------------------------------------- |
| 14058 | +;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and |
| 14059 | +;; all of memory. This blocks insns from being moved across this point. |
| 14060 | + |
| 14061 | +(define_insn "blockage" |
| 14062 | + [(unspec_volatile [(const_int 0)] VUNSPEC_BLOCKAGE)] |
| 14063 | + "" |
| 14064 | + "" |
| 14065 | + [(set_attr "length" "0")] |
| 14066 | +) |
| 14067 | + |
| 14068 | +;;============================================================================= |
| 14069 | +;; clzsi2 |
| 14070 | +;;----------------------------------------------------------------------------- |
| 14071 | +(define_insn "clzsi2" |
| 14072 | + [ (set (match_operand:SI 0 "register_operand" "=r") |
| 14073 | + (clz:SI (match_operand:SI 1 "register_operand" "r"))) ] |
| 14074 | + "" |
| 14075 | + "clz %0, %1" |
| 14076 | + [(set_attr "length" "4") |
| 14077 | + (set_attr "cc" "set_z")] |
| 14078 | + ) |
| 14079 | + |
| 14080 | +;;============================================================================= |
| 14081 | +;; ctzsi2 |
| 14082 | +;;----------------------------------------------------------------------------- |
| 14083 | +(define_insn "ctzsi2" |
| 14084 | + [ (set (match_operand:SI 0 "register_operand" "=r,r") |
| 14085 | + (ctz:SI (match_operand:SI 1 "register_operand" "0,r"))) ] |
| 14086 | + "" |
| 14087 | + "@ |
| 14088 | + brev\t%0\;clz\t%0, %0 |
| 14089 | + mov\t%0, %1\;brev\t%0\;clz\t%0, %0" |
| 14090 | + [(set_attr "length" "8") |
| 14091 | + (set_attr "cc" "set_z")] |
| 14092 | + ) |
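clzsi2 maps one-to-one onto the clz instruction, while ctzsi2 has no direct hardware equivalent and is synthesised as a bit reverse followed by clz. Both standard patterns back the corresponding builtins; a short sketch, not part of the patch:

```c
/* Both builtins are undefined for x == 0, which is why neither pattern
   needs a zero check.  */
unsigned leading_zeros (unsigned x)   { return __builtin_clz (x); }
unsigned trailing_zeros (unsigned x)  { return __builtin_ctz (x); }
```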
| 14093 | + |
| 14094 | +;;============================================================================= |
| 14095 | +;; cache instructions |
| 14096 | +;;----------------------------------------------------------------------------- |
| 14097 | +(define_insn "cache" |
| 14098 | + [ (unspec_volatile [(match_operand:SI 0 "register_operand" "r") |
| 14099 | + (match_operand:SI 1 "immediate_operand" "Ku05")] VUNSPEC_CACHE)] |
| 14100 | + "" |
| 14101 | + "cache %0[0], %1" |
| 14102 | + [(set_attr "length" "4")] |
| 14103 | + ) |
| 14104 | + |
| 14105 | +(define_insn "sync" |
| 14106 | + [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku08")] VUNSPEC_SYNC)] |
| 14107 | + "" |
| 14108 | + "sync %0" |
| 14109 | + [(set_attr "length" "4")] |
| 14110 | + ) |
| 14111 | + |
| 14112 | +;;============================================================================= |
| 14113 | +;; TLB instructions |
| 14114 | +;;----------------------------------------------------------------------------- |
| 14115 | +(define_insn "tlbr" |
| 14116 | + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBR)] |
| 14117 | + "" |
| 14118 | + "tlbr" |
| 14119 | + [(set_attr "length" "2")] |
| 14120 | + ) |
| 14121 | + |
| 14122 | +(define_insn "tlbw" |
| 14123 | + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBW)] |
| 14124 | + "" |
| 14125 | + "tlbw" |
| 14126 | + [(set_attr "length" "2")] |
| 14127 | + ) |
| 14128 | + |
| 14129 | +(define_insn "tlbs" |
| 14130 | + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBS)] |
| 14131 | + "" |
| 14132 | + "tlbs" |
| 14133 | + [(set_attr "length" "2")] |
| 14134 | + ) |
| 14135 | + |
| 14136 | +;;============================================================================= |
| 14137 | +;; Breakpoint instruction |
| 14138 | +;;----------------------------------------------------------------------------- |
| 14139 | +(define_insn "breakpoint" |
| 14140 | + [ (unspec_volatile [(const_int 0)] VUNSPEC_BREAKPOINT)] |
| 14141 | + "" |
| 14142 | + "breakpoint" |
| 14143 | + [(set_attr "length" "2")] |
| 14144 | + ) |
| 14145 | + |
| 14146 | +;;============================================================================= |
| 14147 | +;; Xchg instruction |
| 14148 | +;;----------------------------------------------------------------------------- |
| 14149 | +(define_insn "xchg" |
| 14150 | + [ (parallel [(set (match_operand:SI 0 "register_operand" "=&r") |
| 14151 | + (mem:SI (match_operand:SI 1 "register_operand" "r"))) |
| 14152 | + (set (mem:SI (match_operand:SI 2 "register_operand" "1"))
| 14153 | + (match_operand:SI 3 "register_operand" "r"))])] |
| 14154 | + "" |
| 14155 | + "xchg\t%0, %1, %3" |
| 14156 | + [(set_attr "length" "4")] |
| 14157 | + ) |
| 14158 | + |
| 14159 | +;;============================================================================= |
| 14160 | +;; mtsr/mfsr instruction |
| 14161 | +;;----------------------------------------------------------------------------- |
| 14162 | +(define_insn "mtsr" |
| 14163 | + [ (unspec_volatile [(match_operand 0 "immediate_operand" "i") |
| 14164 | + (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTSR)] |
| 14165 | + "" |
| 14166 | + "mtsr\t%0, %1" |
| 14167 | + [(set_attr "length" "4")] |
| 14168 | + ) |
| 14169 | + |
| 14170 | +(define_insn "mfsr" |
| 14171 | + [ (set (match_operand:SI 0 "register_operand" "=r") |
| 14172 | + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFSR)) ] |
| 14173 | + "" |
| 14174 | + "mfsr\t%0, %1" |
| 14175 | + [(set_attr "length" "4")] |
| 14176 | + ) |
| 14177 | + |
| 14178 | +;;============================================================================= |
| 14179 | +;; mtdr/mfdr instruction |
| 14180 | +;;----------------------------------------------------------------------------- |
| 14181 | +(define_insn "mtdr" |
| 14182 | + [ (unspec_volatile [(match_operand 0 "immediate_operand" "i") |
| 14183 | + (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTDR)] |
| 14184 | + "" |
| 14185 | + "mtdr\t%0, %1" |
| 14186 | + [(set_attr "length" "4")] |
| 14187 | + ) |
| 14188 | + |
| 14189 | +(define_insn "mfdr" |
| 14190 | + [ (set (match_operand:SI 0 "register_operand" "=r") |
| 14191 | + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFDR)) ] |
| 14192 | + "" |
| 14193 | + "mfdr\t%0, %1" |
| 14194 | + [(set_attr "length" "4")] |
| 14195 | + ) |
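The mtsr/mfsr and mtdr/mfdr patterns move data between general registers and system or debug registers, with the register address supplied as a compile-time immediate. They are presumably reached through machine-specific builtins defined elsewhere in the port; the builtin name and register offset below are assumptions used purely for illustration:

```c
/* Hypothetical sketch: the builtin name mirrors the pattern name, and 0x108
   is an assumed system-register offset, not something this hunk defines.
   The argument must be a constant so it can be encoded in the mfsr insn.  */
static inline unsigned long read_system_register_108 (void)
{
  return __builtin_mfsr (0x108);
}
```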
| 14196 | + |
| 14197 | +;;============================================================================= |
| 14198 | +;; musfr |
| 14199 | +;;----------------------------------------------------------------------------- |
| 14200 | +(define_insn "musfr" |
| 14201 | + [ (unspec_volatile [(match_operand:SI 0 "register_operand" "r")] VUNSPEC_MUSFR)] |
| 14202 | + "" |
| 14203 | + "musfr\t%0" |
| 14204 | + [(set_attr "length" "2") |
| 14205 | + (set_attr "cc" "clobber")] |
| 14206 | + ) |
| 14207 | + |
| 14208 | +(define_insn "mustr" |
| 14209 | + [ (set (match_operand:SI 0 "register_operand" "=r") |
| 14210 | + (unspec_volatile:SI [(const_int 0)] VUNSPEC_MUSTR)) ] |
| 14211 | + "" |
| 14212 | + "mustr\t%0" |
| 14213 | + [(set_attr "length" "2")] |
| 14214 | + ) |
| 14215 | + |
| 14216 | +;;============================================================================= |
| 14217 | +;; Saturation Round Scale instruction |
| 14218 | +;;----------------------------------------------------------------------------- |
| 14219 | +(define_insn "sats" |
| 14220 | + [ (set (match_operand:SI 0 "register_operand" "+r") |
| 14221 | + (unspec:SI [(match_dup 0) |
| 14222 | + (match_operand 1 "immediate_operand" "Ku05") |
| 14223 | + (match_operand 2 "immediate_operand" "Ku05")] |
| 14224 | + UNSPEC_SATS)) ] |
| 14225 | + "TARGET_DSP" |
| 14226 | + "sats\t%0 >> %1, %2" |
| 14227 | + [(set_attr "type" "alu_sat") |
| 14228 | + (set_attr "length" "4")] |
| 14229 | + ) |
| 14230 | + |
| 14231 | +(define_insn "satu" |
| 14232 | + [ (set (match_operand:SI 0 "register_operand" "+r") |
| 14233 | + (unspec:SI [(match_dup 0) |
| 14234 | + (match_operand 1 "immediate_operand" "Ku05") |
| 14235 | + (match_operand 2 "immediate_operand" "Ku05")] |
| 14236 | + UNSPEC_SATU)) ] |
| 14237 | + "TARGET_DSP" |
| 14238 | + "satu\t%0 >> %1, %2" |
| 14239 | + [(set_attr "type" "alu_sat") |
| 14240 | + (set_attr "length" "4")] |
| 14241 | + ) |
| 14242 | + |
| 14243 | +(define_insn "satrnds" |
| 14244 | + [ (set (match_operand:SI 0 "register_operand" "+r") |
| 14245 | + (unspec:SI [(match_dup 0) |
| 14246 | + (match_operand 1 "immediate_operand" "Ku05") |
| 14247 | + (match_operand 2 "immediate_operand" "Ku05")] |
| 14248 | + UNSPEC_SATRNDS)) ] |
| 14249 | + "TARGET_DSP" |
| 14250 | + "satrnds\t%0 >> %1, %2" |
| 14251 | + [(set_attr "type" "alu_sat") |
| 14252 | + (set_attr "length" "4")] |
| 14253 | + ) |
| 14254 | + |
| 14255 | +(define_insn "satrndu" |
| 14256 | + [ (set (match_operand:SI 0 "register_operand" "+r") |
| 14257 | + (unspec:SI [(match_dup 0) |
| 14258 | + (match_operand 1 "immediate_operand" "Ku05") |
| 14259 | + (match_operand 2 "immediate_operand" "Ku05")] |
| 14260 | + UNSPEC_SATRNDU)) ] |
| 14261 | + "TARGET_DSP" |
| 14262 | + "sats\t%0 >> %1, %2" |
| 14263 | + [(set_attr "type" "alu_sat") |
| 14264 | + (set_attr "length" "4")] |
| 14265 | + ) |
| 14266 | + |
| 14267 | +;; Special patterns for dealing with the constant pool |
| 14268 | + |
| 14269 | +(define_insn "align_4" |
| 14270 | + [(unspec_volatile [(const_int 0)] VUNSPEC_ALIGN)] |
| 14271 | + "" |
| 14272 | + { |
| 14273 | + assemble_align (32); |
| 14274 | + return ""; |
| 14275 | + } |
| 14276 | + [(set_attr "length" "2")] |
| 14277 | +) |
| 14278 | + |
| 14279 | +(define_insn "consttable_start" |
| 14280 | + [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_START)] |
| 14281 | + "" |
| 14282 | + { |
| 14283 | + return ".cpool"; |
| 14284 | + } |
| 14285 | + [(set_attr "length" "0")] |
| 14286 | + ) |
| 14287 | + |
| 14288 | +(define_insn "consttable_end" |
| 14289 | + [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_END)] |
| 14290 | + "" |
| 14291 | + { |
| 14292 | + making_const_table = FALSE; |
| 14293 | + return ""; |
| 14294 | + } |
| 14295 | + [(set_attr "length" "0")] |
| 14296 | +) |
| 14297 | + |
| 14298 | + |
| 14299 | +(define_insn "consttable_4" |
| 14300 | + [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_4)] |
| 14301 | + "" |
| 14302 | + { |
| 14303 | + making_const_table = TRUE; |
| 14304 | + switch (GET_MODE_CLASS (GET_MODE (operands[0]))) |
| 14305 | + { |
| 14306 | + case MODE_FLOAT: |
| 14307 | + { |
| 14308 | + REAL_VALUE_TYPE r; |
| 14309 | + char real_string[1024]; |
| 14310 | + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]); |
| 14311 | + real_to_decimal(real_string, &r, 1024, 0, 1); |
| 14312 | + asm_fprintf (asm_out_file, "\t.float\t%s\n", real_string); |
| 14313 | + break; |
| 14314 | + } |
| 14315 | + default: |
| 14316 | + assemble_integer (operands[0], 4, 0, 1); |
| 14317 | + break; |
| 14318 | + } |
| 14319 | + return ""; |
| 14320 | + } |
| 14321 | + [(set_attr "length" "4")] |
| 14322 | +) |
| 14323 | + |
| 14324 | +(define_insn "consttable_8" |
| 14325 | + [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_8)] |
| 14326 | + "" |
| 14327 | + { |
| 14328 | + making_const_table = TRUE; |
| 14329 | + switch (GET_MODE_CLASS (GET_MODE (operands[0]))) |
| 14330 | + { |
| 14331 | + case MODE_FLOAT: |
| 14332 | + { |
| 14333 | + REAL_VALUE_TYPE r; |
| 14334 | + char real_string[1024]; |
| 14335 | + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]); |
| 14336 | + real_to_decimal(real_string, &r, 1024, 0, 1); |
| 14337 | + asm_fprintf (asm_out_file, "\t.double\t%s\n", real_string); |
| 14338 | + break; |
| 14339 | + } |
| 14340 | + default: |
| 14341 | + assemble_integer(operands[0], 8, 0, 1); |
| 14342 | + break; |
| 14343 | + } |
| 14344 | + return ""; |
| 14345 | + } |
| 14346 | + [(set_attr "length" "8")] |
| 14347 | +) |
| 14348 | + |
| 14349 | +;;============================================================================= |
| 14350 | +;; coprocessor instructions |
| 14351 | +;;----------------------------------------------------------------------------- |
| 14352 | +(define_insn "cop" |
| 14353 | + [ (unspec_volatile [(match_operand 0 "immediate_operand" "Ku03") |
| 14354 | + (match_operand 1 "immediate_operand" "Ku04") |
| 14355 | + (match_operand 2 "immediate_operand" "Ku04") |
| 14356 | + (match_operand 3 "immediate_operand" "Ku04") |
| 14357 | + (match_operand 4 "immediate_operand" "Ku07")] VUNSPEC_COP)] |
| 14358 | + "" |
| 14359 | + "cop\tcp%0, cr%1, cr%2, cr%3, %4" |
| 14360 | + [(set_attr "length" "4")] |
| 14361 | + ) |
| 14362 | + |
| 14363 | +(define_insn "mvcrsi" |
| 14364 | + [ (set (match_operand:SI 0 "avr32_cop_move_operand" "=r,<,Z") |
| 14365 | + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03") |
| 14366 | + (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")] |
| 14367 | + VUNSPEC_MVCR)) ] |
| 14368 | + "" |
| 14369 | + "@ |
| 14370 | + mvcr.w\tcp%1, %0, cr%2 |
| 14371 | + stcm.w\tcp%1, %0, cr%2 |
| 14372 | + stc.w\tcp%1, %0, cr%2" |
| 14373 | + [(set_attr "length" "4")] |
| 14374 | + ) |
| 14375 | + |
| 14376 | +(define_insn "mvcrdi" |
| 14377 | + [ (set (match_operand:DI 0 "avr32_cop_move_operand" "=r,<,Z") |
| 14378 | + (unspec_volatile:DI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03") |
| 14379 | + (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")] |
| 14380 | + VUNSPEC_MVCR)) ] |
| 14381 | + "" |
| 14382 | + "@ |
| 14383 | + mvcr.d\tcp%1, %0, cr%2 |
| 14384 | + stcm.d\tcp%1, %0, cr%2-cr%i2 |
| 14385 | + stc.d\tcp%1, %0, cr%2" |
| 14386 | + [(set_attr "length" "4")] |
| 14387 | + ) |
| 14388 | + |
| 14389 | +(define_insn "mvrcsi" |
| 14390 | + [ (unspec_volatile:SI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03") |
| 14391 | + (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04") |
| 14392 | + (match_operand:SI 2 "avr32_cop_move_operand" "r,>,Z")] |
| 14393 | + VUNSPEC_MVRC)] |
| 14394 | + "" |
| 14395 | + { |
| 14396 | + switch (which_alternative){ |
| 14397 | + case 0: |
| 14398 | + return "mvrc.w\tcp%0, cr%1, %2"; |
| 14399 | + case 1: |
| 14400 | + return "ldcm.w\tcp%0, %2, cr%1"; |
| 14401 | + case 2: |
| 14402 | + return "ldc.w\tcp%0, cr%1, %2"; |
| 14403 | + default: |
| 14404 | + abort(); |
| 14405 | + } |
| 14406 | + } |
| 14407 | + [(set_attr "length" "4")] |
| 14408 | + ) |
| 14409 | + |
| 14410 | +(define_insn "mvrcdi" |
| 14411 | + [ (unspec_volatile:DI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03") |
| 14412 | + (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04") |
| 14413 | + (match_operand:DI 2 "avr32_cop_move_operand" "r,>,Z")] |
| 14414 | + VUNSPEC_MVRC)] |
| 14415 | + "" |
| 14416 | + { |
| 14417 | + switch (which_alternative){ |
| 14418 | + case 0: |
| 14419 | + return "mvrc.d\tcp%0, cr%1, %2"; |
| 14420 | + case 1: |
| 14421 | + return "ldcm.d\tcp%0, %2, cr%1-cr%i1"; |
| 14422 | + case 2: |
| 14423 | + return "ldc.d\tcp%0, cr%1, %2"; |
| 14424 | + default: |
| 14425 | + abort(); |
| 14426 | + } |
| 14427 | + } |
| 14428 | + [(set_attr "length" "4")] |
| 14429 | + ) |
| 14430 | + |
| 14431 | +;;============================================================================= |
| 14432 | +;; epilogue |
| 14433 | +;;----------------------------------------------------------------------------- |
| 14434 | +;; This pattern emits RTL for exit from a function. The function exit is |
| 14435 | +;; responsible for deallocating the stack frame, restoring callee-saved
| 14436 | +;; registers and emitting the return instruction. |
| 14437 | +;; ToDo: use TARGET_ASM_FUNCTION_EPILOGUE instead.
| 14438 | +;;============================================================================= |
| 14439 | +(define_expand "epilogue" |
| 14440 | + [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)] |
| 14441 | + "" |
| 14442 | + " |
| 14443 | + if (USE_RETURN_INSN (FALSE)){ |
| 14444 | + emit_jump_insn (gen_return ()); |
| 14445 | + DONE; |
| 14446 | + } |
| 14447 | + emit_jump_insn (gen_rtx_UNSPEC_VOLATILE (VOIDmode, |
| 14448 | + gen_rtvec (1, |
| 14449 | + gen_rtx_RETURN (VOIDmode)), |
| 14450 | + VUNSPEC_EPILOGUE)); |
| 14451 | + DONE; |
| 14452 | + " |
| 14453 | + ) |
| 14454 | + |
| 14455 | +(define_insn "*epilogue_insns" |
| 14456 | + [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)] |
| 14457 | + "" |
| 14458 | + { |
| 14459 | + avr32_output_return_instruction (FALSE, FALSE, NULL, NULL); |
| 14460 | + return ""; |
| 14461 | + } |
| 14462 | + ; Length is absolute worst case |
| 14463 | + [(set_attr "type" "branch") |
| 14464 | + (set_attr "length" "12")] |
| 14465 | + ) |
| 14466 | + |
| 14467 | +(define_insn "*epilogue_insns_ret_imm" |
| 14468 | + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i")) |
| 14469 | + (use (reg RETVAL_REGNUM)) |
| 14470 | + (unspec_volatile [(return)] VUNSPEC_EPILOGUE)])] |
| 14471 | + "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))" |
| 14472 | + { |
| 14473 | + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]); |
| 14474 | + return ""; |
| 14475 | + } |
| 14476 | + ; Length is absolute worst case |
| 14477 | + [(set_attr "type" "branch") |
| 14478 | + (set_attr "length" "12")] |
| 14479 | + ) |
| 14480 | + |
| 14481 | +(define_insn "sibcall_epilogue" |
| 14482 | + [(unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)] |
| 14483 | + "" |
| 14484 | + { |
| 14485 | + avr32_output_return_instruction (FALSE, FALSE, NULL, NULL); |
| 14486 | + return ""; |
| 14487 | + } |
| 14488 | +;; Length is absolute worst case |
| 14489 | + [(set_attr "type" "branch") |
| 14490 | + (set_attr "length" "12")] |
| 14491 | + ) |
| 14492 | + |
| 14493 | +(define_insn "*sibcall_epilogue_insns_ret_imm" |
| 14494 | + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i")) |
| 14495 | + (use (reg RETVAL_REGNUM)) |
| 14496 | + (unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)])] |
| 14497 | + "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))" |
| 14498 | + { |
| 14499 | + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]); |
| 14500 | + return ""; |
| 14501 | + } |
| 14502 | + ; Length is absolute worst case |
| 14503 | + [(set_attr "type" "branch") |
| 14504 | + (set_attr "length" "12")] |
| 14505 | + ) |
| 14506 | + |
| 14507 | +(define_insn "ldxi" |
| 14508 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 14509 | + (mem:SI (plus:SI |
| 14510 | + (match_operand:SI 1 "register_operand" "r") |
| 14511 | + (mult:SI (zero_extract:SI (match_operand:SI 2 "register_operand" "r") |
| 14512 | + (const_int 8) |
| 14513 | + (match_operand:SI 3 "immediate_operand" "Ku05")) |
| 14514 | + (const_int 4)))))] |
| 14515 | + "(INTVAL(operands[3]) == 24 || INTVAL(operands[3]) == 16 || INTVAL(operands[3]) == 8 |
| 14516 | + || INTVAL(operands[3]) == 0)" |
| 14517 | + { |
| 14518 | + switch ( INTVAL(operands[3]) ){ |
| 14519 | + case 0: |
| 14520 | + return "ld.w %0, %1[%2:b << 2]"; |
| 14521 | + case 8: |
| 14522 | + return "ld.w %0, %1[%2:l << 2]"; |
| 14523 | + case 16: |
| 14524 | + return "ld.w %0, %1[%2:u << 2]"; |
| 14525 | + case 24: |
| 14526 | + return "ld.w %0, %1[%2:t << 2]"; |
| 14527 | + default: |
| 14528 | + internal_error("illegal operand for ldxi"); |
| 14529 | + } |
| 14530 | + } |
| 14531 | + [(set_attr "type" "load") |
| 14532 | + (set_attr "length" "4") |
| 14533 | + (set_attr "cc" "none")]) |
| 14534 | + |
| 14535 | + |
| 14536 | + |
| 14537 | + |
| 14538 | + |
| 14539 | + |
| 14540 | +;;============================================================================= |
| 14541 | +;; Peephole optimizing |
| 14542 | +;;----------------------------------------------------------------------------- |
| 14543 | +;; Changing |
| 14544 | +;; sub r8, r7, 8 |
| 14545 | +;; st.w r8[0x0], r12 |
| 14546 | +;; to |
| 14547 | +;; sub r8, r7, 8 |
| 14548 | +;; st.w r7[-0x8], r12 |
| 14549 | +;;============================================================================= |
| 14550 | +; (set (reg:SI 9 r8) |
| 14551 | +; (plus:SI (reg/f:SI 6 r7) |
| 14552 | +; (const_int ...))) |
| 14553 | +; (set (mem:SI (reg:SI 9 r8)) |
| 14554 | +; (reg:SI 12 r12)) |
| 14555 | +(define_peephole2 |
| 14556 | + [(set (match_operand:SI 0 "register_operand" "") |
| 14557 | + (plus:SI (match_operand:SI 1 "register_operand" "") |
| 14558 | + (match_operand:SI 2 "immediate_operand" ""))) |
| 14559 | + (set (mem:SI (match_dup 0)) |
| 14560 | + (match_operand:SI 3 "register_operand" ""))] |
| 14561 | + "REGNO(operands[0]) != REGNO(operands[1]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks16\")" |
| 14562 | + [(set (match_dup 0) |
| 14563 | + (plus:SI (match_dup 1) |
| 14564 | + (match_dup 2))) |
| 14565 | + (set (mem:SI (plus:SI (match_dup 1) |
| 14566 | + (match_dup 2))) |
| 14567 | + (match_dup 3))] |
| 14568 | + "") |
| 14569 | + |
| 14570 | +;;============================================================================= |
| 14571 | +;; Peephole optimizing |
| 14572 | +;;----------------------------------------------------------------------------- |
| 14573 | +;; Changing |
| 14574 | +;; sub r6, r7, 4 |
| 14575 | +;; ld.w r6, r6[0x0] |
| 14576 | +;; to |
| 14577 | +;; sub r6, r7, 4 |
| 14578 | +;; ld.w r6, r7[-0x4] |
| 14579 | +;;============================================================================= |
| 14580 | +; (set (reg:SI 7 r6) |
| 14581 | +; (plus:SI (reg/f:SI 6 r7) |
| 14582 | +; (const_int -4 [0xfffffffc]))) |
| 14583 | +; (set (reg:SI 7 r6) |
| 14584 | +; (mem:SI (reg:SI 7 r6))) |
| 14585 | +(define_peephole2 |
| 14586 | + [(set (match_operand:SI 0 "register_operand" "") |
| 14587 | + (plus:SI (match_operand:SI 1 "register_operand" "") |
| 14588 | + (match_operand:SI 2 "immediate_operand" ""))) |
| 14589 | + (set (match_operand:SI 3 "register_operand" "") |
| 14590 | + (mem:SI (match_dup 0)))] |
| 14591 | + "REGNO(operands[0]) != REGNO(operands[1]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks16\")" |
| 14592 | + [(set (match_dup 0) |
| 14593 | + (plus:SI (match_dup 1) |
| 14594 | + (match_dup 2))) |
| 14595 | + (set (match_dup 3) |
| 14596 | + (mem:SI (plus:SI (match_dup 1) |
| 14597 | + (match_dup 2))))] |
| 14598 | + "") |
| 14599 | + |
| 14600 | +;;============================================================================= |
| 14601 | +;; Peephole optimizing |
| 14602 | +;;----------------------------------------------------------------------------- |
| 14603 | +;; Changing |
| 14604 | +;; ld.sb r0, r7[-0x6] |
| 14605 | +;; casts.b r0
| 14606 | +;; to |
| 14607 | +;; ld.sb r0, r7[-0x6] |
| 14608 | +;;============================================================================= |
| 14609 | +(define_peephole2 |
| 14610 | + [(set (match_operand:QI 0 "register_operand" "") |
| 14611 | + (match_operand:QI 1 "load_sb_memory_operand" "")) |
| 14612 | + (set (match_operand:SI 2 "register_operand" "") |
| 14613 | + (sign_extend:SI (match_dup 0)))] |
| 14614 | + "(REGNO(operands[0]) == REGNO(operands[2]) || peep2_reg_dead_p(2, operands[0]))" |
| 14615 | + [(set (match_dup 2) |
| 14616 | + (sign_extend:SI (match_dup 1)))] |
| 14617 | + "") |
| 14618 | + |
| 14619 | +;;============================================================================= |
| 14620 | +;; Peephole optimizing |
| 14621 | +;;----------------------------------------------------------------------------- |
| 14622 | +;; Changing |
| 14623 | +;; ld.ub r0, r7[-0x6] |
| 14624 | +;; castu.b r0
| 14625 | +;; to |
| 14626 | +;; ld.ub r0, r7[-0x6] |
| 14627 | +;;============================================================================= |
| 14628 | +(define_peephole2 |
| 14629 | + [(set (match_operand:QI 0 "register_operand" "") |
| 14630 | + (match_operand:QI 1 "memory_operand" "")) |
| 14631 | + (set (match_operand:SI 2 "register_operand" "") |
| 14632 | + (zero_extend:SI (match_dup 0)))] |
| 14633 | + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])" |
| 14634 | + [(set (match_dup 2) |
| 14635 | + (zero_extend:SI (match_dup 1)))] |
| 14636 | + "") |
| 14637 | + |
| 14638 | +;;============================================================================= |
| 14639 | +;; Peephole optimizing |
| 14640 | +;;----------------------------------------------------------------------------- |
| 14641 | +;; Changing |
| 14642 | +;; ld.sh r0, r7[-0x6] |
| 14643 | +;; casts.h r0 |
| 14644 | +;; to |
| 14645 | +;; ld.sh r0, r7[-0x6] |
| 14646 | +;;============================================================================= |
| 14647 | +(define_peephole2 |
| 14648 | + [(set (match_operand:HI 0 "register_operand" "") |
| 14649 | + (match_operand:HI 1 "memory_operand" "")) |
| 14650 | + (set (match_operand:SI 2 "register_operand" "") |
| 14651 | + (sign_extend:SI (match_dup 0)))] |
| 14652 | + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])" |
| 14653 | + [(set (match_dup 2) |
| 14654 | + (sign_extend:SI (match_dup 1)))] |
| 14655 | + "") |
| 14656 | + |
| 14657 | +;;============================================================================= |
| 14658 | +;; Peephole optimizing |
| 14659 | +;;----------------------------------------------------------------------------- |
| 14660 | +;; Changing |
| 14661 | +;; ld.uh r0, r7[-0x6] |
| 14662 | +;; castu.h r0 |
| 14663 | +;; to |
| 14664 | +;; ld.uh r0, r7[-0x6] |
| 14665 | +;;============================================================================= |
| 14666 | +(define_peephole2 |
| 14667 | + [(set (match_operand:HI 0 "register_operand" "") |
| 14668 | + (match_operand:HI 1 "memory_operand" "")) |
| 14669 | + (set (match_operand:SI 2 "register_operand" "") |
| 14670 | + (zero_extend:SI (match_dup 0)))] |
| 14671 | + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])" |
| 14672 | + [(set (match_dup 2) |
| 14673 | + (zero_extend:SI (match_dup 1)))] |
| 14674 | + "") |
| 14675 | + |
| 14676 | +;;============================================================================= |
| 14677 | +;; Peephole optimizing |
| 14678 | +;;----------------------------------------------------------------------------- |
| 14679 | +;; Changing |
| 14680 | +;; mul rd, rx, ry |
| 14681 | +;; add rd2, rd |
| 14682 | +;; to |
| 14683 | +;; mac rd2, rx, ry |
| 14684 | +;;============================================================================= |
| 14685 | +(define_peephole2 |
| 14686 | + [(set (match_operand:SI 0 "register_operand" "") |
| 14687 | + (mult:SI (match_operand:SI 1 "register_operand" "") |
| 14688 | + (match_operand:SI 2 "register_operand" ""))) |
| 14689 | + (set (match_operand:SI 3 "register_operand" "") |
| 14690 | + (plus:SI (match_dup 3) |
| 14691 | + (match_dup 0)))] |
| 14692 | + "peep2_reg_dead_p(2, operands[0])" |
| 14693 | + [(set (match_dup 3) |
| 14694 | + (plus:SI (mult:SI (match_dup 1) |
| 14695 | + (match_dup 2)) |
| 14696 | + (match_dup 3)))] |
| 14697 | + "") |
| 14698 | + |
| 14699 | + |
| 14700 | + |
| 14701 | +;;============================================================================= |
| 14702 | +;; Peephole optimizing |
| 14703 | +;;----------------------------------------------------------------------------- |
| 14704 | +;; Changing |
| 14705 | +;; bfextu rd, rs, k5, 1 or and(h/l) rd, one_bit_set_mask |
| 14706 | +;; to |
| 14707 | +;; bld rs, k5 |
| 14708 | +;; |
| 14709 | +;; If rd is dead after the operation. |
| 14710 | +;;============================================================================= |
| 14711 | +(define_peephole2 |
| 14712 | + [ (set (match_operand:SI 0 "register_operand" "") |
| 14713 | + (zero_extract:SI (match_operand:SI 1 "register_operand" "") |
| 14714 | + (const_int 1) |
| 14715 | + (match_operand:SI 2 "immediate_operand" ""))) |
| 14716 | + (set (cc0) |
| 14717 | + (match_dup 0))] |
| 14718 | + "peep2_reg_dead_p(2, operands[0])" |
| 14719 | + [(set (cc0) |
| 14720 | + (and:SI (match_dup 1) |
| 14721 | + (match_dup 2)))] |
| 14722 | + "operands[2] = GEN_INT(1 << INTVAL(operands[2]));") |
| 14723 | + |
| 14724 | +(define_peephole2 |
| 14725 | + [ (set (match_operand:SI 0 "register_operand" "") |
| 14726 | + (and:SI (match_operand:SI 1 "register_operand" "") |
| 14727 | + (match_operand:SI 2 "one_bit_set_operand" ""))) |
| 14728 | + (set (cc0) |
| 14729 | + (match_dup 0))] |
| 14730 | + "peep2_reg_dead_p(2, operands[0])" |
| 14731 | + [(set (cc0) |
| 14732 | + (and:SI (match_dup 1) |
| 14733 | + (match_dup 2)))] |
| 14734 | + "") |
| 14735 | + |
| 14736 | +;;============================================================================= |
| 14737 | +;; Peephole optimizing |
| 14738 | +;;----------------------------------------------------------------------------- |
| 14739 | +;; Load with extracted index: ld.w Rd, Rb[Ri:{t/u/b/l} << 2] |
| 14740 | +;; |
| 14741 | +;;============================================================================= |
| 14742 | + |
| 14743 | + |
| 14744 | +(define_peephole |
| 14745 | + [(set (match_operand:SI 0 "register_operand" "") |
| 14746 | + (zero_extract:SI (match_operand:SI 1 "register_operand" "") |
| 14747 | + (const_int 8) |
| 14748 | + (match_operand:SI 2 "avr32_extract_shift_operand" ""))) |
| 14749 | + (set (match_operand:SI 3 "register_operand" "") |
| 14750 | + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4)) |
| 14751 | + (match_operand:SI 4 "register_operand" ""))))] |
| 14752 | + |
| 14753 | + "(dead_or_set_p(insn, operands[0]))" |
| 14754 | + { |
| 14755 | + switch ( INTVAL(operands[2]) ){ |
| 14756 | + case 0: |
| 14757 | + return "ld.w %3, %4[%1:b << 2]"; |
| 14758 | + case 8: |
| 14759 | + return "ld.w %3, %4[%1:l << 2]"; |
| 14760 | + case 16: |
| 14761 | + return "ld.w %3, %4[%1:u << 2]"; |
| 14762 | + case 24: |
| 14763 | + return "ld.w %3, %4[%1:t << 2]"; |
| 14764 | + default: |
| 14765 | + internal_error("illegal operand for ldxi"); |
| 14766 | + } |
| 14767 | + } |
| 14768 | + [(set_attr "type" "load") |
| 14769 | + (set_attr "length" "4") |
| 14770 | + (set_attr "cc" "clobber")] |
| 14771 | + ) |
| 14772 | + |
| 14773 | + |
| 14774 | + |
| 14775 | +(define_peephole |
| 14776 | + [(set (match_operand:SI 0 "register_operand" "") |
| 14777 | + (and:SI (match_operand:SI 1 "register_operand" "") (const_int 255))) |
| 14778 | + (set (match_operand:SI 2 "register_operand" "") |
| 14779 | + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4)) |
| 14780 | + (match_operand:SI 3 "register_operand" ""))))] |
| 14781 | + |
| 14782 | + "(dead_or_set_p(insn, operands[0]))" |
| 14783 | + |
| 14784 | + "ld.w %2, %3[%1:b << 2]" |
| 14785 | + [(set_attr "type" "load") |
| 14786 | + (set_attr "length" "4") |
| 14787 | + (set_attr "cc" "clobber")] |
| 14788 | + ) |
| 14789 | + |
| 14790 | + |
| 14791 | +(define_peephole2 |
| 14792 | + [(set (match_operand:SI 0 "register_operand" "") |
| 14793 | + (zero_extract:SI (match_operand:SI 1 "register_operand" "") |
| 14794 | + (const_int 8) |
| 14795 | + (match_operand:SI 2 "avr32_extract_shift_operand" ""))) |
| 14796 | + (set (match_operand:SI 3 "register_operand" "") |
| 14797 | + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4)) |
| 14798 | + (match_operand:SI 4 "register_operand" ""))))] |
| 14799 | + |
| 14800 | + "(peep2_reg_dead_p(2, operands[0])) |
| 14801 | + || (REGNO(operands[0]) == REGNO(operands[3]))" |
| 14802 | + [(set (match_dup 3) |
| 14803 | + (mem:SI (plus:SI |
| 14804 | + (match_dup 4) |
| 14805 | + (mult:SI (zero_extract:SI (match_dup 1) |
| 14806 | + (const_int 8) |
| 14807 | + (match_dup 2)) |
| 14808 | + (const_int 4)))))] |
| 14809 | + ) |
| 14810 | + |
| 14811 | +(define_peephole2 |
| 14812 | + [(set (match_operand:SI 0 "register_operand" "") |
| 14813 | + (zero_extend:SI (match_operand:QI 1 "register_operand" ""))) |
| 14814 | + (set (match_operand:SI 2 "register_operand" "") |
| 14815 | + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4)) |
| 14816 | + (match_operand:SI 3 "register_operand" ""))))] |
| 14817 | + |
| 14818 | + "(peep2_reg_dead_p(2, operands[0])) |
| 14819 | + || (REGNO(operands[0]) == REGNO(operands[2]))" |
| 14820 | + [(set (match_dup 2) |
| 14821 | + (mem:SI (plus:SI |
| 14822 | + (match_dup 3) |
| 14823 | + (mult:SI (zero_extract:SI (match_dup 1) |
| 14824 | + (const_int 8) |
| 14825 | + (const_int 0)) |
| 14826 | + (const_int 4)))))] |
| 14827 | + "operands[1] = gen_rtx_REG(SImode, REGNO(operands[1]));" |
| 14828 | + ) |
| 14829 | + |
| 14830 | + |
| 14831 | +(define_peephole2 |
| 14832 | + [(set (match_operand:SI 0 "register_operand" "") |
| 14833 | + (and:SI (match_operand:SI 1 "register_operand" "") |
| 14834 | + (const_int 255))) |
| 14835 | + (set (match_operand:SI 2 "register_operand" "") |
| 14836 | + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4)) |
| 14837 | + (match_operand:SI 3 "register_operand" ""))))] |
| 14838 | + |
| 14839 | + "(peep2_reg_dead_p(2, operands[0])) |
| 14840 | + || (REGNO(operands[0]) == REGNO(operands[2]))" |
| 14841 | + [(set (match_dup 2) |
| 14842 | + (mem:SI (plus:SI |
| 14843 | + (match_dup 3) |
| 14844 | + (mult:SI (zero_extract:SI (match_dup 1) |
| 14845 | + (const_int 8) |
| 14846 | + (const_int 0)) |
| 14847 | + (const_int 4)))))] |
| 14848 | + "" |
| 14849 | + ) |
| 14850 | + |
| 14851 | + |
| 14852 | + |
| 14853 | +(define_peephole2 |
| 14854 | + [(set (match_operand:SI 0 "register_operand" "") |
| 14855 | + (lshiftrt:SI (match_operand:SI 1 "register_operand" "") |
| 14856 | + (const_int 24))) |
| 14857 | + (set (match_operand:SI 2 "register_operand" "") |
| 14858 | + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4)) |
| 14859 | + (match_operand:SI 3 "register_operand" ""))))] |
| 14860 | + |
| 14861 | + "(peep2_reg_dead_p(2, operands[0])) |
| 14862 | + || (REGNO(operands[0]) == REGNO(operands[2]))" |
| 14863 | + [(set (match_dup 2) |
| 14864 | + (mem:SI (plus:SI |
| 14865 | + (match_dup 3) |
| 14866 | + (mult:SI (zero_extract:SI (match_dup 1) |
| 14867 | + (const_int 8) |
| 14868 | + (const_int 24)) |
| 14869 | + (const_int 4)))))] |
| 14870 | + "" |
| 14871 | + ) |
| 14872 | + |
| 14873 | + |
| 14874 | +;;************************************************ |
| 14875 | +;; ANDN |
| 14876 | +;; |
| 14877 | +;;************************************************ |
| 14878 | + |
| 14879 | + |
| 14880 | +(define_peephole2 |
| 14881 | + [(set (match_operand:SI 0 "register_operand" "") |
| 14882 | + (not:SI (match_operand:SI 1 "register_operand" ""))) |
| 14883 | + (set (match_operand:SI 2 "register_operand" "") |
| 14884 | + (and:SI (match_dup 2) |
| 14885 | + (match_dup 0)))] |
| 14886 | + "peep2_reg_dead_p(2, operands[0])" |
| 14887 | + |
| 14888 | + [(set (match_dup 2) |
| 14889 | + (and:SI (match_dup 2) |
| 14890 | + (not:SI (match_dup 1)) |
| 14891 | + ))] |
| 14892 | + "" |
| 14893 | +) |
| 14894 | + |
| 14895 | +(define_peephole2 |
| 14896 | + [(set (match_operand:SI 0 "register_operand" "") |
| 14897 | + (not:SI (match_operand:SI 1 "register_operand" ""))) |
| 14898 | + (set (match_operand:SI 2 "register_operand" "") |
| 14899 | + (and:SI (match_dup 0) |
| 14900 | + (match_dup 2) |
| 14901 | + ))] |
| 14902 | + "peep2_reg_dead_p(2, operands[0])" |
| 14903 | + |
| 14904 | + [(set (match_dup 2) |
| 14905 | + (and:SI (match_dup 2) |
| 14906 | + (not:SI (match_dup 1)) |
| 14907 | + ))] |
| 14908 | + |
| 14909 | + "" |
| 14910 | +) |
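Both ANDN peepholes target the clear-bits idiom, where the complement is materialised in a scratch register that dies right after the and; the rewritten RTL lets a single andn be emitted instead. In source form, a sketch:

```c
/* a & ~b: a com/and pair without the peephole, a single "andn" with it.  */
unsigned clear_bits (unsigned a, unsigned b)
{
  return a & ~b;
}
```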
| 14911 | + |
| 14912 | + |
| 14913 | +;;================================================================= |
| 14914 | +;; Addabs peephole |
| 14915 | +;;================================================================= |
| 14916 | + |
| 14917 | +(define_peephole |
| 14918 | + [(set (match_operand:SI 2 "register_operand" "=r") |
| 14919 | + (abs:SI (match_operand:SI 1 "register_operand" "r"))) |
| 14920 | + (set (match_operand:SI 0 "register_operand" "=r") |
| 14921 | + (plus:SI (match_operand:SI 3 "register_operand" "r") |
| 14922 | + (match_dup 2)))] |
| 14923 | + "dead_or_set_p(insn, operands[2])" |
| 14924 | + "addabs %0, %3, %1" |
| 14925 | + [(set_attr "length" "4") |
| 14926 | + (set_attr "cc" "set_z")]) |
| 14927 | + |
| 14928 | +(define_peephole |
| 14929 | + [(set (match_operand:SI 2 "register_operand" "=r") |
| 14930 | + (abs:SI (match_operand:SI 1 "register_operand" "r"))) |
| 14931 | + (set (match_operand:SI 0 "register_operand" "=r") |
| 14932 | + (plus:SI (match_dup 2) |
| 14933 | + (match_operand:SI 3 "register_operand" "r")))] |
| 14934 | + "dead_or_set_p(insn, operands[2])" |
| 14935 | + "addabs %0, %3, %1" |
| 14936 | + [(set_attr "length" "4") |
| 14937 | + (set_attr "cc" "set_z")]) |
| 14938 | + |
| 14939 | + |
| 14940 | +;;================================================================= |
| 14941 | +;; Detect roundings |
| 14942 | +;;================================================================= |
| 14943 | + |
| 14944 | +(define_insn "*round" |
| 14945 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 14946 | + (ashiftrt:SI (plus:SI (match_operand:SI 1 "register_operand" "0") |
| 14947 | + (match_operand:SI 2 "immediate_operand" "i")) |
| 14948 | + (match_operand:SI 3 "immediate_operand" "i")))] |
| 14949 | + "avr32_rnd_operands(operands[2], operands[3])" |
| 14950 | + |
| 14951 | + "satrnds %0 >> %3, 31" |
| 14952 | + |
| 14953 | + [(set_attr "type" "alu_sat") |
| 14954 | + (set_attr "length" "4")] |
| 14955 | + |
| 14956 | + ) |
| 14957 | + |
| 14958 | + |
| 14959 | +(define_peephole2 |
| 14960 | + [(set (match_operand:SI 0 "register_operand" "") |
| 14961 | + (plus:SI (match_dup 0) |
| 14962 | + (match_operand:SI 1 "immediate_operand" ""))) |
| 14963 | + (set (match_dup 0) |
| 14964 | + (ashiftrt:SI (match_dup 0) |
| 14965 | + (match_operand:SI 2 "immediate_operand" "")))] |
| 14966 | + "avr32_rnd_operands(operands[1], operands[2])" |
| 14967 | + |
| 14968 | + [(set (match_dup 0) |
| 14969 | + (ashiftrt:SI (plus:SI (match_dup 0) |
| 14970 | + (match_dup 1)) |
| 14971 | + (match_dup 2)))] |
| 14972 | + ) |
| 14973 | + |
| 14974 | +(define_peephole |
| 14975 | + [(set (match_operand:SI 0 "register_operand" "r") |
| 14976 | + (plus:SI (match_dup 0) |
| 14977 | + (match_operand:SI 1 "immediate_operand" "i"))) |
| 14978 | + (set (match_dup 0) |
| 14979 | + (ashiftrt:SI (match_dup 0) |
| 14980 | + (match_operand:SI 2 "immediate_operand" "i")))] |
| 14981 | + "avr32_rnd_operands(operands[1], operands[2])" |
| 14982 | + |
| 14983 | + "satrnds %0 >> %2, 31" |
| 14984 | + |
| 14985 | + [(set_attr "type" "alu_sat") |
| 14986 | + (set_attr "length" "4") |
| 14987 | + (set_attr "cc" "clobber")] |
| 14988 | + |
| 14989 | + ) |
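The *round insn and the two rounding peepholes recognise the add-half-then-arithmetic-shift rounding idiom; avr32_rnd_operands presumably verifies that the added constant equals 1 << (shift - 1), which is what makes the satrnds rewrite valid. A source-level sketch:

```c
/* Round to the nearest multiple of 32: add half the divisor (16) before the
   arithmetic shift by 5.  Expected to match as "satrnds rd >> 5, 31".  */
int round_shift5 (int x)
{
  return (x + 16) >> 5;
}
```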
| 14990 | + |
| 14991 | + |
| 14992 | + |
| 14993 | + |
| 14994 | +;;================================================================= |
| 14995 | +;; Conditional Subtract |
| 14996 | +;;================================================================= |
| 14997 | + |
| 14998 | + |
| 14999 | +(define_peephole |
| 15000 | + [(set (match_operand:SI 0 "register_operand" "") |
| 15001 | + (minus:SI (match_operand:SI 1 "register_operand" "") |
| 15002 | + (match_operand:SI 2 "immediate_operand" ""))) |
| 15003 | + (set (match_dup 1) |
| 15004 | + (unspec:SI [(match_operand 5 "avr32_comparison_operator" "") |
| 15005 | + (match_dup 0) |
| 15006 | + (match_dup 1) |
| 15007 | + (match_operand 3 "general_operand" "") |
| 15008 | + (match_operand 4 "general_operand" "")] |
| 15009 | + UNSPEC_MOVSICC))] |
| 15010 | + |
| 15011 | + "(dead_or_set_p(insn, operands[0])) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks08\")" |
| 15012 | + |
| 15013 | + { |
| 15014 | + |
| 15015 | + operands[5] = avr32_output_cmp(operands[5], GET_MODE(operands[3]), operands[3], operands[4]); |
| 15016 | + |
| 15017 | + return "sub%5 %1, %2"; |
| 15018 | + } |
| 15019 | + |
| 15020 | + [(set_attr "length" "10") |
| 15021 | + (set_attr "cc" "clobber")] |
| 15022 | + ) |
| 15023 | + |
| 15024 | +(define_peephole |
| 15025 | + [(set (match_operand:SI 0 "register_operand" "") |
| 15026 | + (plus:SI (match_operand:SI 1 "register_operand" "") |
| 15027 | + (match_operand:SI 2 "immediate_operand" ""))) |
| 15028 | + (set (match_dup 1) |
| 15029 | + (unspec:SI [(match_operand 5 "avr32_comparison_operator" "") |
| 15030 | + (match_dup 0) |
| 15031 | + (match_dup 1) |
| 15032 | + (match_operand 3 "general_operand" "") |
| 15033 | + (match_operand 4 "general_operand" "")] |
| 15034 | + UNSPEC_MOVSICC))] |
| 15035 | + |
| 15036 | + "(dead_or_set_p(insn, operands[0]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'I', \"Is08\"))" |
| 15037 | + |
| 15038 | + { |
| 15039 | + operands[5] = avr32_output_cmp(operands[5], GET_MODE(operands[3]), operands[3], operands[4]); |
| 15040 | + |
| 15041 | + return "sub%5 %1, %n2"; |
| 15042 | + } |
| 15043 | + [(set_attr "length" "10") |
| 15044 | + (set_attr "cc" "clobber")] |
| 15045 | + ) |
| 15046 | + |
| 15047 | +;;================================================================= |
| 15048 | +;; mcall |
| 15049 | +;;================================================================= |
| 15050 | +(define_peephole |
| 15051 | + [(set (match_operand:SI 0 "register_operand" "") |
| 15052 | + (match_operand 1 "avr32_const_pool_ref_operand" "")) |
| 15053 | + (parallel [(call (mem:SI (match_dup 0)) |
| 15054 | + (match_operand 2 "" "")) |
| 15055 | + (clobber (reg:SI LR_REGNUM))])] |
| 15056 | + "dead_or_set_p(insn, operands[0])" |
| 15057 | + "mcall %1" |
| 15058 | + [(set_attr "type" "call") |
| 15059 | + (set_attr "length" "4") |
| 15060 | + (set_attr "cc" "clobber")] |
| 15061 | +) |
| 15062 | + |
| 15063 | +(define_peephole |
| 15064 | + [(set (match_operand:SI 2 "register_operand" "") |
| 15065 | + (match_operand 1 "avr32_const_pool_ref_operand" "")) |
| 15066 | + (parallel [(set (match_operand 0 "register_operand" "") |
| 15067 | + (call (mem:SI (match_dup 2)) |
| 15068 | + (match_operand 3 "" ""))) |
| 15069 | + (clobber (reg:SI LR_REGNUM))])] |
| 15070 | + "dead_or_set_p(insn, operands[2])" |
| 15071 | + "mcall %1" |
| 15072 | + [(set_attr "type" "call") |
| 15073 | + (set_attr "length" "4") |
| 15074 | + (set_attr "cc" "call_set")] |
| 15075 | +) |
| 15076 | + |
| 15077 | + |
| 15078 | +(define_peephole2 |
| 15079 | + [(set (match_operand:SI 0 "register_operand" "") |
| 15080 | + (match_operand 1 "avr32_const_pool_ref_operand" "")) |
| 15081 | + (parallel [(call (mem:SI (match_dup 0)) |
| 15082 | + (match_operand 2 "" "")) |
| 15083 | + (clobber (reg:SI LR_REGNUM))])] |
| 15084 | + "peep2_reg_dead_p(2, operands[0])" |
| 15085 | + [(parallel [(call (mem:SI (match_dup 1)) |
| 15086 | + (match_dup 2)) |
| 15087 | + (clobber (reg:SI LR_REGNUM))])] |
| 15088 | + "" |
| 15089 | +) |
| 15090 | + |
| 15091 | +(define_peephole2 |
| 15092 | + [(set (match_operand:SI 0 "register_operand" "") |
| 15093 | + (match_operand 1 "avr32_const_pool_ref_operand" "")) |
| 15094 | + (parallel [(set (match_operand 2 "register_operand" "") |
| 15095 | + (call (mem:SI (match_dup 0)) |
| 15096 | + (match_operand 3 "" ""))) |
| 15097 | + (clobber (reg:SI LR_REGNUM))])] |
| 15098 | + "(peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[2]) == REGNO(operands[0])))" |
| 15099 | + [(parallel [(set (match_dup 2) |
| 15100 | + (call (mem:SI (match_dup 1)) |
| 15101 | + (match_dup 3))) |
| 15102 | + (clobber (reg:SI LR_REGNUM))])] |
| 15103 | + "" |
| 15104 | +) |
| 15105 | + |
| 15106 | +;;================================================================= |
| 15107 | +;; Returning a value |
| 15108 | +;;================================================================= |
| 15109 | + |
| 15110 | + |
| 15111 | +(define_peephole |
| 15112 | + [(set (match_operand 0 "register_operand" "") |
| 15113 | + (match_operand 1 "register_operand" "")) |
| 15114 | + (return)] |
| 15115 | + "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM) |
| 15116 | + && (REGNO(operands[1]) != LR_REGNUM) |
| 15117 | + && (REGNO_REG_CLASS(REGNO(operands[1])) == GENERAL_REGS)" |
| 15118 | + "retal %1" |
| 15119 | + [(set_attr "type" "call") |
| 15120 | + (set_attr "length" "2")] |
| 15121 | + ) |
| 15122 | + |
| 15123 | + |
| 15124 | +(define_peephole |
| 15125 | + [(set (match_operand 0 "register_operand" "r") |
| 15126 | + (match_operand 1 "immediate_operand" "i")) |
| 15127 | + (return)] |
| 15128 | + "(USE_RETURN_INSN (FALSE) && (REGNO(operands[0]) == RETVAL_REGNUM) && |
| 15129 | + ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1)))" |
| 15130 | + { |
| 15131 | + avr32_output_return_instruction (TRUE, FALSE, NULL, operands[1]); |
| 15132 | + return ""; |
| 15133 | + } |
| 15134 | + [(set_attr "type" "call") |
| 15135 | + (set_attr "length" "4")] |
| 15136 | + ) |
| 15137 | + |
| 15138 | +(define_peephole |
| 15139 | + [(set (match_operand 0 "register_operand" "r") |
| 15140 | + (match_operand 1 "immediate_operand" "i")) |
| 15141 | + (unspec_volatile [(return)] VUNSPEC_EPILOGUE)] |
| 15142 | + "(REGNO(operands[0]) == RETVAL_REGNUM) && |
| 15143 | + ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1))" |
| 15144 | + { |
| 15145 | + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[1]); |
| 15146 | + return ""; |
| 15147 | + } |
| 15148 | + ; Length is absolute worst case |
| 15149 | + [(set_attr "type" "branch") |
| 15150 | + (set_attr "length" "12")] |
| 15151 | + ) |
| 15152 | + |
| 15153 | +(define_peephole |
| 15154 | + [(set (match_operand 0 "register_operand" "r") |
| 15155 | + (unspec [(match_operand 1 "avr32_comparison_operator" "") |
| 15156 | + (match_operand 2 "register_immediate_operand" "rKs08") |
| 15157 | + (match_operand 3 "register_immediate_operand" "rKs08") |
| 15158 | + (match_operand 4 "register_immediate_operand" "r") |
| 15159 | + (match_operand 5 "register_immediate_operand" "rKs21") |
| 15160 | + ] |
| 15161 | + UNSPEC_MOVSICC )) |
| 15162 | + (return)] |
| 15163 | + "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM) && |
| 15164 | + ((GET_MODE(operands[4]) == SImode) || |
| 15165 | + ((GET_MODE(operands[4]) != SImode) && (GET_CODE(operands[5]) == REG)))" |
| 15166 | + { |
| 15167 | + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]); |
| 15168 | + |
| 15169 | + if ( GET_CODE(operands[2]) == REG |
| 15170 | + && GET_CODE(operands[3]) == REG |
| 15171 | + && REGNO(operands[2]) != LR_REGNUM |
| 15172 | + && REGNO(operands[3]) != LR_REGNUM ){ |
| 15173 | + return "ret%1 %2\;ret%i1 %3"; |
| 15174 | + } else if ( GET_CODE(operands[2]) == REG |
| 15175 | + && GET_CODE(operands[3]) == CONST_INT ){ |
| 15176 | + if ( INTVAL(operands[3]) == -1 |
| 15177 | + || INTVAL(operands[3]) == 0 |
| 15178 | + || INTVAL(operands[3]) == 1 ){ |
| 15179 | + return "ret%1 %2\;ret%i1 %d3"; |
| 15180 | + } else { |
| 15181 | + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12"; |
| 15182 | + } |
| 15183 | + } else if ( GET_CODE(operands[2]) == CONST_INT |
| 15184 | + && GET_CODE(operands[3]) == REG ){ |
| 15185 | + if ( INTVAL(operands[2]) == -1 |
| 15186 | + || INTVAL(operands[2]) == 0 |
| 15187 | + || INTVAL(operands[2]) == 1 ){ |
| 15188 | + return "ret%1 %d2\;ret%i1 %3"; |
| 15189 | + } else { |
| 15190 | + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12"; |
| 15191 | + } |
| 15192 | + } else { |
| 15193 | + if ( (INTVAL(operands[2]) == -1 |
| 15194 | + || INTVAL(operands[2]) == 0 |
| 15195 | + || INTVAL(operands[2]) == 1 ) |
| 15196 | + && (INTVAL(operands[3]) == -1 |
| 15197 | + || INTVAL(operands[3]) == 0 |
| 15198 | + || INTVAL(operands[3]) == 1 )){ |
| 15199 | + return "ret%1 %d2\;ret%i1 %d3"; |
| 15200 | + } else { |
| 15201 | + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12"; |
| 15202 | + } |
| 15203 | + } |
| 15204 | + } |
| 15205 | + |
| 15206 | + [(set_attr "length" "14") |
| 15207 | + (set_attr "cc" "clobber") |
| 15208 | + (set_attr "type" "call")]) |
| 15209 | + |
| 15210 | + |
| 15211 | +;;================================================================= |
| 15212 | +;; mulnhh.w |
| 15213 | +;;================================================================= |
| 15214 | + |
| 15215 | +(define_peephole2 |
| 15216 | + [(set (match_operand:HI 0 "register_operand" "") |
| 15217 | + (neg:HI (match_operand:HI 1 "register_operand" ""))) |
| 15218 | + (set (match_operand:SI 2 "register_operand" "") |
| 15219 | + (mult:SI |
| 15220 | + (sign_extend:SI (match_dup 0)) |
| 15221 | + (sign_extend:SI (match_operand:HI 3 "register_operand" ""))))] |
| 15222 | + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))" |
| 15223 | + [ (set (match_dup 2) |
| 15224 | + (mult:SI |
| 15225 | + (sign_extend:SI (neg:HI (match_dup 1))) |
| 15226 | + (sign_extend:SI (match_dup 3))))] |
| 15227 | + "" |
| 15228 | + ) |
| 15229 | + |
| 15230 | +(define_peephole2 |
| 15231 | + [(set (match_operand:HI 0 "register_operand" "") |
| 15232 | + (neg:HI (match_operand:HI 1 "register_operand" ""))) |
| 15233 | + (set (match_operand:SI 2 "register_operand" "") |
| 15234 | + (mult:SI |
| 15235 | + (sign_extend:SI (match_operand:HI 3 "register_operand" "")) |
| 15236 | + (sign_extend:SI (match_dup 0))))] |
| 15237 | + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))" |
| 15238 | + [ (set (match_dup 2) |
| 15239 | + (mult:SI |
| 15240 | + (sign_extend:SI (neg:HI (match_dup 1))) |
| 15241 | + (sign_extend:SI (match_dup 3))))] |
| 15242 | + "" |
| 15243 | + ) |
| 15244 | + |
| 15245 | + |
| 15246 | + |
| 15247 | +;;================================================================= |
| 15248 | +;; sthh.w |
| 15249 | +;;================================================================= |
| 15250 | +(define_insn "vec_setv2hi" |
| 15251 | + [(set (match_operand:V2HI 0 "register_operand" "=r") |
| 15252 | + (vec_merge:V2HI |
| 15253 | + (match_dup 0) |
| 15254 | + (vec_duplicate:V2HI |
| 15255 | + (match_operand:HI 1 "register_operand" "r")) |
| 15256 | + (const_int 1)))] |
| 15257 | + "" |
| 15258 | + "bfins\t%0, %1, 16, 16" |
| 15259 | + [(set_attr "type" "alu") |
| 15260 | + (set_attr "length" "4") |
| 15261 | + (set_attr "cc" "clobber")]) |
| 15262 | + |
| 15263 | +(define_insn "vec_setv2lo" |
| 15264 | + [(set (match_operand:V2HI 0 "register_operand" "+r") |
| 15265 | + (vec_merge:V2HI |
| 15266 | + (match_dup 0) |
| 15267 | + (vec_duplicate:V2HI |
| 15268 | + (match_operand:HI 1 "register_operand" "r")) |
| 15269 | + (const_int 2)))] |
| 15270 | + "" |
| 15271 | + "bfins\t%0, %1, 0, 16" |
| 15272 | + [(set_attr "type" "alu") |
| 15273 | + (set_attr "length" "4") |
| 15274 | + (set_attr "cc" "clobber")]) |
| 15275 | + |
| 15276 | +(define_expand "vec_setv2" |
| 15277 | + [(set (match_operand:V2HI 0 "register_operand" "") |
| 15278 | + (vec_merge:V2HI |
| 15279 | + (match_dup 0) |
| 15280 | + (vec_duplicate:V2HI |
| 15281 | + (match_operand:HI 1 "register_operand" "")) |
| 15282 | + (match_operand 2 "immediate_operand" "")))] |
| 15283 | + "" |
| 15284 | + { operands[2] = GEN_INT(INTVAL(operands[2]) + 1); } |
| 15285 | + ) |
| 15286 | + |
| 15287 | +(define_insn "vec_extractv2hi" |
| 15288 | + [(set (match_operand:HI 0 "register_operand" "=r") |
| 15289 | + (vec_select:HI |
| 15290 | + (match_operand:V2HI 1 "register_operand" "r") |
| 15291 | + (parallel [(match_operand:SI 2 "immediate_operand" "i")])))] |
| 15292 | + "" |
| 15293 | + { |
| 15294 | + if ( INTVAL(operands[2]) == 0 ) |
| 15295 | + return "bfextu\t%0, %1, 16, 16"; |
| 15296 | + else |
| 15297 | + return "bfextu\t%0, %1, 0, 16"; |
| 15298 | + } |
| 15299 | + [(set_attr "type" "alu") |
| 15300 | + (set_attr "length" "4") |
| 15301 | + (set_attr "cc" "clobber")]) |
| 15302 | + |
| 15303 | +(define_insn "vec_extractv4qi" |
| 15304 | + [(set (match_operand:QI 0 "register_operand" "=r") |
| 15305 | + (vec_select:QI |
| 15306 | + (match_operand:V4QI 1 "register_operand" "r") |
| 15307 | + (parallel [(match_operand:SI 2 "immediate_operand" "i")])))] |
| 15308 | + "" |
| 15309 | + { |
| 15310 | + switch ( INTVAL(operands[2]) ){ |
| 15311 | + case 0: |
| 15312 | + return "bfextu\t%0, %1, 24, 8"; |
| 15313 | + case 1: |
| 15314 | + return "bfextu\t%0, %1, 16, 8"; |
| 15315 | + case 2: |
| 15316 | + return "bfextu\t%0, %1, 8, 8"; |
| 15317 | + case 3: |
| 15318 | + return "bfextu\t%0, %1, 0, 8"; |
| 15319 | + default: |
| 15320 | + abort(); |
| 15321 | + } |
| 15322 | + } |
| 15323 | + [(set_attr "type" "alu") |
| 15324 | + (set_attr "length" "4") |
| 15325 | + (set_attr "cc" "clobber")]) |
| 15326 | + |
| 15327 | + |
| 15328 | +(define_insn "concatv2hi" |
| 15329 | + [(set (match_operand:V2HI 0 "register_operand" "=r, r, r") |
| 15330 | + (vec_concat:V2HI |
| 15331 | + (match_operand:HI 1 "register_operand" "r, r, 0") |
| 15332 | + (match_operand:HI 2 "register_operand" "r, 0, r")))] |
| 15333 | + "" |
| 15334 | + "@ |
| 15335 | + mov\t%0, %1\;bfins\t%0, %2, 0, 16 |
| 15336 | + bfins\t%0, %2, 0, 16 |
| 15337 | + bfins\t%0, %1, 16, 16" |
| 15338 | + [(set_attr "length" "6, 4, 4") |
| 15339 | + (set_attr "type" "alu")]) |
| 15340 | + |
| 15341 | +;(define_peephole2 |
| 15342 | +; [(set (match_operand:HI 0 "register_operand" "r") |
| 15343 | +; (plus:HI (match_operand:HI 3 "register_operand" "r") |
| 15344 | +; (match_operand:HI 4 "register_operand" "r"))) |
| 15345 | +; (set (match_operand:HI 1 "register_operand" "r") |
| 15346 | +; (minus:HI (match_dup 3) |
| 15347 | +; (match_dup 4)))] |
| 15348 | +; "REGNO(operands[0]) != REGNO(operands[3])" |
| 15349 | +; [(set (match_dup 2) |
| 15350 | +; (vec_concat:V2HI |
| 15351 | +; (minus:HI (match_dup 3) |
| 15352 | +; (match_dup 4)) |
| 15353 | +; (plus:HI (match_dup 3) (match_dup 4)))) |
| 15354 | +; (set (match_dup 1) (vec_select:HI (match_dup 2) |
| 15355 | +; (parallel [(const_int 0)])))] |
| 15356 | +; |
| 15357 | +; "operands[2] = gen_rtx_REG(V2HImode, REGNO(operands[0]));" |
| 15358 | +; ) |
| 15359 | +; |
| 15360 | +;(define_peephole2 |
| 15361 | +; [(set (match_operand:HI 0 "register_operand" "r") |
| 15362 | +; (minus:HI (match_operand:HI 3 "register_operand" "r") |
| 15363 | +; (match_operand:HI 4 "register_operand" "r"))) |
| 15364 | +; (set (match_operand:HI 1 "register_operand" "r") |
| 15365 | +; (plus:HI (match_dup 3) |
| 15366 | +; (match_dup 4)))] |
| 15367 | +; "REGNO(operands[0]) != REGNO(operands[3])" |
| 15368 | +; [(set (match_dup 2) |
| 15369 | +; (vec_concat:V2HI |
| 15370 | +; (plus:HI (match_dup 3) |
| 15371 | +; (match_dup 4)) |
| 15372 | +; (minus:HI (match_dup 3) (match_dup 4)))) |
| 15373 | +; (set (match_dup 1) (vec_select:HI (match_dup 2) |
| 15374 | +; (parallel [(const_int 0)])))] |
| 15375 | +; |
| 15376 | +; "operands[2] = gen_rtx_REG(V2HImode, REGNO(operands[0]));" |
| 15377 | +; ) |
| 15378 | + |
| 15379 | + |
| 15380 | +;(define_peephole2 |
| 15381 | +; [(match_scratch:V2HI 5 "r") |
| 15382 | +; (set (mem:HI (plus:SI (match_operand:SI 0 "register_operand" "") |
| 15383 | +; (match_operand:HI 1 "immediate_operand" ""))) |
| 15384 | +; (match_operand:HI 2 "register_operand" "r")) |
| 15385 | +; (set (mem:HI (plus:SI (match_dup 0) |
| 15386 | +; (match_operand:HI 3 "immediate_operand" ""))) |
| 15387 | +; (match_operand:HI 4 "register_operand" "r"))] |
| 15388 | +; "(GET_CODE(operands[1]) == CONST_INT) && (GET_CODE(operands[3]) == CONST_INT) |
| 15389 | +; && (INTVAL(operands[3]) == (INTVAL(operands[1]) + 2))" |
| 15390 | +; |
| 15391 | +; [(set (match_dup 5) |
| 15392 | +; (vec_concat:V2HI |
| 15393 | +; (match_dup 2) |
| 15394 | +; (match_dup 4))) |
| 15395 | +; (set (mem:V2HI (plus:SI (match_dup 0) (match_dup 1))) |
| 15396 | +; (match_dup 5))] |
| 15397 | +; "" |
| 15398 | +; ) |
| 15399 | +; |
| 15400 | + |
| 15401 | +;(define_insn "sthh_w" |
| 15402 | +; [(set (match_operand:V2HI 0 "avr32_sthh_w_memory_operand" "m") |
| 15403 | +; (vec_concat:V2HI |
| 15404 | +; (vec_select:HI (match_operand:V2HI 1 "register_operand" "r") |
| 15405 | +; (parallel [(match_operand 3 "immediate_operand" "i")])) |
| 15406 | +; (vec_select:HI (match_operand:V2HI 2 "register_operand" "r") |
| 15407 | +; (parallel [(match_operand 4 "immediate_operand" "i")]))))] |
| 15408 | +; "MEM_ALIGN(operands[0]) >= 32" |
| 15409 | +; "sthh.w\t%0, %1:%h3, %2:%h4" |
| 15410 | +; [(set_attr "length" "4") |
| 15411 | +; (set_attr "type" "store")]) |
| 15412 | +; |
| 15413 | +;(define_peephole2 |
| 15414 | +; [(set (mem:HI (plus:SI (match_operand:SI 0 "register_operand" "") |
| 15415 | +; (match_operand:HI 1 "immediate_operand" ""))) |
| 15416 | +; (match_operand:HI 2 "register_operand" "r")) |
| 15417 | +; (set (mem:HI (plus:SI (match_dup 0) |
| 15418 | +; (match_operand:HI 3 "avr32_sthh_operand" ""))) |
| 15419 | +; (match_operand:HI 4 "register_operand" "r"))] |
| 15420 | +; "(GET_CODE(operands[1]) == CONST_INT) && (GET_CODE(operands[3]) == CONST_INT) |
| 15421 | +; && (INTVAL(operands[3]) == (INTVAL(operands[1]) - 2))" |
| 15422 | +; |
| 15423 | +; [(parallel [(set (mem:HI (plus:SI (match_dup 0) |
| 15424 | +; (match_dup 3))) |
| 15425 | +; (match_dup 4)) |
| 15426 | +; (set (mem:HI (plus:SI (match_dup 0) |
| 15427 | +; (plus:SI (match_dup 3) (const_int 2)))) |
| 15428 | +; (match_dup 2))])] |
| 15429 | +; "" |
| 15430 | +; ) |
| 15431 | + |
| 15432 | + |
| 15433 | +;; Load the SIMD description |
| 15434 | +(include "simd.md") |
| 15435 | + |
| 15436 | +;; Load the FP coprocessor patterns |
| 15437 | +(include "fpcp.md") |
| 15438 | --- /dev/null |
| 15439 | +++ b/gcc/config/avr32/avr32-modes.def |
| 15440 | @@ -0,0 +1 @@ |
| 15441 | +VECTOR_MODES (INT, 4); /* V4QI V2HI */ |
| 15442 | --- /dev/null |
| 15443 | +++ b/gcc/config/avr32/avr32.opt |
| 15444 | @@ -0,0 +1,78 @@ |
| 15445 | +; Options for the Atmel AVR32 port of the compiler. |
| 15446 | + |
| 15447 | +; Copyright 2007 Atmel Corporation. |
| 15448 | +; |
| 15449 | +; This file is part of GCC. |
| 15450 | +; |
| 15451 | +; GCC is free software; you can redistribute it and/or modify it under |
| 15452 | +; the terms of the GNU General Public License as published by the Free |
| 15453 | +; Software Foundation; either version 2, or (at your option) any later |
| 15454 | +; version. |
| 15455 | +; |
| 15456 | +; GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| 15457 | +; WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 15458 | +; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 15459 | +; for more details. |
| 15460 | +; |
| 15461 | +; You should have received a copy of the GNU General Public License |
| 15462 | +; along with GCC; see the file COPYING. If not, write to the Free |
| 15463 | +; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA |
| 15464 | +; 02110-1301, USA. |
| 15465 | + |
| 15466 | +muse-rodata-section |
| 15467 | +Target Report Mask(USE_RODATA_SECTION) |
| 15468 | +Put read-only data in the .rodata section instead of the .text section. |
| 15469 | + |
| 15470 | +mhard-float |
| 15471 | +Target Report Mask(HARD_FLOAT) |
| 15472 | +Use floating point coprocessor instructions. |
| 15473 | + |
| 15474 | +msoft-float |
| 15475 | +Target Report InverseMask(HARD_FLOAT, SOFT_FLOAT) |
| 15476 | +Use software floating-point library for floating-point operations. |
| 15477 | + |
| 15478 | +mforce-double-align |
| 15479 | +Target Report RejectNegative Mask(FORCE_DOUBLE_ALIGN) |
| 15480 | +Force double-word alignment for double-word memory accesses. |
| 15481 | + |
| 15482 | +mno-init-got |
| 15483 | +Target Report RejectNegative Mask(NO_INIT_GOT) |
| 15484 | +Do not initialize GOT register before using it when compiling PIC code. |
| 15485 | + |
| 15486 | +mrelax |
| 15487 | +Target Report Mask(RELAX) |
| 15488 | +Let the invoked assembler and linker perform relaxation (enabled by default when the optimization level is above 1). |
| 15489 | + |
| 15490 | +mno-reorg-opt |
| 15491 | +Target Report RejectNegative Mask(NO_REORG_OPT) |
| 15492 | +Do not perform machine-dependent optimizations in the reorg stage. |
| 15493 | + |
| 15494 | +mmd-reorg-opt |
| 15495 | +Target Report RejectNegative InverseMask(NO_REORG_OPT,MD_REORG_OPTIMIZATION) |
| 15496 | +Perform machine-dependent optimizations in the reorg stage. |
| 15497 | + |
| 15498 | +masm-addr-pseudos |
| 15499 | +Target Report RejectNegative InverseMask(NO_ASM_ADDR_PSEUDOS, HAS_ASM_ADDR_PSEUDOS) |
| 15500 | +Use the assembler pseudo-instructions lda.w and call for handling direct addresses (enabled by default). |
| 15501 | + |
| 15502 | +mno-asm-addr-pseudos |
| 15503 | +Target Report RejectNegative Mask(NO_ASM_ADDR_PSEUDOS) |
| 15504 | +Do not use assembler pseudo-instructions lda.w and call for handling direct addresses. |
| 15505 | + |
| 15506 | +mno-pic |
| 15507 | +Target Report RejectNegative Mask(NO_PIC) |
| 15508 | +Do not emit position-independent code (this will break dynamic linking). |
| 15509 | + |
| 15510 | +mpart= |
| 15511 | +Target Report RejectNegative Joined Var(avr32_part_name) |
| 15512 | +Specify the AVR32 part name. |
| 15513 | + |
| 15514 | +mcpu= |
| 15515 | +Target Report RejectNegative Joined Undocumented Var(avr32_part_name) |
| 15516 | +Specify the AVR32 part name (deprecated). |
| 15517 | + |
| 15518 | +march= |
| 15519 | +Target Report RejectNegative Joined Var(avr32_arch_name) |
| 15520 | +Specify the AVR32 architecture name. |
| 15521 | + |
| 15522 | + |
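For readers unfamiliar with GCC's `.opt` machinery: entries such as `Mask(HARD_FLOAT)` and `Var(avr32_part_name)` above are conventionally turned into `MASK_*` bits in `target_flags`, `TARGET_*` test macros, and option variables by the option-generating scripts. The sketch below only mimics that with hand-written stand-ins; the generated names and the `ap7000` part string are assumptions for illustration, not something this patch defines.

```c
#include <stdio.h>

/* Hand-written stand-ins for what the generated options.h would provide. */
#define MASK_HARD_FLOAT (1 << 0)
static int target_flags;               /* accumulated -m... flags */
static const char *avr32_part_name;    /* filled in by -mpart=<name> */

#define TARGET_HARD_FLOAT (target_flags & MASK_HARD_FLOAT)

int main(void)
{
  target_flags |= MASK_HARD_FLOAT;     /* as if -mhard-float had been given */
  avr32_part_name = "ap7000";          /* as if -mpart=ap7000 had been given */

  if (TARGET_HARD_FLOAT)
    printf("FP coprocessor patterns enabled for part %s\n", avr32_part_name);
  return 0;
}
```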
| 15523 | --- /dev/null |
| 15524 | +++ b/gcc/config/avr32/avr32-protos.h |
| 15525 | @@ -0,0 +1,175 @@ |
| 15526 | +/* |
| 15527 | + Prototypes for exported functions defined in avr32.c |
| 15528 | + Copyright 2003-2006 Atmel Corporation. |
| 15529 | + |
| 15530 | + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| 15531 | + Initial porting by Anders Ådland. |
| 15532 | + |
| 15533 | + This file is part of GCC. |
| 15534 | + |
| 15535 | + This program is free software; you can redistribute it and/or modify |
| 15536 | + it under the terms of the GNU General Public License as published by |
| 15537 | + the Free Software Foundation; either version 2 of the License, or |
| 15538 | + (at your option) any later version. |
| 15539 | + |
| 15540 | + This program is distributed in the hope that it will be useful, |
| 15541 | + but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 15542 | + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 15543 | + GNU General Public License for more details. |
| 15544 | + |
| 15545 | + You should have received a copy of the GNU General Public License |
| 15546 | + along with this program; if not, write to the Free Software |
| 15547 | + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ |
| 15548 | + |
| 15549 | + |
| 15550 | +#ifndef AVR32_PROTOS_H |
| 15551 | +#define AVR32_PROTOS_H |
| 15552 | + |
| 15553 | +extern const int swap_reg[]; |
| 15554 | + |
| 15555 | +extern int avr32_valid_macmac_bypass (rtx, rtx); |
| 15556 | +extern int avr32_valid_mulmac_bypass (rtx, rtx); |
| 15557 | + |
| 15558 | +extern int avr32_decode_lcomm_symbol_offset (rtx, int *); |
| 15559 | +extern void avr32_encode_lcomm_symbol_offset (tree, char *, int); |
| 15560 | + |
| 15561 | +extern const char *avr32_strip_name_encoding (const char *); |
| 15562 | + |
| 15563 | +extern rtx avr32_get_note_reg_equiv (rtx insn); |
| 15564 | + |
| 15565 | +extern int avr32_use_return_insn (int iscond); |
| 15566 | + |
| 15567 | +extern void avr32_make_reglist16 (int reglist16_vect, char *reglist16_string); |
| 15568 | + |
| 15569 | +extern void avr32_make_reglist8 (int reglist8_vect, char *reglist8_string); |
| 15570 | +extern void avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string); |
| 15571 | +extern void avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string); |
| 15572 | + |
| 15573 | +extern void avr32_output_return_instruction (int single_ret_inst, |
| 15574 | + int iscond, rtx cond, |
| 15575 | + rtx r12_imm); |
| 15576 | +extern void avr32_expand_prologue (void); |
| 15577 | +extern void avr32_set_return_address (rtx source); |
| 15578 | + |
| 15579 | +extern int avr32_hard_regno_mode_ok (int regno, enum machine_mode mode); |
| 15580 | +extern int avr32_extra_constraint_s (rtx value, const int strict); |
| 15581 | +extern int avr32_eh_return_data_regno (const int n); |
| 15582 | +extern int avr32_initial_elimination_offset (const int from, const int to); |
| 15583 | +extern rtx avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode, |
| 15584 | + tree type, int named); |
| 15585 | +extern void avr32_init_cumulative_args (CUMULATIVE_ARGS * cum, tree fntype, |
| 15586 | + rtx libname, tree fndecl); |
| 15587 | +extern void avr32_function_arg_advance (CUMULATIVE_ARGS * cum, |
| 15588 | + enum machine_mode mode, |
| 15589 | + tree type, int named); |
| 15590 | +#ifdef ARGS_SIZE_RTX |
| 15591 | +/* expr.h defines ARGS_SIZE_RTX and `enum direction'. */ |
| 15592 | +extern enum direction avr32_function_arg_padding (enum machine_mode mode, |
| 15593 | + tree type); |
| 15594 | +#endif /* ARGS_SIZE_RTX */ |
| 15595 | +extern rtx avr32_function_value (tree valtype, tree func); |
| 15596 | +extern rtx avr32_libcall_value (enum machine_mode mode); |
| 15597 | +extern int avr32_sched_use_dfa_pipeline_interface (void); |
| 15598 | +extern bool avr32_return_in_memory (tree type, tree fntype); |
| 15599 | +extern void avr32_regs_to_save (char *operand); |
| 15600 | +extern void avr32_target_asm_function_prologue (FILE * file, |
| 15601 | + HOST_WIDE_INT size); |
| 15602 | +extern void avr32_target_asm_function_epilogue (FILE * file, |
| 15603 | + HOST_WIDE_INT size); |
| 15604 | +extern void avr32_trampoline_template (FILE * file); |
| 15605 | +extern void avr32_initialize_trampoline (rtx addr, rtx fnaddr, |
| 15606 | + rtx static_chain); |
| 15607 | +extern int avr32_legitimate_address (enum machine_mode mode, rtx x, |
| 15608 | + int strict); |
| 15609 | +extern int avr32_legitimate_constant_p (rtx x); |
| 15610 | + |
| 15611 | +extern int avr32_legitimate_pic_operand_p (rtx x); |
| 15612 | + |
| 15613 | +extern rtx avr32_find_symbol (rtx x); |
| 15614 | +extern void avr32_select_section (rtx exp, int reloc, int align); |
| 15615 | +extern void avr32_encode_section_info (tree decl, rtx rtl, int first); |
| 15616 | +extern void avr32_asm_file_end (FILE * stream); |
| 15617 | +extern void avr32_asm_output_ascii (FILE * stream, char *ptr, int len); |
| 15618 | +extern void avr32_asm_output_common (FILE * stream, const char *name, |
| 15619 | + int size, int rounded); |
| 15620 | +extern void avr32_asm_output_label (FILE * stream, const char *name); |
| 15621 | +extern void avr32_asm_declare_object_name (FILE * stream, char *name, |
| 15622 | + tree decl); |
| 15623 | +extern void avr32_asm_globalize_label (FILE * stream, const char *name); |
| 15624 | +extern void avr32_asm_weaken_label (FILE * stream, const char *name); |
| 15625 | +extern void avr32_asm_output_external (FILE * stream, tree decl, |
| 15626 | + const char *name); |
| 15627 | +extern void avr32_asm_output_external_libcall (FILE * stream, rtx symref); |
| 15628 | +extern void avr32_asm_output_labelref (FILE * stream, const char *name); |
| 15629 | +extern void avr32_notice_update_cc (rtx exp, rtx insn); |
| 15630 | +extern void avr32_print_operand (FILE * stream, rtx x, int code); |
| 15631 | +extern void avr32_print_operand_address (FILE * stream, rtx x); |
| 15632 | + |
| 15633 | +extern int avr32_symbol (rtx x); |
| 15634 | + |
| 15635 | +extern void avr32_select_rtx_section (enum machine_mode mode, rtx x, |
| 15636 | + unsigned HOST_WIDE_INT align); |
| 15637 | + |
| 15638 | +extern int avr32_load_multiple_operation (rtx op, enum machine_mode mode); |
| 15639 | +extern int avr32_store_multiple_operation (rtx op, enum machine_mode mode); |
| 15640 | + |
| 15641 | +extern int avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c, |
| 15642 | + const char *str); |
| 15643 | + |
| 15644 | +extern bool avr32_cannot_force_const_mem (rtx x); |
| 15645 | + |
| 15646 | +extern void avr32_init_builtins (void); |
| 15647 | + |
| 15648 | +extern rtx avr32_expand_builtin (tree exp, rtx target, rtx subtarget, |
| 15649 | + enum machine_mode mode, int ignore); |
| 15650 | + |
| 15651 | +extern bool avr32_must_pass_in_stack (enum machine_mode mode, tree type); |
| 15652 | + |
| 15653 | +extern bool avr32_strict_argument_naming (CUMULATIVE_ARGS * ca); |
| 15654 | + |
| 15655 | +extern bool avr32_pass_by_reference (CUMULATIVE_ARGS * cum, |
| 15656 | + enum machine_mode mode, |
| 15657 | + tree type, bool named); |
| 15658 | + |
| 15659 | +extern rtx avr32_gen_load_multiple (rtx * regs, int count, rtx from, |
| 15660 | + int write_back, int in_struct_p, |
| 15661 | + int scalar_p); |
| 15662 | +extern rtx avr32_gen_store_multiple (rtx * regs, int count, rtx to, |
| 15663 | + int in_struct_p, int scalar_p); |
| 15664 | +extern int avr32_gen_movmemsi (rtx * operands); |
| 15665 | + |
| 15666 | +extern int avr32_rnd_operands (rtx add, rtx shift); |
| 15667 | +extern int avr32_adjust_insn_length (rtx insn, int length); |
| 15668 | + |
| 15669 | +extern int symbol_mentioned_p (rtx x); |
| 15670 | +extern int label_mentioned_p (rtx x); |
| 15671 | +extern rtx legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg); |
| 15672 | +extern int avr32_address_register_rtx_p (rtx x, int strict_p); |
| 15673 | +extern int avr32_legitimate_index_p (enum machine_mode mode, rtx index, |
| 15674 | + int strict_p); |
| 15675 | + |
| 15676 | +extern int avr32_const_double_immediate (rtx value); |
| 15677 | +extern void avr32_init_expanders (void); |
| 15678 | +extern rtx avr32_return_addr (int count, rtx frame); |
| 15679 | +extern bool avr32_got_mentioned_p (rtx addr); |
| 15680 | + |
| 15681 | +extern void avr32_final_prescan_insn (rtx insn, rtx * opvec, int noperands); |
| 15682 | + |
| 15683 | +extern int avr32_expand_movcc (enum machine_mode mode, rtx operands[]); |
| 15684 | +extern int avr32_expand_addcc (enum machine_mode mode, rtx operands[]); |
| 15685 | +#ifdef RTX_CODE |
| 15686 | +extern int avr32_expand_scc (RTX_CODE cond, rtx * operands); |
| 15687 | +#endif |
| 15688 | + |
| 15689 | +extern int avr32_store_bypass (rtx insn_out, rtx insn_in); |
| 15690 | +extern int avr32_mul_waw_bypass (rtx insn_out, rtx insn_in); |
| 15691 | +extern int avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in); |
| 15692 | +extern int avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in); |
| 15693 | +extern rtx avr32_output_cmp (rtx cond, enum machine_mode mode, |
| 15694 | + rtx op0, rtx op1); |
| 15695 | + |
| 15696 | +rtx get_next_insn_cond (rtx cur_insn); |
| 15697 | +int set_next_insn_cond (rtx cur_insn, rtx cond); |
| 15698 | +void avr32_override_options (void); |
| 15699 | + |
| 15700 | +#endif /* AVR32_PROTOS_H */ |
| 15701 | --- /dev/null |
| 15702 | +++ b/gcc/config/avr32/crti.asm |
| 15703 | @@ -0,0 +1,64 @@ |
| 15704 | +/* |
| 15705 | + Init/fini stuff for AVR32. |
| 15706 | + Copyright 2003-2006 Atmel Corporation. |
| 15707 | + |
| 15708 | + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| 15709 | + |
| 15710 | + This file is part of GCC. |
| 15711 | + |
| 15712 | + This program is free software; you can redistribute it and/or modify |
| 15713 | + it under the terms of the GNU General Public License as published by |
| 15714 | + the Free Software Foundation; either version 2 of the License, or |
| 15715 | + (at your option) any later version. |
| 15716 | + |
| 15717 | + This program is distributed in the hope that it will be useful, |
| 15718 | + but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 15719 | + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 15720 | + GNU General Public License for more details. |
| 15721 | + |
| 15722 | + You should have received a copy of the GNU General Public License |
| 15723 | + along with this program; if not, write to the Free Software |
| 15724 | + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ |
| 15725 | + |
| 15726 | + |
| 15727 | +/* The code in sections .init and .fini is supposed to be a single |
| 15728 | + regular function. The function in .init is called directly from |
| 15729 | + start in crt1.asm. The function in .fini is atexit()ed in crt1.asm |
| 15730 | + too. |
| 15731 | + |
| 15732 | + crti.asm contributes the prologue of a function to these sections, |
| 15733 | + and crtn.asm supplies the epilogue. STARTFILE_SPEC should list |
| 15734 | + crti.o before any other object files that might add code to .init |
| 15735 | + or .fini sections, and ENDFILE_SPEC should list crtn.o after any |
| 15736 | + such object files. */ |
| 15737 | + |
| 15738 | + .file "crti.asm" |
| 15739 | + |
| 15740 | + .section ".init" |
| 15741 | +/* Just load the GOT */ |
| 15742 | + .align 2 |
| 15743 | + .global _init |
| 15744 | +_init: |
| 15745 | + stm --sp, r6, lr |
| 15746 | + lddpc r6, 1f |
| 15747 | +0: |
| 15748 | + rsub r6, pc |
| 15749 | + rjmp 2f |
| 15750 | + .align 2 |
| 15751 | +1: .long 0b - _GLOBAL_OFFSET_TABLE_ |
| 15752 | +2: |
| 15753 | + |
| 15754 | + .section ".fini" |
| 15755 | +/* Just load the GOT */ |
| 15756 | + .align 2 |
| 15757 | + .global _fini |
| 15758 | +_fini: |
| 15759 | + stm --sp, r6, lr |
| 15760 | + lddpc r6, 1f |
| 15761 | +0: |
| 15762 | + rsub r6, pc |
| 15763 | + rjmp 2f |
| 15764 | + .align 2 |
| 15765 | +1: .long 0b - _GLOBAL_OFFSET_TABLE_ |
| 15766 | +2: |
| 15767 | + |
| 15768 | --- /dev/null |
| 15769 | +++ b/gcc/config/avr32/crtn.asm |
| 15770 | @@ -0,0 +1,44 @@ |
| 15771 | +/* Copyright (C) 2001 Free Software Foundation, Inc. |
| 15772 | + Written By Nick Clifton |
| 15773 | + |
| 15774 | + This file is free software; you can redistribute it and/or modify it |
| 15775 | + under the terms of the GNU General Public License as published by the |
| 15776 | + Free Software Foundation; either version 2, or (at your option) any |
| 15777 | + later version. |
| 15778 | + |
| 15779 | + In addition to the permissions in the GNU General Public License, the |
| 15780 | + Free Software Foundation gives you unlimited permission to link the |
| 15781 | + compiled version of this file with other programs, and to distribute |
| 15782 | + those programs without any restriction coming from the use of this |
| 15783 | + file. (The General Public License restrictions do apply in other |
| 15784 | + respects; for example, they cover modification of the file, and |
| 15785 | + distribution when not linked into another program.) |
| 15786 | + |
| 15787 | + This file is distributed in the hope that it will be useful, but |
| 15788 | + WITHOUT ANY WARRANTY; without even the implied warranty of |
| 15789 | + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 15790 | + General Public License for more details. |
| 15791 | + |
| 15792 | + You should have received a copy of the GNU General Public License |
| 15793 | + along with this program; see the file COPYING. If not, write to |
| 15794 | + the Free Software Foundation, 59 Temple Place - Suite 330, |
| 15795 | + Boston, MA 02111-1307, USA. |
| 15796 | + |
| 15797 | + As a special exception, if you link this library with files |
| 15798 | + compiled with GCC to produce an executable, this does not cause |
| 15799 | + the resulting executable to be covered by the GNU General Public License. |
| 15800 | + This exception does not however invalidate any other reasons why |
| 15801 | + the executable file might be covered by the GNU General Public License. |
| 15802 | +*/ |
| 15803 | + |
| 15804 | + |
| 15805 | + |
| 15806 | + |
| 15807 | + .file "crtn.asm" |
| 15808 | + |
| 15809 | + .section ".init" |
| 15810 | + ldm sp++, r6, pc |
| 15811 | + |
| 15812 | + .section ".fini" |
| 15813 | + ldm sp++, r6, pc |
| 15814 | + |
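The comment at the top of crti.asm describes the protocol these two files assume: crti.asm opens `_init`/`_fini`, object files contribute fragments to the `.init`/`.fini` sections, and crtn.asm closes both with `ldm sp++, r6, pc`. Below is a minimal C sketch of how a crt1-style startup is then expected to use the resulting functions; the stub names are illustrative, not taken from the patch.

```c
#include <stdio.h>
#include <stdlib.h>

/* Stubs standing in for the _init/_fini functions that crti.asm + crtn.asm
   assemble out of the collected .init/.fini fragments. */
static void init_stub(void) { puts("_init: run .init fragments"); }
static void fini_stub(void) { puts("_fini: run .fini fragments"); }

static int program_main(void) { puts("main"); return 0; }

int main(void)
{
  /* Per the crti.asm comment: _fini is atexit()ed and _init is called
     directly before main by the start code. */
  atexit(fini_stub);
  init_stub();
  return program_main();
}
```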
| 15815 | --- /dev/null |
| 15816 | +++ b/gcc/config/avr32/fpcp.md |
| 15817 | @@ -0,0 +1,551 @@ |
| 15818 | +;; AVR32 machine description file for Floating-Point instructions. |
| 15819 | +;; Copyright 2003-2006 Atmel Corporation. |
| 15820 | +;; |
| 15821 | +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| 15822 | +;; |
| 15823 | +;; This file is part of GCC. |
| 15824 | +;; |
| 15825 | +;; This program is free software; you can redistribute it and/or modify |
| 15826 | +;; it under the terms of the GNU General Public License as published by |
| 15827 | +;; the Free Software Foundation; either version 2 of the License, or |
| 15828 | +;; (at your option) any later version. |
| 15829 | +;; |
| 15830 | +;; This program is distributed in the hope that it will be useful, |
| 15831 | +;; but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 15832 | +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 15833 | +;; GNU General Public License for more details. |
| 15834 | +;; |
| 15835 | +;; You should have received a copy of the GNU General Public License |
| 15836 | +;; along with this program; if not, write to the Free Software |
| 15837 | +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
| 15838 | + |
| 15839 | +;; -*- Mode: Scheme -*- |
| 15840 | + |
| 15841 | +;;****************************************************************************** |
| 15842 | +;; Automaton pipeline description for floating-point coprocessor insns |
| 15843 | +;;****************************************************************************** |
| 15844 | +(define_cpu_unit "fid,fm1,fm2,fm3,fm4,fwb,fcmp,fcast" "avr32_ap") |
| 15845 | + |
| 15846 | +(define_insn_reservation "fmv_op" 1 |
| 15847 | + (and (eq_attr "pipeline" "ap") |
| 15848 | + (eq_attr "type" "fmv")) |
| 15849 | + "is,da,d,fid,fwb") |
| 15850 | + |
| 15851 | +(define_insn_reservation "fmul_op" 5 |
| 15852 | + (and (eq_attr "pipeline" "ap") |
| 15853 | + (eq_attr "type" "fmul")) |
| 15854 | + "is,da,d,fid,fm1,fm2,fm3,fm4,fwb") |
| 15855 | + |
| 15856 | +(define_insn_reservation "fcmps_op" 1 |
| 15857 | + (and (eq_attr "pipeline" "ap") |
| 15858 | + (eq_attr "type" "fcmps")) |
| 15859 | + "is,da,d,fid,fcmp") |
| 15860 | + |
| 15861 | +(define_insn_reservation "fcmpd_op" 2 |
| 15862 | + (and (eq_attr "pipeline" "ap") |
| 15863 | + (eq_attr "type" "fcmpd")) |
| 15864 | + "is,da,d,fid*2,fcmp") |
| 15865 | + |
| 15866 | +(define_insn_reservation "fcast_op" 3 |
| 15867 | + (and (eq_attr "pipeline" "ap") |
| 15868 | + (eq_attr "type" "fcast")) |
| 15869 | + "is,da,d,fid,fcmp,fcast,fwb") |
| 15870 | + |
| 15871 | +(define_insn_reservation "fmvcpu_op" 2 |
| 15872 | + (and (eq_attr "pipeline" "ap") |
| 15873 | + (eq_attr "type" "fmvcpu")) |
| 15874 | + "is,da,d") |
| 15875 | + |
| 15876 | +(define_insn_reservation "fldd_op" 1 |
| 15877 | + (and (eq_attr "pipeline" "ap") |
| 15878 | + (eq_attr "type" "fldd")) |
| 15879 | + "is,da,d,fwb") |
| 15880 | + |
| 15881 | +(define_insn_reservation "flds_op" 1 |
| 15882 | + (and (eq_attr "pipeline" "ap") |
| 15883 | + (eq_attr "type" "flds")) |
| 15884 | + "is,da,d,fwb") |
| 15885 | + |
| 15886 | +(define_insn_reservation "fsts_op" 0 |
| 15887 | + (and (eq_attr "pipeline" "ap") |
| 15888 | + (eq_attr "type" "fsts")) |
| 15889 | + "is,da*2,d") |
| 15890 | + |
| 15891 | +(define_insn_reservation "fstd_op" 0 |
| 15892 | + (and (eq_attr "pipeline" "ap") |
| 15893 | + (eq_attr "type" "fstd")) |
| 15894 | + "is,da*2,d") |
| 15895 | + |
| 15896 | + |
| 15897 | +(define_insn "*movsf_fpcp" |
| 15898 | + [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,r,f,m,r,r,r,m") |
| 15899 | + (match_operand:SF 1 "general_operand" " f,r,f,m,f,r,G,m,r"))] |
| 15900 | + "TARGET_HARD_FLOAT" |
| 15901 | + "@ |
| 15902 | + fmov.s\t%0, %1 |
| 15903 | + fmov.s\t%0, %1 |
| 15904 | + fmov.s\t%0, %1 |
| 15905 | + fld.s\t%0, %1 |
| 15906 | + fst.s\t%0, %1 |
| 15907 | + mov\t%0, %1 |
| 15908 | + mov\t%0, %1 |
| 15909 | + ld.w\t%0, %1 |
| 15910 | + st.w\t%0, %1" |
| 15911 | + [(set_attr "length" "4,4,4,4,4,2,4,4,4") |
| 15912 | + (set_attr "type" "fmv,flds,fmvcpu,flds,fsts,alu,alu,load,store")]) |
| 15913 | + |
| 15914 | +(define_insn_and_split "*movdf_fpcp" |
| 15915 | + [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,r,f,m,r,r,m") |
| 15916 | + (match_operand:DF 1 "general_operand" " f,r,f,m,f,r,m,r"))] |
| 15917 | + "TARGET_HARD_FLOAT" |
| 15918 | + "@ |
| 15919 | + fmov.d\t%0, %1 |
| 15920 | + fmov.d\t%0, %1 |
| 15921 | + fmov.d\t%0, %1 |
| 15922 | + fld.d\t%0, %1 |
| 15923 | + fst.d\t%0, %1 |
| 15924 | + mov\t%0, %1\;mov\t%m0, %m1 |
| 15925 | + ld.d\t%0, %1 |
| 15926 | + st.d\t%0, %1" |
| 15927 | + |
| 15928 | + "TARGET_HARD_FLOAT |
| 15929 | + && reload_completed |
| 15930 | + && (REG_P(operands[0]) && (REGNO_REG_CLASS(REGNO(operands[0])) == GENERAL_REGS)) |
| 15931 | + && (REG_P(operands[1]) && (REGNO_REG_CLASS(REGNO(operands[1])) == GENERAL_REGS))" |
| 15932 | + [(set (match_dup 0) (match_dup 1)) |
| 15933 | + (set (match_dup 2) (match_dup 3))] |
| 15934 | + " |
| 15935 | + { |
| 15936 | + operands[2] = gen_highpart (SImode, operands[0]); |
| 15937 | + operands[0] = gen_lowpart (SImode, operands[0]); |
| 15938 | + operands[3] = gen_highpart(SImode, operands[1]); |
| 15939 | + operands[1] = gen_lowpart(SImode, operands[1]); |
| 15940 | + } |
| 15941 | + " |
| 15942 | + |
| 15943 | + [(set_attr "length" "4,4,4,4,4,4,4,4") |
| 15944 | + (set_attr "type" "fmv,fldd,fmvcpu,fldd,fstd,alu2,load2,store2")]) |
| 15945 | + |
| 15946 | + |
| 15947 | +(define_insn "mulsf3" |
| 15948 | + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| 15949 | + (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f") |
| 15950 | + (match_operand:SF 2 "avr32_fp_register_operand" "f")))] |
| 15951 | + "TARGET_HARD_FLOAT" |
| 15952 | + "fmul.s\t%0, %1, %2" |
| 15953 | + [(set_attr "length" "4") |
| 15954 | + (set_attr "type" "fmul")]) |
| 15955 | + |
| 15956 | +(define_insn "nmulsf3" |
| 15957 | + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| 15958 | + (neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f") |
| 15959 | + (match_operand:SF 2 "avr32_fp_register_operand" "f"))))] |
| 15960 | + "TARGET_HARD_FLOAT" |
| 15961 | + "fnmul.s\t%0, %1, %2" |
| 15962 | + [(set_attr "length" "4") |
| 15963 | + (set_attr "type" "fmul")]) |
| 15964 | + |
| 15965 | +(define_peephole2 |
| 15966 | + [(set (match_operand:SF 0 "avr32_fp_register_operand" "") |
| 15967 | + (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "") |
| 15968 | + (match_operand:SF 2 "avr32_fp_register_operand" ""))) |
| 15969 | + (set (match_operand:SF 3 "avr32_fp_register_operand" "") |
| 15970 | + (neg:SF (match_dup 0)))] |
| 15971 | + "TARGET_HARD_FLOAT && |
| 15972 | + (peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[3]) == REGNO(operands[0])))" |
| 15973 | + [(set (match_dup 3) |
| 15974 | + (neg:SF (mult:SF (match_dup 1) |
| 15975 | + (match_dup 2))))] |
| 15976 | +) |
| 15977 | + |
| 15978 | + |
| 15979 | +(define_insn "macsf3" |
| 15980 | + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| 15981 | + (plus:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f") |
| 15982 | + (match_operand:SF 2 "avr32_fp_register_operand" "f")) |
| 15983 | + (match_operand:SF 3 "avr32_fp_register_operand" "0")))] |
| 15984 | + "TARGET_HARD_FLOAT" |
| 15985 | + "fmac.s\t%0, %1, %2" |
| 15986 | + [(set_attr "length" "4") |
| 15987 | + (set_attr "type" "fmul")]) |
| 15988 | + |
| 15989 | +(define_insn "nmacsf3" |
| 15990 | + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| 15991 | + (plus:SF (neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f") |
| 15992 | + (match_operand:SF 2 "avr32_fp_register_operand" "f"))) |
| 15993 | + (match_operand:SF 3 "avr32_fp_register_operand" "0")))] |
| 15994 | + "TARGET_HARD_FLOAT" |
| 15995 | + "fnmac.s\t%0, %1, %2" |
| 15996 | + [(set_attr "length" "4") |
| 15997 | + (set_attr "type" "fmul")]) |
| 15998 | + |
| 15999 | +(define_peephole2 |
| 16000 | + [(set (match_operand:SF 0 "avr32_fp_register_operand" "") |
| 16001 | + (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "") |
| 16002 | + (match_operand:SF 2 "avr32_fp_register_operand" ""))) |
| 16003 | + (set (match_operand:SF 3 "avr32_fp_register_operand" "") |
| 16004 | + (minus:SF |
| 16005 | + (match_dup 3) |
| 16006 | + (match_dup 0)))] |
| 16007 | + "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])" |
| 16008 | + [(set (match_dup 3) |
| 16009 | + (plus:SF (neg:SF (mult:SF (match_dup 1) |
| 16010 | + (match_dup 2))) |
| 16011 | + (match_dup 3)))] |
| 16012 | +) |
| 16013 | + |
| 16014 | + |
| 16015 | +(define_insn "msubacsf3" |
| 16016 | + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| 16017 | + (minus:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f") |
| 16018 | + (match_operand:SF 2 "avr32_fp_register_operand" "f")) |
| 16019 | + (match_operand:SF 3 "avr32_fp_register_operand" "0")))] |
| 16020 | + "TARGET_HARD_FLOAT" |
| 16021 | + "fmsc.s\t%0, %1, %2" |
| 16022 | + [(set_attr "length" "4") |
| 16023 | + (set_attr "type" "fmul")]) |
| 16024 | + |
| 16025 | +(define_peephole2 |
| 16026 | + [(set (match_operand:SF 0 "avr32_fp_register_operand" "") |
| 16027 | + (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "") |
| 16028 | + (match_operand:SF 2 "avr32_fp_register_operand" ""))) |
| 16029 | + (set (match_operand:SF 3 "avr32_fp_register_operand" "") |
| 16030 | + (minus:SF |
| 16031 | + (match_dup 0) |
| 16032 | + (match_dup 3)))] |
| 16033 | + "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])" |
| 16034 | + [(set (match_dup 3) |
| 16035 | + (minus:SF (mult:SF (match_dup 1) |
| 16036 | + (match_dup 2)) |
| 16037 | + (match_dup 3)))] |
| 16038 | +) |
| 16039 | + |
| 16040 | +(define_insn "nmsubacsf3" |
| 16041 | + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| 16042 | + (minus:SF (neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f") |
| 16043 | + (match_operand:SF 2 "avr32_fp_register_operand" "f"))) |
| 16044 | + (match_operand:SF 3 "avr32_fp_register_operand" "0")))] |
| 16045 | + "TARGET_HARD_FLOAT" |
| 16046 | + "fnmsc.s\t%0, %1, %2" |
| 16047 | + [(set_attr "length" "4") |
| 16048 | + (set_attr "type" "fmul")]) |
| 16049 | + |
| 16050 | + |
| 16051 | + |
| 16052 | +(define_insn "addsf3" |
| 16053 | + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| 16054 | + (plus:SF (match_operand:SF 1 "avr32_fp_register_operand" "f") |
| 16055 | + (match_operand:SF 2 "avr32_fp_register_operand" "f")))] |
| 16056 | + "TARGET_HARD_FLOAT" |
| 16057 | + "fadd.s\t%0, %1, %2" |
| 16058 | + [(set_attr "length" "4") |
| 16059 | + (set_attr "type" "fmul")]) |
| 16060 | + |
| 16061 | +(define_insn "subsf3" |
| 16062 | + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| 16063 | + (minus:SF (match_operand:SF 1 "avr32_fp_register_operand" "f") |
| 16064 | + (match_operand:SF 2 "avr32_fp_register_operand" "f")))] |
| 16065 | + "TARGET_HARD_FLOAT" |
| 16066 | + "fsub.s\t%0, %1, %2" |
| 16067 | + [(set_attr "length" "4") |
| 16068 | + (set_attr "type" "fmul")]) |
| 16069 | + |
| 16070 | + |
| 16071 | +(define_insn "negsf2" |
| 16072 | + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| 16073 | + (neg:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")))] |
| 16074 | + "TARGET_HARD_FLOAT" |
| 16075 | + "fneg.s\t%0, %1" |
| 16076 | + [(set_attr "length" "4") |
| 16077 | + (set_attr "type" "fmv")]) |
| 16078 | + |
| 16079 | +(define_insn "abssf2" |
| 16080 | + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| 16081 | + (abs:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")))] |
| 16082 | + "TARGET_HARD_FLOAT" |
| 16083 | + "fabs.s\t%0, %1" |
| 16084 | + [(set_attr "length" "4") |
| 16085 | + (set_attr "type" "fmv")]) |
| 16086 | + |
| 16087 | +(define_insn "truncdfsf2" |
| 16088 | + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| 16089 | + (float_truncate:SF |
| 16090 | + (match_operand:DF 1 "avr32_fp_register_operand" "f")))] |
| 16091 | + "TARGET_HARD_FLOAT" |
| 16092 | + "fcastd.s\t%0, %1" |
| 16093 | + [(set_attr "length" "4") |
| 16094 | + (set_attr "type" "fcast")]) |
| 16095 | + |
| 16096 | +(define_insn "extendsfdf2" |
| 16097 | + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| 16098 | + (float_extend:DF |
| 16099 | + (match_operand:SF 1 "avr32_fp_register_operand" "f")))] |
| 16100 | + "TARGET_HARD_FLOAT" |
| 16101 | + "fcasts.d\t%0, %1" |
| 16102 | + [(set_attr "length" "4") |
| 16103 | + (set_attr "type" "fcast")]) |
| 16104 | + |
| 16105 | +(define_insn "muldf3" |
| 16106 | + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| 16107 | + (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f") |
| 16108 | + (match_operand:DF 2 "avr32_fp_register_operand" "f")))] |
| 16109 | + "TARGET_HARD_FLOAT" |
| 16110 | + "fmul.d\t%0, %1, %2" |
| 16111 | + [(set_attr "length" "4") |
| 16112 | + (set_attr "type" "fmul")]) |
| 16113 | + |
| 16114 | +(define_insn "nmuldf3" |
| 16115 | + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| 16116 | + (neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f") |
| 16117 | + (match_operand:DF 2 "avr32_fp_register_operand" "f"))))] |
| 16118 | + "TARGET_HARD_FLOAT" |
| 16119 | + "fnmul.d\t%0, %1, %2" |
| 16120 | + [(set_attr "length" "4") |
| 16121 | + (set_attr "type" "fmul")]) |
| 16122 | + |
| 16123 | +(define_peephole2 |
| 16124 | + [(set (match_operand:DF 0 "avr32_fp_register_operand" "") |
| 16125 | + (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "") |
| 16126 | + (match_operand:DF 2 "avr32_fp_register_operand" ""))) |
| 16127 | + (set (match_operand:DF 3 "avr32_fp_register_operand" "") |
| 16128 | + (neg:DF (match_dup 0)))] |
| 16129 | + "TARGET_HARD_FLOAT && |
| 16130 | + (peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[3]) == REGNO(operands[0])))" |
| 16131 | + [(set (match_dup 3) |
| 16132 | + (neg:DF (mult:DF (match_dup 1) |
| 16133 | + (match_dup 2))))] |
| 16134 | +) |
| 16135 | + |
| 16136 | +(define_insn "macdf3" |
| 16137 | + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| 16138 | + (plus:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f") |
| 16139 | + (match_operand:DF 2 "avr32_fp_register_operand" "f")) |
| 16140 | + (match_operand:DF 3 "avr32_fp_register_operand" "0")))] |
| 16141 | + "TARGET_HARD_FLOAT" |
| 16142 | + "fmac.d\t%0, %1, %2" |
| 16143 | + [(set_attr "length" "4") |
| 16144 | + (set_attr "type" "fmul")]) |
| 16145 | + |
| 16146 | +(define_insn "msubacdf3" |
| 16147 | + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| 16148 | + (minus:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f") |
| 16149 | + (match_operand:DF 2 "avr32_fp_register_operand" "f")) |
| 16150 | + (match_operand:DF 3 "avr32_fp_register_operand" "0")))] |
| 16151 | + "TARGET_HARD_FLOAT" |
| 16152 | + "fmsc.d\t%0, %1, %2" |
| 16153 | + [(set_attr "length" "4") |
| 16154 | + (set_attr "type" "fmul")]) |
| 16155 | + |
| 16156 | +(define_peephole2 |
| 16157 | + [(set (match_operand:DF 0 "avr32_fp_register_operand" "") |
| 16158 | + (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "") |
| 16159 | + (match_operand:DF 2 "avr32_fp_register_operand" ""))) |
| 16160 | + (set (match_operand:DF 3 "avr32_fp_register_operand" "") |
| 16161 | + (minus:DF |
| 16162 | + (match_dup 0) |
| 16163 | + (match_dup 3)))] |
| 16164 | + "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])" |
| 16165 | + [(set (match_dup 3) |
| 16166 | + (minus:DF (mult:DF (match_dup 1) |
| 16167 | + (match_dup 2)) |
| 16168 | + (match_dup 3)))] |
| 16169 | + ) |
| 16170 | + |
| 16171 | +(define_insn "nmsubacdf3" |
| 16172 | + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| 16173 | + (minus:DF (neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f") |
| 16174 | + (match_operand:DF 2 "avr32_fp_register_operand" "f"))) |
| 16175 | + (match_operand:DF 3 "avr32_fp_register_operand" "0")))] |
| 16176 | + "TARGET_HARD_FLOAT" |
| 16177 | + "fnmsc.d\t%0, %1, %2" |
| 16178 | + [(set_attr "length" "4") |
| 16179 | + (set_attr "type" "fmul")]) |
| 16180 | + |
| 16181 | +(define_insn "nmacdf3" |
| 16182 | + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| 16183 | + (plus:DF (neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f") |
| 16184 | + (match_operand:DF 2 "avr32_fp_register_operand" "f"))) |
| 16185 | + (match_operand:DF 3 "avr32_fp_register_operand" "0")))] |
| 16186 | + "TARGET_HARD_FLOAT" |
| 16187 | + "fnmac.d\t%0, %1, %2" |
| 16188 | + [(set_attr "length" "4") |
| 16189 | + (set_attr "type" "fmul")]) |
| 16190 | + |
| 16191 | +(define_peephole2 |
| 16192 | + [(set (match_operand:DF 0 "avr32_fp_register_operand" "") |
| 16193 | + (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "") |
| 16194 | + (match_operand:DF 2 "avr32_fp_register_operand" ""))) |
| 16195 | + (set (match_operand:DF 3 "avr32_fp_register_operand" "") |
| 16196 | + (minus:DF |
| 16197 | + (match_dup 3) |
| 16198 | + (match_dup 0)))] |
| 16199 | + "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])" |
| 16200 | + [(set (match_dup 3) |
| 16201 | + (plus:DF (neg:DF (mult:DF (match_dup 1) |
| 16202 | + (match_dup 2))) |
| 16203 | + (match_dup 3)))] |
| 16204 | +) |
| 16205 | + |
| 16206 | +(define_insn "adddf3" |
| 16207 | + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| 16208 | + (plus:DF (match_operand:DF 1 "avr32_fp_register_operand" "f") |
| 16209 | + (match_operand:DF 2 "avr32_fp_register_operand" "f")))] |
| 16210 | + "TARGET_HARD_FLOAT" |
| 16211 | + "fadd.d\t%0, %1, %2" |
| 16212 | + [(set_attr "length" "4") |
| 16213 | + (set_attr "type" "fmul")]) |
| 16214 | + |
| 16215 | +(define_insn "subdf3" |
| 16216 | + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| 16217 | + (minus:DF (match_operand:DF 1 "avr32_fp_register_operand" "f") |
| 16218 | + (match_operand:DF 2 "avr32_fp_register_operand" "f")))] |
| 16219 | + "TARGET_HARD_FLOAT" |
| 16220 | + "fsub.d\t%0, %1, %2" |
| 16221 | + [(set_attr "length" "4") |
| 16222 | + (set_attr "type" "fmul")]) |
| 16223 | + |
| 16224 | +(define_insn "negdf2" |
| 16225 | + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| 16226 | + (neg:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")))] |
| 16227 | + "TARGET_HARD_FLOAT" |
| 16228 | + "fneg.d\t%0, %1" |
| 16229 | + [(set_attr "length" "4") |
| 16230 | + (set_attr "type" "fmv")]) |
| 16231 | + |
| 16232 | +(define_insn "absdf2" |
| 16233 | + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| 16234 | + (abs:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")))] |
| 16235 | + "TARGET_HARD_FLOAT" |
| 16236 | + "fabs.d\t%0, %1" |
| 16237 | + [(set_attr "length" "4") |
| 16238 | + (set_attr "type" "fmv")]) |
| 16239 | + |
| 16240 | + |
| 16241 | +(define_expand "cmpdf" |
| 16242 | + [(set (cc0) |
| 16243 | + (compare:DF |
| 16244 | + (match_operand:DF 0 "general_operand" "") |
| 16245 | + (match_operand:DF 1 "general_operand" "")))] |
| 16246 | + "TARGET_HARD_FLOAT" |
| 16247 | + "{ |
| 16248 | + rtx tmpreg; |
| 16249 | + if ( !REG_P(operands[0]) ) |
| 16250 | + operands[0] = force_reg(DFmode, operands[0]); |
| 16251 | + |
| 16252 | + if ( !REG_P(operands[1]) ) |
| 16253 | + operands[1] = force_reg(DFmode, operands[1]); |
| 16254 | + |
| 16255 | + avr32_compare_op0 = operands[0]; |
| 16256 | + avr32_compare_op1 = operands[1]; |
| 16257 | + |
| 16258 | + emit_insn(gen_cmpdf_internal(operands[0], operands[1])); |
| 16259 | + |
| 16260 | + tmpreg = gen_reg_rtx(SImode); |
| 16261 | + emit_insn(gen_fpcc_to_reg(tmpreg)); |
| 16262 | + emit_insn(gen_reg_to_cc(tmpreg)); |
| 16263 | + |
| 16264 | + DONE; |
| 16265 | + }" |
| 16266 | +) |
| 16267 | + |
| 16268 | +(define_insn "cmpdf_internal" |
| 16269 | + [(set (reg:CC FPCC_REGNUM) |
| 16270 | + (compare:CC |
| 16271 | + (match_operand:DF 0 "avr32_fp_register_operand" "f") |
| 16272 | + (match_operand:DF 1 "avr32_fp_register_operand" "f")))] |
| 16273 | + "TARGET_HARD_FLOAT" |
| 16274 | + { |
| 16275 | + if (!rtx_equal_p(cc_prev_status.mdep.fpvalue, SET_SRC(PATTERN (insn))) ) |
| 16276 | + return "fcmp.d\t%0, %1"; |
| 16277 | + return ""; |
| 16278 | + } |
| 16279 | + [(set_attr "length" "4") |
| 16280 | + (set_attr "type" "fcmpd") |
| 16281 | + (set_attr "cc" "fpcompare")]) |
| 16282 | + |
| 16283 | +(define_expand "cmpsf" |
| 16284 | + [(set (cc0) |
| 16285 | + (compare:SF |
| 16286 | + (match_operand:SF 0 "general_operand" "") |
| 16287 | + (match_operand:SF 1 "general_operand" "")))] |
| 16288 | + "TARGET_HARD_FLOAT" |
| 16289 | + "{ |
| 16290 | + rtx tmpreg; |
| 16291 | + if ( !REG_P(operands[0]) ) |
| 16292 | + operands[0] = force_reg(SFmode, operands[0]); |
| 16293 | + |
| 16294 | + if ( !REG_P(operands[1]) ) |
| 16295 | + operands[1] = force_reg(SFmode, operands[1]); |
| 16296 | + |
| 16297 | + avr32_compare_op0 = operands[0]; |
| 16298 | + avr32_compare_op1 = operands[1]; |
| 16299 | + |
| 16300 | + emit_insn(gen_cmpsf_internal(operands[0], operands[1])); |
| 16301 | + |
| 16302 | + tmpreg = gen_reg_rtx(SImode); |
| 16303 | + emit_insn(gen_fpcc_to_reg(tmpreg)); |
| 16304 | + emit_insn(gen_reg_to_cc(tmpreg)); |
| 16305 | + |
| 16306 | + DONE; |
| 16307 | + }" |
| 16308 | +) |
| 16309 | + |
| 16310 | +(define_insn "cmpsf_internal" |
| 16311 | + [(set (reg:CC FPCC_REGNUM) |
| 16312 | + (compare:CC |
| 16313 | + (match_operand:SF 0 "avr32_fp_register_operand" "f") |
| 16314 | + (match_operand:SF 1 "avr32_fp_register_operand" "f")))] |
| 16315 | + "TARGET_HARD_FLOAT" |
| 16316 | + { |
| 16317 | + if (!rtx_equal_p(cc_prev_status.mdep.fpvalue, SET_SRC(PATTERN (insn))) ) |
| 16318 | + return "fcmp.s\t%0, %1"; |
| 16319 | + return ""; |
| 16320 | + } |
| 16321 | + [(set_attr "length" "4") |
| 16322 | + (set_attr "type" "fcmps") |
| 16323 | + (set_attr "cc" "fpcompare")]) |
| 16324 | + |
| 16325 | +(define_insn "fpcc_to_reg" |
| 16326 | + [(set (match_operand:SI 0 "register_operand" "=r") |
| 16327 | + (unspec:SI [(reg:CC FPCC_REGNUM)] |
| 16328 | + UNSPEC_FPCC_TO_REG))] |
| 16329 | + "TARGET_HARD_FLOAT" |
| 16330 | + "fmov.s\t%0, fsr" |
| 16331 | + [(set_attr "length" "4") |
| 16332 | + (set_attr "type" "fmvcpu")]) |
| 16333 | + |
| 16334 | +(define_insn "reg_to_cc" |
| 16335 | + [(set (cc0) |
| 16336 | + (unspec:SI [(match_operand:SI 0 "register_operand" "r")] |
| 16337 | + UNSPEC_REG_TO_CC))] |
| 16338 | + "TARGET_HARD_FLOAT" |
| 16339 | + "musfr\t%0" |
| 16340 | + [(set_attr "length" "2") |
| 16341 | + (set_attr "type" "alu") |
| 16342 | + (set_attr "cc" "from_fpcc")]) |
| 16343 | + |
| 16344 | +(define_insn "stm_fp" |
| 16345 | + [(unspec [(match_operand 0 "register_operand" "r") |
| 16346 | + (match_operand 1 "const_int_operand" "") |
| 16347 | + (match_operand 2 "const_int_operand" "")] |
| 16348 | + UNSPEC_STMFP)] |
| 16349 | + "TARGET_HARD_FLOAT" |
| 16350 | + { |
| 16351 | + int cop_reglist = INTVAL(operands[1]); |
| 16352 | + /* Presumably the double-word form is meant for lists that include the upper coprocessor registers; check that case first so the branch is reachable. */ |
| 16353 | + if ( cop_reglist & ~0xff ){ |
| 16354 | + operands[1] = GEN_INT(cop_reglist & ~0xff); |
| 16355 | + if (INTVAL(operands[2]) != 0) |
| 16356 | + return "stcm.d\tcp0, --%0, %D1"; |
| 16357 | + else |
| 16358 | + return "stcm.d\tcp0, %0, %D1"; |
| 16359 | + } |
| 16360 | + |
| 16361 | + if (INTVAL(operands[2]) != 0) |
| 16362 | + return "stcm.w\tcp0, --%0, %C1"; |
| 16363 | + else |
| 16364 | + return "stcm.w\tcp0, %0, %C1"; |
| 16365 | + } |
| 16366 | + [(set_attr "type" "fstm") |
| 16367 | + (set_attr "length" "4") |
| 16368 | + (set_attr "cc" "none")]) |
| 16369 | --- /dev/null |
| 16370 | +++ b/gcc/config/avr32/lib1funcs.S |
| 16371 | @@ -0,0 +1,1678 @@ |
| 16372 | +/*#define __IEEE_LARGE_FLOATS__*/ |
| 16373 | + |
| 16374 | +/* Adjust the unpacked double number if it is a subnormal number. |
| 16375 | + The exponent and mantissa pair are stored |
| 16376 | + in [mant_hi,mant_lo] and [exp]. A register with the correct sign bit in |
| 16377 | + the MSB is passed in [sign]. Needs two scratch |
| 16378 | + registers [scratch1] and [scratch2]. An adjusted and packed double float |
| 16379 | + is present in [mant_hi,mant_lo] after the macro has executed. */ |
| 16380 | +.macro adjust_subnormal_df exp, mant_lo, mant_hi, sign, scratch1, scratch2 |
| 16381 | + /* We have an exponent which is <= 0, indicating a subnormal number. |
| 16382 | + Since it should be stored as if the exponent were 1 (although the |
| 16383 | + exponent field is all zeros to indicate a subnormal number), |
| 16384 | + we have to shift the mantissa down to its correct position. */ |
| 16385 | + neg \exp |
| 16386 | + sub \exp,-1 /* amount to shift down */ |
| 16387 | + cp.w \exp,54 |
| 16388 | + brlo 50f /* if more than 53 shift steps, the |
| 16389 | + entire mantissa will disappear |
| 16390 | + without any rounding taking place */ |
| 16391 | + mov \mant_hi, 0 |
| 16392 | + mov \mant_lo, 0 |
| 16393 | + rjmp 52f |
| 16394 | +50: |
| 16395 | + sub \exp,-10 /* do the shift to position the |
| 16396 | + mantissa at the same time |
| 16397 | + note! this does not include the |
| 16398 | + final 1 step shift to add the sign */ |
| 16399 | + |
| 16400 | + /* when shifting, save all shifted out bits in [scratch2]. we may need to |
| 16401 | + look at them to make correct rounding. */ |
| 16402 | + |
| 16403 | + rsub \scratch1,\exp,32 /* get inverted shift count */ |
| 16404 | + cp.w \exp,32 /* handle shifts >= 32 separately */ |
| 16405 | + brhs 51f |
| 16406 | + |
| 16407 | + /* small (<32) shift amount, both words are part of the shift */ |
| 16408 | + lsl \scratch2,\mant_lo,\scratch1 /* save bits to shift out from lsw*/ |
| 16409 | + lsl \scratch1,\mant_hi,\scratch1 /* get bits from msw destined for lsw*/ |
| 16410 | + lsr \mant_lo,\mant_lo,\exp /* shift down lsw */ |
| 16411 | + lsr \mant_hi,\mant_hi,\exp /* shift down msw */ |
| 16412 | + or \mant_hi,\scratch1 /* add bits from msw with prepared lsw */ |
| 16413 | + rjmp 50f |
| 16414 | + |
| 16415 | + /* large (>=32) shift amount, only lsw will have bits left after shift. |
| 16416 | + note that shift operations will use ((shift count) mod 32) so |
| 16417 | + we do not need to subtract 32 from shift count. */ |
| 16418 | +51: |
| 16419 | + lsl \scratch2,\mant_hi,\scratch1 /* save bits to shift out from msw */ |
| 16420 | + or \scratch2,\mant_lo /* also save all bits from lsw */ |
| 16421 | + mov \mant_lo,\mant_hi /* msw -> lsw (i.e. "shift 32 first") */ |
| 16422 | + mov \mant_hi,0 /* clear msw */ |
| 16423 | + lsr \mant_lo,\mant_lo,\exp /* make rest of shift inside lsw */ |
| 16424 | + |
| 16425 | +50: |
| 16426 | + /* result is almost ready to return, except that least significant bit |
| 16427 | + and the part we already shifted out may cause the result to be |
| 16428 | + rounded */ |
| 16429 | + bld \mant_lo,0 /* get bit to be shifted out */ |
| 16430 | + brcc 51f /* if bit was 0, no rounding */ |
| 16431 | + |
| 16432 | + /* msb of part to remove is 1, so rounding depends on rest of bits */ |
| 16433 | + tst \scratch2,\scratch2 /* get shifted out tail */ |
| 16434 | + brne 50f /* if rest > 0, do round */ |
| 16435 | + bld \mant_lo,1 /* we have to look at lsb in result */ |
| 16436 | + brcc 51f /* if lsb is 0, don't round */ |
| 16437 | + |
| 16438 | +50: |
| 16439 | + /* subnormal result requires rounding |
| 16440 | + rounding may cause subnormal to become smallest normal number |
| 16441 | + luckily, smallest normal number has exactly the representation |
| 16442 | + we got by rippling a one bit up from mantissa into exponent field. */ |
| 16443 | + sub \mant_lo,-1 |
| 16444 | + subcc \mant_hi,-1 |
| 16445 | + |
| 16446 | +51: |
| 16447 | + /* shift and return packed double with correct sign */ |
| 16448 | + rol \sign |
| 16449 | + ror \mant_hi |
| 16450 | + ror \mant_lo |
| 16451 | +52: |
| 16452 | +.endm |
| 16453 | + |
| 16454 | + |
| 16455 | +/* Adjust subnormal single float number with exponent [exp] |
| 16456 | + and mantissa [mant] and round. */ |
| 16457 | +.macro adjust_subnormal_sf sf, exp, mant, sign, scratch |
| 16458 | + /* subnormal number */ |
| 16459 | + rsub \exp,\exp, 1 /* shift amount */ |
| 16460 | + cp.w \exp, 25 |
| 16461 | + movhs \mant, 0 |
| 16462 | + brhs 90f /* Return zero */ |
| 16463 | + rsub \scratch, \exp, 32 |
| 16464 | + lsl \scratch, \mant,\scratch/* Check if any bits are set |
| 16465 | + in the discarded part of the mantissa */ |
| 16466 | + srne \scratch /* If so set the lsb of the shifted mantissa */ |
| 16467 | + lsr \mant,\mant,\exp /* Shift the mantissa */ |
| 16468 | + or \mant, \scratch /* Round lsb if any bits were shifted out */ |
| 16469 | + /* Rounding: for an explanation, see round_sf. */ |
| 16470 | + mov \scratch, 0x7f /* Set rounding constant */ |
| 16471 | + bld \mant, 8 |
| 16472 | + subeq \scratch, -1 /* For odd numbers use rounding constant 0x80 */ |
| 16473 | + add \mant, \scratch /* Add rounding constant to mantissa */ |
| 16474 | + /* We can't overflow because mantissa is at least shifted one position |
| 16475 | + to the right so the implicit bit is zero. We can however get the implicit |
| 16476 | + bit set after rounding which means that we have the lowest normal number |
| 16477 | + but this is ok since this bit has the same position as the LSB of the |
| 16478 | + exponent */ |
| 16479 | + lsr \sf, \mant, 7 |
| 16480 | + /* Rotate in sign */ |
| 16481 | + lsl \sign, 1 |
| 16482 | + ror \sf |
| 16483 | +90: |
| 16484 | +.endm |
| 16485 | + |
| 16486 | + |
| 16487 | +/* Round the unpacked df number with exponent [exp] and |
| 16488 | + mantissa [mant_hi, mant_lo]. Uses scratch register |
| 16489 | + [scratch] */ |
| 16490 | +.macro round_df exp, mant_lo, mant_hi, scratch |
| 16491 | + mov \scratch, 0x3ff /* Rounding constant */ |
| 16492 | + bld \mant_lo,11 /* Check if lsb in the final result is |
| 16493 | + set */ |
| 16494 | + subeq \scratch, -1 /* Adjust rounding constant to 0x400 |
| 16495 | + if rounding 0.5 upwards */ |
| 16496 | + add \mant_lo, \scratch /* Round */ |
| 16497 | + acr \mant_hi /* If overflowing we know that |
| 16498 | + we have all zeros in the bits not |
| 16499 | + scaled out so we can leave them |
| 16500 | + but we must increase the exponent by |
| 16501 | + two since we had an implicit bit |
| 16502 | + which is lost + the extra overflow bit */ |
| 16503 | + subcs \exp, -2 /* Update exponent */ |
| 16504 | +.endm |
| 16505 | + |
| 16506 | +/* Round single float number stored in [mant] and [exp] */ |
| 16507 | +.macro round_sf exp, mant, scratch |
| 16508 | + /* Round: |
| 16509 | + For 0.5 we round to nearest even integer |
| 16510 | + for all other cases we round to nearest integer. |
| 16511 | + This means that if the digit left of the "point" (.) |
| 16512 | + is 1 we can add 0x80 to the mantissa since the |
| 16513 | + corner case 0x180 will round up to 0x200. If the |
| 16514 | + digit left of the "point" is 0 we will have to |
| 16515 | + add 0x7f since this will give 0xff and hence a |
| 16516 | + truncation/rounding downwards for the corner |
| 16517 | + case when the 9 lowest bits are 0x080 */ |
| 16518 | + mov \scratch, 0x7f /* Set rounding constant */ |
| 16519 | + /* Check if the mantissa is even or odd */ |
| 16520 | + bld \mant, 8 |
| 16521 | + subeq \scratch, -1 /* Rounding constant should be 0x80 */ |
| 16522 | + add \mant, \scratch |
| 16523 | + subcs \exp, -2 /* Adjust exponent if we overflowed */ |
| 16524 | +.endm |
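The `round_sf` comment above describes round-to-nearest-even done with a data-dependent constant: add 0x80 when the bit just left of the binary point is 1, 0x7f when it is 0, then shift the guard bits away. A small C check of that trick follows; the helper name and the fixed 8 guard bits are assumptions for illustration, and the macro additionally bumps the exponent on overflow via `subcs`.

```c
#include <stdint.h>
#include <stdio.h>

/* Round a value carrying 8 guard bits to nearest, ties to even:
   keep-LSB odd -> add 0x80 (tie rounds up), keep-LSB even -> add 0x7f. */
static uint32_t round_guard8(uint32_t v)
{
  uint32_t k = (v & 0x100) ? 0x80 : 0x7f;
  return (v + k) >> 8;
}

int main(void)
{
  printf("%u\n", (unsigned) round_guard8(0x180)); /* 1.5 -> 2 (tie, to even) */
  printf("%u\n", (unsigned) round_guard8(0x080)); /* 0.5 -> 0 (tie, to even) */
  printf("%u\n", (unsigned) round_guard8(0x181)); /* just above 1.5 -> 2 */
  printf("%u\n", (unsigned) round_guard8(0x17f)); /* just below 1.5 -> 1 */
  return 0;
}
```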
| 16525 | + |
| 16526 | +/* Scale mantissa [mant_hi, mant_lo] with amount [shift_count]. |
| 16527 | + Uses scratch registers [scratch1] and [scratch2] */ |
| 16528 | +.macro scale_df shift_count, mant_lo, mant_hi, scratch1, scratch2 |
| 16529 | + /* Scale [mant_hi, mant_lo] with shift_amount. |
| 16530 | + Must not forget the sticky bits we intend to shift out. */ |
| 16531 | + |
| 16532 | + rsub \scratch1,\shift_count,32/* get (32 - shift count) |
| 16533 | + (if shift count > 32 we get a |
| 16534 | + negative value, but that will |
| 16535 | + work as well in the code below.) */ |
| 16536 | + |
| 16537 | + cp.w \shift_count,32 /* handle shifts >= 32 separately */ |
| 16538 | + brhs 70f |
| 16539 | + |
| 16540 | + /* small (<32) shift amount, both words are part of the shift |
| 16541 | + first remember whether part that is lost contains any 1 bits ... */ |
| 16542 | + lsl \scratch2,\mant_lo,\scratch1 /*shift away bits that are part of |
| 16543 | + final mantissa. only part that goes |
| 16544 | + to scratch2 are bits that will be lost */ |
| 16545 | + |
| 16546 | + /* ... and now to the actual shift */ |
| 16547 | + lsl \scratch1,\mant_hi,\scratch1 /* get bits from msw destined for lsw*/ |
| 16548 | + lsr \mant_lo,\mant_lo,\shift_count /* shift down lsw of mantissa */ |
| 16549 | + lsr \mant_hi,\mant_hi,\shift_count /* shift down msw of mantissa */ |
| 16550 | + or \mant_lo,\scratch1 /* combine these bits with prepared lsw*/ |
| 16551 | + rjmp 71f |
| 16552 | + |
| 16553 | + /* large (>=32) shift amount, only lsw will have bits left after shift. |
| 16554 | + note that shift operations will use ((shift count) mod 32) so |
| 16555 | + we do not need to subtract 32 from shift count. */ |
| 16556 | +70: |
| 16557 | + /* first remember whether part that is lost contains any 1 bits ... */ |
| 16558 | + lsl \scratch2,\mant_hi,\scratch1 /* save all lost bits from msw */ |
| 16559 | + or \scratch2,\mant_lo /* also save lost bits (all) from lsw |
| 16560 | + now scratch2<>0 if we lose any bits */ |
| 16561 | + |
| 16562 | + /* ... and now to the actual shift */ |
| 16563 | + mov \mant_lo,\mant_hi /* msw -> lsw (i.e. "shift 32 first")*/ |
| 16564 | + mov \mant_hi,0 /* clear msw */ |
| 16565 | + lsr \mant_lo,\mant_lo,\shift_count /* make rest of shift inside lsw*/ |
| 16566 | + |
| 16567 | +71: |
| 16568 | + cp.w \scratch2,0 /* if any '1' bit in part we lost ...*/ |
| 16569 | + breq 70f |
| 16570 | + |
| 16571 | + sbr \mant_lo,0 /* ... we need to set sticky bit*/ |
| 16572 | +70: |
| 16573 | +.endm |
| 16574 | + |
| 16575 | +/* Unpack exponent and mantissa from the double number |
| 16576 | + stored in [df_hi,df_lo]. The exponent is stored in [exp] |
| 16577 | + while the mantissa is stored in [df_hi,df_lo]. */ |
| 16578 | + |
| 16579 | +.macro unpack_df exp, df_lo, df_hi |
| 16580 | + lsr \exp, \df_hi,21 /* Extract exponent */ |
| 16581 | + lsl \df_hi,10 /* Get mantissa */ |
| 16582 | + or \df_hi,\df_hi,\df_lo>>21 |
| 16583 | + lsl \df_lo,11 |
| 16584 | + |
| 16585 | + neg \exp /* Fix implicit bit */ |
| 16586 | + bst \df_hi,31 |
| 16587 | + subeq \exp,1 |
| 16588 | + neg \exp /* negate back exponent */ |
| 16589 | + .endm |
| 16590 | + |
| 16591 | +/* Unpack exponent and mantissa from the single float number |
| 16592 | + stored in [sf]. The exponent is stored in [exp] |
| 16593 | + while the mantissa is stored in [sf]. */ |
| 16594 | +.macro unpack_sf exp, sf |
| 16595 | + lsr \exp, \sf, 24 |
| 16596 | + brne 80f |
| 16597 | + /* Fix subnormal number */ |
| 16598 | + lsl \sf,7 |
| 16599 | + clz \exp,\sf |
| 16600 | + lsl \sf,\sf,\exp |
| 16601 | + rsub \exp,\exp,1 |
| 16602 | + rjmp 81f |
| 16603 | +80: |
| 16604 | + lsl \sf,7 |
| 16605 | + sbr \sf, 31 /*Implicit bit*/ |
| 16606 | +81: |
| 16607 | +.endm |
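As a plain-C restatement of what `unpack_sf` computes for a normal number: split an IEEE-754 single into sign, biased exponent, and mantissa with the implicit bit made explicit. The macro keeps the mantissa left-justified in a register and expects the sign shifted out by the caller, so the right-justified layout below is an illustration rather than a drop-in equivalent.

```c
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct unpacked { uint32_t sign, exp, mant; };

static struct unpacked unpack_single(float f)
{
  uint32_t bits;
  struct unpacked u;
  memcpy(&bits, &f, sizeof bits);       /* reinterpret the float's bits */
  u.sign = bits >> 31;
  u.exp  = (bits >> 23) & 0xff;
  u.mant = bits & 0x7fffff;
  if (u.exp != 0)
    u.mant |= 1u << 23;                 /* implicit leading 1 for normals */
  return u;
}

int main(void)
{
  struct unpacked u = unpack_single(1.5f);
  printf("sign=%u exp=%u mant=0x%x\n",
         (unsigned) u.sign, (unsigned) u.exp, (unsigned) u.mant);
  /* 1.5f -> sign=0 exp=127 mant=0xc00000 */
  return 0;
}
```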
| 16608 | + |
| 16609 | + |
| 16610 | + |
| 16611 | +/* Pack a single float number stored in [mant] and [exp] |
| 16612 | + into a single float number in [sf] */ |
| 16613 | +.macro pack_sf sf, exp, mant |
| 16614 | + bld \mant,31 /* implicit bit to z */ |
| 16615 | + subne \exp,1 /* if subnormal (implicit bit 0) |
| 16616 | + adjust exponent to storage format */ |
| 16617 | + |
| 16618 | + lsr \sf, \mant, 7 |
| 16619 | + bfins \sf, \exp, 24, 8 |
| 16620 | +.endm |
| 16621 | + |
| 16622 | +/* Pack exponent [exp] and mantissa [mant_hi, mant_lo] |
| 16623 | + into [df_hi, df_lo]. [df_hi] is shifted |
| 16624 | + one bit up so the sign bit can be shifted into it */ |
| 16625 | + |
| 16626 | +.macro pack_df exp, mant_lo, mant_hi, df_lo, df_hi |
| 16627 | + bld \mant_hi,31 /* implicit bit to z */ |
| 16628 | + subne \exp,1 /* if subnormal (implicit bit 0) |
| 16629 | + adjust exponent to storage format */ |
| 16630 | + |
| 16631 | + lsr \mant_lo,11 /* shift back lsw */ |
| 16632 | + or \df_lo,\mant_lo,\mant_hi<<21 /* combine with low bits from msw */ |
| 16633 | + lsl \mant_hi,1 /* get rid of implicit bit */ |
| 16634 | + lsr \mant_hi,11 /* shift back msw except for one step*/ |
| 16635 | + or \df_hi,\mant_hi,\exp<<21 /* combine msw with exponent */ |
| 16636 | +.endm |
| 16637 | + |
| 16638 | +/* Normalize single float number stored in [mant] and [exp] |
| 16639 | + using scratch register [scratch] */ |
| 16640 | +.macro normalize_sf exp, mant, scratch |
| 16641 | + /* Adjust exponent and mantissa */ |
| 16642 | + clz \scratch, \mant |
| 16643 | + sub \exp, \scratch |
| 16644 | + lsl \mant, \mant, \scratch |
| 16645 | +.endm |
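`normalize_sf` is the clz-then-shift idiom: count leading zeros of the mantissa, shift it fully left, and lower the exponent by the same amount. A C sketch under the assumption `mant != 0` (clz of zero needs separate handling); `__builtin_clz` is GCC's counterpart of the `clz` instruction used above.

```c
#include <stdint.h>
#include <stdio.h>

/* Shift the mantissa left until bit 31 is set and adjust the exponent.
   Assumes mant != 0, since __builtin_clz(0) is undefined. */
static void normalize(int *exp, uint32_t *mant)
{
  int shift = __builtin_clz(*mant);
  *mant <<= shift;
  *exp  -= shift;
}

int main(void)
{
  uint32_t mant = 0x00400000;   /* needs 9 left shifts */
  int exp = 130;
  normalize(&exp, &mant);
  printf("mant=0x%08x exp=%d\n", (unsigned) mant, exp);  /* 0x80000000, 121 */
  return 0;
}
```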
| 16646 | + |
| 16647 | +/* Normalize the exponent and mantissa pair stored |
| 16648 | + in [mant_hi,mant_lo] and [exp]. Needs two scratch |
| 16649 | + registers [scratch1] and [scratch2]. */ |
| 16650 | +.macro normalize_df exp, mant_lo, mant_hi, scratch1, scratch2 |
| 16651 | + clz \scratch1,\mant_hi /* Check if we have zeros in high bits */ |
| 16652 | + breq 80f /* No need for scaling if no zeros in high bits */ |
| 16653 | + cp.w \scratch1,32 /* Check for all zeros */ |
| 16654 | + breq 81f |
| 16655 | + |
| 16656 | + /* shift amount is smaller than 32, and involves both msw and lsw*/ |
| 16657 | + rsub \scratch2,\scratch1,32 /* shift mantissa */ |
| 16658 | + lsl \mant_hi,\mant_hi,\scratch1 |
| 16659 | + lsr \scratch2,\mant_lo,\scratch2 |
| 16660 | + or \mant_hi,\scratch2 |
| 16661 | + lsl \mant_lo,\mant_lo,\scratch1 |
| 16662 | + sub \exp,\scratch1 /* adjust exponent */ |
| 16663 | + rjmp 80f /* Finished */ |
| 16664 | +81: |
| 16665 | + /* shift amount is greater than 32 */ |
| 16666 | + clz \scratch1,\mant_lo /* shift mantissa */ |
| 16667 | + sub \scratch1,-32 |
| 16668 | + mov \mant_hi,\mant_lo |
| 16669 | + lsl \mant_hi,\mant_hi,\scratch1 |
| 16670 | + mov \mant_lo,0 |
| 16671 | + sub \exp,\scratch1 /* adjust exponent */ |
| 16672 | +80: |
| 16673 | +.endm |
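
normalize_df is essentially a 64-bit count-leading-zeros followed by a left shift, split into the two cases a 32-bit register pair forces: a shift below 32 that moves bits from the low word into the high word, and a shift of 32 or more that uses only the low word. A hedged C equivalent of the same idea (illustrative names, non-zero mantissa assumed):

```c
#include <stdint.h>

/* Illustrative sketch: normalize a 64-bit mantissa held as two 32-bit
   halves, decrementing the exponent by the shift amount, as normalize_df
   does.  Assumes mant_hi:mant_lo is non-zero. */
static void normalize_double (int *exp, uint32_t *mant_lo, uint32_t *mant_hi)
{
  if (*mant_hi & 0x80000000u)
    return;                                    /* already normalized */

  if (*mant_hi != 0)
    {
      int shift = __builtin_clz (*mant_hi);    /* 1..31: spans both words */
      *mant_hi = (*mant_hi << shift) | (*mant_lo >> (32 - shift));
      *mant_lo <<= shift;
      *exp -= shift;
    }
  else
    {
      int shift = __builtin_clz (*mant_lo) + 32; /* 32..63: low word only */
      *mant_hi = *mant_lo << (shift - 32);
      *mant_lo = 0;
      *exp -= shift;
    }
}
```
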
| 16674 | + |
| 16675 | + |
| 16676 | +#ifdef L_avr32_f64_mul |
| 16677 | + .align 2 |
| 16678 | + .global __avr32_f64_mul |
| 16679 | + .type __avr32_f64_mul,@function |
| 16680 | + |
| 16681 | +__avr32_f64_mul: |
| 16682 | + pushm r0-r3,r4-r7,lr |
| 16683 | + |
| 16684 | + /* Unpack */ |
| 16685 | + eor r12, r11, r9 /* Sign op1 ^ Sign op2 is MSB of r12*/ |
| 16686 | + lsl r11,1 /* Unpack op1 */ |
| 16687 | + lsl r9,1 /* Unpack op2 */ |
| 16688 | + |
| 16689 | + /* Sort operands op1 >= op2 */ |
| 16690 | + lddpc r5, .Linf |
| 16691 | + cp.w r10,r8 |
| 16692 | + cpc r11,r9 |
| 16693 | + brhs 0f |
| 16694 | + |
| 16695 | + mov r7,r11 /* swap operands if op2 was larger */ |
| 16696 | + mov r6,r10 |
| 16697 | + mov r11,r9 |
| 16698 | + mov r10,r8 |
| 16699 | + mov r9,r7 |
| 16700 | + mov r8,r6 |
| 16701 | + |
| 16702 | +0: |
| 16703 | + /* Check against infinity */ |
| 16704 | + cp.w r11,r5 |
| 16705 | + brlo 1f |
| 16706 | + /* infinity or nan */ |
| 16707 | + /* we have to check the low word as well, since a nan mantissa may be 0 in the msw */ |
| 16708 | + cpc r10 |
| 16709 | + /* we know that op1 is inf or nan. if z != 1 then we have nan. |
| 16710 | + in this case, also return nan. */ |
| 16711 | + breq 0f |
| 16712 | + /* Return NaN */ |
| 16713 | + mov r11, -1 |
| 16714 | + rjmp __dfmul_return_op1 |
| 16715 | +0: |
| 16716 | + |
| 16717 | + /* op1 is infinity. op2 is smaller or same, so it cannot be nan. |
| 16718 | + it can be infinity or a (sub-)normal number. |
| 16719 | + we should return op1 (infinity), except when op2 is zero, |
| 16720 | + in which case the result should be nan. */ |
| 16721 | + or r5,r9,r8 |
| 16722 | + brne __dfmul_return_op1 /* op2 is not zero. return op1.*/ |
| 16723 | + /* Return NaN */ |
| 16724 | + mov r11, -1 |
| 16725 | + rjmp __dfmul_return_op1 |
| 16726 | + |
| 16727 | +1: |
| 16728 | + /* no operand is inf/nan, and operands have been arranged in order |
| 16729 | + with op1 >= op2, implying that if we have a zero, it is found in |
| 16730 | + op2. in this case, result should be zero (with sign from both ops). */ |
| 16731 | + |
| 16732 | + or r5,r9,r8 /* check the smaller value for zero */ |
| 16733 | + brne 0f |
| 16734 | + mov r10, 0 |
| 16735 | + mov r11, 0 |
| 16736 | + rjmp __dfmul_return_op1 /* Early exit */ |
| 16737 | +0: |
| 16738 | + |
| 16739 | + /* we have two "normal" (can be subnormal) nonzero numbers in r11:r10 |
| 16740 | + and r9:r8. sign of result is already calculated in r12. |
| 16741 | + perform a normal multiplication. */ |
| 16742 | + |
| 16743 | + /* Unpack and normalize*/ |
| 16744 | + unpack_df r7 /*exp*/, r10, r11 /* mantissa */ |
| 16745 | + normalize_df r7 /*exp*/, r10, r11 /* mantissa */, r4, r5 /* scratch */ |
| 16746 | + |
| 16747 | + |
| 16748 | + /* Unpack and normalize*/ |
| 16749 | + unpack_df r6 /*exp*/, r8, r9 /* mantissa */ |
| 16750 | + normalize_df r6 /*exp*/, r8, r9 /* mantissa */, r4, r5 /* scratch */ |
| 16751 | + |
| 16752 | + /* Multiply */ |
| 16753 | + |
| 16754 | + mulu.d r0,r10,r8 |
| 16755 | + add lr,r7,r6 /* calculate new exponent after mul */ |
| 16756 | + mulu.d r2,r11,r8 |
| 16757 | + sub lr,(1023-1) /* remove the exponent bias, since bias was |
| 16758 | + included from both op1 and op2. |
| 16759 | + subtract one less than the bias, i.e. |
| 16760 | + add one to the exponent. see below for why. */ |
| 16761 | + mulu.d r6,r11,r9 |
| 16762 | + add r2,r1 |
| 16763 | + mulu.d r4,r10,r9 |
| 16764 | + |
| 16765 | + |
| 16766 | + adc r6,r6,r3 |
| 16767 | + acr r7 |
| 16768 | + |
| 16769 | + add r4,r2 |
| 16770 | + adc r6,r6,r5 |
| 16771 | + acr r7 |
| 16772 | + |
| 16773 | + // r7:r6 is now in range [0x4000...0000 - 0xffff...fffe] |
| 16774 | + // remaining bits in r0 and r4 are of no interest, except that we have |
| 16775 | + // to add a sticky bit to r6 in case we had a 1 bit in r4 or r0. |
| 16776 | + |
| 16777 | + or r4,r0 |
| 16778 | + movne r0, 1 /* If we have bits in r4 or r0 */ |
| 16779 | + or r6,r0 /* set lsb of result to 1 */ |
| 16780 | + |
| 16781 | + |
| 16782 | + // if msb is set, it was because multiplication gave an "overflow" |
| 16783 | + // of one bit so exponent should be incremented. |
| 16784 | + // we already did that above so we are done. |
| 16785 | + // if msb is *not* set it will be normalized and exponent will be |
| 16786 | + // decremented (which will compensate the one we added above). |
| 16787 | + |
| 16788 | + normalize_df lr /*exp*/, r6, r7 /* mantissa */, r8, r9 /* scratch */ |
| 16789 | + |
| 16790 | + /* Check if a subnormal result was created */ |
| 16791 | + cp.w lr, 0 |
| 16792 | + brgt 0f |
| 16793 | + |
| 16794 | + adjust_subnormal_df lr, r6, r7, r12, r8, r9 |
| 16795 | + mov r10, r6 |
| 16796 | + mov r11, r7 |
| 16797 | + popm r0-r3,r4-r7, pc |
| 16798 | +0: |
| 16799 | + |
| 16800 | + /* Round result */ |
| 16801 | + round_df lr /*exp*/, r6, r7 /* Mantissa */, r4 /*scratch*/ |
| 16802 | + cp.w lr,0x7ff |
| 16803 | + brlt 0f |
| 16804 | + /*Return infinity */ |
| 16805 | + lddpc r11, .Linf |
| 16806 | + mov r10, 0 |
| 16807 | + rjmp __dfmul_return_op1 |
| 16808 | + |
| 16809 | +0: |
| 16810 | + |
| 16811 | + /* Pack */ |
| 16812 | + pack_df lr /*exp*/, r6, r7 /* mantissa */, r10, r11 /* Output df number*/ |
| 16813 | +__dfmul_return_op1: |
| 16814 | + lsl r12,1 /* shift in sign bit */ |
| 16815 | + ror r11 |
| 16816 | + |
| 16817 | + popm r0-r3,r4-r7, pc |
| 16818 | + |
| 16819 | +#endif |
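
The heart of __avr32_f64_mul is a 64x64 to 128-bit multiply assembled from four 32x32 mulu.d products; only the top 64 bits are kept, and any non-zero discarded low bits are folded into a sticky bit so rounding still sees them. A C model of just that step, not the full routine, with illustrative names:

```c
#include <stdint.h>

/* Illustrative sketch: multiply two left-justified 64-bit mantissas and
   keep the top 64 bits of the 128-bit product, folding any non-zero
   discarded low bits into bit 0 as a sticky bit, in the spirit of the
   four mulu.d partial products above. */
static uint64_t mul_mantissas (uint64_t a, uint64_t b)
{
  uint32_t a_lo = (uint32_t) a, a_hi = (uint32_t)(a >> 32);
  uint32_t b_lo = (uint32_t) b, b_hi = (uint32_t)(b >> 32);

  uint64_t lo_lo = (uint64_t) a_lo * b_lo;     /* bits   0..63  */
  uint64_t hi_lo = (uint64_t) a_hi * b_lo;     /* bits  32..95  */
  uint64_t lo_hi = (uint64_t) a_lo * b_hi;     /* bits  32..95  */
  uint64_t hi_hi = (uint64_t) a_hi * b_hi;     /* bits  64..127 */

  /* Sum the middle partial products, tracking the carry into the high
     half exactly as the adc/acr instructions do. */
  uint64_t mid   = hi_lo + (lo_lo >> 32);      /* cannot overflow       */
  uint64_t mid2  = mid + lo_hi;
  uint64_t carry = (mid2 < mid);               /* carry out of bit 63   */

  uint64_t high = hi_hi + (mid2 >> 32) + (carry << 32);
  uint64_t low  = (mid2 << 32) | (uint32_t) lo_lo;

  if (low != 0)
    high |= 1;                                 /* sticky bit for rounding */
  return high;
}
```
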
| 16820 | + |
| 16821 | + |
| 16822 | +#ifdef L_avr32_f64_addsub |
| 16823 | + .align 2 |
| 16824 | + .global __avr32_f64_sub |
| 16825 | + .type __avr32_f64_sub,@function |
| 16826 | + |
| 16827 | +__avr32_f64_sub: |
| 16828 | + pushm r4-r7,lr |
| 16829 | + |
| 16830 | + eor r12,r11,r9 // compare signs of operands |
| 16831 | + bld r12,31 |
| 16832 | + brcc __dfsub // same sign => subtract |
| 16833 | + |
| 16834 | + eorh r9,0x8000 |
| 16835 | + rjmp __dfadd // different signs => op1 + (-op2) |
| 16836 | +__dfsub: |
| 16837 | + |
| 16838 | + lsl r11,1 // unpack op1 msw and get sign in c |
| 16839 | + or r4,r11,r10 // check if all bits zero |
| 16840 | + brne 1f |
| 16841 | + |
| 16842 | + // op1 is zero, negate op2 and handle as add |
| 16843 | + eorh r9,0x8000 |
| 16844 | + // op1 is +/-0, and is unpacked with sign in c. add to op2. |
| 16845 | + // also used by sub, but op2 has been negated in this case |
| 16846 | + ror r12 // save sign of op1 in msb of r12 |
| 16847 | + lsl r9,1 // unpack msw and get sign of op2 |
| 16848 | + or r4,r9,r8 // check all bits in op2 |
| 16849 | + breq 0f |
| 16850 | + |
| 16851 | + // if op2 != 0, then return op2 unchanged. |
| 16852 | + ror r9 // pack op2 msw again with sign from c |
| 16853 | + mov r11,r9 |
| 16854 | + mov r10,r8 |
| 16855 | + popm r4-r7,pc |
| 16856 | + |
| 16857 | +0: |
| 16858 | + // both op1 and op2 zero, but sign unknown. result sign should be the AND of the sign bits. |
| 16859 | + ror r9 // pack op2 msw again with sign from c |
| 16860 | + lsl r12,1 // get back sign of op1 into c ... |
| 16861 | + ror r11 // and back in original op1 |
| 16862 | + and r11,r9 // and sign bits. as op1 is zero, the |
| 16863 | + // only bit which can be 1 is sign bit |
| 16864 | + popm r4-r7,pc |
| 16865 | + |
| 16866 | +1: |
| 16867 | + ror r12 // save op1 sign in msb of r12 |
| 16868 | + |
| 16869 | + lsl r9,1 // unpack op2 msw |
| 16870 | + or r4,r8,r9 |
| 16871 | + brne 0f |
| 16872 | + // op2 is zero, return op1 |
| 16873 | + // whatever it is. the only case |
| 16874 | + // requiring special handling is if |
| 16875 | + // op1 is zero, but that was handled |
| 16876 | + // above. |
| 16877 | + lsl r12, 1 |
| 16878 | + ror r11 |
| 16879 | + popm r4-r7,pc |
| 16880 | + |
| 16881 | +0: |
| 16882 | + // make sure that op1 >= op2, flip sign if we swap ops |
| 16883 | + cp.w r10,r8 |
| 16884 | + cpc r11,r9 |
| 16885 | + brhs 0f |
| 16886 | + |
| 16887 | + com r12 // sign of op1 and result in lsb(r12) |
| 16888 | + mov r7,r11 // swap operands if op2 was larger |
| 16889 | + mov r6,r10 |
| 16890 | + mov r11,r9 |
| 16891 | + mov r10,r8 |
| 16892 | + mov r9,r7 |
| 16893 | + mov r8,r6 |
| 16894 | + |
| 16895 | +0: |
| 16896 | + // check if op1 is nan or inf. |
| 16897 | + lddpc r5,.Linf |
| 16898 | + cp.w r11,r5 |
| 16899 | + brlo 1f |
| 16900 | + /* Op 1 is nan or inf */ |
| 16901 | + // we have to check the low word as well, since a nan mantissa may be 0 in the msw |
| 16902 | + cpc r10 |
| 16903 | + // we know that op1 is inf or nan. if z != 1 then we have nan. |
| 16904 | + // if we have nan, return nan. |
| 16905 | + breq 0f |
| 16906 | + mov r11, -1 |
| 16907 | + rjmp __dfsub_return_op1 |
| 16908 | +0: |
| 16909 | + |
| 16910 | + // op1 is infinity. check if op2 is nan, infinity or a normal number. |
| 16911 | + cp.w r9,r5 |
| 16912 | + movhs r11, -1 // op2 is inf or nan => result is nan |
| 16913 | + |
| 16914 | + // op2 can be infinity (of the same sign as op1) or nan; in both cases the result is nan. |
| 16915 | + // if op2 is a normal number, op1 (infinity) is returned unchanged. |
| 16916 | + rjmp __dfsub_return_op1 |
| 16917 | +1: |
| 16918 | + // if op1 is not inf or nan, then op2 cannot be since op1 >= op2 |
| 16919 | + |
| 16920 | + // now prepare the operands by expanding them and shifting op2 |
| 16921 | + // to the correct position for the subtract. note! if op2 is |
| 16922 | + // insignificant compared to op1, the function will take care of |
| 16923 | + // this and return op1 directly to the application. |
| 16924 | + |
| 16925 | + /* Unpack operands */ |
| 16926 | + unpack_df r7 /* exp op1*/, r10, r11 /* Mantissa op1 */ |
| 16927 | + unpack_df r6 /* exp op2*/, r8, r9 /* Mantissa op2 */ |
| 16928 | + |
| 16929 | + /* Get shift amount required for aligning op1 and op2 */ |
| 16930 | + rsub r6, r7 |
| 16931 | + breq __perform_dfsub /* No shift needed */ |
| 16932 | + |
| 16933 | + cp.w r6, 63 |
| 16934 | + brhs __dfsub_pack_result /* Op 2 insignificant compared to op1 */ |
| 16935 | + |
| 16936 | + /* Shift mantissa of op2 so that op1 and op2 are aligned */ |
| 16937 | + scale_df r6 /* shift_count*/, r8, r9 /* Mantissa */, r4, r5 /*Scratch*/ |
| 16938 | + |
| 16939 | +__perform_dfsub: |
| 16940 | + sub r10,r8 /* subtract mantissa of op2 from op1 */ |
| 16941 | + sbc r11,r11,r9 |
| 16942 | + or r4,r11,r10 /* check if result is all zeroes */ |
| 16943 | + brne 0f |
| 16944 | + popm r4-r7,pc /* Early return */ |
| 16945 | +0: |
| 16946 | + |
| 16947 | + normalize_df r7 /*exp*/, r10, r11 /* mantissa */, r8, r9 /* scratch */ |
| 16948 | + |
| 16949 | + /* Check if a subnormal result was created */ |
| 16950 | + cp.w r7, 0 |
| 16951 | + brgt 0f |
| 16952 | + |
| 16953 | + adjust_subnormal_df r7 /*exp*/, r10, r11 /* Mantissa */, r12 /*sign*/, r8, r9 /*scratch*/ |
| 16954 | + popm r4-r7,pc |
| 16955 | +0: |
| 16956 | + |
| 16957 | + /* Round result */ |
| 16958 | + round_df r7 /*exp*/, r10, r11 /* Mantissa */, r9 /*scratch*/ |
| 16959 | + cp.w r7,0x7ff |
| 16960 | + brlt __dfsub_pack_result |
| 16961 | + /*Return infinity */ |
| 16962 | + lddpc r11, .Linf |
| 16963 | + mov r10, 0 |
| 16964 | + rjmp __dfsub_return_op1 |
| 16965 | + |
| 16966 | +__dfsub_pack_result: |
| 16967 | + /* Pack */ |
| 16968 | + pack_df r7 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/ |
| 16969 | + |
| 16970 | +__dfsub_return_op1: |
| 16971 | + lsl r12,1 |
| 16972 | + ror r11 |
| 16973 | + popm r4-r7,pc |
| 16974 | + |
| 16975 | + .align 2 |
| 16976 | + .global __avr32_f64_add |
| 16977 | + .type __avr32_f64_add,@function |
| 16978 | +__avr32_f64_add: |
| 16979 | + pushm r4-r7,lr |
| 16980 | + eor r12,r11,r9 // compare signs of operands |
| 16981 | + lsl r12,1 |
| 16982 | + brcc __dfadd // same sign => add |
| 16983 | + |
| 16984 | + eorh r9,0x8000 |
| 16985 | + rjmp __dfsub // different signs => op1 - (-op2) |
| 16986 | +__dfadd: |
| 16987 | + |
| 16988 | + lsl r11,1 // unpack op1 msw and get sign in c |
| 16989 | + or r4,r11,r10 // check if all bits zero |
| 16990 | + brne 1f |
| 16991 | + |
| 16992 | + // op1 is +/-0, and is unpacked with sign in c. add to op2. |
| 16993 | + // also used by sub, but op2 has been negated in this case |
| 16994 | + ror r12 // save sign of op1 in msb of r12 |
| 16995 | + lsl r9,1 // unpack msw and get sign of op2 |
| 16996 | + or r4,r9,r8 // check all bits in op2 |
| 16997 | + breq 0f |
| 16998 | + |
| 16999 | + // if op2 != 0, then return op2 unchanged. |
| 17000 | + ror r9 // pack op2 msw again with sign from c |
| 17001 | + mov r11,r9 |
| 17002 | + mov r10,r8 |
| 17003 | + popm r4-r7,pc |
| 17004 | + |
| 17005 | +0: |
| 17006 | + // both op1 and op2 zero, but sign unknown. result sign should be the AND of the sign bits. |
| 17007 | + ror r9 // pack op2 msw again with sign from c |
| 17008 | + lsl r12,1 // get back sign of op1 into c ... |
| 17009 | + ror r11 // and back in original op1 |
| 17010 | + and r11,r9 // and sign bits. as op1 is zero, the |
| 17011 | + // only bit which can be 1 is sign bit |
| 17012 | + popm r4-r7,pc |
| 17013 | +1: |
| 17014 | + ror r12 // save op1 sign in msb of r12 |
| 17015 | + |
| 17016 | + lsl r9,1 // unpack op2 msw |
| 17017 | + or r4,r8,r9 |
| 17018 | + brne 0f |
| 17019 | + // op2 is zero, return op1 |
| 17020 | + // whatever it is. the only case |
| 17021 | + // requiring special handling is if |
| 17022 | + // op1 is zero, but that was handled |
| 17023 | + // above. |
| 17024 | + lsl r12, 1 |
| 17025 | + ror r11 |
| 17026 | + popm r4-r7,pc |
| 17027 | +0: |
| 17028 | + // make sure that exp[op1] >= exp[op2] |
| 17029 | + cp.w r11,r9 |
| 17030 | + brhs 0f |
| 17031 | + |
| 17032 | + mov r7,r11 // swap operands if op2 was larger |
| 17033 | + mov r6,r10 |
| 17034 | + mov r11,r9 |
| 17035 | + mov r10,r8 |
| 17036 | + mov r9,r7 |
| 17037 | + mov r8,r6 |
| 17038 | + |
| 17039 | +0: |
| 17040 | + // check if op1 is nan or inf. |
| 17041 | + lddpc r5,.Linf |
| 17042 | + cp.w r11,r5 |
| 17043 | + brlo 1f |
| 17044 | + /* Op 1 is nan or inf */ |
| 17045 | + // we have to check the low word as well, since a nan mantissa may be 0 in the msw |
| 17046 | + cpc r10 |
| 17047 | + // we know that op1 is inf or nan. if z != 1 then we have nan. |
| 17048 | + // if we have nan, return nan. |
| 17049 | + breq 0f |
| 17050 | + mov r11, -1 |
| 17051 | + rjmp __dfadd_return_op1 |
| 17052 | +0: |
| 17053 | + |
| 17054 | + // op1 is infinity. check if op2 is nan, infinity or a normal number. |
| 17055 | + cp.w r9,r5 |
| 17056 | + // If op2 is NaN or Inf, return op2 but with the sign of the result. |
| 17057 | + // If op2 is NaN its sign does not matter, so NaN needs no separate handling. |
| 17058 | + movhs r11, r9 |
| 17059 | + movhs r10, r8 |
| 17060 | + |
| 17061 | + // op2 can be infinity (of the same sign as op1) or nan; returning op2 is correct in both cases. |
| 17062 | + // if op2 is a normal number, op1 (infinity) falls through and is returned. |
| 17063 | + rjmp __dfadd_return_op1 |
| 17064 | +1: |
| 17065 | + // if op1 is not inf or nan, then op2 cannot be since exp[op1] >= |
| 17066 | + // exp[op2] |
| 17067 | + |
| 17068 | + // now prepare the operands by expanding them and shifting op2 |
| 17069 | + // to the correct position for the add. note! if op2 is |
| 17070 | + // insignificant compared to op1, the function will take care of |
| 17071 | + // this and return op1 directly to the application. |
| 17072 | + |
| 17073 | + /* Unpack operands */ |
| 17074 | + unpack_df r7 /* exp op1*/, r10, r11 /* Mantissa op1 */ |
| 17075 | + unpack_df r6 /* exp op2*/, r8, r9 /* Mantissa op2 */ |
| 17076 | + |
| 17077 | + /* Get shift amount required for aligning op1 and op2 */ |
| 17078 | + rsub r6, r7 |
| 17079 | + breq __perform_dfadd /* No shift needed */ |
| 17080 | + |
| 17081 | + cp.w r6, 63 |
| 17082 | + brhs __dfadd_pack_result /* Op 2 insignificant compared to op1 */ |
| 17083 | + |
| 17084 | + /* Shift mantissa of op2 so that op1 and op2 are aligned */ |
| 17085 | + scale_df r6 /* shift_count*/, r8, r9 /* Mantissa */, r4, r5 /*Scratch*/ |
| 17086 | + |
| 17087 | +__perform_dfadd: |
| 17088 | + add r10,r8 // add mantissas |
| 17089 | + adc r11,r11,r9 |
| 17090 | + brcc 0f |
| 17091 | + ror r11 // overflow => shift down mantissa |
| 17092 | + ror r10 |
| 17093 | + brcc 1f // sticky bit shifted out? |
| 17094 | + sbr r10,0 // if so, merge it into result again |
| 17095 | +1: |
| 17096 | + sub r7,-1 // increase exponent with 1 |
| 17097 | +0: |
| 17098 | + normalize_df r7 /*exp*/, r10, r11 /* mantissa */, r8, r9 /* scratch */ |
| 17099 | + |
| 17100 | + /* Check if a subnormal result was created */ |
| 17101 | + cp.w r7, 0 |
| 17102 | + brgt 0f |
| 17103 | + |
| 17104 | + adjust_subnormal_df r7 /*exp*/, r10, r11 /* Mantissa */, r12 /*sign*/, r8, r9 /*scratch*/ |
| 17105 | + popm r4-r7,pc |
| 17106 | +0: |
| 17107 | + |
| 17108 | + /* Round result */ |
| 17109 | + round_df r7 /*exp*/, r10, r11 /* Mantissa */, r9 /*scratch*/ |
| 17110 | + cp.w r7,0x7ff |
| 17111 | + brlt __dfadd_pack_result |
| 17112 | + /*Return infinity */ |
| 17113 | + lddpc r11, .Linf |
| 17114 | + mov r10, 0 |
| 17115 | + rjmp __dfadd_return_op1 |
| 17116 | + |
| 17117 | +__dfadd_pack_result: |
| 17118 | + /* Pack */ |
| 17119 | + pack_df r7 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/ |
| 17120 | + |
| 17121 | +__dfadd_return_op1: |
| 17122 | + lsl r12,1 |
| 17123 | + ror r11 |
| 17124 | + popm r4-r7,pc |
| 17125 | +#endif |
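
Before the mantissas are added or subtracted, the operand with the smaller exponent is shifted right by the exponent difference, and every bit shifted out is collapsed into a sticky bit so the later rounding step still accounts for it (this is what the scale_df macro does). A minimal C sketch of that alignment, with illustrative names; note that the assembly instead returns op1 outright once the difference reaches 63:

```c
#include <stdint.h>

/* Illustrative sketch: align the mantissa of the smaller operand before a
   double add/sub by shifting it right by the exponent difference.  Bits
   shifted out are ORed into bit 0 (the sticky bit) so rounding can still
   tell the result is inexact. */
static uint64_t align_mantissa (uint64_t mant, int exp_diff)
{
  if (exp_diff <= 0)
    return mant;                          /* already aligned           */
  if (exp_diff >= 64)
    return mant != 0;                     /* everything becomes sticky */

  uint64_t lost = mant << (64 - exp_diff);  /* the bits that fall off  */
  mant >>= exp_diff;
  if (lost != 0)
    mant |= 1;                            /* keep a trace for rounding */
  return mant;
}
```
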
| 17126 | + |
| 17127 | +#ifdef L_avr32_f64_to_u32 |
| 17128 | + /* This goes into L_fixdfsi */ |
| 17129 | +#endif |
| 17130 | + |
| 17131 | + |
| 17132 | +#ifdef L_avr32_f64_to_s32 |
| 17133 | + .global __avr32_f64_to_u32 |
| 17134 | + .type __avr32_f64_to_u32,@function |
| 17135 | +__avr32_f64_to_u32: |
| 17136 | + cp.w r11, 0 |
| 17137 | + retmi 0 /* Negative returns 0 */ |
| 17138 | +#ifdef __LARGE_FLOATS__ |
| 17139 | + lsl r12,r11,1 |
| 17140 | + lsr r12,21 /* extract exponent*/ |
| 17141 | + sub r12,1023 /* convert to unbiased exponent.*/ |
| 17142 | + retlo 0 /* too small exponent implies zero. */ |
| 17143 | + cp.w r12,32 |
| 17144 | + brcc 0f |
| 17145 | + rjmp 1f |
| 17146 | +#endif |
| 17147 | + |
| 17148 | + /* Fallthrough to df to signed si conversion */ |
| 17149 | + .global __avr32_f64_to_s32 |
| 17150 | + .type __avr32_f64_to_s32,@function |
| 17151 | +__avr32_f64_to_s32: |
| 17152 | + lsl r12,r11,1 |
| 17153 | + lsr r12,21 /* extract exponent*/ |
| 17154 | + sub r12,1023 /* convert to unbiased exponent.*/ |
| 17155 | + retlo 0 /* too small exponent implies zero. */ |
| 17156 | + |
| 17157 | +#ifdef __LARGE_FLOATS__ |
| 17158 | + cp.w r12,31 |
| 17159 | + brcc 0f |
| 17160 | +#endif |
| 17161 | +1: |
| 17162 | + rsub r12,r12,31 /* shift count = 31 - exponent */ |
| 17163 | + mov r9,r11 /* save sign for later...*/ |
| 17164 | + lsl r11,11 /* remove exponent and sign*/ |
| 17165 | + sbr r11,31 /* add implicit bit*/ |
| 17166 | + or r11,r11,r10>>21 /* get rest of bits from lsw of double */ |
| 17167 | + lsr r11,r11,r12 /* shift down mantissa to final place */ |
| 17168 | + lsl r9,1 /* sign -> carry */ |
| 17169 | + retcc r11 /* if positive, we are done */ |
| 17170 | + neg r11 /* if negative float, negate result */ |
| 17171 | + ret r11 |
| 17172 | + |
| 17173 | +#ifdef __LARGE_FLOATS__ |
| 17174 | +0: |
| 17175 | + mov r12,-1 /* r12 = 0xffffffff */ |
| 17176 | + lsr r12,1 /* r12 = 0x7fffffff */ |
| 17177 | + lsl r11,1 /* sign -> carry */ |
| 17178 | + acr r12 /* r12 = signed ? 0x80000000 |
| 17179 | + : 0x7fffffff */ |
| 17180 | + ret r12 |
| 17181 | +#endif |
| 17182 | +#endif /* L_fixdfsi*/ |
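
__avr32_f64_to_s32 truncates by rebuilding the top 32 mantissa bits with the implicit 1 at bit 31 and shifting them right by 31 minus the unbiased exponent, then applying the sign. A C model of the same conversion (illustrative only; out-of-range values are left to the optional __LARGE_FLOATS__ saturation path shown above):

```c
#include <stdint.h>

/* Illustrative sketch of the truncating double -> int32 conversion above.
   'bits' is the raw IEEE 754 encoding.  Values with |x| < 1 return 0;
   out-of-range values are not handled here. */
static int32_t double_bits_to_s32 (uint64_t bits)
{
  int exp = (int)((bits >> 52) & 0x7ff) - 1023;    /* unbiased exponent */
  if (exp < 0)
    return 0;

  /* Rebuild the top 32 mantissa bits with the implicit 1 at bit 31,
     then shift down so the binary point lands after bit 0. */
  uint32_t mant = 0x80000000u | ((uint32_t)(bits >> 21) & 0x7fffffffu);
  uint32_t val  = mant >> (31 - exp);

  int64_t result = (bits >> 63) ? -(int64_t) val : (int64_t) val;
  return (int32_t) result;
}
```
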
| 17183 | + |
| 17184 | +#ifdef L_avr32_f64_to_u64 |
| 17185 | + /* Actual function is in L_fixdfdi */ |
| 17186 | +#endif |
| 17187 | + |
| 17188 | +#ifdef L_avr32_f64_to_s64 |
| 17189 | + .global __avr32_f64_to_u64 |
| 17190 | + .type __avr32_f64_to_u64,@function |
| 17191 | +__avr32_f64_to_u64: |
| 17192 | + cp.w r11,0 |
| 17193 | + /* Negative numbers return zero */ |
| 17194 | + movmi r10, 0 |
| 17195 | + movmi r11, 0 |
| 17196 | + retmi r11 |
| 17197 | +#ifdef __LARGE_FLOATS__ |
| 17198 | + lsl r9,r11,1 |
| 17199 | + lsr r9,21 /* get exponent*/ |
| 17200 | + sub r9,1023 /* convert to correct range*/ |
| 17201 | + /* Return zero if exponent too small */ |
| 17202 | + movlo r10, 0 |
| 17203 | + movlo r11, 0 |
| 17204 | + retlo r11 |
| 17205 | + cp.w r9,64 |
| 17206 | + mov r8,r11 /* save sign for later...*/ |
| 17207 | + brcs 1f |
| 17208 | + rjmp 2f /* Number too large */ |
| 17209 | + |
| 17210 | +#endif |
| 17211 | + |
| 17212 | + |
| 17213 | + |
| 17214 | + /* Fallthrough */ |
| 17215 | + .global __avr32_f64_to_s64 |
| 17216 | + .type __avr32_f64_to_s64,@function |
| 17217 | +__avr32_f64_to_s64: |
| 17218 | + lsl r9,r11,1 |
| 17219 | + lsr r9,21 /* get exponent*/ |
| 17220 | + sub r9,1023 /* convert to correct range*/ |
| 17221 | + /* Return zero if exponent too small */ |
| 17222 | + movlo r10, 0 |
| 17223 | + movlo r11, 0 |
| 17224 | + retlo r11 |
| 17225 | + |
| 17226 | +#ifdef __LARGE_FLOATS__ |
| 17227 | + cp.w r9,63 |
| 17228 | + mov r8,r11 /* save sign for later...*/ |
| 17229 | + brcc 2f |
| 17230 | +#else |
| 17231 | + mov r8,r11 /* save sign for later...*/ |
| 17232 | +#endif |
| 17233 | +1: |
| 17234 | + lsl r11,11 /* remove exponent */ |
| 17235 | + sbr r11,31 /* add implicit bit*/ |
| 17236 | + or r11,r11,r10>>21 /* get rest of bits from lsw of double*/ |
| 17237 | + lsl r10,11 /* align lsw correctly as well */ |
| 17238 | + rsub r9,r9,63 /* shift count = 63 - exponent */ |
| 17239 | + breq 1f |
| 17240 | + |
| 17241 | + cp.w r9,32 /* is shift count more than one reg? */ |
| 17242 | + brhs 0f |
| 17243 | + |
| 17244 | + mov r12,r11 /* save msw */ |
| 17245 | + lsr r10,r10,r9 /* small shift count, shift down lsw */ |
| 17246 | + lsr r11,r11,r9 /* small shift count, shift down msw */ |
| 17247 | + rsub r9,r9,32 /* r9 = 32 - shift count */ |
| 17248 | + lsl r12,r12,r9 /* align part to move from msw to lsw */ |
| 17249 | + or r10,r12 /* combine to get new lsw */ |
| 17250 | + rjmp 1f |
| 17251 | + |
| 17252 | +0: |
| 17253 | + lsr r10,r11,r9 /* large shift count, only lsw gets bits; |
| 17254 | + note that shift count is modulo 32 */ |
| 17255 | + mov r11,0 /* msw will be 0 */ |
| 17256 | + |
| 17257 | +1: |
| 17258 | + lsl r8,1 /* sign -> carry */ |
| 17259 | + retcc r11 /* if positive, we are done */ |
| 17260 | + |
| 17261 | + neg r11 /* if negative float, negate result */ |
| 17262 | + neg r10 |
| 17263 | + scr r11 |
| 17264 | + ret r11 |
| 17265 | + |
| 17266 | + |
| 17267 | +#ifdef __LARGE_FLOATS__ |
| 17268 | +2: |
| 17269 | + mov r11,-1 /* r11 = 0xffffffff */ |
| 17270 | + lsr r11,1 /* r11 = 0x7fffffff */ |
| 17271 | + lsl r8,1 /* sign -> carry */ |
| 17272 | + acr r11 /* r11 = signed ? 0x80000000 */ |
| 17273 | + /* : 0x7fffffff */ |
| 17274 | + lsl r10,r11,31 /* extend last bit of msw*/ |
| 17275 | + asr r10,31 |
| 17276 | + ret r11 |
| 17277 | +#endif |
| 17278 | +#endif |
| 17279 | + |
| 17280 | +#ifdef L_avr32_u32_to_f64 |
| 17281 | + /* Code located in L_floatsidf */ |
| 17282 | +#endif |
| 17283 | + |
| 17284 | +#ifdef L_avr32_s32_to_f64 |
| 17285 | + .global __avr32_u32_to_f64 |
| 17286 | + .type __avr32_u32_to_f64,@function |
| 17287 | +__avr32_u32_to_f64: |
| 17288 | + sub r11, r12, 0 /* Move to r11 and force Z flag to be updated */ |
| 17289 | + mov r12, 0 /* always positive */ |
| 17290 | + rjmp 0f /* Jump to common code for floatsidf */ |
| 17291 | + |
| 17292 | + .global __avr32_s32_to_f64 |
| 17293 | + .type __avr32_s32_to_f64,@function |
| 17294 | +__avr32_s32_to_f64: |
| 17295 | + mov r11, r12 /* Keep original value in r12 for sign */ |
| 17296 | + abs r11 /* Absolute value of r12 */ |
| 17297 | +0: |
| 17298 | + mov r10,0 /* let remaining bits be zero */ |
| 17299 | + reteq r11 /* a zero integer returns 0.0 */ |
| 17300 | + |
| 17301 | + pushm lr |
| 17302 | + mov r9,31+1023 /* set exponent */ |
| 17303 | + |
| 17304 | + normalize_df r9 /*exp*/, r10, r11 /* mantissa */, r8, lr /* scratch */ |
| 17305 | + |
| 17306 | + /* Check if a subnormal result was created */ |
| 17307 | + cp.w r9, 0 |
| 17308 | + brgt 0f |
| 17309 | + |
| 17310 | + adjust_subnormal_df r9 /* exp */, r10, r11 /* Mantissa */, r12 /*sign*/, r8, lr /* scratch */ |
| 17311 | + popm pc |
| 17312 | +0: |
| 17313 | + |
| 17314 | + /* Round result */ |
| 17315 | + round_df r9 /*exp*/, r10, r11 /* Mantissa */, r8 /*scratch*/ |
| 17316 | + cp.w r9,0x7ff |
| 17317 | + brlt 0f |
| 17318 | + /*Return infinity */ |
| 17319 | + lddpc r11, .Linf |
| 17320 | + mov r10, 0 |
| 17321 | + rjmp __floatsidf_return_op1 |
| 17322 | + |
| 17323 | +0: |
| 17324 | + |
| 17325 | + /* Pack */ |
| 17326 | + pack_df r9 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/ |
| 17327 | +__floatsidf_return_op1: |
| 17328 | + lsl r12,1 /* shift in sign bit */ |
| 17329 | + ror r11 |
| 17330 | + |
| 17331 | + popm pc |
| 17332 | +#endif |
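
__avr32_s32_to_f64 places the absolute value of the integer in the high mantissa word, starts from a biased exponent of 31 + 1023, and lets the shared normalize/round/pack path finish the job; since every 32-bit integer is exactly representable in a double, no rounding ever occurs. A C sketch of the same idea using ordinary 64-bit arithmetic (illustrative names):

```c
#include <stdint.h>

/* Illustrative sketch: convert a signed 32-bit integer to IEEE 754 double
   bits with the same "start at exponent 31 + bias, then normalize" scheme
   used by __avr32_s32_to_f64. */
static uint64_t s32_to_double_bits (int32_t x)
{
  if (x == 0)
    return 0;                                  /* +0.0 */

  uint64_t sign = (x < 0) ? (1ULL << 63) : 0;
  uint64_t mant = (uint64_t)(uint32_t)(x < 0 ? -(int64_t) x : x) << 32;
  int exp = 31 + 1023;        /* as if bit 31 of |x| were the leading bit */

  while (!(mant & (1ULL << 63)))               /* normalize */
    {
      mant <<= 1;
      exp--;
    }

  uint64_t frac = (mant << 1) >> 12;           /* drop the implicit bit */
  return sign | ((uint64_t) exp << 52) | frac;
}
```
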
| 17333 | + |
| 17334 | + |
| 17335 | +#ifdef L_avr32_f32_cmp_eq |
| 17336 | + .global __avr32_f32_cmp_eq |
| 17337 | + .type __avr32_f32_cmp_eq,@function |
| 17338 | +__avr32_f32_cmp_eq: |
| 17339 | + cp.w r12, r11 |
| 17340 | + brne 0f /* If not equal check for +/-0 */ |
| 17341 | + |
| 17342 | + /* Check for NaN or Inf */ |
| 17343 | + lddpc r11,.Linf_sf |
| 17344 | + lsl r12, 1 |
| 17345 | + cp.w r12, r11 |
| 17346 | + srls r12 /* 0 if NaN, 1 otherwise */ |
| 17347 | + ret r12 |
| 17348 | +0: |
| 17349 | + /* Or together the two values and shift out the sign bit. |
| 17350 | + If the result is zero, then the two values are both zero. */ |
| 17351 | + or r12, r11 |
| 17352 | + lsl r12, 1 |
| 17353 | + sreq r12 |
| 17354 | + ret r12 |
| 17355 | +#endif |
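
__avr32_f32_cmp_eq works purely on the bit patterns: identical patterns are equal unless they encode NaN, and the only way different patterns can still compare equal is +0.0 against -0.0. The same logic in C over the raw encodings (illustrative name):

```c
#include <stdint.h>

/* Illustrative sketch: equality on raw single-precision bit patterns,
   as __avr32_f32_cmp_eq implements it.  Returns 1 for equal, 0 otherwise. */
static int f32_bits_eq (uint32_t a, uint32_t b)
{
  if (a == b)
    /* Identical patterns are equal unless they are NaN
       (exponent all ones and a non-zero fraction). */
    return (a << 1) <= 0xff000000u;

  /* Different patterns can still compare equal: +0.0 == -0.0. */
  return ((a | b) << 1) == 0;
}
```
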
| 17356 | + |
| 17357 | +#if defined(L_avr32_f32_cmp_ge) || defined(L_avr32_f32_cmp_lt) |
| 17358 | +#ifdef L_avr32_f32_cmp_ge |
| 17359 | + .global __avr32_f32_cmp_ge |
| 17360 | + .type __avr32_f32_cmp_ge,@function |
| 17361 | +__avr32_f32_cmp_ge: |
| 17362 | +#endif |
| 17363 | +#ifdef L_avr32_f32_cmp_lt |
| 17364 | + .global __avr32_f32_cmp_lt |
| 17365 | + .type __avr32_f32_cmp_lt,@function |
| 17366 | +__avr32_f32_cmp_lt: |
| 17367 | +#endif |
| 17368 | + lsl r10, r12, 1 /* Remove sign bits */ |
| 17369 | + lsl r9, r11, 1 |
| 17370 | + lddpc r8, .Linf_sf |
| 17371 | + cp.w r10, r8 |
| 17372 | + rethi 0 /* Op0 is NaN */ |
| 17373 | + cp.w r9, r8 |
| 17374 | + rethi 0 /* Op1 is NaN */ |
| 17375 | + |
| 17376 | + eor r8, r11, r12 |
| 17377 | + bld r12, 31 |
| 17378 | +#ifdef L_avr32_f32_cmp_ge |
| 17379 | + srcc r8 /* Set result to true if op0 is positive*/ |
| 17380 | +#endif |
| 17381 | +#ifdef L_avr32_f32_cmp_lt |
| 17382 | + srcs r8 /* Set result to true if op0 is negative*/ |
| 17383 | +#endif |
| 17384 | + retmi r8 /* Return if signs are different */ |
| 17385 | + brcs 0f /* Both signs negative? */ |
| 17386 | + |
| 17387 | + /* Both signs positive */ |
| 17388 | + cp.w r12, r11 |
| 17389 | +#ifdef L_avr32_f32_cmp_ge |
| 17390 | + srhs r12 |
| 17391 | +#endif |
| 17392 | +#ifdef L_avr32_f32_cmp_lt |
| 17393 | + srlo r12 |
| 17394 | +#endif |
| 17395 | + retal r12 |
| 17396 | +0: |
| 17397 | + /* Both signs negative */ |
| 17398 | + cp.w r11, r12 |
| 17399 | +#ifdef L_avr32_f32_cmp_ge |
| 17400 | + srhs r12 |
| 17401 | +#endif |
| 17402 | +#ifdef L_avr32_f32_cmp_lt |
| 17403 | + srlo r12 |
| 17404 | +#endif |
| 17405 | + retal r12 |
| 17406 | +#endif |
| 17407 | + |
| 17408 | + |
| 17409 | +#ifdef L_avr32_f64_cmp_eq |
| 17410 | + .global __avr32_f64_cmp_eq |
| 17411 | + .type __avr32_f64_cmp_eq,@function |
| 17412 | +__avr32_f64_cmp_eq: |
| 17413 | + cp.w r10,r8 |
| 17414 | + cpc r11,r9 |
| 17415 | + brne 0f /* Both args could be zero with different sign bits */ |
| 17416 | + |
| 17417 | + /* check for NaN */ |
| 17418 | + lsl r11,1 |
| 17419 | + lddpc r12,.Linf |
| 17420 | + cp.w r10,0 |
| 17421 | + cpc r11,r12 /* check if nan or inf */ |
| 17422 | + srls r12 /* If Arg is NaN return 0 else 1*/ |
| 17423 | + ret r12 /* Return */ |
| 17424 | + |
| 17425 | +0: |
| 17426 | + lsl r11,1 /* get rid of sign bits */ |
| 17427 | + lsl r9,1 |
| 17428 | + or r11,r10 /* Check if all bits are zero */ |
| 17429 | + or r11,r9 |
| 17430 | + or r11,r8 |
| 17431 | + sreq r12 /* If all bits are zero the arguments are equal, |
| 17432 | + so return 1, else return 0 */ |
| 17433 | + ret r12 |
| 17434 | +#endif |
| 17435 | + |
| 17436 | + |
| 17437 | +#if defined(L_avr32_f64_cmp_ge) || defined(L_avr32_f64_cmp_lt) |
| 17438 | + |
| 17439 | +#ifdef L_avr32_f64_cmp_ge |
| 17440 | + .global __avr32_f64_cmp_ge |
| 17441 | + .type __avr32_f64_cmp_ge,@function |
| 17442 | +__avr32_f64_cmp_ge: |
| 17443 | +#endif |
| 17444 | +#ifdef L_avr32_f64_cmp_lt |
| 17445 | + .global __avr32_f64_cmp_lt |
| 17446 | + .type __avr32_f64_cmp_lt,@function |
| 17447 | +__avr32_f64_cmp_lt: |
| 17448 | +#endif |
| 17449 | + |
| 17450 | + /* compare magnitude of op1 and op2 */ |
| 17451 | + pushm lr |
| 17452 | + |
| 17453 | + lsl r11,1 /* Remove sign bit of op1 */ |
| 17454 | + srcs lr /* Sign op1 to lsb of lr*/ |
| 17455 | + lsl r9,1 /* Remove sign bit of op2 */ |
| 17456 | + rol lr /* Sign of op2 to lsb of lr, sign of op1 to bit 1 of lr */ |
| 17457 | + |
| 17458 | + /* Check for NaN */ |
| 17459 | + lddpc r12,.Linf |
| 17460 | + cp.w r10,0 |
| 17461 | + cpc r11,r12 |
| 17462 | + movhi r12, 0 /* Return false for NaN */ |
| 17463 | + brhi 0f /* We have NaN */ |
| 17464 | + cp.w r8,0 |
| 17465 | + cpc r9,r12 |
| 17466 | + movhi r12, 0 /* Return false for NaN */ |
| 17467 | + brhi 0f /* We have NaN */ |
| 17468 | + |
| 17469 | + cp.w lr,3 /* both operands negative ?*/ |
| 17470 | + breq 1f |
| 17471 | + |
| 17472 | + cp.w lr,1 /* both operands positive? */ |
| 17473 | + brlo 2f |
| 17474 | + |
| 17475 | + /* Different signs. If sign of op1 is negative the difference |
| 17476 | + between op1 and op2 will always be negative, and if op1 is |
| 17477 | + positive the difference will always be positive */ |
| 17478 | +#ifdef L_avr32_f64_cmp_ge |
| 17479 | + sreq r12 |
| 17480 | +#endif |
| 17481 | +#ifdef L_avr32_f64_cmp_lt |
| 17482 | + srne r12 |
| 17483 | +#endif |
| 17484 | + popm pc |
| 17485 | + |
| 17486 | + |
| 17487 | +2: |
| 17488 | + /* Both operands positive. Just compute the difference */ |
| 17489 | + cp.w r10,r8 |
| 17490 | + cpc r11,r9 |
| 17491 | +#ifdef L_avr32_f64_cmp_ge |
| 17492 | + srhs r12 |
| 17493 | +#endif |
| 17494 | +#ifdef L_avr32_f64_cmp_lt |
| 17495 | + srlo r12 |
| 17496 | +#endif |
| 17497 | + popm pc |
| 17498 | + |
| 17499 | +1: |
| 17500 | + /* Both operands negative. Compute the difference with operands switched */ |
| 17501 | + cp r8,r10 |
| 17502 | + cpc r9,r11 |
| 17503 | +#ifdef L_avr32_f64_cmp_ge |
| 17504 | + srhs r12 |
| 17505 | +#endif |
| 17506 | +#ifdef L_avr32_f64_cmp_lt |
| 17507 | + srlo r12 |
| 17508 | +#endif |
| 17509 | +0: |
| 17510 | + popm pc |
| 17511 | +#endif |
| 17512 | + |
| 17513 | + |
| 17514 | + |
| 17515 | +#ifdef L_avr32_f64_div |
| 17516 | + .global __avr32_f64_div |
| 17517 | + .type __avr32_f64_div,@function |
| 17518 | +__avr32_f64_div: |
| 17519 | + stm --sp, r2-r7,lr |
| 17520 | + eor r12, r11, r9 /* Sign(op1) ^ Sign(op2) to msb of r12*/ |
| 17521 | + lsl r11,1 /* unpack op1*/ |
| 17522 | + lddpc lr,.Linf |
| 17523 | + lsl r9,1 /* unpack op2*/ |
| 17524 | + |
| 17525 | + cp.w r11,lr |
| 17526 | + brhs 0f /* op1 is NaN or infinity */ |
| 17527 | + cp.w r9,lr |
| 17528 | + brhs 1f /* op2 is NaN or infinity */ |
| 17529 | + or r5,r9,r8 |
| 17530 | + breq 2f /* op2 is zero */ |
| 17531 | + or r5,r11,r10 |
| 17532 | + breq __dfdiv_return_op1 /* op1 is zero return zero*/ |
| 17533 | + |
| 17534 | + /* Unpack and normalize */ |
| 17535 | + /* op1 */ |
| 17536 | + unpack_df r7 /*exp*/, r10, r11 /*df number*/ |
| 17537 | + normalize_df r7 /*exp*/, r10, r11 /*Mantissa*/, r4, r5 /*scratch*/ |
| 17538 | + |
| 17539 | + /* op2 */ |
| 17540 | + unpack_df r6 /*exp*/, r8, r9 /*df number*/ |
| 17541 | + normalize_df r6 /*exp*/, r8, r9 /*Mantissa*/, r4, r5 /*scratch*/ |
| 17542 | + |
| 17543 | + /* Compute new exponent */ |
| 17544 | + sub r7,r6 |
| 17545 | + sub r7,-1023 |
| 17546 | + |
| 17547 | + /* Do fixed point division of mantissas*/ |
| 17548 | + mov r6,55 |
| 17549 | + lsr r11,1 |
| 17550 | + ror r10 |
| 17551 | + lsr r9,1 |
| 17552 | + ror r8 |
| 17553 | + |
| 17554 | +3: |
| 17555 | + /* Check if dividend is higher or same than divisor */ |
| 17556 | + sub r2,r10,r8 |
| 17557 | + sbc r3,r11,r9 |
| 17558 | + /* If so move the difference back into the dividend */ |
| 17559 | + movhs r10, r2 |
| 17560 | + movhs r11, r3 |
| 17561 | + /* Update the Quotient */ |
| 17562 | + rol r4 |
| 17563 | + rol r5 |
| 17564 | + eorl r4,1 |
| 17565 | + |
| 17566 | + /* Shift the dividend */ |
| 17567 | + lsl r10,1 |
| 17568 | + rol r11 |
| 17569 | + |
| 17570 | + sub r6,1 |
| 17571 | + brne 3b |
| 17572 | + |
| 17573 | + /* Check if we have a remainder, which should then propagate into |
| 17574 | + the last (sticky) bit */ |
| 17575 | + |
| 17576 | + or r11,r11,r10 |
| 17577 | + neg r11 |
| 17578 | + rol r4 |
| 17579 | + rol r5 |
| 17580 | + |
| 17581 | + /* Adjust mantissa into correct alignment */ |
| 17582 | + lsl r11, r5,(64-56) |
| 17583 | + or r11,r11,r4>>(32-64+56) |
| 17584 | + lsl r10,r4, (64-56) |
| 17585 | + |
| 17586 | + /* Normalize result */ |
| 17587 | + normalize_df r7 /*exp*/, r10, r11 /* mantissa */, r8, r9 /* scratch */ |
| 17588 | + |
| 17589 | + /* Check if a subnormal result was created */ |
| 17590 | + cp.w r7, 0 |
| 17591 | + brgt 3f |
| 17592 | + |
| 17593 | + adjust_subnormal_df r7 /*exp*/, r10, r11 /* Mantissa */, r12 /*sign*/, r8, r9 /*scratch*/ |
| 17594 | + ldm sp++, r2-r7,pc |
| 17595 | +3: |
| 17596 | + |
| 17597 | + /* Round result */ |
| 17598 | + round_df r7 /*exp*/, r10, r11 /* Mantissa */, r9 /*scratch*/ |
| 17599 | + cp.w r7,0x7ff |
| 17600 | + brlt __dfdiv_pack_result |
| 17601 | + /*Return infinity */ |
| 17602 | + lddpc r11, .Linf |
| 17603 | + mov r10, 0 |
| 17604 | + rjmp __dfdiv_return_op1 |
| 17605 | + |
| 17606 | +__dfdiv_pack_result: |
| 17607 | + /* Pack */ |
| 17608 | + pack_df r7 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/ |
| 17609 | + |
| 17610 | +__dfdiv_return_op1: |
| 17611 | + lsl r12,1 |
| 17612 | + ror r11 |
| 17613 | + ldm sp++, r2-r7,pc |
| 17614 | + |
| 17615 | +0: |
| 17616 | + /* Op1 is NaN or Inf */ |
| 17617 | + cpc r10 |
| 17618 | + /* If op1 is a NaN then we should return a NaN */ |
| 17619 | + brne __dfdiv_return_op1 |
| 17620 | + |
| 17621 | + /* Op1 is infinity, check op2*/ |
| 17622 | + cp.w r9,lr |
| 17623 | + brlo __dfdiv_return_op1 /* Op2 is a normal number return inf */ |
| 17624 | + /* Other combinations: return NaN */ |
| 17625 | + mov r11, -1 |
| 17626 | + ldm sp++, r2-r7,pc |
| 17627 | + |
| 17628 | +1: |
| 17629 | + /* Op2 is NaN or Inf */ |
| 17630 | + cpc r8 |
| 17631 | + /* If inf return zero else return NaN*/ |
| 17632 | + mov r10, 0 |
| 17633 | + moveq r11, 0 |
| 17634 | + movne r11, -1 |
| 17635 | + ldm sp++, r2-r7,pc |
| 17636 | + |
| 17637 | +2: |
| 17638 | + /* Op2 is zero */ |
| 17639 | + or r6,r11,r10 /* 0.0/0.0 yields NaN */ |
| 17640 | + mov r10, 0 |
| 17641 | + moveq r11, -1 /* Return NaN */ |
| 17642 | + movne r11, lr /* Return inf */ |
| 17643 | + rjmp __dfdiv_return_op1 |
| 17644 | + |
| 17645 | +#endif |
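
The quotient of the mantissas in __avr32_f64_div comes from a classic restoring (shift-and-subtract) division: each of the 55 iterations compares the running dividend against the divisor, subtracts when possible, records one quotient bit, and shifts the dividend left; whatever remains afterwards becomes a sticky bit. A compact C sketch of that loop over 64-bit values (illustrative only):

```c
#include <stdint.h>

/* Illustrative sketch: restoring division of two normalized mantissas,
   producing 'bits' quotient bits plus a final sticky bit, in the spirit
   of the 55-iteration loop in __avr32_f64_div. */
static uint64_t divide_mantissas (uint64_t dividend, uint64_t divisor, int bits)
{
  uint64_t quotient = 0;

  for (int i = 0; i < bits; i++)
    {
      quotient <<= 1;
      if (dividend >= divisor)
        {
          dividend -= divisor;        /* this quotient bit is 1 */
          quotient |= 1;
        }
      dividend <<= 1;                 /* bring in the next bit  */
    }

  /* Anything left over means the result is inexact: append a sticky bit. */
  quotient = (quotient << 1) | (dividend != 0);
  return quotient;
}
```
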
| 17646 | + |
| 17647 | + |
| 17648 | +#ifdef L_avr32_f32_div |
| 17649 | + .global __avr32_f32_div |
| 17650 | + .type __avr32_f32_div,@function |
| 17651 | +__avr32_f32_div: |
| 17652 | + eor r8, r11, r12 /* MSB(r8) = Sign(op1) ^ Sign(op2) */ |
| 17653 | + /* Unpack */ |
| 17654 | + lsl r12,1 |
| 17655 | + reteq 0 /* Return zero if op1 is zero */ |
| 17656 | + lddpc r9, .Linf_sf |
| 17657 | + lsl r11,1 |
| 17658 | + |
| 17659 | + /* Check op1 for NaN or Inf */ |
| 17660 | + cp r12,r9 |
| 17661 | + brhs 2f |
| 17662 | + |
| 17663 | + /* Check op2 for NaN or Inf */ |
| 17664 | + cp r11,r9 |
| 17665 | + brhs 3f |
| 17666 | + /* Check op2 for zero */ |
| 17667 | + tst r11,r11 |
| 17668 | + breq 4f |
| 17669 | + |
| 17670 | + /* If op1 is zero return zero */ |
| 17671 | + tst r12, r12 |
| 17672 | + reteq 0 |
| 17673 | + |
| 17674 | + /* Unpack op1*/ |
| 17675 | + unpack_sf r9 /*exp*/, r12 /*sf*/ |
| 17676 | + |
| 17677 | + /* Unpack op2*/ |
| 17678 | + unpack_sf r10 /*exp*/, r11 /*sf*/ |
| 17679 | + |
| 17680 | + /* Calculate new exponent */ |
| 17681 | + stm --sp,r7,lr |
| 17682 | + sub r9, r10 |
| 17683 | + sub r9,-127 |
| 17684 | + |
| 17685 | + /* Divide */ |
| 17686 | + mov r7,26 |
| 17687 | + |
| 17688 | + lsr r12,1 /* Make room for one more bit in mantissas */ |
| 17689 | + lsr r11,1 |
| 17690 | + |
| 17691 | +0: |
| 17692 | + sub r10,r12,r11 |
| 17693 | + movcc r12, r10 /* update dividend if divisor smaller */ |
| 17694 | + rol lr /* shift result into lr */ |
| 17695 | + eorl lr,1 /* flip bit. */ |
| 17696 | + lsl r12,1 /* Shift dividend */ |
| 17697 | + sub r7,1 |
| 17698 | + brne 0b |
| 17699 | + |
| 17700 | + /* round and scale*/ |
| 17701 | + neg r12 /* c = 1 iff r12 != 0 */ |
| 17702 | + rol lr |
| 17703 | + lsl r10,lr,(32-27) /* Adjust mantissa */ |
| 17704 | + ldm sp++, r7, lr |
| 17705 | + |
| 17706 | + |
| 17707 | + normalize_sf r9 /*exp*/, r10 /*mant*/, r11 /*scratch*/ |
| 17708 | + |
| 17709 | + /* Check for subnormal result */ |
| 17710 | + cp.w r9, 0 |
| 17711 | + brgt 0f |
| 17712 | + |
| 17713 | + /* Adjust a subnormal result */ |
| 17714 | + adjust_subnormal_sf r12 /*sf*/, r9 /*exp*/, r10 /*mant*/, r8 /*sign*/,r11 /*scratch*/ |
| 17715 | + ret r12 |
| 17716 | +0: |
| 17717 | + round_sf r9 /*exp*/, r10 /*mant*/, r11 /*scratch*/ |
| 17718 | + pack_sf r12 /*sf*/, r9 /*exp*/, r10 /*mant*/ |
| 17719 | +__divsf_return_op1: |
| 17720 | + lsl r8, 1 |
| 17721 | + ror r12 |
| 17722 | + ret r12 |
| 17723 | + |
| 17724 | +2: |
| 17725 | + /* Op1 is NaN or inf */ |
| 17726 | + retne -1 /* Return NaN if op1 is NaN */ |
| 17727 | + /* Op1 is inf check op2 */ |
| 17728 | + cp r11, r9 |
| 17729 | + brlo __divsf_return_op1 /* inf/number gives inf */ |
| 17730 | + ret -1 /* The rest gives NaN*/ |
| 17731 | +3: |
| 17732 | + /* Op2 is NaN or inf */ |
| 17733 | + reteq 0 /* Return zero if number/inf*/ |
| 17734 | + ret -1 /* Return NaN*/ |
| 17735 | +4: |
| 17736 | + /* Op2 is zero ? */ |
| 17737 | + tst r12,r12 |
| 17738 | + reteq -1 /* 0.0/0.0 is NaN */ |
| 17739 | + lddpc r12, .Linf_sf |
| 17740 | + rjmp __divsf_return_op1 |
| 17741 | + |
| 17742 | +#endif |
| 17743 | + |
| 17744 | +#ifdef L_avr32_f32_mul |
| 17745 | + .global __avr32_f32_mul |
| 17746 | + .type __avr32_f32_mul,@function |
| 17747 | +__avr32_f32_mul: |
| 17748 | + eor r8, r11, r12 /* MSB(r8) = Sign(op1) ^ Sign(op2) */ |
| 17749 | + lsl r12,1 /* unpack op1 */ |
| 17750 | + lsl r11,1 /* unpack op2 */ |
| 17751 | + |
| 17752 | + /* arrange operands so that op1 >= op2 */ |
| 17753 | + sub r9,r12,r11 |
| 17754 | + brcc 0f |
| 17755 | + |
| 17756 | + sub r12,r9 /* swap operands if op2 was larger */ |
| 17757 | + add r11,r9 |
| 17758 | + |
| 17759 | +0: |
| 17760 | + lddpc r9,.Linf_sf |
| 17761 | + cp r12,r9 |
| 17762 | + brhs 2f |
| 17763 | + |
| 17764 | + /* Check op2 for zero */ |
| 17765 | + tst r11,r11 |
| 17766 | + reteq 0 /* Return zero */ |
| 17767 | + |
| 17768 | + /* Unpack op1 */ |
| 17769 | + unpack_sf r9 /*exp*/, r12 /*sf*/ |
| 17770 | + /* Unpack op2 */ |
| 17771 | + unpack_sf r10 /*exp*/, r11 /*sf*/ |
| 17772 | + |
| 17773 | + /* Calculate new exponent */ |
| 17774 | + add r9,r10 |
| 17775 | + |
| 17776 | + /* Do the multiplication */ |
| 17777 | + mulu.d r10,r12,r11 |
| 17778 | + |
| 17779 | + sub r9,(127-1) /* remove extra exponent bias */ |
| 17780 | + |
| 17781 | + /* Check if we have any bits in r10 which |
| 17782 | + means a rounding bit should be inserted in LSB of result */ |
| 17783 | + tst r10,r10 |
| 17784 | + srne r10 |
| 17785 | + or r12,r11,r10 |
| 17786 | + |
| 17787 | + /* Normalize */ |
| 17788 | + normalize_sf r9 /*exp*/, r12 /*mant*/, r11 /*scratch*/ |
| 17789 | + |
| 17790 | + /* Check for subnormal result */ |
| 17791 | + cp.w r9, 0 |
| 17792 | + brgt 0f |
| 17793 | + |
| 17794 | + /* Adjust a subnormal result */ |
| 17795 | + adjust_subnormal_sf r12/*sf*/, r9 /*exp*/, r12 /*mant*/, r8 /*sign*/, r11 /*scratch */ |
| 17796 | + ret r12 |
| 17797 | +0: |
| 17798 | + round_sf r9 /*exp*/, r12 /*mant*/, r11 /*scratch*/ |
| 17799 | + cp.w r9, 0xff |
| 17800 | + brlo 1f |
| 17801 | + lddpc r12,.Linf_sf |
| 17802 | + rjmp __mulsf_return_op1 |
| 17803 | +1: |
| 17804 | + pack_sf r12 /*sf*/, r9 /*exp*/, r12 /*mant*/ |
| 17805 | +__mulsf_return_op1: |
| 17806 | + lsl r8, 1 |
| 17807 | + ror r12 |
| 17808 | + ret r12 |
| 17809 | + |
| 17810 | +2: |
| 17811 | + /* Op1 is inf or NaN */ |
| 17812 | + retne -1 /* Op1 is NaN return NaN */ |
| 17813 | + |
| 17814 | + /* Op1 is inf and op2 is smaller, so op2 cannot be nan; it is |
| 17815 | + either infinity, a (sub)normal number or zero */ |
| 17816 | + cp r11,0 |
| 17817 | + brne __mulsf_return_op1 /* op2 is not zero. return op1.*/ |
| 17818 | + ret -1 /* inf * 0 return NaN */ |
| 17819 | +#endif |
| 17820 | + |
| 17821 | + |
| 17822 | +#ifdef L_avr32_s32_to_f32 |
| 17823 | + .global __avr32_s32_to_f32 |
| 17824 | + .type __avr32_s32_to_f32,@function |
| 17825 | +__avr32_s32_to_f32: |
| 17826 | + cp r12, 0 |
| 17827 | + reteq r12 /* If zero then return zero float */ |
| 17828 | + mov r11, r12 /* Keep the sign */ |
| 17829 | + abs r12 /* Compute the absolute value */ |
| 17830 | + mov r10, 31 + 127 /* Set the correct exponent */ |
| 17831 | + |
| 17832 | + /* Normalize */ |
| 17833 | + normalize_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/ |
| 17834 | + |
| 17835 | + /* Check for subnormal result */ |
| 17836 | + cp.w r10, 0 |
| 17837 | + brgt 0f |
| 17838 | + |
| 17839 | + /* Adjust a subnormal result */ |
| 17840 | + adjust_subnormal_sf r12/*sf*/, r10 /*exp*/, r12 /*mant*/, r11/*sign*/, r9 /*scratch*/ |
| 17841 | + ret r12 |
| 17842 | +0: |
| 17843 | + round_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/ |
| 17844 | + pack_sf r12 /*sf*/, r10 /*exp*/, r12 /*mant*/ |
| 17845 | +__floatsisf_return_op1: |
| 17846 | + lsl r11, 1 |
| 17847 | + ror r12 |
| 17848 | + ret r12 |
| 17849 | +#endif |
| 17850 | + |
| 17851 | +#ifdef L_avr32_u32_to_f32 |
| 17852 | + .global __avr32_u32_to_f32 |
| 17853 | + .type __avr32_u32_to_f32,@function |
| 17854 | +__avr32_u32_to_f32: |
| 17855 | + cp r12, 0 |
| 17856 | + reteq r12 /* If zero then return zero float */ |
| 17857 | + mov r10, 31 + 127 /* Set the correct exponent */ |
| 17858 | + |
| 17859 | + /* Normalize */ |
| 17860 | + normalize_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/ |
| 17861 | + |
| 17862 | + /* Check for subnormal result */ |
| 17863 | + cp.w r10, 0 |
| 17864 | + brgt 0f |
| 17865 | + |
| 17866 | + /* Adjust a subnormal result */ |
| 17867 | + mov r8, 0 |
| 17868 | + adjust_subnormal_sf r12/*sf*/,r10 /*exp*/, r12 /*mant*/,r8/*sign*/, r9 /*scratch*/ |
| 17869 | + ret r12 |
| 17870 | +0: |
| 17871 | + round_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/ |
| 17872 | + pack_sf r12 /*sf*/, r10 /*exp*/, r12 /*mant*/ |
| 17873 | +__floatunsisf_return_op1: |
| 17874 | + lsr r12,1 /* Sign bit is 0 for unsigned int */ |
| 17875 | + ret r12 |
| 17876 | +#endif |
| 17877 | + |
| 17878 | + |
| 17879 | +#ifdef L_avr32_f32_to_s32 |
| 17880 | + .global __avr32_f32_to_s32 |
| 17881 | + .type __avr32_f32_to_s32,@function |
| 17882 | +__avr32_f32_to_s32: |
| 17883 | + lsr r11,r12,23 /* Extract exponent */ |
| 17884 | + castu.b r11 |
| 17885 | + sub r11,127 /* Fix bias */ |
| 17886 | + retlo 0 /* Negative exponent yields zero integer */ |
| 17887 | + |
| 17888 | +#ifdef __IEEE_LARGE_FLOATS__ |
| 17889 | + cp r11,31 |
| 17890 | + brcc 0f |
| 17891 | +#endif |
| 17892 | + /* Shift mantissa into correct position */ |
| 17893 | + rsub r11,r11,31 /* Shift amount */ |
| 17894 | + lsl r10,r12,8 /* Get mantissa */ |
| 17895 | + sbr r10,31 /* Add implicit bit */ |
| 17896 | + lsr r10,r10,r11 /* Perform shift */ |
| 17897 | + lsl r12,1 /* Check sign */ |
| 17898 | + retcc r10 /* if positive, we are done */ |
| 17899 | + neg r10 /* if negative float, negate result */ |
| 17900 | + ret r10 |
| 17901 | + |
| 17902 | +#ifdef __IEEE_LARGE_FLOATS__ |
| 17903 | +0: |
| 17904 | + mov r11,-1 |
| 17905 | + lsr r11,1 |
| 17906 | + lsl r12,1 |
| 17907 | + acr r11 |
| 17908 | + |
| 17909 | + ret r11 |
| 17910 | +#endif |
| 17911 | +#endif |
| 17912 | + |
| 17913 | +#ifdef L_avr32_f32_to_u32 |
| 17914 | + .global __avr32_f32_to_u32 |
| 17915 | + .type __avr32_f32_to_u32,@function |
| 17916 | +__avr32_f32_to_u32: |
| 17917 | + cp r12,0 |
| 17918 | + retmi 0 /* Negative numbers give 0 */ |
| 17919 | + bfextu r11, r12, 23, 8 /* Extract exponent */ |
| 17920 | + sub r11,127 /* Fix bias */ |
| 17921 | + retlo 0 /* Negative exponent yields zero integer */ |
| 17922 | + |
| 17923 | +#ifdef __IEEE_LARGE_FLOATS__ |
| 17924 | + cp r11,32 |
| 17925 | + brcc 0f |
| 17926 | +#endif |
| 17927 | + /* Shift mantissa into correct position */ |
| 17928 | + rsub r11,r11,31 /* Shift amount */ |
| 17929 | + lsl r12,8 /* Get mantissa */ |
| 17930 | + sbr r12,31 /* Add implicit bit */ |
| 17931 | + lsr r12,r12,r11 /* Perform shift */ |
| 17932 | + ret r12 |
| 17933 | + |
| 17934 | +#ifdef __IEEE_LARGE_FLOATS__ |
| 17935 | +0: |
| 17936 | + mov r11,-1 |
| 17937 | + lsr r11,1 |
| 17938 | + lsl r12,1 |
| 17939 | + acr r11 |
| 17940 | + |
| 17941 | + ret r11 |
| 17942 | +#endif |
| 17943 | +#endif |
| 17944 | + |
| 17945 | +#ifdef L_avr32_f32_to_f64 |
| 17946 | + .global __avr32_f32_to_f64 |
| 17947 | + .type __avr32_f32_to_f64,@function |
| 17948 | + |
| 17949 | +__avr32_f32_to_f64: |
| 17950 | + lsl r11,r12,1 /* Remove sign bit, keep original value in r12*/ |
| 17951 | + moveq r10, 0 |
| 17952 | + reteq r11 /* Return zero if input is zero */ |
| 17953 | + |
| 17954 | + bfextu r9,r11,24,8 /* Get exponent */ |
| 17955 | + cp.w r9,0xff /* check for NaN or inf */ |
| 17956 | + breq 0f |
| 17957 | + |
| 17958 | + lsl r11,7 /* Convert sf mantissa to df format */ |
| 17959 | + mov r10,0 |
| 17960 | + |
| 17961 | + /* Check if implicit bit should be set */ |
| 17962 | + cp.w r9, 0 |
| 17963 | + subeq r9,-1 /* Adjust exponent if it was 0 */ |
| 17964 | + srne r8 |
| 17965 | + or r11, r11, r8 << 31 /* Set implicit bit if needed */ |
| 17966 | + sub r9,(127-0x3ff) /* Convert exponent to df format exponent */ |
| 17967 | + |
| 17968 | + pushm lr |
| 17969 | + normalize_df r9 /*exp*/, r10, r11 /*mantissa*/, r8, lr /*scratch*/ |
| 17970 | + popm lr |
| 17971 | + pack_df r9 /*exp*/, r10, r11 /*mantissa*/, r10, r11 /*df*/ |
| 17972 | + |
| 17973 | +__extendsfdf_return_op1: |
| 17974 | + /* Rotate in sign bit */ |
| 17975 | + lsl r12, 1 |
| 17976 | + ror r11 |
| 17977 | + ret r11 |
| 17978 | + |
| 17979 | +0: |
| 17980 | + /* Inf or NaN*/ |
| 17981 | + lddpc r10, .Linf |
| 17982 | + lsl r11,8 /* check mantissa */ |
| 17983 | + movne r11, -1 /* Return NaN */ |
| 17984 | + moveq r11, r10 /* Return inf */ |
| 17985 | + rjmp __extendsfdf_return_op1 |
| 17986 | +#endif |
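
Widening float to double is exact: the 23-bit fraction simply moves up into the 52-bit field and the exponent is rebiased from 127 to 1023, with NaN, infinity and subnormals handled separately as in the assembly above. A hedged C sketch of the normal-number path (illustrative name):

```c
#include <stdint.h>

/* Illustrative sketch: widen a single-precision bit pattern to double
   precision.  Only normal numbers and zero are handled here; the
   assembly above additionally deals with NaN, infinity and subnormals. */
static uint64_t f32_bits_to_f64_bits (uint32_t f)
{
  uint64_t sign = (uint64_t)(f >> 31) << 63;
  uint32_t exp  = (f >> 23) & 0xff;
  uint64_t frac = f & 0x7fffff;

  if (exp == 0 && frac == 0)
    return sign;                               /* +/- 0.0 */

  /* Rebias the exponent (127 -> 1023) and widen the fraction (23 -> 52). */
  return sign
         | ((uint64_t)(exp - 127 + 1023) << 52)
         | (frac << (52 - 23));
}
```
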
| 17987 | + |
| 17988 | + |
| 17989 | +#ifdef L_avr32_f64_to_f32 |
| 17990 | + .global __avr32_f64_to_f32 |
| 17991 | + .type __avr32_f64_to_f32,@function |
| 17992 | + |
| 17993 | +__avr32_f64_to_f32: |
| 17994 | + /* Unpack */ |
| 17995 | + lsl r9,r11,1 /* Unpack exponent */ |
| 17996 | + lsr r9,21 |
| 17997 | + |
| 17998 | + reteq 0 /* If exponent is 0 the number is so small |
| 17999 | + that the conversion to single float gives |
| 18000 | + zero */ |
| 18001 | + |
| 18002 | + lsl r8,r11,10 /* Adjust mantissa */ |
| 18003 | + or r12,r8,r10>>22 |
| 18004 | + |
| 18005 | + lsl r10,10 /* Check if there are any remaining bits |
| 18006 | + in the low part of the mantissa.*/ |
| 18007 | + neg r10 |
| 18008 | + rol r12 /* If there were remaining bits then set lsb |
| 18009 | + of mantissa to 1 */ |
| 18010 | + |
| 18011 | + cp r9,0x7ff |
| 18012 | + breq 2f /* Check for NaN or inf */ |
| 18013 | + |
| 18014 | + sub r9,(0x3ff-127) /* Adjust bias of exponent */ |
| 18015 | + sbr r12,31 /* set the implicit bit.*/ |
| 18016 | + |
| 18017 | + cp.w r9, 0 /* Check for subnormal number */ |
| 18018 | + brgt 0f |
| 18019 | + |
| 18020 | + /* Adjust a subnormal result */ |
| 18021 | + adjust_subnormal_sf r12/*sf*/,r9 /*exp*/, r12 /*mant*/, r11/*sign*/, r10 /*scratch*/ |
| 18022 | + ret r12 |
| 18023 | +0: |
| 18024 | + round_sf r9 /*exp*/, r12 /*mant*/, r10 /*scratch*/ |
| 18025 | + pack_sf r12 /*sf*/, r9 /*exp*/, r12 /*mant*/ |
| 18026 | +__truncdfsf_return_op1: |
| 18027 | + /* Rotate in sign bit */ |
| 18028 | + lsl r11, 1 |
| 18029 | + ror r12 |
| 18030 | + ret r12 |
| 18031 | + |
| 18032 | + |
| 18033 | +2: |
| 18034 | + /* NaN or inf */ |
| 18035 | + cbr r12,31 /* clear implicit bit */ |
| 18036 | + retne -1 /* Return NaN if mantissa not zero */ |
| 18037 | + lddpc r12,.Linf_sf |
| 18038 | + ret r12 /* Return inf */ |
| 18039 | +#endif |
| 18040 | + |
| 18041 | + |
| 18042 | + .align 2 |
| 18043 | +.Linf: |
| 18044 | + .long 0xffe00000 |
| 18045 | + |
| 18046 | + .align 2 |
| 18047 | +.Linf_sf: |
| 18048 | + .long 0xff000000 |
| 18049 | + |
| 18050 | --- /dev/null |
| 18051 | +++ b/gcc/config/avr32/lib2funcs.S |
| 18052 | @@ -0,0 +1,21 @@ |
| 18053 | + .align 4 |
| 18054 | + .global __nonlocal_goto |
| 18055 | + .type __nonlocal_goto,@function |
| 18056 | + |
| 18057 | +/* __nonlocal_goto: This function handles nonlocal_goto's in gcc. |
| 18058 | + |
| 18059 | + parameter 0 (r12) = New Frame Pointer |
| 18060 | + parameter 1 (r11) = Address to goto |
| 18061 | + parameter 2 (r10) = New Stack Pointer |
| 18062 | + |
| 18063 | + This function invalidates the return stack, since it returns from a |
| 18064 | + function without using a return instruction. |
| 18065 | +*/ |
| 18066 | +__nonlocal_goto: |
| 18067 | + mov r7, r12 |
| 18068 | + mov sp, r10 |
| 18069 | + frs # Flush return stack |
| 18070 | + mov pc, r11 |
| 18071 | + |
| 18072 | + |
| 18073 | + |
| 18074 | --- /dev/null |
| 18075 | +++ b/gcc/config/avr32/linux-elf.h |
| 18076 | @@ -0,0 +1,154 @@ |
| 18077 | +/* |
| 18078 | + Linux/Elf specific definitions. |
| 18079 | + Copyright 2003-2006 Atmel Corporation. |
| 18080 | + |
| 18081 | + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| 18082 | + and Håvard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com> |
| 18083 | + |
| 18084 | + This file is part of GCC. |
| 18085 | + |
| 18086 | + This program is free software; you can redistribute it and/or modify |
| 18087 | + it under the terms of the GNU General Public License as published by |
| 18088 | + the Free Software Foundation; either version 2 of the License, or |
| 18089 | + (at your option) any later version. |
| 18090 | + |
| 18091 | + This program is distributed in the hope that it will be useful, |
| 18092 | + but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 18093 | + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 18094 | + GNU General Public License for more details. |
| 18095 | + |
| 18096 | + You should have received a copy of the GNU General Public License |
| 18097 | + along with this program; if not, write to the Free Software |
| 18098 | + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ |
| 18099 | + |
| 18100 | + |
| 18101 | + |
| 18102 | +/* elfos.h should have already been included. Now just override |
| 18103 | + any conflicting definitions and add any extras. */ |
| 18104 | + |
| 18105 | +/* Run-time Target Specification. */ |
| 18106 | +#undef TARGET_VERSION |
| 18107 | +#define TARGET_VERSION fputs (" (AVR32 GNU/Linux with ELF)", stderr); |
| 18108 | + |
| 18109 | +/* Do not assume anything about header files. */ |
| 18110 | +#define NO_IMPLICIT_EXTERN_C |
| 18111 | + |
| 18112 | +/* The GNU C++ standard library requires that these macros be defined. */ |
| 18113 | +#undef CPLUSPLUS_CPP_SPEC |
| 18114 | +#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)" |
| 18115 | + |
| 18116 | +/* Now we define the strings used to build the spec file. */ |
| 18117 | +#undef LIB_SPEC |
| 18118 | +#define LIB_SPEC \ |
| 18119 | + "%{pthread:-lpthread} \ |
| 18120 | + %{shared:-lc} \ |
| 18121 | + %{!shared:%{profile:-lc_p}%{!profile:-lc}}" |
| 18122 | + |
| 18123 | +/* Provide a STARTFILE_SPEC appropriate for GNU/Linux. Here we add |
| 18124 | + the GNU/Linux magical crtbegin.o file (see crtstuff.c) which |
| 18125 | + provides part of the support for getting C++ file-scope static |
| 18126 | + objects constructed before entering `main'. */ |
| 18127 | + |
| 18128 | +#undef STARTFILE_SPEC |
| 18129 | +#define STARTFILE_SPEC \ |
| 18130 | + "%{!shared: \ |
| 18131 | + %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} \ |
| 18132 | + %{!p:%{profile:gcrt1.o%s} \ |
| 18133 | + %{!profile:crt1.o%s}}}} \ |
| 18134 | + crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}" |
| 18135 | + |
| 18136 | +/* Provide a ENDFILE_SPEC appropriate for GNU/Linux. Here we tack on |
| 18137 | + the GNU/Linux magical crtend.o file (see crtstuff.c) which |
| 18138 | + provides part of the support for getting C++ file-scope static |
| 18139 | + objects constructed before entering `main', followed by a normal |
| 18140 | + GNU/Linux "finalizer" file, `crtn.o'. */ |
| 18141 | + |
| 18142 | +#undef ENDFILE_SPEC |
| 18143 | +#define ENDFILE_SPEC \ |
| 18144 | + "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s" |
| 18145 | + |
| 18146 | +#undef ASM_SPEC |
| 18147 | +#define ASM_SPEC "%{!mno-pic:--pic} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{mcpu=*:-mcpu=%*}" |
| 18148 | + |
| 18149 | +#undef LINK_SPEC |
| 18150 | +#define LINK_SPEC "%{version:-v} \ |
| 18151 | + %{static:-Bstatic} \ |
| 18152 | + %{shared:-shared} \ |
| 18153 | + %{symbolic:-Bsymbolic} \ |
| 18154 | + %{rdynamic:-export-dynamic} \ |
| 18155 | + %{!dynamic-linker:-dynamic-linker /lib/ld-uClibc.so.0} \ |
| 18156 | + %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}}" |
| 18157 | + |
| 18158 | +#define TARGET_OS_CPP_BUILTINS() LINUX_TARGET_OS_CPP_BUILTINS() |
| 18159 | + |
| 18160 | +/* This is how we tell the assembler that two symbols have the same value. */ |
| 18161 | +#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2) \ |
| 18162 | + do \ |
| 18163 | + { \ |
| 18164 | + assemble_name (FILE, NAME1); \ |
| 18165 | + fputs (" = ", FILE); \ |
| 18166 | + assemble_name (FILE, NAME2); \ |
| 18167 | + fputc ('\n', FILE); \ |
| 18168 | + } \ |
| 18169 | + while (0) |
| 18170 | + |
| 18171 | + |
| 18172 | + |
| 18173 | +#undef CC1_SPEC |
| 18174 | +#define CC1_SPEC "%{profile:-p}" |
| 18175 | + |
| 18176 | +/* Target CPU builtins. */ |
| 18177 | +#define TARGET_CPU_CPP_BUILTINS() \ |
| 18178 | + do \ |
| 18179 | + { \ |
| 18180 | + builtin_define ("__avr32__"); \ |
| 18181 | + builtin_define ("__AVR32__"); \ |
| 18182 | + builtin_define ("__AVR32_LINUX__"); \ |
| 18183 | + builtin_define (avr32_part->macro); \ |
| 18184 | + builtin_define (avr32_arch->macro); \ |
| 18185 | + if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) \ |
| 18186 | + builtin_define ("__AVR32_AVR32A__"); \ |
| 18187 | + else \ |
| 18188 | + builtin_define ("__AVR32_AVR32B__"); \ |
| 18189 | + if (TARGET_UNALIGNED_WORD) \ |
| 18190 | + builtin_define ("__AVR32_HAS_UNALIGNED_WORD__"); \ |
| 18191 | + if (TARGET_SIMD) \ |
| 18192 | + builtin_define ("__AVR32_HAS_SIMD__"); \ |
| 18193 | + if (TARGET_DSP) \ |
| 18194 | + builtin_define ("__AVR32_HAS_DSP__"); \ |
| 18195 | + if (TARGET_RMW) \ |
| 18196 | + builtin_define ("__AVR32_HAS_RMW__"); \ |
| 18197 | + if (TARGET_BRANCH_PRED) \ |
| 18198 | + builtin_define ("__AVR32_HAS_BRANCH_PRED__"); \ |
| 18199 | + if (flag_pic) \ |
| 18200 | + { \ |
| 18201 | + builtin_define ("__PIC__"); \ |
| 18202 | + builtin_define ("__pic__"); \ |
| 18203 | + } \ |
| 18204 | + } \ |
| 18205 | + while (0) |
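
TARGET_CPU_CPP_BUILTINS is what makes these feature-test macros visible to code compiled for this target, so sources can select implementations at preprocessing time. For example (illustrative user code, not part of the patch):

```c
/* Illustrative user code: compile-time dispatch on the macros predefined
   by TARGET_CPU_CPP_BUILTINS above. */
#include <stdio.h>

int main (void)
{
#if defined(__AVR32_AVR32A__)
  puts ("built for an AVR32A microarchitecture");
#elif defined(__AVR32_AVR32B__)
  puts ("built for an AVR32B microarchitecture");
#else
  puts ("not built with the AVR32 Linux target");
#endif

#ifdef __AVR32_HAS_SIMD__
  puts ("SIMD instructions available");
#endif
  return 0;
}
```
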
| 18206 | + |
| 18207 | + |
| 18208 | + |
| 18209 | +/* Call the function profiler with a given profile label. */ |
| 18210 | +#undef FUNCTION_PROFILER |
| 18211 | +#define FUNCTION_PROFILER(STREAM, LABELNO) \ |
| 18212 | + do \ |
| 18213 | + { \ |
| 18214 | + fprintf (STREAM, "\tmov\tlr, lo(mcount)\n\torh\tlr, hi(mcount)\n"); \ |
| 18215 | + fprintf (STREAM, "\ticall lr\n"); \ |
| 18216 | + } \ |
| 18217 | + while (0) |
| 18218 | + |
| 18219 | +#define NO_PROFILE_COUNTERS 1 |
| 18220 | + |
| 18221 | +/* For dynamic libraries to work */ |
| 18222 | +/* #define PLT_REG_CALL_CLOBBERED 1 */ |
| 18223 | +#define AVR32_ALWAYS_PIC 1 |
| 18224 | + |
| 18225 | +/* uClibc does not implement sinf, cosf, etc. */
| 18226 | +#undef TARGET_C99_FUNCTIONS |
| 18227 | +#define TARGET_C99_FUNCTIONS 0 |
| 18228 | + |
| 18229 | +#define LINK_GCC_C_SEQUENCE_SPEC \ |
| 18230 | + "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}" |
| 18231 | --- /dev/null |
| 18232 | +++ b/gcc/config/avr32/predicates.md |
| 18233 | @@ -0,0 +1,303 @@ |
| 18234 | +;; AVR32 predicates file. |
| 18235 | +;; Copyright 2003-2006 Atmel Corporation. |
| 18236 | +;; |
| 18237 | +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| 18238 | +;; |
| 18239 | +;; This file is part of GCC. |
| 18240 | +;; |
| 18241 | +;; This program is free software; you can redistribute it and/or modify |
| 18242 | +;; it under the terms of the GNU General Public License as published by |
| 18243 | +;; the Free Software Foundation; either version 2 of the License, or |
| 18244 | +;; (at your option) any later version. |
| 18245 | +;; |
| 18246 | +;; This program is distributed in the hope that it will be useful, |
| 18247 | +;; but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 18248 | +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 18249 | +;; GNU General Public License for more details. |
| 18250 | +;; |
| 18251 | +;; You should have received a copy of the GNU General Public License |
| 18252 | +;; along with this program; if not, write to the Free Software |
| 18253 | +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
| 18254 | + |
| 18255 | + |
| 18256 | +;; True if the operand is a memory reference which contains an |
| 18257 | +;; address consisting of a single pointer register
| 18258 | +(define_predicate "avr32_indirect_register_operand" |
| 18259 | + (and (match_code "mem") |
| 18260 | + (match_test "register_operand(XEXP(op, 0), SImode)"))) |
| 18261 | + |
| 18262 | + |
| 18263 | + |
| 18264 | +;; Address expression consisting of a base pointer offset by
| 18265 | +;; a register displacement
| 18266 | +(define_predicate "avr32_indexed_memory_operand" |
| 18267 | + (and (match_code "mem") |
| 18268 | + (match_test "GET_CODE(XEXP(op, 0)) == PLUS")) |
| 18269 | + { |
| 18270 | + |
| 18271 | + rtx op0 = XEXP(XEXP(op, 0), 0); |
| 18272 | + rtx op1 = XEXP(XEXP(op, 0), 1); |
| 18273 | + |
| 18274 | + return ((avr32_address_register_rtx_p (op0, 0) |
| 18275 | + && avr32_legitimate_index_p (GET_MODE(op), op1, 0)) |
| 18276 | + || (avr32_address_register_rtx_p (op1, 0) |
| 18277 | + && avr32_legitimate_index_p (GET_MODE(op), op0, 0))); |
| 18278 | + |
| 18279 | + }) |
| 18280 | + |
| 18281 | +;; Operand suitable for the ld.sb instruction |
| 18282 | +(define_predicate "load_sb_memory_operand" |
| 18283 | + (ior (match_operand 0 "avr32_indirect_register_operand") |
| 18284 | + (match_operand 0 "avr32_indexed_memory_operand"))) |
| 18285 | + |
| 18286 | + |
| 18287 | +;; Operand suitable as an operand to insns that sign-extend QI values
| 18288 | +(define_predicate "extendqi_operand" |
| 18289 | + (ior (match_operand 0 "load_sb_memory_operand") |
| 18290 | + (match_operand 0 "register_operand"))) |
| 18291 | + |
| 18292 | +(define_predicate "post_inc_memory_operand" |
| 18293 | + (and (match_code "mem") |
| 18294 | + (match_test "(GET_CODE(XEXP(op, 0)) == POST_INC) |
| 18295 | + && REG_P(XEXP(XEXP(op, 0), 0))"))) |
| 18296 | + |
| 18297 | +;; Operand suitable for loading TImode values |
| 18298 | +(define_predicate "loadti_operand" |
| 18299 | + (ior (ior (match_operand 0 "register_operand") |
| 18300 | + (match_operand 0 "avr32_indirect_register_operand")) |
| 18301 | + (match_operand 0 "post_inc_memory_operand"))) |
| 18302 | + |
| 18303 | +;; Operand suitable for add instructions |
| 18304 | +(define_predicate "avr32_add_operand" |
| 18305 | + (ior (match_operand 0 "register_operand") |
| 18306 | + (and (match_operand 0 "immediate_operand") |
| 18307 | + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'I', \"Is21\")")))) |
| 18308 | + |
| 18309 | +;; Operand is a power of two immediate |
| 18310 | +(define_predicate "power_of_two_operand" |
| 18311 | + (match_code "const_int") |
| 18312 | +{ |
| 18313 | + HOST_WIDE_INT value = INTVAL (op); |
| 18314 | + |
| 18315 | + return value != 0 && (value & (value - 1)) == 0; |
| 18316 | +}) |
| 18317 | + |
| 18318 | +;; Operand is a multiple of 8 immediate |
| 18319 | +(define_predicate "multiple_of_8_operand" |
| 18320 | + (match_code "const_int") |
| 18321 | +{ |
| 18322 | + HOST_WIDE_INT value = INTVAL (op); |
| 18323 | + |
| 18324 | + return (value & 0x7) == 0 ; |
| 18325 | +}) |
| 18326 | + |
| 18327 | +;; Operand is a multiple of 16 immediate |
| 18328 | +(define_predicate "multiple_of_16_operand" |
| 18329 | + (match_code "const_int") |
| 18330 | +{ |
| 18331 | + HOST_WIDE_INT value = INTVAL (op); |
| 18332 | + |
| 18333 | + return (value & 0xf) == 0 ; |
| 18334 | +}) |
| 18335 | + |
| 18336 | +;; Operand is a mask used for masking away upper bits of a reg |
| 18337 | +(define_predicate "avr32_mask_upper_bits_operand" |
| 18338 | + (match_code "const_int") |
| 18339 | +{ |
| 18340 | + HOST_WIDE_INT value = INTVAL (op) + 1; |
| 18341 | + |
| 18342 | + return value != 1 && value != 0 && (value & (value - 1)) == 0; |
| 18343 | +}) |
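The test above accepts constants of the form 2^n - 1 with n >= 1, i.e. masks of contiguous low-order bits. A small host-side restatement of the same check, with two example values (the helper name is made up for illustration):

```c
/* Host-side restatement of the avr32_mask_upper_bits_operand test:
   a constant is accepted when value + 1 is a power of two greater
   than one, i.e. the constant is 0x1, 0x3, 0x7, ..., 0xffff, ...  */
#include <stdio.h>

static int masks_upper_bits (long value)
{
  long v = value + 1;
  return v != 1 && v != 0 && (v & (v - 1)) == 0;
}

int main (void)
{
  printf ("%d\n", masks_upper_bits (0x0000ffffL)); /* 1: masks away the upper 16 bits */
  printf ("%d\n", masks_upper_bits (0x00ff00ffL)); /* 0: set bits are not contiguous  */
  return 0;
}
```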
| 18344 | + |
| 18345 | + |
| 18346 | +;; Operand suitable for mul instructions |
| 18347 | +(define_predicate "avr32_mul_operand" |
| 18348 | + (ior (match_operand 0 "register_operand") |
| 18349 | + (and (match_operand 0 "immediate_operand") |
| 18350 | + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")")))) |
| 18351 | + |
| 18352 | +;; True for logical binary operators. |
| 18353 | +(define_predicate "logical_binary_operator" |
| 18354 | + (match_code "ior,xor,and")) |
| 18355 | + |
| 18356 | +;; True for logical shift operators |
| 18357 | +(define_predicate "logical_shift_operator" |
| 18358 | + (match_code "ashift,lshiftrt")) |
| 18359 | + |
| 18360 | +;; True for shift operand for logical and, or and eor insns |
| 18361 | +(define_predicate "avr32_logical_shift_operand" |
| 18362 | + (and (match_code "ashift,lshiftrt") |
| 18363 | + (ior (and (match_test "GET_CODE(XEXP(op, 1)) == CONST_INT") |
| 18364 | + (match_test "register_operand(XEXP(op, 0), GET_MODE(XEXP(op, 0)))")) |
| 18365 | + (and (match_test "GET_CODE(XEXP(op, 0)) == CONST_INT") |
| 18366 | + (match_test "register_operand(XEXP(op, 1), GET_MODE(XEXP(op, 1)))")))) |
| 18367 | + { |
| 18368 | + return 1; |
| 18369 | + } |
| 18370 | + ) |
| 18371 | + |
| 18372 | + |
| 18373 | +;; Predicate for second operand to and, ior and xor insn patterns |
| 18374 | +(define_predicate "avr32_logical_insn_operand" |
| 18375 | + (ior (match_operand 0 "register_operand") |
| 18376 | + (match_operand 0 "avr32_logical_shift_operand")) |
| 18377 | + { |
| 18378 | + return 1; |
| 18379 | + } |
| 18380 | +) |
| 18381 | + |
| 18382 | + |
| 18383 | +;; True for avr32 comparison operators |
| 18384 | +(define_predicate "avr32_comparison_operator" |
| 18385 | + (ior (match_code "eq, ne, gt, ge, lt, le, gtu, geu, ltu, leu") |
| 18386 | + (and (match_code "unspec") |
| 18387 | + (match_test "(XINT(op, 1) == UNSPEC_COND_MI) |
| 18388 | + || (XINT(op, 1) == UNSPEC_COND_PL)")))) |
| 18389 | + |
| 18390 | +;; True if this is a const_int with one bit set |
| 18391 | +(define_predicate "one_bit_set_operand" |
| 18392 | + (match_code "const_int") |
| 18393 | + { |
| 18394 | + int i; |
| 18395 | + int value; |
| 18396 | + int ones = 0; |
| 18397 | + |
| 18398 | + value = INTVAL(op); |
| 18399 | + for ( i = 0 ; i < 32; i++ ){ |
| 18400 | + if ( value & ( 1 << i ) ){ |
| 18401 | + ones++; |
| 18402 | + } |
| 18403 | + } |
| 18404 | + |
| 18405 | + return ( ones == 1 ); |
| 18406 | + }) |
| 18407 | + |
| 18408 | + |
| 18409 | +;; True if this is a const_int with one bit cleared |
| 18410 | +(define_predicate "one_bit_cleared_operand" |
| 18411 | + (match_code "const_int") |
| 18412 | + { |
| 18413 | + int i; |
| 18414 | + int value; |
| 18415 | + int zeroes = 0; |
| 18416 | + |
| 18417 | + value = INTVAL(op); |
| 18418 | + for ( i = 0 ; i < 32; i++ ){ |
| 18419 | + if ( !(value & ( 1 << i )) ){ |
| 18420 | + zeroes++; |
| 18421 | + } |
| 18422 | + } |
| 18423 | + |
| 18424 | + return ( zeroes == 1 ); |
| 18425 | + }) |
| 18426 | + |
| 18427 | + |
| 18428 | +;; True if this is a register or immediate operand |
| 18429 | +(define_predicate "register_immediate_operand" |
| 18430 | + (ior (match_operand 0 "register_operand") |
| 18431 | + (match_operand 0 "immediate_operand"))) |
| 18432 | + |
| 18433 | + |
| 18434 | +;; True if this is an operand containing a label_ref
| 18435 | +(define_predicate "avr32_label_ref_operand" |
| 18436 | + (and (match_code "mem") |
| 18437 | + (match_test "avr32_find_symbol(op) |
| 18438 | + && (GET_CODE(avr32_find_symbol(op)) == LABEL_REF)"))) |
| 18439 | + |
| 18440 | +;; True if this is a valid symbol pointing to the constant pool
| 18441 | +(define_predicate "avr32_const_pool_operand" |
| 18442 | + (and (match_code "symbol_ref") |
| 18443 | + (match_test "CONSTANT_POOL_ADDRESS_P(op)")) |
| 18444 | + { |
| 18445 | + return (flag_pic ? (!(symbol_mentioned_p (get_pool_constant (op)) |
| 18446 | + || label_mentioned_p (get_pool_constant (op))) |
| 18447 | + || avr32_got_mentioned_p(get_pool_constant (op))) |
| 18448 | + : true); |
| 18449 | + } |
| 18450 | +) |
| 18451 | + |
| 18452 | +;; True if this is a memory reference to the constant or mini pool
| 18453 | +(define_predicate "avr32_const_pool_ref_operand" |
| 18454 | + (ior (match_operand 0 "avr32_label_ref_operand") |
| 18455 | + (and (match_code "mem") |
| 18456 | + (match_test "avr32_const_pool_operand(XEXP(op,0), GET_MODE(XEXP(op,0)))")))) |
| 18457 | + |
| 18458 | + |
| 18459 | + |
| 18460 | +;; True if this is a k12-offset memory operand
| 18461 | +(define_predicate "avr32_k12_memory_operand" |
| 18462 | + (and (match_code "mem") |
| 18463 | + (ior (match_test "REG_P(XEXP(op, 0))") |
| 18464 | + (match_test "GET_CODE(XEXP(op, 0)) == PLUS |
| 18465 | + && REG_P(XEXP(XEXP(op, 0), 0)) |
| 18466 | + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT) |
| 18467 | + && (CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 1)),
| 18468 | + 'K', (mode == SImode) ? \"Ks14\" : ((mode == HImode) ? \"Ks13\" : \"Ks12\")))")))) |
| 18469 | + |
| 18470 | +;; True if this is a memory operand with an immediate displacement
| 18471 | +(define_predicate "avr32_imm_disp_memory_operand" |
| 18472 | + (and (match_code "mem") |
| 18473 | + (match_test "GET_CODE(XEXP(op, 0)) == PLUS |
| 18474 | + && REG_P(XEXP(XEXP(op, 0), 0)) |
| 18475 | + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)"))) |
| 18476 | + |
| 18477 | +;; True if this is a bswap operand
| 18478 | +(define_predicate "avr32_bswap_operand" |
| 18479 | + (ior (match_operand 0 "avr32_k12_memory_operand") |
| 18480 | + (match_operand 0 "register_operand"))) |
| 18481 | + |
| 18482 | +;; True if this is a valid coprocessor insn memory operand
| 18483 | +(define_predicate "avr32_cop_memory_operand" |
| 18484 | + (and (match_operand 0 "memory_operand") |
| 18485 | + (not (match_test "GET_CODE(XEXP(op, 0)) == PLUS |
| 18486 | + && REG_P(XEXP(XEXP(op, 0), 0)) |
| 18487 | + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT) |
| 18488 | + && !(CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 1)), 'K', \"Ku10\"))")))
| 18489 | + |
| 18490 | +;; True if this is a valid source/destination operand
| 18491 | +;; for moving values to/from a coprocessor |
| 18492 | +(define_predicate "avr32_cop_move_operand" |
| 18493 | + (ior (match_operand 0 "register_operand") |
| 18494 | + (match_operand 0 "avr32_cop_memory_operand"))) |
| 18495 | + |
| 18496 | + |
| 18497 | +;; True if this is a valid extract byte offset for use in
| 18498 | +;; load extracted index insns |
| 18499 | +(define_predicate "avr32_extract_shift_operand" |
| 18500 | + (and (match_operand 0 "const_int_operand") |
| 18501 | + (match_test "(INTVAL(op) == 0) || (INTVAL(op) == 8) |
| 18502 | + || (INTVAL(op) == 16) || (INTVAL(op) == 24)"))) |
| 18503 | + |
| 18504 | +;; True if this is a floating-point register
| 18505 | +(define_predicate "avr32_fp_register_operand" |
| 18506 | + (and (match_operand 0 "register_operand") |
| 18507 | + (match_test "REGNO_REG_CLASS(REGNO(op)) == FP_REGS"))) |
| 18508 | + |
| 18509 | +;; True if this is a valid avr32 symbol operand
| 18510 | +(define_predicate "avr32_symbol_operand" |
| 18511 | + (ior (match_code "label_ref, symbol_ref") |
| 18512 | + (and (match_code "const") |
| 18513 | + (match_test "avr32_find_symbol(op)")))) |
| 18514 | + |
| 18515 | +;; True if this is a valid operand for the lda.w and call pseudo insns
| 18516 | +(define_predicate "avr32_address_operand" |
| 18517 | + (and (match_code "label_ref, symbol_ref") |
| 18518 | + (ior (match_test "TARGET_HAS_ASM_ADDR_PSEUDOS") |
| 18519 | + (match_test "flag_pic")) )) |
| 18520 | + |
| 18521 | +;; True if this is an avr32 call operand
| 18522 | +(define_predicate "avr32_call_operand" |
| 18523 | + (ior (ior (match_operand 0 "register_operand") |
| 18524 | + (ior (match_operand 0 "avr32_const_pool_ref_operand") |
| 18525 | + (match_operand 0 "avr32_address_operand"))) |
| 18526 | + (match_test "SYMBOL_REF_RCALL_FUNCTION_P(op)"))) |
| 18527 | + |
| 18528 | +;; Return true for operators performing ALU operations |
| 18529 | + |
| 18530 | +(define_predicate "alu_operator" |
| 18531 | + (match_code "ior, xor, and, plus, minus, ashift, lshiftrt, ashiftrt")) |
| 18532 | + |
| 18533 | +(define_predicate "avr32_add_shift_immediate_operand" |
| 18534 | + (and (match_operand 0 "immediate_operand") |
| 18535 | + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ku02\")"))) |
| 18536 | + |
| 18537 | --- /dev/null |
| 18538 | +++ b/gcc/config/avr32/simd.md |
| 18539 | @@ -0,0 +1,145 @@ |
| 18540 | +;; AVR32 machine description file for SIMD instructions. |
| 18541 | +;; Copyright 2003-2006 Atmel Corporation. |
| 18542 | +;; |
| 18543 | +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| 18544 | +;; |
| 18545 | +;; This file is part of GCC. |
| 18546 | +;; |
| 18547 | +;; This program is free software; you can redistribute it and/or modify |
| 18548 | +;; it under the terms of the GNU General Public License as published by |
| 18549 | +;; the Free Software Foundation; either version 2 of the License, or |
| 18550 | +;; (at your option) any later version. |
| 18551 | +;; |
| 18552 | +;; This program is distributed in the hope that it will be useful, |
| 18553 | +;; but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 18554 | +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 18555 | +;; GNU General Public License for more details. |
| 18556 | +;; |
| 18557 | +;; You should have received a copy of the GNU General Public License |
| 18558 | +;; along with this program; if not, write to the Free Software |
| 18559 | +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
| 18560 | + |
| 18561 | +;; -*- Mode: Scheme -*- |
| 18562 | + |
| 18563 | + |
| 18564 | +;; Vector modes |
| 18565 | +(define_mode_macro VECM [V2HI V4QI]) |
| 18566 | +(define_mode_attr size [(V2HI "h") (V4QI "b")]) |
| 18567 | + |
| 18568 | +(define_insn "add<mode>3" |
| 18569 | + [(set (match_operand:VECM 0 "register_operand" "=r") |
| 18570 | + (plus:VECM (match_operand:VECM 1 "register_operand" "r") |
| 18571 | + (match_operand:VECM 2 "register_operand" "r")))] |
| 18572 | + "TARGET_SIMD" |
| 18573 | + "padd.<size>\t%0, %1, %2" |
| 18574 | + [(set_attr "length" "4") |
| 18575 | + (set_attr "type" "alu")]) |
| 18576 | + |
| 18577 | + |
| 18578 | +(define_insn "sub<mode>3" |
| 18579 | + [(set (match_operand:VECM 0 "register_operand" "=r") |
| 18580 | + (minus:VECM (match_operand:VECM 1 "register_operand" "r") |
| 18581 | + (match_operand:VECM 2 "register_operand" "r")))] |
| 18582 | + "TARGET_SIMD" |
| 18583 | + "psub.<size>\t%0, %1, %2" |
| 18584 | + [(set_attr "length" "4") |
| 18585 | + (set_attr "type" "alu")]) |
| 18586 | + |
| 18587 | + |
| 18588 | +(define_insn "abs<mode>2" |
| 18589 | + [(set (match_operand:VECM 0 "register_operand" "=r") |
| 18590 | + (abs:VECM (match_operand:VECM 1 "register_operand" "r")))] |
| 18591 | + "TARGET_SIMD" |
| 18592 | + "pabs.s<size>\t%0, %1" |
| 18593 | + [(set_attr "length" "4") |
| 18594 | + (set_attr "type" "alu")]) |
| 18595 | + |
| 18596 | +(define_insn "ashl<mode>3" |
| 18597 | + [(set (match_operand:VECM 0 "register_operand" "=r") |
| 18598 | + (ashift:VECM (match_operand:VECM 1 "register_operand" "r") |
| 18599 | + (match_operand:SI 2 "immediate_operand" "Ku04")))] |
| 18600 | + "TARGET_SIMD" |
| 18601 | + "plsl.<size>\t%0, %1, %2" |
| 18602 | + [(set_attr "length" "4") |
| 18603 | + (set_attr "type" "alu")]) |
| 18604 | + |
| 18605 | +(define_insn "ashr<mode>3" |
| 18606 | + [(set (match_operand:VECM 0 "register_operand" "=r") |
| 18607 | + (ashiftrt:VECM (match_operand:VECM 1 "register_operand" "r") |
| 18608 | + (match_operand:SI 2 "immediate_operand" "Ku04")))] |
| 18609 | + "TARGET_SIMD" |
| 18610 | + "pasr.<size>\t%0, %1, %2" |
| 18611 | + [(set_attr "length" "4") |
| 18612 | + (set_attr "type" "alu")]) |
| 18613 | + |
| 18614 | +(define_insn "lshr<mode>3" |
| 18615 | + [(set (match_operand:VECM 0 "register_operand" "=r") |
| 18616 | + (lshiftrt:VECM (match_operand:VECM 1 "register_operand" "r") |
| 18617 | + (match_operand:SI 2 "immediate_operand" "Ku04")))] |
| 18618 | + "TARGET_SIMD" |
| 18619 | + "plsr.<size>\t%0, %1, %2" |
| 18620 | + [(set_attr "length" "4") |
| 18621 | + (set_attr "type" "alu")]) |
| 18622 | + |
| 18623 | +(define_insn "smaxv2hi3" |
| 18624 | + [(set (match_operand:V2HI 0 "register_operand" "=r") |
| 18625 | + (smax:V2HI (match_operand:V2HI 1 "register_operand" "r") |
| 18626 | + (match_operand:V2HI 2 "register_operand" "r")))] |
| 18627 | + |
| 18628 | + "TARGET_SIMD" |
| 18629 | + "pmax.sh\t%0, %1, %2" |
| 18630 | + [(set_attr "length" "4") |
| 18631 | + (set_attr "type" "alu")]) |
| 18632 | + |
| 18633 | +(define_insn "sminv2hi3" |
| 18634 | + [(set (match_operand:V2HI 0 "register_operand" "=r") |
| 18635 | + (smin:V2HI (match_operand:V2HI 1 "register_operand" "r") |
| 18636 | + (match_operand:V2HI 2 "register_operand" "r")))] |
| 18637 | + |
| 18638 | + "TARGET_SIMD" |
| 18639 | + "pmin.sh\t%0, %1, %2" |
| 18640 | + [(set_attr "length" "4") |
| 18641 | + (set_attr "type" "alu")]) |
| 18642 | + |
| 18643 | +(define_insn "umaxv4qi3" |
| 18644 | + [(set (match_operand:V4QI 0 "register_operand" "=r") |
| 18645 | + (umax:V4QI (match_operand:V4QI 1 "register_operand" "r") |
| 18646 | + (match_operand:V4QI 2 "register_operand" "r")))] |
| 18647 | + |
| 18648 | + "TARGET_SIMD" |
| 18649 | + "pmax.ub\t%0, %1, %2" |
| 18650 | + [(set_attr "length" "4") |
| 18651 | + (set_attr "type" "alu")]) |
| 18652 | + |
| 18653 | +(define_insn "uminv4qi3" |
| 18654 | + [(set (match_operand:V4QI 0 "register_operand" "=r") |
| 18655 | + (umin:V4QI (match_operand:V4QI 1 "register_operand" "r") |
| 18656 | + (match_operand:V4QI 2 "register_operand" "r")))] |
| 18657 | + |
| 18658 | + "TARGET_SIMD" |
| 18659 | + "pmin.ub\t%0, %1, %2" |
| 18660 | + [(set_attr "length" "4") |
| 18661 | + (set_attr "type" "alu")]) |
| 18662 | + |
| 18663 | + |
| 18664 | +(define_insn "addsubv2hi" |
| 18665 | + [(set (match_operand:V2HI 0 "register_operand" "=r") |
| 18666 | + (vec_concat:V2HI |
| 18667 | + (plus:HI (match_operand:HI 1 "register_operand" "r") |
| 18668 | + (match_operand:HI 2 "register_operand" "r")) |
| 18669 | + (minus:HI (match_dup 1) (match_dup 2))))] |
| 18670 | + "TARGET_SIMD" |
| 18671 | + "paddsub.h\t%0, %1:b, %2:b" |
| 18672 | + [(set_attr "length" "4") |
| 18673 | + (set_attr "type" "alu")]) |
| 18674 | + |
| 18675 | +(define_insn "subaddv2hi" |
| 18676 | + [(set (match_operand:V2HI 0 "register_operand" "=r") |
| 18677 | + (vec_concat:V2HI |
| 18678 | + (minus:HI (match_operand:HI 1 "register_operand" "r") |
| 18679 | + (match_operand:HI 2 "register_operand" "r")) |
| 18680 | + (plus:HI (match_dup 1) (match_dup 2))))] |
| 18681 | + "TARGET_SIMD" |
| 18682 | + "psubadd.h\t%0, %1:b, %2:b" |
| 18683 | + [(set_attr "length" "4") |
| 18684 | + (set_attr "type" "alu")]) |
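Because the add, sub and shift patterns above use the standard named-pattern scheme (add<mode>3 and friends), ordinary GCC vector-extension code on V2HI/V4QI types is a candidate for expanding through them when the selected CPU enables TARGET_SIMD. This is a sketch only; which instructions a given compiler build actually emits is not guaranteed by the example.

```c
/* Sketch: with a SIMD-capable -mcpu setting, these operations can
   expand through the add<mode>3/sub<mode>3 patterns above
   (padd.h, psub.b, ...).  No particular output is guaranteed.  */
typedef short v2hi __attribute__ ((vector_size (4)));
typedef unsigned char v4qi __attribute__ ((vector_size (4)));

v2hi add_halfwords (v2hi a, v2hi b)
{
  return a + b;              /* candidate for padd.h */
}

v4qi sub_bytes (v4qi a, v4qi b)
{
  return a - b;              /* candidate for psub.b */
}
```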
| 18685 | --- /dev/null |
| 18686 | +++ b/gcc/config/avr32/t-avr32 |
| 18687 | @@ -0,0 +1,63 @@ |
| 18688 | + |
| 18689 | +MD_INCLUDES= $(srcdir)/config/avr32/avr32.md \ |
| 18690 | + $(srcdir)/config/avr32/fpcp.md \ |
| 18691 | + $(srcdir)/config/avr32/simd.md \ |
| 18692 | + $(srcdir)/config/avr32/predicates.md |
| 18693 | + |
| 18694 | +s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \ |
| 18695 | + s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES) |
| 18696 | + |
| 18697 | +# We want fine grained libraries, so use the new code |
| 18698 | +# to build the floating point emulation libraries. |
| 18699 | +FPBIT = fp-bit.c |
| 18700 | +DPBIT = dp-bit.c |
| 18701 | + |
| 18702 | +LIB1ASMSRC = avr32/lib1funcs.S |
| 18703 | +LIB1ASMFUNCS = _avr32_f64_mul _avr32_f64_addsub _avr32_f64_to_u32 _avr32_f64_to_s32 \ |
| 18704 | + _avr32_f64_to_u64 _avr32_f64_to_s64 _avr32_u32_to_f64 _avr32_s32_to_f64 \ |
| 18705 | + _avr32_f64_cmp_eq _avr32_f64_cmp_ge _avr32_f64_cmp_lt \ |
| 18706 | + _avr32_f32_cmp_eq _avr32_f32_cmp_ge _avr32_f32_cmp_lt \ |
| 18707 | + _avr32_f64_div _avr32_f32_div\ |
| 18708 | + _avr32_f32_mul _avr32_s32_to_f32 _avr32_u32_to_f32 _avr32_f32_to_s32 \ |
| 18709 | + _avr32_f32_to_u32 _avr32_f32_to_f64 _avr32_f64_to_f32 |
| 18710 | + |
| 18711 | +LIB2FUNCS_EXTRA += $(srcdir)/config/avr32/lib2funcs.S |
| 18712 | + |
| 18713 | +MULTILIB_OPTIONS = march=ap/march=uc |
| 18714 | +MULTILIB_DIRNAMES = ap uc |
| 18715 | +MULTILIB_EXCEPTIONS = |
| 18716 | +MULTILIB_MATCHES = march?ap=mcpu?ap7000 |
| 18717 | +MULTILIB_MATCHES += march?ap=mcpu?ap7010 |
| 18718 | +MULTILIB_MATCHES += march?ap=mcpu?ap7020 |
| 18719 | +MULTILIB_MATCHES += march?uc=mcpu?uc3a0256 |
| 18720 | +MULTILIB_MATCHES += march?uc=mcpu?uc3a0512 |
| 18721 | +MULTILIB_MATCHES += march?uc=mcpu?uc3a1128 |
| 18722 | +MULTILIB_MATCHES += march?uc=mcpu?uc3a1256 |
| 18723 | +MULTILIB_MATCHES += march?uc=mcpu?uc3a1512 |
| 18724 | +MULTILIB_MATCHES += march?ap=mpart?ap7000 |
| 18725 | +MULTILIB_MATCHES += march?ap=mpart?ap7010 |
| 18726 | +MULTILIB_MATCHES += march?ap=mpart?ap7020 |
| 18727 | +MULTILIB_MATCHES += march?uc=mpart?uc3a0256 |
| 18728 | +MULTILIB_MATCHES += march?uc=mpart?uc3a0512 |
| 18729 | +MULTILIB_MATCHES += march?uc=mpart?uc3a1128 |
| 18730 | +MULTILIB_MATCHES += march?uc=mpart?uc3a1256 |
| 18731 | +MULTILIB_MATCHES += march?uc=mpart?uc3a1512 |
| 18732 | + |
| 18733 | +EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o |
| 18734 | + |
| 18735 | +CRTSTUFF_T_CFLAGS = -mrelax |
| 18736 | +CRTSTUFF_T_CFLAGS_S = -mrelax -fPIC |
| 18737 | +TARGET_LIBGCC2_CFLAGS += -mrelax |
| 18738 | + |
| 18739 | +LIBGCC = stmp-multilib |
| 18740 | +INSTALL_LIBGCC = install-multilib |
| 18741 | + |
| 18742 | +fp-bit.c: $(srcdir)/config/fp-bit.c |
| 18743 | + echo '#define FLOAT' > fp-bit.c |
| 18744 | + cat $(srcdir)/config/fp-bit.c >> fp-bit.c |
| 18745 | + |
| 18746 | +dp-bit.c: $(srcdir)/config/fp-bit.c |
| 18747 | + cat $(srcdir)/config/fp-bit.c > dp-bit.c |
| 18748 | + |
| 18749 | + |
| 18750 | + |
| 18751 | --- /dev/null |
| 18752 | +++ b/gcc/config/avr32/t-elf |
| 18753 | @@ -0,0 +1,16 @@ |
| 18754 | + |
| 18755 | +# Assemble startup files. |
| 18756 | +$(T)crti.o: $(srcdir)/config/avr32/crti.asm $(GCC_PASSES) |
| 18757 | + $(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \ |
| 18758 | + -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/avr32/crti.asm |
| 18759 | + |
| 18760 | +$(T)crtn.o: $(srcdir)/config/avr32/crtn.asm $(GCC_PASSES) |
| 18761 | + $(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \ |
| 18762 | + -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/avr32/crtn.asm |
| 18763 | + |
| 18764 | + |
| 18765 | +# Build the libraries for both hard and soft floating point |
| 18766 | +EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o |
| 18767 | + |
| 18768 | +LIBGCC = stmp-multilib |
| 18769 | +INSTALL_LIBGCC = install-multilib |
| 18770 | --- /dev/null |
| 18771 | +++ b/gcc/config/avr32/uclinux-elf.h |
| 18772 | @@ -0,0 +1,20 @@ |
| 18773 | + |
| 18774 | +/* Run-time Target Specification. */ |
| 18775 | +#undef TARGET_VERSION |
| 18776 | +#define TARGET_VERSION fputs (" (AVR32 uClinux with ELF)", stderr) |
| 18777 | + |
| 18778 | +/* We don't want a .jcr section on uClinux. As if this makes a difference... */ |
| 18779 | +#define TARGET_USE_JCR_SECTION 0 |
| 18780 | + |
| 18781 | +/* Here we go. Drop the crtbegin/crtend stuff completely. */ |
| 18782 | +#undef STARTFILE_SPEC |
| 18783 | +#define STARTFILE_SPEC \ |
| 18784 | + "%{!shared: %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s}" \ |
| 18785 | + " %{!p:%{profile:gcrt1.o%s}" \ |
| 18786 | + " %{!profile:crt1.o%s}}}} crti.o%s" |
| 18787 | + |
| 18788 | +#undef ENDFILE_SPEC |
| 18789 | +#define ENDFILE_SPEC "crtn.o%s" |
| 18790 | + |
| 18791 | +#undef TARGET_DEFAULT |
| 18792 | +#define TARGET_DEFAULT (AVR32_FLAG_NO_INIT_GOT) |
| 18793 | --- a/gcc/config/host-linux.c |
| 18794 | +++ b/gcc/config/host-linux.c |
| 18795 | @@ -26,6 +26,9 @@ |
| 18796 | #include "hosthooks.h" |
| 18797 | #include "hosthooks-def.h" |
| 18798 | |
| 18799 | +#ifndef SSIZE_MAX |
| 18800 | +#define SSIZE_MAX LONG_MAX |
| 18801 | +#endif |
| 18802 | |
| 18803 | /* Linux has a feature called exec-shield-randomize that perturbs the |
| 18804 | address of non-fixed mapped segments by a (relatively) small amount. |
| 18805 | --- a/gcc/config.gcc |
| 18806 | +++ b/gcc/config.gcc |
| 18807 | @@ -751,6 +751,24 @@ avr-*-*) |
| 18808 | tm_file="avr/avr.h dbxelf.h" |
| 18809 | use_fixproto=yes |
| 18810 | ;; |
| 18811 | +avr32*-*-linux*) |
| 18812 | + tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/avr32.h " |
| 18813 | + tmake_file="t-linux avr32/t-avr32 avr32/t-elf" |
| 18814 | + extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o" |
| 18815 | + extra_modes=avr32/avr32-modes.def |
| 18816 | + gnu_ld=yes |
| 18817 | + ;; |
| 18818 | +avr32*-*-uclinux*) |
| 18819 | + tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/uclinux-elf.h avr32/avr32.h" |
| 18820 | + tmake_file="t-linux avr32/t-avr32 avr32/t-elf" |
| 18821 | + extra_modes=avr32/avr32-modes.def |
| 18822 | + gnu_ld=yes |
| 18823 | + ;; |
| 18824 | +avr32-*-*) |
| 18825 | + tm_file="dbxelf.h elfos.h avr32/avr32.h avr32/avr32-elf.h" |
| 18826 | + tmake_file="avr32/t-avr32 avr32/t-elf" |
| 18827 | + extra_modes=avr32/avr32-modes.def |
| 18828 | + ;; |
| 18829 | bfin*-elf*) |
| 18830 | tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h" |
| 18831 | tmake_file=bfin/t-bfin-elf |
| 18832 | @@ -1614,6 +1632,9 @@ pdp11-*-bsd) |
| 18833 | pdp11-*-*) |
| 18834 | use_fixproto=yes |
| 18835 | ;; |
| 18836 | +avr-*-*) |
| 18837 | + use_fixproto=yes |
| 18838 | + ;; |
| 18839 | # port not yet contributed |
| 18840 | #powerpc-*-openbsd*) |
| 18841 | # tmake_file="${tmake_file} rs6000/t-fprules " |
| 18842 | @@ -2581,6 +2602,21 @@ case "${target}" in |
| 18843 | fi |
| 18844 | ;; |
| 18845 | |
| 18846 | + avr32*-*-*) |
| 18847 | + supported_defaults="cpu" |
| 18848 | + |
| 18849 | + case "$with_cpu" in |
| 18850 | + "" \ |
| 18851 | + | morgan | ap7000 ) |
| 18852 | + # OK |
| 18853 | + ;; |
| 18854 | + *) |
| 18855 | + echo "Unknown arch used in --with-arch=$with_arch" 1>&2 |
| 18856 | + exit 1 |
| 18857 | + ;; |
| 18858 | + esac |
| 18859 | + ;; |
| 18860 | + |
| 18861 | fr*-*-*linux*) |
| 18862 | supported_defaults=cpu |
| 18863 | case "$with_cpu" in |
| 18864 | --- a/gcc/doc/extend.texi |
| 18865 | +++ b/gcc/doc/extend.texi |
| 18866 | @@ -1887,7 +1887,7 @@ this attribute to work correctly. |
| 18867 | |
| 18868 | @item interrupt |
| 18869 | @cindex interrupt handler functions |
| 18870 | -Use this attribute on the ARM, AVR, C4x, CRX, M32C, M32R/D, MS1, and Xstormy16 |
| 18871 | +Use this attribute on the ARM, AVR, AVR32, C4x, CRX, M32C, M32R/D, MS1, and Xstormy16 |
| 18872 | ports to indicate that the specified function is an interrupt handler. |
| 18873 | The compiler will generate function entry and exit sequences suitable |
| 18874 | for use in an interrupt handler when this attribute is present. |
| 18875 | @@ -1906,6 +1906,15 @@ void f () __attribute__ ((interrupt ("IR |
| 18876 | |
| 18877 | Permissible values for this parameter are: IRQ, FIQ, SWI, ABORT and UNDEF@. |
| 18878 | |
| 18879 | +Note that for the AVR32 you can also specify which banking scheme is
| 18880 | +used for the interrupt mode this interrupt handler is used in, like this:
| 18881 | + |
| 18882 | +@smallexample |
| 18883 | +void f () __attribute__ ((interrupt ("FULL"))); |
| 18884 | +@end smallexample |
| 18885 | + |
| 18886 | +Permissible values for this parameter are: FULL, HALF, NONE and UNDEF. |
| 18887 | + |
| 18888 | @item interrupt_handler |
| 18889 | @cindex interrupt handler functions on the Blackfin, m68k, H8/300 and SH processors |
| 18890 | Use this attribute on the Blackfin, m68k, H8/300, H8/300H, H8S, and SH to |
| 18891 | @@ -5807,6 +5816,7 @@ instructions, but allow the compiler to |
| 18892 | @menu |
| 18893 | * Alpha Built-in Functions:: |
| 18894 | * ARM Built-in Functions:: |
| 18895 | +* AVR32 Built-in Functions:: |
| 18896 | * Blackfin Built-in Functions:: |
| 18897 | * FR-V Built-in Functions:: |
| 18898 | * X86 Built-in Functions:: |
| 18899 | @@ -6045,6 +6055,54 @@ long long __builtin_arm_wxor (long long, |
| 18900 | long long __builtin_arm_wzero () |
| 18901 | @end smallexample |
| 18902 | |
| 18903 | +@node AVR32 Built-in Functions |
| 18904 | +@subsection AVR32 Built-in Functions |
| 18905 | + |
| 18906 | + |
| 18907 | +@smallexample |
| 18908 | + |
| 18909 | +int __builtin_sats (int /*Rd*/,int /*sa*/, int /*bn*/) |
| 18910 | +int __builtin_satu (int /*Rd*/,int /*sa*/, int /*bn*/) |
| 18911 | +int __builtin_satrnds (int /*Rd*/,int /*sa*/, int /*bn*/) |
| 18912 | +int __builtin_satrndu (int /*Rd*/,int /*sa*/, int /*bn*/) |
| 18913 | +short __builtin_mulsathh_h (short, short) |
| 18914 | +int __builtin_mulsathh_w (short, short) |
| 18915 | +short __builtin_mulsatrndhh_h (short, short) |
| 18916 | +int __builtin_mulsatrndwh_w (int, short) |
| 18917 | +int __builtin_mulsatwh_w (int, short) |
| 18918 | +int __builtin_macsathh_w (int, short, short) |
| 18919 | +short __builtin_satadd_h (short, short) |
| 18920 | +short __builtin_satsub_h (short, short) |
| 18921 | +int __builtin_satadd_w (int, int) |
| 18922 | +int __builtin_satsub_w (int, int) |
| 18923 | +long long __builtin_mulwh_d(int, short) |
| 18924 | +long long __builtin_mulnwh_d(int, short) |
| 18925 | +long long __builtin_macwh_d(long long, int, short) |
| 18926 | +long long __builtin_machh_d(long long, short, short) |
| 18927 | + |
| 18928 | +void __builtin_musfr(int); |
| 18929 | +int __builtin_mustr(void); |
| 18930 | +int __builtin_mfsr(int /*Status Register Address*/) |
| 18931 | +void __builtin_mtsr(int /*Status Register Address*/, int /*Value*/) |
| 18932 | +int __builtin_mfdr(int /*Debug Register Address*/) |
| 18933 | +void __builtin_mtdr(int /*Debug Register Address*/, int /*Value*/) |
| 18934 | +void __builtin_cache(void * /*Address*/, int /*Cache Operation*/) |
| 18935 | +void __builtin_sync(int /*Sync Operation*/) |
| 18936 | +void __builtin_tlbr(void) |
| 18937 | +void __builtin_tlbs(void) |
| 18938 | +void __builtin_tlbw(void) |
| 18939 | +void __builtin_breakpoint(void) |
| 18940 | +int __builtin_xchg(void * /*Address*/, int /*Value*/ ) |
| 18941 | +short __builtin_bswap_16(short) |
| 18942 | +int __builtin_bswap_32(int) |
| 18943 | +void __builtin_cop(int/*cpnr*/, int/*crd*/, int/*crx*/, int/*cry*/, int/*op*/) |
| 18944 | +int __builtin_mvcr_w(int/*cpnr*/, int/*crs*/) |
| 18945 | +void __builtin_mvrc_w(int/*cpnr*/, int/*crd*/, int/*value*/) |
| 18946 | +long long __builtin_mvcr_d(int/*cpnr*/, int/*crs*/) |
| 18947 | +void __builtin_mvrc_d(int/*cpnr*/, int/*crd*/, long long/*value*/) |
| 18948 | + |
| 18949 | +@end smallexample |
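A short usage sketch for a few of the built-ins listed above. It is illustrative only and compiles only with an AVR32 target compiler; the wrapper function names are made up.

```c
/* Illustrative only: exercises a few of the built-ins listed above.
   Requires an AVR32 target compiler.  */
int saturated_sum (int a, int b)
{
  return __builtin_satadd_w (a, b);     /* saturating 32-bit add */
}

int swap_endianness (int x)
{
  return __builtin_bswap_32 (x);        /* byte-swap a 32-bit value */
}

long long halfword_mac (long long acc, short a, short b)
{
  /* Halfword multiply-accumulate into a 64-bit accumulator.  */
  return __builtin_machh_d (acc, a, b);
}
```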
| 18950 | + |
| 18951 | @node Blackfin Built-in Functions |
| 18952 | @subsection Blackfin Built-in Functions |
| 18953 | |
| 18954 | --- a/gcc/doc/invoke.texi |
| 18955 | +++ b/gcc/doc/invoke.texi |
| 18956 | @@ -185,7 +185,7 @@ in the following sections. |
| 18957 | -fno-default-inline -fvisibility-inlines-hidden @gol |
| 18958 | -Wabi -Wctor-dtor-privacy @gol |
| 18959 | -Wnon-virtual-dtor -Wreorder @gol |
| 18960 | --Weffc++ -Wno-deprecated -Wstrict-null-sentinel @gol |
| 18961 | +-Weffc++ -Wno-deprecated @gol |
| 18962 | -Wno-non-template-friend -Wold-style-cast @gol |
| 18963 | -Woverloaded-virtual -Wno-pmf-conversions @gol |
| 18964 | -Wsign-promo} |
| 18965 | @@ -569,6 +569,10 @@ Objective-C and Objective-C++ Dialects}. |
| 18966 | -mauto-incdec -minmax -mlong-calls -mshort @gol |
| 18967 | -msoft-reg-count=@var{count}} |
| 18968 | |
| 18969 | +@emph{AVR32 Options} |
| 18970 | +@gccoptlist{-muse-rodata-section -mhard-float -msoft-float -mrelax @gol |
| 18971 | +-muse-oscall -mforce-double-align -mno-init-got -mcpu=@var{cpu}} |
| 18972 | + |
| 18973 | @emph{MCore Options} |
| 18974 | @gccoptlist{-mhardlit -mno-hardlit -mdiv -mno-div -mrelax-immediates @gol |
| 18975 | -mno-relax-immediates -mwide-bitfields -mno-wide-bitfields @gol |
| 18976 | @@ -1797,14 +1801,6 @@ to filter out those warnings. |
| 18977 | @opindex Wno-deprecated |
| 18978 | Do not warn about usage of deprecated features. @xref{Deprecated Features}. |
| 18979 | |
| 18980 | -@item -Wstrict-null-sentinel @r{(C++ only)} |
| 18981 | -@opindex Wstrict-null-sentinel |
| 18982 | -Warn also about the use of an uncasted @code{NULL} as sentinel. When |
| 18983 | -compiling only with GCC this is a valid sentinel, as @code{NULL} is defined |
| 18984 | -to @code{__null}. Although it is a null pointer constant not a null pointer, |
| 18985 | -it is guaranteed to of the same size as a pointer. But this use is |
| 18986 | -not portable across different compilers. |
| 18987 | - |
| 18988 | @item -Wno-non-template-friend @r{(C++ only)} |
| 18989 | @opindex Wno-non-template-friend |
| 18990 | Disable warnings when non-templatized friend functions are declared |
| 18991 | @@ -2662,13 +2658,11 @@ get these warnings. |
| 18992 | If you want to warn about code which uses the uninitialized value of the |
| 18993 | variable in its own initializer, use the @option{-Winit-self} option. |
| 18994 | |
| 18995 | -These warnings occur for individual uninitialized or clobbered |
| 18996 | -elements of structure, union or array variables as well as for |
| 18997 | -variables which are uninitialized or clobbered as a whole. They do |
| 18998 | -not occur for variables or elements declared @code{volatile}. Because |
| 18999 | -these warnings depend on optimization, the exact variables or elements |
| 19000 | -for which there are warnings will depend on the precise optimization |
| 19001 | -options and version of GCC used. |
| 19002 | +These warnings occur only for variables that are candidates for |
| 19003 | +register allocation. Therefore, they do not occur for a variable that |
| 19004 | +is declared @code{volatile}, or whose address is taken, or whose size |
| 19005 | +is other than 1, 2, 4 or 8 bytes. Also, they do not occur for |
| 19006 | +structures, unions or arrays, even when they are in registers. |
| 19007 | |
| 19008 | Note that there may be no warning about a variable that is used only |
| 19009 | to compute a value that itself is never used, because such |
| 19010 | @@ -5935,10 +5929,6 @@ If number of candidates in the set is sm |
| 19011 | we always try to remove unnecessary ivs from the set during its |
| 19012 | optimization when a new iv is added to the set. |
| 19013 | |
| 19014 | -@item scev-max-expr-size |
| 19015 | -Bound on size of expressions used in the scalar evolutions analyzer. |
| 19016 | -Large expressions slow the analyzer. |
| 19017 | - |
| 19018 | @item vect-max-version-checks |
| 19019 | The maximum number of runtime checks that can be performed when doing |
| 19020 | loop versioning in the vectorizer. See option ftree-vect-loop-version |
| 19021 | @@ -7115,7 +7105,7 @@ platform. |
| 19022 | * ARC Options:: |
| 19023 | * ARM Options:: |
| 19024 | * AVR Options:: |
| 19025 | -* Blackfin Options:: |
| 19026 | +* AVR32 Options:: |
| 19027 | * CRIS Options:: |
| 19028 | * CRX Options:: |
| 19029 | * Darwin Options:: |
| 19030 | @@ -7578,81 +7568,55 @@ comply to the C standards, but it will p |
| 19031 | size. |
| 19032 | @end table |
| 19033 | |
| 19034 | -@node Blackfin Options |
| 19035 | -@subsection Blackfin Options |
| 19036 | -@cindex Blackfin Options |
| 19037 | +@node AVR32 Options |
| 19038 | +@subsection AVR32 Options |
| 19039 | +@cindex AVR32 Options |
| 19040 | |
| 19041 | -@table @gcctabopt |
| 19042 | -@item -momit-leaf-frame-pointer |
| 19043 | -@opindex momit-leaf-frame-pointer |
| 19044 | -Don't keep the frame pointer in a register for leaf functions. This |
| 19045 | -avoids the instructions to save, set up and restore frame pointers and |
| 19046 | -makes an extra register available in leaf functions. The option |
| 19047 | -@option{-fomit-frame-pointer} removes the frame pointer for all functions |
| 19048 | -which might make debugging harder. |
| 19049 | +These options are defined for AVR32 implementations: |
| 19050 | |
| 19051 | -@item -mspecld-anomaly |
| 19052 | -@opindex mspecld-anomaly |
| 19053 | -When enabled, the compiler will ensure that the generated code does not |
| 19054 | -contain speculative loads after jump instructions. This option is enabled |
| 19055 | -by default. |
| 19056 | - |
| 19057 | -@item -mno-specld-anomaly |
| 19058 | -@opindex mno-specld-anomaly |
| 19059 | -Don't generate extra code to prevent speculative loads from occurring. |
| 19060 | - |
| 19061 | -@item -mcsync-anomaly |
| 19062 | -@opindex mcsync-anomaly |
| 19063 | -When enabled, the compiler will ensure that the generated code does not |
| 19064 | -contain CSYNC or SSYNC instructions too soon after conditional branches. |
| 19065 | -This option is enabled by default. |
| 19066 | - |
| 19067 | -@item -mno-csync-anomaly |
| 19068 | -@opindex mno-csync-anomaly |
| 19069 | -Don't generate extra code to prevent CSYNC or SSYNC instructions from |
| 19070 | -occurring too soon after a conditional branch. |
| 19071 | - |
| 19072 | -@item -mlow-64k |
| 19073 | -@opindex mlow-64k |
| 19074 | -When enabled, the compiler is free to take advantage of the knowledge that |
| 19075 | -the entire program fits into the low 64k of memory. |
| 19076 | - |
| 19077 | -@item -mno-low-64k |
| 19078 | -@opindex mno-low-64k |
| 19079 | -Assume that the program is arbitrarily large. This is the default. |
| 19080 | +@table @gcctabopt |
| 19081 | +@item -muse-rodata-section |
| 19082 | +@opindex muse-rodata-section |
| 19083 | +Use section @samp{.rodata} for read-only data instead of @samp{.text}. |
| 19084 | |
| 19085 | -@item -mid-shared-library |
| 19086 | -@opindex mid-shared-library |
| 19087 | -Generate code that supports shared libraries via the library ID method. |
| 19088 | -This allows for execute in place and shared libraries in an environment |
| 19089 | -without virtual memory management. This option implies @option{-fPIC}. |
| 19090 | +@item -mhard-float |
| 19091 | +@opindex mhard-float |
| 19092 | +Use floating-point coprocessor instructions. |
| 19093 | |
| 19094 | -@item -mno-id-shared-library |
| 19095 | -@opindex mno-id-shared-library |
| 19096 | -Generate code that doesn't assume ID based shared libraries are being used. |
| 19097 | -This is the default. |
| 19098 | +@item -msoft-float |
| 19099 | +@opindex msoft-float |
| 19100 | +Use software floating-point library. |
| 19101 | |
| 19102 | -@item -mshared-library-id=n |
| 19103 | -@opindex mshared-library-id |
| 19104 | -Specified the identification number of the ID based shared library being |
| 19105 | -compiled. Specifying a value of 0 will generate more compact code, specifying |
| 19106 | -other values will force the allocation of that number to the current |
| 19107 | -library but is no more space or time efficient than omitting this option. |
| 19108 | +@item -mrelax |
| 19109 | +@opindex mrelax |
| 19110 | +Enable linker relaxing.  This means that when the addresses of symbols
| 19111 | +are known at link time, the linker can optimize @samp{icall} and @samp{mcall}
| 19112 | +instructions into an @samp{rcall} instruction where possible.  Loading the
| 19113 | +address of a symbol can also be optimized.
| 19114 | + |
| 19115 | +@item -muse-oscall |
| 19116 | +@opindex muse-oscall |
| 19117 | +When using gcc as a frontend for linking, this switch forces the use of
| 19118 | +@samp{fake} system calls in the newlib C library.  These fake system
| 19119 | +calls are handled by some AVR32 simulators, which redirect them to the
| 19120 | +OS in which the simulator is running.  This makes it possible to perform
| 19121 | +file I/O when running programs in a simulator.
| 19122 | + |
| 19123 | +@item -mforce-double-align |
| 19124 | +@opindex mforce-double-align |
| 19125 | +Force double-word alignment for double-word memory accesses. |
| 19126 | + |
| 19127 | +@item -mno-init-got |
| 19128 | +@opindex mno-init-got |
| 19129 | +Do not initialize the GOT register before using it when compiling PIC |
| 19130 | +code. |
| 19131 | |
| 19132 | -@item -mlong-calls |
| 19133 | -@itemx -mno-long-calls |
| 19134 | -@opindex mlong-calls |
| 19135 | -@opindex mno-long-calls |
| 19136 | -Tells the compiler to perform function calls by first loading the |
| 19137 | -address of the function into a register and then performing a subroutine |
| 19138 | -call on this register. This switch is needed if the target function |
| 19139 | -will lie outside of the 24 bit addressing range of the offset based |
| 19140 | -version of subroutine call instruction. |
| 19141 | +@item -mcpu=@var{cpu-type} |
| 19142 | +@opindex mcpu |
| 19143 | +Generate code for the specified CPU.  Permissible names are @samp{morgan},
| 19144 | +@samp{ap7000} and @samp{default}; @samp{default} is a dummy CPU which
| 19145 | +allows all AVR32 instructions.
| 19146 | |
| 19147 | -This feature is not enabled by default. Specifying |
| 19148 | -@option{-mno-long-calls} will restore the default behavior. Note these |
| 19149 | -switches have no effect on how the compiler generates code to handle |
| 19150 | -function calls via function pointers. |
| 19151 | @end table |
| 19152 | |
| 19153 | @node CRIS Options |
| 19154 | @@ -11341,6 +11305,7 @@ conventions that adheres to the March 19 |
| 19155 | Application Binary Interface, PowerPC processor supplement. This is the |
| 19156 | default unless you configured GCC using @samp{powerpc-*-eabiaix}. |
| 19157 | |
| 19158 | + |
| 19159 | @item -mcall-sysv-eabi |
| 19160 | @opindex mcall-sysv-eabi |
| 19161 | Specify both @option{-mcall-sysv} and @option{-meabi} options. |
| 19162 | --- a/gcc/doc/md.texi |
| 19163 | +++ b/gcc/doc/md.texi |
| 19164 | @@ -1686,6 +1686,59 @@ A memory reference suitable for iWMMXt l |
| 19165 | A memory reference suitable for the ARMv4 ldrsb instruction. |
| 19166 | @end table |
| 19167 | |
| 19168 | +@item AVR32 family---@file{avr32.h} |
| 19169 | +@table @code |
| 19170 | +@item f |
| 19171 | +Floating-point registers (f0 to f15) |
| 19172 | + |
| 19173 | +@item Ku@var{bits} |
| 19174 | +Unsigned constant representable with @var{bits} bits (@var{bits} must be
| 19175 | +two digits); for example, an unsigned 8-bit constant is written as @samp{Ku08}.
| 19176 | + |
| 19177 | +@item Ks@var{bits} |
| 19178 | +Signed constant representable with @var{bits} bits (@var{bits} must be
| 19179 | +two digits); for example, a signed 12-bit constant is written as @samp{Ks12}.
| 19180 | + |
| 19181 | +@item Is@var{bits} |
| 19182 | +The negated range of a signed constant representable with @var{bits}
| 19183 | +bits; the same as @samp{Ks@var{bits}} with the range negated.  This means
| 19184 | +that the constant must be in the range @math{-2^{bits-1}+1} to @math{2^{bits-1}}.
| 19185 | + |
| 19186 | +@item G |
| 19187 | +A single/double precision floating-point immediate or 64-bit integer |
| 19188 | +immediate where the least and most significant words both can be |
| 19189 | +loaded with a move instruction.  That is, the integer form of the
| 19190 | +values in the least and most significant words must both be in the range
| 19191 | +@math{-2^{20}} to @math{2^{20}-1}. |
| 19192 | + |
| 19193 | +@item RKs@var{bits} |
| 19194 | +A memory reference where the address consists of a base register |
| 19195 | +plus a signed immediate displacement with range given by @samp{Ks@var{bits}} |
| 19196 | +which has the same format as for the signed immediate integer constraint |
| 19197 | +given above. |
| 19198 | + |
| 19199 | +@item RKu@var{bits} |
| 19200 | +A memory reference where the address consists of a base register |
| 19201 | +plus an unsigned immediate displacement with range given by @samp{Ku@var{bits}} |
| 19202 | +which has the same format as for the unsigned immediate integer constraint |
| 19203 | +given above. |
| 19204 | + |
| 19205 | +@item S |
| 19206 | +A memory reference with an immediate or register offset |
| 19207 | + |
| 19208 | +@item T |
| 19209 | +A memory reference to a constant pool entry |
| 19210 | + |
| 19211 | +@item W |
| 19212 | +A valid operand for use in the @samp{lda.w} instruction macro when |
| 19213 | +relaxing is enabled |
| 19214 | + |
| 19215 | +@item Z |
| 19216 | +A memory reference valid for coprocessor memory instructions |
| 19217 | + |
| 19218 | +@end table |
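The Ks/Ku/Is entries above are all parameterized by a two-digit bit count. A small host-side sketch that prints the ranges those formulas describe (illustration only, not part of the port, and the bit widths listed are just sample values):

```c
/* Host-side illustration of the constraint ranges documented above:
   Ku<b>: 0 .. 2^b - 1
   Ks<b>: -2^(b-1) .. 2^(b-1) - 1
   Is<b>: the Ks<b> range negated, i.e. -2^(b-1) + 1 .. 2^(b-1)  */
#include <stdio.h>

int main (void)
{
  int bits[] = { 4, 8, 12, 16, 21 };
  for (unsigned i = 0; i < sizeof bits / sizeof bits[0]; i++)
    {
      int b = bits[i];
      long long half = 1LL << (b - 1);
      printf ("Ku%02d: 0 .. %lld\n", b, (1LL << b) - 1);
      printf ("Ks%02d: %lld .. %lld\n", b, -half, half - 1);
      printf ("Is%02d: %lld .. %lld\n", b, -half + 1, half);
    }
  return 0;
}
```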
| 19219 | + |
| 19220 | + |
| 19221 | @item AVR family---@file{avr.h} |
| 19222 | @table @code |
| 19223 | @item l |
| 19224 | @@ -2132,102 +2185,6 @@ range of 1 to 2047. |
| 19225 | |
| 19226 | @end table |
| 19227 | |
| 19228 | -@item Blackfin family---@file{bfin.h} |
| 19229 | -@table @code |
| 19230 | -@item a |
| 19231 | -P register |
| 19232 | - |
| 19233 | -@item d |
| 19234 | -D register |
| 19235 | - |
| 19236 | -@item z |
| 19237 | -A call clobbered P register. |
| 19238 | - |
| 19239 | -@item D |
| 19240 | -Even-numbered D register |
| 19241 | - |
| 19242 | -@item W |
| 19243 | -Odd-numbered D register |
| 19244 | - |
| 19245 | -@item e |
| 19246 | -Accumulator register. |
| 19247 | - |
| 19248 | -@item A |
| 19249 | -Even-numbered accumulator register. |
| 19250 | - |
| 19251 | -@item B |
| 19252 | -Odd-numbered accumulator register. |
| 19253 | - |
| 19254 | -@item b |
| 19255 | -I register |
| 19256 | - |
| 19257 | -@item B |
| 19258 | -B register |
| 19259 | - |
| 19260 | -@item f |
| 19261 | -M register |
| 19262 | - |
| 19263 | -@item c |
| 19264 | -Registers used for circular buffering, i.e. I, B, or L registers. |
| 19265 | - |
| 19266 | -@item C |
| 19267 | -The CC register. |
| 19268 | - |
| 19269 | -@item x |
| 19270 | -Any D, P, B, M, I or L register. |
| 19271 | - |
| 19272 | -@item y |
| 19273 | -Additional registers typically used only in prologues and epilogues: RETS, |
| 19274 | -RETN, RETI, RETX, RETE, ASTAT, SEQSTAT and USP. |
| 19275 | - |
| 19276 | -@item w |
| 19277 | -Any register except accumulators or CC. |
| 19278 | - |
| 19279 | -@item Ksh |
| 19280 | -Signed 16 bit integer (in the range -32768 to 32767) |
| 19281 | - |
| 19282 | -@item Kuh |
| 19283 | -Unsigned 16 bit integer (in the range 0 to 65535) |
| 19284 | - |
| 19285 | -@item Ks7 |
| 19286 | -Signed 7 bit integer (in the range -64 to 63) |
| 19287 | - |
| 19288 | -@item Ku7 |
| 19289 | -Unsigned 7 bit integer (in the range 0 to 127) |
| 19290 | - |
| 19291 | -@item Ku5 |
| 19292 | -Unsigned 5 bit integer (in the range 0 to 31) |
| 19293 | - |
| 19294 | -@item Ks4 |
| 19295 | -Signed 4 bit integer (in the range -8 to 7) |
| 19296 | - |
| 19297 | -@item Ks3 |
| 19298 | -Signed 3 bit integer (in the range -3 to 4) |
| 19299 | - |
| 19300 | -@item Ku3 |
| 19301 | -Unsigned 3 bit integer (in the range 0 to 7) |
| 19302 | - |
| 19303 | -@item P@var{n} |
| 19304 | -Constant @var{n}, where @var{n} is a single-digit constant in the range 0 to 4. |
| 19305 | - |
| 19306 | -@item M1 |
| 19307 | -Constant 255. |
| 19308 | - |
| 19309 | -@item M2 |
| 19310 | -Constant 65535. |
| 19311 | - |
| 19312 | -@item J |
| 19313 | -An integer constant with exactly a single bit set. |
| 19314 | - |
| 19315 | -@item L |
| 19316 | -An integer constant with all bits set except exactly one. |
| 19317 | - |
| 19318 | -@item H |
| 19319 | - |
| 19320 | -@item Q |
| 19321 | -Any SYMBOL_REF. |
| 19322 | -@end table |
| 19323 | - |
| 19324 | @item M32C---@file{m32c.c} |
| 19325 | |
| 19326 | @item Rsp |
| 19327 | --- a/gcc/expr.c |
| 19328 | +++ b/gcc/expr.c |
| 19329 | @@ -3401,18 +3401,19 @@ emit_single_push_insn (enum machine_mode |
| 19330 | } |
| 19331 | else |
| 19332 | { |
| 19333 | + emit_move_insn (stack_pointer_rtx, |
| 19334 | + expand_binop (Pmode, |
| 19335 | #ifdef STACK_GROWS_DOWNWARD |
| 19336 | - /* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC. */ |
| 19337 | - dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx, |
| 19338 | - GEN_INT (-(HOST_WIDE_INT) rounded_size)); |
| 19339 | + sub_optab, |
| 19340 | #else |
| 19341 | - /* ??? This seems wrong if STACK_PUSH_CODE == POST_INC. */ |
| 19342 | - dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx, |
| 19343 | - GEN_INT (rounded_size)); |
| 19344 | + add_optab, |
| 19345 | #endif |
| 19346 | - dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr); |
| 19347 | + stack_pointer_rtx, |
| 19348 | + GEN_INT (rounded_size), |
| 19349 | + NULL_RTX, 0, OPTAB_LIB_WIDEN)); |
| 19350 | + dest_addr = stack_pointer_rtx; |
| 19351 | } |
| 19352 | - |
| 19353 | + |
| 19354 | dest = gen_rtx_MEM (mode, dest_addr); |
| 19355 | |
| 19356 | if (type != 0) |
| 19357 | --- a/gcc/genoutput.c |
| 19358 | +++ b/gcc/genoutput.c |
| 19359 | @@ -383,7 +383,7 @@ output_insn_data (void) |
| 19360 | } |
| 19361 | |
| 19362 | if (d->name && d->name[0] != '*') |
| 19363 | - printf (" (insn_gen_fn) gen_%s,\n", d->name); |
| 19364 | + printf (" gen_%s,\n", d->name); |
| 19365 | else |
| 19366 | printf (" 0,\n"); |
| 19367 | |
| 19368 | --- a/gcc/longlong.h |
| 19369 | +++ b/gcc/longlong.h |
| 19370 | @@ -227,6 +227,39 @@ UDItype __umulsidi3 (USItype, USItype); |
| 19371 | #define UDIV_TIME 100 |
| 19372 | #endif /* __arm__ */ |
| 19373 | |
| 19374 | +#if defined (__avr32__) && W_TYPE_SIZE == 32 |
| 19375 | +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ |
| 19376 | + __asm__ ("add\t%1, %4, %5\n\tadc\t%0, %2, %3" \ |
| 19377 | + : "=r" ((USItype) (sh)), \ |
| 19378 | + "=&r" ((USItype) (sl)) \ |
| 19379 | + : "r" ((USItype) (ah)), \ |
| 19380 | + "r" ((USItype) (bh)), \ |
| 19381 | + "r" ((USItype) (al)), \ |
| 19382 | + "r" ((USItype) (bl)) __CLOBBER_CC) |
| 19383 | +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ |
| 19384 | + __asm__ ("sub\t%1, %4, %5\n\tsbc\t%0, %2, %3" \ |
| 19385 | + : "=r" ((USItype) (sh)), \ |
| 19386 | + "=&r" ((USItype) (sl)) \ |
| 19387 | + : "r" ((USItype) (ah)), \ |
| 19388 | + "r" ((USItype) (bh)), \ |
| 19389 | + "r" ((USItype) (al)), \ |
| 19390 | + "r" ((USItype) (bl)) __CLOBBER_CC) |
| 19391 | + |
| 19392 | +#define __umulsidi3(a,b) ((UDItype)(a) * (UDItype)(b)) |
| 19393 | + |
| 19394 | +#define umul_ppmm(w1, w0, u, v) \ |
| 19395 | +{ \ |
| 19396 | + DWunion __w; \ |
| 19397 | + __w.ll = __umulsidi3 (u, v); \ |
| 19398 | + w1 = __w.s.high; \ |
| 19399 | + w0 = __w.s.low; \ |
| 19400 | +} |
| 19401 | + |
| 19402 | +#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clz (X)) |
| 19403 | +#define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctz (X)) |
| 19404 | +#define COUNT_LEADING_ZEROS_0 32 |
| 19405 | +#endif |
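The umul_ppmm definition above splits the 64-bit product of two 32-bit operands into its high and low words via __umulsidi3. A portable reference version of the same computation (the names here are illustrative, not part of longlong.h):

```c
/* Portable restatement of what the umul_ppmm definition above
   computes: the 64-bit product of two 32-bit values, split into
   its high and low 32-bit words.  */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

static void umul_ppmm_ref (uint32_t *w1, uint32_t *w0, uint32_t u, uint32_t v)
{
  uint64_t prod = (uint64_t) u * v;   /* same idea as __umulsidi3 */
  *w1 = (uint32_t) (prod >> 32);
  *w0 = (uint32_t) prod;
}

int main (void)
{
  uint32_t hi, lo;
  umul_ppmm_ref (&hi, &lo, 0x12345678u, 0x9abcdef0u);
  printf ("high: 0x%08" PRIx32 "  low: 0x%08" PRIx32 "\n", hi, lo);
  return 0;
}
```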
| 19406 | + |
| 19407 | #if defined (__hppa) && W_TYPE_SIZE == 32 |
| 19408 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ |
| 19409 | __asm__ ("add %4,%5,%1\n\taddc %2,%3,%0" \ |
| 19410 | --- a/libstdc++-v3/acinclude.m4 |
| 19411 | +++ b/libstdc++-v3/acinclude.m4 |
| 19412 | @@ -125,6 +125,15 @@ AC_DEFUN([GLIBCXX_CONFIGURE], [ |
| 19413 | ## other macros from doing the same. This should be automated.) -pme |
| 19414 | need_libmath=no |
| 19415 | |
| 19416 | + # Check for uClibc since Linux platforms use different configuration |
| 19417 | + # directories depending on the C library in use. |
| 19418 | + AC_EGREP_CPP([_using_uclibc], [ |
| 19419 | + #include <stdio.h> |
| 19420 | + #if __UCLIBC__ |
| 19421 | + _using_uclibc |
| 19422 | + #endif |
| 19423 | + ], uclibc=yes, uclibc=no) |
| 19424 | + |
| 19425 | # Find platform-specific directories containing configuration info. |
| 19426 | # Also possibly modify flags used elsewhere, as needed by the platform. |
| 19427 | GLIBCXX_CHECK_HOST |
| 19428 | @@ -1043,8 +1052,8 @@ AC_DEFUN([GLIBCXX_ENABLE_CLOCALE], [ |
| 19429 | #endif |
| 19430 | int main() |
| 19431 | { |
| 19432 | - const char __one[] = "Äuglein Augmen"; |
| 19433 | - const char __two[] = "Äuglein"; |
| 19434 | + const char __one[] = "�uglein Augmen"; |
| 19435 | + const char __two[] = "�uglein"; |
| 19436 | int i; |
| 19437 | int j; |
| 19438 | __locale_t loc; |
| 19439 | @@ -1953,6 +1962,14 @@ AC_DEFUN([AC_LC_MESSAGES], [ |
| 19440 | ]) |
| 19441 | ]) |
| 19442 | |
| 19443 | +# Macros that should have automatically be included, but... |
| 19444 | +m4_include([../config/enable.m4]) |
| 19445 | +m4_include([../config/lead-dot.m4]) |
| 19446 | +m4_include([../config/no-executables.m4]) |
| 19447 | +m4_include([../libtool.m4]) |
| 19448 | +m4_include([crossconfig.m4]) |
| 19449 | +m4_include([linkage.m4]) |
| 19450 | + |
| 19451 | # Macros from the top-level gcc directory. |
| 19452 | m4_include([../config/tls.m4]) |
| 19453 | |
| 19454 | --- a/libstdc++-v3/config/os/gnu-linux/ctype_base.h |
| 19455 | +++ b/libstdc++-v3/config/os/gnu-linux/ctype_base.h |
| 19456 | @@ -43,8 +43,8 @@ |
| 19457 | struct ctype_base |
| 19458 | { |
| 19459 | // Non-standard typedefs. |
| 19460 | - typedef const int* __to_type; |
| 19461 | - |
| 19462 | + typedef const int* __to_type; |
| 19463 | + |
| 19464 | // NB: Offsets into ctype<char>::_M_table force a particular size |
| 19465 | // on the mask type. Because of this, we don't use an enum. |
| 19466 | typedef unsigned short mask; |
| 19467 | --- a/libstdc++-v3/configure.host |
| 19468 | +++ b/libstdc++-v3/configure.host |
| 19469 | @@ -214,8 +214,15 @@ case "${host_os}" in |
| 19470 | freebsd*) |
| 19471 | os_include_dir="os/bsd/freebsd" |
| 19472 | ;; |
| 19473 | + linux-uclibc*) |
| 19474 | + os_include_dir="os/uclibc-linux" |
| 19475 | + ;; |
| 19476 | gnu* | linux* | kfreebsd*-gnu | knetbsd*-gnu) |
| 19477 | - os_include_dir="os/gnu-linux" |
| 19478 | + if [ "$uclibc" = "yes" ]; then |
| 19479 | + os_include_dir="os/uclibc" |
| 19480 | + else |
| 19481 | + os_include_dir="os/gnu-linux" |
| 19482 | + fi |
| 19483 | ;; |
| 19484 | hpux*) |
| 19485 | os_include_dir="os/hpux" |
| 19486 | --- a/libstdc++-v3/include/Makefile.in |
| 19487 | +++ b/libstdc++-v3/include/Makefile.in |
| 19488 | @@ -36,6 +36,7 @@ POST_UNINSTALL = : |
| 19489 | build_triplet = @build@ |
| 19490 | host_triplet = @host@ |
| 19491 | target_triplet = @target@ |
| 19492 | +LIBOBJDIR = |
| 19493 | DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ |
| 19494 | $(top_srcdir)/fragment.am |
| 19495 | subdir = include |
| 19496 | --- a/libstdc++-v3/libmath/Makefile.in |
| 19497 | +++ b/libstdc++-v3/libmath/Makefile.in |
| 19498 | @@ -37,6 +37,7 @@ POST_UNINSTALL = : |
| 19499 | build_triplet = @build@ |
| 19500 | host_triplet = @host@ |
| 19501 | target_triplet = @target@ |
| 19502 | +LIBOBJDIR = |
| 19503 | subdir = libmath |
| 19504 | DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in |
| 19505 | ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 |
| 19506 | --- a/libstdc++-v3/libsupc++/Makefile.in |
| 19507 | +++ b/libstdc++-v3/libsupc++/Makefile.in |
| 19508 | @@ -38,6 +38,7 @@ POST_UNINSTALL = : |
| 19509 | build_triplet = @build@ |
| 19510 | host_triplet = @host@ |
| 19511 | target_triplet = @target@ |
| 19512 | +LIBOBJDIR = |
| 19513 | DIST_COMMON = $(glibcxxinstall_HEADERS) $(srcdir)/Makefile.am \ |
| 19514 | $(srcdir)/Makefile.in $(top_srcdir)/fragment.am |
| 19515 | subdir = libsupc++ |
| 19516 | --- a/libstdc++-v3/Makefile.in |
| 19517 | +++ b/libstdc++-v3/Makefile.in |
| 19518 | @@ -36,6 +36,7 @@ POST_UNINSTALL = : |
| 19519 | build_triplet = @build@ |
| 19520 | host_triplet = @host@ |
| 19521 | target_triplet = @target@ |
| 19522 | +LIBOBJDIR = |
| 19523 | DIST_COMMON = README $(am__configure_deps) $(srcdir)/../config.guess \ |
| 19524 | $(srcdir)/../config.sub $(srcdir)/../install-sh \ |
| 19525 | $(srcdir)/../ltmain.sh $(srcdir)/../missing \ |
| 19526 | --- a/libstdc++-v3/po/Makefile.in |
| 19527 | +++ b/libstdc++-v3/po/Makefile.in |
| 19528 | @@ -36,6 +36,7 @@ POST_UNINSTALL = : |
| 19529 | build_triplet = @build@ |
| 19530 | host_triplet = @host@ |
| 19531 | target_triplet = @target@ |
| 19532 | +LIBOBJDIR = |
| 19533 | DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ |
| 19534 | $(top_srcdir)/fragment.am |
| 19535 | subdir = po |
| 19536 | --- a/libstdc++-v3/src/Makefile.in |
| 19537 | +++ b/libstdc++-v3/src/Makefile.in |
| 19538 | @@ -36,6 +36,7 @@ POST_UNINSTALL = : |
| 19539 | build_triplet = @build@ |
| 19540 | host_triplet = @host@ |
| 19541 | target_triplet = @target@ |
| 19542 | +LIBOBJDIR = |
| 19543 | DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ |
| 19544 | $(top_srcdir)/fragment.am |
| 19545 | subdir = src |
| 19546 | |