aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBernhard Rosenkraenzer <Bernhard.Rosenkranzer@linaro.org>2012-10-17 01:48:49 +0159
committerBernhard Rosenkraenzer <Bernhard.Rosenkranzer@linaro.org>2012-10-17 01:48:49 +0159
commit6d2a81ab9501f411e50b71aba3ec853188bde9dc (patch)
tree9b5e3ffab7a9629501cf7e9d67caab0a1c74070b
parente3f94b743d3dbd8e4795d93c3c2cf404342ca2d3 (diff)
downloadgcc-aarch64-6d2a81ab9501f411e50b71aba3ec853188bde9dc.tar.gz
Sync with svn rev. 192508
Signed-off-by: Bernhard Rosenkraenzer <Bernhard.Rosenkranzer@linaro.org>
-rw-r--r--gcc/ChangeLog5
-rw-r--r--gcc/ChangeLog.aarch6438
-rw-r--r--gcc/DATESTAMP2
-rw-r--r--gcc/config/aarch64/aarch64-protos.h4
-rw-r--r--gcc/config/aarch64/aarch64.c11
-rw-r--r--gcc/config/aarch64/aarch64.h34
-rw-r--r--gcc/config/aarch64/aarch64.md13
-rw-r--r--gcc/config/aarch64/arm_neon.h136
-rw-r--r--gcc/cp/ChangeLog5
-rw-r--r--gcc/cp/tree.c5
-rw-r--r--gcc/doc/invoke.texi4
-rw-r--r--gcc/fortran/ChangeLog6
-rw-r--r--gcc/fortran/trans-stmt.c12
-rw-r--r--gcc/po/ChangeLog4
-rw-r--r--gcc/po/es.po12
-rw-r--r--gcc/testsuite/ChangeLog21
-rw-r--r--gcc/testsuite/ChangeLog.aarch6415
-rw-r--r--gcc/testsuite/g++.dg/template/pr54858.C21
-rw-r--r--gcc/testsuite/gcc.target/aarch64/predefine_large.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/predefine_small.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/predefine_tiny.c7
-rw-r--r--gcc/testsuite/gcc.target/aarch64/vector_intrinsics.c12
-rw-r--r--gcc/testsuite/gcc.target/arm/arm.exp7
-rw-r--r--gcc/testsuite/gfortran.dg/class_allocate_13.f9031
-rw-r--r--gcc/testsuite/lib/gcc-dg.exp8
-rw-r--r--gcc/testsuite/lib/target-supports.exp42
-rw-r--r--libgfortran/ChangeLog8
-rw-r--r--libgfortran/runtime/environ.c42
-rw-r--r--libstdc++-v3/ChangeLog6
-rw-r--r--libstdc++-v3/include/bits/atomic_base.h2
30 files changed, 413 insertions, 114 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 7330e09fe..7b3219d06 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,8 @@
+2012-10-08 Georg-Johann Lay <avr@gjlay.de>
+
+ PR target/54854
+ * doc/invoke.texi (AVR Options): Deprecate -mshort-calls.
+
2012-10-05 Mark Kettenis <kettenis@openbsd.org>
* config.gcc (*-*-openbsd4.[3-9]|*-*-openbsd[5-9]*): Set
diff --git a/gcc/ChangeLog.aarch64 b/gcc/ChangeLog.aarch64
index e42903e2d..47ed27e46 100644
--- a/gcc/ChangeLog.aarch64
+++ b/gcc/ChangeLog.aarch64
@@ -1,3 +1,41 @@
+2012-10-16 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/arm_neon.h (vmla_lane_f32, vmla_lane_s16,
+ vmla_lane_s32, vmla_lane_u16, vmla_lane_u32, vmlal_lane_s16,
+ vmlal_lane_s32, vmlal_lane_u16, vmlal_lane_u32,
+ vmls_lane_s16, vmls_lane_s32, vmls_lane_u16, vmls_lane_u32,
+ vmlsl_lane_s16, vmlsl_lane_s32, vmlsl_lane_u16,
+ vmlsl_lane_u32, vmul_lane_f32, vmul_lane_s16, vmul_lane_s32,
+ vmul_lane_u16, vmul_lane_u32, vmull_lane_s16, vmull_lane_s32,
+ vmull_lane_u16, vmull_lane_u32, vmulq_lane_f32, vmulq_lane_f64,
+ vmulq_lane_s16, vmulq_lane_s32, vmulq_lane_u16, vmulq_lane_u32,
+ vqdmlal_lane_s16, vqdmlal_lane_s32, vqdmlalh_lane_s16,
+ vqdmlsl_lane_s16, vqdmlsl_lane_s32, vqdmulh_lane_s16, vqdmulh_lane_s32,
+ vqdmulhq_lane_s16, vqdmulhq_lane_s32, vqdmull_lane_s16,
+ vqdmull_lane_s32, vqrdmulh_lane_s16, vqrdmulh_lane_s32,
+ vqrdmulhq_lane_s16, vqrdmulhq_lane_s32): Update prototype and
+ implementation.
+
+2012-10-16 Ian Bolton <ian.bolton@arm.com>
+
+ * gcc/config/aarch64/aarch64.md
+ (<optab><ALLX:mode>_shft_<GPI:mode>): Restrict operands.
+
+2012-10-16 Marcus Shawcroft <marcus.shawcroft@arm.com>
+
+ * config/aarch64/aarch64-protos.h (aarch64_split_doubleword_move):
+ Rename to aarch64_split_128bit_move.
+ (aarch64_split_128bit_move_p): New.
+ * config/aarch64/aarch64.c (aarch64_split_doubleword_move):
+ Rename to aarch64_split_128bit_move.
+ (aarch64_split_128bit_move_p): New.
+ * config/aarch64/aarch64.md: Adjust TImode move split.
+
+2012-10-15 Chris Schlumberger-Socha <chris.schlumberger-socha@arm.com>
+
+ * config/aarch64/aarch64.h (TARGET_CPU_CPP_BUILTINS): Add predefine for
+ AArch64 code models.
+
2012-10-05 Tejas Belagod <tejas.belagod@arm.com>
* config/aarch64/arm_neon.h (vqdmlalh_lane_s16, vqdmlalh_s16,
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 3bda1e32d..bc72ebf08 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20121008
+20121015
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index e6d35e4db..712d2f683 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -245,7 +245,9 @@ void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
/* Emit code for reinterprets. */
void aarch64_simd_reinterpret (rtx, rtx);
-void aarch64_split_doubleword_move (rtx, rtx);
+void aarch64_split_128bit_move (rtx, rtx);
+
+bool aarch64_split_128bit_move_p (rtx, rtx);
#if defined (RTX_CODE)
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index a148ee569..809db0bcd 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -537,7 +537,7 @@ aarch64_emit_move (rtx dest, rtx src)
}
void
-aarch64_split_doubleword_move (rtx dst, rtx src)
+aarch64_split_128bit_move (rtx dst, rtx src)
{
rtx low_dst;
@@ -569,7 +569,7 @@ aarch64_split_doubleword_move (rtx dst, rtx src)
}
/* Fall through to r -> r cases. */
}
-
+
low_dst = gen_lowpart (word_mode, dst);
if (REG_P (low_dst)
&& reg_overlap_mentioned_p (low_dst, src))
@@ -586,6 +586,13 @@ aarch64_split_doubleword_move (rtx dst, rtx src)
}
}
+bool
+aarch64_split_128bit_move_p (rtx dst, rtx src)
+{
+ return (! REG_P (src)
+ || ! (FP_REGNUM_P (REGNO (dst)) && FP_REGNUM_P (REGNO (src))));
+}
+
static rtx
aarch64_force_temporary (rtx x, rtx value)
{
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index dac110498..d354bc2b6 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -23,14 +23,32 @@
#define GCC_AARCH64_H
/* Target CPU builtins. */
-#define TARGET_CPU_CPP_BUILTINS() \
- do \
- { \
- builtin_define ("__aarch64__"); \
- if (TARGET_BIG_END) \
- builtin_define ("__AARCH64EB__"); \
- else \
- builtin_define ("__AARCH64EL__"); \
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__aarch64__"); \
+ if (TARGET_BIG_END) \
+ builtin_define ("__AARCH64EB__"); \
+ else \
+ builtin_define ("__AARCH64EL__"); \
+ \
+ switch (aarch64_cmodel) \
+ { \
+ case AARCH64_CMODEL_TINY: \
+ case AARCH64_CMODEL_TINY_PIC: \
+ builtin_define ("__AARCH64_CMODEL_TINY__"); \
+ break; \
+ case AARCH64_CMODEL_SMALL: \
+ case AARCH64_CMODEL_SMALL_PIC: \
+ builtin_define ("__AARCH64_CMODEL_SMALL__");\
+ break; \
+ case AARCH64_CMODEL_LARGE: \
+ builtin_define ("__AARCH64_CMODEL_LARGE__"); \
+ break; \
+ default: \
+ break; \
+ } \
+ \
} while (0)
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 5c92a5b17..a6781fd50 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -947,10 +947,10 @@
(define_split
[(set (match_operand:TI 0 "register_operand" "")
(match_operand:TI 1 "aarch64_reg_or_imm" ""))]
- "reload_completed"
+ "reload_completed && aarch64_split_128bit_move_p (operands[0], operands[1])"
[(const_int 0)]
{
- aarch64_split_doubleword_move (operands[0], operands[1]);
+ aarch64_split_128bit_move (operands[0], operands[1]);
DONE;
})
@@ -2315,8 +2315,13 @@
(ashift:GPI (ANY_EXTEND:GPI
(match_operand:ALLX 1 "register_operand" "r"))
(match_operand 2 "const_int_operand" "n")))]
- ""
- "<su>bfiz\\t%<GPI:w>0, %<GPI:w>1, %2, #<ALLX:sizen>"
+ "UINTVAL (operands[2]) < <GPI:sizen>"
+{
+ operands[3] = (<ALLX:sizen> <= (<GPI:sizen> - UINTVAL (operands[2])))
+ ? GEN_INT (<ALLX:sizen>)
+ : GEN_INT (<GPI:sizen> - UINTVAL (operands[2]));
+ return "<su>bfiz\t%<GPI:w>0, %<GPI:w>1, %2, %3";
+}
[(set_attr "v8type" "bfm")
(set_attr "mode" "<GPI:MODE>")]
)
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index f1fb239e3..e8fafa6d1 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -9526,7 +9526,7 @@ vminvq_u32 (uint32x4_t a)
#define vmla_lane_f32(a, b, c, d) \
__extension__ \
({ \
- float32x4_t c_ = (c); \
+ float32x2_t c_ = (c); \
float32x2_t b_ = (b); \
float32x2_t a_ = (a); \
float32x2_t result; \
@@ -9541,7 +9541,7 @@ vminvq_u32 (uint32x4_t a)
#define vmla_lane_s16(a, b, c, d) \
__extension__ \
({ \
- int16x8_t c_ = (c); \
+ int16x4_t c_ = (c); \
int16x4_t b_ = (b); \
int16x4_t a_ = (a); \
int16x4_t result; \
@@ -9555,7 +9555,7 @@ vminvq_u32 (uint32x4_t a)
#define vmla_lane_s32(a, b, c, d) \
__extension__ \
({ \
- int32x4_t c_ = (c); \
+ int32x2_t c_ = (c); \
int32x2_t b_ = (b); \
int32x2_t a_ = (a); \
int32x2_t result; \
@@ -9569,7 +9569,7 @@ vminvq_u32 (uint32x4_t a)
#define vmla_lane_u16(a, b, c, d) \
__extension__ \
({ \
- uint16x8_t c_ = (c); \
+ uint16x4_t c_ = (c); \
uint16x4_t b_ = (b); \
uint16x4_t a_ = (a); \
uint16x4_t result; \
@@ -9583,7 +9583,7 @@ vminvq_u32 (uint32x4_t a)
#define vmla_lane_u32(a, b, c, d) \
__extension__ \
({ \
- uint32x4_t c_ = (c); \
+ uint32x2_t c_ = (c); \
uint32x2_t b_ = (b); \
uint32x2_t a_ = (a); \
uint32x2_t result; \
@@ -9997,7 +9997,7 @@ vmlal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
#define vmlal_lane_s16(a, b, c, d) \
__extension__ \
({ \
- int16x8_t c_ = (c); \
+ int16x4_t c_ = (c); \
int16x4_t b_ = (b); \
int32x4_t a_ = (a); \
int32x4_t result; \
@@ -10011,7 +10011,7 @@ vmlal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
#define vmlal_lane_s32(a, b, c, d) \
__extension__ \
({ \
- int32x4_t c_ = (c); \
+ int32x2_t c_ = (c); \
int32x2_t b_ = (b); \
int64x2_t a_ = (a); \
int64x2_t result; \
@@ -10025,7 +10025,7 @@ vmlal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
#define vmlal_lane_u16(a, b, c, d) \
__extension__ \
({ \
- uint16x8_t c_ = (c); \
+ uint16x4_t c_ = (c); \
uint16x4_t b_ = (b); \
uint32x4_t a_ = (a); \
uint32x4_t result; \
@@ -10039,7 +10039,7 @@ vmlal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
#define vmlal_lane_u32(a, b, c, d) \
__extension__ \
({ \
- uint32x4_t c_ = (c); \
+ uint32x2_t c_ = (c); \
uint32x2_t b_ = (b); \
uint64x2_t a_ = (a); \
uint64x2_t result; \
@@ -10480,7 +10480,7 @@ vmlaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
#define vmls_lane_f32(a, b, c, d) \
__extension__ \
({ \
- float32x4_t c_ = (c); \
+ float32x2_t c_ = (c); \
float32x2_t b_ = (b); \
float32x2_t a_ = (a); \
float32x2_t result; \
@@ -10495,7 +10495,7 @@ vmlaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
#define vmls_lane_s16(a, b, c, d) \
__extension__ \
({ \
- int16x8_t c_ = (c); \
+ int16x4_t c_ = (c); \
int16x4_t b_ = (b); \
int16x4_t a_ = (a); \
int16x4_t result; \
@@ -10509,7 +10509,7 @@ vmlaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
#define vmls_lane_s32(a, b, c, d) \
__extension__ \
({ \
- int32x4_t c_ = (c); \
+ int32x2_t c_ = (c); \
int32x2_t b_ = (b); \
int32x2_t a_ = (a); \
int32x2_t result; \
@@ -10523,7 +10523,7 @@ vmlaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
#define vmls_lane_u16(a, b, c, d) \
__extension__ \
({ \
- uint16x8_t c_ = (c); \
+ uint16x4_t c_ = (c); \
uint16x4_t b_ = (b); \
uint16x4_t a_ = (a); \
uint16x4_t result; \
@@ -10537,7 +10537,7 @@ vmlaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
#define vmls_lane_u32(a, b, c, d) \
__extension__ \
({ \
- uint32x4_t c_ = (c); \
+ uint32x2_t c_ = (c); \
uint32x2_t b_ = (b); \
uint32x2_t a_ = (a); \
uint32x2_t result; \
@@ -10895,7 +10895,7 @@ vmlsl_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
#define vmlsl_lane_s16(a, b, c, d) \
__extension__ \
({ \
- int16x8_t c_ = (c); \
+ int16x4_t c_ = (c); \
int16x4_t b_ = (b); \
int32x4_t a_ = (a); \
int32x4_t result; \
@@ -10909,7 +10909,7 @@ vmlsl_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
#define vmlsl_lane_s32(a, b, c, d) \
__extension__ \
({ \
- int32x4_t c_ = (c); \
+ int32x2_t c_ = (c); \
int32x2_t b_ = (b); \
int64x2_t a_ = (a); \
int64x2_t result; \
@@ -10923,7 +10923,7 @@ vmlsl_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
#define vmlsl_lane_u16(a, b, c, d) \
__extension__ \
({ \
- uint16x8_t c_ = (c); \
+ uint16x4_t c_ = (c); \
uint16x4_t b_ = (b); \
uint32x4_t a_ = (a); \
uint32x4_t result; \
@@ -10937,7 +10937,7 @@ vmlsl_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
#define vmlsl_lane_u32(a, b, c, d) \
__extension__ \
({ \
- uint32x4_t c_ = (c); \
+ uint32x2_t c_ = (c); \
uint32x2_t b_ = (b); \
uint64x2_t a_ = (a); \
uint64x2_t result; \
@@ -11905,7 +11905,7 @@ vmovq_n_u64 (uint64_t a)
#define vmul_lane_f32(a, b, c) \
__extension__ \
({ \
- float32x4_t b_ = (b); \
+ float32x2_t b_ = (b); \
float32x2_t a_ = (a); \
float32x2_t result; \
__asm__ ("fmul %0.2s,%1.2s,%2.s[%3]" \
@@ -11918,7 +11918,7 @@ vmovq_n_u64 (uint64_t a)
#define vmul_lane_s16(a, b, c) \
__extension__ \
({ \
- int16x8_t b_ = (b); \
+ int16x4_t b_ = (b); \
int16x4_t a_ = (a); \
int16x4_t result; \
__asm__ ("mul %0.4h,%1.4h,%2.h[%3]" \
@@ -11931,7 +11931,7 @@ vmovq_n_u64 (uint64_t a)
#define vmul_lane_s32(a, b, c) \
__extension__ \
({ \
- int32x4_t b_ = (b); \
+ int32x2_t b_ = (b); \
int32x2_t a_ = (a); \
int32x2_t result; \
__asm__ ("mul %0.2s,%1.2s,%2.s[%3]" \
@@ -11944,7 +11944,7 @@ vmovq_n_u64 (uint64_t a)
#define vmul_lane_u16(a, b, c) \
__extension__ \
({ \
- uint16x8_t b_ = (b); \
+ uint16x4_t b_ = (b); \
uint16x4_t a_ = (a); \
uint16x4_t result; \
__asm__ ("mul %0.4h,%1.4h,%2.h[%3]" \
@@ -11957,7 +11957,7 @@ vmovq_n_u64 (uint64_t a)
#define vmul_lane_u32(a, b, c) \
__extension__ \
({ \
- uint32x4_t b_ = (b); \
+ uint32x2_t b_ = (b); \
uint32x2_t a_ = (a); \
uint32x2_t result; \
__asm__ ("mul %0.2s, %1.2s, %2.s[%3]" \
@@ -12328,7 +12328,7 @@ vmull_high_u32 (uint32x4_t a, uint32x4_t b)
#define vmull_lane_s16(a, b, c) \
__extension__ \
({ \
- int16x8_t b_ = (b); \
+ int16x4_t b_ = (b); \
int16x4_t a_ = (a); \
int32x4_t result; \
__asm__ ("smull %0.4s,%1.4h,%2.h[%3]" \
@@ -12341,7 +12341,7 @@ vmull_high_u32 (uint32x4_t a, uint32x4_t b)
#define vmull_lane_s32(a, b, c) \
__extension__ \
({ \
- int32x4_t b_ = (b); \
+ int32x2_t b_ = (b); \
int32x2_t a_ = (a); \
int64x2_t result; \
__asm__ ("smull %0.2d,%1.2s,%2.s[%3]" \
@@ -12354,7 +12354,7 @@ vmull_high_u32 (uint32x4_t a, uint32x4_t b)
#define vmull_lane_u16(a, b, c) \
__extension__ \
({ \
- uint16x8_t b_ = (b); \
+ uint16x4_t b_ = (b); \
uint16x4_t a_ = (a); \
uint32x4_t result; \
__asm__ ("umull %0.4s,%1.4h,%2.h[%3]" \
@@ -12367,7 +12367,7 @@ vmull_high_u32 (uint32x4_t a, uint32x4_t b)
#define vmull_lane_u32(a, b, c) \
__extension__ \
({ \
- uint32x4_t b_ = (b); \
+ uint32x2_t b_ = (b); \
uint32x2_t a_ = (a); \
uint64x2_t result; \
__asm__ ("umull %0.2d, %1.2s, %2.s[%3]" \
@@ -12553,7 +12553,7 @@ vmull_u32 (uint32x2_t a, uint32x2_t b)
#define vmulq_lane_f32(a, b, c) \
__extension__ \
({ \
- float32x4_t b_ = (b); \
+ float32x2_t b_ = (b); \
float32x4_t a_ = (a); \
float32x4_t result; \
__asm__ ("fmul %0.4s, %1.4s, %2.s[%3]" \
@@ -12566,7 +12566,7 @@ vmull_u32 (uint32x2_t a, uint32x2_t b)
#define vmulq_lane_f64(a, b, c) \
__extension__ \
({ \
- float64x2_t b_ = (b); \
+ float64x1_t b_ = (b); \
float64x2_t a_ = (a); \
float64x2_t result; \
__asm__ ("fmul %0.2d,%1.2d,%2.d[%3]" \
@@ -12579,7 +12579,7 @@ vmull_u32 (uint32x2_t a, uint32x2_t b)
#define vmulq_lane_s16(a, b, c) \
__extension__ \
({ \
- int16x8_t b_ = (b); \
+ int16x4_t b_ = (b); \
int16x8_t a_ = (a); \
int16x8_t result; \
__asm__ ("mul %0.8h,%1.8h,%2.h[%3]" \
@@ -12592,7 +12592,7 @@ vmull_u32 (uint32x2_t a, uint32x2_t b)
#define vmulq_lane_s32(a, b, c) \
__extension__ \
({ \
- int32x4_t b_ = (b); \
+ int32x2_t b_ = (b); \
int32x4_t a_ = (a); \
int32x4_t result; \
__asm__ ("mul %0.4s,%1.4s,%2.s[%3]" \
@@ -12605,7 +12605,7 @@ vmull_u32 (uint32x2_t a, uint32x2_t b)
#define vmulq_lane_u16(a, b, c) \
__extension__ \
({ \
- uint16x8_t b_ = (b); \
+ uint16x4_t b_ = (b); \
uint16x8_t a_ = (a); \
uint16x8_t result; \
__asm__ ("mul %0.8h,%1.8h,%2.h[%3]" \
@@ -12618,7 +12618,7 @@ vmull_u32 (uint32x2_t a, uint32x2_t b)
#define vmulq_lane_u32(a, b, c) \
__extension__ \
({ \
- uint32x4_t b_ = (b); \
+ uint32x2_t b_ = (b); \
uint32x4_t a_ = (a); \
uint32x4_t result; \
__asm__ ("mul %0.4s, %1.4s, %2.s[%3]" \
@@ -21748,9 +21748,10 @@ vqdmlal_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d)
+vqdmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d)
{
- return __builtin_aarch64_sqdmlal_lanev4hi (__a, __b, __c, __d);
+ int16x8_t __tmp = vcombine_s16 (__c, vcreate_s16 (INT64_C (0)));
+ return __builtin_aarch64_sqdmlal_lanev4hi (__a, __b, __tmp, __d);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
@@ -21798,9 +21799,10 @@ vqdmlal_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d)
+vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d)
{
- return __builtin_aarch64_sqdmlal_lanev2si (__a, __b, __c, __d);
+ int32x4_t __tmp = vcombine_s32 (__c, vcreate_s32 (INT64_C (0)));
+ return __builtin_aarch64_sqdmlal_lanev2si (__a, __b, __tmp, __d);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
@@ -21874,9 +21876,10 @@ vqdmlsl_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d)
+vqdmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d)
{
- return __builtin_aarch64_sqdmlsl_lanev4hi (__a, __b, __c, __d);
+ int16x8_t __tmp = vcombine_s16 (__c, vcreate_s16 (INT64_C (0)));
+ return __builtin_aarch64_sqdmlsl_lanev4hi (__a, __b, __tmp, __d);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
@@ -21924,9 +21927,10 @@ vqdmlsl_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d)
+vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d)
{
- return __builtin_aarch64_sqdmlsl_lanev2si (__a, __b, __c, __d);
+ int32x4_t __tmp = vcombine_s32 (__c, vcreate_s32 (INT64_C (0)));
+ return __builtin_aarch64_sqdmlsl_lanev2si (__a, __b, __tmp, __d);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
@@ -21968,27 +21972,31 @@ vqdmlsls_lane_s32 (int64x1_t __a, int32x1_t __b, int32x4_t __c, const int __d)
/* vqdmulh */
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqdmulh_lane_s16 (int16x4_t __a, int16x8_t __b, const int __c)
+vqdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
- return __builtin_aarch64_sqdmulh_lanev4hi (__a, __b, __c);
+ int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (INT64_C (0)));
+ return __builtin_aarch64_sqdmulh_lanev4hi (__a, __tmp, __c);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqdmulh_lane_s32 (int32x2_t __a, int32x4_t __b, const int __c)
+vqdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
- return __builtin_aarch64_sqdmulh_lanev2si (__a, __b, __c);
+ int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (INT64_C (0)));
+ return __builtin_aarch64_sqdmulh_lanev2si (__a, __tmp, __c);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqdmulhq_lane_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+vqdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
{
- return __builtin_aarch64_sqdmulh_lanev8hi (__a, __b, __c);
+ int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (INT64_C (0)));
+ return __builtin_aarch64_sqdmulh_lanev8hi (__a, __tmp, __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmulhq_lane_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+vqdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
{
- return __builtin_aarch64_sqdmulh_lanev4si (__a, __b, __c);
+ int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (INT64_C (0)));
+ return __builtin_aarch64_sqdmulh_lanev4si (__a, __tmp, __c);
}
__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
@@ -22048,9 +22056,10 @@ vqdmull_high_n_s16 (int16x8_t __a, int16_t __b)
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmull_lane_s16 (int16x4_t __a, int16x8_t __b, int const __c)
+vqdmull_lane_s16 (int16x4_t __a, int16x4_t __b, int const __c)
{
- return __builtin_aarch64_sqdmull_lanev4hi (__a, __b, __c);
+ int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (INT64_C (0)));
+ return __builtin_aarch64_sqdmull_lanev4hi (__a, __tmp, __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
@@ -22096,9 +22105,10 @@ vqdmull_high_n_s32 (int32x4_t __a, int32_t __b)
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmull_lane_s32 (int32x2_t __a, int32x4_t __b, int const __c)
+vqdmull_lane_s32 (int32x2_t __a, int32x2_t __b, int const __c)
{
- return __builtin_aarch64_sqdmull_lanev2si (__a, __b, __c);
+ int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (INT64_C (0)));
+ return __builtin_aarch64_sqdmull_lanev2si (__a, __tmp, __c);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
@@ -22278,27 +22288,31 @@ vqnegs_s32 (int32x1_t __a)
/* vqrdmulh */
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqrdmulh_lane_s16 (int16x4_t __a, int16x8_t __b, const int __c)
+vqrdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
- return __builtin_aarch64_sqrdmulh_lanev4hi (__a, __b, __c);
+ int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (INT64_C (0)));
+ return __builtin_aarch64_sqrdmulh_lanev4hi (__a, __tmp, __c);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqrdmulh_lane_s32 (int32x2_t __a, int32x4_t __b, const int __c)
+vqrdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
- return __builtin_aarch64_sqrdmulh_lanev2si (__a, __b, __c);
+ int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (INT64_C (0)));
+ return __builtin_aarch64_sqrdmulh_lanev2si (__a, __tmp, __c);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqrdmulhq_lane_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+vqrdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
{
- return __builtin_aarch64_sqrdmulh_lanev8hi (__a, __b, __c);
+ int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (INT64_C (0)));
+ return __builtin_aarch64_sqrdmulh_lanev8hi (__a, __tmp, __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqrdmulhq_lane_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+vqrdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
{
- return __builtin_aarch64_sqrdmulh_lanev4si (__a, __b, __c);
+ int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (INT64_C (0)));
+ return __builtin_aarch64_sqrdmulh_lanev4si (__a, __tmp, __c);
}
__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index afe99a280..3b933f724 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,8 @@
+2012-10-08 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/54858
+ * tree.c (cp_tree_equal): Handle FIELD_DECL.
+
2012-10-03 Jakub Jelinek <jakub@redhat.com>
PR c++/54777
diff --git a/gcc/cp/tree.c b/gcc/cp/tree.c
index de9e0f601..2878ba576 100644
--- a/gcc/cp/tree.c
+++ b/gcc/cp/tree.c
@@ -1,7 +1,7 @@
/* Language-dependent node constructors for parse phase of GNU compiler.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009, 2010, 2011
- Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009, 2010, 2011,
+ 2012 Free Software Foundation, Inc.
Hacked by Michael Tiemann (tiemann@cygnus.com)
This file is part of GCC.
@@ -2366,6 +2366,7 @@ cp_tree_equal (tree t1, tree t2)
case VAR_DECL:
case CONST_DECL:
+ case FIELD_DECL:
case FUNCTION_DECL:
case TEMPLATE_DECL:
case IDENTIFIER_NODE:
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index acdf7958f..0ffc7a7d3 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -11268,10 +11268,12 @@ section on @code{EIND} and linker stubs below.
@item -mshort-calls
@opindex mshort-calls
+This option has been deprecated and will be removed in GCC 4.8.
+See @code{-mrelax} for a replacement.
+
Use @code{RCALL}/@code{RJMP} instructions even on devices with
16@tie{}KiB or more of program memory, i.e.@: on devices that
have the @code{CALL} and @code{JMP} instructions.
-See also the @code{-mrelax} command line option.
@item -msp8
@opindex msp8
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index fc827cb70..688f57238 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,9 @@
+2012-10-14 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/54784
+ * trans-stmt.c (gfc_trans_allocate): Correctly determine the reference
+ to the _data component for polymorphic allocation with SOURCE.
+
2012-09-20 Release Manager
* GCC 4.7.2 released.
diff --git a/gcc/fortran/trans-stmt.c b/gcc/fortran/trans-stmt.c
index bb3a89084..630816ed4 100644
--- a/gcc/fortran/trans-stmt.c
+++ b/gcc/fortran/trans-stmt.c
@@ -5087,7 +5087,7 @@ gfc_trans_allocate (gfc_code * code)
gfc_actual_arglist *actual;
gfc_expr *ppc;
gfc_code *ppc_code;
- gfc_ref *dataref;
+ gfc_ref *ref, *dataref;
/* Do a polymorphic deep copy. */
actual = gfc_get_actual_arglist ();
@@ -5099,13 +5099,15 @@ gfc_trans_allocate (gfc_code * code)
actual->next->expr->ts.type = BT_CLASS;
gfc_add_data_component (actual->next->expr);
- dataref = actual->next->expr->ref;
+ dataref = NULL;
/* Make sure we go up through the reference chain to
the _data reference, where the arrayspec is found. */
- while (dataref->next && dataref->next->type != REF_ARRAY)
- dataref = dataref->next;
+ for (ref = actual->next->expr->ref; ref; ref = ref->next)
+ if (ref->type == REF_COMPONENT
+ && strcmp (ref->u.c.component->name, "_data") == 0)
+ dataref = ref;
- if (dataref->u.c.component->as)
+ if (dataref && dataref->u.c.component->as)
{
int dim;
gfc_expr *temp;
diff --git a/gcc/po/ChangeLog b/gcc/po/ChangeLog
index 60e1ccd53..23c448f83 100644
--- a/gcc/po/ChangeLog
+++ b/gcc/po/ChangeLog
@@ -1,3 +1,7 @@
+2012-10-08 Joseph Myers <joseph@codesourcery.com>
+
+ * es.po: Update.
+
2012-09-26 Joseph Myers <joseph@codesourcery.com>
* sv.po: Update.
diff --git a/gcc/po/es.po b/gcc/po/es.po
index 245d16d71..07878db19 100644
--- a/gcc/po/es.po
+++ b/gcc/po/es.po
@@ -1,4 +1,4 @@
-# Mensajes en español para gcc-4.7.1.
+# Mensajes en español para gcc-4.7.2.
# Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
# This file is distributed under the same license as the gcc package.
# Cristian Othón Martínez Vera <cfuga@cfuga.mx>, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
@@ -7,10 +7,10 @@
#
msgid ""
msgstr ""
-"Project-Id-Version: gcc 4.7.1\n"
+"Project-Id-Version: gcc 4.7.2\n"
"Report-Msgid-Bugs-To: http://gcc.gnu.org/bugs.html\n"
"POT-Creation-Date: 2012-09-19 14:50+0000\n"
-"PO-Revision-Date: 2012-06-22 13:49-0500\n"
+"PO-Revision-Date: 2012-09-24 13:50-0500\n"
"Last-Translator: Cristian Othón Martínez Vera <cfuga@cfuga.mx>\n"
"Language-Team: Spanish <es@li.org>\n"
"Language: es\n"
@@ -7589,10 +7589,8 @@ msgid "Follow Renesas (formerly Hitachi) / SuperH calling conventions"
msgstr "Sigue las convenciones de llamada Renesas (anteriormente Hitachi) / SuperH"
#: config/sh/sh.opt:274
-#, fuzzy
-#| msgid "Increase the IEEE compliance for floating-point code"
msgid "Increase the IEEE compliance for floating-point comparisons"
-msgstr "Incrementa el cumplimiento con IEEE para el código de coma flotante"
+msgstr "Incrementa el cumplimiento con IEEE para las comparaciones de coma flotante"
#: config/sh/sh.opt:278
msgid "Enable the use of the indexed addressing mode for SHmedia32/SHcompact"
@@ -12044,7 +12042,7 @@ msgstr "se esperaba una expresión booleana"
#: go/gofrontend/statements.cc:4198
msgid "cannot type switch on non-interface value"
-msgstr ""
+msgstr "no se puede cambiar el tipo en valores que no son de interfaz"
#: go/gofrontend/statements.cc:4320
msgid "incompatible types in send"
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index f94e944cb..bf0106147 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,24 @@
+2012-10-14 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/54784
+ * gfortran.dg/class_allocate_13.f90: New.
+
+2012-10-08 Terry Guo <terry.guo@arm.com>
+
+ Backported from mainline
+ 2012-09-19 Terry Guo <terry.guo@arm.com>
+
+ * lib/gcc-dg.exp (dg_runtest_extra_prunes): New variable to define
+ extra prune rules that will be applied to all tests in a .exp file.
+ (gcc-dg-prune): Use rules defined by the above variable.
+ * gcc.target/arm/arm.exp (dg_runtest_extra_prunes): Skip all the
+ harmless warnings on architecture switch conflict.
+
+2012-10-08 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/54858
+ * g++.dg/template/pr54858.C: New test.
+
2012-10-05 Jakub Jelinek <jakub@redhat.com>
PR tree-optimization/33763
diff --git a/gcc/testsuite/ChangeLog.aarch64 b/gcc/testsuite/ChangeLog.aarch64
index 5b577d5ba..914fad33c 100644
--- a/gcc/testsuite/ChangeLog.aarch64
+++ b/gcc/testsuite/ChangeLog.aarch64
@@ -1,3 +1,18 @@
+2012-10-16 Tejas Belagod <tejas.belagod@arm.com>
+
+ * gcc.target/aarch64/vector_intrinsics.c: Update tests to reflect
+ changes and introduce new tests for the new intrinsics.
+
+2012-10-15 Chris Schlumberger-Socha <chris.schlumberger-socha@arm.com>
+
+ * gcc.target/aarch64/predefine_large.c: New test.
+ * gcc.target/aarch64/predefine_small.c: New test.
+ * gcc.target/aarch64/predefine_tiny.c: New test.
+ * lib/target-supports.exp
+ (check_effective_target_aarch64_tiny): New.
+ (check_effective_target_aarch64_small): New.
+ (check_effective_target_aarch64_large): New.
+
2012-09-25 Tejas Belagod <tejas.belagod@arm.com>
* testsuite/lib/target-supports.exp
diff --git a/gcc/testsuite/g++.dg/template/pr54858.C b/gcc/testsuite/g++.dg/template/pr54858.C
new file mode 100644
index 000000000..51610ad68
--- /dev/null
+++ b/gcc/testsuite/g++.dg/template/pr54858.C
@@ -0,0 +1,21 @@
+// PR c++/54858
+// { dg-do compile }
+
+template <int> struct A {};
+template <typename T, T *> struct B {};
+template <typename D> struct C
+{
+ A<0> c0; B<A<0>, &C::c0> d0; // { dg-error "could not convert template argument" }
+ A<0> c1; B<A<0>, &C::c1> d1; // { dg-error "could not convert template argument" }
+ A<0> c2; B<A<0>, &C::c2> d2; // { dg-error "could not convert template argument" }
+ A<0> c3; B<A<0>, &C::c3> d3; // { dg-error "could not convert template argument" }
+ A<0> c4; B<A<0>, &C::c4> d4; // { dg-error "could not convert template argument" }
+ A<0> c5; B<A<0>, &C::c5> d5; // { dg-error "could not convert template argument" }
+ A<0> c6; B<A<0>, &C::c6> d6; // { dg-error "could not convert template argument" }
+ A<0> c7; B<A<0>, &C::c7> d7; // { dg-error "could not convert template argument" }
+ A<0> c8; B<A<0>, &C::c8> d8; // { dg-error "could not convert template argument" }
+ A<0> c9; B<A<0>, &C::c9> d9; // { dg-error "could not convert template argument" }
+ A<0> ca; B<A<0>, &C::ca> da; // { dg-error "could not convert template argument" }
+ A<0> cb; B<A<0>, &C::cb> db; // { dg-error "could not convert template argument" }
+};
+C<int> e;
diff --git a/gcc/testsuite/gcc.target/aarch64/predefine_large.c b/gcc/testsuite/gcc.target/aarch64/predefine_large.c
new file mode 100644
index 000000000..0d7d4da47
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/predefine_large.c
@@ -0,0 +1,7 @@
+/* { dg-skip-if "Code model already defined" { aarch64_tiny || aarch64_small } } */
+
+#ifdef __AARCH64_CMODEL_LARGE__
+ int dummy;
+#else
+ #error
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/predefine_small.c b/gcc/testsuite/gcc.target/aarch64/predefine_small.c
new file mode 100644
index 000000000..b1362845c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/predefine_small.c
@@ -0,0 +1,7 @@
+/* { dg-skip-if "Code model already defined" { aarch64_tiny || aarch64_large } } */
+
+#ifdef __AARCH64_CMODEL_SMALL__
+ int dummy;
+#else
+ #error
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/predefine_tiny.c b/gcc/testsuite/gcc.target/aarch64/predefine_tiny.c
new file mode 100644
index 000000000..d2c844bac
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/predefine_tiny.c
@@ -0,0 +1,7 @@
+/* { dg-skip-if "Code model already defined" { aarch64_small || aarch64_large } } */
+
+#ifdef __AARCH64_CMODEL_TINY__
+ int dummy;
+#else
+ #error
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/vector_intrinsics.c b/gcc/testsuite/gcc.target/aarch64/vector_intrinsics.c
index 7bc9caf4e..affb8a8a1 100644
--- a/gcc/testsuite/gcc.target/aarch64/vector_intrinsics.c
+++ b/gcc/testsuite/gcc.target/aarch64/vector_intrinsics.c
@@ -325,7 +325,7 @@ test_vqdmlal_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
/* { dg-final { scan-assembler-times "\\tsqdmlal\\tv\[0-9\]+\.4s, v\[0-9\]+\.4h, v\[0-9\]+\.h" 3 } } */
int32x4_t
-test_vqdmlal_lane_s16 (int32x4_t a, int16x4_t b, int16x8_t c)
+test_vqdmlal_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
{
return vqdmlal_lane_s16 (a, b, c, 3);
}
@@ -381,7 +381,7 @@ test_vqdmlal_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
/* { dg-final { scan-assembler-times "\\tsqdmlal\\tv\[0-9\]+\.2d, v\[0-9\]+\.2s, v\[0-9\]+\.s" 3 } } */
int64x2_t
-test_vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c)
+test_vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
{
return vqdmlal_lane_s32 (__a, __b, __c, 1);
}
@@ -437,7 +437,7 @@ test_vqdmlsl_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
/* { dg-final { scan-assembler-times "\\tsqdmlsl\\tv\[0-9\]+\.4s, v\[0-9\]+\.4h, v\[0-9\]+\.h" 3 } } */
int32x4_t
-test_vqdmlsl_lane_s16 (int32x4_t a, int16x4_t b, int16x8_t c)
+test_vqdmlsl_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
{
return vqdmlsl_lane_s16 (a, b, c, 3);
}
@@ -493,7 +493,7 @@ test_vqdmlsl_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
/* { dg-final { scan-assembler-times "\\tsqdmlsl\\tv\[0-9\]+\.2d, v\[0-9\]+\.2s, v\[0-9\]+\.s" 3 } } */
int64x2_t
-test_vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c)
+test_vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
{
return vqdmlsl_lane_s32 (__a, __b, __c, 1);
}
@@ -549,7 +549,7 @@ test_vqdmull_high_n_s16 (int16x8_t __a, int16_t __b)
/* { dg-final { scan-assembler-times "\\tsqdmull\\tv\[0-9\]+\.4s, v\[0-9\]+\.4h, v\[0-9\]+\.h" 3 } } */
int32x4_t
-test_vqdmull_lane_s16 (int16x4_t a, int16x8_t b)
+test_vqdmull_lane_s16 (int16x4_t a, int16x4_t b)
{
return vqdmull_lane_s16 (a, b, 3);
}
@@ -605,7 +605,7 @@ test_vqdmull_high_n_s32 (int32x4_t __a, int32_t __b)
/* { dg-final { scan-assembler-times "\\tsqdmull\\tv\[0-9\]+\.2d, v\[0-9\]+\.2s, v\[0-9\]+\.s" 3 } } */
int64x2_t
-test_vqdmull_lane_s32 (int32x2_t __a, int32x4_t __b)
+test_vqdmull_lane_s32 (int32x2_t __a, int32x2_t __b)
{
return vqdmull_lane_s32 (__a, __b, 1);
}
diff --git a/gcc/testsuite/gcc.target/arm/arm.exp b/gcc/testsuite/gcc.target/arm/arm.exp
index 0838d37b3..dc6c16ad5 100644
--- a/gcc/testsuite/gcc.target/arm/arm.exp
+++ b/gcc/testsuite/gcc.target/arm/arm.exp
@@ -30,6 +30,11 @@ if ![info exists DEFAULT_CFLAGS] then {
set DEFAULT_CFLAGS " -ansi -pedantic-errors"
}
+# This variable should only apply to tests called in this exp file.
+global dg_runtest_extra_prunes
+set dg_runtest_extra_prunes ""
+lappend dg_runtest_extra_prunes "warning: switch -m(cpu|arch)=.* conflicts with -m(cpu|arch)=.* switch"
+
# Initialize `dg'.
dg-init
@@ -39,3 +44,5 @@ dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \
# All done.
dg-finish
+
+set dg_runtest_extra_prunes ""
diff --git a/gcc/testsuite/gfortran.dg/class_allocate_13.f90 b/gcc/testsuite/gfortran.dg/class_allocate_13.f90
new file mode 100644
index 000000000..64f37dc59
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/class_allocate_13.f90
@@ -0,0 +1,31 @@
+! { dg-do run }
+!
+! PR 54784: [4.7/4.8 Regression] [OOP] wrong code in polymorphic allocation with SOURCE
+!
+! Contributed by Jeremy Kozdon <jkozdon@gmail.com>
+
+program bug
+ implicit none
+
+ type :: block
+ real, allocatable :: fields
+ end type
+
+ type :: list
+ class(block),allocatable :: B
+ end type
+
+ type :: domain
+ type(list),dimension(2) :: L
+ end type
+
+ type(domain) :: d
+ type(block) :: b1
+
+ allocate(b1%fields,source=5.)
+
+ allocate(d%L(2)%B,source=b1) ! wrong code
+
+ if (d%L(2)%B%fields/=5.) call abort()
+
+end program
diff --git a/gcc/testsuite/lib/gcc-dg.exp b/gcc/testsuite/lib/gcc-dg.exp
index b6a73fe0d..8d8c53870 100644
--- a/gcc/testsuite/lib/gcc-dg.exp
+++ b/gcc/testsuite/lib/gcc-dg.exp
@@ -211,9 +211,13 @@ proc gcc-dg-test { prog do_what extra_tool_flags } {
proc gcc-dg-prune { system text } {
global additional_prunes
+ # Extra prune rules that will apply to tests defined in a .exp file.
+ # Always remember to clear it in .exp file after executed all tests.
+ global dg_runtest_extra_prunes
+
set text [prune_gcc_output $text]
- foreach p $additional_prunes {
+ foreach p "$additional_prunes $dg_runtest_extra_prunes" {
if { [string length $p] > 0 } {
# Following regexp matches a complete line containing $p.
regsub -all "(^|\n)\[^\n\]*$p\[^\n\]*" $text "" text
@@ -672,6 +676,7 @@ if { [info procs saved-dg-test] == [list] } {
set additional_files ""
set additional_sources ""
set additional_prunes ""
+ set dg_runtest_extra_prunes ""
set shouldfail 0
if [info exists compiler_conditional_xfail_data] {
unset compiler_conditional_xfail_data
@@ -809,3 +814,4 @@ proc gdb-exists { args } {
}
set additional_prunes ""
+set dg_runtest_extra_prunes ""
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 3c5e91a78..4cb962eee 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -4662,3 +4662,45 @@ proc check_effective_target_ucontext_h { } {
#include <ucontext.h>
}]
}
+
+proc check_effective_target_aarch64_tiny { } {
+ if { [istarget aarch64*-*-*] } {
+ return [check_no_compiler_messages aarch64_tiny object {
+ #ifdef __AARCH64_CMODEL_TINY__
+ int dummy;
+ #else
+ #error target not AArch64 tiny code model
+ #endif
+ }]
+ } else {
+ return 0
+ }
+}
+
+proc check_effective_target_aarch64_small { } {
+ if { [istarget aarch64*-*-*] } {
+ return [check_no_compiler_messages aarch64_small object {
+ #ifdef __AARCH64_CMODEL_SMALL__
+ int dummy;
+ #else
+ #error target not AArch64 small code model
+ #endif
+ }]
+ } else {
+ return 0
+ }
+}
+
+proc check_effective_target_aarch64_large { } {
+ if { [istarget aarch64*-*-*] } {
+ return [check_no_compiler_messages aarch64_large object {
+ #ifdef __AARCH64_CMODEL_LARGE__
+ int dummy;
+ #else
+ #error target not AArch64 large code model
+ #endif
+ }]
+ } else {
+ return 0
+ }
+}
diff --git a/libgfortran/ChangeLog b/libgfortran/ChangeLog
index cfddafe90..a9c38c810 100644
--- a/libgfortran/ChangeLog
+++ b/libgfortran/ChangeLog
@@ -1,3 +1,11 @@
+2012-10-12 Thomas König <tkoenig@gcc.gnu.org>
+
+ PR libfortran/54736
+ Backport from trunk
+ * runtime/environ.c (search_unit): Correct logic
+ for binary search.
+ (mark_single): Fix index errors.
+
2012-09-20 Release Manager
* GCC 4.7.2 released.
diff --git a/libgfortran/runtime/environ.c b/libgfortran/runtime/environ.c
index 6bd88865d..a7bda45ce 100644
--- a/libgfortran/runtime/environ.c
+++ b/libgfortran/runtime/environ.c
@@ -446,21 +446,35 @@ search_unit (int unit, int *ip)
{
int low, high, mid;
- low = -1;
- high = n_elist;
- while (high - low > 1)
+ if (n_elist == 0)
+ {
+ *ip = 0;
+ return 0;
+ }
+
+ low = 0;
+ high = n_elist - 1;
+
+ do
{
mid = (low + high) / 2;
- if (unit <= elist[mid].unit)
- high = mid;
+ if (unit == elist[mid].unit)
+ {
+ *ip = mid;
+ return 1;
+ }
+ else if (unit > elist[mid].unit)
+ low = mid + 1;
else
- low = mid;
- }
- *ip = high;
- if (elist[high].unit == unit)
- return 1;
+ high = mid - 1;
+ } while (low <= high);
+
+ if (unit > elist[mid].unit)
+ *ip = mid + 1;
else
- return 0;
+ *ip = mid;
+
+ return 0;
}
/* This matches a keyword. If it is found, return the token supplied,
@@ -575,13 +589,13 @@ mark_single (int unit)
}
if (search_unit (unit, &i))
{
- elist[unit].conv = endian;
+ elist[i].conv = endian;
}
else
{
- for (j=n_elist; j>=i; j--)
+ for (j=n_elist-1; j>=i; j--)
elist[j+1] = elist[j];
-
+
n_elist += 1;
elist[i].unit = unit;
elist[i].conv = endian;
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index 4652d5c53..71de3de2a 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,9 @@
+2012-10-10 Andrew MacLeod <amacleod@redhat.com>
+
+ PR libstdc++/54861
+ * include/bits/atomic_base.h (atomic_signal_fence): Call
+ __atomic_signal_fence instead of __atomic_thread_fence.
+
2012-09-20 Benjamin Kosnik <bkoz@redhat.com>
PR libstdc++/54102, part 2
diff --git a/libstdc++-v3/include/bits/atomic_base.h b/libstdc++-v3/include/bits/atomic_base.h
index 9d5f4eb6f..bd14e35cd 100644
--- a/libstdc++-v3/include/bits/atomic_base.h
+++ b/libstdc++-v3/include/bits/atomic_base.h
@@ -73,7 +73,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
inline void
atomic_signal_fence(memory_order __m) noexcept
- { __atomic_thread_fence(__m); }
+ { __atomic_signal_fence(__m); }
/// kill_dependency
template<typename _Tp>